From 44419b9741a88a1f4ad22b5d1b7a18dfa4827505 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 30 Nov 2023 03:50:09 +1100 Subject: [PATCH 0001/1088] Initial commit --- .gitignore | 160 ++++++++++++++++++++++++++++++++++++++++++ LICENSE | 201 +++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 2 + 3 files changed, 363 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..68bc17f9ff --- /dev/null +++ b/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000000..58e729f8fa --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# unsloth +2x faster 50% less memory LLM finetuning on a single GPU From d45e40ad71fa23ed3b5c2b79c6835967a048f1b1 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 03:51:54 +1100 Subject: [PATCH 0002/1088] First upload of Unsloth code --- README.md | 36 +- pyproject.toml | 51 ++ unsloth/__init__.py | 16 + unsloth/kernels/__init__.py | 24 + unsloth/kernels/cross_entropy_loss.py | 167 ++++++ unsloth/kernels/fast_lora.py | 414 +++++++++++++++ unsloth/kernels/rms_layernorm.py | 149 ++++++ unsloth/kernels/rope_embedding.py | 178 +++++++ unsloth/kernels/swiglu.py | 88 +++ unsloth/kernels/utils.py | 93 ++++ unsloth/models/__init__.py | 44 ++ unsloth/models/_utils.py | 61 +++ unsloth/models/llama.py | 734 ++++++++++++++++++++++++++ 13 files changed, 2053 insertions(+), 2 deletions(-) create mode 100644 pyproject.toml create mode 100644 unsloth/__init__.py create mode 100644 unsloth/kernels/__init__.py create mode 100644 unsloth/kernels/cross_entropy_loss.py create mode 100644 unsloth/kernels/fast_lora.py create mode 100644 unsloth/kernels/rms_layernorm.py create mode 100644 unsloth/kernels/rope_embedding.py create mode 100644 unsloth/kernels/swiglu.py create mode 100644 unsloth/kernels/utils.py create mode 100644 unsloth/models/__init__.py create mode 100644 unsloth/models/_utils.py create mode 100644 unsloth/models/llama.py diff --git a/README.md b/README.md index 58e729f8fa..22e0400d1a 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,34 @@ -# unsloth -2x faster 50% less memory LLM finetuning on a single GPU +# Unsloth +2x faster 50% less memory LLM finetuning on a single GPU. + +`!pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git"` +`!pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git"` + + +### Google Colab examples +1. 
[Unsloth fast finetuning example](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) +2. [Original slow finetuning example](https://colab.research.google.com/drive/1c7zxdLHaLJ9R9YTZ74y4tUERvS-kySyA?usp=sharing) + +### Installation instructions +In Google Colab: +``` +!ldconfig /usr/lib64-nvidia +!pip install xformers --index-url https://download.pytorch.org/whl/cu118 +!pip install git+https://github.com/danielhanchen/unsloth.git +``` +`!ldconfig /usr/lib64-nvidia` is necessary (for now) to link CUDA with Python. Possibly a Google Colab linking bug. + +For general installations: +1. Install Xformers *OR* Flash Attention. Choose 1. Old GPUs use Xformers. New use Flash Attention. +2. For Xformers, find your Pytorch CUDA version via `torch.version.cuda` or `nvidia-smi`. + * If you have Conda, `conda install xformers -c xformers` + * If you have CUDA 11.8, `pip install xformers --index-url https://download.pytorch.org/whl/cu118` + * If you have CUDA 12.1, `pip install xformers --index-url https://download.pytorch.org/whl/cu121` + * Go to https://github.com/facebookresearch/xformers for other issues. + * You must have Pytorch 2.1 installed for Xformers. If not, try Flash Attention. + * Xformers supports all GPUs (Tesla T4 etc). +3. For Flash Attention, you must have a Ampere, Ada, Hopper GPU (A100, RTX 3090, RTX 4090, H100). + * Install Flash Attention via `pip uninstall -y ninja && pip install ninja` then `pip install flash-attn --no-build-isolation`. + * Xformers has native support for Flash Attention, so technically installing Xformers is enough. +4. 
Then install Unsloth: + `pip install git+https://github.com/danielhanchen/unsloth.git` diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..0bfcfe9b97 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,51 @@ +[build-system] +requires = ["setuptools", "setuptools-scm"] +build-backend = "setuptools.build_meta" + +[project] +name = "unsloth" +version = "2023.11" +description = "2X faster LLM finetuning" +readme = "README.md" +requires-python = ">=3.9" +license = {file = "LICENSE"} +keywords = ["ai", "llm",] +authors = [ + {email = "info@unsloth.ai"}, + {name = "Unsloth AI team"}, +] +maintainers = [ + {name = "Daniel Han", email = "danielhanchen@gmail.com"}, + {name = "Michael Han", email = "info@unsloth.ai"}, +] +classifiers = [ + "Programming Language :: Python", +] + +dependencies = [ + "transformers", + "bitsandbytes", + "datasets", + "sentencepiece", + "accelerate", + "trl", + "peft", + "torch>=2.1.0", +] + +[project.optional-dependencies] +cu118 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", +] +cu121 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11' and platform_system=='Linux'", +] + +[project.urls] +homepage = "http://www.unsloth.ai" +documentation = "https://github.com/unslothai/unsloth" +repository = "https://github.com/unslothai/unsloth" \ No newline at end of file diff --git a/unsloth/__init__.py b/unsloth/__init__.py new file mode 100644 index 0000000000..c7f3ea94cf --- /dev/null +++ b/unsloth/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +__version__ = "2023.11" + +from .models import * diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py new file mode 100644 index 0000000000..711169e06d --- /dev/null +++ b/unsloth/kernels/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .cross_entropy_loss import fast_cross_entropy_loss +from .rms_layernorm import fast_rms_layernorm +from .rope_embedding import fast_rope_embedding, inplace_rope_embedding +from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel +from .fast_lora import ( + apply_lora_mlp, + apply_lora_qkv, + apply_lora_o, +) +from .utils import fast_dequantize, QUANT_STATE diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py new file mode 100644 index 0000000000..175caab0d7 --- /dev/null +++ b/unsloth/kernels/cross_entropy_loss.py @@ -0,0 +1,167 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + +@triton.jit +def _cross_entropy_forward(logits_ptr, logits_row_stride, + loss_ptr, + lse_ptr, + labels_ptr, + n_cols, + BLOCK_SIZE: tl.constexpr,): + """ + Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] + Pi = exp(xi) / sum(exp(xi)) + CE_i = -y log(p) = -y log[ exp(x) / sum(exp(x)) ] + = -y [ x - log[sum(exp(x))] ] + = y * (log[sum(exp(x))] - x) + If y == 0: CE_i = 0 + If y == 1: CE_i = logsumexp - x + """ + row_idx = tl.program_id(0) + logits_ptr += row_idx * logits_row_stride + loss_ptr += row_idx + lse_ptr += row_idx + labels_ptr += row_idx + + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + # TODO: Fixup int32 locations to int64 + # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 + label_idx = tl.load(labels_ptr).to(tl.int32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + max_logits = tl.max(logits, 0) + # Maximum stops overflow + lse = tl.log(tl.sum(tl.exp(logits - max_logits), 0)) + max_logits + tl.store(lse_ptr, lse) + + if label_idx != -100: + logits_label = tl.load(logits_ptr + label_idx).to(tl.float32) + loss = lse - logits_label + else: + loss = 0.0 + tl.store(loss_ptr, loss) +pass + + +@triton.jit +def _cross_entropy_backward(logits_ptr, logits_row_stride, + dloss_ptr, dloss_row_stride, + lse_ptr, + labels_ptr, + n_cols, + BLOCK_SIZE: tl.constexpr,): + """ + CE_i = -y log(P) = y * (log[sum(exp(x))] - x) + dC/dx = d/dx (y * log[sum(exp(x))] - x * y) + + From https://en.wikipedia.org/wiki/LogSumExp + d/dx logsumexp = exp(x) / sum(exp(x)) = softmax(x) + + dC/dx = y * exp(x) / sum(exp(x)) - d/dx (x * y) + dC/dx = y * exp[ log[exp(x) / sum(exp(x))] ] using x = exp(log(x)) trick + dC/dx = y * exp[x - logsumexp] - d/dx (x * y) + + If y == 0: dC/dx = 0 + If y == 1 and x == label: dC/dlabel = exp[x - logsumexp] - 1 + If y == 1 and x != label: 
dC/dx = exp[x - logsumexp] + """ + row_idx = tl.program_id(0) + logits_ptr += row_idx * logits_row_stride + dloss_ptr += row_idx * dloss_row_stride + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + # TODO: Fixup int32 locations to int64 + # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 + label_idx = tl.load(labels_ptr + row_idx).to(tl.int32) + + if label_idx != -100: + dloss = tl.load(dloss_ptr) + else: + dloss = 0.0 + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = 0).to(tl.float32) + lse = tl.load(lse_ptr + row_idx) + probs = tl.exp(logits - lse) + + probs = tl.where(col_offsets == label_idx, probs - 1.0, probs) + tl.store(logits_ptr + col_offsets, dloss * probs, mask = mask) +pass + + +class Fast_CrossEntropyLoss(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, labels): + n_rows, n_cols = logits.shape + BLOCK_SIZE, num_warps = calculate_settings(n_cols) + losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + + _cross_entropy_forward[(n_rows,)]( + logits, logits.stride(0), + losses, + logsumexp, + labels, + n_cols, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps + ctx.save_for_backward(logits, logsumexp, labels) + return losses + pass + + @staticmethod + def backward(ctx, dlosses): + logits, logsumexp, labels = ctx.saved_tensors + n_rows, n_cols = logits.shape + + _cross_entropy_backward[(n_rows,)]( + logits, logits.stride(0), + dlosses, dlosses.stride(0), + logsumexp, + labels, + n_cols, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) + return logits, None, None, + pass +pass + + +def fast_cross_entropy_loss(logits, labels): + """ + Arguments: + logits: (batch, seq_len, vocab_size) + labels: (batch, seq_len,) + Returns: + losses: float + """ + batch, seq_len, d = logits.shape + 
assert(labels.shape == (batch, seq_len)) + + loss = Fast_CrossEntropyLoss.apply( + logits.view(batch*seq_len, d), + labels.view(-1), + ) + n_items = torch.count_nonzero(labels != -100) + return loss.sum() / n_items +pass diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py new file mode 100644 index 0000000000..2e70444653 --- /dev/null +++ b/unsloth/kernels/fast_lora.py @@ -0,0 +1,414 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +from .utils import fast_dequantize, QUANT_STATE +from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel + +def get_lora_parameters(proj): + active_adapter = proj.active_adapters[0] if \ + hasattr(proj, "active_adapters") else proj.active_adapter + base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) + W = base_layer.weight + A = proj.lora_A [active_adapter].weight + B = proj.lora_B [active_adapter].weight + s = proj.scaling[active_adapter] + return W, QUANT_STATE(W), A, B, s +pass + + +def matmul_lora(X, W, W_quant, A, B, s, out = None): + dtype = X.dtype + W = fast_dequantize(W.t(), W_quant) + A, B = A.t(), B.t() + + if X.dim() == 3: + batch, seq_len, d = X.shape + X = X.view(-1, X.shape[-1]) + reshape = True + else: + reshape = False + pass + + out = torch.matmul(X, W, out = out) + if W_quant is not None: del W + out += (X @ A) @ (s * B) + return out.view(batch, seq_len, -1) if reshape else out +pass + + +class LoRA_MLP(torch.autograd.Function): + """ + ### LoRA weights + G = G + Ag @ Bg + U = U + Au @ Bu + W = W + Aw @ Bw + + ### SwiGLU(X) + e = X @ G + f = e * sigmoid(e) + g = X @ U + h = f * g + i = h @ W + + ### Backpropagation chain rule + df = sigmoid(e) * (1 - f) + f + dC/dW = h.T @ dY + dC/dU = X.T @ (D @ W.T * f) + dC/dG = X.T @ (D @ W.T * df * g) + dC/dX = (D @ W.T * f) @ U.T + + (D @ W.T * df * g) @ G.T + + ### Down projection LoRA weights + dC/dAw = dC/dW @ B.T + dC/dBw = A.T @ dC/dW + dC/dAw = h.T @ dY @ B.T + dC/dBw = A.T @ h.T @ dY + + ### Up projection LoRA weights + dC/dAu = X.T @ (D @ W.T * f) @ B.T + dC/dBu = A.T @ X.T @ (D @ W.T * f) + + ### Gate projection LoRA weights + dC/dAg = X.T @ (D @ W.T * df * g) @ B.T + dC/dBg = A.T @ X.T @ (D @ W.T * df * g) + """ + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, X : torch.Tensor, + gateW, gateW_quant, gateA, gateB, gateS, + upW, upW_quant, upA, upB, upS, + downW, downW_quant, downA, downB, downS): + dtype = X.dtype + + e = matmul_lora(X, 
gateW, gateW_quant, gateA, gateB, gateS) + g = matmul_lora(X, upW, upW_quant, upA, upB, upS) + h = swiglu_fg_kernel(e, g) + i = matmul_lora(h, downW, downW_quant, downA, downB, downS) + + ctx.custom_saved_tensors = ( + gateW, gateW_quant, gateS, + upW, upW_quant, upS, + downW, downW_quant, downS, + ) + ctx.save_for_backward(gateA, gateB, upA, upB, downA, downB, + X, e, g) + return i + pass + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, dY : torch.Tensor): + gateW, gateW_quant, gateS, upW, upW_quant, upS, downW, downW_quant, downS, = \ + ctx.custom_saved_tensors + gateA, gateB, upA,upB, downA, downB, \ + X, e, g = ctx.saved_tensors + + gateA, gateB, upA,upB, downA, downB = \ + gateA.t(), gateB.t(), upA.t(), upB.t(), downA.t(), downB.t() + + batch, seq_len, hd = X.shape + dY = dY.view(-1, dY.shape[-1]) + X = X .view(-1, X .shape[-1]) + e = e .view(-1, e .shape[-1]) + g = g .view(-1, g .shape[-1]) + dtype = X.dtype + + # DW_f = (D @ W.T * f) + # DW_dfg = (D @ W.T * df * g) + DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) + DW, e, g = swiglu_DWf_DW_dfg_kernel(DW, e, g) + h, DW_f, DW_dfg = DW, e, g # Inplace replacements + # se = torch.nn.functional.sigmoid(e) + # f = e * se + # h = f * g + # df = se * (1 - f) + f + # DW_f = DW * f + # DW_dfg = DW * df * g + + # Down projection LoRA weights + d_downA = h.t() @ (dY @ downB.t()) + d_downB = (downA.t() @ h.t()) @ dY + d_downA *= downS + d_downB *= downS + + # Up projection LoRA weights + d_upA = X.t() @ (DW_f @ upB.t()) + d_upB = (upA.t() @ X.t()) @ DW_f + d_upA *= upS + d_upB *= upS + + # Gate projection LoRA weights + d_gateA = X.t() @ (DW_dfg @ gateB.t()) + d_gateB = (gateA.t() @ X.t() @ DW_dfg) + d_gateA *= gateS + d_gateB *= gateS + + # dC/dX = (D @ W.T * f) @ (U.T + B.T @ A.T) + # + (D @ W.T * df * g) @ (G.T + B.T @ A.T) + # (D @ W.T * f) @ U.T + upW = fast_dequantize(upW.t(), upW_quant) + # (D @ W.T * f) @ (U.T + B.T @ A.T) + dX = torch.matmul(DW_f, upW.t(), out = X) + del upW 
+ dX += DW_f @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) + + # (D @ W.T * f) @ (U.T + B.T @ A.T) + (D @ W.T * df * g) @ G.T + gateW = fast_dequantize(gateW.t(), gateW_quant) + # (D @ W.T * f) @ (U.T + B.T @ A.T) + (D @ W.T * df * g) @ (G.T + B.T @ A.T) + dX += DW_dfg @ gateW.t() + del gateW + dX += DW_dfg @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) + + # gateW, gateW_quant, gateA, gateB, gateS, + # upW, upW_quant, upA, upB, upS, + # downW, downW_quant, downA, downB, downS, + return dX.view(batch, seq_len, hd), \ + None, None, d_gateA.t(), d_gateB.t(), None, \ + None, None, d_upA.t(), d_upB.t(), None, \ + None, None, d_downA.t(), d_downB.t(), None, + pass +pass + + +def apply_lora_mlp(self, X): + gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) + upW, upW_quant, upA, upB, upS = get_lora_parameters(self. up_proj) + downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) + out = LoRA_MLP.apply(X, + gateW, gateW_quant, gateA, gateB, gateS, + upW, upW_quant, upA, upB, upS, + downW, downW_quant, downA, downB, downS) + return out +pass + + +class LoRA_QKV(torch.autograd.Function): + """ + ### LoRA weights + Wq = Wq + Aq @ Bq + Wk = Wk + Ak @ Bk + Wv = Wv + Av @ Bv + Q = X @ Wq = X @ Wq + X @ Aq @ Bq + K = X @ Wk = X @ Wk + X @ Ak @ Bk + V = X @ Wv = X @ Wv + X @ Av @ Bv + + ### Backpropagation chain rule + dC/dWq = X.T @ D(Wq) + dC/dWk = X.T @ D(Wk) + dC/dWv = X.T @ D(Wv) + dC/dX = D(Wq) @ Wq.T + + D(Wk) @ Wk.T + + D(Wv) @ Wv.T + + ### Q projection LoRA weights + dC/dAq = X.T @ D(Wq) @ B.T + dC/dBq = A.T @ X.T @ D(Wq) + + ### K projection LoRA weights + dC/dAk = X.T @ D(Wk) @ B.T + dC/dBk = A.T @ X.T @ D(Wk) + + ### V projection LoRA weights + dC/dAv = X.T @ D(Wv) @ B.T + dC/dBv = A.T @ X.T @ D(Wv) + """ + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, X : torch.Tensor, + QW, QW_quant, QA, QB, QS, + KW, KW_quant, KA, KB, KS, + VW, VW_quant, VA, VB, VS,): + dtype = X.dtype + + Q = 
matmul_lora(X, QW, QW_quant, QA, QB, QS) + K = matmul_lora(X, KW, KW_quant, KA, KB, KS) + V = matmul_lora(X, VW, VW_quant, VA, VB, VS) + + ctx.custom_saved_tensors = ( + QW, QW_quant, QS, + KW, KW_quant, KS, + VW, VW_quant, VS, + ) + ctx.save_for_backward(X, QA, QB, KA, KB, VA, VB,) + return Q, K, V + pass + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, dQ, dK, dV): + QW, QW_quant, QS, KW, KW_quant, KS, VW, VW_quant, VS = \ + ctx.custom_saved_tensors + X, QA, QB, KA, KB, VA, VB, = ctx.saved_tensors + + QA, QB, KA, KB, VA, VB = \ + QA.t(), QB.t(), KA.t(), KB.t(), VA.t(), VB.t() + + batch, seq_len, hd = X.shape + dQ = dQ.view(-1, dQ.shape[-1]) + dK = dK.reshape(-1, dK.shape[-1]) # view doesn't work on K.T + dV = dV.view(-1, dV.shape[-1]) + X = X .view(-1, X .shape[-1]) + dtype = X.dtype + + ### Weight projection LoRA weights + # dC/dAq = X.T @ D(Wq) @ B.T + # dC/dBq = A.T @ X.T @ D(Wq) + + # Q Projection + d_QA = X.t() @ (dQ @ QB.t()) + d_QB = (QA.t() @ X.t()) @ dQ + d_QA *= QS + d_QB *= QS + + # K Projection + d_KA = X.t() @ (dK @ KB.t()) + d_KB = (KA.t() @ X.t()) @ dK + d_KA *= KS + d_KB *= KS + + # V Projection + d_VA = X.t() @ (dV @ VB.t()) + d_VB = (VA.t() @ X.t()) @ dV + d_VA *= VS + d_VB *= VS + + # d/dX + # dC/dX = D(Wq) @ Wq.T + QW = fast_dequantize(QW.t(), QW_quant) + # D(Wq) @ (Wq.T + B.T @ A.T) + dX = torch.matmul(dQ, QW.t(), out = X) + del QW + dX += (dQ @ QB.to(dtype).t() @ (QS * QA.to(dtype).t())) + + # D(Wq) @ Wq.T + D(Wk) @ Wk.T + KW = fast_dequantize(KW.t(), KW_quant) + # D(Wq) @ Wq.T + D(Wk) @ (Wk.T + B.T @ A.T) + dX += dK @ KW.t() + del KW + dX += dK @ KB.to(dtype).t() @ (KS * KA.to(dtype).t()) + + # D(Wq) @ Wq.T + D(Wk) @ Wk.T + D(Wv) @ Wv.T + VW = fast_dequantize(VW.t(), VW_quant) + # D(Wq) @ Wq.T + D(Wk) @ Wk.T + D(Wv) @ (Wv.T + B.T @ A.T) + dX += dV @ VW.t() + del VW + dX += dV @ VB.to(dtype).t() @ (VS * VA.to(dtype).t()) + + # QW, QW_quant, QA, QB, QS, + # KW, KW_quant, KA, KB, KS, + # VW, VW_quant, VA, VB, VS, + return 
dX.view(batch, seq_len, hd), \ + None, None, d_QA.t(), d_QB.t(), None, \ + None, None, d_KA.t(), d_KB.t(), None, \ + None, None, d_VA.t(), d_VB.t(), None + pass +pass + + +def apply_lora_qkv(self, X): + QW, QW_quant, QA, QB, QS = get_lora_parameters(self.q_proj) + KW, KW_quant, KA, KB, KS = get_lora_parameters(self.k_proj) + VW, VW_quant, VA, VB, VS = get_lora_parameters(self.v_proj) + Q, K, V = LoRA_QKV.apply(X, + QW, QW_quant, QA, QB, QS, + KW, KW_quant, KA, KB, KS, + VW, VW_quant, VA, VB, VS, + ) + return Q, K, V +pass + + +class LoRA_W(torch.autograd.Function): + """ + ### LoRA weights + Wq = Wq + Aq @ Bq + Wk = Wk + Ak @ Bk + Wv = Wv + Av @ Bv + Q = X @ Wq = X @ Wq + X @ Aq @ Bq + K = X @ Wk = X @ Wk + X @ Ak @ Bk + V = X @ Wv = X @ Wv + X @ Av @ Bv + + ### Backpropagation chain rule + dC/dWq = X.T @ D(Wq) + dC/dWk = X.T @ D(Wk) + dC/dWv = X.T @ D(Wv) + dC/dX = D(Wq) @ Wq.T + + D(Wk) @ Wk.T + + D(Wv) @ Wv.T + + ### Q projection LoRA weights + dC/dAq = X.T @ D(Wq) @ B.T + dC/dBq = A.T @ X.T @ D(Wq) + + ### K projection LoRA weights + dC/dAk = X.T @ D(Wk) @ B.T + dC/dBk = A.T @ X.T @ D(Wk) + + ### V projection LoRA weights + dC/dAv = X.T @ D(Wv) @ B.T + dC/dBv = A.T @ X.T @ D(Wv) + """ + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, X : torch.Tensor, + W, W_quant, A, B, S): + dtype = X.dtype + XW = matmul_lora(X, W, W_quant, A, B, S) + ctx.custom_saved_tensors = (W, W_quant, S,) + ctx.save_for_backward(A, B, X) + return XW + pass + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, dY : torch.Tensor): + W, W_quant, S = ctx.custom_saved_tensors + A, B, X = ctx.saved_tensors + + A, B = A.t(), B.t() + + batch, seq_len, hd = X.shape + dY = dY.reshape(-1, dY.shape[-1]) # .view doesn't work on non contiguous + X = X .reshape(-1, X .shape[-1]) # .view doesn't work on non contiguous + dtype = X.dtype + + ### Weight projection LoRA weights + # dC/dAq = X.T @ D(Wq) @ B.T + # dC/dBq = A.T @ X.T @ D(Wq) + + # Weight projection + d_A = X.t() @ 
(dY @ B.t()) + d_B = (A.t() @ X.t()) @ dY + d_A *= S + d_B *= S + + # dC/dX = D(Wq) @ Wq.T + W = fast_dequantize(W.t(), W_quant) + dX = dY @ W.t() + del W + dX += dY @ B.to(dtype).t() @ (S * A.to(dtype).t()) + + # W, W_quant, A, B, S + return dX.view(batch, seq_len, hd), \ + None, None, d_A.t(), d_B.t(), None + pass +pass + + +def apply_lora_o(self, X): + OW, OW_quant, OA, OB, OS = get_lora_parameters(self.o_proj) + O = LoRA_W.apply(X, OW, OW_quant, OA, OB, OS) + return O +pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py new file mode 100644 index 0000000000..e8d6b36f15 --- /dev/null +++ b/unsloth/kernels/rms_layernorm.py @@ -0,0 +1,149 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.jit +def _rms_layernorm_forward( + Y, Y_row_stride, + X, X_row_stride, + W, W_row_stride, + r, r_row_stride, + n_cols, eps, + BLOCK_SIZE : tl.constexpr +): + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + Y += row_idx * Y_row_stride + X += row_idx * X_row_stride + r += row_idx * r_row_stride + + X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + + row_var = tl.sum(X_row * X_row, axis = 0) / n_cols + inv_var = 1 / tl.sqrt(row_var + eps) + tl.store(r, inv_var) + normed = X_row * inv_var + output = normed * W_row + tl.store(Y + col_offsets, output, mask = mask) +pass + + +@triton.jit +def _rms_layernorm_backward( + #dX, dX_row_stride, + dY, dY_row_stride, + X, X_row_stride, + W, W_row_stride, + r, r_row_stride, + dW, dW_row_stride, + n_cols, eps, + BLOCK_SIZE : tl.constexpr, +): + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + #dX += row_idx * dX_row_stride + col_offsets + dY += row_idx * dY_row_stride + X += row_idx * X_row_stride + r += row_idx * r_row_stride + + dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) + X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + + # row_var = tl.sum(X_row * X_row, axis = 0) / n_cols + # inv_var = 1 / tl.sqrt(row_var + eps) + inv_var = tl.load(r).to(tl.float32) + normed = X_row * inv_var + + dY_W = dY_row * W_row + rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) + output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) + #tl.store(dX, output, mask = mask) + tl.store(dY + col_offsets, output, mask = mask) +pass + + +class Fast_RMS_Layernorm(torch.autograd.Function): + @staticmethod + def 
forward(ctx, X, W, eps): + shape = X.shape + dim = shape[-1] + X = X.view(-1, dim) + n_rows, n_cols = X.shape + BLOCK_SIZE, num_warps = calculate_settings(n_cols) + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") + + r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + _rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + ctx.eps = eps + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps + ctx.save_for_backward(X, W, r) + return Y.view(*shape) + pass + + @staticmethod + def backward(ctx, dY): + shape = dY.shape + dim = shape[-1] + dY = dY.view(-1, dim) + X, W, r = ctx.saved_tensors + n_rows, n_cols = dY.shape + dW = X + + # dX = torch.empty_like(dY) + # dX = dY + _rms_layernorm_backward[(n_rows,)]( + #dX, dX.stride(0), + dY, dY.stride(0), + X, X .stride(0), + W, W .stride(0), + r, r .stride(0), + dW, dW.stride(0), + n_cols, ctx.eps, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) + #dX = dX.view(*shape) + dX = dY.view(*shape) + # X, W, eps + return dX, None, None + pass +pass + + +def fast_rms_layernorm(layernorm, X): + W = layernorm.weight + eps = layernorm.variance_epsilon + out = Fast_RMS_Layernorm.apply(X, W, eps) + return out +pass diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py new file mode 100644 index 0000000000..ee1b20f88f --- /dev/null +++ b/unsloth/kernels/rope_embedding.py @@ -0,0 +1,178 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.heuristics({"BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],}) +@triton.jit +def _rope_embedding( + Q, Q_row_stride, + cos, cos_row_stride, + sin, sin_row_stride, + seqlen, head_dim, + BACKWARD_PASS: tl.constexpr, + BLOCK_SIZE : tl.constexpr, +): + row_position = tl.program_id(0) + head_position = tl.program_id(1) + col_offsets = tl.arange(0, BLOCK_SIZE) + half_head_dim = head_dim // 2 + mask = col_offsets < half_head_dim + + # TODO: Fixup int32 locations to int64 + # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 + rot_position = row_position % seqlen + + Q += row_position* Q_row_stride + head_position*head_dim + cos += rot_position*cos_row_stride + sin += rot_position*sin_row_stride + + Q1 = tl.load(Q + half_head_dim*0 + col_offsets, mask = mask, other = 0) + sin1 = tl.load(sin + half_head_dim*0 + col_offsets, mask = mask, other = 0) + cos1 = tl.load(cos + half_head_dim*0 + col_offsets, mask = mask, other = 0) + + Q2 = tl.load(Q + half_head_dim*1 + col_offsets, mask = mask, other = 0) + # RoPE repeats sin and cos so 128 = [64, 64]. 
+ # sin2 = tl.load(sin + half_head_dim*1, mask = mask, other = 0) + # cos2 = tl.load(cos + half_head_dim*1, mask = mask, other = 0) + + if BACKWARD_PASS: + """ + Q * cos + rotate_half(Q) * sin + is equivalent to + Q * cos + Q @ R * sin + where R is a rotation matrix [ 0, I] + [-I, 0] + dC/dY = dY * cos + dY @ R.T * sin + where R.T is again the same [ 0, -I] + but the minus is transposed. [ I, 0] + """ + # sin1, sin2 = -sin1, -sin2 + sin1 = -sin1 + + # tl.store(Q + half_head_dim*0, Q1*cos1 - Q2*sin1, mask = mask) + # tl.store(Q + half_head_dim*1, Q2*cos2 + Q1*sin2, mask = mask) + # RoPE repeats sin and cos so 128 = [64, 64]. + tl.store(Q + half_head_dim*0 + col_offsets, Q1*cos1 - Q2*sin1, mask = mask) + tl.store(Q + half_head_dim*1 + col_offsets, Q2*cos1 + Q1*sin1, mask = mask) +pass + + +class Fast_RoPE_Embedding(torch.autograd.Function): + @staticmethod + def forward(ctx, Q, cos, sin): + cos, sin = cos.squeeze(), sin.squeeze() + batch, seq_len, n_heads, head_dim = Q.shape + Q = Q.view(batch*seq_len, n_heads*head_dim) + n_rows, n_cols = Q.shape + assert(seq_len <= cos.shape[0]) + + # [TODO] Changing blocksize to head_dim//2 seems to have + # some concurrency / un-deterministic issues. + BLOCK_SIZE, num_warps = calculate_settings(head_dim) # (head_dim//2) + _rope_embedding[(n_rows, n_heads,)]( + Q, Q.stride(0), + cos, cos.stride(0), + sin, sin.stride(0), + seq_len, head_dim, + BACKWARD_PASS = False, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps + ctx.cos = cos # Don't need save_for_backward since a view + ctx.sin = sin + return Q.view(batch, seq_len, n_heads, head_dim) + pass + + @staticmethod + def backward(ctx, dY): + batch, seq_len, n_heads, head_dim = dY.shape + dY = dY.reshape(batch*seq_len, n_heads*head_dim) + # Cannot be .view since the problem lies with dK since + # K.T's strides are incorrect. 
+ n_rows, n_cols = dY.shape + + cos = ctx.cos + sin = ctx.sin + + _rope_embedding[(n_rows, n_heads,)]( + dY, dY .stride(0), + cos, cos.stride(0), + sin, sin.stride(0), + seq_len, head_dim, + BACKWARD_PASS = True, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) + dY = dY.view(batch, seq_len, n_heads, head_dim) + return dY, None, None, + pass +pass + + +def fast_rope_embedding(Q, K, cos, sin): + # We need (batch, [seqlen, n_heads], head_dim) + Q = Fast_RoPE_Embedding.apply(Q.transpose(1, 2), cos, sin).transpose(1, 2) + K = Fast_RoPE_Embedding.apply(K.transpose(1, 2), cos, sin).transpose(1, 2) + # We need (batch, [n_heads, seqlen], head_dim) + return Q, K +pass + + +class Slow_RoPE_Embedding(torch.autograd.Function): + @staticmethod + def forward(ctx, Q, cos, sin, position_ids): + if position_ids is not None: + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. + cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] + sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] + cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + + # Q * cos + rotate_half(Q) * sin + half = Q.shape[-1]//2 + RH_Q = torch.cat((-Q[..., half:], Q[..., :half]), dim = -1) + Q *= cos + RH_Q *= sin + Q += RH_Q + ctx.save_for_backward(cos, sin) + return Q + pass + + @staticmethod + def backward(ctx, dY): + cos, sin = ctx.saved_tensors + # Q * cos + rotate_half.T(Q) * sin + half = dY.shape[-1]//2 + # We reverse the minus sign for R.T + RH_dY = torch.cat((dY[..., half:], -dY[..., :half]), dim = -1) + dY *= cos + RH_dY *= sin + dY += RH_dY + return dY, None, None, None + pass +pass + + +def inplace_rope_embedding(Q, K, cos, sin, position_ids): + Q = Slow_RoPE_Embedding.apply(Q, cos, sin, position_ids) + K = Slow_RoPE_Embedding.apply(K, cos, sin, position_ids) + return Q, K +pass diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py new file mode 100644 index 0000000000..63418a82b8 
--- /dev/null +++ b/unsloth/kernels/swiglu.py @@ -0,0 +1,88 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.jit +def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) + + # f = e * sigmoid(e) + # https://github.com/openai/triton/issues/241 exp MUST be done in f32 + # or else Triton crashes + f_row = e_row / (1 + tl.exp(-e_row)) + # h = f * g + h_row = f_row * g_row + + tl.store(h + offsets, h_row, mask = mask) +pass + + +def swiglu_fg_kernel(e, g): + batch, seq_len, hd = e.shape + n_elements = e.numel() + h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda") + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,) + return h +pass + + +@triton.jit +def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + DW_row = tl.load(DW + offsets, mask = mask, other = 0).to(tl.float32) + 
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) + + # f = e * sigmoid(e) + # https://github.com/openai/triton/issues/241 exp MUST be done in f32 + # or else Triton crashes + se_row = 1 / (1 + tl.exp(-e_row)) + f_row = e_row * se_row + # h = f * g + h_row = f_row * g_row + # df = se * (1 - f) + f + # DW_f = DW * f + DWf_row = DW_row * f_row + # DW_dfg = DW * df * g + # DW_dfg = DW * (se * (1 - f) + f) * g + # DW_dfg = DW * (se*(g - h) + h) + DW_dfg_row = DW_row * (se_row*(g_row - h_row) + h_row) + + tl.store(DW + offsets, h_row, mask = mask) # h + tl.store(e + offsets, DWf_row, mask = mask) # DW * f + tl.store(g + offsets, DW_dfg_row, mask = mask) # DW * df * g +pass + + +def swiglu_DWf_DW_dfg_kernel(DW, e, g): + batch_seq_len, hd = e.shape + n_elements = e.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + return DW, e, g # h, DW * f, DW * df * g +pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py new file mode 100644 index 0000000000..df4b85f0b6 --- /dev/null +++ b/unsloth/kernels/utils.py @@ -0,0 +1,93 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import triton +MAX_FUSED_SIZE = 65535 # 2**16 - 1 +next_power_of_2 = triton.next_power_of_2 + +def calculate_settings(n): + BLOCK_SIZE = next_power_of_2(n) + # CUDA only supports 65535 - 2^16-1 threads per block + if BLOCK_SIZE > MAX_FUSED_SIZE: + raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\ + f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") + num_warps = 4 + if BLOCK_SIZE >= 32768: num_warps = 32 + elif BLOCK_SIZE >= 8192: num_warps = 16 + elif BLOCK_SIZE >= 2048: num_warps = 8 + return BLOCK_SIZE, num_warps +pass + + +import bitsandbytes as bnb +get_ptr = bnb.functional.get_ptr +import ctypes +import torch +cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 +cdequantize_blockwise_fp16_nf4 = bnb.functional.lib.cdequantize_blockwise_fp16_nf4 +cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 + +def QUANT_STATE(W): + return getattr(W, "quant_state", None) +pass + +def fast_dequantize(W, quant_state = None, out = None): + if quant_state is None: return W + if type(quant_state) is not list: + # New quant_state as a class + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 = state2.blocksize + else: + # Old quant_state as a list of lists + absmax, shape, dtype, blocksize, compressed_stats, _, _ = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass + + # Create weight matrix + if out is None: + out = torch.empty(shape, dtype = dtype, device = "cuda") + else: + assert(out.shape == shape) + assert(out.dtype == dtype) + + # NF4 dequantization of statistics + n_elements_absmax = absmax.numel() + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda") + + # 
Do dequantization + ptr_out_absmax = get_ptr(out_absmax) + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, + ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax) + ) + out_absmax += offset + + fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ + cdequantize_blockwise_bf16_nf4 + fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), + ctypes.c_int(blocksize), ctypes.c_int(out.numel())) + + # Careful returning transposed data + is_transposed = (True if W.shape[0] == 1 else False) + return out.t() if is_transposed else out +pass diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py new file mode 100644 index 0000000000..46529ccab6 --- /dev/null +++ b/unsloth/models/__init__.py @@ -0,0 +1,44 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import os + +# Currently only supports 1 GPU, or else seg faults will occur. +reload_package = False +n_gpus = torch.cuda.device_count() +if n_gpus == 0: + raise RuntimeError("Unsloth: Requires at least 1 GPU. 
Found 0.") +elif n_gpus > 1: + if "CUDA_VISIBLE_DEVICES" in os.environ: + device = os.environ["CUDA_VISIBLE_DEVICES"] + if not device.isdigit(): + print(f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ + "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ + "We shall set it ourselves.") + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True + else: + print("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True +pass + +# Reload Pytorch with CUDA_VISIBLE_DEVICES +if reload_package: + import importlib + importlib.reload(torch) +pass + +from .llama import FastLlamaModel diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py new file mode 100644 index 0000000000..32d20aee5f --- /dev/null +++ b/unsloth/models/_utils.py @@ -0,0 +1,61 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from typing import Union, Optional, List, Any, Callable +import numpy as np +import warnings +import gc +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") +import bitsandbytes as bnb + + +def prepare_model_for_kbit_training( + model : Any, + use_gradient_checkpointing : bool = True, + use_reentrant : Optional[bool] = True, +) -> Any: + """ + Calculates where to place the gradient checkpoints given n_layers. 
+ We also freeze all other layers's gradients + + Args: + model: Any LlamaModel with layers. + use_gradient_checkpointing (`bool`, *optional*): + Default enabled. Provides memory savings by not saving all activations, + but only some. + use_reentrant (`bool`, *optional*): + https://github.com/pytorch/pytorch/blob/main/torch/utils/checkpoint.py#L354 + Optimal gradient checkpointing algorithm which will be the default in + future Pytorch versions. + """ + + # Freeze all parameters + for param in model.parameters(): + param.requires_grad_(False) + + if use_gradient_checkpointing: + model.gradient_checkpointing_enable() + + # If use_reentrant = True which is the Pytorch default, we just make the input requires_grad. + if use_reentrant: + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + else: + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + return model +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py new file mode 100644 index 0000000000..8ab451f7d5 --- /dev/null +++ b/unsloth/models/llama.py @@ -0,0 +1,734 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +from typing import Optional, Tuple, List, Union +from torch.nn.functional import scaled_dot_product_attention +from transformers.models.llama.modeling_llama import ( + # apply_rotary_pos_emb, + # repeat_kv, + # _prepare_4d_causal_attention_mask, + logger, + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) +from ..kernels import * + +# Get Flash Attention v2 if Ampere (RTX 30xx, A100) +major_version, minor_version = torch.cuda.get_device_capability() +if major_version >= 8: + try: + from flash_attn import flash_attn_func + HAS_FLASH_ATTENTION = True + except: + HAS_FLASH_ATTENTION = False +else: + # Tri Dao's benchmark shows xformers is faster for now. + HAS_FLASH_ATTENTION = False +pass + +import xformers.ops.fmha as xformers +xformers_attention = xformers.memory_efficient_attention + +# Final patching code +from transformers.models.llama.modeling_llama import ( + LlamaAttention, + LlamaDecoderLayer, + LlamaModel, + LlamaForCausalLM, +) +from peft import PeftModelForCausalLM +import gc +import peft +import bitsandbytes as bnb +import numpy as np +import types + +from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig +from transformers import set_seed as transformers_set_seed +from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model +from ._utils import ( + prepare_model_for_kbit_training, +) + + +def original_apply_qkv(self, X): + Q = self.q_proj(X) + K = self.k_proj(X) + V = self.v_proj(X) + return Q, K, V +pass + + +def original_apply_o(self, X): + O = self.o_proj(X) + return O +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 +def LlamaAttention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + 
output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + bsz, q_len, _ = hidden_states.size() + + # Q = self.q_proj(hidden_states) + # K = self.k_proj(hidden_states) + # V = self.v_proj(hidden_states) + Q, K, V = self.apply_qkv(self, hidden_states) + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + # cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + # Q, K = apply_rotary_pos_emb(Q, K, cos, sin, position_ids) + if position_ids is None: + cos = self.rotary_emb.cos_cached + sin = self.rotary_emb.sin_cached + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + pass + + if past_key_value is not None: + # reuse k, v, self_attention + K = torch.cat([past_key_value[0], K], dim = 2) + V = torch.cat([past_key_value[1], V], dim = 2) + past_key_value = (K, V) if use_cache else None + + # Attention module + # no_attention_mask = attention_mask is None + # Ignore attention_mask + + if (not HAS_FLASH_ATTENTION): #and no_attention_mask: + # Xformers memory efficient attention + # Also has Flash Attention v2 dispatching + # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + + # Grouped query attention + if n_groups != 1: + Q = Q.reshape(bsz, q_len, n_groups, n_kv_heads, head_dim) + + 
K = K.reshape(bsz, q_len, n_groups, 1, head_dim) + V = V.reshape(bsz, q_len, n_groups, 1, head_dim) + K = K .expand(bsz, q_len, n_groups, n_kv_heads, head_dim) + V = V .expand(bsz, q_len, n_groups, n_kv_heads, head_dim) + pass + + A = xformers_attention(Q, K, V, attn_bias = causal_mask) + A = A.view(bsz, q_len, n_heads, head_dim) + + elif HAS_FLASH_ATTENTION:# and no_attention_mask: + # Flash Attention + # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + + # Flash Attention v2 auto supports grouped query attention + A = flash_attn_func(Q, K, V, causal = True) + + else: + # Uses Pytorch's scaled dot product attention + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + pass + + # Grouped query attention + # K = repeat_kv(K, n_groups) + # V = repeat_kv(V, n_groups) + if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + pass + + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! 
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590
def LlamaDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    *args, **kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
    """
    Drop-in replacement for `LlamaDecoderLayer.forward` that routes both RMS
    layernorms through the fused `fast_rms_layernorm` kernel and threads the
    extra `causal_mask` (xformers attention bias) down to the patched attention.

    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
    """
    # First residual branch: pre-norm -> self attention -> add back.
    residual = hidden_states

    # hidden_states = self.input_layernorm(hidden_states)
    hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states)

    # Self Attention
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
        hidden_states=hidden_states,
        causal_mask=causal_mask,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_value=past_key_value,
        output_attentions=output_attentions,
        use_cache=use_cache,
        padding_mask=padding_mask,
    )
    hidden_states = residual + hidden_states

    # Second residual branch: pre-norm -> MLP -> add back.
    # Fully Connected
    residual = hidden_states
    # hidden_states = self.post_attention_layernorm(hidden_states)
    hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states)
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states

    # Output tuple layout mirrors upstream transformers:
    # (hidden_states[, attn_weights][, present_key_value])
    outputs = (hidden_states,)

    if output_attentions:
        outputs += (self_attn_weights,)

    if use_cache:
        outputs += (present_key_value,)

    return outputs
pass


# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
def LlamaModel_fast_forward(
    self,
    input_ids: torch.LongTensor,
    causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    *args, **kwargs,
) -> Union[Tuple, BaseModelOutputWithPast]:
    """
    Patched `LlamaModel.forward`: embeds tokens, runs every decoder layer
    (optionally under reentrant gradient checkpointing), and applies the final
    fused RMS layernorm. The incoming `attention_mask` is deliberately ignored;
    masking is handled by `causal_mask` inside the patched attention.
    """

    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    # Returning per-layer attention weights is not supported by the fused path.
    assert(output_attentions is False)
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache

    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # retrieve input_ids and inputs_embeds
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
    elif input_ids is not None:
        batch_size, seq_length = input_ids.shape
    elif inputs_embeds is not None:
        batch_size, seq_length, _ = inputs_embeds.shape
    else:
        raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

    seq_length_with_past = seq_length
    past_key_values_length = 0

    if past_key_values is not None:
        # past_key_values[idx] is a (K, V) pair; dim 2 of K is the cached sequence length.
        past_key_values_length = past_key_values[0][0].shape[2]
        seq_length_with_past = seq_length_with_past + past_key_values_length

    # We already handle KV cache position_ids ourselves.
    if (past_key_values_length != 0):
        position_ids = torch.arange(
            past_key_values_length, seq_length + past_key_values_length,
            dtype = torch.int32,#dtype=torch.long, int32 feeds the fused RoPE kernel
            device = "cuda",  # NOTE(review): hard-coded device — assumes a single CUDA GPU; confirm
        )
        position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
    elif position_ids is not None:
        position_ids = position_ids.view(-1, seq_length).to(torch.int32)#.long()
    else:
        position_ids = None

    if position_ids is not None:
        # Broadcast a single row of position ids across the batch.
        if position_ids.shape[0] != batch_size:
            position_ids = position_ids.repeat((batch_size, 1))

    # embed positions
    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)

    # Ignore attention_mask — the `else` branch below is intentionally dead;
    # it is the upstream padding-mask path kept for reference.
    if True:
        # if attention_mask is None:
        #     attention_mask = torch.ones(
        #         (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
        #     )
        padding_mask = None
    else:
        if 0 in attention_mask:
            padding_mask = attention_mask
        else:
            padding_mask = None

        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )
    pass

    hidden_states = inputs_embeds

    # Gradient checkpointing and KV caching are mutually exclusive.
    if self.gradient_checkpointing and self.training:
        if use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False
    pass

    # decoder layers
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None
    next_decoder_cache = () if use_cache else None

    for idx, decoder_layer in enumerate(self.layers):
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        past_key_value = past_key_values[idx] if past_key_values is not None else None

        if self.gradient_checkpointing and self.training:

            # Closure captures past_key_value / output_attentions / padding_mask
            # so only tensor args flow through torch.utils.checkpoint.
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    # None for past_key_value
                    return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask)

                return custom_forward

            layer_outputs = torch.utils.checkpoint.checkpoint(
                create_custom_forward(decoder_layer),
                hidden_states,
                causal_mask,
                attention_mask,
                position_ids,
                use_reentrant=True,
                preserve_rng_state=False,
            )
        else:
            layer_outputs = decoder_layer(
                hidden_states,
                causal_mask=causal_mask,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                padding_mask=padding_mask,
            )

        hidden_states = layer_outputs[0]

        if use_cache:
            # Cache sits at index 2 when attn weights are also returned, else index 1.
            next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

        if output_attentions:
            all_self_attns += (layer_outputs[1],)
    pass

    # hidden_states = self.norm(hidden_states)
    hidden_states = fast_rms_layernorm(self.norm, hidden_states)

    # add hidden states from the last decoder layer
    if output_hidden_states:
        all_hidden_states += (hidden_states,)

    next_cache = next_decoder_cache if use_cache else None
    if not return_dict:
        return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
    return BaseModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=next_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
    )
pass
def LlamaForCausalLM_fast_forward(
    self,
    input_ids: torch.LongTensor = None,
    causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    *args, **kwargs,
) -> Union[Tuple, CausalLMOutputWithPast]:
    """
    Patched `LlamaForCausalLM.forward`. Supplies a default xformers
    lower-triangular causal bias, runs the patched model, and computes the
    LM loss with `fast_cross_entropy_loss` without materialising shifted
    logits copies.
    """

    if causal_mask is None:
        causal_mask = xformers.attn_bias.LowerTriangularMask()

    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        causal_mask=causal_mask,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    hidden_states = outputs[0]
    logits = self.lm_head(hidden_states)

    loss = None
    if labels is not None:
        # Instead of slicing logits[..., :-1, :] (which copies), keep the full
        # logits and shift the *labels* left by one, padding the tail with the
        # -100 ignore index rows from `extra_ignored_labels`.
        # NOTE(review): relies on `self.extra_ignored_labels`, which is only
        # attached in FastLlamaModel.get_peft_model — confirm all call paths set it.
        # logits = logits.float()
        # shift_logits = logits[..., :-1, :].contiguous()
        # shift_labels = labels[..., 1:].contiguous()
        # shift_labels = shift_labels.view(-1)
        # shift_logits = shift_logits.view(-1, self.config.vocab_size)
        shift_logits = logits
        shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]]))

        # loss_fct = torch.nn.CrossEntropyLoss(
        #     ignore_index = self.ignore_index,
        #     label_smoothing = self.label_smoothing,
        # )
        # loss = loss_fct(shift_logits, shift_labels)
        loss = fast_cross_entropy_loss(
            logits = shift_logits,
            labels = shift_labels,
        )
    pass

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output

    return CausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
pass


def PeftModelForCausalLM_fast_forward(
    self,
    input_ids=None,
    causal_mask=None,
    attention_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
    task_ids=None,
    **kwargs,
):
    # Thin pass-through so the PEFT wrapper also forwards `causal_mask`
    # down to the patched base model. `task_ids` is accepted but unused here.
    return self.base_model(
        input_ids=input_ids,
        causal_mask=causal_mask,
        attention_mask=attention_mask,
        inputs_embeds=inputs_embeds,
        labels=labels,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )
pass


class FastLlamaModel:
    """
    Static patcher for HuggingFace Llama models: monkey-patches the
    transformers/PEFT forward methods with the fast implementations above,
    loads a (optionally 4-bit) model + tokenizer, and wires up the LoRA
    fast paths.
    """

    @staticmethod
    def pre_patch():
        # Route every forward through the fused fast implementations above.
        LlamaAttention      .forward = LlamaAttention_fast_forward
        LlamaDecoderLayer   .forward = LlamaDecoderLayer_fast_forward
        LlamaModel          .forward = LlamaModel_fast_forward
        LlamaForCausalLM    .forward = LlamaForCausalLM_fast_forward
        PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward
        return
    pass


    @staticmethod
    def from_pretrained(
        model_name = "meta-llama/Llama-2-7b-hf",
        max_seq_length = 4096,
        dtype = None,       # None => bfloat16 if the GPU supports it, else float16
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
    ):
        """
        Patch the Llama classes, then load `model_name` (optionally NF4 4-bit
        quantized) plus its tokenizer. Returns `(model, tokenizer)`.
        """
        gpu_stats = torch.cuda.get_device_properties(0)
        max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
        SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported()

        statistics = \
            "==((====))== Unsloth: Fast Llama patching release 23.11\n"\
            f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\
            f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\
            f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\
            f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n'
        print(statistics)

        FastLlamaModel.pre_patch()

        if dtype is None:
            dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16
        elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16:
            logger.warning_once("Device does not support bfloat16. Will change to float16.")
            dtype = torch.float16

        assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32)

        # [TODO]: Determine RoPE scaling
        # https://github.com/huggingface/transformers/pull/24653
        assert(max_seq_length <= 4096)

        bnb_config = None
        if load_in_4bit:
            bnb_config = BitsAndBytesConfig(
                load_in_4bit = True,
                bnb_4bit_use_double_quant = True,
                bnb_4bit_quant_type = "nf4",
                bnb_4bit_compute_dtype = dtype,
            )

        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map = device_map,
            torch_dtype = dtype,
            quantization_config = bnb_config,
            token = token,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            model_max_length = max_seq_length,
            padding_side = "right",
            token = token,
        )
        # Use the UNK token as the padding token so padding never aliases EOS.
        tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token});
        tokenizer.pad_token = tokenizer.unk_token
        # NOTE(review): `config.update(...)` mutates in place and returns None,
        # so `config` is None after this line — the assignment looks unintended; verify.
        config = model.config.update({"pad_token_id" : tokenizer.unk_token_id});

        model = FastLlamaModel.post_patch(model)

        # Patch up QKV / O and MLP
        # Start every layer on the plain (non-LoRA) projection helpers;
        # get_peft_model swaps these for the LoRA variants later.
        for idx, layer in enumerate(model.model.layers):
            layer.self_attn.apply_qkv = original_apply_qkv
            layer.self_attn.apply_o = original_apply_o
        pass
        return model, tokenizer
    pass


    @staticmethod
    def post_patch(model):
        """
        Post-load fixups: rebuild embed_tokens / lm_head as plain modules and
        force every 4-bit quant_state to the lm_head dtype. Returns the model.
        """
        # Patch model
        layers = model.model.layers

        # Torch.compile fails on embedding matrix??
        # Workaround randomnly fixes it for torch versions < 2.2
        model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight)

        # We also do this for the lm_head
        # Build a shell Linear and graft the existing weight onto it (no copy).
        lm_head = torch.nn.Linear(1, 1, bias = None)
        del lm_head.weight
        lm_head.weight = model.lm_head.weight
        lm_head.in_features = lm_head.weight.shape[1]
        lm_head.out_features = lm_head.weight.shape[0]
        model.lm_head = lm_head

        # Also patch all dtypes - BnB seems to not allocate the correct type?
        # BnB default dtype seems to be float16!
        correct_dtype = lm_head.weight.dtype

        for name, module in model.named_modules():
            if isinstance(module, (bnb.nn.Linear4bit, peft.tuners.lora.Linear4bit)):
                weight = module.weight
                quant_state = weight.quant_state

                # Older bitsandbytes stores quant_state as a list (dtype at
                # index 2); newer versions expose a .dtype attribute.
                if type(quant_state) is list:
                    # BnB seems to have float16 as default!
                    module.weight.quant_state[2] = correct_dtype # Cast to correct dtype
                else:
                    # https://github.com/TimDettmers/bitsandbytes/pull/763/files
                    quant_state.dtype = correct_dtype
                pass
            pass
        pass

        # Clear deleted GPU items
        gc.collect()
        torch.cuda.empty_cache()
        return model
    pass


    @staticmethod
    def get_peft_model(
        model,
        r = 16,
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                          "gate_proj", "up_proj", "down_proj"],
        lora_alpha = 16,
        lora_dropout = 0,       # must stay 0 — fast kernels have no dropout path
        bias = "none",          # must stay "none"
        layers_to_transform = None,
        use_gradient_checkpointing = True,
        random_state = 3407,
        max_seq_length = 2048,
    ):
        """
        Wrap `model` in a LoRA adapter and patch each layer's MLP / QKV / O
        with the fused LoRA fast paths. Raises TypeError for unsupported
        `lora_dropout` / `bias` settings. Returns the PEFT-wrapped model.
        """
        if lora_dropout != 0:
            raise TypeError("Unsloth: Fast Llama patching only works with dropout = 0.")
        if bias != "none":
            raise TypeError("Unsloth: Fast Llama patching only works with bias = 'none'.")

        transformers_set_seed(random_state)

        # Only the standard Llama projection names are patchable.
        accepted_modules = frozenset(("q_proj", "k_proj", "v_proj", "o_proj",
                                      "gate_proj", "up_proj", "down_proj",),)
        for module in target_modules:
            assert(module in accepted_modules)
        pass

        # Get LoRA
        lora_config = LoraConfig(
            r = r,
            lora_alpha = lora_alpha,
            target_modules = target_modules,
            lora_dropout = 0,
            bias = "none",
            task_type = TaskType.CAUSAL_LM,
            layers_to_transform = layers_to_transform,
        )

        model = prepare_model_for_kbit_training(
            model,
            use_gradient_checkpointing = use_gradient_checkpointing,
            use_reentrant = True,
        )
        model = _get_peft_model(model, lora_config)

        # Do patching
        # Each fused path is installed only when EVERY projection it fuses
        # actually carries a LoRA adapter (has `lora_A`).
        for idx, layer in enumerate(model.model.model.layers):

            # MLP patching
            if hasattr(layer.mlp.gate_proj, "lora_A") and \
               hasattr(layer.mlp. up_proj, "lora_A") and \
               hasattr(layer.mlp.down_proj, "lora_A"):

                # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module
                layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp)
            pass

            # QKV attention patching
            if hasattr(layer.self_attn.q_proj, "lora_A") and \
               hasattr(layer.self_attn.k_proj, "lora_A") and \
               hasattr(layer.self_attn.v_proj, "lora_A"):

                layer.self_attn.apply_qkv = apply_lora_qkv
            pass

            # O attention patching
            if hasattr(layer.self_attn.o_proj, "lora_A"):

                layer.self_attn.apply_o = apply_lora_o
            pass
        pass

        # Patch cross entropy loss labels
        # -100 rows are the CrossEntropy ignore-index padding consumed by
        # LlamaForCausalLM_fast_forward's label shift.
        model.model.extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda")

        return model
    pass
pass
-`!pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git"` -`!pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git"` +# Installation Instructions +Unsloth currently only supports Linux* and Pytorch >= 2.1. +1. Find your CUDA version via +``` +import torch; torch.version.cuda +``` +2. For CUDA 11.8: +``` +pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" +``` +3. For CUDA 12.1: +``` +pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" +``` + +To update Pytorch to 2.1: +``` +conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ + -c pytorch -c nvidia -c xformers -c conda-forge -y +``` +or +``` +pip install --upgrade --force-reinstall --no-cache-dir torch triton \ + --index-url https://download.pytorch.org/whl/cu121 +``` +Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. -### Google Colab examples -1. [Unsloth fast finetuning example](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) -2. [Original slow finetuning example](https://colab.research.google.com/drive/1c7zxdLHaLJ9R9YTZ74y4tUERvS-kySyA?usp=sharing) +Then install Unsloth. -### Installation instructions -In Google Colab: +For Google Colab and Kaggle instances: +1. Try our Colab example: +2. Try our Kaggle example: + +# Future Milestones + +# Troubleshooting +1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: ``` !ldconfig /usr/lib64-nvidia -!pip install xformers --index-url https://download.pytorch.org/whl/cu118 -!pip install git+https://github.com/danielhanchen/unsloth.git ``` -`!ldconfig /usr/lib64-nvidia` is necessary (for now) to link CUDA with Python. Possibly a Google Colab linking bug. - -For general installations: -1. Install Xformers *OR* Flash Attention. Choose 1. Old GPUs use Xformers. New use Flash Attention. -2. 
For Xformers, find your Pytorch CUDA version via `torch.version.cuda` or `nvidia-smi`.
- * If you have Conda, `conda install xformers -c xformers`
- * If you have CUDA 11.8, `pip install xformers --index-url https://download.pytorch.org/whl/cu118`
- * If you have CUDA 12.1, `pip install xformers --index-url https://download.pytorch.org/whl/cu121`
- * Go to https://github.com/facebookresearch/xformers for other issues.
- * You must have Pytorch 2.1 installed for Xformers. If not, try Flash Attention.
- * Xformers supports all GPUs (Tesla T4 etc).
-3. For Flash Attention, you must have a Ampere, Ada, Hopper GPU (A100, RTX 3090, RTX 4090, H100).
- * Install Flash Attention via `pip uninstall -y ninja && pip install ninja` then `pip install flash-attn --no-build-isolation`.
- * Xformers has native support for Flash Attention, so technically installing Xformers is enough.
-4. Then install Unsloth:
- `pip install git+https://github.com/danielhanchen/unsloth.git`
+2. Windows is not supported yet - we rely on Xformers and Triton, so once both packages officially support Windows, Unsloth will support Windows as well.
From 18cc4441c30aa696acf0b53dc8828115679ecbd5 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:50:44 +1100 Subject: [PATCH 0004/1088] requirements --- requirements_cu118.txt | 2 ++ requirements_cu121.txt | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 requirements_cu118.txt create mode 100644 requirements_cu121.txt diff --git a/requirements_cu118.txt b/requirements_cu118.txt new file mode 100644 index 0000000000..f6be7e336d --- /dev/null +++ b/requirements_cu118.txt @@ -0,0 +1,2 @@ +--index-url https://download.pytorch.org/whl/cu118 +xformers \ No newline at end of file diff --git a/requirements_cu121.txt b/requirements_cu121.txt new file mode 100644 index 0000000000..7f5ca02dc9 --- /dev/null +++ b/requirements_cu121.txt @@ -0,0 +1,2 @@ +--index-url https://download.pytorch.org/whl/cu121 +xformers \ No newline at end of file From 0b14f48b88e9f91ce06d40fa9b605f439a2580a3 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:51:13 +1100 Subject: [PATCH 0005/1088] Update pyproject.toml --- pyproject.toml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0bfcfe9b97..34338e04a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "unsloth" -version = "2023.11" +dynamic = ["version"] description = "2X faster LLM finetuning" readme = "README.md" requires-python = ">=3.9" @@ -30,20 +30,14 @@ dependencies = [ "accelerate", "trl", "peft", - "torch>=2.1.0", ] +[tool.setuptools.dynamic] +version = {attr = "unsloth.__version__"} + [project.optional-dependencies] -cu118 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' 
and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", -] -cu121 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", -] +cu118 = {file = ["requirements_cu118.txt"]} +cu118 = {file = ["requirements_cu121.txt"]} [project.urls] homepage = "http://www.unsloth.ai" From 47b0964b0cfececb4d5ccd55775b5f9627d9f430 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:52:05 +1100 Subject: [PATCH 0006/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 34338e04a0..eb3fcde9bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ version = {attr = "unsloth.__version__"} [project.optional-dependencies] cu118 = {file = ["requirements_cu118.txt"]} -cu118 = {file = ["requirements_cu121.txt"]} +cu121 = {file = ["requirements_cu121.txt"]} [project.urls] homepage = "http://www.unsloth.ai" From 1a505d31b0022c7bfc9dcdf93924c626bfd6a01a Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:54:10 +1100 Subject: [PATCH 0007/1088] Update pyproject.toml --- pyproject.toml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index eb3fcde9bb..562ee206a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,14 +30,23 @@ dependencies = [ "accelerate", "trl", "peft", + "torch>=2.1.0", ] 
[tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} [project.optional-dependencies] -cu118 = {file = ["requirements_cu118.txt"]} -cu121 = {file = ["requirements_cu121.txt"]} +cu118 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", +] +cu121 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", +] [project.urls] homepage = "http://www.unsloth.ai" From bac8c4fef6e8a000c2f7fa6f1739f473eebafe53 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:55:38 +1100 Subject: [PATCH 0008/1088] Update pyproject.toml --- pyproject.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 562ee206a7..ea192c3df8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,14 +38,14 @@ version = {attr = "unsloth.__version__"} [project.optional-dependencies] cu118 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] [project.urls] From 54d6cbd2289bdc9d07cd78f8f474859498447a06 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 16:59:31 +1100 Subject: [PATCH 0009/1088] dependencies --- requirements_cu118.txt => cu118.txt | 0 requirements_cu121.txt => cu121.txt | 0 pyproject.toml | 12 
------------ 3 files changed, 12 deletions(-) rename requirements_cu118.txt => cu118.txt (100%) rename requirements_cu121.txt => cu121.txt (100%) diff --git a/requirements_cu118.txt b/cu118.txt similarity index 100% rename from requirements_cu118.txt rename to cu118.txt diff --git a/requirements_cu121.txt b/cu121.txt similarity index 100% rename from requirements_cu121.txt rename to cu121.txt diff --git a/pyproject.toml b/pyproject.toml index ea192c3df8..3e94238153 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,18 +36,6 @@ dependencies = [ [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} -[project.optional-dependencies] -cu118 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", -] -cu121 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", -] - [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" From 94843dd2a9445137716269137ff6e6904776ea35 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:04:20 +1100 Subject: [PATCH 0010/1088] requirements --- pyproject.toml | 14 ++------------ requirements.txt | 7 +++++++ 2 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 requirements.txt diff --git a/pyproject.toml 
b/pyproject.toml index 3e94238153..e61afb39c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "unsloth" -dynamic = ["version"] +dynamic = ["version", "dependencies"] description = "2X faster LLM finetuning" readme = "README.md" requires-python = ">=3.9" @@ -22,19 +22,9 @@ classifiers = [ "Programming Language :: Python", ] -dependencies = [ - "transformers", - "bitsandbytes", - "datasets", - "sentencepiece", - "accelerate", - "trl", - "peft", - "torch>=2.1.0", -] - [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} +dependencies = {file = ["requirements.txt"]} [project.urls] homepage = "http://www.unsloth.ai" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..9503660f0c --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +transformers +bitsandbytes +datasets +sentencepiece +accelerate +trl +peft \ No newline at end of file From fff760cdc426f148c791c0a7c4bfc459416a23f2 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:05:49 +1100 Subject: [PATCH 0011/1088] requirements --- pyproject.toml | 4 ++++ cu118.txt => requirements_cu118.txt | 0 cu121.txt => requirements_cu121.txt | 0 3 files changed, 4 insertions(+) rename cu118.txt => requirements_cu118.txt (100%) rename cu121.txt => requirements_cu121.txt (100%) diff --git a/pyproject.toml b/pyproject.toml index e61afb39c1..43089c6c4e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,10 @@ classifiers = [ [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} dependencies = {file = ["requirements.txt"]} +optional-dependencies = { + cu118 = { file = ["requirements_cu118.txt"] }, + cu121 = { file = ["requirements_cu121.txt"] }, +} [project.urls] homepage = "http://www.unsloth.ai" diff --git a/cu118.txt b/requirements_cu118.txt similarity index 100% rename from cu118.txt rename to requirements_cu118.txt diff --git a/cu121.txt b/requirements_cu121.txt 
similarity index 100% rename from cu121.txt rename to requirements_cu121.txt From 67f711d289a319ee4114448c069324c09efb5db6 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:06:51 +1100 Subject: [PATCH 0012/1088] Update pyproject.toml --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 43089c6c4e..09208daed0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,10 @@ classifiers = [ [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} dependencies = {file = ["requirements.txt"]} -optional-dependencies = { - cu118 = { file = ["requirements_cu118.txt"] }, - cu121 = { file = ["requirements_cu121.txt"] }, -} +optional-dependencies = [ + {cu118 = { file = ["requirements_cu118.txt"] }}, + {cu121 = { file = ["requirements_cu121.txt"] }}, +] [project.urls] homepage = "http://www.unsloth.ai" From dbfb5e70d33d96465c96ba59e4f62efd98dbdd64 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:08:40 +1100 Subject: [PATCH 0013/1088] Update pyproject.toml --- pyproject.toml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 09208daed0..84ad6cb509 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,7 @@ classifiers = [ [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} dependencies = {file = ["requirements.txt"]} -optional-dependencies = [ - {cu118 = { file = ["requirements_cu118.txt"] }}, - {cu121 = { file = ["requirements_cu121.txt"] }}, -] +optional-dependencies = {cu118 = { file = ["requirements_cu118.txt"] }} [project.urls] homepage = "http://www.unsloth.ai" From 8e3f94fe1b462e485a8fbbae13a7701f8c494d6d Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:10:10 +1100 Subject: [PATCH 0014/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml 
index 84ad6cb509..337c56b6d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "unsloth" -dynamic = ["version", "dependencies"] +dynamic = ["version", "dependencies", "optional-dependencies"] description = "2X faster LLM finetuning" readme = "README.md" requires-python = ">=3.9" From 68af199a9660072b5f75aa5cb791575018fdf924 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:11:30 +1100 Subject: [PATCH 0015/1088] Update pyproject.toml --- pyproject.toml | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 337c56b6d4..4e16ab067e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "unsloth" -dynamic = ["version", "dependencies", "optional-dependencies"] +dynamic = ["version"] description = "2X faster LLM finetuning" readme = "README.md" requires-python = ">=3.9" @@ -22,10 +22,30 @@ classifiers = [ "Programming Language :: Python", ] +dependencies = [ + "transformers", + "bitsandbytes", + "datasets", + "sentencepiece", + "accelerate", + "trl", + "peft", +] + [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} -dependencies = {file = ["requirements.txt"]} -optional-dependencies = {cu118 = { file = ["requirements_cu118.txt"] }} + +[project.optional-dependencies] +cu118 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu121 = [ + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] [project.urls] homepage = "http://www.unsloth.ai" From ff84ba20490e15451e64321ade01a19990b5f54d Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:11:40 +1100 Subject: [PATCH 0016/1088] requirements --- requirements.txt | 7 ------- requirements_cu118.txt | 2 -- requirements_cu121.txt | 2 -- 3 files changed, 11 deletions(-) delete mode 100644 requirements.txt delete mode 100644 requirements_cu118.txt delete mode 100644 requirements_cu121.txt diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 9503660f0c..0000000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -transformers -bitsandbytes -datasets -sentencepiece -accelerate -trl -peft \ No newline at end of file diff --git a/requirements_cu118.txt b/requirements_cu118.txt deleted file mode 100644 index f6be7e336d..0000000000 --- a/requirements_cu118.txt +++ /dev/null @@ -1,2 +0,0 @@ ---index-url https://download.pytorch.org/whl/cu118 -xformers \ No newline at end of file diff --git a/requirements_cu121.txt b/requirements_cu121.txt deleted file mode 100644 index 7f5ca02dc9..0000000000 --- a/requirements_cu121.txt +++ /dev/null @@ -1,2 +0,0 @@ ---index-url https://download.pytorch.org/whl/cu121 -xformers \ No newline at end of file From 7166dce0ae5a7fdbd973ecb6c122a110c83d4d63 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:21:29 +1100 Subject: [PATCH 0017/1088] Torch 2.1 --- unsloth/__init__.py | 39 ++++++++++++++++++++++++++++++++++++++ unsloth/models/__init__.py | 29 ---------------------------- 2 files changed, 39 insertions(+), 29 
deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c7f3ea94cf..63689802d9 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -12,5 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. __version__ = "2023.11" +import os +try: + import torch +except: + raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ + "We have some installation instructions on our Github page.") + +# We only support torch 2.1 +major_torch, minor_torch, _ = torch.__version__.split(".") +major_torch, minor_torch = int(major_torch), int(minor_torch) +if (major_torch != 2) or (major_torch == 2 and minor_torch < 1): + raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ + "We have some installation instructions on our Github page.") + +# Currently only supports 1 GPU, or else seg faults will occur. +reload_package = False +n_gpus = torch.cuda.device_count() +if n_gpus == 0: + raise RuntimeError("Unsloth: Requires at least 1 GPU. Found 0.") +elif n_gpus > 1: + if "CUDA_VISIBLE_DEVICES" in os.environ: + device = os.environ["CUDA_VISIBLE_DEVICES"] + if not device.isdigit(): + print(f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ + "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ + "We shall set it ourselves.") + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True + else: + print("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True +pass + +# Reload Pytorch with CUDA_VISIBLE_DEVICES +if reload_package: + import importlib + importlib.reload(torch) +pass from .models import * diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 46529ccab6..4df2e937e4 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,33 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -import torch -import os - -# Currently only supports 1 GPU, or else seg faults will occur. -reload_package = False -n_gpus = torch.cuda.device_count() -if n_gpus == 0: - raise RuntimeError("Unsloth: Requires at least 1 GPU. Found 0.") -elif n_gpus > 1: - if "CUDA_VISIBLE_DEVICES" in os.environ: - device = os.environ["CUDA_VISIBLE_DEVICES"] - if not device.isdigit(): - print(f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ - "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ - "We shall set it ourselves.") - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True - else: - print("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True -pass - -# Reload Pytorch with CUDA_VISIBLE_DEVICES -if reload_package: - import importlib - importlib.reload(torch) -pass - from .llama import FastLlamaModel From 47569be1d66b7d0f583accd4fa3ff3db299a57bc Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:27:48 +1100 Subject: [PATCH 0018/1088] Update __init__.py --- unsloth/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 63689802d9..baf3919590 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -52,4 +52,11 @@ importlib.reload(torch) pass +# Try loading bitsandbytes +import bitsandbytes as bnb +try: + cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 +except: + raise ImportError("CUDA is not linked properly. Try running `ldconfig /usr/lib64-nvidia` first.") + from .models import * From f9b26edcca3e3add45d21c2cafb3f77717bab253 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 17:30:10 +1100 Subject: [PATCH 0019/1088] Update __init__.py --- unsloth/__init__.py | 51 ++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index baf3919590..c77ae60ce7 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -13,50 +13,57 @@ # limitations under the License. __version__ = "2023.11" import os +import warnings +import importlib + try: - import torch + import torch except: - raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ - "We have some installation instructions on our Github page.") + raise ImportError("Pytorch is not installed. 
Go to https://pytorch.org/.\n"\ + "We have some installation instructions on our Github page.") # We only support torch 2.1 major_torch, minor_torch, _ = torch.__version__.split(".") major_torch, minor_torch = int(major_torch), int(minor_torch) if (major_torch != 2) or (major_torch == 2 and minor_torch < 1): - raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ - "We have some installation instructions on our Github page.") + raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ + "We have some installation instructions on our Github page.") # Currently only supports 1 GPU, or else seg faults will occur. reload_package = False n_gpus = torch.cuda.device_count() if n_gpus == 0: - raise RuntimeError("Unsloth: Requires at least 1 GPU. Found 0.") + raise RuntimeError("Unsloth: Requires at least 1 GPU. Found 0.") elif n_gpus > 1: - if "CUDA_VISIBLE_DEVICES" in os.environ: - device = os.environ["CUDA_VISIBLE_DEVICES"] - if not device.isdigit(): - print(f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ - "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ - "We shall set it ourselves.") - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True - else: - print("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True + if "CUDA_VISIBLE_DEVICES" in os.environ: + device = os.environ["CUDA_VISIBLE_DEVICES"] + if not device.isdigit(): + warnings.warn( + f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ + "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ + "We shall set it ourselves." + ) + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True + else: + warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + reload_package = True pass # Reload Pytorch with CUDA_VISIBLE_DEVICES if reload_package: - import importlib - importlib.reload(torch) + importlib.reload(torch) pass # Try loading bitsandbytes import bitsandbytes as bnb try: - cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 + cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 except: - raise ImportError("CUDA is not linked properly. Try running `ldconfig /usr/lib64-nvidia` first.") + warnings.warn("CUDA is not linked properly. We shall run `ldconfig /usr/lib64-nvidia` to try to fix it.") + os.system("ldconfig /usr/lib64-nvidia") + importlib.reload(bnb) +pass from .models import * From 169608b319dd83b15af3d9ea3705db53adbacf04 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 19:03:51 +1100 Subject: [PATCH 0020/1088] Update __init__.py --- unsloth/__init__.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c77ae60ce7..8ed9c64e11 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -56,14 +56,26 @@ importlib.reload(torch) pass -# Try loading bitsandbytes +# Try loading bitsandbytes and triton import bitsandbytes as bnb +import triton +from triton.common.build import libcuda_dirs try: cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 + libcuda_dirs() except: warnings.warn("CUDA is not linked properly. We shall run `ldconfig /usr/lib64-nvidia` to try to fix it.") os.system("ldconfig /usr/lib64-nvidia") importlib.reload(bnb) + importlib.reload(triton) + try: + import bitsandbytes as bnb + from triton.common.build import libcuda_dirs + cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 + libcuda_dirs() + except: + raise ImportError("CUDA is not linked properly. 
We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ + "You need to run in your terminal `ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.") pass from .models import * From 6552b882096cc8e070fca9b13ee74f8e2a4b58c4 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 19:15:09 +1100 Subject: [PATCH 0021/1088] Update __init__.py --- unsloth/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 8ed9c64e11..c798bea2ff 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -64,7 +64,10 @@ cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: - warnings.warn("CUDA is not linked properly. We shall run `ldconfig /usr/lib64-nvidia` to try to fix it.") + warnings.warn( + "CUDA is not linked properly.\n"\ + "We shall run `ldconfig /usr/lib64-nvidia` to try to fix it." + ) os.system("ldconfig /usr/lib64-nvidia") importlib.reload(bnb) importlib.reload(triton) @@ -74,7 +77,8 @@ cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: - raise ImportError("CUDA is not linked properly. We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ + raise ImportError("CUDA is not linked properly.\n"\ + "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ "You need to run in your terminal `ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.") pass From 7c400adc03827baf386427a542d9bfbe5c571989 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 19:57:31 +1100 Subject: [PATCH 0022/1088] Update __init__.py --- unsloth/__init__.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c798bea2ff..cabebb7330 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -44,16 +44,9 @@ "We shall set it ourselves." 
) os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True else: warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") os.environ["CUDA_VISIBLE_DEVICES"] = "0" - reload_package = True -pass - -# Reload Pytorch with CUDA_VISIBLE_DEVICES -if reload_package: - importlib.reload(torch) pass # Try loading bitsandbytes and triton From 21e83552e1d086994bfd7b484d0e51b1f916c852 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 20:20:45 +1100 Subject: [PATCH 0023/1088] Update pyproject.toml --- pyproject.toml | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4e16ab067e..380c61b7a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,26 +22,41 @@ classifiers = [ "Programming Language :: Python", ] -dependencies = [ - "transformers", - "bitsandbytes", +[tool.setuptools.dynamic] +version = {attr = "unsloth.__version__"} + +[project.optional-dependencies] +kaggle = [ + "transformers", "datasets", "sentencepiece", "accelerate", "trl", "peft", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] - -[tool.setuptools.dynamic] -version = {attr = "unsloth.__version__"} - -[project.optional-dependencies] cu118 = [ + "transformers", + "bitsandbytes", + "datasets", + "sentencepiece", + "accelerate", + "trl", + "peft", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121 = [ + "transformers", + "bitsandbytes", + "datasets", + "sentencepiece", + "accelerate", + "trl", + "peft", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", From 6c075d171ef1970358e6a0a629b18b58f283e74b Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 20:23:48 +1100 Subject: [PATCH 0024/1088] Update pyproject.toml --- pyproject.toml | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 380c61b7a1..d18e401408 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,41 +26,38 @@ classifiers = [ version = {attr = "unsloth.__version__"} [project.optional-dependencies] -kaggle = [ +huggingface = [ "transformers", "datasets", "sentencepiece", "accelerate", "trl", "peft", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] -cu118 = [ - "transformers", - "bitsandbytes", - "datasets", - "sentencepiece", - "accelerate", - "trl", - "peft", +cu118only = [ 
"xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] -cu121 = [ - "transformers", - "bitsandbytes", - "datasets", - "sentencepiece", - "accelerate", - "trl", - "peft", +cu121only = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] +cu118 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only]", +] +cu121 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121only]", +] +kaggle = [ + "unsloth[huggingface]", + "unsloth[cu118only]", +] [project.urls] homepage = "http://www.unsloth.ai" From 0349faa87a317253f67f441653edebe7162ec079 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 20:24:40 +1100 Subject: [PATCH 0025/1088] Update pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index d18e401408..14a6c2734e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,9 @@ kaggle = [ "unsloth[huggingface]", "unsloth[cu118only]", ] +colab = [ + "unsloth[cu118]", +] [project.urls] homepage = "http://www.unsloth.ai" From e1d02652d4b17b1a932020fa79b87aeef2093314 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 20:45:04 +1100 Subject: [PATCH 0026/1088] Update pyproject.toml --- pyproject.toml | 1 - 1 file changed, 1 
deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 14a6c2734e..cdfa3caffa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,7 +56,6 @@ cu121 = [ ] kaggle = [ "unsloth[huggingface]", - "unsloth[cu118only]", ] colab = [ "unsloth[cu118]", From 581235ba36ea4c45f1483f6f2b7654b3ba8a6014 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 23:07:19 +1100 Subject: [PATCH 0027/1088] Update fast_lora.py --- unsloth/kernels/fast_lora.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 2e70444653..dcc887747f 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -43,7 +43,7 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): out = torch.matmul(X, W, out = out) if W_quant is not None: del W - out += (X @ A) @ (s * B) + out += (X @ A.to(dtype)) @ (s * B.to(dtype)) return out.view(batch, seq_len, -1) if reshape else out pass From 03766c243d2404769f13e6efc4b2ff19b365cbd6 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 23:17:41 +1100 Subject: [PATCH 0028/1088] Update __init__.py --- unsloth/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index cabebb7330..bded2adc72 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -43,9 +43,18 @@ "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ "We shall set it ourselves." ) + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" + elif "CUDA_DEVICE_ORDER" not in os.environ: + warnings.warn( + f"Unsloth: 'CUDA_DEVICE_ORDER' is not set "\ + "but we require 'CUDA_DEVICE_ORDER=PCI_BUS_ID'\n"\ + "We shall set it ourselves." + ) + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" else: warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" pass From 3197ea0cbee196f1550d3a1d7d786b7e40ce1185 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 30 Nov 2023 23:30:38 +1100 Subject: [PATCH 0029/1088] Update __init__.py --- unsloth/__init__.py | 52 +++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index bded2adc72..100625fac4 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -16,6 +16,30 @@ import warnings import importlib +# Currently only supports 1 GPU, or else seg faults will occur. +if "CUDA_VISIBLE_DEVICES" in os.environ: + device = os.environ["CUDA_VISIBLE_DEVICES"] + if not device.isdigit(): + warnings.warn( + f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ + "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ + "We shall set it ourselves." + ) + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + elif "CUDA_DEVICE_ORDER" not in os.environ: + warnings.warn( + f"Unsloth: 'CUDA_DEVICE_ORDER' is not set "\ + "but we require 'CUDA_DEVICE_ORDER=PCI_BUS_ID'\n"\ + "We shall set it ourselves." + ) + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +else: + warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = "0" +pass + try: import torch except: @@ -29,34 +53,6 @@ raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ "We have some installation instructions on our Github page.") -# Currently only supports 1 GPU, or else seg faults will occur. -reload_package = False -n_gpus = torch.cuda.device_count() -if n_gpus == 0: - raise RuntimeError("Unsloth: Requires at least 1 GPU. 
Found 0.") -elif n_gpus > 1: - if "CUDA_VISIBLE_DEVICES" in os.environ: - device = os.environ["CUDA_VISIBLE_DEVICES"] - if not device.isdigit(): - warnings.warn( - f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ - "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ - "We shall set it ourselves." - ) - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - elif "CUDA_DEVICE_ORDER" not in os.environ: - warnings.warn( - f"Unsloth: 'CUDA_DEVICE_ORDER' is not set "\ - "but we require 'CUDA_DEVICE_ORDER=PCI_BUS_ID'\n"\ - "We shall set it ourselves." - ) - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - else: - warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "0" -pass # Try loading bitsandbytes and triton import bitsandbytes as bnb From 705b67c0ad7cc5934c735c89a8cc6007d309e2e6 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 1 Dec 2023 00:56:06 +1100 Subject: [PATCH 0030/1088] images --- images/LAION 2GPU.svg | 1518 ++++++++++++++++++++++++++++++++++++++ images/SlimOrca 1GPU.svg | 1424 +++++++++++++++++++++++++++++++++++ 2 files changed, 2942 insertions(+) create mode 100644 images/LAION 2GPU.svg create mode 100644 images/SlimOrca 1GPU.svg diff --git a/images/LAION 2GPU.svg b/images/LAION 2GPU.svg new file mode 100644 index 0000000000..724035676c --- /dev/null +++ b/images/LAION 2GPU.svg @@ -0,0 +1,1518 @@ + + + + + + + + 2023-11-30T13:10:10.501490 + image/svg+xml + + + Matplotlib v3.7.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/images/SlimOrca 1GPU.svg b/images/SlimOrca 1GPU.svg new file mode 100644 index 0000000000..a7861f7ad5 --- /dev/null +++ b/images/SlimOrca 1GPU.svg @@ -0,0 +1,1424 @@ + + + + + + + + 2023-11-30T13:15:28.212061 + image/svg+xml + + + Matplotlib v3.7.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + From 102799235309e32080e5099fcbf891912ce27090 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 1 Dec 2023 01:20:24 +1100 Subject: [PATCH 0031/1088] images --- images/LAION 2GPU.svg | 1518 -------------------------------------- images/SlimOrca 1GPU.svg | 1424 ----------------------------------- 2 files changed, 2942 deletions(-) delete mode 100644 images/LAION 2GPU.svg delete mode 100644 images/SlimOrca 1GPU.svg diff --git a/images/LAION 2GPU.svg b/images/LAION 2GPU.svg deleted file mode 100644 index 724035676c..0000000000 --- a/images/LAION 2GPU.svg +++ /dev/null @@ -1,1518 +0,0 @@ - - - - - - - - 2023-11-30T13:10:10.501490 - image/svg+xml - - - Matplotlib v3.7.1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/images/SlimOrca 1GPU.svg b/images/SlimOrca 1GPU.svg deleted file mode 100644 index a7861f7ad5..0000000000 --- a/images/SlimOrca 1GPU.svg 
+++ /dev/null @@ -1,1424 +0,0 @@ - - - - - - - - 2023-11-30T13:15:28.212061 - image/svg+xml - - - Matplotlib v3.7.1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From cdcd2a7953be489948e8e6edca3eac1011d62256 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 1 Dec 2023 01:22:09 +1100 Subject: [PATCH 0032/1088] images and exclusion --- images/LAION 2GPU.svg | 1518 ++++++++++++++++++++++++++++++++++++++ images/SlimOrca 1GPU.svg | 1424 +++++++++++++++++++++++++++++++++++ pyproject.toml | 6 + 3 files changed, 2948 insertions(+) create mode 100644 images/LAION 2GPU.svg create mode 100644 images/SlimOrca 1GPU.svg diff --git a/images/LAION 2GPU.svg b/images/LAION 2GPU.svg new file mode 100644 index 0000000000..724035676c --- /dev/null +++ b/images/LAION 2GPU.svg @@ -0,0 +1,1518 @@ + + + + + + + + 2023-11-30T13:10:10.501490 + image/svg+xml + + + Matplotlib v3.7.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/images/SlimOrca 1GPU.svg b/images/SlimOrca 1GPU.svg new file mode 100644 index 0000000000..a7861f7ad5 --- /dev/null +++ b/images/SlimOrca 1GPU.svg @@ -0,0 +1,1424 @@ + + + + + + + + 2023-11-30T13:15:28.212061 + image/svg+xml + + + Matplotlib v3.7.1, https://matplotlib.org/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pyproject.toml b/pyproject.toml index cdfa3caffa..070ae2572e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,12 @@ classifiers = [ [tool.setuptools.dynamic] version = {attr = "unsloth.__version__"} +[tool.setuptools] +include-package-data = false + +[tool.setuptools.packages.find] +exclude = ["images*"] + [project.optional-dependencies] huggingface = [ "transformers", From bd424ee105d0f0c58cb4d6d40886da634436e6ff Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 01:32:45 +1100 Subject: [PATCH 0033/1088] Update README.md --- README.md | 70 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 57 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 57a2666bde..2819449ce3 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,35 @@ # Unsloth -2x faster 50% less memory LLM finetuning on a single GPU. +### 2x faster 50% less memory LLM finetuning on a single GPU. +* Manual autograd engine. +* All kernels written in OpenAI's Triton language. +* 0% loss in accuracy. +* No change of hardware necessary. + +
+ + +
+ +1. Try our Colab examples for [the Alpaca 52K dataset](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) or [the Slim Orca 518K dataset](https://colab.research.google.com/drive/1VNqLARpE8N8eYwNrUSDoHVjtbR9W0_c7?usp=sharing). +2. Try our Kaggle example for [the LAION OIG Chip2 dataset](https://www.kaggle.com/danielhanchen/unsloth-laion-chip2-kaggle) # Installation Instructions -Unsloth currently only supports Linux* and Pytorch >= 2.1. +Unsloth currently only supports Linux distros and Pytorch >= 2.1. + +You must first update Pytorch to 2.1 before using pip. If you have Conda, you MUST first upgrade your Pytorch installation with the command we provided, since it also installs xformers and bitsandbytes. 1. Find your CUDA version via ``` import torch; torch.version.cuda ``` -2. For CUDA 11.8: +2. For CUDA 11.8 or CUDA 12.1. If you are using Kaggle or Colab notebooks, we also provide a distro: DO NOT run this first if you have Conda - do step 3 then 2. ``` pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" -``` -3. For CUDA 12.1: -``` pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[colab] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" ``` - -To update Pytorch to 2.1: +3. To update Pytorch to 2.1: (You MUST run this if you have Conda FIRST) ``` conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ -c pytorch -c nvidia -c xformers -c conda-forge -y @@ -29,13 +41,45 @@ pip install --upgrade --force-reinstall --no-cache-dir torch triton \ ``` Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. -Then install Unsloth. +# Future Milestones and limitations +1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. +2. Does not support non Llama models - we do so in the future. 
+ +# Performance comparisons on 1 Tesla T4 GPU: +**Time taken for 1 epoch** + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 1 T4 | 23h 15m | 56h 28m | 8h 38m | 391h 41m | +| Unsloth Open | 1 T4 | 13h 7m (1.8x) | 31h 47m (1.8x) | 4h 27m (1.9x) | 240h 4m (1.6x) | +| Unsloth Pro | 1 T4 | 3h 6m (7.5x) | 5h 17m (10.7x) | 1h 7m (7.7x) | 59h 53m (6.5x) | +| Unsloth Max | 1 T4 | 2h 39m (8.8x) | 4h 31m (12.5x) | 0h 58m (8.9x) | 51h 30m (7.6x) | + +**Peak Memory Usage** + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 1 T4 | 7.3GB | 5.9GB | 14.0GB | 13.3GB | +| Unsloth Open | 1 T4 | 6.8GB | 5.7GB | 7.8GB | 7.7GB | +| Unsloth Pro | 1 T4 | 6.4GB | 6.4GB | 6.4GB | 6.4GB | +| Unsloth Max | 1 T4 | 11.4GB | 12.4GB | 11.9GB | 14.4GB | + +# Performance comparisons on 2 Tesla T4 GPUs via DDP: +**Time taken for 1 epoch** + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m | +| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) | +| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) | -For Google Colab and Kaggle instances: -1. Try our Colab example: -2. Try our Kaggle example: +**Peak Memory Usage on a Multi GPU System (2 GPUs)** -# Future Milestones +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB | +| Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB | +| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB | # Troubleshooting 1. 
Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: From 2a19ac282c3a6110f6e59c071d7244871bb3774c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 01:34:43 +1100 Subject: [PATCH 0034/1088] Add files via upload --- images/unsloth loading page render.png | Bin 0 -> 789661 bytes images/unsloth logo only.png | Bin 0 -> 57176 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/unsloth loading page render.png create mode 100644 images/unsloth logo only.png diff --git a/images/unsloth loading page render.png b/images/unsloth loading page render.png new file mode 100644 index 0000000000000000000000000000000000000000..0d0fb6281f977698ba53f0ac4cd86deda7012f84 GIT binary patch literal 789661 zcmX_nby$<{`?jA4qW%ILH= zd!cxldHwUsYgEcvp?%@%7`UV7!W*Y_oUt~o9d}Zg0^y`W zf;LMK2f>`i^pEUOLi@g!v9s6IQx^aKXW^tC0xb>=qAZ)mj7HXZTOQ#`#ah-2PW+?_ z(A45J92VV>!AF11|KFFl-VEKcrWI?{epDg+zgy~0IN1ODgk{aE(4>N#-Ew3mX6*sO zpn);j>zC@hgZB9k+AQetNa2itrn?N8$0I)#t|gO%5Ko_T_a(%|%FX>d2#HR{&&ph1 z3Cix0d!&izcWO)Jka}o{WcYdBvi5&R_5OQT$w^P_XK2|A4Xa)o6MBTS;);?s)A!BH zk!d81+FZQ$u9m3#!)0QsC*Wj{t8cjD7;N8q-cZHWHM1!bWh#u<|cYj%#*o+-V}-yMv9_e<@MiY|8I9cI`<&9%8u> zS-bUxy7%g%78!&ZvrhuUO;IJt0G_CY;eIHUeTydXmtiflLGdD|{P7!?5l`_myT02o z`TI@khnx|eefe4+wEJ?lW*Rxg`N8iQi1D@@@0|3R9P{s~6Eqj@gEO4t@>T6sZq0Li z=}&cXcfNDO(@ZxztNnLVE;uc|p*;=b2`v0Rj__J34CYVKl`FUbWOt*eE0vlN>Sg?oc6hG|Z9NZMcDkpJgp1ACCZ3LuI9vySr_8;|SLweK_}pEQny z_fWmF+vgX@Q3EeTR17pyJIIuxNc)0xh3xFmgoJuOKva13<4Hg4>2@?aTd{Q=hXqC0 zg)kc#m|M>S5e+(8<%>NuGN*zG8C^|s&JFdUo&^8lCOl3JA%4+^S2sG;BH_jw97@tF zh?Lm{&6UYZT-U+b8wEF1r-~_$?D1QeAf%3aJy7gllmoAxe1{fJJ0#O+L|2x@|MoL~ z2z1IKFZR0;t#V@wOvs`$tN4K`Lbz@B_x!rNy!rr=NpvHM2>jL1ko$jN{d*c5#O#H* z6BO!C0py=tqpq1#0H+9tC@z^lEX@JO_u)XQBSLSZHYn_ z@lA(nY@&@kmsMT0wbe(-jr@>#0J@0}Fa={$QptAPrGA4jN_+RcSV1wBM_18rlMc&O zW25L0g@6!k=J9NKD0c;qSXyN%&9k6nDEIoOf@B51Y4yKX|8mR4phbTQn@*kpNpcL% zYzl)Ky?4*HH}{vyomN)m^pDK;%YL^iUDLg=IU4JoTRk^jEqCZ1>Hx+0%gmzs5B(WD 
zTnScL7{l%k599(HWA9He^A9hJcf7M9*9BIoobw{0hVe*%&K^?zoz@o6@+`)BC&bSp z)J8|;LoqBIZc6raO$h2sINNW`aDv~NrK9b~q>;)1h$}NOrd*;ZY|*3NUO<@c_pfj` zpsK60bW7;f&EC7KJ9Inc%Wmx8uhx9rJW>qXgAX-tGzH)Qr*4j>F91B$I&t5YYfxGD z@Fr`D#l|h4i4Lg8Ybcw9Z9jQJvQm=rRl)~36l-TlhOaTh+HMoir=DeS)-Xw-6gQsb zt*@0;h!Dl5-3ciK82loJEVT$J@g!iZnNZ#srhdwYMYHY=H9(&C(`}O7}v((U&H}nD>IL8 zH=IbEXo`I4+z#r8T{2?Ex!iU=#(h=TU;#)Um|I_BciKLOjM6xq*GNXHsrG+KE=He} zB{Y}CoG{!-oe7GL{-aDGD&M58heF~Sld6rSSqV86xH}x#x~eB44ZLnu80{tmN-!K= zMBl`8b#w89NhADrzd(4vZ5CV47)TbyE+BSh)Fz53FWN8rv~qctG8l2uQayf{CU(e} z2(HHMU$@hzj6#YV$jcMiHcztQ@ zIG!x)m(Sg4b?Ob2p>^Zl>JcjaO~_SuloN%Rk2xOknW{yvYuXzWhzs zArIcD>|syOfs1`rXF~jd)Aa=p20>kt4ML5x2E19~U=-I@i%V8$lBuY4oKqF^cCwz_ zdV0F`ohZBbjx13LLTj<4qUtUS^A2G6A%HyqSunO=&3ywh0^a@-N|Z@fIZS{E5P2i(V?Wtlo6s!)A5BvUO4|%hb-#9o z;YCh?Oh$ny=pQEt6J15Y*QzQItcY%e6(4rBs)tbppz(|&wjFUSg2Fz{Z=QPkmmeE< z!q}!n8Y(KdLfrX1Ka~43%>+|RtkX1$YO>FJI&Rl~&{a0jV4pyGeF^WS0W7(WJ^iFK zrh#mD^*=10b`8qV*}++HLSux90PpVS`jr6 znto|zco_Lf*BnRoS8c(ts$a9VSfq#Cw=E;1lH)x_@-R;onj-e_@0)G3N>oxVU+e7sIny~ zW$vQO-+b7VD<6(H9!~`G|CVp&nwaCrm}to5__}%F*9WuLGV#8QY$C8?l3KNV znODDabEtSI=wTWK`?`eZp?R+2>|Ld>pB(hUYkjk(e|dKQ%)LFB-d^Hdkqn?{&`l95sO|;>y##a8`tO4x9PNr%+k@OdewV6z`vnB%1Dwni{&x<~?&|p7sUF z*-?C**)U1;v^p^Egh&j&983i0FgFf=dec7OPm5l9dGFS6O*_o5@S&q5xQS>Llz~oy zCh(OHdV6XoXu#P-L(WSIt~|w!1ce+x#|ddofINl$`*4_#RC8hu$Zf#%XA}!aU~NGo zL~#7Pxo@l^wR=sPq1O5d8miW;sp)oGEd4rCug+{W-PK8$JmEf~DnOGe4!aWlFqk?@ z!m!nMAh@T6QyRY-MqYeco9(l`@oRx#c#kJ`K={swn%AlZKtDNx*@9z(CTFwF)Qpy- z8sF*De!>7pvEJMBh!^1yh_sIetl6zx$==9C?fsY?G09q^ymF~t>T@zWTzuW`_-jJe zVUo8q^_Oz4r=$Vp_pwc4%wW|x_f%EB_t}Td1{@Y4k9T~S$R9`+6(8_o?_c3I zSXX&7*q=$-w%8~Tr#h9L11uKozUHNm&&Mz40xRyKPUK}?29&Q^QiBrgxCtf4YS6%E z@a)QV%9f2n7}`|Vma>1~w~!88WMc+j+UNeT!V}fpe`5EM-fVT?W}hxU;r=>daQL(S zZd9zbp=1@NuJXRbfzmsOK|@F$xX0)r9hKdxrso^Cg*Dk2^DCPnU@tpN@XU9h!Fy}1 zPqK|mvzwG8q+?G`fBm83U*-g(T`EWbOuEbHUp4r>09nrg@TbFK8XaDE14F=_4VXKM9 z%hPh3Px!I8aWb@-|8tPUSa(s5CBOt*g#d@5fg*PmKlCf;qnEZbViX%!l2S@YTp~V> 
zs+nn}yp_wK>q~60Kbqp$4|%gjg`&Ob*9z>C(|Zxg9bWv3#GSIYLv`TW#7YW47OJ<% z5ao@JDHxAJiv7o44_OMMTa_*AX-9K}{bt%$4vq#Nc@0$5>%SKWk=Kl_%GeL{pj&%* zO`|hY?zC{|_oBaH#z(GMG*Wrmdr@p1{c?&1c-$}#uLdINvldlJE?pbdXIT1G|D8Ve zemUmqGy|(UrJ;x=Yxft~fNhZj_RVlWd|XI%g06}Dsi)(c#f70e0cu8u=P{m z!)u%t2>A4zu`X9pQs3lig35lsuHaG+$XJYCW|UxR#oV8`$wp_=d87au-3H2i1#PIB zbDLaAlK8g~D0W&Qs`Ut@f)!h`>t;MxYKuQ#ZKy)?Zw)(4u%DYBRC6427a9PcL@UT-CpZ zS`?A}Ua6u>Q-oG=aP@Lc`JmE=yafu)G=#DXo9_~y5aPRCjRKtxS06UZ>LeGI<^JI5`INMWgBxXlWrn_K8?U*~J|42@ z{!N-xZYjJz_OHzrT3mI_B=-MY2;L3~aIq`Yh*@7!PbFD5xGL0nCjI*mo3``Aqq#mQ z&h~Fa@IMX0^{Pjig@c(0KXu4^g8rcK?Dn1SlyIKewU3DaT-D8_OOZ~#ssgs!YX>k#W z8_uyo^5YANjc_r=eOZnr%MTsNfWD0s6z4ssplx_U9_MI8N}ugkK6q67gw@2Y%Wk_~ z=4G?!mPV)4YMB01SUt)1!14{_b)>Y-Rn%EAyj9K+C#S%6=qt`wvEhnO%d;9&i*Q5e zM6HJ87z~=`VP49#$#p|YM>mV;2f_`|FioZ(C$0LU70#ay{JtI9SEljRPYUs${|WUF z$+pQ}o;(#^x3u!sNbA5}k2OqMpmB=Zl^VMMJ2^!scQ6Q3eLrMujNd~^*XSVyPn>j- z3tt14wOn{fm*??#&wqb`yDi*=>$}`9NVhh%t29RrSEhBEG4BN#^_!o?cCtySewzW2 zJFU4@zwG>D`XAaSf){3l!BaG4m>ipPAgAqm;iTzVm>SV#fQvTpOj9n0-@MHO}>(oWONIsfBtzVji@>-V7OLJV}hFP;w8ljGNm>(rRFfP@o zke)M%oAr0Ab$%r&V2g<7xa{LDcnY=4!MqoTTt5Lo6UNYb0~4*cjV?AI;4~IdKlCB+ zN>N$*wKm=6wnvdz|A1TOnu|m~L@n>@P^{l|Z#B6NG3hz{HDAo4ho%xH(NEu#sx`X* zog1rB(3GkC z!FPr}bk}7fYQw$Z`8)1XKe_vCMm^(@jOOS-_m)BK`Jt6JU3k<~z-jM9ltGSe?Wbab z=LuP>0;0l}XPnL08Wm055s%mkyq7k!y$!bEHrHRO%^we<@&7YrQy%xZOMr+(C5WcQ z?2OKrY?4{ALb7PRwBw%0xchrI36)m-!9tl?JkS|a%)L=|MG8r2wOYN zVO0<*C0d2v`ja&amReP8j_G4i9gGCc@Burslvv%A=`*>%o&6Ob6(KEGdvra*8vVMt zcxdbb#FFr?p14|a_^r4FaeF@;wO@1?!T+uKcxh>b8V+iDMkwvz$AW zT3sE8osy>x`QZr~34yHrNVX@wu^2b9-9G=~{dteZ--9RV%b=!Dk0R(Kx8PUr zn!Cs;T;2Ati5mM~ymQIQQn6$p(=L<4KF&|ij#VV1311*6Vc5|?T-hI35s0u?Rlqd- zZa;Z%(+;#-WR6MMrv5JwlZz*J*&lzmPodj7q*b_@q|md@i_V6KIb?f!?z@AB;rdy3 zKP1QZ^!jGX{**t@K)CmTg7;Dd(0`6h?_HDBBX}VtTyLs;+3(canbiMd_j8th9Shzi zob}{6JVgfr`yAm5`1>_e_54j95K`}%VL%f_!wopPWn7Gyk!McqV!wQ-LRR?aaqtcgYJAjtq@lFq6QK;s zEQi*oE_n&Rx2DfZ-&hA$XX& z{Jc3mgr-5PS&-pUJ-e0&ARi5Nr`37lbt_gN7K#Rg8?Wqlz%}zKu94=PQovIG$?U6J 
z?~1-ZW~oZ2=TDB!GVgQ!u~7{b<;Nac*tFTYPXoUdg}-5#BYm8_ae%r|H(ObsJW0Be zEMD%DB{`x^io!>dI%G2+zIjNS8(@9B(7|-uu@27EJNiYTGW>0{0$LnvpOqm$o@gkurk4_IG0fO4ML7pD;;W2!j@E3J3)p8ZM>6iP(`Y>Z4 z<1uoWuA}N42kaI2^?JbnWA$$d|GCu4Cw0~=dZB|HJ9`4WIU;~5eHAgE29=2@g0RGgSsW*y9`k_{||fmu2IFd9Z5Z1rW92d%|ip<5Qj7Y0LQ6b;^?WK^^^!9+m1o_b&ZcFY*XPvXN|)#FU7f?BOw8 zjHx`#6v4rH7e=0cSQpmNO;bX9`gNrd{W&G^n_pOw~SU2aq8-t=~1%);+Nyf@y z?Mmo><~*EC#X?w|-p{=EuDo-y=ncQKVW=d|l@gyO?Rd`3R`-sZFB6k6sE{e`Y8$Q# z721m>S#jR1+9uNj0yfBi$<+J6AQ<#xXR97DOIpcZ4nmY)Y*22|@qx?$m!^h?*KUmL zz58X*rJZ(Z)BDqz3^iC*KaZ=L<;EjdzB3OLd$L~4{71HoTS&-}e5Fz@ho`6!T-V?M z*|gVd!zO`27rD$H0VD5PRiJxngQE$V!=}mNwC#m79-r`DN%dbr*`2NBKG{oY@>vMBq zhr;Eo_c<~zoPuS_s?=s$rt)ZcME{iFmk-JRZzTWUZmqh_6U1SN_*+`0M@ckWXBP5) zA3s%P2(ZjffU&KmgdS*c)af2{;LOhpqknEPQ>;Sa*d%M6ndIp))-QkX?zOgQg z9lHr0mO58CAmO~$`Po_H@9FLmPn>6wL%zF61BSSALso{`Dz1t5#7i@4dpo64O*3yg zTq?n>MR^Wz@8wra_+?YrQ)l|?rRL`_PmZq4Gw~DYllgJ3P#A!dA_{w<=T_63xP*+* zr9#=s(J&W{DoSv&Z{=^#Wm#@DGQMv~Hyc7V6}D&pKonTR2{%ncc!gvYH+rGVpHdmJ zW8iO1kS(E;x=iRaXA8KgyZXuv-TuyXhx~T9Kc6z}=wIijS%fb{FqOGq6vw2GLt0?T zlPR0dLeJB9E6)TlgfSIh;wNLq1ls8J)W=Z|QABd^GYL0*;lcOYaxByxr&KIT*OU)D zCEk3q;z0NG8me^9a%`1d=`^~=ztsjhQt^puU3|aI^M2hu(7b$N%QS zxm|l_ZFzKMfAR+5=}L!w=Rzl?n_mG0EE(>=hszACw7Qx7M~-D|u5)5rRD4a{zoS{? 
z)p|+$asPY38l*7jS@ZLhW=a0jm!m0#GSDRGVc=FX`)9a2FiF42wp9U=s7|rYz8uPR zx?u`aV}~tcUcicXK-8fiGbEkJhpS+lhf&yYAFm@MX8R(&Ly%Jsl!0fw`1D**_@kC?nJIYEpZD@#Xk$57F~u}c*;{^1n4vpIFa?uYRkz&}J zaJiO=*=-{ykd(b;qc+p>Y0dI$lNnvZJoh;4aTiyiw3B_i@b}|}Zg@>S9$S=}uo+G{t^=5B`p;!(oDA_^RqAKoB%i5`*Y;n02 zDzm7-U0Yvk+T6*hkylPZv`Gu(bL0nV$&i?IFP z#V2Dp_4KJ=R6e9h-R^)oK2k+%#1`I+rr4&6e(+Xaw+jgOE2KF2SL4Vl zmmtcY*@YjxggJp3Y@%Gu%`a}SYGIH;A8N3~2q2qQWVy*U%&Lr?n!U5qf zJswc#%~w_BXz3^GDfa?@&H6;uQk|uXjIr?Tqw=s?@~^rUw1K6vQq4@(Byle%n&9dl zU*ZQje56X&bY9!HCKOx;ui?Mt;7d_%9muXX#~K!mnISU(!*In^@;h&cw|`?OCwo8% zh^b+!FQ<9JvMZv>mu5hOZS4#ipuf#b)9~hq^HYb3_v*46mkwxIxc*P-U{0l;^Y0zg zJbBMp{`JYyN_=r%w;G)KGw!eCy-2TbdKi?*8B+X9lhUgY4;SClC*`Hv+$5V+_6vac z;3L81&%v3PE?&k@i)p$%qE{|y=Ql0h2Hk`PU2a3@R0{H@BzMp6a8LJ0CYFOqPMb(M z4>VHo{x0y@Q!8;s27(5qzKr@KgpH?5{Kbp-upb)z!#{dYnx;dOKIe*f>*r`BZdKo$bfZ1(vz2E+%^G+?k}$VkCV18nznhY z1A$d^jmT<^EkwC+#0Srv(+tcHIr2QQN-!z44>q_qxEEIht)`frHyGPJQl%mb zD+Swv^X69E(a-G2p!I4tr2++e?{kp&3#ya*KQ_?|0MJf$wx7BcnB;XCWbdQGKh8HSO&^;(qFhUvKJq7!Ahr`-3|y zLl>i`;vZwruR9YTRK3D5>qg|u-_?wb|JiJ&N|71!LLYoMOWbp5pCnszeivfms++sq zu-E9+3L5`87c#H75gw4(;v}UJop=zCEUvk8nYQe(F3lU)dYh6)rAx&z9I{EBKAxMG zW06;f+49Wtm{8SrB09}iC_?`BEM`-y^Ve~j*0Wl|<4cBPWA68~`a<6;m+}bizzsur z;0wkraV9J3vg&HLC0g7KAb#CDdE?j-%q_I9G9y!-SE#lGMS{w!$Yto=i>s;%N6R5f zN_tT6Bf#}|iMl0&#|=X*N_WwzghOqMe}3!oI(sEhc@xysZ?~QMB(UQ?=Ogtn$K@2@ zdo_i%)qW|oYeKs>;;N`^ST;#|zEIW1hv?HcY;14{edK)?1qxa9e2mw#l3GmG)CQ9n z-#9?aX_8$YhAySt9Q&;iGb zzI>Zr_S^aI^q9~McgZh@Th(7$y3gwS2bv9GzE!2JRXXVpyoHf3r5Vqwd<=iuS$m$r zWdEcPR+QVcm9<_c5qw-<2QEB!o2 zNrOO51-yn+2~#$u>|>z9PYKN3t#HF#zN989d^BPpsRw&~dn!D#Bb?%Ax2AT!;dRLP z?#iYeQx^Qe|Bi7G?W?V_f-tY%uSrUMJw?;pht@VL*n1b+N~q9CPBwEWnOYO%d~l2C z3xc!$9X$OH3-F#94uO41zn?aRj60(JA^oGO4g%4F;Rzq!Ff8m6oN1DCG!XyJG}wW$ z^J_~b9C(7RS<8pYIvL9aw^Lt%+|Z`g2WRlVE_-)H^8D^n{7+E)t4!-+G7A6S-0Wnh ze|UJ}j)E&+m2dj_Dd(Biu2AwC;)+d{?-8Eg?&{mBSDu?Wj^}G z1xr+ceWNPg|9|W1o?-tx0+EM0WO>2HjhSVstNTq~(5U^>z%I>U6N0+UMNq{>T_|6Q z#e-4Bm*DHr(3m|aPD!c7NdouZ33|b%I~Ld!C;n zFO<+4 
z;{2R1$K;{Esb9xCu7G3}D1WLTx!`J-Ur|q;an`cNwv5q0sqqQbsl>&j*+?KwXJ5M= z+FKu}W-jjk_HsLXV~&)~>*jZS%j458*F*ZRo~I4mo|2p2ooQ8ew!O5Kob@Np+W1z7 zdG+?|gm9@+Im@gvyF(I(b~G*-(bwzcEd@XhR))X4IZb69W|^io8-$nZL4{?iLIGhM zuQmm3++cRS9_(EV2HvESd??c?m!f)Q<(>gUq8a!{GrD&s9)x}ZF7C{oE7;aXItev|euGGoY`b4qewt>+X zFH%%QbZIW6dtO^>roEo2g-aYED$*L-t}zHP!7xg zb1Qaa0Jyj3m>$x|5Kv@UB*dE5NBxomO&R7AU;j;+ijL>2pv42#`uzHcB)~Mjb3y+5 zXe~#ZjEZytNSrdE$N6XM4!54~rI;C_P-ew(ky(G-LA{U0O&3lyWe?uIG6v-@!U_u% z+o9jY?rp8EK>{1tbdr)nsk%1op`dt**$&I0XB?YVCHU$#qoFtNYftG ztHp>z&zc%^2cl{+B9qs+C(48I{dw4R>+e?e=e>H_If@ZW41ag!Q{e7J(o7# zd^k_g`ERV4>Ty3aXnv(aj_tEr%w|31Im}~eMeYd3x`p=72%MBJ-NgtM_2#c|A?kM& z{*k7I1ypnqYE5jiHNOZLv3VPv$4YpR;#;^9z^!_m3u>^G1QX%H)|^2F)gHo^Eo3^ z`>~(5i{f|uZzhAW8^W@$5YQg3>RHz;Md+f_j^6Qg*_f?zEvqVX{^vLv)y2x3dnrF~ zD)0{TtNZEY`#)!0}f3MYnJVZmF-V&q?sKnA`~qb z$v1vHi(B-&{>f>|@GT`=Xx3kbt=d)jvzleI7yhHeSW;t7{93?z*dFVk4e0WNn%B$m zpyr52w@!w=d1mFT<#g`r4B!L^+|w?VcSwVF*CUz)mHX}Xg0Xj-sG^b3-<+A><5{w$ z6+z`OX5Dv%@y+xM2H|A+EF8UU+d^mmUlompZ5=`GM{mn;sZf)(<=s~!AyP{V4P3lD z4sq8t1QDq(2qhzb7|JW9zMXk@-1hS9GPXiCX|EWydX`NSCRuqqQrVJRdryeQIMEF;ZuZnN5`t%KRPDldisMp{-% zXdv<>&OPKpdfap-;@Wgzr@7ly=H+?^<(Q?^+tk7@@mM9$k4jR}#iByFDT5wq6ce*6 zhwdTQtAT8+@Otc+qF3e=18HjZWnD(l#IeovYQyx+Pw5$Wg|W?Q5j1ew*cqho?65eq zMw0EL!U&H+43&DeXW7#!YPuhP#OWD-bk>EmP(-ltdpOk9^Zc~U0$HRH$AuEnv$U z&6`5yPZmNTI<`2E6e9Xxhq!5Y}`5J;V*zZ!^rE`*|S&7pArvCvluPHBv0DiNj6szp`Q-D$-K^20j1EL z$eqE@4a6S_*H%R$mlIyq1S#Ggj|+6(VxC^(l@Mxi6#in+)?4Mpc~w(A6bfE^ZaL2-w+RUURi%A zOhItlBzWml_nks^pZdb+9H06ZeMvW%-UPSmRIyHKRRrH)27!DgO-Sk}l-oG*hdR6m zAFU^ouQ2KS4ZoezJ#D;J#|&l8i{6@;G&otb=~}sO3xz329*xL?-8xuJ@MD00uY0-3 zzDAwA_secBwh>@ukAzo-smOE?6I=4d>oHe7^iG@m7Vr_srh^)J2v;z0 z20iQ#x86Q-ZaTv5Yxqf_jWp$7FTX8be#J4akMlC6?eFCl0!j)F0EOJFQ$N2u!3(+? 
z&RsxF%@dN;`FqC?pRtG37>)eB5><~_H3X6^+u2@hJSxqI98>aeAmV{&K?nupJ^C;9 zl$n>yxiNlic$iogr)oizReE~}-xXi@%SS4(;s9j*kSLcdm5B`z9z=lsyl~k4HA~op(r+Vtm0g#r?Ojw?QoCwG!xow7vI&29C`Qyso5CmO@UvK6f9Bolk^jRJr zgr|97e7PXNrglyXf7PPIJ@dE@+w(BbXha~lBz^S#|K-Nh!)gvZgLsFd&qOnZB3l{4 zP9YkaLMpNL5Faq@yU;Uku8N)d^Hl9}Ra2wCe@S}GO<%4VUa{mBvVK&gRHD4jROa3d zR#6y9Q81twsQr#MNN1SmezSj8(OwdzY&HLVSjBh;!lc9g8|%(iP0wGJVBY69ghta= zwfv=eNacXp4$!{D;HUqi3=7m#moLux4rF{K1Dn3t(x30LL&*FqE#z1%s`CHVDBg{H zZxB}^*=*xY?DWjtRdyC*7s#9bgCD90Z4IM-JW!LH7w?^kxt^M<<}`J+3~!O0qRo@D z-j`f!M@yv9p0N}!YNQGQb0y){sS=lzw;*-QZ=-k#Dm4Fzy#F6SUw`ot-uB6eV<7=I zp1HwqKY-gIRQC8U8kiKW;`%x2m)Neq2bFqH9=YUBPt4KSp-Y?Ys*+OgjB%yq zdcc>MLARL4m%`ImL~4R`tU=0)hp9fS1JQUYh5xGFF93tlmjYf&NfKvCPX19K5fluU)%C*Tm z>BnPm*x7>s&@DE4xO(62_$V}}-3t{Q2^V>}3``8Z%Zf1Mjytydk#u zO~L;1XIqAAayQ{CX%<$wotCF(^TTg!CAE$0%ebEfiORdga3`_w+$?tnMrl>^`&}N< z4$qL0SWghe+L`=9yLR@tcA#-ljEn~t%!7n!K3zd#{XMH#)7JZBn5<diVn8^`*3AgHcAEqWIV!d$x^WZ~#&{PXA?Pw6J*!6lBLX;+Y6 zc)FX(@bD zPF`3?OHlPTMW1-`y^3{sIe`knOKxnNgn+bM7E;F+PMBA2Y5Yg&#ump7%W zGPjP}H}kLvt_fqCfM#vdH68*#VC?>z1-Nek!&E-ajL5FB)V2#1m?=0a*CDXz-a7>% zU7e6`4c!VU?ml91l}kx1AnLI~1*|H}Jb3V2ZJ48SP%UCoFUHgiS<6PxR}=GQ5@WJ{ zhB1c$oPJ7_H>#9Xoj)t(F6F_PIPksvz1{Gw>;OIcbZ;Ap#bZaDj~2d>f-(~BPDKim zy$1pfOcsmt3f3S5n?$y@II8^iw%6!F4;Jw;^vlA6rQ0*#%G)Embf&xg8{XOxy#6l+ zNog-2%;@*8I${y6h2bz(bV}=wS2aW50xn~wsg!>V1T_D+a@t26-<&W1MxHYroE-ew z7tlyj*DQeV}WT7B6xDiw0i52B19 z2=OUJTq3S3S9kKjn!dMtce*J$`h4<`CP@5I&}YwRVVtyZ&3y#F)g9Y-euclNFFv6i z?!FB)Ma`1U+BAoQFZwGPPKB;=NGw|<+hAf^-!He0C$sE+>JH_<*NT7qYP~j2usCZ> zDp75F+n{NRjwv`9Net$UKK_}ArA81o?-aS#;1*>M21kO#qpmjaHx{uD_8&Xu7=TzL zO(@HaLsJYyXq%K`4=I^vnjfytASCRj1$P6@D|%eEkvafdeUgOACWfBtw&BFUk#VmE zlqpyC6@y8qZ=-%{FRF~Gl?@-@KM3+XPvLRM+Ghgjky)fVNb&&gfy=qZr{3289#GKo z`!myG@d+tNV@9~7FOuywg;Xi$bId~&%BtOQ`Q^%BpEvK8(cXJxy#G$2vZXQAuG5#U zO5vcJp4`NneK{-e)#J0%B6Xt&&6T^2;G&L-XA=j5xONw&&=X%u3|ZPFTPLHeRjZs8 zuB_iTJ(x`YkE?eKud9vP0MoFs)!4~toHVxW6Ew+*+1R#i+cq29Xl&ayCNuMXGvCa= z{dezYUC&xK*1h??Y*pI`h)bqx5@Tg$#coi1NC8C@6v=Mv52H!u;0 
z{wdRFSek~$RWRJF39qnFvZ=gNf(@1Xh4JDmi_Q&$`*m35Yxc{in>HSeyj& z$BKD!NDKf{(nSIoj^ip}6j7cc;fWbMEZy2^i^;&JxCPM3C=7(P!sjEwgt7s0ZfH?x zQ)k#^GrTSULkcDxwE9~!kR?@#f0E}G#A3m5P5Cye7`5lPcnDl?qYeZ+PhS$|VzXpt zDW?BUj!|Qq{6jX17)WNF!vFU*tw+iZ$8u!*PvXOWW*ZbcFX%5r>$h`vWb#dbcDd-3 z<0J(Np0v!mOEmIR1n*$iW6_bas4bgDT9JhbV?fnIhimz(W-&0v`}r82@BQ#o=z5Xg zAgMZ2#}8>9van)S|6roYai<(=FPa?QPdq?ET+)v$mOS`X)8P6p1bNdl*nv+RgyG`q zdMz0{4_D1FrM?G`QO`M}aZm=RbKEA+i(|D7g@@D-IET2^gKDHP$|t|d`YXVbp`s*x z7_qu|okUiOJGW<|K!L($ygQ$|{DKQC%p_-Uu-R2;m1b`BqA!=2A-9EvFLjb6*W2R} zkC0Y!k!*`RZoVa@ao#PX8!gDQO$3x)j`iaeNjLgl_VwOZl7kGnC&je*{;EtqiZEp_T^1guSv4pp7Y8(fya#s3gf5 zt^6~a7VtEP3}TYLxoAsCYveac`RH?|vcZ;;*vsvk~h6T9SJ|lG8L7R zM=Hvl&<5aZURXZs*+$BHN<>nZ3;Pa zvR`po<&Vb?tzYYZSv}{cd0w*f@&0W9kc{qsANRXiks5gZhy^p`{?nFgxJC6vSHM40 zWrbaO1bJkh&W&|lYnD(&{u0lg8dYS32%hX$Y&~(f4HR@y^bzTCfOJ zmz1fm8`&p#2+FvwmoJmEltk@Y%*lz{{m)N$2G;_=&i1H>blr>ba=MKq>19m9Sx9SA zqQ=P@&tiSNGvB7W0$b%*2nZ6*FeDk?ajKo6cHH$Tep?VNjX!Qfv2_A}Y5 z!@(xn^gQUGv<6PBpfzJF3A+EW7ho+#KY}JHBiTzWn`_}a{`=5!CrbI%9J~CjRZS^U zaj$#z$HPA2!!gvsI{Nr4Rl#L;&WvHa2ppeWV$s}(y)s3^5SNDgmm{RrRR4vQB9#61 z!s_;!AnyOE+b;_X8*(uM`5HjBj_VMKnf2K4)v0tfQSTCNa{ewTqd*2?{qXpKi@pkj z>+JpTpN{pa#qGx^o1|m^A;a+JgLBDO*VQr6dZ!EaFFy_5IS4W~cAi)zPx>Bzxcl zcazNT5@yBx{4(TgQc3hr(B<$&9Knc;VFyAS>5yB;!v~I+awl5;8BO^c=Y-ad5MO$p zPj!G~2gG0Q>^QC8AtTLCjxFSI_drJK!uCB(voS9J3Hzl*c8n>8Vg7dimjm9oAsFgp zAEnr6Io_pq6NGGAIJ2@ClG($UOXS?%x{3(-G*>4FGVe45kZldjF;91;1 z=ch14%pbd3mX-X%=qg__bym(}4DuI+&L8K+-Bo2|d32W?XkVi)wFq&fEMbn#yXbfU zY(X2wiMdaAu+Q0_5ohw=erWt*se=~RNVlx_UQ^T^nB)qTPVyaai7=t`g}xT7X%+AR z`3PR&~;dA+KVODYMzKwBMG{{-03rM^B2stk<9 zCg-3s(K}!&>-VDVM3-b0wGvYv$^8JPPVc*M3fBTOgGP`+>trTEqKkIRUVt<&bys9B zX(OyfZYD{qN`vWh^S{Y{-g>3#T!r8IEdjBc9ljVE5~301cPM71?E-O_HP%1X5*0mO zQB@JThz=&l`iau4QViycknyHwSaUJ{nc=39SM`Zt{0}r0K2Gd5b=zv67ZB9u?l6Ng z%?7SnzVG!;qkBeK$0@W11gNa?t%AB%m^9%0S6`(6ZsX4Zn6$1MBUacP!{OdZ1-~}U z{s~H!vW*nR4)%3UIAqRr?&iC`wq+sQ*Dji9L4Af3uVcV0`?Ckq7e=jB?=|k@g=Kp= z2CY9`XDcx1#HfcaS6~>UnNf#YlSzRRm%-<^BJ0emBR@_bPtgzy3zXu_$;AY*N1-|^ 
zN3I`Uf|~b7M}B+>bp>U*U44CzMC*sMBJRqUsqC3fs}3L{=+TqP(IB^Z(fZGUi)T~j z#V=^OvTn@klTBA?0i8Jlw(2$sl=cJtS8LMlO)`OnrHl|1Y$=mk^=qtCw-`Ov#A(YNU1RFN5148rGM9+L}hg;1~ zUOiz8Ei*lVz(GsvK~*5GtnYRifJ(jW((sGb;0OW8oiMBHpDNYvlz|!^;Zi4zr@2Ix zaN4Vw_ppX#gX*dVGmAa6i6u>{#tO`Z=@u1EF^$>2pkeY_Tz^255mbmFvvDXp)YPpx zXc~~lU=OiZ-OUA&B#l9&)@{v~=b}z58-Ifjm2azzSoKs=_C{qLT;Tr%D$ILTNvE1t zF}>B=e&qj8T5iSvT7|JX-;Ns0oy~F8%4Bf0P1Tjo$XH6Ey8Zd02|()Ir2Dyes-)|= z7w@E2y4uq*16+4Fd(e;4@N^${+8KqB?0&|4+lX1FDrtacZhLBVA$@!K8P@fN_^k68 zv9p-{H5DoqC9u}fA5@HnK=pui|+@?t*|CgB$!}|LZb^W zH9Fp9qlf#`#QN*~t7?5E=RW2M0j9t233c!ix1Cc{l-y@5{l zN-a?`{I_#LvS*h;>uYeZH0HUf25xS5xpem2`ltJ_#0vlH!%~=ugRa zzbBHSy<{V0J>N=QXNq+dLbaH8#X=LohnKLoAtJ@M<02gbirwKkRH>b(7q=j7>5;Ar2~j&mK_&!oH56t>g6{ zpC7B6JKNW=pSz#0L{AUtmoXAsy#MIIF2&LW`J@tBFbphPUT)Fe zJl<#&SKJMFc=-gX2LgEeq$a~zP9|g0DMV1Duz!*-qrqR3OTv*;>kiEJ8hUx5L?gsf z&g#Ed>KVf)>q-IquVtBqH?YOZ(^-sb)l0_k+%M_B;)kn8QzVxV5 zg|2AIWg03X!w!sS`h8a!s~GS1u(L~V;g2DHF#b=-_lGcCT4T2$_IUSf_^$CyS3t_l z&!MuMjeIA+5!-3+*GHrIDA8YRYbm7wstReL5UqI+rXd4G-HyqtFM-+P?Fp4=gP%ll zv6EUrkaJDa-7XSflwmpn7MDkAD6y4lw`cE<=bzHLv(Of&os(VxMyXY5+G8A{Sk26OZAWSMmV`|rrA<3?%6Ja zOPoLW!I8ar3o5`!~EX8t3=+>I}F~?UhlZ@f5_SQ(Nr~> z#B$-2OJW*t0N1M{MtC1*Jtlc5sS+@^kFxw>s17kK=2E>YfJ(g%=fH8>-z1q7>a+(J zQ*F-CE*>5ZyJCM%4GU#7LW#fG7NcW6SPQ$4wr+|ad&ZH=@u`GKJ54dxHS*_{6=t1< z$cuPXHC#rQc{gz5i9M4gLk7K}Hc}iu5Px}!Y&u@D*~y!Gh*wu;QV6~;3ph^%?g&fxib|>?tKfR0=06JdNql%43!go=-4OX@&Kf$CY(SHgKyPj1evE4)u20 zWmS|S893#|+LaqNonzgVuQf_yYB!9?9wcB5F#hbu;ZnmbIR@>tAxmuVtHvp&CfiT@k0w4x><#j?Q;m&(hA_Fd@S{QU~FFU?*vk2mG-iM^7o zK8+n6KHoD=F&=P#YWApGR1a50&!Wg|ta!s9(4C)6GK`E?+a&K#pUS7=w0{V zpUO3lc2IV_34h43Ky-}fCgINgfqwrRl(aWc=7Y3L_K%SzQa?AgE4|N4y}Q%hN1L2a zqMbfR?dG}hRg2^`g=T30VSqHyGnrI-Jom8lk9gkwg6-l&wDU^`!q6+&S(SzSZtKdA z)wViyP~6Zf#FErYnNfk0*6*s{q6BR6=lSl0_5T1bvc%d{qwe^_Qcl{Vu8H4lfzWRh zd>JF7FQ2bu3vumFr$jOBJi=oD*K&m?x}H(wojkhtE$|Z~1s>3b&PS;^{MyuY+NXjZ z`s6<=an&2;rmLT~+A}?B`XvJh%e&yuFQ_9jJt2#J5^xl7T=Vot41+thx{3FbQ&oR^ zLMajt`$XQ2GeaG5cTqZYy_}KAfM>Dd=^}4gI7`LWlyilD|d_KE>hN 
zUR-_a7bqj;-3i8>+pct|t~rnyQFYDbCkvvN+#tWH`U=2Ak( zs!qN8(^sYoPWUwXDZ6q78yfVc-#E6~CMLC3u0GuC8gJbfe5Bf%I#-9^EE$^2EFQnz zBsQxjuBtH1enk~5GX7U?5j}#KIM?QeRYK`(vvTB-A-!^05iPKYdn8Dq02w$^)y;!%Gfi6G2t_i7V;X1gRnWo8B zZA7;e%A!>zjrB?R%E=khK+M+eRy_7R74(8rKeNdFEhBSTrw$RBJ@oi|ttEK~c`NBW z&cG_#DUr9x!_a2m+wRmf!<2-@_iALpJ2OmG5&hTE7l@{|?+Ei{HA8N(JhBs1$h z*llV3m~rE(o!%BkXL9&PRFf4CaydGVRKO`hIeTctIRM2wlP$7M_S~@PK}=?pbX)VE==OMz!Yyh^ zh>Ax4WGcupKgk|WrgPndHNI?qybH*1%hKR`xJrLwaI&}JMp1OWk;0Bc#&I#<)K{~V z#+B}5Dyg3Wm}1<#k+kFI(a>#^duxoqnVRLA03(YvO8**PzTdu!mlO{?NYvj4(EVga z^4-kkNj?j%x|GBpI+y$vrTw4eUC2=i1I>xJ`f~AD_HT_odiFlGcBu+-d812bpZ4bs zV#P2QZ*@*qvP4%!jFjV-7puTrz%MNqjg%p0@ejrr{xjff>gtZ^E*^f7m={qR-3jRS z-d2@t>lU&qhlEaQEgwwT=Zq=pX7=}#)zg93HH)pJ4YM|8yri|*QJc>%0(NTG&X5oS ztF4yvl0?EbiX6YYB^k8NVV0@x$zuX9Ls8~JRj-3YKg;ey8KIYNSya8Be}cR}xwj69 zVvVJbYT^i}jgs0iV%L-NaZpEqkW`Sd<#BtG#)u1|zFK7xh89U9Fv_ax0i$70$a!FD z@Ao!4?Aj*m2Kfeh&Js43KV<_UL&A5gQ|-mo^u%EjKbYvNbG1Z$Q?%22-J@Eh2$=v3 zh^?MCimp(FvRDOolx7lH0fs?gI>>g~*P%d7`&hbgg}H?>=HP;q!e|9#IOE2E31;<7 z0dY(kkka^T!IJq?B2Z`Nkx4pssrSnhlo~xx(q`Hd|;JxRzo( zwkUup(wD1*Xa4gEe<|fX@8QMiGl=irdQ)Pd{~OO^JvsfxLQVEYj<=WK%Q51}*pK?W zHTgU}BVb#FpFH`pUha+Ee)Dy<+Q6V@_mFudJm6K=1bhiUbY3CQRucZ_JYG41>Klue z3M+$J;FvcHb;UZ|g`<^%A=bk?f^n7$QDjjT+>ix3AM)z^(!`gqqhVbk)5p%SBkSd< z!f$_{N%VcY1BO+`sHn5FlTjzT_H8%2=B4ja@$HTbO%im(jA+!65i75s6Bj5oX~WSZ zBNb3jN{FJNS2Jmstf0ZeE69Ll1=Tf7CpoMoQ{Ca~?7q|c>YV^WGqM{UX*`+;^VYV? 
z*Np04WwuDBw*Y{&L}R*+*mqS1>{RZrPyFA@+H*4SRbzG8(n*rH+Uc~wsbKBHp2XD| z2KE3YB5)NI)=Pik{34Vb-vCm>$2A80Vr8eL%;2^|x}%Rc`-PdkAEI(HB%)mI0itl_ zNf*D>=x+^CGMu$gWshz3#=87-Qt0Con6okB63WP%D&iCE4l5JJl?GM$aOF1SHb@D7 zFj!Bb_tCO!HE#-e-y)6Z69Q8kq;cbtPN!BD+}SvJwV5;CCfiO$G0}eD=l|cc+E?OO z3=DKCmUcM@jH|O26J8g*h+t1u@L5&j%6WFU8ZKFX^={%p;ar4@&{A)2XN6C;PR{2g zc0pG=hYoxr+jKpKRKs3jnZD|WCEKgqn$(*TgO2L4^t7U3aRa-9Dt%9`wvS?>9U~xE z+KZzP6dQ)h5H!=h3?+~58hxEeRL!YCef>?WlU8@5nDO!uC0Q49;IJ70SEg&*QEmP$ z@}=zV=}^}X|K21dF=+lls+5W1MBHfhm$?Z-V1X#(ITD)qz^z*Q$K@u;=Pr@YRF|$E zGm#5JSltR~ifB2&xAu5>cBhO_N*+SPQfIFfL7R%ZNu7uIAINPOuRqv4O?xwe0Jpeg zpI%XM`AuYA(h=f@G7mCWsi?v&1kKDywBPvb=p2ye296{Mt*CZlyaU0KJyi>;jFB8b zV*`p-z33Z2U9i_LTORY)ahd_*BH=_}|O-=6Po#ZvKsE0V$^xC&_jmXZB++vrbdqg+jXXt};&b!@;lc6$X=Xtw4sB0%pIjS2rAIC|qq1IycC z1uup{xs+rMd2HMXEJ;aBnV49uPlV%2?e32o6pV(AURkfBu(+$tK`ImN9g?7e6}OnU zI4lb|R3Xl(LAN%buvBuQ?EP*H-6vA^sw~Su@a03Xzs2vJD92VUp~^+PXL3nxbp{F} zspX0nC$`n_S0Nyp(*0hL<&zCHR%wROD0w7Jc~84plU*V)b5({~GdWZLr4S|2=^xXw z6N2+{CI^keM9ovHtBq4KM`oqLq@q2erheSXaOa>X=FJM8rfZ?H{Q|D$9K>!2KP%{spk4J^N4yZe;6xu6eJ1TUu zHgn)1Ts4`~Um-)J0qP|~xa6B#ywIb-Invzx3{&1WoxxWv^1pC~T(c%^+64Of^P9je zMfXS#hn&#ZGcnZ%4gqtg?z-6eH(edNeaY zsgZfZs`m(R1hEVVRRo$KAZ|56PRp}Sl`Z~{7r=jAB~AqhM)EITt5N7<|EFr#E3e&k z`_tP`mCgsrs`0`;ww`l94M7Cj-!lId+w*^a%R-j#O_npdIWkG?6kAiGH3Nf-A6*_l z2nY!Y+k=HDEhjxsT%#Lt|N7)a(uPqNx&e%$K~|PAk=C$E45D zFW!ekUV4daqK;(U)M@5@rGJgMu;0FYP(k~M3F60FG&u;i_Km*OGSbj?GKND-P)_J6 zFq~sudilsv+0}BWZt#qT9HRS=N)$C-ZUw*Lwx*$oTkP-uxoK29S35uXXX zHUg{IC_eOB!msP9vYr0R+ekny(kIL=wd~)w(){`|IK0>!(%ipDadixJa*7vc6y%`I zftyyK)rjx6TsB?v6TZDFlOa%}Dz<|zcff+XUIIInWw6l|I(^E+$GJ#5H2@y?ql?pM z6hTk|tHm&&U(rONPL4-X3gSrDi{MLx{e}{Uy)rWTR3VY*>%+Ezz=H~LJq~US{rsAG zJJ_vH8T%W1n04vsCjU;lq=^hjL`s8R1+$zCDZcNb6i7}tCEi%J2BV+p)YYiy!kAg>G%FjRbWie`!8|Pm;FqYPk?Y>o?ZNFsiTF{ zc$Ft8ay_kCYam1gS7JZ#dd5&G@apa^U}%V{gj_J|Fc+sglL~gG4bu0z{1IsQoA9%= zxg5?baUx8%xvj{n@n7ajTozhuxjD5SS=YV^NcjT#j4(BXtDgkmMdIgwpx7pGM)vyIy_n^gxag1*Sc$c{!i&m$PVMmYm?!AS6)tO!lFdoP! 
zQZvZQhba+_*}0oA+^-MQ@4r+WHdwSb$$}$9|D%C9v7{vKbpf!+MZqIAFZT!_fz;ed7Ydu>oppLB@^2MD0>6<2w*aP zuIg8faK`N1^RC}G=+1jA^gCn31}e>&(nsN3buPh~}G6Vs=3qV`MYnw(fr2 zGu^6_N%u`+xr=VKTJ&*)3R*~gF=+#zFIUe~sqf{St54Sij?nILMgZS=p{}=OpC(XDHn(-asZE*Vx|QA=5RLLJ~O%6DCl| zXhT~95AKwFdJjnO{Fh_7$ZoaC+&kf44G@%Po(yNaDrgRmhi+G1MhE=hy71gyN~~6~ z9b6lw(jpfm8QP8bs9doiBiBl(?vHtBE3lw0tleL7C?YCbQ!~2)scsC{QktE7(%D0g zj!!0=?42T@=tL29_MO>&yM(H8j#$_=yrH?&)N?qK&dP{m6l3qZY;le3(fhADNFEW; zap>maA(gAJT8+-nnz|z+KQv7wFr_R2+LddV$Ucs~cVlQHU0J@~UP15oKd+knv=134 z)wzAq$gE%qrR+##w?*t#Zmi)W9 z`hNcqcyL2b2M<)CUZQVZ*|F^LF-#!trGfll`6LM(Qm0{vpHpd{v5*KwRS%4`DGc~Y zo+590@a;>akqW1|jtY-7i1&2mjo*EYx_{y2=VqSJ82oJ=*yjI4HYR28`atE7!x5_m zNom+&FtaF}qLZ^6-DE7OoGM7?Kn zsMNE;QSO!S-MFR2rLZ_G0w4n`pR|Vl$u-!5&|aJ4Gf><)O?opaS-CZFT4E7ze17a9 zk|^ey6Fj#ro>JD$$&rn^xFDJYFUr903#&*94dvdauJ!&&Ik204&aebgD!PVZW=0^# za)B=WXLTPcNPf_Z2E2cBkqpawXl{!+_U>BLmay#;_`sFE(yR3fZx$Id` zb!CM&U#?aS>TtV1`R~Eg!x4O!UA=Pg2~OcPGalEHcfp|`NKv!R8*!XY*VGE^H@F`* zY^&2NZr?<}m2vRZ$&YKdXd`|SLQ0>C+&OW6OHxKijt>d&BDq>fV`PKwha2#gVTrjF z*$28abj-j4VjZ_^RNPH0DClL`f37IFv8=^DS-b5^OE%8-a1kVP#vR;H z4~xje1AacAeT=&J;uazO9E21WnRaA%Ev>j}c>GN!W3rTCxT-XgiXb)rM&)g#1*Ie) z-=LJVeqCfmf1pW{9}7d=k|?5smP8Kw&ve?;KHAZSaU?15j*hdRlD$1@lSWb%8c9)P z>2#!>rqLn#sY81Kf3uB9TaR)@jyk=9%`_O_u3-@58PsdH?^Y&mE)x{EGx7e{>Hk*- z#q+Qc?k5I3K3xu(P6nTZr-JaM?hKsO2^h^ERwBl`AxN#^XC&EGW zq6Dzwv{Q4b%aU|g8fu%=mf6}(K)#52X}j{)Rw>>OWDFQdC1O|YnR4t=f2yK(!`1Ii z-k8$u7^YLKFQy`61hk;kzyHNjb*bIGu!k3>sOy`6^}=TACZk1neq#{P*Fvz6b^HO2 zNCDHiex#RNywwes?>yd#l^3@f)5xBeNYbD@Ir|ql^6Q7LuZFzsUO| zC+2Ww^(CkGJXWGs?(A+RUcAs=0$D}d=ILqH+35sRNIS7#FrQ0%NWTJs?;=84|AY1R z-ggt`kRR|gV_R1*m2>@R*b1BT?NKUTaS8n~L`nOa%*hcR-^-emc6vhtlS&z6bVyP; zzHJ~^oapb=sRQ@R&KP?Kt9zi0iOy)Kj_!Yvo_l&fAw9R5;IIh; zr;o0@FCHsZDIBbjyh>kG9)Myko6eR0zq4Pw?BMdXwVvmJ%y8NAK9`Eze81#{*u7p< zX2G!Vz-gz5^8+HGUZ8Nc^%BuEaP?xNk+HN$=(tY?NFZD(4%M0n8642>ZHd_W#?mU4 z2y0rXQxm5PmrjU-xRW!TYFJ!*z@EAn6c-97i0c|!$rOt@mlcgL{!kf{#+6XTxxJ^; zA*>2ArDV0trV(T$sYrsUtqhJa;cN0fgNMYfkT>U8W26JDFpK|$P!pT0#EEx1*H#F@ 
z!aJcsBDl)!DCo9AdxuCBDEPPvxz&n#iRJF+#?dU#AwI&CdX+h8jiK{PG5hJh`#bDx8Eo2G z;F?x7ua{%6rhdMc8V5eajeXf>M!9KBr9Q(JhWj<^DR~jp6 zZHR{>Fq9<8LF0Q>IUb3hskZkgL2n5lHHh;)<5H8(^&fO-JshPcZrt77-_8N(kpJB$ zzHW-sW+fBYi0EgC*Nh>1c)|w$wkhNnt)ZRL^WkKOqg%Km2PmAfLpPI*&gs(3Eci~6 zonC=Kd&f`CCUR6c=sG9o@EKw?;$~ObZyU^LP)7FC7JDd`-s&h$=)LFT+2W*37}Lcl zzjf*mc1{WrO=Zn9dojRU3nOxU-U~`>!j8~5hF!4~9HEyR5J2`MTChEnD{YbbF3R&i#5O$@&ejgNe(55sWnD91jC-yU6b&UvzeQiYHN&0NO#S^n!b4< zAUtrI;Nl&A8`NK&g(Oz9xq7o-3!P7MtgLq=IoC^9uLFM++utp$qt2Qf@4#%EkyoCH+FCaNQh#hoeq*)O9C5@wk!_ z*yFIj2(^dErt=-Da0|^NB~FxYiis3>#)9yW3Y5sram<>kfXsWP3&T5UV(>#rlioscb0Ts65waWawzIUHS5_? zh*0nd>-Wk^AOfwahm;?D&my7Ywroh9{f0~fVk_XX$RI4u7n@TOIaWWe@V&kVwrE$k z##@BVHity_yrgg9t`r|0m>gyw%*ioiubc1{%pBXVY+CZ|#9eS-21h%3G0J;07*xtj zIsahgJGREi-s$x6ORlW`?1uFqX&h6Va_3S6G(HOzAI`97{lAmX{mFgfX*mt!WGrS^ zB7f8ik?rH+;m4{wlzozq<~KhYYhPNqnshL@4KMrf?Sl}#Y!oDxL&X5v2U!G&^gl{H zt&6;u3N}1xj%XT3SISWM6Cm#W`f7rm6ZmXc1f;x4n*(p_CsyC?&Ib(^-I1Yb^y~SX zu%4nEpGE)x!E_kfm^+ZuzXVY5hxZRv*J6&rNz4P>8rQpNMWYrJ>ylU4cieNZ>g{gK z5PJEM#?y|Jf7Gb@zWr-~@@%5+x)JDa8MSA;GqpSOs`v99R85Sh|uKsO#@7}j5?{0!eJDm#2>`ql(o&-D-2B(h*`{VIu??ZG6 z{%S%?;qJ1~42%+|3JlWlS{kwOa6$?M8Fhsi=34?m*m}doB9e2EpskzM70@VW%Ttjp zq2}uTIRiiocWux7##VJGaq__T%1nPK@JmfhvSKbL75Uek<^v1~uWEC*k5)4uHkZZs zZ>#56x9!CALw5Ywb)k6pKZ{5;?I@6V?@5t3&C!pmAl&K9{JEx%vXDXh zlJq(?j3$799v81Fcg5)|WPhl2m0ywQN-V#^qH_$Pv>`}gY78gyg$@GA3^MDqMpE1-6O18T|`D7Pb-XHH?4Sre{*M=|Oz4_bRpiN<3@B!37>L za5g6>dCRj$yKzfx3OFqejwU!esgZ`d?zr`tO3Ha!->6KI*o(^o3#J}cf-KEZ7^D*Z zRxO8wU1BcFM~V|)klk2`c{PdtB^C{0-%|tPu6SlEAy!SD|9OGgsu?=NgrjFSFP|q& zkTu$Z|0le4c0Hka_W9by^m%VB(ae2eD1Smo36~CslnjxnDEo!QALm+mYvQ^J1o9kF zY&j-V4u+qx9FS8C_041jYy=P6hd5O67>oCyDxt<$m}=H(TKCuAci%sJteg*P9|;cU ztcHerW|+nWPaMx=T7w1D}*w@K(_2s3pv<~IvhX&@7 zw8UIZsV0PIE_$)!*ik15M7$iP=3RRXq^?~-ePR`4d2SWd>Eu|?$+p0 z2;zujc89@Vmcs>S8314?hQ`rGsIy6BIG_KPIQ$MzV*y+CZ1UUx(@4T%Gl%z_>>znu zil;xdOJqONBvCXv?YK=R*w)%y>e{iE!Gekdd4c9@>+T-jGH#J5CK?d5$uUwUk>_H^ z#!D+k=@<6WCC7LbDm?fFw0Ar&3x 
zx0yrU@Z^G206xpism|%k3k0}@)7wK#9+kpxm4?4ca8lqN|FDtgovBBh zxkW_B6%DlG!annmD;|fTpJ?R$m--G^oVqt?#!_do6?mwKlp^O#V%s9+P4GBSr7P{$lSXk(_z?iK^|D^sA zIl4yXy@Y+am>>Boa+T0^)L)UP_EP3i>4@|U3``R&@mL~AwlTcROh#mk>S18m18gZN%}`O zF47L6s45~j9p7y{~PLSw@HmcksA5A7PJwQU-XSAt#ba0I1-Cws5x5x&0 z*=M~}uAC|x>UxaL0~i3PnPaaLnYGWq7H__tM1XXP3G1=Eqfc}GKZR8JLP=Y2ahu^4 zGJbSlt$2DN0CiD%n(6ba&YDx^JAY@-0Kbl&hwDJ+>9tdI3qR?_qk0l`=be`iUbZd{ zbIr0Y4os8Vs6R8jpvNaJ>wa>a44n90(4E82meWLpVT4gKx*r!7oj<*xKtHGx#*_EQ z-x%z>-hcZ}-Zc6KFQi@s3`>@s5nqdlH9l=Ko(zB|kws&|yw))jOAv}(>H9-*3s6vp z{fYc`AS>r&hk0yBtYMiiFba3m(|np1zhbIk;kP9I?U#Kw&C&4@pDb=xMWGcoZ5!_O za>$!Q!0Pu{!?Hy1hbF>}Xi7|?;1b*xi&Z%uxuy4WYmSdYZ{3$;@fOz|nNSOo) z>B-mIYR?x(mW4~s%2(QgxH6Li4n1$49UGpy%s6{g67mRx8CnFAs)N*z4&C~D{F#pB zh$SrxCJXkfU1C-BA9mS?P-C{e%(LPZId5bZ;)F`NTZZ1=M4@{9A>*3`J2c>Nv^-D)COTW>kTY;p8V zpn7R+MWGk$K2g@3!$V!frK`~MPUf$O>t4oMGv8@^Gq{&=(7hy_f&X*T1SVg<(Ffj9QA@F^V4BufTwrJ zF3eUg>SIIZz-MNYkkLYk?ZMLLnd&IBCr9C)7~923ioXTCzK{`a#<2(~6mk#cE*=uw zO%}w4_AMw$+AFa}W+!6PpPHb{8$r$ep$un%0qo-1@j5HzM_jeEv~=@Kg^XD$>eK!c zdNuk$3&m8wj@z!MN%>(HR5>@frlzJ-eTE#_o42QFsF;(HWzxsEVEg#DQwkKKA@2H| z;dm}|z!2L*BhGiJ-&lP3#WFQn?^6=_RyoF8T~jM=^rOODr|1{L(Quo-2`Q{{Dftpa zb98q0-tFK9*|ujlyd>T;Nlk2eLl2t$vOoML_jtE@c&XIs-tJz$e(C0U?q(4`-CUIN zzbk983zSL0$j6$#vu3Y4#Vrg<(B0hJ+7u?b6<<3h@;PkZlx; zW9))5lovbnzbu`-0O?GQMa3^~1i^-rZ${txdDI7!_3q4b2e2<*u{RID;+myxe>c>^ zx(GAYBs(F_HbMIB6e>`U2s44!)9sVFl1U5)MhgZ4QpC9V6CEZ>D7IytNRIvt5#36%Zx8nWS6m(SCizrz? 
z6Fn}6asRsC{<6=VgFul-gsko!!m1BWHcd8qla z=5zK__xX@N#)+G<`zT9nBC+IAxNR%*zN)#1=TJalamyw3kZ46fTQeIZ)Za0gua8oo zcX)?PN-Ol;!V?3Wh7pT4-jE$Z&ln{u0c+REKYR?HI6h9d7r&Wj>KJ4xkUDT9#3c&=i#ynwe7!I{DGCI%s=Vrvw3>1sa2IkprN`@krXhY%|=q*!1*IALJvYi zZh_CX)>Q3iu#ZTKQuDI_QU^Nh^pbYB_Su}63>jFJ^U`54b4h*NU(+Y*a)WB8jP0+aHz8#aSaqI9k z@!j#hG^hDq>k_KXa)4Jn(!6-7V8%M9sU_5R!emh^-gV@o#VwnjN~Kiau%HSQ-hweKRJ_O`%H z`L}mg8eJ?Fh!w~rm>(ambw^*wHN;SbnwrTqXTshvUMpeR3@qPW;->F`e4a>QAwD zx8MeHAmqr}o&|ET{lG6(aapB-^>ix;b)YE%%n4yErVJ}e9mP+JO`D0(SuWf`A4VjZ zGZ%TtDIe!ctC~%s8qK`lq^Pd8q5Ce~dMrC5x6gr_)qqiTtw(FHKq(&8%YJV2v-P6~fS8If{&wvtKMD>C? zQn$1Jp-HX5_-hHTR1uM50={-F{@^bf%$@s$%ymZguKZ(R5!}1ejRC4HEN9QR%T~Bj ztWhcw@bOSCve+M!Mv!$^%0EjP(MRxO{k`Y#Pwuzp%aTT4TdZ8}J+%*^*8RC<;Sf1t zmuW5Os3?X~=L$ak4l=Ji6T|}APG4`=H=P)0s~=*!>>vL3RjO8PUElfL*H#C6_tmGm z>4hk(t7@{8WU!^?vTk-fiOU0Cgw%xM@zgqWG#Q>E||q+J8?I9 z83wUqAoke*LDf5WWx{{)+Ig~V+fBBOJJ)2}wr#tnnryo}yC&PVtuyCc>%8mv{SRO6 z>)M~4D+gjVcZ`}~oQZHYV!uR>6t_m8T;%ti!#Yq-hOVo>^P|{lo8Ci93gwd+WR1@1 zTH7_CyUjRZVgZ@m9o&b1sCE1Ac-d~B^vJ}OBj$N=3YHqC=2y@hR!LF zGasMgU2nnG5JJSnS0apo#*@a_m+EX0S*b!=bot^Ix)4~yqSD6*}d(?YgeBd)MJCdV;~+OlLB^tLKQuFwczM zdCxc#mON2B0W_8HFh|2gj4h4f)RX7SJ?|O%P8F%;tgeOed(uqVz{!!g--v^GbCj5; zanx9&V?wPtu+gg2^+zjYXmZ8jYNNT2eoc*i-3sA}3P}NGOJWr2^yMiTZT#;gc?r8z z?l;0eQI%GhFmuy%+1+#3dDqibPufnipjwDw`)&DyE-7og-R5+?MVd~94vABe&5ZUe=!~7cZt0<-#Ae9I9QqUFG+KR9FmtOq`9536K_gFgjq39h@;K*pXIj< zia;0Pz>FNMs2U><<6QjQ6lV=Kk z_aoAU4q0xVYT4e4IrY)Wazi>mMpdOT_8*b|>kf(R1CqE;za0gXv{|TO#|8;8luout zUA$n7x|K+w%mt@Xw^Jyx8tH{_!Qw_tiPxBh~5?dFC3 zAvlR$gI1Cqarn>Z@fk4OUSy@p|4og%IMja2gh`iV$=d51zMuaU^DslSo2CU#94TVX zB0X1@T&(-Dg~3KoU|Oob>V9@>oflXEk>v{V~2hY36H~eDUyL0OMIVERe(}EW>5aEc; zeAaFJD9PqgY9f}Kd<^;*380^oWW~yIHkI6-!p#)AjA2?u9v(^=-2;`3A!g$>Hu?Jk zsRwR%yO&0Q_ z8x4d9ul#p_zo^AQs?GToblR)wbHUW0(7-|0_0R15+b+}4ESD#9#B0nsK*LAm3Lg0) zulUp<$a?Lcb%M0YUm?*5Gm8oaK4CmmRhINmJGfvoozF>FTDEweG^{D2h;b{0|4c-sF(&H%G@+Q{ zh`LJ|ZrV%CkC(vo!+?!%ymKqJoldKWFD;ut&j0gA-1j{e{qvG0-^(NBnlY(QOasRWAxb97nUBr#-D`V2 
zZfGZ!N0P0>^~&~ZD+*SYWdRY9Le0sWTUI(TqL0s~n@w#DWOAQE8R_Y`q9)>xgLM1A z%ZN*)nW3m!wAcKWHJ{4l$;u2H`*^L&rK-otoUk0p9TKbKB2Eh84869fbBDwvAVdMlAhy-Jot1$lJ!Q;drUsFO zw(d(37bCbO(ENJ)ypF*beh{e*T7_JE;QK~ZN0bTd4^wGS0_fL zU{e3^kyS7DIq;7zkK9Ck z-ZnZU_u_6(18eaH)CbQU#()#ddc}39f86`y`R3<5xVhi)M*{LSlbU|E#i^L$e_@rM ztZO*1ZKkv9^R<-L9W`T}@Vn{#TlA}dhkw`?HbK#sKc1Va@*N!||JUWsU(F2$GX7}~ z0AB$OHHVNVf?5aEUxzhu8+8E0nsPYR6Iz(YGPb^+p^rECvt7K9kcX;+K>OZ0<4w~V zm8nLocb5h|BWWk->dYv~9ZOe_f*e|Kpfp3g2=SZ|I1!^z`zAc4Ic$fkjUw7f_Q81` zM$B{#HL()_wcvDAf4vQxJ5ko%Pj40Hk4-xfY$+~t*?!wNPTkCOToEQf0lqK^Dyi{K zI2D^g;#|iWNmj|7RMy6~CR+%1RH+{+Tq4YQ@4}5f*vRXEtQmB1@G>+$%+nKw&|=N5&G8T^3TrLw4O;-X=^uK{IGl;-kQbj2 zDp@pluy%9wbK?4O<#UEC7IcfxC)|7euekm15Aca8P;R7*JyOG~91_$azsVGy7KMjl zF>YHDG$)Iz+!#oHnlox|xMMRoMy9|uo2h+lkYaD`m|hF|t|ZK#L_ZqXimCzwhU`N% zcSq*<^G)@XC?!Yl{$8LA!|Cw7x__+S{rISL|G(#RxJK<(UG75Hx*;Mc8h-F+By<77 zpIu%^Azdx&Fjp**fe0#%z*~;LUAaLmgxV8rU{##IuOw7GrxK(fca|6(OIKm_j)=>Z zf}-V&2C3WOgWY`ussDP*K1UyIgW2Gtp#FxMJ%(L!s)>9)ywG$=H5ph9K@sfoK=i-7 zv1>mnu=1i0)j-3^>e8e>427NE;_Pc<3+r9`*n0uld!|ycr$k)% zo^1{uKF+}wK8si6+$7>>F8O@uA2|(9O%AfXWVnzi&8zZD`!vaWztr)~j_c=e#mvoz z^6mWC$&Umh)_Fhs(UOEp{I<-s#LzV#9p-JbVmuJuwR}^-d&R(;T+uc;Y>qhF?i0UR z{75mKzdc^riM1eI_oIy?BF`DyXeafc14$@$ho3%$G3D26{f83n@F(7p0A&ut``qot z!d1e&Ez#Qhl+fh!Z9@-uZDF;g&#sRi$D_R5jk%}9XVZQkE8zF{8Sc@C@oYpjL9X`Gz(A5F~u6&kE43_Q!zLYm~1egInv-?laoX*OWg$G8a5q5wl7P#`Sa zF$O^(n#ifoAE(?B`tlLS%46O(C`~mCc+i$NHn0|IXx=O*vdSLwMPP~k0Srxh!k?eY#Y>Vz?cdT;)2FSurAqbRm0 z-fX(h+>ZFab8cKctq>q;9*vUBWRo~NcP-~2#reU2&249%r{>RE>#>+VlJJCIt~Dfi zh$9qBUuf}uB$t!qxZKWjrGECKn`XtMK>bi8wz|O{3CI+Bqc9}7vx($W7w_9!NScIW z8QWHeA}NPV%JpK^S>f$A#ZhEz6=@re*xW5!0!WX+;;@Zx7gS-&9PN1apb!iqLnQ;o zKFEMe`D?P7bA3naAUHF`qniMYrTG zqQny?*=vojR05#2oHkPP9z*_ znQri(P}HqyT-DMPh#~g;80B1F(Wo9-A@aL?L#CViA7$(fy_uAm@Lc|j<~(O@e>rF1 zlt_J>hs2?x?WN_5xk_lOoaf-T6}T{g!Eskhg2_%>*92eJx7a`6shs zCguyZy7Zi+RqlwnjZGG-!CE^{3B1r;BlH2~IRR|A9;V-=XVwsze*|{RgU8(`(I2F> zs^62Pxgf^&1?TN3qNn6c+ujDSS$l;Xf70B}Fu0A1!*svBLfD(MXOLiDPz~}XqB+~) 
zpP?--TxG5Ta*e^?@`J40(M31v1nckv32|^Xvl6gCOUBeC77(>QZnjR)@rApZeR<|uI)J9dKhI6M@(k$k_t2Yvtk zOGQBvl=ef`w64+n92D+$C*c}xcW#YiLeNY4wOKd=(h?WU<#x*!p#W|>(-(YLHK-}f zz$A2$1xRUuv7df=z^^ug-|?rT_(58(+dQey+oV*Mily{r%}G?$gDj&`k?cT#zr(16_iRn(~f<2`*iq> zEh}HQkYSgaN7593zSYi*GjLV3Mg>sQHMjjw&1Hw3A~N(TiZyUNC1J&WY$AsYl3}d( zMPlsBS(Q*fDgPiQVx?Q3OEhy7dO(aEg(J-iM#hf3FKy!lA@OSaHR7o{@78xC_MGFr zW&gLUH{~NmWi}8cl3ShPM|qWnzkW#lerrzkNL_um3e3TF--o6D^PT>y*L1A;D?!1p zq5szA&e@g-;L4d`vINP0e`k2z#8=ukT{Wh?xmj%{y>rnay5BKfN}Dr16zeWz1`FNKaDb&CUkP};Dg`97v2;~3U0nrzLh)|9V^vQyL1@&1M5#IsslcVov*^E0Cg5wY*CiL<{nOjGfw&-ZDD!Cm7~U) zK3y;ymezxs%nR$>d(`d^(YHZq4K|UiiX$%g zH#tW9<6MyC^98NslfnHM?MV=AEQvs30P+_*;59F7FKxYOm26M(j}}e^n3K_jgka%h z`T^>?JU0clN60@iDii@_Wr#(AqSXSQbh-LF z0p($B;Kaexv+sFG716|aXc-70t9)~F)m^)}P=a7l-VE!sj?b88R<_E>ij)-hyUe{f zTYESZCa2hd0t)dcJvx4f#qw|S=_XJNY?W8u`M?ru%#N0)r7bJHSa<&dNCAuZViexO zS~mM=rLN+mFl#7l{C*G_5_bISqEt0#sR+u{96L?%U%(s5w@co%S#G7el2-MJ)24n# zovrT?A)x)L#d;_>E8q`(MQVw?{xgflBFp8Jwf+5Ri{W|!)L#v{mdE{++Jp1or}lYEb+&dfwnFQ;(T8!~4jge8&qxxQqD5+v50a9?3OoMSsPKtvyk=Y(0seUj`X=z} zGDaJn9)ua|`ke~?BX4O^8R7Jb|NCQhUJAfeeT#bRaprn#>bYh?I&}Ame*DA6EYP*I z|D54mk$bXKUcIey7u1nTL(Q#!&61$IFr$IJwMBe^IBLQq_CyD8VuHsGJ|z5Eu<%*C z?NWO$+8v^uE$-t0?`B3NJall-Gf2Mq?)3#f{r8O6n+TRu&EY4f0TC`874HaYo9~>i zm-&Z+sCD(>8W7IA3pD9TJ1yF<>~cIy7SvW_pdRjG@#nAi;h{2q$ndiPeWcSRET!7B z>1TB|@kSWx>mZ)$F_AE6x(=j1Rr(&l!a2nOT>(K(T-Fn_)+gy#4FP`(8X7U6+6jHX~S}K**~TIN(Z1E zu}3=ETiw_Ox?9EDhA=y0MIjC}pe4AJ5JT9woqPhsU_(tS6->wjKt{NTJi456P<$9q z8dSh$Fj!^Z4;KqdxAeO8tvV@bo?JQ$SGrGg@{VO_F_FBdU8IFWQMlpko$CqIrX0kw4sfM(`>l-> zkSUoAv6J)~O^+>-LXPf*Fz{SMx!_IvAo#p#A;dbic7+D%{=LGrNplM08cj;M#uWjN z!Z%PGJZeT6JJjjdwM$xh*YJz?rw_*nol8~88^|T8OPcmdruzR})Su6dRqaPVN3CL> zrSssVW1<^S)MW6jg?tggM9?f-ktxy_Pfp$fPLEq}_Wy4&XU3j4zN+&Y@LDvMP^(WR z^?qgs4T|gB-;~YV9Vu1IwqYAUMVir=GrDARumL-rrZ{d(O)?VrIc8|$M8n)(Z09T% z-SB2JcCLe0JB}bYDqsfz1xMZx9uFFrl3d$}Z!Cq)#6G#HhFa7^!M<&=Rpm4`nzW4< z!6o+|;{x{uWCTt<45Pki&kGo4neQOM4M*<^r<&pP+dJrOq{AHd1a6zpXf?W`7X8<@ 
z(Ju^pLMpC(3?D7!3U?lPVfZk7v^6)V3d|o_`E>}zB_2I*ep*3QNStUozy1qeuy^PQ zYpnC;J#sACBuykro<}@xRZkrBy}0G4dl#kpaVA`fd3O%H7-|IT#0)jLCtFO8CkoB1 zTjLP3T`_n@l?8MGOicx2j_bF^T_MM2Z70Pq3@v{eDq`R>cPMzQ4o*?hs5O7@8|kGDNh$!9C+U10^4BZVDD$`Sarr z^K{1F9Im`L%?NHvBP&9N{-Eh+ITc1iZIui!N34$MBKyVrjNsE+dp^Z_Uw^xNcT%3Tr5f^Qc{a&R`4G9CzcP7Gw8_(w;CH@U<%G@rxJ0m%*N8ZMz&TBG$lm zbil0`Jk4^@k+y0tSP~Y4lF%U5Wi269d9cf^WSP@UA(O;D9wLOkPC@C28reXK7#^u_ z-ndy8n{2(5Q%1w*{(F7}DR{)KGts+EjD+c5^P*Q^xy*K>W*e{Q`4>ckC{FMmQawB8Xx#Q$Z z+q}(*G92cvdE@LHF=C>cZBIUH11p+s6)mVF@%tC(BTE4Qk=I5RlfY${$)`6FOJ=T1 z@Z`~-)_d!}E=~z;e%R#eqy(I8`}5!A*{^3+`l$YDA{@24y>aUQScZP41j_5N>M>gA z@lKmbr$`px$1I7uL`EWUqGd7-qE_c8{P5E!pA&e4WMegZN_H}(gOTxz+>5;^qBR^( zbFMZp)C!D#Ruh1+FXec zv%D3SVyparE&!a^VG3!SU)c9{Qf8x`n)&c69y!a^1n>n@;XViK`N2K!e;@z&t|KC6 zt=hezUD3{u4F7gRl@p;_4HzQ=YXZ7AS^TGqx0uHxVzzz_^FL<1Ylh^WBkn4^#JFl? z?rR*1_*JjKcUyTGlT0-w)a@V>W9+?#xl6eSMkTJq{zqSiGOOJH;5 z9j1Jknrm3*(E=z6bIz6L+p4xUx4Cad+i&^MEJ zlWr-iZv3#CO$$sK)S(|hY<7PDY-%e;@}7#PJ{Qm zd6!6F;t^%6elsSiu@OJ?VhnH{cn}E>(PI?M95)VuIM}&P5a()QxQ7X>fp83NmbZW-F z&a*R^YNE{2P!TS@>U^VX_cv%8RK;td*bQtz^>DGzWkq*FkfVaIq>4iw(Q}6QuCUUS zR0Rku1wrOu0TO)hksj9+&;fVrGIg*Gj>q=|whr06NjvhNMp?DfHDB{!wxK}~zi1TI zTY5Z^4}-5yZXvSa<^C0^5tWA_=g}*l#n_x z#c5uND?1b0x`d}1#ESpVIN?;rX)WxQz`Zm5nKX%1^8Nd#rEYuYsUD_ak|%i%U@%A# zdKsRzRFga#8D?;`d86Be8Qc=};f1S<*IdRRH8Bl}3}@>Pd_+m8smv{MJxhpTgK!mJ z2x%HkNMcViJW`5BP5c$bOtLY@x)yho&P{sV!{R}sO15??HACWMvD{rPhJg%VK$&Qg z_PnkgCv=Co&i0jqH*HT>CYuq zB>rPYLqijeC{yL7XBLGS!#TPNCmA(fukVs5lQ6$zNRoZ5Unbz-}K#fm!*+{))2DrfDM0S`WMV**xmE@_{a7X4Hd;~Uu<&3XrIvkHPZ0wygF-Tq%yr`EXx;)vA)d%EV+B@8 zA86MsR+giV`ASOw&T7iPKn zygSsQi$ey)(mWdZSk~53gxs}6jm$At`t@zSoswmfmU(lL!#k#79Z$}}8naH^*T==B zlg<#WTDLH@oMS{X@&ebLxtqg0)Z!}3Vjtck109wR?kui-G3&M}o7D~;i_fYyV@{9h zAe4uY1BE$+n2B5MMB($9l;PZ&sgTxmigPOGj~#oAUY#`p}$3x_H2t zu!=t?aaiu|9?6N%Fx;comV5~t*W&kVg~*s)cNWvs(Zl;fBX`1_ zYE&G-Zy)0@r#+%3KeXfyyIO|ndPRNb=XbK1$XKd7LRJ5Z`g+7ZWl^wu#k0%fxy=VG zd-x2B0-D{r**qO{cJ+G(j`C|+K0a`F-!L<~F0ka$)e;Jurw!$%;Z6>5eQF?AH{KZ; 
zI2mu2O3Il9AlZ(YshRO0EC@Iu)>YRj-ncLDQ-DfpVpbpx&h8~o-g&Q?jXRgk6e?rf zc6|;OU7=cicqiK-DWZmAc%?Sv(O3!GG+h7E6AKbrMtPdsxDmkJs6dWRdzbWEH97@` zX{rGKXlEc*sou*KL{;~_4j$5ZQ0vYWtB1MI0DHnxb_sDdDb;`?i~uD)Cuf`ID{iaC zbp|r=J@t5ZgiKc5ykf(&x34c+s7cjKbI5Wlc^nEhU<9c4ca=WfU`f^pOZ-P%2cj~E zBt4l!$e?B#&IBLHuylY_gQ}CnjI>a@y99_DMi_+&*QF6=-G<-xEz`u4@KYux;Ks^FB_})iiIT6?66$j0W?;Z8siB%B zXwz)CtEK*^(w|#>MQItdGXr0#st($EHt)G-92rx@+eduZl9Md&c(yX&%xNRszFN-> zay1;y*VZ*#FqI|r&Wul1g6Er6u>U+(OtjPI-FhuJ`rQLg%@B-|f}{l__f#^p8QkRar9L_Ff;+BIaCWXD}&s z zwB;izhl&Iiw*JTdb6jy+7)^RBz#D1zdL*lztVJ2{#Isr>#voC(LDDBLvS;Sb8lOxD z5N=XDSSWMz2!ml6^NynR$N=5|TK*iQJ}6OWJOF=2z?$B#H)DNbRyH@DVJiPu4|+>l z#K7)tbb@X3<=D2bzTd2ct;?R8@&nV;H~ctfYV&x-yz)ZAH*ZO{0^jbB-<}+twJiRq{*G!UW_j%1*WUOdg z6=6e|hG-8d(iLE=q#WFNmz_RcfnGjrBK)Q66srr+E)cR*lEnjG>VlXSaWKyRSBc4S zXzLih?8L)=`f(O636B$PjZP-bI{12PoEy*{xEw=GcOU_WZhk6J^0*UDyd)}r6dKUx z6sL91m?X0Nl=zTW|B2-KlL_wVHNte2?S{)kL#KI#qk{}!QqdY znF&Q$4(O)aa7~3@nD8=^6auLh8N}eUEadTO;M9b%^nqwG_$JrPp(%8oWNCbT2u9_i zm`)`4FB$1B{h~Vx&h<6c^l8`z?2sXvRNNZtz*CxZEU^&2K&qs}B}ccBBtCL|AAnON zv;>Jmc_ZPL%@xF7CT`9&K@}VOt8G_xPb?gZVzw;ne7feHC3VHaytP5@VDY@0oJY0t zUqqNEmr+rZE>a@Ea^KVmgxh7!DDH3?#+>>(k{ch#h*~>GZx$!B*&`zreL&y*vzA27 z%-wn=`wm^mw+L<2X2{_b=tL#VtF8#&q}<7@CISg@e354RY|}5+Bbtg-t$t>S;(~V` z=hf&kJWu`H*KQ_@7FlL-L{oM8_WC2|2u4yN43->uH8DhoEDd29I0WI@_nJ&j8+SEq zPJrp3$#)?}*g|#~b2pt;tuO_))rOT{81F!OPpN5!(ZH&P6Z62@fz$V_Z2moTX{pyd zoY~0O|9&1G-xwBp(F1RPUG%@-IDcg+#Rg%Z-*}5IVcOTf-eL^vZ}oR4*V##?0c>D? 
zdqPWawCUK`#>}_|-a9EpE7pWEgviJw3m2gF$-BH@weTW@P)t zv>^W(wHIz1k8(hL6HyG$mxBGnagSFcs)MM#Ld)ryMZNJW!=?mVq0YB%qfOjN-TwBW zeg4dT$wla;cBWU<#OgBYI`1sRfA~8j;Fvg~Sol%Fw!+7ms3NQ2@+N(x=)icv)2M(W zBUI?HSD5-}KZOtwor; zOUnhl{6Jl0!^spwRKAr%+Z9qOlF%Teh!lUrH0Qk0P+&pL23Gvk$?eA}8kCBqHeno^ zd&fP?hN>#9%SZM4ofox+b>Qa~-yS z1{-M@RbBxB@FpFXuD2l_=MHyS>ZCF9dPMYA#`=7J#vXpGrMvdF*GYV7LGFp<;YZq} zlWKQiGkmoSf4N6#ivtlaSK5kIQ4<}_`N0CA8!f#>nBj~ULLGtv%6QV<{GO9}17AcZ z158<*o02W#T?a=&8yl|^fBKEhZ5$qoD26)squQ3}P)0Mncp1{=Styrt5Qc7Dk z@4ECq@l*Mg9bo(Jnnz%!(TDq9+9_;7<@ELh-~<# zh!zD6%K^%bkHTZ%W!xZh?E0d?2xYC)Uz%QV2pd?yc@T3?ow;RIQV^&lS}ECx->arV zK$8~jJE*h5FpE}Nj`ak3Hj{2$7d$pD2`{dBG-oA{?QZeh^L}7qKsbM zYZS63-9Ro|nVluF`UI}29(?fH^X&Sv^DH zMs(9;Mr2n8^lHw+_?WB)EA6S$bY*})N4iq4iU}n~7vt|!d2p9ZMYCCxOtf~w>;)*i z7;XXrA^h79GUbTHLdy)^pB3ItPcsjacPMlm^ zQbSNj?LQ*YCJAOTwij#Dl%YC`NPf+;aopskk=Y=OdG*0kVPe_Pc z>jb%A+6u+#+)`CiPfbgr#j!?cPRM(0d9-0F2JcalZ4e#k-_y0eX`678??stYt3o~ci}ha1gp5N8Z|~|1qj;3?%N3CY&8`uhtKu@E5^Y>ld-I$g;+&ebB}?jhmpX3U|G$)AdaN$c1= zyvj@LN}`h*NPvE-4Ju{P4*-E<^I&#&lR0brBZ7-&!!%i_(t@{XvFc=lbNMD2CmwxS z;I~VD5UsH?YkT~PxK>I0)l?`KN^(~r)WqAp&J&aV6DC}Wq~~@i6nO0!)f93PxuY%u zVO*j46XB_4Q14&J#YfEi;v6YUS_G}FllPPV0n7J`#DN_QJ1@3xkb=YJ&!W?vk9VKV zBX8`F0z!um$4j4X+oyj_qQVv%uUsY%kC6U4FKhEJ&+VLXsVo9=Hs7}CI@S|53z(l6sECQp=eC{#(`SU=i?o=yl;P7qqNZSax!KfC@qhAWE zk&xC+TdR{D4Ls~ae-6{#Oa&}yZ3sKlh)b6dHsS2~GJ0`gZ2R)n7Jj__z#4jq-i=F+u{e`|782hL^|?=!oN5=x0%^|_ z5k~uQi=EigJ9p}BwhD?2F9`}*vhj3&ZZ2ynfP??;sU4UERmbVKj{CvDrGx4ud7J`T zEWt7BgHN5Y1OtRen3T{)F2%cevaidoad4273*?D%Zf0Rk4b^rzEQGgE(2OL5sznIU z2uKIvf$7j?Wpkyi;U=Y!+ zLr5`BK%vIp05Mj^66)`lUGS10PpU$-{{k+v_a8LqHn4PJMnTTB_~+r zp3Tw8Xp)jMn0yBZe|`u+fXOYaTAspoNDO4)kCz5l6D*%f)QBT5!Ew$>&^g$MAGjqB zutrV_sO6yI^K^@(+NT(HJh<0Vl}t|giyTeXo{xn^xZT?pG7yxn$C%XGm%HzRh-SB$ z*QW~uN5s9p(%9RZ;mu7JJ^po^m6HWA@O-k3r)*#ofsEtGB7OnJOh5${g@rj4gZe<2 zTcO1FLF&e}0jRwwTdiIE>-V|#Dw1(Ky=y{97dC0YL}&bOD#I+sVCXRf3VGo3ZzX9| zip1dij@o4h4<-$c*H}U)E};!3^Lo`*OnAUreo>i(Q%9&#<8V)VP*b8w? 
zI>YiK4(IAJWS|i*CY>y7s*D@voY;`-=f2aIF9YXzS>s`r0iJiQc+rEElh@fbS=&8s z8`8w$ez8c|IUyGV@G9cw4D)M>&`wcobJ7YN01gJe%2%&V(;%*WlZ7RG&$r@sKmnaQ z{$Np6>@Z9XU#On;s-3{)lyzKEsFl`40bPP-Deg^j$~qOElu6^nF2+Y@r2#=@5;VZt}Vkd3`EYC2%=^=I^Tg*9Kg) zRug?6ky`J5uU11c^0DE+ni)l_Itt7`_C6L{_mo@$++y$kOVM~DVyb^VWZ$?1&9o^h zfso(P;n&wW@I~NA3tl|B-q1A7ufUH9VQC@zb2M`Ou6llq!)P>MV{z%1%=~eN`W9=mc+x141nG`3?2>}l81tHU!d+T>WyFs zjlE?v4EzQdwmF~oKCEG>$%FE*$KB3l=DXA!kroz80)RJeyasW2lM_Lfd3C&uT!cs& zl;Klu2K_-c4r!}~xZsSfzjTz>>=HcX&q8LcsoS5S2SzagCPc0q1S!X|@OR2LUZnjd zC60JE52q9@>kLp0O@-bBXq6ldNlq@VE*De-X^-I}IX6#M;ro6ws{|slgs-<%xT`UI z(>%z0ayIkH3RL9`3g-}#BwHu@L8=5Pf;)ktH(UyEanT2vbzm`mX^W_%JL|=RnOc0F zWYD@K1v1nvjeI#e5wiy|4-2m;;*N(?95psM7|e_Ww{GoL@u_gvnlAcI^b|umS~)|I z1$-pyk}I?foX2KB$6Whzgs&E7H(^%YlO6)pxY}pzJlmU#`0dE%bmvOjLf`Z^YU0Ph zZH6RfL5rV6Q-qPI7AelQ_@frm@nK5Rg@}coI1$U)m@n%v15+rsmxL3JN5uAeuuW75 zB0YbSD3aNs1*1`GAZNO3YX=4|HNSM#dxL4sF1{9W3KMYqiK7`*=lt)vnIlV?OcB5U zNA@_+RloV$Z3``~r5GXdRUjo{Y_&qGQaHtZw?#;Bq^tX*Nygsim@TfNXo|Wn4E^qI z>`5`-x(__V{PTtcqEodBeQX9Ztgh3qbL`a$^S*VpqF>q_FUvNV;UA$#-c$*9HL@j0M3F8`(uB=$X|`cAs3x^ znyxP9T4c`dI|cTy20F6^7Zh%E!4iv@NeGa*hPYqk3R!bzx9>0Jddf0U4_Ml9|G@UI z*GVs19=&z{{Egzb8#d?rjpsE_l^e0PC> zE}0@rMj0raupiL;6l~= z_T)ETbt5h-eWj3wjk}08C8$k!n+@=CfLT-=4uvOvf=nybR+VHxl*$ zWXnF(`-Nj0KQV#1y1No~Yc?j7-&l>++kpGa)exLttl$Kmw@b)WJkYg`83iK_k5oJ!BKRN&UeIL*{Xq|HEW z#3P4AX&C%67nzvLiOk#R@Q2n4q#*Gtt{l@Hu@?B)s^40YJC!ph-tFJ3(8tEr{ofR2 z+SwIh37w5!E^@JfMJ$y4v?Z?7;!sgRV-ZNkdCt@9C!JOG8-E2wlEsrMx_FJgClELk zTUG?R=?!=DFF1po3et?&3n@9zt*VEqi!$%Vp4Bi!@9oCKJ&%qCtp=GHpq4 zwNGyUmQ8iabG$cj3gQ46jb2_69J)mXf7wdb$sJJoF1M zqF}CSe3ezjYv?-ro?K;(9m*ps*GKa%^d`Yo0Lor9ga%Z_T*2pjMaktJn4n=a-*ZH5 zh82Na3xU+?wXUii2CZ$cFJCAKiD{UPZH}~i=Q-D0HJ)>sZyOR^4>L764rv9dlNqLQ z?K2b9@m^>W2rbD;;MdEDIuplKjb5Iop|!@Hp;b0Zc%nSIVvX-qlTrG(tp)!p}48RLGB`HUM@XXVN3_K0$< zi*wWOxOQ%=g=Nt`_8{P_6{_*r^MoqoA7(MsW__BU6vLXDDn6*h6Z=DY_D@)DkkIGMV?p=Mnejs`9t;4cM28 z0381?*x}d{QiD5hZhk$0IY-^spZ!+TnC7%$4UL0aGL2mf^c(>FP*uS}C15JPY3QaV 
zk=#*eCYC!w`1oGgJ(eOOPbLp&YLbE8zHlx4YHEEjYA*4Vc<4DcAG-zaC6r7O?Mer2 z(`N2bcnD_04)hPNZ)Z*)1}=tf|IeM$E#Qo>?Zl}?#U!A6$mSo=tM54f6>0}8U9&_s zC>oFcIK$no?KSwgWe+=}Q+f`MLpLr%n9!K@1MNenWj&-4l5IQsE-VN>1!N?jow+qRlj$ z#`_Z@$R@IArxPZ$hyk&qoF_u5^wTPVmcSk5ogPXSW)$8io{XSNZF;AnGkU%$_Bghz zA7^QMx>AK@l3zdl)=nGf%wlf|6S8J`t`MgaiOPbIbelY)Fi+UIiQqO8oPf1PkT-Wa z^iw8*w}a#tXIbFQ6r}cupkWeRxHh<@;J|C-F+ufjV?I)NxF90g6`sJcVmV@wV4g}E z$siEXtn3L#kvu9Gca8$)aq9*wKN4jpiUM?t)DNgB;HdnM_HvH~P&Y1sLEB?^vqiRk z3S?6otWfYw?zW~w-@y)4b~Gr~`giEd_eWxFV!+$}Ph)>BgWBgnkc~|jAPWO_80fEY zrdvFl5r**md4AkhJU-ti2+J%MBBYFXxH&#!f6+7ac>Yi)MYOn9$qhdLC@jG);Qe); zv2%UjlTkEt5)3;Wk8a7xM z5KjoNYNe2_3ln-KR)ph!cHj9UHrKnz zUT47-B^#JVCo>pJ+Hsg~!FHUD%~sJAFg~~&rYe*V zgb`Ds2Hk)=_51;*F0C70ei^J!vziFK%X`O{fGHjT*nfLPs!DX z{-dO4AaeNBLc6tFQiEo`L%f0`1w72_TgadGrKobulF1h#G*df>No!(Rj*77aJQGw7 z@l8mf>VAoo|`Bq*9@2`jgq`*HX8tGP_UBJ)3`Pml#~1YNp{KOG8LQgnalbrJCMnoFT6DoIsec_2m2>Rzk1V_= zj4ttuk(!<@b=PYoAkl3BuvR&gmk0{Ihg7(*3QxC!E~GC7e7Jo5E>6P_?;7$o9v+*d z%`~ZK8Eh;T`iaA|Ibk0Wy>%GOUIb0cmo#s?e*S(MOie<-{(ZK~%<6&+gDHeqfs4k0 zo;2EoRQ_wU?T(EEIc3pieKruD5GlSJeAvoIqQAj+>KN=BH|vF7VRqgj?x+rKo}r($ zOFiqkgv+yk3E<-U2GO?0u{2c@?OouhV~V_nt*{$icBaltbU%6h<7(BdTkVL_mUHcF zm5ra_2(8auxq!%jwR7^$7H8v_LO%IJttwxAdM;~+UAvE_5V-|HvTd!!qqy63^K1!2 zK2Se4^R~p0I7$e@!@;@$>jeM=?sej8`=-86yTFYH4s~og-OiT2mjy`c`hZ(7kOZ(H zsT$r6z>$pvmI~L^Ro&Vz4sOk*G;QastgH!`Y_w<2*?vJ%muQ>kZuB=AybS3i{rA?I z)Y9^^@Fz8-hQ%n*HrJ$vSo;*olKpBGl;nI#_uonBNf=Y8m$^B!jC;4eXDsdP3_!aT zm!^Xw7hiG`V%bH=QFjMdy}0u{!?s8*Mv=5URUp23N(p-EMQ5c;&c5su^f>@Htyjr` zzdt(n;qzdbhaa2u1Vg&|k2CJpinI**34PZU#Fea;YE;d1O;oEGET0c@D9fB-CbL{q zzYqhkMMtN^D^vr}3y-~mXQYAn2JOHd_hZjOr8sJyeARvt(zLAU4~dbn}kk?t455~e@`>b@WLwjqz&bSv+=5; zY4kpFq;bmzC~_)awdEkl#(;B^cP^a@!(N@TLdOlZ7%bja0L`*ZA!Z^_ET$mh? 
zP_Rps9`ViFXq}uvFWz?hgkBl0P)lYS0{qq9)0T+xuD%w(6y?Sa9IX&y}2?+2nYxkh%i9~d^ z3Gr2v!#(C4yqg*QpkH;51*3=vvI>anacnr;rpxHZE(1HYy_dDZ0BKD-#oQX;>BLy{ zd8_{9&iagxw1yerCG~xE^wV-MnWcRL^=)rtiPSO~%wuD&yUE7hp-Frx{@WAhqfl4V z#M@`INqUYMR3&C@hk8H{Bff`TfpksoM0?hT6H;x8nJ1xvB|wi#@jV-)5#cvFw`|bRJiL_j z3s&boxZ)E#O&3zecT^#TZV(NBV7*;Hg?@m(U5rq10bRN6+_>$Ume&Py`E+(J2Pvuz z^!9FF(JC4^DtN5N&`K%3LfYDy7@3}RPdI+rUE1>#z2JBdDCOBMWh&G;R+%HWI2Qlw z(brTDpc8C%i4Tc~;bbyJj_l(fnxQ(FFttu&zB!T|QWIx2lb&gUQiA%IM6AVtksznN zdZ^lpnz&y-NQem{FTzPmUb(k<*sVoz(@9c$TSSEkR<@dYUyG3NqD{lF6mL7@6*-c| z#;YOKp_rz(RBeG@wv#|KVf_#Uc2-kE>Mi-LY;(vchMjXTK?%cfe6&F8_x-I8*VkyIY?kYUR%DH9isFpIR{{TYE zxCO*V`Pczm7!8<81~R5-88Xe2>VNW8OvNXo(9ST?9gEQba$fqg%}A0;Md5e94fs_wY?XSz zwY29z*IQVUBinTbZ!cd-7S@r<2|e%o;&(V@(C^y<-UN+AjcM?Uq#sAQmlt=U+H^=6 zWBKje1PPlnfGnF7R`{G-QdlS_p$f^x)akjd=C6lAfFFZUVwH)iM{f^f0?r|o@9N1H z14x8sfIVsh`d9K2B`ONUBI+m#hN>wD8@a(vDM@2K3z#NkT>xxJP1zWPparknDR&KB zOYsl}^d<27GljRAz>-vJuI@zOK*FiG3Q~fEfi}#hQ?eL+A0I@q(;cP*(jtq{_D-TA zV`990ist+20A$zJcq{SYL<>)R=g7$1kI=FZaD0C~XKj*2D#!O|$gwA3n)@uwd^#Ro zWKBf(WU~4z?1CMlGG~PRm^x9k@T=5qbhC1HZf_BmG*Y;`HJ#MO7f=j0ja*!ZX#apm zA8W&1L-wJdfl8jm33W@wkWgn%igScd$JUkVzN!KTf!7MXrgya(VB_?0r478L3z5sM zs@BLVWth=WZ@(cqS4hix7shRxBJ&wP6!}_;jYzT)<7ri-UH<59Pp$!9U`Evf`!d>wEgxkLq^cv+7K}82KbTkbX?VWEYmXM*YX+^lU%LZxc~pCQj-^xLuytrinZv?hF^4RTmP7>Yd)lAc#Oa8LsaVFs`B;_bP>#}q zF&rm_cXQ>_?@a3Kv`Y58>h5?E7qRNYC^zF+C0#v5;dB5M`$r=Qa%D7i1I7e`YkLRh zkX+rwVkv&R!r@WrlS!3Lmru)`zC8yP3^5w8q@klDOTW57OSZ4U3c3DDPXi-wm<@BU zpWnc*YoUpLxBT=wqq!1E1pYvUQLA4=Qwxp@wf5bofTo~jk$#kIL=yS;6zbfs1bD?|!H`KPzx zt%mqB+~Ay7Wx+-it%hG5M~>17Ix3Xp2ROdIS;(fU7kCIVF0T(O#iEX#8CS} z6JPmsr@KfvzncS`Cy%N9e#dW-+}62y2$yK%{?HlyCN?k%M#%P(SJ&;AkkP; zF~Onrzjp&3hwmGFQ%fctL}bW_y%7+ZQ^+ce%9SI!qJFGlN2j+86yDN8vzCVoi zXW5&QvdI3~y+^~mPHg6f?H4v7HE^#)Afao&lTq$DH@Z)!)F|D}+>r1~RSD=FL05s$ zd=wWn3;qnN%=HTT!1{2AwBje_Rx!vOS*9LR{%#CyS`zjo{muwp8JLLkwJt<2+bDHP zw+WGk$yU`uv6-r`4;L`2YZ&bv7L67bOUv$>AFY|xShm$)!iu35&!FTkY6}?c6*an0 z;1&va)Sz~(fpAse#E~)6FPCtTA?k{nmqE{%xzk2rvtpMFs>>X3+fpR0VYDKo)2rh$ 
z&kyP0_`FooFVV>|dlVTV7q!-Eu(&)&jcgm3f>w)YfarYEbV^+!iV@EaV&-$tb2mO< zvkpIy==%EFkC2ArmCWvv$lkMi@b!bR+Q81<#XGzK8||^vS|=Mk(W#ew2%~NbdD+fQ zs4}f5tQi?WUcp*GoE9Nx9teixn~9L^t7nh!J6fqhks!whGrZ?IUq2jfmNm{FW8Nf( zD*lta`Nv&E-7~b>)qb&`D3Lh$bk*7h#VlQ_3#N`OWYk^{`K*JRsBfm?Uqow-hKZP# zml@u)ZLJ^J^Kg?T{nCs4nV?1_?Vtm<4~clk6#rIH(`xaT#ZTWi{(_A2a&;%=U#Fd^ z05#IvK`0ra;54_0ST-(E1cClI>A;WKe<<(kNX4plK z^~SVR{u|Y|Cz-*DtRj8YX00l%iN2@_wu$5F_D#@*8j*t%R5~qUCFB&L`#M3Q;A_8d z{FOLkxr5KCya`i=7(GR`f|{8`n*VIqy=j@hUS#*u<^{yxzINr+?_;TDONl33Al6eB zOJQJVS`9cw`_xGHo*wudstK}qsIl?>*yGotC*l`DMtXb1MpZkLtjL74XxPiz;`6Ci zqpiZeHU7^sGWpfB(g4EW$gF4Y7J9w;n>0K6W$`eW7y7je^&L=DwX!|%3_w32NM08^FhX3N;5(P)1T;so=3yltURYy zA_ZB+e?xwF{f%CnV6&aGO3JcP=q6AJHbhLwQmEXIG+t7J>TN z%;@a!;)l_ueq|w^@|Y^fkx^I)Uw?_|T){xROAG5VB4{v5+&pfz-&|}!0h_0yxgNQJ z=Rep#!;3pCpjT5dGf|g*!BIyk>hNW$cfvTOc%B5&Is`~M_j~@~5NGwA%u`4alM6a2 z?ks%cTCW|5(t^B~gRbTJ+r99|t56o$NA54Dcp51dEt&~ts(-2-EIUBDAeG|lz z93ILE7Auf8%6sjomZJJ62TsGWSSI2_IWQX4{% z!;xn)l&t#S}PTw6o(xguCr)oo<=}%Qn z{gu=DN^+_dN+`uhQhLKwN&@Nz)Ar%VPprM0o&nV}vV^sV7r2BwYZ}qF;SqA0sjA#z zV~{p35U;N6d_u0ZPaaq|SK|t?WaGCdo%;-d@)Vb@0#P!P?5&2J$(gZ2Se;Zc*aMSg85r-SSBcTAJSHw9EsjA^ z2Ob%y>AP3cIjb@zO{ypLQoKxI&8@X zE_FS$XV)f69FF|c04?B`hijhU|BXh^Xy1nLTl7+v3Q`;(zpNl?>^?wi&-ra~r_69&6tkbUkE5^bE^|(s{-U9x|@v+oi|~?G3#i zQl%$e4G%#C+WRxte+LgohrpS=#(7+ty3Y@hU!c>}>~hcz*8m)t0M-Ws1#TH{`-dp= z0F+z#;GUbv_4uvRPe0LC(KJi+&D$F>+wOcisjA={M_RGFQQhO)^D?WeQaLvEB{CBJ zn-B)Yq_3NQX5IK#LW8eS&2T-NapnhtY;BRE!EJyWs+@|+-%CX<#3Q%q2lH>f9w&R) zOADGMV#ct(A7H<8%Ep2VazomuMzb%`)vSwqNe}|~8g}8c&!KJ=BBu9bd92$OGF7J2 zY}qFLs8{d%W4gYYf~#*{OD*9@TDEGjRyYi!sbxA7q%wcRDYPb`vETO37R-snQT2>J=dgWyMxT`xuA(mY;x%DaYG&z!**}=%6K|*Ihr8;K8qf zYwl3bWaFf(aZ=GZz@;C^DgT+N9WjmPe76t53(Uspn|Bx_x@`py;rIt#XtX@oiwDpE8$6(`q;`$+^s>9@OP{FDoA{qAq1S(P^@UJr%-x zoY%z(*XZUz2`aWwWJarrr2a?vIqZ+MUJS$LsH_~qAujXje;L1WK0QQq5FybAgV-vP z{LV{?%BnF=eF|fb`jPs%OZ(N4$mLt<8d4(4=2{@uZl@~G-d^N?X>xsE%$~w+tl%nc(`{qF3u?(DnaW-gT_@53QWaG2p; zl96VL*h{69AMwZt{)f?^r=r*=YcMVc=dZzvUQA<%LvTqfzJ3+WO%!c)q4eZE$6>h& 
zXREPy-ZA(Z72V+f3-(`9RCvfY9hjj|R;@%PT@^4A5h;aBE%)0iHKf}1io48Xc$OnX5w`NIkb`7z)r;JNf0BzsTW#II-UcC zjjhpQaCK9=kx$7-^ZSIdkC40~MChfhdZv*l22^a=DgAB~YeZ7ogzeT#S_^^JXqFsy zldQEN%l1OlU)Tg-rPx%KY~JHd%HqT*GFhqg_s{R@UdBmLc6YcB3BZ10^QZgvaLAu3# zVqej7C@omkh&Z@Id{&!fSGB+Y53~Q}Okod><&?}Y%|86b@C)8?18ew^m;es?L<)^{ zjk=l_th3xTSx0QE@WRgy*RYuMQqVrK_}dG9QeA{n4S%PIOsrGCm;!pZKLTyd)o$QPIpFY*e+`6BO!)hOB3*0}D|&l}K33?kBkKh1fs))B!^Nm# zDUxsuyXQoC&a7nlw{v@*(@Yy}L$6s=lkZ@3xE)-Dz;=-t<{v5EFe@FMgXRHPnfc|i zP#A<&p;_cDi(%Ekko2sxlfU_T%w8UJ?_chOT)1)Hdqo&`i^ZH_x;o<7U&{U2 zOGuIu%oAyiJxnYA7OIn$;YG~IEYVL&^kTZIz^byf$KP@U=(~X%qjG?g{o{)ny*0`0bF!Og3$5%E$f8z~NZWLM#@{$DLXdwH5A zPq${Cp4+iLST!N)lEv|)RQOD-UZV5;@O*gA$0z~#M(_Etsy`}lEG;-56_mFbcl%Kg zTI{D2uk~L|c%lu+>8_3;NHctb-}l|07$4@57aI&jmzTAj-NPG3G1I@&1~lDl*!4p9 zLUj;^@lT+BkpuR($t6NXdZq**E8i+B!+sFr6GqCWcr1CTn$u5J1?)2yA^V;*h@~Ti z&dGcKMw%i|x~LNl=YZ9rVivfLRA>_yS96v895LN?Y9!?A#!Apr;yn+g+3qpg-~+lz zy7>M&@zE^xHj4esIK zEBVjpVhC%g&dS~Tl1Ae2tMGmNoJym*@JJcWPc{E&NV*D?SHt#iVAOQESpX;aXa_Jm z2&v$eu#CjE%mCWi%Y<+FReK(c059^_^uh zO&;H4@u&!5y`ciP2F@8x3_Xz)nXdAjf^}aJ$qur#4I#C=_eO0B)998)F^FE@XU2VS zI;-^H-?+|xZa7tV(e9Y4U3ZB#x#+{9@oExtPmN-_#>lfJ2bo5UwoTKl*uKaYM>_6m(H{*}jv((c zkiLH*CC_^GS-8mH>!FD0On}@^`^c6x6LJv~Wq3re3>>wJeFN1k8^}fUI2J2L;eVR? 
z^+U|~q}0=Pwjb9tsjHZv2w+2F31X+GRjrulEKvls>uq>8f+1vhaOs4UXTD>M(ASpmdkKS7CTDn^S*Qjog5x&{VI(%zTW?<@Ir#Q%WMfv z3;Ib0s8sIj=R@3cfx?5N5bIdYZ#P9WUlzIX?FEJrdvFFcJaAMnW zGf(NKB6J3k)i)HC>G@pV+o>J)PZV_t_V*JH`Zp`gsz!-eVTKejwf5WQTZ5MP2Q`P= zE@g@hZ$!|@Wpn5J9PP-0u93a_cvVy)ge{+iULhlrp$&88(*+zpQO& z<#%xQAvv613YV zGAdym=teq4oSQp-AUG@giQt3z0$+1`a}DY5;c zx0MH?$D(I$2;Ay|+_T6T(KJ=SvAOGrJvJAl(m4!zLq3(g>ej15or9KCvziIz4OAl( zTpx{FAq|(Sd3k+vXXgXab&VnL5d%j4+sP~N0+IM?8;ay|=M1k@+qohc;09NX2R?j8 zsmbL56<-hsX+|WTZZ1L)6wni9eItG(TXU5Vz;L925+;AdD^@RA@v>El2M;$Ft%5AL zUM_x3RIp?kB6SNHg%HRe!w}#=agt%?~T1gDzCYuG7@n&lwn;1f8dRfa^He9Mi6d|7UDDg7&02}vUdw@5(SX-3> z4!WsAPFpo8rRK+eG#W|vlE?Pe(LZEwfg@I$Y$p%)oRJZtEbz)rWzXTUvlcW1Q7o@L z@*kViHiN(H^^|X)8@a-&N}IiZW?;FmXRec)1uDM@rMrEfdFQCW$PuF;3Gs6e$dF`1 z%1-n|RMKkYV!m)u$-y6(d8*!oKLz<7=nUKn=)G=o*j?ppvziuEh5Lw%;Ke>y=B)YOqeH>-iHR8exov3;8;XHY z*FY$hZ*S(_bN6o>!q^2^|LU$XmPY|};#BqrW0b=JN7FSH7Q;{sOwt!9X91a9px{>H zn~R|JsPAVP#onH-2WpkdD#;eP-^l09nE45=#;e?kD6%Or2Z6(J%yB;iZxM?bG2OqE zN~J~>E=bTLBoFuuSDIx$LFH@`NJ^b%Z_$n4m5UacdctsU^$n!4OZcOCSVEa5+Z-S% z{eFJQ!5m9tzek&4w8ho(0 zArr&F$sM1VH)OZx-Mguq18jrcolhc9gPq-XwBbF_vj(TiVm5YEOVR@gRfPD4Sz$~v z0@z7WSjV{sxjQcNjZw19JD*+W@T<#`d{^rzl<@dPg4^uOOK*P7QZZON&@ zE*88ClfO2F;;ugKAPs|h-d$rCaKs1J?zHu#e>0|xy2Bo)d<>pwhpdvUc3DZ_FFZzQ zX)&3)36_*b2|Fk>|5T6`L{?#4VUeI<_ae;*)+`UGqDl)C%-&2fzL}Z?9LBse6&|JP z9p%2kqkj?EIj2SoapP}KLa}3`9$TwTWl_JL&#CN|b;twckf#mwrn-Ff!~Xn-G~v7H zmZ&rc3ZDNL`3}o5q@Yw15n0kN`t{1|-8K94oT2@a>6(vkD&t!lvqtoJi1a!BWdDSW zgDi2mSYwz~r_|YTwDGWbXz(fb_NG9 z%_s>*3CnD|%CTB4;jNoT1Wgd&yr3@MCtftiU7LmBO9)ARWD@h5IhtOGL;bfw=yb7> zt+~mDytWeI{x9+Ol~7M=>Tu6z4zE4#@dFzBWGf6BjP25@)||jNC5&SEU(b0E5;I9J zq;r=;d65-3+x{9rJb$oXC$+2cg3u!sx@Qa*98ir#FWaM*)|m2lz6{8V$^+0&bQi_= zlt1MzK{v)SNbJ;D!TV_CQ8cjZ&p>R?4&iGaHXS<-wMCO-8g*S6-o=Z&Dg`wo8GFxr zE2i1C+M47sXB_%|hCpTgCkyO$%cpcPk8Z`(Hn&VL4CevDgp*)%`ko~o4y53$D#ZaOomQfUmPx~>5}Ewn>-Gd z(zII&R~rF%jms&w#<6^z0dc)0ll#s=KS^++Od0ugT2Q7 zp_^14WA;8>7lm&I_g$f#sJhwPVkgFvV9f=a`fw)?8(B8jC%afTymUH8cG5sGxy#LWT07O5GVG#I*_1_tydrx_i 
z1`Ko*P$_g{^l6D>Rz!RFivt5jOd)S;#Xc4gPurn`Hb9S|MaQCGf`1zx=Gro0-IZLT z(rb`l1UnX-C(Cl{Jr*@F8*sy}vMYG?`Xa#K@z8TV`!iUVlFcflm6ue>-4xDeT7+y zB+Gm~jrJ@zEVU2r0;iB4fBm`|{!K-G$mJA(fOF&o@oyUNMnO5IY#Pl~TooeT2!dja zK@2>WXIOV)4EEQjQN$npI#gSePoS-WHrEDj01J{>?%sHVd^y-K@3=9D(OGuP+bBM72QLW43Y) zIJ&DmkGJ9aTjFGP-54Kjb}^Fd6%d}&WvgAOfeJn5n9H;64eHxU%Ix;Zq=mUcyWrF$ ziwZmYm=9-kt)L2JO2Hl_fHVhs^{abm$6d5tdyngvV1H!FQcGLc(CIk>hfAMEQot@N zwAF@+5oac`gNKj5+8QYV)xF)+8|jLenzMJicx8>3 zy6!)!+?A+7V@S*`1xhaSDvP(m9LRt*-l#SWl9e#E{8(iU!nlM&ZuE=_yjO-vw{1&P zN_MblSzUfZHVSHx(T-PgSe~J@w%u~PBT3~@EF+llAd3bdx@si__dvy zx*VEWOWb@m4i0sFua$!apYIfQc9$qk(o!K|uIWO&MNL&N&XgDNPt515k-7Wa#)Y2B zE4jH($+NUG^zUJn+0<9NM(|!f;9uV}_a}Txre%@XUJs>lcRdxQqcpcCeZ(GybqMe-kq=w&t0Vc#+gblFFnI(6E+Dksg(4YZuhCr*QnlgxCwpLa_) zGgA$-q!p7uH@TVvFVbS$siaDRIy8D%$j#;nN5*dn-E+N2ac#fj+ywk!>xw=k z-;gM)WHMwV9}mCa;o2`BsXUL7MMa|2qu}^<>0gCj(Gpf)2_%mF8Q{MScOMtIh|J8&i)1F~t?s~s==EM(d^9Ss5`&1%C zS&V}$;og2eBP-SMhZ}#@@#X*a4ZZ9aSU>E^YsOfY>zxA2-*NCYdnGMqQeen6x$!RP z(Q(#t*wF=DE`E=s^+&vQ&$$kc?I{{@MUz{w4+%!T<=aowYhV}+Qi}@jLCFEuMUtZR zJ90iRD^pRKZzHodIUA>Kn*$mZbv3(-z=RGx_A!hc21}W#57}DimkWdVR6?1+t%0^h z5FtN4f`p>s#uQ6#$vt2*%W`?JByUjcpswToAeDjg`&a9QZWnoSYfryW`*$bJeR`iR zmsFq8Ql@ek*m%ojS+KN~6fZZZDgT(HcUCo(P=`) zFIH-sFv2Af)4hci#^eYsYLR3=SnHvU1YxB{Z_XYukiZGK3Q7!+ zM6(2TxbpiTmH9+G z5RXzBc(fnbX&IjA*C=g=%Xxog(EkSBVb@gGwYB*D;;maPqnj{9af#Vev`E*X<0f?i zcB$5fC{YjX!qRA7{t55k|M2){Gb1o=%I}?(JG0nvsfNNP9Gw9iwDeKao5=kv_YC*# z;2%Etj@Fk~iqGq-?%~i-Pa>zypNWm!A)VCCVAHa6^Lpdu(cE{#qf`VcF>mijQh9Mc z#or8O(&FJ3Q!n&CE%|o$#|wrMBk-F{#UNYkme!*Nh6icI+|wL*L!fUTNNXQhp+-|q z9~%}|*M}Q6y4@WjYYU}#6N5JxuYxCk<1+KhS(6ldN*`U6{0l4@9_M9~G;9{V1h@ zzAQD^AC)b(bN$Tx3OkVuy;@B&J6G#xQ%$WgUCaGyxNx(}@$IY2rsF^?3}(u&gp@bcN+Ivew~c6$XinSu{L%r%8|&2d_23Jt7`@$GYb8b#_5#u*Z9pMGrp5 zFSyjQevoqtSg?M=vKC(Yvy+K}QY$G|eh~#2*e!f zwV=b7N;Ev`?1f}7U(Rv4e;hKjJ-~HcY^TGWQjwEGtX**x(^KDyFV|T?zH|4fdYCUayvY zTFlgbda4_76R7wJ=#5j#9;3|6x|Eg=vs*fJC95b;4vQ+2L*^~nq_BYmKmpc>=)YJ! 
zK*6s(YG6vsldB$J+zBna?}@x&xbM-MFd92Ox6f{OAHG!^F2PGz`gPg?^aZEsFtk4e zbu)tow)fI*yM|T-$chN*%CJx+e(+Kg2UHHOxma~-!OGraqJ5r2iRAwO}S zH(8!RW&Kk07>oM>mX{=em_KygK?Pz2lS(}tVEcF^Zo*Uy}(es zA^g-Mt!7r%m-vm{i8@?45RjPGd)X6m15Ol4x?RQ{~@;9GDeo@`YeQ%cNDn5|~hlCD@2bvrE{*$rPcc=y-Tkp924gT9fk zVkkK3Wp|fUluTnwe1BbEX`&0>!9QDb)=XO9)6VTUJsS-+bKhQ}9LRb%9B_C>&Mg^%rJz%|_8WgWezsRnQ=Rp)s3pr!OP;A0E zDB+V-rjz++vJQ=doaOQZb|oXizZY`z9r_}W?1r3fo`>j8r%@EHlCY|byg|5zY*e0C zm;)m9bQSvKAO^zq(){gG6p9Ll z_6gYBj zqA}H;`4qOAKbdi6m=WHxh;=3uB_l!*`83T0K}t-usu_Q)JTPC-e?c=@N>Yn3Ucup5 zqW6)fd$C~<1$2f$M=7$qn_3bKdXn{pnTBSVKQ_d;LOHOY(Q4BDRUaiGZ^$Tj#;|9Z zu1CAfJC2U`A#RAPjPTzW1?ZpXr0+$E5h+oVCqJ;6AF$c^hS2>6_(a`k6BZ*!D054f z595G^MM0^3wEjskC7k;pYG~^$-9wG4CEegy@q)1B+1PCfeFuc{hF?E#^F(1=+n(km{8$EY!s-Coyka@Y+ z<+KX(+uxXGV~5ODG!8L2H?Q|IR zDMP?@m<;@VV+F3}JM`xaI(x@8u&>p&Nq(pD_J~ok8nZKVqK6)!Xe2?->L7tAmvpfO zi$XeEFzCP7_?)vxKgaA{)wQ-ue5+9i{&2|^VWQ(Y$7QDQ5CVx5i&3?<;#gr_nqnh^ z1*0Bg_dtBzgx-$uRSf`!>&W>{GaXnv1K#RnEBgBS*gC@I6WAhf-?yA#koO6l86_V> zcwm;cYXUtm6mTj}EJw7NoVcl(J(-vaw?XR#3K6Y#U(IVp9I%>{bLiIdUbA*h8Y$$Y z346@K0j2c1*V3Y6e-ZJ3dsz@gchZPFkq}ZhB>tPDqoX+;#%UkB#>!^@=-<#v6k~_1 z82&*Ji0RM1cbvO}-Fo7+-2S#uzDEU3%U&zF1nH6w4Bb+dHNP($a_7m{OC#Jye@Z2Y zyJ;xlOh$Zo@Hbp9Xs@S7#sBHlV`4_v0ttuhR;1U@ zkU9tyR!q_D)Ya?!;G6V1DmcJj8dxqEP%8dUz3eufnUH{zcp+dEeiD@dzi*!4R$xnW z2;84l_!dp%vR0wzsnSL}Cx*}U=i`Doe12{@>f34$b%UTIg5S&;$_RcwE16uDpBz5e zD8=+r83jnXKQHg=4||wzm;_Yohh{i7H$W`w$nwTlbGZEnm43UG7T3=7ptxjGs;wee z;1F5h^yYa5PGhAif{AHNP&u0>cBRge*@6r#Bc@YxW{tS6WX}!3EvywI=2TS>o_}gr z{p3CDaNY2yJ&FuH(~&ep4^BuL<^<3T>ygE5+U9qb#P(~4=L%58lJMd!5Js1_j`}P; z3B=x%O{B>jDt9(>`|>&B$S#vzVgUYFiqio1U8c4<9s@4_uU8vioNnMdTOe+543x_T=Dq|=ov3>J5kQTHDu+t{ zha|x_R)Q0cDAQCy6BgOGd(gOEki+xN!B+;_pL9t#nomii6)|3s#-jQ6y}{rk#<%z7 zJ2;=ZKl!>|`-$s#h-xWu1fZPveSOa;MZ_3W0 zdF;@~u<`6$f7s}r(~_rY4kWY_VWecgRCio z@blqut8Qbx;k3errm;(04SI7bORES~iElKUABapavcZ0F{+ay9wTl0%>G;zz^t1w6 z9Ze*O)^%-1Ae6r8pF(NNc|GO1O&zUCi$12ZN6bTfj?%dP3XYbPXhBL2#Q}-3dqV8J zSEmAoC}RRj3~N<9TpzY)-9;WKRkV$lJ`F|(0kNbw+*Znb1WU2(Bk;!bNw!_0ekfF* 
zDvS&(jwO4k$-H_RoI*78m)`wmA0*;>(-S#!^v2^1dPGkKTnGp42_DR7Y~b+EAHxk4 zB;DvTtAc9Ol8-N+c^A;5V5lb_Gw1Sgn-FFPn$r&cv_d0Y_4#wiWYPU$&v2HD&b}IJ z%!m+35ZV7v3y_1V^xMA={R*-Br1KD2j>s$LsYHfz=)Vfx2a`o6Y5yC&h@T4Z!M$6` zoj0liydix|o}Udok^~A}S7^_exROV{YXarSZr5&4VGw#Vn9@*wM?>x2f z1{p%>Btp%8*i!OwRi<#eV25utU-{!3`zOFaZIKfI*jxu_+|59_lrI27aL4f-u5H*0 zNs!t--zv*%9q#tfZWQP;;Ack<7K*N=8~|@zV!TL(;1%Wn(-PLJq}t4ix@uuns%>>* zDWj@jTP}nIOFkf$Rgx!857p)G(5k#f`E9H48lyHl31c8YmN#}y4$)Z;DdWNuG$6|I z=G6g?h{OL9Vo_Z(Yf&+m%_Yoc_(o;pz99wI2^KSQ7 z)P-wkNYCP?KN{eb+b9)GeWt(;T-|o~PIyRul7oDkLr;hY3eswDZVPkslZUMLc^k!6 zCk(j4aY|5l^*vkT;<~!o?HIJ5F@}Q%B6(8kmb(uA*w9!?asPOwV60KM)S2@1tC6nd zSvIfO7t4a8D}S|7XH)JMGX9~5UZ-d6}g%ah9D*3m&*){Z0g5zDwxmz%Bt372G$D-74*q3 z*QNN^7UVI2PICp&{urW^>nPwHc%U>UdGYpTc$>-BU-{*Zrc6Vw+JGmjZ*7jItf^?4SAdh|m=}JCnckXq{wsOtd!v6aogl z41|(O5t5z-NbR!-Y_<~3vdQPZb9JJ>FsHJpVN^ksb*#e^Czo=9CzSSb9$8!d6Ca%6 zD2R*boOFv@W?{nL9MuDn;n5~#d6+2PTv#GGvQ}u`RD|7$f*mHkA);i;+Ma2uFaY=}w} z)3wDZnE#peV7|`u_<~c&RiScFcU1F7gG0Iy*L@3bj=En%r>AvqO>R2Tm_KOjWKKB7_NZ&O8=p{x4GdX)vn;xc@G?^9Sd~ zH!V30#PM341;8z;MA^NxhHk>z-|)R!Uhr_1Nto3%zPvL3+9}vF;y(c0fA0kBB z^e&^G(nrDJkC?!gA|Z|2M~25R-#CBH>uWOn61CmqRyjye0Q3UB?f^SGk4WtkME%T? 
zuFw;$VY+j=|P)3;7TKCnJtMDT{m2G;K_EJbg@u|1t>OQ0 zbxzTlhHaKkzSy>H+jc6pZQFKIv28n*RBYR}&5AYYHLGX(?~`Y}=kLL-y|3*gR+AEA z(Ue1Mev1j5L~q?dH1YaU+{x%fk>tr_h9^eO7eCEp`_iqmD~S*xjb4mIGYsHfx_rUE zL04SKsofHW5}Uj7c_%-D+6o@cY(ltA19m-0Cp~ng*^!@OP8pw!lU4fPkOO3EMFU>t zZpdD?i;4x@=sf3~L|mv%QSbm3v#f7}Ja+HJzwYJN1c(Ttda$?wlR4{S_oU5CZr#K6dX?T?1^s+l=^ zu(awx5}#eZf{izKswv=cSJKHLhQz~{f2f7TKqtp_uTv2c$6!E27Ulhv?)M!`5+h=} zVrzgd$JUF-gRAixE1I^&NkJnA39Hf_jo2WNU{it!QdZ=Ai6w&mG-yHC40|qOhuO$^ z%ns&lal}viH2QIgPD5ps<{#W>L?Vq);_t$$Nm(%Tgn_SkJv{$iA$6#R{F8@dSK3=o zrHqUzRY7zzy_=a{Sfxkd4kzz8$oRwwD-pv=KTGI9?#r8mRf--t_p!7DpaJe<(mXlr zy|LGOUS@)I1F6aw#BA}lA$YRbz?){Eckpz~sBV056#eQg!9`cvRmohxQl(=p5yi|r zNU6jY2E%j1a2iy-F5hXK=|Bm=#wO>%EPEs zWsd#Mv~kFGsbo1xs~)J1=9!@bdG`f`!in$_p*^k!;kjdv`p^rAPdO;TO~|wJ3~?So zY55QecBxta9WJV%?!4l5VbI&{`964lxtI4JZphOR5?PjFx}1wmUX4z&~#eL0Euni9!|CB`n2Tq%YI)L3)z(iWw%ay%f zw0h4Qs=2y3uAm-Uvh_e`mLb;}aotM}B}nMQcA~^^Drf z)Xf*LzTX|$*TwfIgWpt`@a=C#8t$`SijoIhyts^4-Y*kuJ0rs)2;WN}>ofL<_8cFN zlHS*pG?qpMfkOX;x##uHk!|u+{q&ZsT2=%fluR~%S(*t^nWd3G+3_d8(@x!mjlrn9 zR?m(eij_P=O_4i+$Cjxwc7T^z4?>Q8gvt;dQm&-<{Rkjnsl3Peu^)0&b%G6DXf)O3{@{Uk7q^L+ZtVzf^&KKLa7G4OUmF#;+@ zV@D<*KOIV!h)~~T-xW!Wn@y-l!yYy=HjIRHTq?14TwU`{U;U%^)Fx$KM1 z@EJIGevY)sw(8K<|5P#htv%L2#+-0}_(P$6!%y=JQ}q8a7vQKSLAMHr71mt&u`n4l zi9LS7FPVkUA{Y%I2?Hbwg(vSo1w8kMkR5lGBt0gnK?gzEAu$iovY*M6M{7KWfM`WfKq#&$6(OV5zC(T zjus9z{QRQ21FNwRgQ_xRj;#LN2>Bcpl!J)w8!XP+e;T%ett368EKp|7q6ht}apoOF zE9@UmQ>e4yB>EzaAJ{r^QPUOo(6DHwr(7S#etfGG_g%jFLMeAi2Svh3z62}b`ZxrE5oZb5+GB5U^+9Cu2M|38T^r z!6hcXY8Ch~R~s~I{a3@f``D3YG&g{ZsI(1hq1Fu4vT27$ikmm$I(}I6JSLylz9@s- zyXGZ-d8VVPSV^n%b$ofUqq75i>TD?^tqEDsD~ICaC2SJ$nY|h#1SPzp@?#z7)To=S zRd4lsMD~B35isy}XAl*|WE#{<)7*X3QbzH^Q795Ch6{jPsFVf`jeZS9AxZ|)5`eig zON{M*q`AW`YL>DN=>B@Fa7Y8}g-qrjpp(ErR&LzlZvNTnQLBe+NWIJFC&C>Y+;F;!Z;_ zlj$W7J9EQEY~Z74wq8Lj-Vn_Cc*3~8ScF&Js91nim#!RiZD7GfE*Zv zw{i==;tHimipfHpB{J&}?wg3zsN2PB2&_3qS-SU7b)UQrXNtQj`hdwEg_Se+Z687# z1U=rN(I-CQ1tTN<<*9HeJGnLWCn1m)mQ6qE$>%26gotX4PFj$Rsgsw^A75V8wifcv 
zPnfQh;nLV~xK^Y53_lPx*=~ede}wmDe+;wOQ-_*cx}#;GO`9I{y=|sN$?6WH@M`Yq zD+m&HYLB?1hJ?`;LInc_n;6lWsJ>Hk;zpdQ9)wzG-wc5KD`3+KOvc6ltw7(5s z8)yT|2=e+$mG1%IfP^;>6j-7zw;ND#0xD;h{6efA8!qH zqdfw{I$bQMYv%l1AUhb1$Mc%Hu9La4X+MDpZ9WjC@RQ(y^?uO0QCOVgH~{^*uR3r8`j zI<%dOswufR1%!pl%YjFUj@S;l%z=y=U4AT~L~6uueu4rqRIr;iNuW9$Ky>7$vz-m> z-kBNLq!NB*5}T06Oy8$74N6%5o)m?TyVfD{e-R1C*I>llIX3`Y z3AnS;Uh75z!=s2GdwzybFg)1JmzRB50yPZsTNf;71*v8zC1RoDHOFjZWWJjzMJmJXD^v|Nm<@fGX4Vz z)yXr9s*S*z@trrByQ%&OFNSU$aw3of@CTa1(-YkKgR_o~?!kk=2QWeROJ)cp#W>R)oA$3=Y@(A(7raoBN(Gt-i+XKFT9D6qu@aN1UT+ERV>Sqiv7^)i8bwuw zw4Zp2ix1uW890L@+gNGgs=uj(kjh5qw(u*$X(qOhxkATpD!v9|Vx@r>)|Z9oz2L}V zijv2=Fn)>;bItsn_x=^Fh^1_k`P;TrM9mw0glB@R!FMGCTN*#@Nv}+i$&J$Nfdk4= zEdja6-9Z_|Ccsj_sI$-}CZI(bM#@`XRf*SY?ecul_c)w50;jiCs;bwA9p2-+;-@9&%{ z`w#p?Plq7-F8nhc#&uKpU$9nBFU_GjaV&`%paXg0OR)4iR8Z;-Mulc~9?vi2*D&@G zrqu|TPKXf`amH{yJq*CWjpdsU=|P(h!)FA0%y-qeZ-S>DsZeu%6jqgZYR52s#9KImp4V)Y{fD&7|126KOa3myve(u8%5F?p($|cz9l+>z5MSgG28}F|i-zG{()&=e^K*KbwfbvToAkcMa#^{Ln0|$l`HIM~p%=^nVl8IaS zK58x>ED}>Q49F@K&nzyFU;t?wGLr0;I*;*eUyevN)bm%ZrS1WjTzp9kX%bAqffe`}A#FtrBQWJ(X&e31c-3CeE5|QAE^V0j@0%72B=hKx^1vOpf?*hcEq(u7% zSd3TQfkvsh6o#(AQ+z7_k!wZaE#?f-=Z+`>WhPIgN^&ZTAg8&x(4gOch^E{4=T`MG zqI==Jcho9jqp>br;4+L0(?6vw{?-pj(^Dc1X9rahtg+LS|H?sjp)WmPbedZtLfT@5 zPpM*>_jn?yLb9~~*DlW_B(jp}lybU{6et9uAmt2nlV?Y5!Ju3k5I1z@9YO`kB`nIL zkDyDCt?QUbf^s?Oik&(L9^;_IPwAyTX|EVR_ere_Q8d#OLW_br=2t~%Stz!1;yN^r zi|i1~%E4`5wF?h2d=PMJ)O+5!=O`6oYS{JK@Ej$-mSr~am*5ue_CI#P&5JwbX6Q-; z5DNs);9u#FFB(SIRt!+G%!@CpH>wtWBZg`4mp15dWyhn;Mh}e{n3*=okWJ!L;+3QT;rP)F_ zG%LW0h2s*xAyLYV0l@t@QJ#kiEG)9?{%<%5yoqBF_Q5XZWG!BW%pz1?*VDs&Qs?hc zOc?D76puq3R^wpDN+w){^e`mj@;SBageuaRWE*>huPhZQ&SJ$}WZTe@k*-`S~a#F%}2Z5;>s2C)QVx=Bgp& z1LBP=%=r`w4tJyMzmmWo!;)zTM=~JeHC$*hbeo4N6_IIBC*^?SN4Ta@eiQ$T?HC(_ zj!I@N4zR?zzqj9`FTV1{1<~8FzbzB+4Mljy;je7hB5E%RYnix!Xr4xL3J979D`%OW z>UU6{j^g1W=aygyygVQ^!3LR3o^s@DBo5);+(GiZd&5?}$dF88!&aUSst&X=yGu32 zZ?j(I=MWQBlTFq=WDmdE6RY!MBG{}B7dWo}aSF6Cbk(~4cekVP 
zHAmtaH`n$nfR|ysW!Uo>yGP5lRfDbd{pNf4ok76BeUn*oM~)u$+4QW^1wUpj}ZM8n8lISK{4}{F_=b- zLGF7YrCR8H{BU0ey+GqBsH%!@9AlvdM_*Ux!23_OS9RHyAo=oX(ileZmng+n6+53p zIwSujBd+&9lR1g!Xcu`Zb$h*&T0)SCu8I$Z-LWAOqi%Y5s6+8a_bN>x$-E?|VA>)8 z8HaZo6p#5DAAN(Wn-5hIF`O1)e3W(mCTbdy&w1aNX>TWCRM5|CNm$v3wxjrE?|Y|3S+X)L zIUl?(4Jq&qgoV(^j6aO7vIXUlneN66)Pv&7pyDu@>Pz`YKDVe)uiR)wv@=ze6Z4tO zNaSx_`Rw5?lPE|DNg_)i3kiOV`F4>n?hlhCFN60w|0ybb3tfuo<-98X&{jPU@<~3b zA3f=@ZlmjDf+es7hS@rVancz^Ei1s(e_p@%oyrJoN*+_LHEl+v*J)C zI5;aBryqr>6WUWpq~_Emo(E)rg9O}e=uqX(8Jff_o7iZ@)* z@94Dp47?!{P;`Rz%vp{1*3i$oq~U5Xk>-{tXUY#wMhMwzejDBkFGV1+#Z_$-jp+5G zhTii)eFVNEm@9(ESEaB*tIL;dpXU6u&>wFm6(+B6Oy|IMbRpTOMn6qtYe=MI3&k_D`3 z**|2>-<_Wd0b4Y3{MG#sR@!wr|v-swufn3JUA7l$c#MYi6@s}iJy z0D^{#6^RR&^5^wff0oz|qpz~FpO^Y8y*$GChyQusW+ zrmfn;SGQXc8or>d4gRJxH&UUaTGmQsXlN|Ek3nOpx5<5L4|f;_gm*ea!}Tv3W5#g` z*x#K$qD_|KZ!#?ep`Nw1SxCVb0>oGUp~w_FO=K z5C6=4HqN|RteLSQ%rcQXG_xI(lo7h=dqY&h&$|l_jYPL3=1{>2GWglM! zA*b^(%yMZ=zhs(0wBITDSHh{Vo|{@TQdquC8b?~s9WdskIvznx@>|v_$f5Rwq8Rjq zeMykdE2P~l1^*<_VLPeQk4*{$aaY24sT8zb3 zvMIpAY8Ga4Drh>C0PS$IHi&b$+9)=a(ZBn~89JQ?Nyyx0zCC)YfE^<Ikk2twfcJdADu!A#9*fCOU40g{$I+A4|Qp{eL5}0S~c9+ zIXQ;K)ddBF)J2Hn+sEA~^GVm{|LRFl#)CIw$8{n@xKDb`)UUsXSYJw?aT;DIO3gsT^gReQN@yfQ=zY%6MR<5_TO`DX z?4Gkbw1JG+hurkh^}&zOyUJbM0~KB8Y}+(yB0YitB{FinxO3Jt-*fUe+EWKr6%|1r zmA~~k-kQ2x;aFa)2~&^3IDp8ho!oq=Yvgv9QBSt8y;A2@c*iR1t&NrFf8StcQy^nx zQ9pc!TX~Zg1jf_ezKSy10c_vIxUsi?pKqVF;j@M~rPIs2g0;(Cug$WPFdt}`8%&d1 zuhUhR26t&ZjpbqGP_PQV4us;th-s)PV&{Yc(Kw`T8BZ+BT~#o>zBa``bV-FaEsqCr z&FVdKBC}9^qAuTa0s}|;Uk^67+>?c&#m{&z)5P(op8;3cl@vLx9uFcpz|sWJ1BnEJ3|MwZnFu<8ocY`HID| ziSkkdU&1p9xxn$Q5&TmvCq!WeKx=#@KmOQo(B%|OGD*%h1ijYQP=q|+*OBaiN;K`) zV5EoDYJaCuerPggR}t@HuXG$HMT^gmg|YmM`*o^nz291c%{`C%j`P`4Py7*X#RsMd z&6ruXeYP@#nc=TAo%ATg8&E(hf+>j8zJFT6CasquH^+E4 zrWEZ+cxTS7ptFsD639#g5Em zu#J&+Vgu2f_hj^1_8utfO$25XZ}+5FqzgWdjvf4^1DVYn!gugGB zHo{B{)n!Y>nVyn1()ZI!7qhwi)?%|N>cMf|8naD(T_}Zsz?nv#VCH*dJIi6r)_ANR zI6%fM1%1M;!~A&T>pRcuq|@H4eXB85%jp2Ho6`|!P)RD&a@peT;X9sKNzEdK2*g5S 
zCyIG?7g)9^Y&%XjsfsS!4yr^?4XnP6bNWH-)CcHZk{)07QZm{w)&3=H!fN%5K|T_= zT^vGmBoR|dSa?wdDozSh0o1G-a?NAOWnPBCpz0l4L)V>mzU|NEpjQK-J{XrU6B^Qz z8iGb>y5T)A>FkxV}EU<$*R$LC-rLLr^W)IDmm{rZ5d#Mb>DN}Lu=Wz!A6RFQPDG6@+!tW476atggBJO zeBhb{8CX25tSt61b!B+5G%*tdN&XaMjAbrXt2}fV_V!^_k3XlBLi@|g*2J2TF+Y$1 zonXs{U&=dTTT?Cs5DStE^y-|>k-}BBhaBs#xbB%paZ~vZP1iJRpAzbW`^aNq_ zcMJx>yk|2!Rc)T7ghf9h-!soPd4g6QIZ4a@DtrC?jvq703I~`qS>up5)C%2=G!i- zUpIdgWt}|b+M%_}lsWX}wZpr;f8W;{jL&--!R^yX-tFx%{ImOWE3A1peDSzLc48hQ z2c1)rSfEfAVuGxwk;5rd=$W?~9^3-fjNbL_)ZdxAOV1f;*sL@o-K2lm)Pk2jT+=&c zx$DNZIZGqdsX@hxy!{^a4x(zxPAqSnVRTp6 z>bPIX>Pxy|vB@zfVahqePv1!ht4R8te3QY5Zp0Nalvu$Kqz@@SEfU|g9DdZT8sKmu zJ^M1Vl&SJ(IB1A@t~y~H_o$7uZ?kcPI?}*9pPnz%oTzmkvVN2Lw{C(g97%!Y3<)tw z{CzwwSW(&}1Ucgm_ht&qIsZK^IC|cv7dI}E2N^R~jHu`tU;WB`n$9HP?&1+&wFR-G zNB!C3ufyC@Zs|!4b3>z?8b(PY5$$iJ*MVQKFd3RVGKbhD!g`b?lp!WhkRb7~NN1Xd zZ1jyYrWYIApLWelYTCB{{d_A|y%yy@H-GjmYd&3^d-2NP_Y&@HeeEH6VkV4cQbB9P z(M*8;{X;ni zjpNkEnay;J#b?KPnf4%oDM+D>(BMGa`zJu0<~_D#L4{dH<@X*4S9jC>fX}iySgJ%* zXk~Q3-90@>BB#-2~Qeey5{do-b<`);f z5Oa`WlS>KNEo6sHjtzB~wCMz86NVgRSpuzj?4X7?-M=taYk?23xqW$?M$j$RW4&5B z$ktdp<`;c67UcEB1M+G;Kksi)v|qX7S$Vjwx7KmWR8wuw90fN~J=3-uxUYV&b??gey&j;{*r%rD7|Ty2b# znzKw34F6dczsHtz{2A*m zjCCW_^0G*78+P7JJnqpP?n}m_A8-8m9h!VbKN}ri7oC02%?X;zU%*a2xMpZ}m5}AF z&S86a@_oM{6+AfBFpw042-<&jYxkj=^XM3PdtBF3l#7fsYpv z5f4a+3OJ5Xuc3eeDp*N`fHb6{^YdP5_X;KVzqct*#n@G0rQV`?72=ipn=od<*?^RQ zV^ezbKhh1ORDjpPLPiBoW?#|Y>qV|v)=%h4Is2xN(}=i_e`8h?jd!C zh)PTeS>XKfjq|%^A~4UM#sCk;vLd-i(GBmud39t(3}h$^`qb=dt^1*AQisULBHz+Y z4FJ|XuCPmGgxyr-t|Vh@>OGCusAG0261djr9nGmWR?`^C~O@Q-ychgZ(MtA{c$>Ip~c3zWIJd_^A8-)c5f45EP-tYR~n;WwozX&J@|o z0pACYD>Sa)_UDq!tS&l+ksFbKzw&!L=X%R+?+4?ZTo>9@C=<~3vRr8moU((8qt~5b zYq;Dj`>XNSEws@PV|KlYxgV_rHAz`?bMYME6o0?kRx41PPyCCe2Kee|o{P)~-8GJn z+GlQSZ(@S$VgY@hLe)98OhLxPHXP&oj?Ye?Z^Yd_&g1)-?s|LcU~dZxBq25p21WDN zMjwhL60f^Wl3*~9OXkJ<2H)4Gz*o>sk9VM1+C(}GBdy6y9TDTXzdz)c{~N#RJvsE8 zX^4}S*d*so^5!30Sj~-OkUt%32M+F(pv5aXq5YZthEXmAP!7LHQf;I57jD3&^E7s} 
zYqEL}o2q;PWEA02v&qyBI*>7=Sg?sJ104pKv!Y7J&8L*Qp_0WdrrM-sz3xa5=**An zL5`lE6dt)s7qsDJ1l24&g0+ ze5w@boYEtV$&y&uWvNYZr~C#&+f-8YNK@Jt?8sd!28Pg7%AKt>V_zsVOW`fzCW`vf zCQ0#4g{z7L{VEAJp-BoyB_|Q&szoZlp=m;+HI5@~lhZ)S$#jzv`?kj#HrQx^4?CBO z-a6O3^N;q&e(?8ya;~<5I^*`j5bAE&!qD&H8F$D-d=ckcDCMcqW~G7eB1o8)2ZkHF0v&c@FAI6F^o+%QaZExrtVt)burT5$;%aD5wCG?SNLdl@2=U zG~&4sp#fN5u0UMtBSp!#AJGX*!)A70-Q4mFe#yM5HkuyU^jzspJ z#X_dl9t2!wYX|faj$h1PBBas84@yB)8Y4K+TEPP1=8SVej+vMonc$%w5XBOhOk|D4 zvzeV@Fh?Y^%b>n+9j~WxJ!Cv_K!a}D)p(YM+M|K0?Ee1bR(Pf}=73L*$2@s(x%FjTr zfWDa~ww#T)nJm{DLmmV8B;JbvI?efSObk5ES}~l&@~c$X;{6BVYs5Fw#Nc$M{3$t) zd=k!2IUdes%Iboun8g>_V!7^l?|%s*#NBj%gO5x=4M?76@#w*DaLl|=;arupfia)Z^dcw^m#GuFIG5 z2S*RlGj8_sE(UT4F@!#h{nV! z&=33qxgI&*Cv|q`;&N-;kjQqqwu?EnZbg06TXL0p?^YJ)#^$?xrB>gG%_F49AQQ=R zqUc^IjJk{?=(;y^@8C5s)lNx>Po{I|DNz+fGAc%NEK8BwO{HH!s*A4nrxi5U98A()|)LLJ?wEpW*y3y z!gI{-kc=oZW3$#d(O!}R6!n4Zz6kLOh$`H6VakWQ*9iz4YFBStQ$AyVr*PwWf?!8+ zga;^!ioV3O6!AvZyW}+jgt|7H62XcmZ}C$fr2||Sz~JxQvUzq>{yEr)uptF9i94Dn1h%m%tnvsAtAK!p%)#1K;i?vmkRGnjoi(}~+oo!&lYs_=NCl3F- zcW4+F0+wKi0xyebMq=pJu%um8n4?6kF1IU572Hr$#RAQyPz6y4uPcp;Xil3jtp)~u zR`jwbyVXqwO^BrVs$dw^Z;SfZ;hfDCY!byHK;M{1nPwe_9p9*CxB56f6jZz#+=@-B z{jK?4okgZ3mwHk_v@QCt-c&mA;YqVS0cy0U_PBI)mgP+1q1zE_Mj!O!v92z?_vL*@ z|6uP<4if%`iR~BE_w+Q-YxUNV-iZftjLKN|p3^WVyvOT3`b^%ALyMr%O=4IIXP+mS zZ!lo1+eB1jS!x)22`i}3!8fWpLl~$4)?HU6N@V^2+z2+K$3OAqguhv5jTEv_1=MJ&C`fE^q zT3`OBR)F|LbIGfmCMLqKUYCry`f)khLL;Inx)*`yu2houKH4WYuPsH#IG-J6;Wnc) z7IfEiygVvK3xC5xr9fvW_%ta_b7EOu;-ny~gVVV!ldtXEyvjXSbZEWYG{&oY`@h)1 z2;W~W8CoT)-YFxhsbXdwsZ~2Go6zmLiQKe^k=di;iv1n0j?xAZp*d+Wun4L|s8>lQ zO{XDMvQ;nowrOkJQBwI*4^k1UZoMXfS8}!hDX7wCwK6V=p{KbgZM8yA-#Q{_9P{V$ zs(0bFI~l!cFFTBq(y0d%?Zi&?TeH5S7+2lVd>wW|vV4`uzCvc@Ak07|yu__Iog)LX z9)@QDD{6~r{4*lMbMw}dL3L`)r2BIZ#oF(ihupU`cBcO~oc`Z2^>h7gA2ueg3is=k zz~qO>#8q0uQeA5ToKgk@IMUA!=`=v_!Q)V3XSC2d(i0S@{F!Qat$`E-7mEd(Sm&`# z(&plOKz&Ec9OMahJ92w6yTwki!MA-k8X$Vqk%Je(*La#c3k|fxJ=uY>`ClOT?aH-+ zK>BLldEX1Y7-=$=8dBk9>qh7x#ia|o4S?1or{ICfC;4vk*4AHDUGu-Ltj-3AYD^xJ 
zLL?n!4G<}54upF~bSr)Z*moh4e)OmXvH`2TaZR{p5Wh{tyfTw0rQL8I__(A`FJ4|S zD(Zo|eoj;HB2&BEpT6V#r1)7x{s{<>xW$9;KiLbJ;3Vl#Y`j7WN_7%7!etza-bRz_?R{BeGm2Ay?(6te~ofc78e(Ap+p=Wq&;4e%p6RB z>MSxPI2OAil_l6lP#Oo<6uHqU<<9=$DK2cWh^HFDDWGJ`PQ+Z+wZ%ft11Z;2f4#Eu z`0e3<9h9vO=^Ruh_k^q3X5=gwQC-hX{#4awCN6+14L0lw!lP5&CcOnf>Z?Z1FbdU@20GBL#n`fIPOP4~EoG6WN zY&05qvopp^Rs4fC|5W+;IkQ;L@5>Qwcmx#_Jio|6@zJ+k4QceIQA_$6FphBU&NsYJ zzW0e$+3B6?6bGJ_!Oh`R7G}=$#mU4 zfJ!F)g^}|oIH#$1aD@NK;vHP~fNZ9#F`l~bTh;wbLe(&@d}vxOR%kKUx_83~OSHVW zZX-ZdS5%LX%}GE^X@epoe{O>RfGT3cym|0GYKJDHj$PBN!D5`FO(Hr46{*bzMe@+| zk9E?bGy?fvQYV~p*qNE_0X;?Gf!fs6y4+X|q};|jMis>Ya}=`6uL{3<*;GV_tg2ev za6ah>I2hL+;aZSYr)W zay7q%)$wYa?@j_l!LGh)xmj~WL<0DRT)m;8?e^aByDj%RdUaa@KX!4-az^EN=bAwh zeRdwk?exE92+A{h5if{I3|3{fGGG$lX3WPx1eeAvN3Oa)q)5kD#o%!%8UqU8o=)K{ zw3`8zxZdvG&;oE$0Xb?BP-;x^GISs$9mAX(WL zF$AVYlNE*eZx%wfhDaRQIl=sY7P$eAq}CN7hJ^|{D90=n=vv}$oIFI*PoW5O;KCcD z%rTO%R~ARG;p{)U9ahZqL=XPeRw~gRz^hdvVcLxpd1&jhN%!>_UCt@gaVeSeypeif zROf83>UpKvY^I2l?_SB^ku;BYBy$1dfoCEvG}v|%GU5@U@)Fwx`pZM`_X$nsdDP}h zzFJg_l^wU;lG!f2kcjS*y5417C$%9VI@@LfE>dO;27}$5|Mv(2W;m3)y9B&jX&bH< zbli+h*CMInjU*5U72`ahWq5ELM(M@p;XcfE7?a>1cJf3tRe3^JT|_iBR^g$n&V@pE z=jM_zMrbNBOm%%>UwV9;`{)4}OGV7+n+@)(4<%x{lWBHf=P*$HiULWCoo)CJT=nuv z@b7^y`9JS9>veQ~m&Wb3==Zd|nY84d?jeMbgco{kvVSO~hAs=O^Vn-Ma&_*#ou_P>twRQ`eXICem z(*@yuCaW`h9XeT2yYW3=9Tw0tIk3hhM<8Dmi3{6g<}v;!}RUMX^fRy%kF zj6Midwf5G>zO|w_dv6JNp^6Y;K+D38`{rX##wa16DENlECB}#_78PodYlAv{+zH~b z;n^x0ne|yPkaUb6$lXQwvoi-Ax5-IZPNSVp=E9VIH^#&P5=aP<^VK&_dGLt&fH|hn zCc?;YE(Bc{V~{#3BAvXTV0x6keVKS^c1jBn5Tvkr<@yDH>*UZvxP?3frQs8)x zkrLuz7qiqL4&su5@G#>cAciwj+xt^MhfdLaZ#3N?4A=tr2z@L#_Z>O~PVA!78z4gU=X65ji!tOZFDCFKM_`&yS|&28rX^D=D(HiBQbBINW|ljI z`i_Y+u7&R3)7;Yf-_~(&_0qLw$o(y@%U~tUi~K`7i!sA+%`TrFNx9(QaE#bB;QrGB z>dt-Bd{SIrBXGV-kiXCSA)I)r8M7+-$Y|swiuJ{>Ma8JVvgGbOy&Nw&tHC$uh0=Oh zOtD6@MbF5AG(Mnxv}D&$Of+}kN9&0Auw%f6H$!li)Hc%K%_n0F6mN1Gx5takL)|%v z`?Z1$13$1bF{L!|*=5*egUCwLGg#ZsD_F0~hs_eR(rqjz)G008%>CnWdB^$ljuYZn 
zCSbkzV0yMVf(EI%QrBjj67#audIn!r4RnP9j*HRRS#O-Oks6BZtt%i$*;A6V>5B|O z_fSst-&%mWxFPO2Eo14)0D!OBr8u-9_dDTQ?*1zzljjfMOX}Ozo4tgh;tlr#`NrlYzi{mRrIlv`*Tm< zbf4@AHdis-h7WM)48mUT`A@au<)~yurxa94IWYlQ*l=1+JVn$Pr#RQnw0F(^PD=u! zBlS_M8x`9mISSk=nAS+4pf0Leo$BJl)dkw+FkUvv5@sI18KPx3>3XlMf7y%Kw0P$RzE zcHY}Ye9qr{{_sEf=!0_WCeR3+Kz#LCFXcGqy>#ibke9mLZtGcl@|~pBp8{x0bnbCm zi+su7%YnBBu)cVWG7p5=Eht%{#G>7#F%HZj(ozxbqQ%A%35f`6G*la^_<&3Qi4k%J zvM+*d{ekUHjN^GRH+o1+?i|eom-a-5g>P670x(VY8INyvE9>mjN$eve5)GkC?-XI!xko{Tl@l`a<&dLSnczL@@*9cwkQ; z2m5HykW>(p*_s9?p;wbp)ShSp1Oh1Of8ratFeGs$X$SU8Py{3ghZ*%(i6i%2+k(4V4~vL3l%TlW(XRij=Cv;E9PL!-kl}+(6VS_!rQpi-(gZjLmA~Ys)AKb|; zih%~)e~kXbOGc9T#4=Mm_H*K{HXgs)jySih*58s^|67#y3Wy!)=jJ5tj!_64Ar5S8 zCc8+M5*y`pJ&wMyae>X&&gbPGmW_fYrjRp_2?q%q0A4r?v1}#$jUw#gVA)1r;F78i zib)WLVMGQ6G&%LED4ffeK=Q*eggRp-ZlTcm00~2!-Pz3d{=&T;a&GN56~oMvf0B(N zZQi3&&OcxOCR1#`Xzo@CWR`ikZTySgj1yC@osQE3K&BHY(4-1*5w49}sb57F?=;!q z1c^+UpAttz)W{2gHS1-+fFf&BSZGe7%ggU(pnDGt7;P|NT-78eGl5P}9z|(@Ctbqv z?I6nP)13%BEj zgfQnSEyBS%Tm|!~UxHafk2Hy=bmT963#Qm=D+@ye8}u#inO-K%8t_`a}pmT5BetYloS z8(V0YdSie7nW0nmUif_D9hraM;^#IdYdf7yElzn+TF~G>nqJv;c7jWbMUKdx<7JtF2+QfC&*9xTj${Fn4{B#t=Qs?m6Fz2iW zuBKt#0i&K#4hR$>;Ux2Cjfl8x7L7ji-IdU=)0!#p&^~A?TD?HPyI{0OK46WpR);sZ zvlYA8nr)n5$k4mGy%6Pj92KCQpf28Ei-dw&^VbevNrJ!(x~=?lLM@R4f;ClcToM=m zAU0MsR^9qNF&#N`HKyVr7fo==Md1loo8jK3f6Nx}SLt(`qQic|D+N+Do+OqM9hw@N z5N8X{q! 
zSNy$n$HJQtnEP@d=%0Non_0MT+T35GoJ2FY96A8-(l633{&n4UO6oy9)4P8CVXa-K zSaVf?hy@ZkI_ooWr7%&WZY&ZeDC+p8v{_OzAC1)M&WWW-2l}0!`G70Z|JP z9YX37;N3)3XcfA$BexC>b7f_U@uphPgLOz=F7r%KcQhe|`@{WlB`$K2U+CU^b8=+% z8L!*>F{Ao0OiIKmRNR^V z-f2>9)|$`vl?Pf7oUCgT8}l!nAWuh=<{oB4=5Nqn*k^Urs7hnmv6uw-jJWtr`1nZj z7=wEN40BK=NX1*^6E*(amuqpHW;6HPsVI3p=HenW2j zT{#2`JNr34|G@elRj`=+n#Iq&a~Q3J5&b6g)&?c{MTWOSUa?2YznOpYsGk=Xs~FD)9IdIN`RkyGx6+HWfGV7V{ts8@z??}Nt?SsfC$=@QZQHhO+nRV{Pw>XJIl;uX z-q<=hr)pQ7Z~ud??&|LK+*ns41>;WHX6(ta@{-PcZf!wrIHe{6YAQ*tH^y+mDEz9d zw{RL6=oOrk?&}pISL84X!;=(A8wA%A=Ww2dff|;zN#H^An_Pw7rX3*KsW(us8N;l4 zdctqh9JXn~k?H2wrTSS!O&JH_ZS!lnchzrV@~OI7`?K38!VZ3hi5%Rr0_JoYJ{su& zEbI+S)i(qg-Qz9BDDam5l*xDYjr)Nn{pSD6Ui{zr-ch@EOysQcRWmhtxiD$DDrS~2 zbbCT%f@e^kVfTXUT(aN!GC+NfYF5mMAn6B^)SyyO+3)_I{MtAZP|^f>lfB$#Q#Cu< zA;aH&s^w6VS+XO35LuT$(K42tj2h^vX^3M za^Tkf0P;XIMWp-%sl%c=|_a5)bRJCxQlZz^d zXiudzEz1op>F@m>xDSQ3(?BgE@`c0KNJ5f*EugD9h3dl;r^Eg{iThn znB}b9PXCYsrVSzwK028LBXd+JkP4tEOPxqY{BdMKC1ko#bgtXg?%VhA5qIoWX;a3J zR++Uoh8;9gbj8!_m-mvO6ot>Pa zB$sVq<`%q*_Ftc?=jvTB_jC$A2AL(yYHryvYBf3O51tlSJ-c zssa_Sxh)Cftu)D@UfZW8!1PJ)DN!-udl5iZT|*cXK^X?&gwGZ3NEU|6vV_^2q>889 zCA6lhJ0i$}%C<6AyD?QvT_})Jy@1(S>P4Z+F+4n|nxP=W*QvJs-|#$qS$4g;?kdF} z3=5yc7OHe?m|H?}*(i`7uey9ANDq=l4wJmb zS&>jjsd;wAu)B$-k~fP@!aQRMs;XX@q=kC(a;)h&Es|TMRjMS!a_;bOTPYZw)0TwK zh+z`SKRMZcm|}J+1O4am&bFc)R5+u~c&8e~=!~K^NT~m~>20CwzP%_3=bn$vvv(GU z3J$)f0G90WC#?NXve{w~3F!{wPAIE!2^+!Ubzk=TCDk^U2wOBV4!az6p~S>gS&iSk zXUpUf+KTiU>kg+{RPGYzC?=s9(LovGagQ#-zS%pT!K?|+e!0#9dL^R%K$b%y#%-9S zT>L4-WOfnKbY{n6cEOV8!L;6tuJo=70+*_QvgsGQuLD+ab>)5z(w`N@-bI3{L+pw; z<z=tzMWx1^vk(F;g#L~^XpmS2`O$0-S<2q%J zjR@Q;Rn4|9ih7QfPRD{K_yKelAYefRr!E=J;lPJXMVHg-m;=ML3T4i+oVnzsNA`SiQWSfl0%u|AZjHn-WokGUJ@Pk!*m9^DNS-C ztS72X@Q}=8*#4?s!A4SxRs75olA{4tMsE#Ln^oR!G#t4g2;8@S(T8ddgM)018sD#2y ztSwa!<~Z(x3-F7)h+HWnvwgC}s1M8Mb%8#Kv+f~2f)`;k-nJNN#zWG4G&u)vc(z^( zn)_+>L}V;!2s$m0bR1r^3jh4d4FMH{IW?R3BEztwuPnv|nFYIOJnQy=PgM|a=gn6s zQ6#!T)y7!7%Du5=wsB8}bQ!j=-}gf8JcBbHzu`%6wpkL^gUNIoQI`cCgQ;l{MVTYb 
z1B@9xeXloiP}jVDqwP2r2}XBiN+Rr2w-6Zwon_U+qRYI>F`)in^9*K_ zDi7Y_tg_9CNB(jUTZZnNotQTV9m1-+fn=!q-3(G)eav=Cifxq06D|KBTEOg?mwMbD zSqlovt*XM4(6Q1PWI5}Yls4q6XcMsz+}TUh^j3ykLvIx_$N=p>>5a02ZXFYH|)>CU6Xz6$28H;H+}~P$Lot#hQ~LBUnyb8C1$}J?MV_{ z)eG;Ay6iGW2Y&8MwZWD>1}2k7A4OW!Z}pecG6rM*)KV23kQ8jHTg)#MoYIJB=s16R z;%a{AP_h4ZB@>hQ=dJU|)5ufe8+8|Mglt!=l?Cs-)4VmQ0@>@TDol|o1?XL#7#waN ztJ!dqKk5E?+MOglPyH5cmf?lCmT~PCZ9<|e5(yR1knWby$je0+0{q+9^K_l^>p(7O(~G!PbuUVBaYz`JvLh`nL!?O1*J@S3IfT%a*t5=cc$gDgJk|^TGekC;kzr z|AomhH1%h#CxN{RECfvdDQI**K0KIaU`gL0< zi0vfKP@omP`!eXG;^`Lf_Z(O8`AUda`q9UXh`eJgfJ~&NUp&mM zSjC9J^b+;#A-@egO@v(pAN9l?uV5YW;x&kq-7Zjsuwx6}Ykxh+LQ?p^?jPcID?+Un z5lHP=EK@_SSCv3h(g0kVv$$u`X4~GODyulIadBLD*7tUDFMiWNajU*=V zywnaCYEg~F5*nHpokm_5O>h{!1ZJ%ayMoSnloLppK3TqG z(rvw#j>3R{rGs}M;zS3&KLT%0PhM?-eOz%kS#6ovtTimC&MT52$Ss06l)*}f{K4pgOp)U)s=wylg_9de)P0ArFQx~IK#Jh{$;EEr@NK>4A; zDgE`v7%XqM$&JOE?}LEolj7AG!x*spk*oYboN^c4Fmz}5-#FE@!7bj5LLgN*3S!C8hMirTIEbqYpO; z3pIjs835H;Y)v;tJ73OWU^M06O@M`L>M?L$}nki!>CLDoI_g-_mQotg$ z!`#^Jzf-KcJuc3VN<8Y?@esiQ?*KVi7k_&hs)CR@1b+nZjeSs{mvFY_rIdl+ue&q0 z*ZGESH@^;Kp|mABR+Q{2Re?gqgy$EqwS>Pkmrstz;62?xY6Cq>y|nc)G_^bE+Ri!TIsx}65bK|tQF)S4 z2xiJoF6YrGV<*%*Z^iSs~^#rnADKw2-hxp1_@7Vbt@$|I4wV>kyn?V<3r&! 
zZnH(-99{A&M1q&?hUK*M-&y|L>2iQnqrew>V%6xClJv5s;ylNdGGav**ShtvdtbpC zlg_Md92N_MsWeZ3*4x_thN1Gd#AJ^q+n4yTl`Xu(o(EW}gD0!-6i9~-YyVAdGVKYA zI`2Y*P^EX;5V1oEKI)!!rgDuCR)F)zGM6B(Z?@Ko-zBFNjzV1Xjk_Q&`1ITL4jhB+ zWZyqp@^rGV9xX^dP^?vdsyw}mtX#^a6P|1dbA_!{$UP*Edi^%)fFERA-M+JiC2dOW z3JolK&&7OX&l2S|svN?!yAuZXcXjx7$L(6U72iu;qRTIF<^~yse<_3s2nl|C)jzssR0HTqft#`B39R1 z=J{bIOylBYYX73xQtYG=TGglyu|&pCo$Gf_%`L}i>dN2@Ntn*yuIx}Ad=6_`+rpWX zy))DL#&U~=CSt9@%0M$mro`AQoAACD)rq;J@Z+zOS*HpWKRNJhmO<0q-P%p?L>edg z=i8xMJMLYe=@=$>x~-0`JStz7j_t9Gp#oBnJ6gKeR5;jYgOXutDqgS~!^2`z=3<23 zMx>M{F`Y4SkE=gC+P}t#+YgUl9ep)Tu_9;emeL^h%3$H2neZ2G87nH7@7)i!!{?XZ zj2V4@q*|KomwVDv`}bcG7~t`y04K2+XM7EAud(~OpK!9HP$Vi=o0-^ z44B*QoI=XqV_vF+#je+K4b|&3YwAd)kR3)rQ~7YBAt&Vu4m}=-*VBD zOBy#Kr&=(fMAGV~Zl(QTkIDB6*mug@Tdip`IM1Ed%!b4xJOU^EN`b}z+Uy0$o~#$T zi=fT;QDBSsN~$dkC{w)F0=2jB7i9rGT3?@3#X=WIb3%s*Qtc9-Vf7ix+3?KRl-Ge_ zWf0HKjENcOeMM^T8gEX}6Re5b;*Gr!_)_9DU82~0VF}bA@w2Zcg z2#VPaIaei~lH%8Mvr#!!VuW&gd{kyRu;(SXx75ny)OnTkqT_zZr?1VZFwoU}x6#L! zdbZ*RDiW`TWYw6}NU)NROfyLaFx zlVOp)4ha{@6V;5aRLZ8v?tN`ghiq9m#6Fc6*mkFaXwzXg>|y!bWXvrI3=S~N4+X%T z#x!GP@@#+sN#otXI+1do%1AD zNa`z1GhR1bQnW_y!)@6S*-`MQ?(@}%T5c=r54RvT@hfb>dgMFn$WEl6Q-4m~OT+Bl z2T>d*Pu)(O17$DD;yqK9Gy=lCEEYDxEDKDH^^5mei_;LYxPIhq2dGeU`2$szuS;dW`l{j`SRe3}em`tMA?Ect zCFGjs?^N(acNx{<$c-HO*rxVkmZiVqsQhyFA#hCpA)M{^-)T6o`_%?Zg}W5F=gIP9 z>Xl7q8~cZE<7verDcpL-%wSb)y72tBJL@EoPuU_1%x@GcjN?pQWkVRoID}5L#m`Ei6|9JO2Yt_XQWzDuq!@B zdp*wgEi>ru1{p?;q;S9TE<(GY4yzEQwwa~-K(zN!vi37~6j0j`GRuOZmv}&F+z0_$hD<}L%>(a zkUR!sNZ!Tq%|tDPP41Hc1ARjGKfuc-!=EsHd`{0e+1PZ8ZHFZRbB8sq!$PMRf9DgD z7?6eOqqvSmI~U5iZ=Z}>SRwT2v`LYKQ`m$iXyD~2hhf91(Y2R77=DngVkIBFIHYoKdTP0E+dq6gK53#~WvU7*-sYTafYhd@;1goF$( zx%Yo9;#R6V8y1lgGHjurGWjjmS36mI5ih4e-f%K5H!!bTsBzqR6j*^4J{o#tl;DDF zJXzF7c)vvGvCCQQtJ%(x#W`@AueL4N4YfNfn}G??1u(DVGmR6Jx}!5J8pucHR{mP7 z9mh4MvCT}NaN87uot-S!~+zC;kYNpm^bq(i}hd!C`EY2-QfoGTxOJ&2Ha#aQ^Us4ommk? 
zEb8YYYcm!kqG(4)aVvNKRFduvw*G$XaZ9F7+a8DaCJsBf4Jbh&n*p+TKXkUJ=YXl2 z2+-e2i(c!0d_st)s{%OfE|Wqc)C99_Q``lAGbE6CHyvZ$_>a`b%Gsngn*wYNIoRF} z9&|aZ|2rS+?zT?#J+lQyvDAMe>z3|s!K3uT;Tj6mW7$%J%QBI@e}L}#?ItL+bQ?K| z7?(i4RR}a_(x$(PewM58xMyx~~@~{FF)24D7R|dW+E^+L~6Vd&8lIf-l524YB!d6XzagVKbm7M5q41guzs}l`V2x1?4Fy zmkJ00d_$5y$CWEXGJ-0E?L&x%sQgKY2iTQjV@!Q6#OFG&bHTWG)5iQw&ODK8*rTk* zSl#&cJg8Zez?2DRK08$r^l!~aZi&5;=bI}BD^$|nwe7z0lqb~0hg69IJ;U%qUnx|p z9@U!HvyfnFjE>dG)nUbk!UWEUjO^w0M3ngSl&KI(ywy~OdhgyHv2RI<)+DJpDrQ)R zvO(QPccrTQ&o7rWRbNak?k>(alQ|B3omamM_KqFCpeApG#c~=u$y-DlwzK6}uu}u3 zE?=|&ziEHUOafaMVnW?2Z46N+2m+Q%0i0QZEkR-6t<%wXZIx8PYc zH`B_;i_0YYb7uK9^2cY01Jyr$xgM(PwX2Q=__pnbMzAqB@AA zKdvg?6%^SEcN8ha*5DwPV_xHACN7kxIucqS9#l3G$`(f=MgRswmrRKr7pOgIELCST zh2S)M2GrHOjdi~{Oqmwj`>tR$Gy~Bt_ry%}_1M3DSx}K|+g=vlvfiQ|VQH3o>VjzI zKb+ek6fC$ZBc zM$2e;>3K!%N146d5*dy@!km9bcOY0mY9;gbs{YxI{M5_o`(O6wuO&?~=GrB!oVfSa z>eo<{JT@hK?8EsvDsre*pOQh`-A`1Y+b++k9`IUm z2kjsRs(@3F5o7^HEA^1fs)GoNxb{~L#0?(yWNHAHegYX1n>=m(Jo0UrvZ^IMNJkl# zXlEjbYbgMQ81FbZ9UDVr%?)*Prm!1P&VaoWWZ4{+RinVT0ZurWY6w7@?%;9m^TF>O z_gb2f;0t;#Q_-3bJZV6ZHfG8ITEQuXTWG7bS$~(3C?s4PFWcq?3lu1iGA3YAG_l^> zjOsITK(YP4y?iqx{L_KSZV!gEs|iqZ^vV-vP9*kZSE!e(=w=Y&3INyp6R3icug z%_n%N%L1S#cjh7!DTv9jf6yTNHI~0@rUY7evmP>Cm$Ex3*?$9~UEm8J7kzwQ2N*Wr z&I>$M^=tU65w4qpZXsbZ&gJO5mzz!22kzKv48Mb`ZB(PB@7dEQc48L7A}kx5yU*8S z2g5hJ!O{?vq_80BLJ7n0dzHfyY9_j6a4eT=Ki3IboK5TeKvkwa9HL;4Cxh-@LMFL( z`4G7B8M*Ws^kDf%tNM=wbG}mBL@6(2vjZMZnyOMY^_Nh>vKIw7xz{+ z^B>O873;{Er-!%St*Z5O)TV|D@-&lcU^;K z&DBa~OkUl85Q`_P;7&4o(lNPjCg80>Ms7c*H5OrB+yc9Alc4|fPmd#1>*TFyNJ_$_ zV&gA8s;WU6mWbZTTWymTdPZjqjRdhqM(m4vw;k(maNP`Z59BH4 zPWE4nBT!X#X;YIUqV4lIh589X%fvncy&vAuuMBDjPBTpz87$Gf>!dxPlq2;I-7bm5 zh|}E;-D?+e?E0`dzCsHowrHT@ec)0^qd=qKFbU{j^~9u&+^jFrNOmX=z0C^&Sh7lE zZ;*@aiW5Cm|Jh0BS~g=rSi1e84-(e(+DNU8K+p(pnOFMGks6&kWMwNEFb`CicqmYH z-~}CrQL||jm0S)MqJ)ZIEejs-mz;sUmOoH(LUNWj0N166%IYw(m8HX=i=XPBt{Ecs zrJm?3AV9N$rR0!9NpyWJS^+&va~yw-{1e0%Y8AWOIVgqq14`p~kIYoKEn`}xYy>;b 
zcGLLxFYAxH*&A-AqP`t8R$cIZ8~xOC$iw=rl&~;sOr^?ajE^VQ@;^6-0RQsz@2BWJ|}_3KeLSN7vYKqOm@|(wKp>q%Ihk%$dA< zrQ0c>X#3QmqP(Xj=-xvcx4PU^N!wq)_(|`f?a#+-*svd;`@9?Xvqm>O?7(k0G4j>& zY_pDz0tOCEyhwmA#WDcJUOOFK`nV%kHmFoJ44JPR<*p2ER~4vZXpA71qZ^qT(U$^6pwBMmqgLY4pQ~8|$~aHZ-Loj{wh$02ekH z%Zn(kbjcJO1C1~pD+E)V0}4Rm`B|DuK8=gffAWsF}I&~88T>F0te>xj;k%$?Kb`9-kaIAOWROU| zFz9{{ZT=Nr*4%lSGL#Nb*jbaKg#pC+yb(c@GBQ#L;AF9BgE;8N$|gvZ#sE?oY>i1c zFLk760tcMhcW!P#uaKeNKY*Z)SKn)mHhKAy_V(QdcD;tIaahodld(jWqzF`VGmXO3 z`E2kX>{z51Y%HO1!pnyeTp+j4X+hFL)(4i%moZzV9=9zBvl+x$(#n zMO`*(U;GwBOrGYp9=97)>_V=nc?gjY?SgRn)m+rSZSaHuR%PAu5f#1s1+4S!!OBWE z?EADZWZ?9!iMn;$en5MU2aK3)Q8dO?u`vRcuYJlP!fS@PEhz1^f=zZf2!HH;3Pe*F z%UjjzAz;JzQczJ?JE*t|Ht6F0`H&c_J#A(Xh>`;1NzjhQAoVN9W#N=k4fR+q$QlNt zV~1FaJ0?Wa%b+L$ z{Og^~ZdcZ0DlueUjZ$c3thLGJeu@@(D;LB#LRD%;fmv8eanE#6(8(Rr3IW75%1I$ zv|KCOdab$o?R);465-y*EV07>QjacedN6EWa3mZ0y1(!qD2`P^WHy-yLW@YB-*xOz zu?oa`3QI&%i0w1u`>p#vfiRu~D6mPvVUbuh%|Qd+rAExZEgHI+WlIl3IaKj$Rf5|n z__FhcR-bfg=ta(7LVs#YE7L-O*EPEBH}Zplk{#@bfC%kpb`{JRX_%tG4D|V`_)Re6 zzd|Z01%A;JM;(UEDOQN`4lBc?2^IcgTO+b^BmjRXoHaVX8)*<9s(RW~f0VP~7}(F< zbeX;!>|m>oSIh6mq^)3u=rG|(nWS4%{5z9G{L8ALu70tKnKMRqtl zXry<_0_K}$6p)ypZ7GNmHw$wGD~kYyv#2W?Ex+X=GV$PqN;+C%T~}>bgflUDinov` z+R(rLoPYX@cv1YSRB+YJSCTtbbhuP0abLC217AA~{_x6hd0_3)>t@#0lvVK=3id=q zV+U)kLLee(I3aWXlw%-!#f7xs_amoJ9bgE4$a0axYxV&3RQYvnXW5s#vL-gVnD{-p zmHd2}PAW3xBXWLvBwQEQPoN?JIeoZzttyRsl4bs$Dm_2}Rt{jraprvxVfzZ^R59>A zf8KoG?~f1z{O+*6J{EL02$9-)1N&ZFc2Qom0FLiP*Tew%eiP?v*o`>(-2rs>TjYes zz%4?{`zNZ6cehB*@Alu-6ZHE7ljQq)^|w_Mi{w2q78?0Ge{dM9XWf_*6K%aE4yFW% zi{_N&?DK&)Ywhrp6m{SHpdqXf9;m&jlfYa4aaDEd`ZCySY23cbHj(~LoMpRMs3;f^ z=sEEH&qhxeg}2-oYOCQQgF69b(IsuT`0Hn`O+wt_EGPH3nF&AhfEpr|xMC?$o7jS) zH%~jm`f+eKVBq}AC%$|8Th6L$BI7siz0zuzJU0MB8>okO2zG|mIy!YFzU!nF?}$dq ziFyC~P*k#R$)4{OvY8PZ$lzUgfYfPv0Eb+w0_P6kfs2BrX%b(4R5qD!A%(=?S;c+@t zHM-p+quJBT-Jx{1V#=H3To7Q;i54Fqz_~~$7HFh{+?3Iwpq9ZPdLaxF&^QsFPow-J z^8R_A9s@xwSMqvKTa~_SEciPUknCSTB@G|BYn+$NY%D9!{9eC0p@vq6vj=U>4Y7vq 
zia*T+|3qNQjR&9-78Orx>zobp#|C~*?q~#doV7D76j3uzwXTQQkp!iNw{_|rm(s_; zq=K~-Syd`44pgwJko9h{)NeVpak{_W;;;s+8?}+=&XScmn*l;Pg19d{v2H`?%Lv+A z8i$kIZqTT?lEr0+m`70)Fj)HSim5AF&95h39{lKBRij~{l1|#W=D4xLCQP}17#b=+ zy}W7SGInniQqr6k0_Lv4V!0QKUkVBJ?T^h8Z#9?ozB@7M6CI30WZQpu^!vS8K~6zW zW`%?*^p9DQ{U6R)x3%kc7R4fk?7_fXhuGt|x&jkhzn)@=Yn2h{E zui*<*GwPl63CzF`x;zGL85#7eSuAoRzY7JMyN+L8@jn7zVFf?;IX*uZ@Vj^k+DyMw zkB}>+a~_STqWnh+9AK_yb9oxxMxF#rViFj11^YP8UAazy9Xp@w-|Q`4zEAhQuZKZ~ z`2&`1FG}9qdxGg{B9hhe6ns74)nEQj;nwrrR|7R02oan7q)TWKRs-AW zPj7Rd4iT}r*Jubhc$x>Vww(vTt-Tv3iMT2#YWy5XGui8&L&Re)QzD9$Kq5;$#?Tdp z$!NDgpp7eS$7nuC3(-I(#V=r` z5iKeD#I}=%@;HoiY{;-IClV?QyZ#!bu??%G_f&SQFhuYr^hhDruBOXm{JMn;BffX@ zwnkvx7CZ}gb%yr)r%E^xO!lZnxHDy{@V&w|F?6+sFa^sX_i{w;I&xUM=e8T-fE*8h^>maFP4NSc>^ zl*{EP3(eqY2PZ374tc9Fxn7djbXnW1e$hhj$J)R)dTgno%%83_@seXx7vZug!?Lxh zCSQlM1sk9T(>gF=u*;BB~sORn! ziLyEqXs$DPT*8M6W*(f0xSDdLC`6X2Gh3;MYh5oK6$9(|N6)>rkJ5CY5h!^YUr5|lEmWDI+#BEKWWqqjp*B-ij+ziH_j7;z zxcR))?FxMR*1M7YnBBTXt)9K(`KpJhIe$C3=VdtPU#@t}A1VlT$gvR-C3<~45`Mm2 z3GM}c9dBR1UgB?T3hv~O!NZX_KweNft@miXB>%&bLfZm*-OF>3*M-vhr z(#16na*22K@a_VQ`wlamd%F7guC71spa#F4^X|R?hx%g4a43e#&|F+5bGc*GNKem| z%ij&+n4q03$H>=ndWerd>}hY;qFEDRyvl6vza8cG`v?~%v})6WDy$jMaS<#X0imKd zscdF_2|WeNPaZ;||C_N!z}|ASHsEPyE2{sh?e4Gb9KO7C-QepK?_D3y7xGXHOIp}6 zs053KKx8z@OMHNf~oNXnSjmuadOGIAP$;R2e}-)p~EK(ei9Cluaq5K{2#YS~7xLCeKQ z&?*ZD#txeUnXN95CLtG7tam->77_Mx&WJ$qB!ZA0hOF}s*)cg=0B}aL&y+;M=t0buGGyAR9csRVyo2f(e!Gvk@dzkX94S`G2hEf$AI`#OpG;J zs6|ikSD}J;or=jA=Cj+`IsZLRY}ac*Nc4aew)k{>dVTi5mjEEB&m;|BgXHbU7IAqK zq*BcwS0dl+`g9TF)(O0HLT(x*NtNQPOQ?#5}Ei3|CQjb<|4C0qoZN=?piC0A(6Uw@zS$Kgp?yYL#Z z6YpNb8sCZ%{OL&Wv0@qSCY*+8?Mf-;uP{(&ccjgvM{y#@o zL;b4|J&-E%^;N|6@-blqnza{DN7_!UYr3!o)%6Zoy5$wP(D!$D?0eJeyzd_mw(rZ^ z0YibG%Ypljb%6snZfKL3Yxu7j(rgnAN$0r&Fp+=K9kml9KW)1Y+q_yXqh+GdXjq2` zBWO%BEf8D>wzmkAZllsXaC&>iv&RF@6M{{_w$?pH{a+ibQ5cj)%=M6uU^;F3n9=^v#mS{D;UrG}>-?JW|?N*Yii zuyYeVy}p2i!0qnQ!A(LWWKSGLsv&DLkQ^duU#MAGQs!6el}p2&)y7|5H@A@a{DILd 
zAIDGslLg4JoNYMwL>TCpa}f|{@pEN06tT)0^7=?zZtHIEf7%Y1TyDM!+lXoEt?EMmN~>Ytl^C)as^USHhcPFg%col^{mc2o zO1kI$+g>I?q&Y;LMrrjOcgKKxp~2?wWY0?YaIJMgO@W^Uo!I9d3mSm36l!6^1dn%0 z%xv!Ubo|K91Z}W9!lYw&Anx&B;WXil{peYOH;C>CiT)0?qNzgWNDv>`-SGZ1OfC}W zCBxi?1^Z9f>HIVXbm&r5bfI2b?|>=Qod&Xp{!{WdRvH{MT{~|GQqnJEU&l4F@nrjG zLbf{!Qb3IQumfjKch}QVd{}S^atOpp<7YPnztDhH({xwqx4^sR5vTrDLk^)%i` zn->ZC!0l<356^Q%OrC*)ZE_rm(Ni1tE6PaBnZ=ue-h`CFOz^onvFKhIGOEokdO~fa zGD7EIQ|2I^{ zejglv9FYy8@mxj&WW|F}IaO(}jC7(!3t3KoI=q(q5%KOFB5>vg1TNiTYi?TEL6jx?_L>q$03Bja_! zW%U@Iq*T&TVLK|MyUEjpKT1AgBi~<_exNlvus5~ww1UJpGQ3BTil6=FGBEKGuMT%C zsh}L?`m5H1<}MUGfFifYre=)}S6$BwkxJnOyZksLs7XqjD-GS5Z|`AJ)9d4v<2B%5 z*%@z9_HLHDp|$8vu7&T$EQ9=9OHXBUA5y=JyMt z|Fu4-+ySVDbJsuKoHny%kwA|cCmg{evUS_4%YoB8$I`TquTIxeK&4^z3f+gepo-l@ zab8AT`5{5jNwmG;5rrd`ZldXAF&RY@F1h?brqw7Cg|#)#Bb-a6eMR7SRqqWr&c)JM zMhi~1C>plYzX$i4d(xrkA0-DxUlD;rdEo*m#Z zH_8=ST>Q(p3S*rQ+aQ>HJD^xI0Arlu8;zK~sLYtc|NNI2HaSlsDSazk@GEifKD))K+X-AV;+|6*LySkyiK@q);Lhi!-hsGyJ>3 z6K!Qho_O@PN_^YU7!N8=Kk<{f3Uej7QL;?QwPnDqJ~JY!_Ri{urjmX`=7#!F`GisRfwos^JORGke4pqmD4+$Yu8;n+7q zO+dWnpgnq;=_9apLLY(NnzaJg+&@I(vZ(6X6QZK_jhV5rjS;G>8a4TvB(EzSF)g5r-it)pth8NT~HINk@=ipL+c_kWSf`Qt3 zt)vP94(%M8z(P&%gc>KvjsE)zoHat_5!e0I#$^U#bV5S}o<^gJk8qjAcaK5#($F0^ zN){*+JxncwegUw_A-|m_2r0Dtf(?G9jF)-Gqoa!cB=0n>` zbuqQK9A9kBKh$ViW!u=v|G*lCFkbl_)uBhm73091OK}rvUeZmTjzP|w-fZ54S8@LA zI_XYkkwV!!{(V4Szen&7cSbmgO!j{qpNI`II#o$@ibuf#3s|yRMqG2B+!wYBNl{Nr zLPPj)?yK_Hl^9pRja(I(n6f?>Jj44;83w^SO42o*VBDooM^lLgYps;c?c=OAg`S60 zFtm>=w=_k<>vQ8W>0IAGdX4C}2>| z4K2*iwQ)H1xGu}8WCM&e8>TCdnx1Qn1)W)b0v=B1>o#`w`o%J^{~UXJ5BM0dHm%%W z97AR!n)%1lDUT8HT$wMpyTodx=m(VC2hztb(x7o{<<%VF@4fMb#QL6FEqesHr5N+1 z^kz~FxLnOTgZ-N&sNa6K(JO21X3`~RXqxw%lGB1BM)+iuUFj>9DOkpo%+!gI!dA)# zVhUjz;@I1w7!=sBX$Oeb4iW04Ym^dXpj%lxLPY?#%SSjXS*ySvDrzT0wp6#7l6Sl! za(uOO4rnhnY}2t%=_pK4pgDf}om`MwGYWjJ_5oOGsJgff>?p~)Sq>H@wBX_i690N| zxO7ycaW1nQhLXIR+RzGqT+87yJ`(8kNRf7`3@s@;dbLLdY@s#%$b`v7^z+Ga90loT zjhRmK-vWo@i|pKuJYcZUCmy^wAsUH=Sm3xlf>@L*dPowkf&JfG2KqGH`L6CPgqf5? 
zlNPUWdq5Qrd)c#Y&gF7c&5k#VxAGuFdw>|1);XXfbeXndVAK{IcAXolp^y|UKV)u@ zJZ2RK5R))N^XHQ0ZM4E?xU_jVsw4a{sp2@MrUVIT!8oLx9^wCRb&lPYbb+>xZQDl2 z*fDl&t7F@?ZQD+V9a|lAY+D`MxH%ur823GYp~k3HwI-g4AN|B}WzX1-v()Y5@_)wd z2eet4w^Tl=Y)awQNj@C_G4H-nvy$QFQF%FCfx zQs)M$5@FGe^#ED=UiM*@z^&6ekw!v$yOfiN@{gdgI}U89^g&%Up&yTz9tTdR*5zn= zx*V3p-I%KDFUES@*xPnVQ*}7u@dvzElW)#!Q&&6zvmPDp{o6aw@lYh1AtYS}CSe!y zQbBF0bDk=Q&E%pK_4Nu6K}ATWg-lkD!kP^rQR`H33YWIPmEu05?x5aG;V6LI#F3(h zNy&2J@=dmfgr)`?)cRTDUdk>D+UK7?j-gzMHClXv!A}2UuRG}`#cS+1!I;#E9mol5 z8WdpUGDTbd zB=RD-YFTk1{5juZS_^YeA}`1x<|C%#^H&(VyZxpvK&I)RgCXF zY^r+`QZ7Vns93iUn1hqE?=6!UUMfrGN_3I9`5G+GtyqOZIduJp!F8d7*C&RB)! zDQrc^gynY+6}r47?xZvNF=OfXhYvY6hDD9^l>~x}UY)8z#~3Vl;HtYP4fTO8ceftQn6n%Ih95J#tp#1 zbdsO}gPLP>)0VPzMOZ z&Ddn)8I$}?&-+z3x)v%F6a`up#mG?*Afn6y(^3dxdx?W=BPN!&t;YB$ZbPhAncpHq zSQpl>EnqM8uW&-hlFD^$7@*&DFD#IUA}EbOIYUYD5agv9?je$Log;k$qxvUFGPxvR zCg60{Xn@wxgq23U#87gFHOar#j2cHx&jQ;KNCxBL7g91Y%NJ}zjp-B@e(skrrSb3- zZpv5FHdsJhMUE--Vy#5t3}`p9mD1|$(oUS8N1H8Ua< zI?!eTS75r?lNeFfaaF!zm&1SWg?74>lr z#HzBx4{{-9(4JDY1%9Ju5){jPm{GsSd)Uu!IQH(g@4L?Xo8T8w@7Fa85f|1kSOmID z@Az##{Oea3AK!K3MY{%1_I{1{9nn~8;)aXI=hwvmwa!5^#?6D2CLt1EnLI$&n}{{c zu@Q==+sdFjbelxHX|4DhJo+jnXnEl!pg?oMQkJHkZosJbN-K~wO_}xh->V#u0Y%Xc z5$TP*$M?Izncq9K`595(`+=k2|3(e^9*%UNqml_Km$S1MaML!ta53x7dwFoo=h~Ry zWaKLGg0pa_U~wt@Vb7gy8IwcUKhAGyis|=u0KOm> z8VBK_Cn=}*BZ9wUqbP-76OvPGI09Xn@`FrGCPD=6b@E7&gCSXaT*Vo{IeO_Y@D}Q5nq4z*@a~W$#!8GsnWSBKS%NAd z% z`V1S_4x$1CgbU^A@z4Q@&yX6)kR^OoY~-tjbK#UE+yH`KBn9RessVvSP$Q5ArVf-) z?1dttB*Fx^43ZyPaleFMmMVwRee^Y;lVDumq!p3Gv!Rr=!0^(mbzL|^EdrLG#IL0&@}t*poR~SCS`x3U*QvUz z;5C|HK5r^`?O#a7tu94)Ns%xS$9x&qOj0C0l_Ldp!8)O%vwQgR@^V{yyOWHpCCNuXqrX9A}sWkYd$z zAqE>5%ZaMOZ3TApzl!IsU}%0eBMN*zlfQrN$6pIuH#i3ViltFn`Gr5FsYyr!UVhwL zw~iYAO*U01_|^Y;1Zmf4?c?1vVrE-!NpG{pZ`Jw7%Q>KM%4x045umz~3_0f({yN5n zvkoV0G36}cm)T)|XOtmna7(6W?%Q{^%!|;C%>m`PchZCW5nzD$$IqY{(lu1j^OZD@ z?=6D&Lc%_??%BD5*z~A%nsPmHp5nWa%mnE_wBJErd>Lb z=c_v@V5d1l4YcY!i9>pD?=C39Q+MoXsYD(Fj12XGs?JebyP{Mj0+8R{Hm!*$-pZUj 
zY!ZvsDPq7pTYCUY*j@p1cqLUjyHMm)yzIV2GiJR~6n~kLvMllrQprg}Kv;I<4Oxkt zDLIFN+(EozU^qt1$R5xu!Y^mx)0cH#5=L?$hRyPDOL9AgoOc1rNJYQ^Ey!$1;W(Ez zICthXaN{t<{9tkNX-#)8af_M#yr$d}$HAG8B_Oq-6_7KH`9KKdv29v&P>;l8qo+(h z5z(QI0w`|r&YxxSnHA9m1j#K+*P1FfCuA9#%o&Ya3F4Up^X9zamBa&5Wq?N9Q+3zx zSNEqmfc;-T%SlXDxp5JS`z6~+WevDhy-B0~{dChc0i*R8$y8F`rcllWn+ap$D8zj!Mz|+_xBl_aY;SZXRw5}X_-P>m*l$1Go=uz6H+cT0Jr*_|QcpFHr%t=^4ca2t^S zxSzoG+I5Fli}|XtlY|#2QzNUAWV_1c4X;^4L~>cyZA+&ldDN*8Qu>%_4N}e*snxh< z1|`O5Q8DuC0+$)kR~ZHMoyiH)!(a%&B&ss-&jH_jn~u^l6f=%pSgrIxg!(LQbo?@c z-ql)5B*J*h--j^>%>}aw&#P3l`GqlvT9Y)68U5WEWGnFW6vPDq-lM05HezM#n8fe6 zC`CWiTX1Cx_fNY@ZQrjD2=I-st#{eD-!(9WB>Oa|GK{F0i;g;HGELrSsBVpz2$Pnu^XP3>6@DqhvDiML-~n*W;NU z@f(#k`dg35RhiITD zl~|ReLJCT(cL3N)MjTOuD7Ne3q~}8;Oe3*0+XP3v;G1?l)m4kr0@&=Cn~_I0~|8s?W4TM2_ufsc$kG`ecPr@2-%$s4O&p zrYhxZf5HbAx&XQ)Cn_K#Ta^_mu!*q`%f%O&Tj41y3-$+DvFyFg86?tgYTgoZkrUM7 zLH0%25a1;Z$kVi9urPkjk&9#;G9lA(%1}bPYxaWqUB%KH_U*@?0~;nK2M9{uafImP zUIppJgb2gpD-BtsXR6fUQBDQ4%PDtNd`(xSWh}XBECp%p8As+!B}=BjhgTrrV~5B6K$|6=p#lJJPMDsQNMiP~Azn55r1iMOKZ189 zJcJnfCVY<*K7wDH@ z8b(`v|Cmp9F-~x4%`FcEa@p7$3^9t!T6L^v(ym`n3@}_xpf+rjTlF^uu8wo@l z4z}6)FSL{W0_k8Lyx)4V38O(KCIuT!!{J$_cQdgY!gja>2GtQJ;Xt9>uaWXM}RJ3LG?(%q(Q64CyCW0961pf}1#*_cIGg+-WsiQ9! 
zCAYy;z|3E67KftaC{`i520hjVxpgQ%79^^=m<09%@2v%Z^$;FqQ%azLPMWfA;VCjz z&^@==8`1=X0$C?UfUx@FL}4X0goJPx1uw&=QL*-~h*C&G^gx!p&V2Brt+#~EQe)XP zHCFk5+A7oxfy)cs`ItCBynmd>^seplsitEZAJ|Z?m(3UDcX?f}i-WmwT7iCG!767B zieg3zY+jB2fIYX?k?(thBOg*1KUi4tN#;Ff%tWzBC`Rq(i>6Q>{6op76*QTUhx*6mtuZ(rbDIti;}zLLdktk6l!GCV);XI}wYAe}G(gZ#JEk_@Yx*j!RZ~T6L+|&y z%Eks_f>(@L0C<|XMR??tn%xJM6ZJ}6Q0qe_V$dRB7 zaq@_9dO@jg0&{>I)>HlbM-o>pxo)oEiS zhc55X+ARGG5o=`ufF_{~Tsy5REC_jUXb4}5O|n~LNNkgbJtbZR9oKwD74imn3P1jr zh4wEEBbdm~Pzqv`#bix+7A0l;dvenV2=-qnxU5^=NLthb);Rc76+39F49*;k!|0pX zmMM==pF7XmC^!E-7Z@K}^)mZBVA|;Wi|*c|WyXWuJ>|W&;(zJG*?jj0+JF{(_npK# zS58Mppj&Nr1;33iM+GSJ%uI#H1p%we-ccd$mADmVhaj#l;th>^^7iURKqL=S*C4SL zxw$4vlCY)numT-J*!&X;NhB8dQ}LM2 zmyluLfV+N!`Ya{`1|i?J7{-JAI{lVJmTjPx&;7JxsZdYN;a6#x4wdxiZI~n?+@M^; zFQZM<+fNx1&U-!sb^U|BYnPv60#&L;6JPSUxTZ9xu;;oS86ycX!!-h8sqA53`6GPy zx0;L*&Fr!k4Coe1$=%{KV5{4fZa+!1{pU&#ypxQncQ49yn{LTrY0F*|X*0!k7~K<8 z+8Dwqkhx>fYg>6YF5GNr#X-jo;-@DMvX?HCM?m{+M;lX%`4 z8#(y{jQ$+y2 zr@GH+Wb(8plEb9nwL_r8C*&mY+O}WD4vow(enT-Vk-pCjptkPnTB&Sg$7454k9{j$fQbHcwRdVP;5#_0RsxP`@OS-EQMY+#Bc&j zLJ?A!FI(P{J~8mD2buel`d$JK89OPv3@P{^Rd^HTn|LAA$y%`%ASo?kuB5UCTFIK? 
zPpwP_B&Cdu*}Y)a)R)pjoAmDSmvnfsiBY3Zon^I@#2d-+l{Mfd)Z!#-cae5vvcYUskw_0PMcCK%-(;~;s6tt2p4hUq^I#hHIXXMfCI zGHkYbrq{|=B9aoN<5m|ISNDS>CWwAZ+{(9*g~H8txoXWa>_4l=gr`F~E8Fa zU1C&tgZAt_@}xK&i=UK#sEKXDKFZst0QG2*$sF~IK&Ay=cLDpq7Ui~qTDlt`5O`(v zL9v8{T+OxB>bj7fg46~g|M}Ce_t<3mrRSX>^;}1}oKp)u@O>gYQg~YHC+TRoi&alI znVG|i1yTP-H9xWOg8WtiNuE>-QB0l`Nd3w-&V_2Kt;1FCnhi>VwnnN;a4p%n0nTP& zoRhjyi3aszC_dmzt3@GT6h=kqaX5vMFs7)etWU%&-R4Db)xP4vbwJW-=QL*?#axoY z^|c(yjkw9Zm(q3EQG|f%5#<=#wLqDK?jr{l(J~zY@4)g8+#fCkBTqFM3N>NHR9RJe zw9Cui=pq~>3*c3M8oK2=EfwnsY^-Nck98x=K<~d}>qxtkih0!c5T)<9Z zz<^X!J3pytop!1WQRt?!bDKC^Y{D`E`b@A*%{tj^)$$E{05u$mwl&iR1hG8ni}3`g zl=3OHu4HNs15%dtFE2caejjBI(wU+2sQy{*3GHngKK8dBytae>t*-qQWMTthrqKi- z`1c!*8m_~i#|TyrL?aD@j~+uUXEO?V48gxu(vLJ0QCO|8efP@d=({7QGux|zx@15A z6bTtZCLKTO+;w~&7d777Hf>k+dn)Pu;!sQyeP34L!~$K%EJ@6L>yA4pw2m9Hz`80c z!1E<@;v{1}Q0H5ju1{`^-=7o&hVnmeh&4@7|A4+-q^0706VlQAKi=VeZVt^o4!sEg z?9Z@7wzbTBFED;yf&bbDTdo|}FB%C*o(zgw~@4#l0V`usfI959m2T}L$z#qdI{hqP~_lnDVN1)@0c022O z1}=}pmk_A-=!nvRHa4xxn%laD&02#kjQTob*cSIaSb#fI|MPm@^BarB({2haBqEJ4wzP&5G1DNd@40MBDbVgo8;YTMsPDhVM@M= znaHT|+k&b=s6^=T(jv2j+No?C;`dq9H?j=AiwJRksJC1Y*Jc7plL!_k7D81G{VSMg zzN{S?i})48#)QD16fjv)n%?6>v&}I{;jPTUYAH*Qh$$o6vP+(N*vBI}7JrM$cNnTU z1|WT-%L?YuvPt;?Cf~V-Z4otY@LM9V9fka4 z0fZ^yh$%mI9lS&Ci4=|mfuUVtlhn>q;k=>b-pS5PKdj^0n`G=Z2R{2I%Z>v}JvY?7fd@$tR`AJ>2+2q4lm?;xifb?1 zUm?*)j93-bvnU72ACkKZGy^@c0!*$C@S}Kh&K&yv5ll%oj>^%9|Ju!2MdX7`g(Aif z;Al&yoWPi9YUf}yEs6(sBrfO7Edo2Nz+ZyBLGgB!s$GL!gsmjo=paj`^?9%WB#R}p znH~HpdLd}oDAsYBA%RD;*woyoE|esZsibJ9LQ(Jph}H(N;Pozm_9k->@9Jqatzf!20-tG36MeDpG;TQF#Jzf!CJOuhZhygcum3`tMJ5qEZFS*qM2#@%0Ys! 
zJXeg*#&6_sB`47`!=b`6hZFDn`MxD&LxtLwwR=_gXK>RbpRteWY;9V$ZY6^=m&r~=F5 zV!5?Vo*Xfm$VoRqIQlXXZmwfN|4+)i55X_oYGi@F-nV&ThldlEC58Om*>LFhqhtk# zhkI1LFZ(apuNVDZVG80m#6wQL!TDvQ!MP*F&wUGMefiAiNjRN8tt{AMU+$G!g{mm7V`+$^tGr5qZd8`t0lAwv^_v%= z2zRJw5768$Is$bVJsU3a>Xvq&x@>3C5P;3$PAzx{{ zij3Gh4hCi=bzu!*Ygbt*#{PG!>c8x|;b*~;dat(}>!Zb(mC-c0q ze#5&qsIZ7Kcnf5rhho?Vs-1db7NUTEAonWB+sGLcQSRn&9Azhp67YK#bod^2s?Nmy zQUVDZ6tYvjXj+-swg@jn3Z9z^Pmb+Zsy^|hWiaY|b1&p-BZTOflP`BEl^4 zk|0vD!P{P4!kw5d_j-VRvB#C{6Es^jQN)3v8dSVW_cBGJ6l3Gb8_$*XupDts-px1Y z%7~)zzR?0Pbccp;`fBSz_ z8ODd=st%x@gS&QNK?Z&3W}0xoqtS@@mKo?rc#|!^D&-5y)jC4Tu`lnc^38_#wxq<- zm|wTb-Kr(MlijVr+j{2jw%*yMCimOA#?EKuP%@V{Rkb8e0e=Tsia7+DGh>`XRLmho z7#O&TYOcYqO?bJz%Jq(O1_cRhW^~acVL}c`fU6H&B*``&6n`-FUkGXl891Ki9GZMo ztm^!cv8liTG2;poLD(8XaapB4VyG|J3}o~gTuS7DsB~^{c=c)TOKG{nng$$&*ACO= znkpt`A)8f8J5}19BGh@y(>y{lBQC?!?{@yKyV7WM9*Fsr$S_dgIK-d<5rU$PXO-Kt z90VsNM+rr)zmMI^hULe$Os!93sE1VH3q(3)WA5tBHn0pJ zWH=6mM$#Asq?{<@Gd?`W@PH7VWI-HsV zfi)2I7BJzX(gOwgkv-jP7Z!N3FWF*+X!j)X&h+*6)lbXt@pUh_clC7*TdN-H;-iwz z8ra)By!i-a_A{9lLtVqVCcE6D>l0`56A*Tdw{F;%=EO2eWrc$}s**Rw#FmhRZa{U) znVwHwO_eLMigspL^}bc~D(~vl+kNj#hMGB)0J8i0em+=IZJa?;`09S&5l3kewNB43 zoJsHtexPmS(rsYUE0j8$gFuVUvw>V>!x+czW>Q0cH@ep^>azZr0ai~75Zym!2p~Vx zj9GD&cz>v#)?|4)-u4gr_PAL@1dZ@Q435dLt8XF(Ke666$!qHJ`c&yd%FMum*KRVe zXWy2;Sym<+C%vBCGeX8|>YMBtR`G6P(s5z7uu2*cF8ZXDD2e1#-}2e6Y`hSZoN&iV zC|rY;ceCvs(zI zHzjYOVRTU>ajavOAQ%!a^&x6m}UWI8}E<9%&66k<5_MUi^C;#-4?i0w%9zROani1yJtc#J?R4+<4HC!UK&L0;AW;dhTAT{g~ zn**P?h{f@uA}Ua_)Hd9{>MY^0t2;p6U4Xo{J_Urp`h#hdKh>0!mXemAzfe>P;-Pj7 zeGfHJJbGOx@NxkvYbq(s)Z~n@XS?bbE;`QdKk6*o&`Kuze7|q`+Q>5Bdb>%^wXFUV z7x%hyofVBGI@h{rz;1=C*8UMf}i zG@2XXyp>O1Cb_edjm6QCymXE1r*PS(2FW55#4L4#pFej4k zmf2o0nz$z{fwfNo7pIG{E{%KPgO{b^;8eYXoW8*f<(f**z~cV!S%MTCw5dqKwn&7G zcPK3M>sw9>8HHEYel8yIz+oZqde#ltR%r<}XR^AjDf^&o6c!mt{1Dk%#o z7%Te3@K4;is&O+w>fE*8`Aort7!p6t?ytnx>l%O+Ul6LRgsRoF2QMtqK7qYtQ84qm z57X~jq}CXmB`M#i6pR6I1Om%C>PQdkO@wH7t2{$#TX5lBMH&Qh5anNSFW?igodcdK 
zIkW+f^1Xh&?SA^ckD^P}#63MJ6pb`hVDYY`%uwf#(Cy;DXF2o{cOBCUoDFu!4ppLl z-nCbLi}F$m@fZ4H2vq(Epr@~Qa9!_PTyW<<4nj!Q&M4vaf;3@vuHaq=`BW5Ss?gp1 zOMgpUG2Y!`&GH2+T`{6-aIhZl_Q9`Lg|86o>E1!X&yVfhtFL>Bxv!nSOuc{f1wPa~3X7GJ>$o)BMqMs{a*C9AOToc9ATi_oMJd9PnOW)2n` zMzJ_?7kqdnZj3L354X+Yzm$nmM^G>*Qe6*M9!???A{((>_7i!hEd_iZhRtz0kq~LS z!ie%e4)w2x%ku|Tu+*CO0xoutVdg=Nw+9u#wL5y$Z@pN5Si@nBxR#411dZ}eX}4S9MXn&R&7Q`X zE{e^2Om#S@folL7bepJ>iTZq)4ssAl+#e;_8XA9?81gH`aoy0hugd$nAUMR6>ylS#{i>pZ4A6w=pbs~b(;{M>zI)X| zXjmvq$illk{NPhP>Kfbg%boCKtJ}|*hC%cFd)($Ii>Fp%G_#Rd`@pS^emXUH34PuP z8+_H)Zb)G%Q(+icQ)FC&=2=*G@cTmKj^)`Z!zDi>M2O9beArFX5~-?4S&g-1r`Xw? zp0rLG9sC!0N}cke(57|`7bZG(j)bz#*!*4&9GOnnZ-;A7XX>Yd)=5hxogqHm#4PfZ zU!{fn83AlLfel(*jbDZPeePsyHjVvW2A;d~tS5Ln-T6;z>06yl>Z<>hXr`60VPo!) zf$6Gv$bxSJk5fV2{)i7PAJ3Jy3_a5=m(aocddttD*xbW)VW4g}m?!-Fd98z(*pK*B z;pRvFq9D%|6vD99AmySaP~Q(Z)y@O<9!c6DCj|CvrSeodhzS?Nsi=#@6ukH|&z70N zTTP|D49dg#nFj75HdCj?_;dmp!7F4z3#r*%vYQZP)A8LC6*+LRQ_~(k{u!GMs$J>40MJ-@P2z0&Z@Wqi$3KuGoxTke(0-KNrwH)5n;YJqMFiSpE#`bge+O;Tf?&S;Bj;USjFW8aNdFoa54 zH9GMi5q=*k>1|eA&pU(W9xlGYo127RdNs8m+X^R75-#E3wM4N5p9ED>tVtwKa9M{K z2mahd4Yk%(6eUdp%&rZAuxWt(7bI3d79oa+A6YQK4rQ$1T5Y;|bWR03Q&*LqW z{i(83M30br5jP=SoD=gojObv?EU9rW^<7iB%FCDHY^0!y zO$qXaL=GIUcTl>Zqv5^4R=fi``LoQ`MwG^IGp!C36^~YDti1@A9KEXi3fCNb;O{YC zBKE&G3p03HTrE~Jt$-?N6t~Kf1ec^6l*mL|W@1hAB<&OGw$NNIuK(6PWIh&LGy4=f zLolR0ZadjPKJ9l!C-8$kx39S>x9oRzX`i3UF0`Ay01Y8gqc4c*Pu`aa=HB`HAZ)U$ zX=T5kJlD^to5lFR*%TOEj9O|u49MGy@g!Tsggf7gq5kL~3d*UgH_73Ix13s|fQU?C z9C%FSAJLw>+!xt^Ktg=KzNrb;<(pY%E&a@u@NBd!vZ~vk0T8?-u0QZVTOveB&$nx#gHz9B-j({vFua3k-clM|1aZhGfI^iG4Rc|3TKA*59)VD>&Q|<}|=D&n` zVhmeW=YBXetg$p*s5DHvYa>r%XgZsTf%f1g1B9)j%WfnLzM|9X9DKe1?l+OX5og%k zm?|GX+`8Gqt*(QwxG4(mFz$Y1L|6!|7W-z7N+rPcd`u% zJvfbD}HMu4fhjLh$1Fq799^lO(ThCBFv`-FC7(oMK#K4 zI&+f#*dS-UclG?mXj5!-a}q}wPYGHP9MnX_)bobtUZ+2hfVsRd_EPTr$IT?onXd0l zA>(3oR6D8`a}{jy7YdBm4(OQI4D+*so7_BxV`t}}#hcBoBIAEs=hnv%>WHx z&iWz&S5}3lY|;l|4DQ~jqi(-)-kBZ|pK8x2jtB zTH>iupOu)phR|YybQ}f<=4lpd0iLQI;xe42@RGU7GzL=)wezEIi`846S{41cU(lO+ 
zTA7D#^^3^Arz)!gA37e!*c=TVe9b6+O9JymMUqFyW57T%Zp1x4uMl$zXmlQ;Rm1xVPRUc=jdFB#D~D$G0=2`+!}9^$b; z0_NilsTx0EavW!`sAEPV47m(>pkh)yXQLVY2=J9$uE8VLF3vLh67F^vS*?x!ropP* zBgDzFROF_aGjE@99M~Dbcq(Z@OB6tTtrxan(!n{IUayQ_<)m86wBxA_FVXXVc;hKO zE%5904p0pS!}Sb~YwO{-bb5(&o(Whf}4&-IK(fw>2! zQKZ$tJvd4cBru9N1Si-GzVavu(5u*$u-yKx*N4rc8!bM0VZPe2wbR3DYnxVYRo6ih zC*`x8oO!NVf5*?F#|r?wXlH&gW38|%BTn!}Bh#iN;RWXLN&TkE<}KfD3`)yt)w2&? zBP?s)G2x*zii55iK2&V1E%Qs<9hu|{T9PhqEN{3h!<1E1X60X8@}{=4K+%uSr$E`U zVO(Z;phCfjd zT#&8vkr6+TGL3I+iviv4&LPpC`|n+@O-@A3$M@Fr@7e*u_nT==echXvQbC6nw4!h7 zoSQ^U@j1TMKSd(tThfv*i(kcjILgVttJTztOUFU)gqLv1P`K-RMwuc~K#dL#UJ~3^ zSV={x%Zo03lkPK2^i4QiwqOEkm%?fYjK}i-(#lAxM}9LbkdLX`A{mOkG}jAl#cP%V zK~LR`8?<#awZ9q9o1FC}oD_ov<2T%9jq8<=1+k^p`@XMabvoryja;!U;i_23Uq5I; zMfE_6>EId_3}d9(Y2Yn~w;VUQ7w8R2aw5G3%4+M~$vXs!&cLT9L9xg;zbxd#vPQJU zPQlHeUzy_%2D*H8UzFEHVT`CTlU@g8<^#Io3!DjNYr&IofEF`r!b1Y|FDpUH!R>Me z>lHON^WO4s{{+69lCHKgn9ddMjRR4Zlbp9}_dIbTle!^w~JYm6+KlfD7!%`P4UO^3>EsXo06lGLqnL@@H!FhNeYQomDFG~>`G?P z2y-j~UYszP%^R};3|bX^{{kA>Um?hKD}bsAZ9g11*$;+f>|d3$ON{F=+nX}fn`}JFI zre{9*+&IUUn+9@s33zCuptfN;Wi z+FNPl=dWd_33h6cp{NBb`C8&SQx${j9O4qiq8v!mKcnN*C4JU3q-J7{dmfn$@9rWw zm6IuQTIfom!=Y_vMPlzS2R;tZpQ8s^*>UwZ0cB)!BBRj{e;k-ynpi5d|8h#Pt0Pu3 zo%ro>hXAxEp9~Om)NCT)`>WdfwTLO%|E1Cyi6Z^fuYt$3jB+v>amKW%xhTzE6#U>;)wjgCVR$-;^ z75v@5C@`PuxCN(AGFW-SO!R>)BI5}{#)is9G&yK@H6g52y-!Km{cqLY6!+d%DF4Ys zm!^5?f67fxK^^AE92~n zy8iQ+XX!q3o7+wqahLM@s$I_k)owr=U;l9-7RSGL2@;dM_L4D=JpRJ>$0j?3tzbXr z005XiE`Nuph|VM#rHj!7=^PV?N`R&({@otDv4*fvHnpnh*EsDGVnP@JD!pSj9M!>E z**P!eZaM`fBu=@oylyf83Fo0iT^C{m&px(uf`Qrz(@Q;I1oS6hCLr(l2=SAkLugDV z0~;oNje-S@`|SX=rMzr6N&ykr9!v~)Zpw!uctskdXyMpjDMscGn_*MrNto`T!+|Js z+8CBU2!-I_eFWOu#HQNT*J`|5{EYs?Ss`U8yhHdT0pB(_q7r$i(sudcgO1hyQXtmp zSb_Ih{@qM zWj#Ei^CFEHhz4v9mqB)Ss*{`@rP4Vrca0g7qVWWc{=m>}X~47Zz?v5;LB$VO1Hf>i0Wu`$sij92humA~p%dI=~E$MP=; zw!(8Gv&*jgi>F(H<{xW z_ll8Vq{xIxsLb4B3CPE-ALF2WEu|*Q2nj2HG((6aCY#a>PsWhP=N{yDMG9B_jHBaQ zUb~{D4`%3l=Po)!s^OM)M<0`)^G^SaX#~6%OLSrXj$4hiNrNCcei>ZSCr9u1G-DOk 
zp%uo9P(Wr!7uDzBD9>f2*;+WlB6?%lAVv>(#RR6sy5HVEXoMc;5vtoS1nUmZCP6#7V(tMNhyQlT< zwhFA9NX`%7pZ|mk9?&(Yn76{`b_nh}AUr-&9*lXl{KtG5&g68HkOlNx;?PACx10l7 zZ*~!ncQ#>iqTeR-r;l@Dh_kRP9pDbqh+~S`oUsL@lNe0SNU}k7{ylJVPtF?Cg%3*Yw(xFRp>?|YiF#LsARSbM1<+{2(D zfHa_-0b*dIqvMSHi?BOyAbvb~1Md)yMA$BV%B_GO6%gyUSkecMbvQ^Hp-6rgI<4y) z8Ea1wIVYhs9`NwpLtCtr8XlyEcThOWk|<&Gqb2h$q29O$hU9G~ZT=pIH(=?Y$U0uS z<)t!@rYS_tnLGdo>|{j9%)BcyVUq4fL=ufUc+^ClB`_T>NQt2QSxkBnEg9z; zAZ%kl%N&0W7Lq2gMg0j}5JwM8Fo7;zvc|Bp9?PPscBgRB>e~ji8n+ZlLqxibX2bT! z$a*Py#ZX3*p0>HqX)s{{hC$*)aLi~HhK_mDjFeUi+ zE)GVGiS$74XT1q-ZouFLBOb*2KlkbDV{`l(-9BcQZxXYN&%-IywsOTI@H7mfU>aOt zB(vJ-FFio5GpgUR7=T~1MuL)w;oQ8>{CT#H|D9{5JuN!|aUdiAm!;|>KlW;8^v0X@ z)^clA9pagXqh%ejP_?iY!T3yp5)nxS)&L8ER5HMvyunnY5>6PbN!viExIvB)#AT(C z#?Zd-gDx>RQ4I~vpiZ$hU>+F5;wh{}2A+f4P~}~72Ab^}>UJ?xLvt3y%A|bq`|v_+ z0S!Xl5E~eRU#EppPN~9pcO_qk_{74N=pU(M~^BR`@B7?Jgbg4 zK}yDnZiH@l;^=|{JviPp+KvNNkW;w=(|%>^HnF$fr7fvE`;q<`_J6NQ zhFQ`VH8i61>Q~hi8jDBe?b>yjct#V4$$wlX?bBs#waSbDFY&JoWVU0Fo%ZpaV+wXZ zIajX@*bLu6PrrSRHe}@?8?Is@Cf`RQ=r+TJg>ld121SXku?oS!IqXGdF-cP@7C(dS z;~Ymv|NqC;H$`U_wQ0t-ZQHhOtCBCQ*tTt372CF}V%xTzOnTOu>Hhm>uXA~>_PgJQ z-aq8;lFL5>5kPIeexwE~P?-~trDh$KLY2j>RPFTmaM(1^a%iEFZleJZ()Tu0>{&(9B#eiZCtbWyP}fb2O?x3h zmyJh$fFU@9xv>Qjy~ksmtHi?G~3U39=b89knj z>I>@&64!=;&&HOyfA6{X_VXkJzT^ziMp>=$U;@cMdw^QDU3#y}>BTIZ9BEFrQv%za z7ekHqknGNK;|6G^G^ovCYA+64%$f(zDv&a|3=hFgyO9!*RZhHvDLa|0W+m^&p0RfC z*OHxus)$e|t`5@uJuX#BC_&d3{@5`gKR{bi*24n=#qZP=TF5V-vUvv$JL%)?njKi) zKKUjT&*gnzuO0okJQOUu6-O}?+j!I~pia}Q+vv)bOTvFxGTZByT%*!(#7xPTFh$>} z%14+ypLejw(qFqThi)tQr|h)j=hE--P)*69fRG&|+}ge zdf^sJLd`7M=R_jh#A~FLB4qC*CD9>xQ&kzLN;3j$RSdELdrX2|89QN%*jax}iZ9~T z#k0{-;@#*IGDQRu&Vub6inFVvv5gEhF&5Vfw?J(nH3Sg!7asGZpo4xBk<^5ero^}; zy6vf1m#>>QOd%#38S$IPC~bj*hjCgpT!6?KyAy^7?{!s1oozR1t-fN-PtW_5dHeEx z_G}nXEE!KgWn4b2bkMDNe~WPJ%u~QqnuIz5j5xy5(gNg8edI~3w!2~vwliB%t6^vB z|6}X@RB9E3QU2%{^Q~(jYuR$ zZa!afqHJIJ2lw#nVzsvu7qW$O)zyoF!S<>3qy!XUpFkB4`BH2HxbPkiWkP~%so`)z ziUz4cmSh+hGCmNUgn?_J#|*YSM%UcEN46NNVk&4fP%nk05%2`*Z@Jz=x}VlPr%4JYe} 
z%#7XC>_seEEUWuXRnnT9aZE%SDjE!H{JSb}jL&*N?$0XrI3rM|$3|~T8<(=S1?IOR z975zkLH4qeZgV_nlx$XYR}BXLAl1?E^X7UPqv!Y@Lb>kC#|O5upGR|B;dye$!fIm7 zg>$42x8`Yq9%(uCI?q!J<)OIjfp`-c$wb)EN{bAT1&I=k21bN_7FA2KJ(F^b^<2l! z0^skhRr|@#?|;k!azL~RqG%MGs(0Xcld>>#>H8tNfz%4AU?SVOpBD#Zp8;^*)_E#`oN;@=O6@CP)}`vECfOIqrt;q<7VS62{i5T01p#%u{8m^h__!~6UhwHQSZV1;@~6L9u9zgwRi+pc@p3~r$%wQC5GLg zaRWv!?VzePC=5C<1wX@St$%KnA!Yw7CCFwNY~&s2fZ=s#>wL!t2+3>N6W8N*ro|6F zGETM~!>+l?u6{0+==8>}&M6w#*>FMQco7ILWE zC&of3M+rKiOSX|j5K?VPEcm5Gi%mHIS(ngwHj~>uO1_}-$gt~xJMW(RdeJ&O7lD6n zI4lWWAQG1U3O#ZtL#%3HCXt;ggn0lE71Ze)V;Gj z{2)-I@!!n+6{wJz%-Q}}gw8?Wc_wBiIU)|wv8{X@d<3)MzC8!;-~xh-mvl?u-4V+W zYB`JY#tgm@V=;8P3#h9%^15ex&hQio+07jGswZMuNH4DX5SU?hndNpeMv)#{Q_8!l z185mKG1V6W^e2wj>$w!gB~>F|{b`_+WOV}OvKTDwIj$_9TN@I_yWC7KK#{E2u;{_C z==Qgol`M(b#3}X)gz27>e??|q7SXaf;HF7xZ4g7iLh*KWD@f?Ewq6}il1n#X#Cucg25Mr?p1|Vh(z&D zb{N&fX+(EYj(nEep(Qj9A?y+%c4~9Q=-n~w@?P?0x(2BOe`KE^m(E*)bOMMLldNn} zflvGUuUBL|r3t(^@VgMApaLtEooUWhSnfC^Ph8JqAJc$(Ea)Kh&gvT2{lslZy4=wb zA05RLir!x<$b>!k^>pQ90e#M*C)sRru+c!?sjPzcbaM`w6I<0X<#}$@>oPb*Np*9q zmN=QA$`bXLfN=sV2n~a(Gyh2#EmbA<>Q)yMuLdFs6GyTp_$=?KE}%C#OJ41iB8kz; z2I6&$wWjQ!NT476f$)8c!^`J(R?gNfY=~5S`Kw#Y3u%!ydTQLF6|BDztx!EQsmNLf zObjfRt|1@DCjB@CSh-C=YZZ&y->)ho+%Pe6AyR^F_~fm%x!=s@2am4eylXbl zPNxy~#ntG`<15 zv$f^gJ{lMDNsD@QqILUEw&u@P_ZDsdu(zPlmyfRW=bBA9Ljvci_vFEJr%rb^u&P6g z;Y7TkNBc7& z@ws_mA8UWe@X`b{lnVew3L%)E<;wM-dx5}f9ry5wJQ^Vt6L3kRq^&0ZcL>;K%@~jc z?=Tl`sSHw82@{*E_=>KNs7PEynqF*35=7@E(#I^vrI33MDe=hJ@K@!mb{V zGd!&_CHil?$;q6ik_)R?jW>g+=jfps0$81TzC%i45!!S?b|O6t(lo==L7F_?&)lE0 zz<;(F+ubg83a0F}%=uwE%k&V#DU2bGy=Cqekmb00N_j4k9cOHi#cn*133+U;fy9UQ zq{G!!`kf_5Bm>#x&A3j|ECyqzSPM^%Ik$E|l&X|M!>IF<4OJPPU1AcD#?(Ru!!kY| zzYI#7CZ&$dJQpPLlJUp{*H$^3RRw#>lZXNYSROREPeww0PSi!B^z~8ko#|O*4=881 zM9pBjjYl#U<&c&!B)eH6o2gS0(G!a&?8}Tw^155__PNV$7m6-*(m$Fz$)qY4MqB3s zAEf0;WdfK=d}w}OZtwQK-}?m#@|WMd#|et?gD~16a^b~tU3f9GI7Spp-w&XL5nGlO zOXk1IyftM`DZ+sm=_b)<7b?g#tQFlWiXmsFj&@X9rFWRkS>z5LZE+hNDu!o5G9~2; zmtum4%}JMncE+s4VQB!R0|w 
z_&U7#)eueeGc6|CWbHJCKW4K7lt4lS&s)=cUHz!T-16FARp^w`uxodA4Qa`-;7+>& zc2|g!+_Cm|qTN{n9|FYGRDjh(JHiDk`+a1|B3JVo>gouKQRKszP`(HkYW0SZhvqhet%7F&RpM% zuEw2qF;o{$333^%ePyB1&efOin1SF7R>}39U_T%XrVHYhUd?!r+FiBLW`XQuA33Z^ z>U$RvMN+X&^hUPB@|Nt_cvd{$@#+RfwnHn^M}cWjcdsw{=E@2o{7t2}7|Iox!7Lb& zJDKUVK_emIx}qR>gYtXe+EB=LbLhflCgA(nL82h-<6Zp|?;@$RPUS+T*hh4%C)N;? zsbyNvtG7S1Y3R0L%zH7b>{b?aBZg9C<<1&7gqMVxg~~$+58QpD)r|f=g32Nc^qbS8 zX$}2f2G+j;AX3Oqh==6w9j}YeU{NglXwWCsFhl;1H)aYxb(umGrl19EhPr8fZ4LRN zeuv{(HsDHh{URAPI)#sRDNPg<5-fSmfsTR|Me>B|9<;biRVy?+8Y84GaIJhwT z;;@9MW7Nmlfd%gl$;?3fZoXpr;8k4K+81+jfU33}O~^mo_(df^`ZjlQ%NZ$08dnxa zwH!fthj@_@i%jG|l!r}OJSAa0d*c)6^VUnRg&Y8orqX=dzlMVAtTCjz=TemI>M0T2 zzfmwihyzY3c9&rA&L3e1{{CU7Le1p*T=kgc`#WkmG(gQCGXfv*p#cqt&~`)zdxJ%m zq7rW(Y&|dfi#od{P#Bk%E#Zy z3E8Z>#)tz~1J-^=p!7UYcqL!JE#qXo9qV+PwcOcGyxHK(z|U2LAQucMO$@?)lL&)t zgxfyPJo9$7;U^x_KbcJZVg~=$9^rRg|M!QWci{J%8+xw6tn%_t{e;oFS6A1${rodl zRZwY{zsWJ%ukZ}Ar7IU-ggDqRBb;5n2o5&FQY|T}punU07!h*~_HtXDg-ZzAdnojT zS{#F%5wl!B?^r=uS+@sS)b;^|i@BbcjZ0Y)K@ zg=>@!2YDj+S`MN7MBdHoBk1ZKa+_SSZn~19r1b{!`H$g?HGfwRQWE-MudEmtJi`3Y zhl!P$t&%_5gri(0ShJ^#kzBpvEV|JrXH<`QDp4kgK!HI|>iIG~Jd>?t;(GqFt4-2s zSwyGtK%n+nF9jZ1L zYhu`e_9ntE$)YcF9|7#--R_~62{ODYGKHcdsDSfDM8J{~WMf%lsiPtr5 zd8Cr!Zzyfh718H@@)7(%yj?scnAG{9RUjn9f{Zn)@G&$mxi{DMu5UHM_vQDrRVCSQ z@DGL8%oTH+gyi|(gpRapW9cHiFe_FJ2HcGQ0}a#+Vy*`Y#OKDG{mP_S65Ud6N=AMA zyZ&Ub9dqXq0kXDkB3;F`=1t@nlv{6BhJKt=kJA)9lKa}JZk*P)|Mb@!qHuubE9uHx z6)3+&r3>7<+-5|>w814eMW1egSTd^p`^N@h z*;UNWfC$*9euC(}tKe8O`~!m&(2E1GX>H8v>zQcy8rQV8ja=jPT;px_l~)4M-$<5W zN;uB0eMk^54t+ugiG+1!R@pT)e0ntw9R^12RT)D>A^kn)4n3PiG>o$`1n>38@5P1w zl(3j*Tp~rwAxO7(t)7r{<%SXu=u63#)2RV?TgE3d-TUMm6M8OC42WKbi>Q;^%}FsbMJ^IapL>h z<0xy4XRZwNLbj}_gVUqG`I*{;xg_!h06eJFn4PAB0r-C(2vAB0{>l4O4oTw+_d(H* z{K%7vvFE*KR;?3ZNKLW78SXPJ*I^h``N!HY9mn8IB`()zag4C$TR7 z$O0)$MsdFBaVhdMg}xR6;~vOtU($=liw#sd(b~ArOiUJkHI(bLz@X1uHScMQxs;cN%4Wx&nlLxAfHoVb_WUAp3XN zYt_=wVUVw{p}`p7Ht6Z1QEAAgi*sM*V-C`2L~9dM+2|Zo;N~oBUd3U|I{T%MD3HO} 
z;K__ATyc2PB`Cr3Xq3i(Mo0yj!6#g%)H8pFsmX!n^7HjKj#6qZ=tlpDIo0E~{dz`g z1z%zE@~&r)#I>-REWmcZ0{)*Cz%+|_gJ=5^4*4-XS&;3Q4aq45NDvqcX_8A3SZNYW zP%>tVSiM%hPPNh!`}+DVvpG(Pdn^C}x!e|l(P&1_lQ_TFL3*NL7-ZyP#tokae8dBv zd?3j&#(TVi<2d-T@H79HZdM#WIU@?}8R~nVYv3T1EJ}O5Df{067cF%92|zr1vzm}V zqk^bfjOmW!#N3#fW7A)mR|UCfd2ak$501^JBRODla1* zc+`3$KuN;vkE2R%@OZMsSULuYm|R7EpTaDbh){9WTj`dR+Ang)Fe3Pbze1o7F<`Lk z_sdMVIY`!(Djo*1q4wd4l3Hjjun+m+K!6H~%zfdfQA?X#Z)g1m^y+@;H1W8jOdNGN za!ON?FiJeTD^^m2$X-`?o?CPThXygb4^u#a2MK9dBBc8Lm=6UC7Da{5OXJSNDigsT z8dk3Ub&!+o6$N!{t-Ju{_Dy8_QKnVRe;j`;lHXe5LohA^4XteuysF1m2vDW_0w*YJYyAQNn}rnE z#4{YxB1N@QNpFbzXIKG)@+xc@5#Noz(OxauFT8>9;?2r+zN{{~mD zfgxv0!dZCN$y1v(qeBn>=;0O2yHS|W$sf^9$NZzM{@4|cWh`nc_Y2|A{w(nrN<sx-l&eoCcM~F)CNg$?x$Ig=B%1ea5hmkgg4C@H*v#d~ZEgV~NJ1oGC)plAoBKQ?x zLn+DRLi$TLaATgT4liy>ED=JKXMjzcXRWF#=dvYs&Lmt4i@qniL}sSRZK$bmb~QDf zAt%;=nxyVkF?%q={-p@b$UaQaP@@KzsO#xq@B~cXots++JTN9QIi*I%*yE+!Y##S1 zd|#DqFL>z|S#E#lAD(FJuf?Af-B2}4``K#Ahis&N+~H?`B?R?g`cj%B5kLSe{>`rrb5VY`~(5I&~Qoq_xaph zhNemTSEd=$MR|H7W4_VFz)Rq2!$2=NG(%5y0oKJjbXH;WN#GilGLBX-#E?+X)tIsi zfB%3fNW!s^=Gr#jK(D4P;PVHBJ3QfS;O1LpP%Nx439qUClpruC&}BbSqVy~4n<#1g z`Nz&!ETe8clZBJk3Dg8?-8x#SsrxZl+cV_%eu?Sce(CQx9u4}-id4gMv{5QN71TXb zw8kkaA8NtVKpYeXU$DtnS&ApYFtzVUJSK99WO9T_25O6)N93Q>EFK<-J?29DFLkXd zjIL0Q7^*g{S4rZ1YvPk#dTlLuEbD$tqH$>D9qqxf(pL;%3v#uxsN7}piR?8{qqv5L zGMKmqrVk|8e6&U%CfoV%Nw9}3xsrUhfs1x#PGe8_I7C5YMAFs3a(?vWySnij>iTh% zli0GY$VgcI%6sMC6plA_$i_O=WsGcFiiApV3lisKllxJSpCP!+u4=lOm?^%W zEL_Dxd05!S$4yca7jZ>Pdk9dlUNvUE52nUfP;`cjEM6X6mV^RuYzOEvk8gMM@ zIZd+v|KriPVy`n$Tril7sa*FxtpBw2t>GKF+sh90bESWLyBl$oY^qgR$9vgF#+Cme zA4%=(hOirg;QRDkLP93>(_!D;`!0Ictp6VY?3^?n(HW?*MDqqVwnaX|BN@spgV+@$ zd1bmlzLmtq6Mjn?ehnion{HfgW~5{QZ;S&nN@8uf=mOb!Auvc^{axYqenk~VJ zE9T0R7n_;bFQxbvqrE^DrnP$U?xoh$MlwVKm6$k^WFY&V4Ka1+RoG1e?Mb!_VbEWCeaQ)-B#pJGiVG9%@G(kKpwfNkb_C0EL?a8eD(w|YjqZX&eG-HX_jP<7NB;!H+mDFxGI%jEoAFMqJ zFXC!zI7+p$3C@SXhReuv3px-=_CFB|G{qi-wq4%9JsrbP>Zze^H!n|HIg$1D+1lmI zGXU7s&ggZITnqXHe7{vP-ma(dUWknE_#W#LGbC4aI`I7_5PMWnlgBb(l;4PR4tT_> 
zu0n6hMcx&PN-E0EZwQpd#&Uy;bSCK1l03~6yP&MLQP<8lsIKF|N46e3A6t3uqKAPg zT{cAelT-9gJ}%M~(C9H3qOt4w9QR#u0$gfR1(w|CwFd)z2td@>WP#Tu8@iZa}kppx@n6KPVu*l~=GK7luw zY##GNBh5N{#Tx4>6^!yo9tay^*fi_vu>)toghUrb;LQ1m@>9}QkNv?NbvyU0Ev-&> z_bvnKWv;CPjErkL4<1T8FYwl)|D*e&Yt1NI`b&%AOwac_d1>Z zK{vu(JHICZ-@6a|>>N5JO$b<+YH~qeaKR20y3jG1voFsWQ{{mKLA7`WKv6q|GXpp4 zK*VC13}(nfv!V=Ch+4Pn zapah2t@cqMfz7`kLNo*#vO67FEOy6G1^WehDB(HTR!Nx3IIvr<-mJjO$UCiTt#UiehnCSW4G^fUsNDykGO{#90n4(o!J(Hs)^X~;9j>ioAfFZL zbm$FX!*;ahfmPMYQf0YhusArVEw6?MT`o%haODnyeTjG-E}}5H8bGoGM;1&~Fv$KG z@}SjXCi2>}2=tnM3nbuUP%}Za=s96^4POg-#as@jX|2%L)Eexz=6OfdPJ`Brz!UB)jB?mHmUf=mjdgA7QHD%_pi3-V_i*@N{*74AK@m8DWuWJ1emw4HoJwlgY= zqt?33sNVkjE&G4J1obyF18!T+InIj-VV?YO^97d(ljuC0;@cAZ4;I*P5T*uO zH|3MQ>=8J>mJS5<%1N8fjXH&a@i`#Amqwq%Dx9mV(M?ci_+DAeeTrpBz))2n?ZhYp zUDqS?IwfOuuo&}`)&1N+zDE+Q6D6Y8{jVmBy%qtT==r{QVD|a3t{{^nKv)2x=FkY8 z5KEFS1u}_qUF%LOJr2w^3kLgD{X$q$&z2|{i|A4H?Y)ZaLgH=rsX44V9WFjEZ$iQE z8*bmPEml5m-6&2sXtjfT`PU+Y4oVxNIyyQH^$75wKITD@05U>o3|ja+PXlVT7D9Sg zqdxWC!G^NB3tF_2KquK`adO{XV|-G`ZAggLEgc8b43T&hW@CuV=M@mYDVP-{#X@@$gkt_(=i{HDv1Hr!n@~d}i@0rjKCWzkrzN_nuX^p+3aHJ0=6}g1z_g6%S z3Pj{cZG~^d`(JM0@~F*w9n<}0eABu$o@od3s$YjQ>D{Ew0aP?R#P6e_C{AIlQ^Un=f~ zzc5KsJMZ2&QRnQA=)yRW#D{wFc{TA*<|;TQK@uvRr4I!!sYWD_klZr>i1kgVRCMdbavNyyIPH5|k1k+L*R|!WW zheZINJ5INJWTt~+?1_xNqs5fA5gS8Qu{d%Ve?0JQKCXU@7Odv{=Zwufi%SeZ9++ou0lWOrNkjLKLD2l6fKB!@X?Y&HvkB;HuD29 z!Owse%k8*ET+R9yNd>%bDVe-wEHlrhO~kk^7;`2oFi~_4+4PQl2|i;0;rt#zU-WS@ zWDq$hPq|)3%}UnXRZ=rAWuTMRG47*Ej-}6o>NVu`wOnD_>y_UL)3j$OW^(pKgPGxy zV5|C*t_Xc0f%w2*cTbRZNf6Z_@msSjlpu&udFKAKD;9vf=VfTY|K=NfULLf-(t-`@ z_VZs@2mIw^6e`A^J6ZwAV0|$MO1?FvwU(Frjn7fUOLERsLed&WU<^E7=O68jiKcN7 zJHCVG2-Rg2hIESEF~-~p4kL14dTM2OrutnDoe9}vRbv@gDqeVAR%tEVTC%=WzrEzn zHi)zlJ2LpgqT)oSp9IZIDcygnBy#zEqicWl9K4Dgzy=(EiFFzpbt>~j439zy=rZ^G zZttjn5+CT(IcSn3J1U$aR*6@&uK|k1b5z_$9Xp29Fh;{lm-DuFjg)-8Hf|IU)?N?^ z&qV28EYqMYTcTt_E4vOML&y|QxL1UjMWI2+$%3S6M=8djE>Z9UuR!%k7!jA+Txs%+ zf!R`N>t5`$ZkeBN-nFM1>qHJvUM3GMF*QX!-lAJt>0z;;1|7s^D?`>B*0P?yUS^$9 
zq(MiW5T#@5@d9xv-B(ycf}Cs*_jfW>M?eNMRX*XLW3g9wQbz02r?*Trsau=}R`$qV zFO@s#EgGs33$UXrIH;;Z?0!NF5FdduhZVNN#% zjX~i!qk${D$^u00{MAczckh!*8_oF}=fKyDE8U<~$VBYaf6j44vMhXz)%p;t)^9_w zNG#vz5CvBD&?j0Erp6ru1i)| zDkmjmevpC{1TdU4pTRE);q+itZ3q!4F!Ejp!oe;(0Ft#+zrjP%uoPIIP&+j6awo*Q zP1q9(R;!5p%j}s|a-R|rMIbJ*Sh#*avEW^c^yFL^!R5?ONB4+})9|n7p5dLb*pVVw zP*^>4B=oAvVGvU^uk0yEJw$#D(XdCZd|XI1lS|cgF^n0n zSkXSISDYM4W~6jl!xb$4qP>@>f;Hl`-%v_egu6gOnGpJY2gI?~^x^x7{Sq*6lu@}* z1Gu!<_|PxURU$-{X%pILAts0~JJ)Kao0*)(EWyXA%S5>As8DI|y>juGK_gwikn5Z( zNT5{HtYT?=MCE1vkb;PUz7`Fm_xh2EPwwiO{uEF{^P{8ZhiA`YwUgV<@IvB{RQELqq^{eq7`iMeOLQe3am+C13#^zOooYG+?Lrk zu12V?hBfijb%+L@O~%X<04RB3&}u@9xY;~gSZJWU5yaRSw59J(`bifaQI;6MUT86y z9uvo-0QDmIT{s~79&~2M)S%s^;foJ=I-8DV1O;)AWieKt?N3ku$072>jn75Un+)H((oX~@OKQH*sEfp zaUnGPhfhv|UXyP2v&QMV8fL2_#vUsUjEf)JtE?7k#QuOuXluYls@iiVU_GR$8HT2q z0?OJsoByT9cI9JYk5USTI2O@Fi+vODQ1>m%D# zQHcLQD^=CbJjRg6vAmMTXwy7(x4sQq&4fnn)=hgTeUSc_1BOtYb_ueH@_qE)%xRv z5GgqN#6}GznuvEi@#?R1ts@acXN@1guW^3~){2dk3Eat966M^V&Sm~9fk<+ZFQK~$ z?17fTM#rV@B~v92E`p1G$i+4Ub3i5O#H@W%X8sQBY=sshJsqPLy~dS>8~WsGiI0Q& zyd?c@YvjCk6eKdW_4vGWI=C6%?F*&Zcw58GmKs~ea|#y34_!E-fj9mKrb8ajya7~a z8;^DS>M-3sI^%}bfw2gPphP6x8GPOzRup(c`o3oLqwmwZv3Mg8C*CQG;bz4yH7P=$ z)c>2(p3@he0B7WhvJQm5!a=@HqJF6rRY=E8l-K4L8#B|x4mmi|LZv{T#%$7!pa735NCx1 z>D~}`GYm}E8dAA&QqoLuKB_KZ^RY~Dl*o#7IoVEcBRFFZTovWR{=O1gF9ZARYpgHLz``yl?CvmlWdvq9R)r)AI_PfG*IPXTHWSN)A3V*AP@Rw-rirRvav1T=ZTd-O#Ee5e{7JiwcSUe(odv`p(~_gX zfCXXUWw6}{3)5Ef(5tKsx( z=1l$CeZ^2XUUDPIO=e*~RMu`=gU&D+VwrlOmX%uq!rm8!Ay0efM5f3db`XR40gC=( zKR%zXj%@uC?!L~AYS(GI-fP$3{Z{kXvVFa-rUpY6Mega0zflu_9ctaV{kLuRYR(%5 zRSx9097C`BJggnP6P)OHqbL_pYZAJMH$o*wS3D+I-Rw0)T+1nY@p_SsX$Y@E|<;*L&qb1V7ZysvD6_{3Yg{Fdk z*`?+nd=MwGLxD0*`@8>`JrQGE;CDlP_>!uCOSJhyg1|IwkC^^~s9;%0s)0Gedo#+{ zc_(xse{N7K+;FxvY)3y50P^7>8;^=J1M|&@*C(8p5B#;XAXl4l*ydm~ODd161+m(A zZrvnR1(MZT#D#rqwYcU|ROBE&Zfrb!z3R;N?~-(}WQqTn3w-XV3vBE54^HlAHWWtK z6_@At3ex-i0p8LXx?0oVDtDJq7&!)l38xFi{g@kPJi85Cf4F-T_Fk3qE#01iOsp*z 
zkv@tGo<<*8Vd9u54F&={ia-Y9{;&gfE#Ma@iFj>XU#4;_81R1R7&F-oS=9%n;ph~u zv=i$dEXrcNc5XsziXuhN8p3QW2;T2BbIk={*RKA=O#IUcHv`4fh7f6&dvH z5>T*F!Sm~=eVi!cOs9I!rz^*aM%#69Dm+C&3Zfm7)SfHPVv$P%V8w-6Pvs>vqM_)^ zDfXt}1R24|g#+dD1yKi@1Fp^|{ zz(5wo>1bg~x zC)>-am|{hzM3d{7<4r>k8p|`}V9E+~o8ZMTF;;?G#r@ri#Glt? zXg?KG?uR5T@%kL(4Ha#~5ZMmuA*3qig=4zFJ)FJLagLE}Ma1GUyaE5v3Kwj0?W5qu z0H3>0)%=a~w2BxwvLA~zLa;egcsf2+E~z-%vebAeBZ+?`8aMKcw>SAdiQlKuk6T~e zvBmLnwg}$CwP~oFwxQjQ0P%NADTv990e1xZ|7igPRPPTDbH}3aeV_NNyx-2)4Uprd zj|Zi=Jm7{8N#5yTP#))i^^u4H#rtdPz&W=oZ@G&kP2XZeVCwc{?21adge?{gC|?A^ zu7oH+QN>!&)C+uNID?YtvaYt2UaZ5@-oYvt4~`UvsP0JDFVXNW zypqyc+xI}iIFualjx4kEBJBAz=d4iUT^1fI^`}*HVNomv5~9Km26s8z3gV@}MAi(J z#IwohB5*QePgASD){|A{xzk1dIhjsWgZ208V57um(ZZj+J=jFmU)O8V-F%#?ek}g_ z02g>YBlo?H`wqTbWH)E@zg!s#3_WfNbO)R1;5xhl;@WPCm6(|#BhQw;Ccok+oRZ-X zGwD^~a@LP3q*6i(|IyF$ukHjPZuj3G$JskJ2x&WtKvY6dp#sE(zE82`x(yG5mY?@e zlU;%oNmYTIR{ucv#q{q@nM7v!4%KZI{1i;R{?$Brg3sI}nCuB4guo)zkPz!a`%9i` zPN|iX^Ak}SIBYfv2JH4P?u=;!2Kg`X3o%?dY|hA_XZO(RDs_#4oe}KBkk;Sj zC=x1vf}r~%ezY7}6`h|!VD3darF%l%#l0pQAu>pX#K=vmor{KV#f0;Te;iXm;NlbI znkHurS2$B-HK_ew6!7QW3Ov06DI|&X&$PS!FaGSs?Mv42tUMpl=i+U}o-ou+zD-OP z4>(=NKjpY3>@570;co-C>~$hnN?XIZA|)Vof3788MjAZ_opf5Y9xY-%ilW^a~tm?TjsRwY8}}yTxX|t|eh5k;#qc`lAR@qH zgB6eW;~PlnIk=wDOf>%$qeh#iXL9nEJ{Md6{^26JfZrp|_rts5&1sZoo3+%U5fcP4 z(~!8Cnk@PFU|q%C&K6*xovRYC1NoUXmg0Ny@}WFa>wG1v;s%!yr;Ykw2GKA&2oIQD zf&|cD)YCr0S7k{Dk^-g#!k@;aeq94;94uGRs4R5`@s#A z+Ca{S{|myvtI{M7yej3M*nKJ4qj*Mxv1P*@vM8xHDIcOEfc~9YbKj51Q%l}!;?&XS zb=C~N)8fT)T8t;T#WHf|P8L`4nc=2vc>u|r4y*#%k_fi`TUzaAY%I>6v)UJL1)Tl;W=e4gLws5J|+md~QE*JK1D0mUNLt^E<$K5l^Ib zB1Ch2k1!EoJPNU%ZjX38r5KU9Zhu#FC?`Ym48OyKp1in^GR3&tEkj1y7lPj z2!&BqO@szt=qqR#!PH~fMfjSB8X5+I@~;{9dla=otQRWK2Zr~uCfiWz=wE&c#Z3`U zN=-|uCBlM^=KO9qZUg}xUtaY|4gA#L(gm93DT#@M6=*EkJ@mq7^P60W<0586Ti7XO z6zci^sM`r;ZUjm@Var7=(HN!W`jb0y^q9-Lb)QkF8CVt}^u0+ipFl(PVSB+hrs{1LRy=zc#BkzWl48!W#ZpL)^^R0) z&+*}5DfA^hm`0_a)cEx^5oaDR$FBYrlCCqfG_s25otR^BB(PDC8$vOXtj7&W*~-7f 
zl)7P9{`+r5$baqH4VkP=TR+W0Hg`+k2>+y`!jneKgc~A>de3SG`?MjzirR{CA7f%LhZXhqB#vE^>3RcTP+ zWi_%yQ;A7DJ>nqm@`dk1J_+nBE+P>6J+gZW?l}4(Dr!)+(dk0C{czK<{07;1`}4KE zBfZ{l;089hY|hptGyYbWk&B;{$ph)gV3Qd;OrFqNN1s@{FEcD<3!w^9aNAZ)9h$>I z%QO2c;tV^#z}yCI-K8+8Re?-grDo`&kp_hOyunU=Jbz2|DLHdWgMg}gy2H;vA@P=< z{JD-+7kti|C|wVHxF_~XLzSNBZDSU&1D&>(*pN#lCq8dnI@tsxJ6ccebK-R2-)2sT zRmWB3WTsG}rj=qrr*w5h@*{80eA7(&`u9e)k0N+&v{sT5WPa- zNIared4BL-qG0yLbW6Te=JAj6DqaN)k#toL5U-%P)(^e)lcJSAbIAro zh~k2-JE*2OdVlR7jH!u!z`#szUpw3`F2CIEE8gBkP1pCX0orVLI|)R$?>i_xB%;*@ z7GWuv&yv_H&gmnUYn8j*10b*Yh6hWh?XMpKoPB9`t|CRo;NG^-vw!jd2un+8tCnb+ zmgy@A!~0&&+IjnR4maClWler7`Gews85o+IOZIq-?T1px$r%hd|5oLnC1j6tZ z3^1x3NkjC6kB4xhTl|k5*6w%&o`R@EtI8B#EV}1Ife>{sl-Kwx8D-ANrL-?WkdOHr zawbdN(??7nIAA2ERa&IPh;&vglm)>Ohn-2iF^5LV5aAqUIf8i9sgdzo>5I#3H=qq}53fU}>v{cP`js#Hf5d z?Xj4P|FNK3)etIw(h@U$artFr(A_Yvb<64hwTm0!vVDjlqA_0jxi#(5hjtKDHKF2% z^^+l`LF@C_{YXDw9~ASz=SQOb@scyXHkO~+o_0P1Gk~sEcKzzEyLbgwJH?pN=pyx*#fbx5md@~P%*)!=c~4Of(%M& zXu+Ut=Zhsni0*5I?ki|H7cqP*S{%&Y1}PL!oC5)Vz}SxQ8MV{Z>9Jpm6r-gEH>`R> zbeuyF-)!{bzuMpRq+l}=g@y*~NG9k&(h524#0_N7gzzO?u`V1_dlY+69q#4{48eaA zfE0c0^H^kSi0?#4R{&7upmePf0J_VIEOD`$5|+AhX8dOs(Zfy|i{qOBF2N;ZbzQTt z)lhN^ZONCwZ|(5&=7HKT`AOxjvtC-7WiA_t z+fNubSSQ)PUd%+Obc#nT-~+Ptg@NOtoeI_eu|Z8xqijY_2Yv;Y%7R0)F1mzcV#t2m z_YFXJRQ$?{8*mNRy=^r?V{TwYX<5j%`-!9DE_SceNSXY@NHr6S%9W=U{{8;N?|s^^ zCnkZYz(i-f^|20UWV`xGH2F5G`Y!+qBW-mES56xEUCNcr0r5`lv~@66K&Dob zw*K<80m&SQ)N-|dhD3bdc&C<|y#);et)cjtTS{F(Ti1)>_}f7C0__jU{+aPhOoO*r zoUHDtc)@i&cGb_&yf8ePgkw<8lTAvW{=i$PL!uq{wg*CLC*@QtOQ#Q?Q zB0iN9tIP}aSr{cyeAL+imIrZXXfySm(2f<{APJw11Ss>89R_+TNvKA8=dpw(-`%Q3 z0*XO(rFLvWH@q_qU=Sv{1LH+IXJbX{66RfIY@cbOX~>ckC|-K^haBxS-VqKIyS5UW zLhA7Kv7{-sfg}tBvnjz$q^LNfNW5R<(-JYaz2$JIAr+M|=DR1^@H@ z2%GhCg6)j~B;9;whSux3p~JpumjkBi3>s9K0D#mlk;M1lxx~EW=R94}Ds^ke4t(BP zwxG85L`Lqw8C`&tnl3=4V>^)`i@`Xaf3N)LD9{=rEUV=jPe$Hw%`!lwop|*u$L^QZ zi>WX~x%9T0Wxwhh^$7}hR7a*efBx({!Bz)~FVRG66){wcKSmDCkR5Fgzh_$-9t{LO zSUCDMF(L!Su!}<83j#eV>32_k<-xP%%@9M+0K?MqhgS(ML0?`6pO8;8p`$c4@X#QV 
zx>FH5aZo@U$QlAx1*gr^C`|ewh3$QBAp)IXz}P6Jjs{V$Nq?ILDqMe`Ik5;OeVQlh zg_?%XmA6*WGEVwZK?1v$3mY9CQ~dp_q^j=`C>R5Y@C*@cOYW+`4aJ2dc*UYH9#Qk7 z_)t(8*rF-4Yq~iu_n2xTMH?(5{8Z&7({JUflrLud8TxydJelBr$$axzs0x8>!TGn^ zX=`k0yx5djcAVK%$Tj!&k6>-7E{sPl)B;!2QE9x*&yesS4ID+| zMh?)YW%&2~4@*bjom+J@TK~%hbw|gl$-2XQ1}2tU+@ucGlMIe3dS>|~eL;AW{gm{2 z`-wJ8AY*Q#y+Q5d2u+`n(W>o0E9EG(xsF72N8^}<_4Ob>^396y8;G#q6|GW+7z+d$ z9s??*EyJbY=HRL+;_i~O$a4@4FAbw*B z>1(+E?VIkk9sMKeH<-88VmA|x<4K$^*qgvC{0X*_HrzpTJ@a<8JE~Q{R;bu(h-HS4+qLG#hdD3PXn+3y}XFD_1)5y1SW)s8;P+dXFNug9QZrse>`AqR}dfg_( zh(W)%1DD6Bd2l2s+5V#^CuJWo6J=vgDN$&4Mk8A>iCq#x69pfe$R`Smpn0Go!J8z` zVbGNiYsw!|_iMe^&HWg2;I>_&202HalVUp zp@3^o0Yl7RR8xKyV`Kgat!+SvpNs^wAuK;?tmMyHi8$;%7n%r){t4O7#0q8w8i}lZ zDG0a|CNN)Es43Wf?pAb?0(FigmsaPvyznw{SR8{Xdhj2jjI9F21~WvW`(p*v_A6?> zDVT@ToG!&1d#h1TZxaS|^8}s4X!Y}^uLOf|Op`rUpxxj{2BBvl%|9$znyzkNX`@#L zl4QX(1(TeeBXO1TFD9fkv?N;5E)-AT)CquB`=7>~!_nC=Jm8b;K~i$;n(lv|S_LA$ zOBOUHsY4RoN-wWa)}Mlhbi=wz7P%eYGvoiys{h{}fn(@ld^bC6APVN_q`<~yVI!RM-U|_t6 zA0JT2eJi;8XUW#%z9g}IFwxckIpHCC1(+l#QM}y)_cDODcSyTgjkzz!!B`JPmy&hx`iPkLOJ5H*O{I@mbrl zKa&8(A;h$_o+1rLw%06ZKNGT$8VGu2n2N(;sFe758dj^l!rx1n(90?jb=>H(sd>T` z8=3K0DAFX=#e9X03fFdYF$*b}Lf&73m-dFWTs*wI!w&v;tbOmd-&+D(q%p%(OgK3A zI@^3~*2w(L6cIDEjmX#C3fonhKzl{9a*Ff6)=%xO_6t4qaJ-&Bi7U;$^&#KyLoAhX|ZX{_K=-6$tAEv z0v#dFXP9r8zn*8GAA~>Qy6@lt_3g*qSX$kTOzl4BK4rMP_qpSQ&Y?+8V~K**m3XeJ z2HX`@@(jvMjl6<`T88~Vlh(n2?>MwPJ#?^>4-Ob{E~1qjyxC2k@Vh(gYw@6n1mfL^ zX3OXO_I`_|h=HTxhLgZ<`{0x@DdlV(ZS=bkWL0#9~sO}Apn}bc9i5h9LJzF&9HpRP;P0nZ8vlIHeB=*DN~-99V12o>IEAel<0pW>qmR-Lk2XtI?X)yZJaqGOD|r0(n9X=b-&}1M#VK zpk*5PEL}u+5K8q$*B4G(g;!Ab-?)o7RwAJ{GcLUWzG|_guJyN~H>TPSe(qoW(ZmE~f-40* zCJ~If7#xfW3(oBYftKw!Kw=G|OdnnM0c6#ce=rn%zk=QKfg~HLX^HuG7cLNi)WV+L zH~}z95;-&}5ndL7=N{zDCbQ+*-PfK+f01 z?5({!X~>4&;yeT!$_DN3N8Fxq3VGWZAi#&to#Dm3{y(Pedn7Gl1@0{ zN)zD$;h!FKFIsU87A+y}l?_dq*Ndt!lR4rtXt#vlW785a#Jq@JmML zoG62XjpVQ(12t!oQd2&PdO~|BPA5;A=cRB{a3{JXp=!>P7d@&hE*1~!3)xaoB@jrC zwQ-Y4k~o`x;9Sl@uNhZ5&9htuaPbK8jy8`aT%T>d$4A9h$H<>Q#r-V@;5gP!;-#ex 
zn6)AQ5eN(MQ!K`u3LQ;7LG3P*)+Aoih^nH)wYv0~%!Xa9k$=CK-5Q$}@(oWA@+6Mw zGIMsNU`sd;(~ElOmDU#Y40i5CnqAMQ&d6=5*9})iA z&3N8jn8ju9>);uyvvq9UD!Gz!g>ZL9k{-k|10RtatDr=6^Fq#F#ti9uT5ToNm7jn6 zEdRPM$RhB(opDjE4UL7EW3nAy_WX{Te!?*HyJ77EFsm}j`97y0-|S8)vvT+#tx|24 zuea9}MvX9{jtap88u*3)zXw@No-jp!f?2KC4NsiV5(#_#K8;yA@e}ZSWm?Ypu^0o% z*QTeoF6c>!%xY;mDU7OSrB=+!jnT~I%Jd69@i60e3XcUj@6eaEJ7i@SOeS>Y|LS6L`bKP zhJGx}{gnZDzBOaHt&m9YsI`Dqd>C$8B21HDegy}z4;^Qw-=ibWN??cHC=HicjHIqu5Jcu6T?GHg4p=ZyKN*Uc}be+uS>|of5wi zweMZKrEJ#?4eukQO=|gLm0vM?$u>CPRA}o}mg7%QUWuChPte ztE6U-$raDQg5RiIJhFLL>`Vh+%vcB}+2ma|Emtz|=qcw~k$9r-tx~8j;=gl8m)?VH zOq_t3w4pLcbxXs8Z`{!=fYnB;O0XfJv-^^Y!GD~%oObaJ-{8{p$Qdf}DDSlKEx%_% zHmrz0oA0~`WNvXjKd~#v?q#cGmK41IeoI0P0nda^2Q@!7`?-4MPMok;C>Wv(T&N1W z_yF6Z-zJgHk`D&vV2M@&%pcPHuBt;O(36aVN`={MogZYp_8M*eQMSiO?6k-HKvIPt zUjxq(#4gr2QvZ0~lbmF5C1q$xVdWI))$28nF5(J8MF=X-dx8RsDjWxD3ZZTK5LV?P zcJ3+k*G!FVHRSt$OU;>c_-4)KD@^^h`^kU>R zoT$0mxMKwwL()9##QnxT_4EMLo2h0%%2D+kmtNow=&NI02?RtJeC+GLUlq2s z>H_P3cjzD4`uf<~`+64sV#bYGE&~w9SSuDiG16c5^u2>=81C(F(h9xC6zm%M zzf4JZ&p)_IRzldyVDZRR64H9?Zz-iQE8VG{QeMA*Sav*Z6J;9@>{;G_h^NNj)j8*q zi=3gX6U^I6na7x1cQ#>D;YZI;#;!2;BJt`B&)%3AnITj4B@zwNc~y2&r_Z`1!C0Ak z_a*#fHmx2Q7~F)UWvy9N>-79BwLD(&x)Xq(^C-)vaNjv-g#@PqqB9{-2(Og)c>%M9 z62#i%hXHz=YCE8*WoCH9h>oJ!6VAa7jLeR=r%3ghdjj}zx-nUM+E z>GjvVwl_MX!P`sqP+No3=oHgnRAJI$aYJmr9WV*i_wHzg)D+Q7v}mI zf78(6l&En}9+I8)*tZWq?t)=s*E@N+#oKj{nv1)8`J4|2#;3Ge6N1!R|6FP$|Sj;(5k?x3rh4WCcOsDUPUsZ-a|BnS2%dXZ_vuxy=q!N{7`&7|Ip4O6~ zk_QuAFHyQm{K#Ra=LjTwt(KN zbvhPMe)rAfZ3}4Ali0uN94?qAGUO#klj`J4QjQc2Q7c5zPACB>(8PaOBu`Jc#`hKs zw6D%h3Ed`fL~5N`2@n!&HHIzbmCJ7~*<8DK`MxVYD0Uey+`YquC6Dj2q7Maenn|+I)4!(Sbu0F*{l%HqPTH*9{4xvo6 zIre?58Llk!f>GfAh-=W}8>e-|uZf}8&hK9zEUHnx-Q-%U+iD*@*xp8i>svbs78BgW z#USU;6x32m1VSVvF#L8UI2J+NoA9*d7yL&5cMHj{um&7j9lpam#KtdrdYGCE*=%0~U~2~_-mfSbdj2meBt_X>*j3@%S@}LypVfntjdU^R0#s72V#@Ool!qs% z>XPdHS@iQ80ZP|qSfNpf+IS#(W7Ju4wQAG7{4}V?wt59!zd-sr`1)jiF8Dn9ilm<1 zN^+Z!*k#u(iHdYYLMGZ6zI)mn#3n|za@X>_3PE=C`ouE~c;ubs=X(;W!`y|Zq{u#n 
z5!3UHRcub2fO)(vkH04TysZ}=nGN_v{pl=q3-v@SIAYRk0!1PL47#z)IV=iwQwEd~6ZQqZulN7{85QptW6 zFbh9@9P|Fvf5dI|R_@;)+171(l+J#@v0TLHt&p5Ej+6JybDh z^rNa=-v#5MQXazVO^qpJG6pQca?k$ke(wF~`P$@vo{+xUTT7AFX0qjx-j^$H6OJDg zZU1ZZJ@>;Ze^RMc0?}#`{6X=ky=I{|sm?N=M8DrD{MV^qUIZus8LoYPoe+V#f}LU2 z?sK_$IguV;Ycv3dFgfuUCaR-Vp|rgEFCeh_4cW>2N{C3vKe|hAd-s8|@A+K5=lv9t zk4*TlAd!G)(951j8dA;Hn<6A!!I z(BymD(2FcI@_379y1*j3ztmoECs`94QNbkt1lc-TG_UOc39z&hBoY=L{Ce(x$0QQ; zhc57QLh3-A^bRvoCCdyR)NE>wo(G~W6P3N?MR_EZ%y{=kG=!xt9hI{3hM`QX-j64q zhHwg5IXZI;H$evVr$+r@H$LbQmBez#z)?;Ya#xpB$y8WEqve3?kbY~A662`JG{TDq zVl`@T)M?x8>^znBk+Jgv54Ky2ONoYNFgfj^7GENrZ4x0S=~4ZIx=W7S%#r1^9}#toZ$uUa6tV9qg% z?jjKucVh+`+RX{a&k8ywZmrs=HF(i3C7WfwB4{oBehr z8_*V}h{oY-qarz%UWFn!RGdWEqm;KK;0HWmdwSh)3Voxd1%|$H*cpa;wxDIVm|Iy5Jex^R;v9nyhf=iw2_jQU3*K^ywA>Tx( zfxWP!2z`~D+nql>t(9}zJ+)lz08|~Y+>vYF4nJ&2|dC%`y}*THP5^Oq(fwlX9*6y*E@d$ zl=>7K8B?K}kyLlkYO8iVwNT<1Y{!xP%pANyX`}_Aa@q(R!9m$-+!3XJW8;bC^6j`d ziMIGmMB_|$fHm!L-e>0&YO0|&QLkW;vCx0OLjbv@0tV7dpjCKoWQz<<5q&m@tZJ#^|?o@Q~!9psnkW%TV z2xtCp>hIHL*$dtTD%5t3d&5~So6^96@f?XYJF2S_QaFhh`g~tZkw9zUjj`Gfh#mUjiPnL#5!XK5DAR#o|7uHT2!~8oMQx6|DofHKszRo zHh|i6A#0&qp&f)7Mw^x9_IQZUQ(D&^Z-}nFcLwtT^j?VP2`2pEo^iG z$})jyw>Lp(WbMRHucyz5KFh8-qO)h4AS%PKe>c(RuLB&)XGFkrz_rKM`qvDXwsErS zFIJsYq_>a!*}aqA9|C@l2pRPTjhHig0_j=Bv#{bC z^BTr2AGcVGDO>UHs7K%PkE>J#{N6)p_(}^l%XS$i1u!(w`aF8j$srOvKnvhS{LIdn zqbH{i7@K{pVM)Ex5u(9D-Vu9fz#^lRF0zpc;_QlYTfFc=9Tt|+ng}Zm_ibAN5~3$V zzRwk{Kj1tfhHnsEJP=L0#n^MR&@RUk{_WF(mq<`rq2RS@MJw$=%Z5trL}92ZmcW!t zEm@^;n}%7?FR^{1$pMS;i<_$pvOrwVA8;qKKQQJp3JYWg)2qbDSM`x6@=?OzlVZ3| zj*eS9fz?)~a9&NFDJ_l)W&c_?`I%WlH%#p#9SbQUdOAH0x>?3MB= zP!?V!x^A#@@N)FLdtmvz@%_?2Y1Lml_W+j>tu_WunaDEbu2hGeOTa6KWLQOQuqT6y zd=cy2H6&EfgGA4~Nz|IJwrLgeWnkdondOnNq_;!i4YkOlQFP!-W+3oXqu>yc(ZU7 z{iVA32^Y9tQHb^sBkQg=WCB@zR-Lf(6q3lVIYFXC)$aR`uzn>om6c$8!<1$On7{z~ z^oHf`6L-3s3;Nr;Q*8a=_>u5$-Hws-Bu_=H&J}7iVnW{z{!Qyr#c9kmTaCL10C1_>;29|yLjuSuxMjO%voiZYT2_vr)rZRxcj3Qm<5Dzxp4ThDVVh$a^`EEFNa|Q>zG%Q)1&KE-fKdCbXC~mF3lSnQb`+KC? 
zH;UFdt#SxoEhLnyMp=qQ6M(K(;tBCJ|HPPG_@B*efolgqV`Dk>x?T8ll_HVRHGFs> zk5aV1rao>#ZIp$6A+%qcm9B@yvlc@YlxCI={G>GwkmKg7)Y0 zcXnU@53qV5;mrJ=z#K2}d>pu#@)3;@E4czFq5|;{STpk=`+@-{0{-{Pz&g2n_)d+4~UjsM=1iWS?I` zJF|Ov_oTl>??L>-KGW4T)&P*x17#t7Wszcx2c2FO|h4T*f*AkW=i!9_0vgXiaM9I?JsH`SL7BW?xV-2#y>w7`F zoqiMP3Xqmi<*=Uw8G;bYu7Hb#kZdEQBs&M~Wbq^ZqmiXxq`;9l5m6MZKQAZGb+J@M zyIWk9uS&u&;=2l-QxTaHe*b#nlT)c>pDZM$L2j-wm?>4WSTlS~nYSdJ%qYX%H9sMm z1*eV)YA|DR4n;+CI%z}@x>~q$``z(p06wNZ}F05X-}p*T(LBc}QiOU1$9) zDgoi$=uW0mS4WqcDveq%Zmc0lz84i7#w!>u@Uo62DGOpSys;I(h+!Vq#n8yRzI}57 zR}v5{==JhL#D2y3nWd6%HCyKHJN>qyhEP{3Ej46^<_ipt6#Kd5P&AC^B+$v_PC!#{S_E2?-%0>JtlwIeggp*l^D&1_*P!r%SEDH)XF z^AyTq(qt*o?7;E|hunt`?EX88!C~G_FoinVbok_t z?H#MeHgsAMPO=KG-GI^MMz=N}RjRL(QT$`9!+iazoWHyEKCRsN6JM{rNBrwYf%2)uqm(_(?#xRU>V08kP1G6|d9(zt*ua70VvtvmwheeOGzV7DbB^1jNx zBUC|G%%bY0WfTXgd4NZM)JV$KP|;ADXsuOLY`d)Yuz*#?O$0I*PLW3Ti>1 zI_Ed<(t%JzeVZ(Ut53tbm;-2_P$!a(!%PX=+WAR%G26$8OSE-ngs5!Wsh3gVDnU`X zi>~mUn5#Jw2n3k!;KNB}yj|nqoxiQ{d=IO&x|N;gzlg-$f?fd$ia2vf;C1f0eN=C4 z;_E29m@4s5@Flo{f(Q4D+8VYs(VVB=gHIdA99FNW_Et`tfge>egtE4DVo|?XX(~Gd zBh|=CWJX60^^n4AZg?CdF2m|nyvqA~Nsa#FH^@Gl{k545wg(dWp7c?a%7$XUHYWVx zU*2{V;^&DT#j0~@CR}noYX{!EF^pi~#GdkLGFt(B@*XToLC+F@zhyGSE#@2 zY)4hN20_X*ax-YONM1fnNSTvnr}=!Xs6vjZ5Y*7A7{yD5i7_6zTGec#JNV|X%ah`G zPDaa8nv>7UeY&micm6XLI+ z!f%sre%{Xgry!DsK|x;ePX-kxn+{3DA6R0ZC!aoKN-!g~KlKc9_io?8GBPqcy#3-g z4mj)T9|5AxH1wB4%o}92?2CivriH>+D=_TdP+Pd6^zi&+sQ1Q+jKkALe5(V|s0Az!j zIw`7HsOM@%rrCNGX2b6}DmH>A+W22?L|qkJDtUM$`?4N5GOy#@t^O~NFnS$C^i%{y zzREZ>-{QF@r()Kqo0NhN%C54pmlbB!RkVPIm4Fz-jQ*i1Y{Q0fe^qZHtz=j;lI3@z zkFT*!AI>%RTpbg7mvz%gJ@^F@5oq^B0DEn|wc_vb9vTw^&VF^$-c!-&L^7%9lM{PM z59BonnP3t}C%>${wrO==Dwm_muBYUIM1W(5utbr)24S+;XKJzTe^893t4 z8}6HFhn|p8zTwLT%O%P6)bS~PmQis)n}i-ljW+Ho-xRI!cRbKGYCEsOT!W@|o}r0T zxfhDDQs@5EX0GLg#)>VJin>`vv6QG`V@Y=KSuHG+s)4g{a=&Q7&|};(&?89z@*|7y zl@>f-tOy_Wyv)ezyN#~kH_$M*tyD@{Fw&k9zrImv;{G zR%X-45J76{!)^iQ&+Fz=*ed$Pq--ig&-*gB+vNhg6=ZO+m(`)QKfEN^!)9;&3A>-j)a2_cy4sa3q@CK9Hub(MaOdRZe+yar|PY3&mJS%Uh9~m>Y{Av_D(le$6qi- 
z^xnaT!TFiP;|+WBp9EhQZdYGax4Vivn+{bgV3@LVP7TSk7%u%v@=#hJX^MY|lGHD)qq8s)-Ll(}NFSwEy5#qngVygS*X zAQ+I1nSqX4D$k8G+!7EHHIWmZqXU^B)ERw&&A+o}t%5>peP%L>E=N=im@ZPu0nk%b z`|mJWShu{fXk=7g?`DEjW^&GYXy!@%zh*KIMXSVM-xQBZ8A1mNMSODPkwDDTA#5>b zW?9Ct^bBT&L5#0*{(~o*J|nA#mv7GX>Dk{Ooz#Ss+5qaYIHs zF~g8%mU+B9v9GGgOniC=r^;t>^!mNMfn|7}ts;WnfPU(zkV)&A<;_9Leg; zlLuXKeU}HFfFNYrmUp>x9i9pRbxG^YI$~UrC$k(jv`8U|v>7YJ>LLMD@^VYB$nWvgWW)vjN#9?cRabC2D_PU zoQj^WW$=Gqjs-44<3kZahRj9X5Xna+D=+XfOpY6oW!6p|tJkdNfyp2)%p;33ooFS} zD~~nm6L)A^AD0ng-n^4sS!_BK-$r{9VmO?!!Af0q-Bg zy9?%VvqB%#EgM+4wX|lsORWGMiJfEQEw4Za|7+>z%l7^t;`eQS&)3oNhS5kuTEu^_ z(dQ6epCIdgLD;k3qUG5}yLuhm;TI~LUq7~BM2~yomc=ZxG321=q7v~b%@x@W(Qnxk zBRMo{R<2L?@8w$h=|0Q0uUu#FS}V~Y^DSIFeFQ8YlBiY_Ag6fw4k4JH$bI*Cj)KM7 z6_1Q{$2MNl9md%7yVacre+u=6H=F;1+2Wm{#&t)0^@Cn{Q~kZ^+!^+0ThIunGN-ck%ew{huT`J;|D%>Z_g{QKB^IHxgiXqM6u_6#k1&B1_I6d_M z^9~3RN_{Jw<11IW!SeF4H7X-72yL*Qn$lrZ z9t`3e3$g>ZWi^Mtaay0#5mMtIRf-9C;+K$Kn<*QJpgeUg&C2$vKacCF-gND6l=$GjGk7= zUNErO^7aY$-C*Sw&UI0&2i}*sLimDgU_5?hgg2A{sxpib=UBK+bbBJl#f8k^IXTw4 zV4i0of5~EF@b@MZni56CGWVaWsmSut(HF^Vw$NtfD7<_NDag)PKvR_%3^_uV!uD$5 z;Hi@S%xvzbQpdIZ$aE4R{#`rEtU zxe=Oi3?ddNfaIp)`o0T0 z7=_IDyTR~VXFrxJ)t00~dHjd1viuSh#4|NX(pZT%*PMnH@*!6^r_I?FhtPrcA|+Am zUcnu*uj@H;ts>9ht)yBa#5;OBBe-aJG(H6>0QoZq!T$G%Qq)nj_Ftacj+L5BnIW%V zL3Y;&@)#03#!3v;zAC0TpaNmz1YrcuN~u^jA8tpR`o33h0kMYf@8~V-QjsdB;YZis zvY|t!phKliv+Lc6S#^u64*7ql0R%FeZ2)wjh#=k3Aa zx97Wn8}avv0$*YW!Jd#V4$rrZHy#e`)mBC%#x;e!p>=dU2Z6Q-P~f7~I;uU4L_UNf z^ci(|bb3h|5CSxIwJq!&rSEW1;vf|9QF)>hZdo*_Ff%<&L_E7<`_`C|>{d%dutt+b z=qCtNbwwgI#yKQq!Z4j9^C=4w+EIf&^2+4PlsBwBSIE)}_K!hH)M44*O`9tDFnrJ+ zIGG(dkKhbCc)Uca$BGeWPBG=(8J@15v9s<4d%KR)r!m0TV+KD&28M0DcnX=x=q+@5 zR=i2$HDVQlN9~J89-VWi6M2FrJb$n78oGuCa!p8;;i4NPXgK7pi%>I)7*>s&uv~ak zXZ?u@uBpLCtg<=&IV;q|VYHemI`pUjUWbXeSyWd4l+6yO>4ZPL?7d_3Y4u!acmnoP z^wC*^*8&u7F=xovWsRJCjjA07ao1K^fXBO#he2kpx)irVfoJO z-_7c;fACZlQVD*|5od_LJ6RIGY?E0BT)~C>MO?Z00k+y>Tbu_fPQo_nLO+ zu3RO=wQ;^1r}5c(Al_aWfHZ%Dad>dS1aYqRd@m%#!&0O>3o?a8IV7sb1cHCR=A!Th 
zrtrroQE?=2xO(_ZMF9`HRBv<;`?`I(e`$Fmwfr1!3;k|Fw8W&q>J`O|O8p);6+wI& z0{J%SD0JP$Tp3{^gH?o2J9QLF*nmCFCU9BHC!Xl32IXv$b@{!%7K*o zapS*Q6G+RBq&JPtd&?)Tqnp}k4kAP$VTOIv5?-wzhM50^4_Y z;jmi^jr^^$(3Tz4R1j!{IiEF|f$q1oc=+Cy3=SYk!9aq|o;XzfjvVa9`0So^?9$ua zAN1?tNAUD=in_E3!Jf_EJ3WBsSi$FM;w5{KX|YHlu|FMm=LiSP>Yz*5puqgD216$# z4J5khnbK)K*8Y|1^yJ*9UGjacqgSt5PkXc*%FZmKBX$b&Qyu~IK%DwC5=74iq%Zeu zZk~AaH`y~DWHL)~*d!kwwhUt4N>D_x%Wm(C{Op@jNQJ@Tb4vg7kN5kMgd2~5506|* z*4X?d91M56TcaHd9DxjT<3)magCQUzWx&3I&3q<}N|7LfNY;+>*Z%DFCI3~CozK4H z=-`3sVAqc-y-{?P;Hx}!dEK)oLK0g zwSre8zt=IC;`2(Zn};?K)0r}I}6VIV$MFJ8ApN6h`I%Z z4Vcau#*TtW7oZXLF;&c}4>pa`(Oh`KlFrx z;F>UI0psxomCfZwy@V(60dXcvLhUU_JG?TpCSfWqH_w*V^gaThOk5 z4h-%-@v&a<1Q``9TZv*qmgTO!DWbo_q)U`gG)8L;gwXF-3QBS}`+E{etwSEZgl!h` zqiDu9)$EAnv7s-91MaN}TD}xy@_9}*x)l8{aK_)O6vQg&Ms^WOho?=&7XRXXHkd7> z_XCmxAM<14KG&iIqB8l_#N4K)84@eKt#{uysD@&fOuDf&03Ye2CChUy;?jgPXhaQ8 zbC60fA?fl4ooGCrTY-n?vGwRi7RZu%wFA~{W_sDk6B7wWMj%|W(=(W}=`wIxQ9fCD z)tjh+Fr*{6!m2y0=&ptY(*)h*kG9L43q2taiSg4@m_4sL^%w@rq6CetgnMvKCLtrs zU@VLPVVZ=uDK;Be^+c7mv!L)a&u`ck2GSp!n>vZ$f0aejRFon#Lq3tV%@D75ZiQqA zjzCVd+5cUIczzcRz5|cqDz>yIIPD0>=+Yu7=}TgG>dbOj(+h!Yzhm5xHd2CLDY&hQR*zy+SS@vXRY+cI{0VyEaiR&*b4uf}Kl9Xj`TOnzZ z2m~1+k8IV(ZFfUR)6c6gsWPr;=jWUFxkr1q?us~!HAgOYi5i%0laQY+x}JZ-)`$MiqTBW}!?Go7L|52V_q(5eEl zB!*kk=~Vjlw4P@?`bX2f3O;Vx?>7P&=$*VZckXis-mr)mm1Z+mFMsy%|GCJPl}RG(J_0GWKqf&1x2Zq-Qn8R6B_90 z?e3mNkM%EH`=7oZIkCaz*uL;(%3}8;o86#J3lgrQ6xJX87G-?*^utel0fmIZurTtW z$FHXryVvfkL&V85bHvcBiBK5dInE7rOF1oeV#yP|1cPOML(rENdxRXiiiI#rfh)Uu zJkAF9IMvk(^2U5El8zDNx&%w;n!PEBNw}Whfoy{?J3^D&yxqj1Lf5V}AFBvM2kOei zin5!+<)PnqARnobZ)k)ir3PM%MGUyKF~F6U&15@<=w|17{}uQ18La|X4=D+z z->M+0qG98XtCo0ILxN}pRPZ_@X-}k|V5szz(GJ4)C20=`XHEaor2OU=&O4BG@({1g znoQcwpwi$4{m@}5g2PptQc0Ic_h5-3T5Ezn#qysA@5H{9t&bWBcqG)3@IqoKFR;#u zES;`BN_0UN+Zb*#KEPWk8ZFECMu8zXd0)*r3(Du8c<%WZ{lePn*OU?uiz>Pnxl$vs zk`LxOiXH+uu1noI&HK5X9woJL>wBn7?5XflEND#>L0L85Q}4Y z{=wRj5asHb_weKAJe3TiuEOg-{CcOi3GO9)CDF}&(kS76UhmD)eV!SS%o^Qz29Mt@ 
z1>73!p!W#_VX}#1%&I~6NeHnDV&Qku{Z01eZ6e|9RKC{^%6AAXI;Nh89!LWp607g0 z%VRKqX;kbr!b}#CLUXKYt8`&N@CDc|XJ%E`@#MQl=nPF%(<@>K+`d6}XLYxr;>je; z^xplKRs|9Vv^lQ8JD?P%B1jTXNvH-k(%$x50@Cz$M^5e?B+m*s02X})6};(Rh%$)A zjMCd-f(+enr^wL}1% zma;6T1j29W1_i+ZD&2}_0%F<3lAc7I-3#hOepP%=JNfp`{WP=l1SWSI35wXNU6Sj( zK=xCZ!xd7MZfmEbdkkX#vvtp-K|ixajmuE=dtaupxm+9RiPIHwkgMugwc_Y1e@6M5v5NJnzWa*L7qEY|m(Z^f3h-oOlg&{;jq&Ir1 zDyRaGS}BGSD*?;owF%9FIpQoQU zo!hCMPXK5l_LH+PZ5dYc!M&g)CQr?(Bd@#^oj#jA&zQ3HSm4s<5NQ% zJr$lFKA6(&9~=e_7eQE&I3hGW;vyV$PQ(D8t8{0%;E+@OQtd6D7qs4-@F8M6HlEph zJiU6iu<<8|xXj6`>)X%h!%XGr*Fbjkqhq@=s6Q1?JSdnQ#Fg8u1%|QUHd{PEs8YW> z+;B{K2D4&Ihtco0wFq&oU+mOJ`IrvgcVn6ny&7l;|Y@=aQ zqr~JnKxk*(JsQKHL}2QlX9;er^(o5hBDqh(529P;9zn&(_18Ompo|&9Gv(|!5_*R+ zLf*H>EFhILRs5UD*1-!JMfT|eYoLc?dw>5tXCr2rHTR>sWx0Rg(xZ^yo{I;cqz>@3LG%MWO?v<2hd|Q zIA+bV{%1wA88+hoogP?)x$(xY!t*9%+nZWZ;^u%rNP=%v2NbR!F8=IW6)r`MjfjGm zY5XUaFgsXG)+1eI%921u4Ic8#g|2v?0y7ZKP_e$h5R4ZF6Wv@I!m4-#iL%RdZBVP@ z@wD-D-rPsUP6mfor!~1TUSu4!LkJipim@jrH<50a1$&0)o%TB}9Np~QTwCH+ujZ)} z)i726En;xnZ89YjJ1JW-z2F<=We{IBU1vbc%W@Jw#?*WnH2>uU zD%iu`Qicfb5HBdynw+U6Us@V$VaxGI;3Y6Xez=^p4Ck;PMKGXX#_

jLi`0hU-}l zdU@YW4e@{yDL4e&c%$eD3?9;Nv~LU2CKRaFL9*#G+G!HoyK!Qf4FsGQ{}ORd0;sJq zz`rf?(b9$nu~C6b?h6toaW5$B$y2iGi`mmTI?Z90Srd9y1{vAr2Gj!vBXQ_4w_acW7INfD9|gUh)k zP0-^}46h_WHH9s~P;8i)D4Q1Pr!V1EQqI;gU2FM2r{ca4e?ETRlUyZ{eB$E5`JCx^ zN=Tpvj&O7?eTAqVtSH{Ty6Osik-HI|MM*efbeiJZM&1 z6Bor#vsPL|8hR9y;4nmg&XDX1AfEn|ZIJykcMmZAr3y zdqC5*4E^y0^e*Z2h_uvNvBc#UmEY~G|y|hTLt$$)_WYY^dv}7});oq^- z(Jk0S&m*)>Zy5p$hpA8dyTwm;*WanxV8#pc8)%BJe*s{KUa5axY^C{{PPY)Z+Q%^( zwAu|fi{5pd%PcM+k53s7>ll$S(dn$hkiigfs^9T4uxLxp{27aBcJ)stF5Yb5^ZKvu zvetZg*OOrR9gr{3$8j-5~u;bBanoJwva&kY=Ct3`)%3&CcwMJIgN${>5zu_l4I3Npz z*0am&k_GmODyD2)K|HvM%Gi^b_l}#$lzrf8z?LxG2nm_g_p}#cZtJ3!>EW{8C70<= z(iRf-c;O7o8aSbmV0tDkI~{w87w=OexO0XG(_c;ugVXBTe({U+Ja~*Pqkt^WM#hEY zr^tbcrR3$Y%b7*_$Plh=R6eZv03OXiYLmAc7f}=CmS8E>T^EcVCiR(2{#MX>npA#R zyc?2h4yL}0?RwEYok3i@$9Ze@0o!)wruLSLPq1B&1fQfvsLV@Xdfeb8?zjhY|0+Wl zLW1R@I+1}x1tfUd#qU)k_@na(c&_P|DYAlPX7OyeY}oHE(YrHgZhM(`?xL=knkT9d z6B;4gy*Phaqjl$0b)hWnk6ax|k(Fv;0hY7@ojg$^ijAUVPwzt`G)zKpz}Uk_k#smk z6n>dSKjHeNN21C%VrAx>9hyv%?Q9aJt3%-@=d4YGo#7-LkGGwG&d+Jfr=wlzC87tG z4k_%3kWrAADqWgY7#5q?ktN?FBlCqIhE(M@)@06y-SAX1$GOL$7VP2%adQBI+FkZH zor>!93zn((q3wXSzs4)*MNIkNq6$~l{}Q9l?CQ7qJB%pMZ??mCkscZU;j7uBUjE&g z9XvzKUL~UlT6j{O*DdXEcqi=xdv878&O{#aZnOc3IE{=PJy5tUGUjwN^%TeDY~#e` zY}FMrJ2dlGM=h1$sio5&XX&y!jih`@j}d8BQU)59EWOWe&^2=M^E`hM@hRi60JTe4 zb(l}hNHgfTBmI?6a${>v5jIeSTmEYF1-}+*=`Z~drT$+@nE%%<;=GtT(dA*#{@3(0 zmmhLh@%6UsTXIJwR)Eh*B=K?YUB0mcoW;#<=QAA#730ih)oIJEHAA~`kBLCudBr`f0hn&GIM(%+FyhFvWeMbUq@Ry&IJ4_|BtHg)L{APlEz4KxdlZc zXHJ2x4HDdq4%ts(?Dhj+J+ywJ@?gw2*X_e(D|r5A8zoqbiV$jF@KtWKCz1&&S1&K% z3bytt)kGxc#0{sCRVEGz?MsHhW!NzRq*|?*jnIKkdDi)b9!D^ub7^8}oJay2BQ=ilJvYJmMyX;qj znS>cH{Mw(*x-C*&&gH-G!dd(Zd0KLsP;b6TU+1(aU)oL`dAp9VF2T*h!Jdz9i#@)Swf70J=A(%y$JCdE_OmTC2XF+ zgnXvY1cBv>m<8(_%waa;KR9$Su@LpW#`6W7rC%d?L=1IirHAoW5#my7+d0qtS3dKj zCtOkix^@Q_;@&qvCr!xE`17cnhVgjhUTI7XZUIInH$mEz1gDfg0mjWk=9edZ-y{NM z;~k9TeO7q8Aom?AHI;dLHKlh+_Pkm@iVdy9JI(x$t?BEMj%5D(LYG6VX!mN&i^GjBUYMN(5^}?pK!qCNz1gW 
zv_igSB}~6`${?%OW>VdQ9%HE`k9BnbM2@}~fs}LU3)^xlqm9=x9o|*3Op>#CCuCUt z;+fe)yWpsp+YTVivpl;s1%Z}{6G;#S*VVo-s|031D1o}^_;ht*AwzMXzg832k!)Vg z8j5Z2zW8A$4Mah=t9wlGI`l_w?z{Ap_u^BT@GEl1bvAoe1-n2~(st`QgyrGmljrBN zbZ$O@^MpO9rYnD%8p_gJfsb{#_o|?znR3dzZ;A`|MTuB>gw8o=T4s*6S?IfrU#X~=WTvJ1B!8g$S>}ej+$;#JoNwB;OHEabsWahR3$l}0tkFK8Iw6duyS}+`3}CC z<++rUo+%!c^Iu1-q1DPL_n>~7-WFwS=k^l~ct#y%XI43hTxJ22wM*q-i*7!FcjL+N zOU}O-1u3{_#|#`J@gj+%CJwqZ6Lr}wM`wl&^?beWBI4kKO{yz+8&Uum4fP-295lb_ z_|`Bnlw140}u5bd?2(f2Nb2RWuyD+AxZD(2ofv8WZL%YudWa0r=Q=GOLikmkakbW(x{Kiy>mk&7 zd>4hA+Fv^rbv!CJZ)r+{v#|%(U&eb^$EXNZqDS2yOneU^u~uJpK7wyaLrycE192oA zmlV74wtT<*@2ftLHoOlrpRQho`!+w;f9wjsk8j4$L6G=;PC9;mc$a7$YUk$c?(IRj zU#xW>w>$5dE!LGU*M?R?&q+>xT4Z*yS~%hE7HAOaMf|LcCR8AqTpi*vu60X*#%-r2 z%Dg{DcQr6>V7lJ>azOZh5Vnqo!lwwZw_K)&p34skqoIZd4Gw=WOjhe=Z(Y2%dR#3C zs*h%9;7DrJHV|RZ6Gk&0`jIlU7)xwMF%luIlUI2#IWPg1lkQ7D94|Z=HEMg-ngEJ# zQq>f5mb@D4tP!j%3C1xNhDXmk2|w8;sD=zC_NJDQ9b&@p0BhZjG@W>KVIpULJie8! z$HlXW5vF%JL8r`yIv!H?nmd@79}poY$4nXQ4ji@?Ah%cBw*TcYSLk zskDZO9j&~h?SPJJkU8qANS0Ysx^wzG0-g`(!k=f(K5wJaUoRl%wl%IJ-YUwdQ%{Sz z2aJMBSy9=er?gv7vZ#Iqxa*drMWa>QeMZcLbu zUWh9vjU!kt-U}(^7ylAGhE#o@n)sjM<^R_*@6HPh@X=MEq9=O#uuYeD=R=3LFJaR) zumK_!(8x<7ievS7Q7uf$Uq6k2#~qHC>o=raQE*qIV==#QFz_%)s^vZEM!#pQKv1U4 zJAh}uZ&}#MU+ZKvSzsi)N0H{19N7JFFzhB3tpjFa1Q9oW{Z$Y@1{*7x^^f}+Pt8O$ z!07!d(ebf&{^mTo9v90`KAhSjDQ{2;h3>#zOU?v^tV?4JUcy1ni=4(m?@$$pROEx^ z?-EN0X#;VCiksN{4FbK0Tbs_&F@@Q|klE4lEB;DC7x$YcbQ$x=Ou*U?p97F!>sDV* zg>f)R!o&f^DU0>d8En1XU}@CpI<{6OWX=zLM~7qkqnaZLW%>{Z6%L&@Mux6vbdd{Px&EsfRcsK>f>grqGbqr&?BstR) zK?d+nOmga0Fj4|@SJ(oZ^#M>5`E+sNEg`w7t1z&GhqP<+9^;y+0o`)MH;P#gc+Rr$ zv+0)RYeQ6Q5#rM6$ii2TiIpFWFAp|cNPGD4VJXQ4UW=T zG3X4)Jq0To*D7FAa78r7bN!^EC+9WR_)GC>MNUe;Wk^cV$I9T+tsHn_I6S5P+wG3K z@d|}9vZQi}SH4a$I&`qmh&JSd&+eY9)Gd#p=uwks(#)Ov2hk|!Y`vTy7|gE8Sg|S(FG%YBSj%6Q?Wi(EgAvLQhpK);>5pv z!UzcJ?}>%(Vm!|t!x7`~fdSEr*LrLtce+0fud9MxkcEqQaV9ofHsKF}RSa@|8Pg{X z4rU;~P1`9FF5C=;VPZW1&l!QfpJSKh*Xyac^9fqW_XEP4e7P1OG@Mg=qJ8T=XbwH& 
zy!X+->jy<-MK!ieC7PnTnlPtnoW+@$;9O=K%MUu~0w#%c?rNt#$3{%%^@72rd)yXu zs^xT7G0Iuo^b9up%}nV(|D6xBSYiP^aA~d>fbY7DcOpQX?S{etRZ8N}YdN?36$?V5 zRE%=-<9D#KOukyQ-})K`)543ckOm9lVr8mkC$TCxtlYILk7@2X|wKfJ(H= z)(I`1VJ#CPjS2_NMQvt=Z>b_0-s%mK%|j}4c(o%vdm!BHbM-vK>0PZH1Mpr#7&UDd zqK8+=%0>cx`+rR<=V@I$sBpvB(n>eq;I_DUErsJ_=F;wm1`&X#EJ1Tnh|#@j>JkHa zo+t;48b0$I)qCdGox~s?iG8fOSFW*OFZy)!#g3kQfqU0VN?z zQz(ErO>-)1JiZSz=sn3v7W(SbC48$a88w_*z35V0Q1Osif70kv6j{2^~i27DvF0>T}t68%|H2PF~lBAqr5XEZt@o$UIJH!c>FK$9I^VF({C@W?6J zyo}Dh>*Q?|w{PNxitE)zptDVk9pG`e-Aaqi&{Q5;hv@O$lq1CL##_@E&kZffZbtw^ z4ShbhC^IRU9xRaDGmpAP!A~&=RQUQq>{|DbNGW;BcwYgS&jy z!jK?f*x`h=shMRwjl-g`pxx@pUC_TjAJ zJS~q7J`LARULEf{b1ZBPM==QacJz}7^7Ok|PPxMlVd1An;M9% zDHQ%R`t-agdp9=rQZEwqzlY{h_KPgW)luAW(*rCrJ32kfmun&a%B^rzo6nk$-%rtC zm3Y&J7>%Hv5AZVq0Z62Ia~xUQJ>AGvKgz1&I!`i)zk!NIyIh3PhQ^Nt<3hvYi#PHK z!n=;HcW&@kuX$VgIS0&v3m0EIvmutb2HVSZ@&c`GW;4W|bd*r4IGR6OaqmZlS^ z@gym7Ka;0?s5|uHU1d#n`z_{hJ&MQ{;`Q~sKNH|N`Pzd6e(24=6PrUngx#NWMB-vyXvU2vejZ@@xJAJYg z`5@O{EBxCs%kjEgq;9)Pardg6@$}sBAN&YRVvDbL9pU-gAKI=7J7Pk7$W-RBjn@wZ znc-35y^+h*(t%hI9`u=}V{%O<@ZaXAzP@~6y=ga3QPW{hR>l3yE6dhX46f_6oIovr zdYo_@*`DD*5YU&P5izQg4&gr9%EOS#Q>El!Sm97hTT{-6jTq$xKUW5^Pmc3iLg=6a z<^*YBQDpC>(5RIkmHH4BAf=XlhEmu?dRl_S#7&@o?rs$Z9&?HrjK)9@<6aTKmqUkm zj7swC2q|@=fI?I_M{S7nCev5xqsqj?O-G?mN^pkhWei&UC>%>4s93zRJMZECDN>gy zmYI}*I+&d+*4J*KR-|<10T8pUmpxz&T2I#ZZw zh?Ie&k$`;k;TKFKRpZ6ScakizS`Lm`SA2yfb-RVRd#fB1yhma7r-DuaPfGp z)}^M$WPVWP^-BH-*uDa+4hoC9zeD7};Uip82YEe0#33UqZ+s2k+%HVVAN)gQ0S9am zL&aZV1WWBu?zgN+3FJbhuG+?1K08+F!ET+~hI_7@I{j1+DqS)1?ZPj_;j!vhtg&pw zk9KjXS}w#nrnx^rbpMu`kqq0~x(+;n=MMppf6YfCovl5xP4`Q&C0`mo=6G1@GvvPaclLJ~i5qvM!I_Nu46j2&>%eTTVmE5kVaHA?9_N$TI?H*7+r z5rNt=zQvGkxP0*A*NwrIhXu&J!A~;ZTYRCS=4vV2dSC;1ruh|SFg-8&1{L@zny7Zr zG&)h>6<}&XTNes2$1&(&S(@@>mB_osqeg^~e$Byn#jpb<<49r@&o*Ega}(6>6Gp`v zanHoI1R9ozS*gVyD(k&J8@hDbyM7v&YNVFf^ggKGt0m(=KPMcP5*m?YMRbuq%{yp= zH+|>Oehqe(4?xh#bg>I7;x%&TG4kdy+T`GI5%fa9Bk~^RHbTU2AgeWzg}yksphWEE zgYiIyf_ot}Hv};uB&YAIsIjDBgB^$%Svh#BXh#bnhk&W$BW1?=s|4_^NYcd%p4ilC 
zXtIPvYNrN`Sf8>dkmLh5Wm)V-8a-8cS++8H;q8&COeK)E(GinmXbZ*Hs663yq1L{; zi7X_!#&&~>!0w0_!<6`Der68ITH-dh3Qju1Y|^f^|LbNLp#2F6-CW$;o3~Tq#?@U) zr@N8H6ILKe5EF*#W>1B=WJ4ohsjFZ&RyqN<%Hlc%O9kryI4dO;&8*|pGgB9%CnI!` zDlt#iwUxZ-8LWn{dZDR`q#aPg3qDlGX~0)QR_F|?_8{(!w)CDbS!zJ?dl@c%+Orfs zeDuFu|FyO8Y(V|3v`xYoQG>`#ff7ED^_q_xKKkcjzf<%e_Ali?i_ZSh(Zi>=I%VE4 z<7V3J^}M}`v)@_X@EH$+F}wQZHs!iSXk~nS<0SBr6bzIS;r!c>S;M-ZLxm)vyubA5 z%*KLx%AWg0Y{>hm_bD>cUcR+er3@%_x-c0sd1|SwfwH@5wH#)4Hgx<$B^4P-@g}ts zc;Os7Mz&Y(FX;{rBi(toBF#`bDRmIQK_S6-pJf+;5z@|O;~bYAhl9b<+*ryRmz{i}Tc9F!;1Q z+1ceLOwO$7GvP}K$}WN=vsfRZFJI*2o6MS>A|FJSPc-C=n||a+exa5xOvKrT$+YOJ zJ7v)4^j>3NP_N&>NiUC*kYTyS#Kfi03rxqA%kj7iu?JG-k}<{qVAbY}Fa3v^^%sI7 zS^h##VT%!|?|j5~wZ3WT?D}p8KlK8d>i~h^U$@0Sibu9)DKjcPN?n>#HjMM$F&7fd z0-v*F4>Ntj==6kB>;z*rVIs>>RJwJUFsd9!HxO5JgFfChOHWh&JyRlc!JSu?LZCdvyhwWIY z@%s%y@g_9BYQ(~FE1f-i{4g-MzLsKjn%KrA7WTR)lAs&!055AoO=-m7r1!d z!M%e+lCTfH=rFXq_E3MYOt$> zfK6R8W*^j(@y&vxj~q$;ffWubV|E*gg=R~jSG5v4nPZE|l!`g8OUWmm6EL9*&#oeT zGwh@wl_B5L8gh5F()=wTu?;pblgk+&bt1e9EM4g=F&M5UDMRvoQv8RlimGEWP#2hH zR;f!N0prTcT#9fyAJjX;JeV@ft_D8EJH`$ldmc)>pc2lIoX)a#JSJsm`+cuJM>dcJ z*}=`2cH|Pc(krizs{&j-&b#KO~i6+2Do5FqjrypBs&Ejr~xm*##t{SOk5$j z6F9&mqP~ItjI7l959aSkNmS2N5DU-n ze7vOB2h$bI`^CF4ZpT#Et7$_<6Y%_ua>K35{lx#5*vdBZD++xinuRRwgLA=smNe@l z`+%~KQwlyhO?=zx&pBtGr0jg)S%QSCv1#?cXd?pKIa`$_DSq2= zU-L)Ziy9to-O}5s>UHlw`1PA^?hQ)Ou0ex8vxm278@S+p{?UiQ3-{a*Cm2tTZ!d$@ z396jI>5Y8g^iY&aB!YsUaA^u1V!1mNDBD7I_5S^S-Gdi1y8*w}Jo5iCa^G1=5mUN2 z5rETpL5#8WF3?VcBDu(M%%K8@mlt+g*1sr8ptNbVx4wQyX+ zat8&dyX=5kKTJ@N=#y%zU_(@f|0Py1EBXo}ROoM&ZB-G?K_U>8LgSkk)1*TQef_x@ zWMTz%e93;I1H{fLa?O~r(V(nROFV{D0L@`B9IQfiyD2GETOyHh`2ahKh<8Y~*q{r* zc>oOzM?aL>h$SE~f)lMTboj~06dF3Bn78aL=V`LjB)DiY|5mwsyv#gi*1qfBoZ^sl zw#L>O2G|-n1eo-@!y~d`SS90yoyoE7eNqd^uv2ys!6$%&=A{OVKu6IKj1~LTuU}J} z2~xuHuum5;NIZwVCHyq!Nik+Qs@l^Sqmo%DcOlC7i21o89SF4j4u&;2+#sbPKw$|C z#z~v_Rqd4MOLY&aQ>3~v^y71Ua|b8ar-x`d=GXQAa9+y7{z0Roj2r%!1Rf5T;3@kM z(90hCziA0++crXd0ohB&*>d0-4b#S_sYI1I!ULP1gP%A>e*_{JUwV-%ZMFxncb9r% 
zc_EWh*k$zV8BPaTU$9sM;|X}B>F>$CLS9aNnQFmoAiH9r?l{Yxwx-=R0<8o~xupcjaYh#iRpG4Sm#9xAmJQJ}*s*0R;+&Lw&@5%4jk>)-s(&j`)Rn@0{rangPH`YWb|I>O zKcKpCa9-g+)eN$nkbvBSk0xDte5#~qQG(1S9T_g#X#J*EhYN;Q4ABGXv~%Z|GaH>0ng-bfsr;l@!LN}d;7GslZNPRkP}CWO0>_j?z^1EuEbEx7|anKO>> zyXik}h$ZwqMuM6Tuo74iRk4WWbsY#Z3KlXjO#h{cqIdZ8QPIdPG1%4zI;pWGh2SVH zBMzUbT!Acc?gA67NJ`WrDY6D7ReLo|2{5kUM=1t|K*wzbK%F`NZ>s_-(%x|RXrrrc zSuLKBVRVsHRq1N1)3lX2(y#|UJ(F468WMu~%5JN_3|^?DIqYaK&~5C{!_+ECo69k6 z=TJNBNBLp@L_)snZ5I}1MX_gTST=pnW%aCKl2`vDYM`f%80WOFS|J$uTeLB6EXpr2 z5XwQG(OrzDan^J%dCkZ?z&*mLuNS2tNEiYpET+KzM`IZImBP77iH%lPbp7ZLQ3<{^ z$OK~3QEveN=c;@S5ssW$qr|12M=pDg%*JzYE*g(8{nk)3`Ucdi2l%S24w`s=Z`Msl z0{>5s&?630`@bfAYzWs&JUD1vWcJbK1VqQRN@5@^Y70=n5Tq#w<+@{61!7;^=zaY0 zQk}6MeSB3qs@M=}SDNDo_;@k|an#DzSz%99g`|A2+Uo5seDb9faKrLwC@weloi=bh zg)tkz5E{J|Z}N!&4+h@RUaH`MRqclV%9^j+bq$t0h-w7~C%?Xe9z5yoEfT8@KmDiDDUI@(n6Gx7MXb zSOLE`Ub$1%3hb-|m5P+37s!u`%TJ6PeSv=O%l0_f$BW&1kkKxc+;?(zsw^xL&R@`g z`$z$&N&dfdM{6v-l6js{@4L)bN3&n62B`3Qx>%UaZd;MTI1l7rA@8Y4mvD^Gb^WiS$42ExMwd+BUFLR= zKdoz~ipTxshI$pbpaR6gX;e}BmuR}`FZNpE1-T4ReYopu2>52_Gj>N?K(fWjKn}A@ z&7w>xhYsA&%aiFIINH6Ock8!CErG$K2hU-GG+{`lWi^{;u~<6p za}owi^O4Gg5}Xiq^KxsM_=j?Df3v9@$E@Gcr2-hT!qFDVvJ$9zhHT7&l}lQ70YAh= zPSZ0M0Sj9vCdnzL%z9Mr%Vgf&O*GX6)4muDXNvlf6l`#=KA+7PzT`k%N;t&zXUrxG zTYjx-!m&f3|EFuuWdl>Hwiah;R*>uefR>|q+PZt3CKn;dEq~=X0{E!+BzVB_dY4tZ z5*RJ?P4EtY=aEr#i-b2w*A6t61bnc~qj*=;qvSAG(q!{l5Tk2<&%NbrjY@}X4#K0? 
zvmP_MQbpkfkh#s{*N2jTmg7 z@l%O*JLXR{g}8I^MKsAub!AU;WQKG(@MTJ4hFFG- zFP9GV7i~Knmuou=dJU%VhqBn^OQiXra}Eq)-%Ej6XQIM(U&z9%3PmqS_&=#)F|nrh zL{R}C69>YU@M_Wm8}A>PDv3rg6th37Z1Re(jvQ-)#l%?B?mWA94fAGX=f^87yEps z;SqQt{FO?@MKraFpE;Skd;2OZ-XYMPXlpjaH@u*rR0BgwA(njL4wK1GMF1yYpO6mD z+rR&WHooQHb_tFxaf01ko~|RZWNtXBkJNn#s*S`B=Hn{9r42Z|*r~6XJP$c)d4$ z^&^`Yms%Hd$<4Z;zx`O@jut#WE`3fiQUf|*tPDx%pb5AtSw`N!a#jCO6`=$0yKr4yXpU1Jz*cE32PH3NGJc65HiOlxUN1(?_ z@>m5a5S$lUlIrzZ@9HBNcPkBxCJ`>?b$QJ0Q#|!~<0%Ky%AX(k2$XEY#k5>&F2cid zs?A&%8I?SqNM4$+QSF5gw3k*x)fbz7%U@x`8EOnKAGadZ)kBpc=PfgS*vqc;q(3sos3vm9BcbzO-=a-U6e@C*1} z6J{3m!)`vCwtb#o?%eHhxXtS|`l`)D&%sIiv_u_F_T6%iyzNj}{|01MvZv@s^13sX zKP$m8bvw{7kO_6jA=fD;Pq?0t)5z1+(%=sv7t_dRslqvC9KO}jMuGgbNi_?wrW4c; zvC7j?K0Ldt5VDAsx`%tatYNAOOKHJfFMUQohqL%#K=9K-E#op+Mp7_nY2fM&RPQzhDb~MH97v{vF2>mZEunyCO zm|;KPdEV)W?`s8Es;s9!yeGl1&|NU2QgpQuw=|N>%g>YHkt(ZG%q*_d$79#-i5NS^ zA$L3WEXT)66Ry#XdjSa}HIP75TSR>ToQGU!*H;0TAi~rF?7qMbw9=k%6Zc5L#Z5bO zDFz&J+1w>}OiZNuPALVamTJ@Hhu3T=+5|%v(f6Q=sW}-*-@B;tlHTzx2`@s?lC(H) z5pLH+7)tuEKIsP2R@}9>|1`9J;Xh6a&c($)H=D`t(S>@ z3Si59`aju1wDyC!uOZ(Q$tgUacmE-hia_8Nb+<_qJs)$04h~eSHL&Q}9+A1oK zJtj31>1{)Fk3nLsbpLo=yy>eMJh9lqvmovPS1nUSd=XA!mYkn4wpNko<~pEqxn#-i zW|B0JzYgnmRzPB5*O07WSKM$R@`G&E8t)Zc-QVFpxi_dY!Th ztxd=5)IXX~@k#h%l8i4n%E+Q6A+~YD4`Wv;$wGdk8Qzk(e4{yoa<{cfVulS7Ka1oM zz)QkXCdB{-0Tpv-Q-cDR6CQcB@t=*p{{av|wp(QAnHc{Otdk1(h7c)au)kEL2ZM^D z7}vp@cc}~T9W*@Gq^RM4!aW$rqt6I50X|5dE0xn@lq*j_7G`do;$w)Tc-Y5ThXtpd zZz+Ijh+F=uMGofNcl{QMvT#R@t~Y3K7tSRp8i^MBZx;E4H6C~Re)L$4M=E|XMupkS zw@MWbub7nYaMt7a4aK^CEu)F3T#9Wc{YOoM4!H83>MCemf5)V?H^uPuF7np8gu*xP z!g`!Kk5~A}6JK_D+bbEHrvavTA!w{;h>wmrc<9>;GEOU!(C4&p-txJ_!$PZe_L^in zpQw)d5PPqUu8IfhmBIqou0xlxd&m!;7w5OTq}*L6_pW_6;H|fNjUm%^-I>QYlIYU} zU3Ct`Seld1T1Yra72Z&)su=D6;5W!dPyk-_06lZKG*hMb+O#0iVSUNngMj=VV0wTE)i zcu=m*sMxZ{ZH=R1M={(`t>%?PHJTJ#A4oFQf(%Q}x&pb zHg_E}wO-7oVhGAu3z}`VXr4Tr7Lo`Eg+7`fGbIs42weKt3NsX2H&hwy8VsPwL5`&T z1lT^uu;JB%ppMtQp0%*q#R@c`gw7jJ&?VO%4JLJjDU8;lNKfFux$ViL-Z4=%YBtO> 
zJOJz*LP;d^`cHDSr?jL35rPb>Na$U{APf$*BWgA|mq_sIVQW2(r@1Pgaxjj>?Wq?W zxhcW1n@5*!0_{lPY*~n*5#}nTeqkLs5)FU`Rork>h{7(|9Jm2TVX<*wA=@)TIG)`S z-Jnv{11k5xnFn;9pLla10CJqbV~ISl^sYopqya1m7mtpat$<6yP@sAqm@O(MtmDp$ z(S@VI(*36SVE84?2;LJ%S6=Htv2v8B`Ox1+i3wB+%gnmJ}K7w3mwRY8{BG!DTN)Spa3gX z1sDz?UjIT!rxESm7Q1W64LXy~_E&08T|(ScjK?brl;Zs`)J;*%?r<_FfI=-?J5W@yG-lehpR6!Fvs8`Sr>nA9%RS#eiJROwj( z`^Mie0$F)5G$@&%X38SN2P5iK*LQq|^ik&(mFdBV+bFz72&NqN=dB}pAMl$eRi-iu zZ_=Yh&KyO?OQCfA*#z`8J#2Kgn7ooJ4+)bx9zkpl_$rFQ(u>Ac$hJ}HTF^=-R7!T; z0ba^n8qMZZNOp+rllKSsi@9%@t2mP?IXR{^LgLMN&EjSz;JK-`6q0-7g2i_KJYMe5 zgx>pUJg#@B8rK7GCW&{eJC!hTYNQ2fbZ--y;`J?%_}pgG*{2Vb=iv)n&S#&E#HLG$ zRJDtNPBhK2o*L?`aV(MOz+tl;beP^f=mcEC9K{iCIOqvliQD3%tqM{`(;$>9+El2avbDKZ;I>h)CiE&H&;V^^tONI zPgxJiV{on3JXuN6Re``_z{MC{t%iKs>DyV0uLE%WyOPI~m(P=Rp zjUf81jhmkQPzt)S=8mYgaG>eTQtx;<;Ttf6JCrg@JQw|u@owT|uE5@4ix5V6z)njQ ze}jXhzdZ=&E7TKpQzaw*=-GZWYSeq3Z0-OOYgk&4$iViH@#Qm#gS3SH_Iawc+X zJ$blJWs~ZFk|c;X=f!An+M_+>^5#sOFnag)f4KU_=*psQ%hLebdywU+eyXhyzcS3`+a}TIRDSs>&&&ML__~r#9;zdQtnhSar~G?CYUFV8coLm z+@M)(QO4yE@aS9O4p?&_2_Zf3*plSVM8tJt|H-g4O!P6_6nLxzZeElHSkWcH$SYn? 
zF5x?RedS6Wm+S5VbSJjI!=DUwQbDBp$r zc!Uz{qpmRxh1cvhye4xgL4OmsrH!n?(4=T?I1evap`qx>GJZJw7>sn zaO&p-k`4=EbY|U{Z9;4!I%)QZ0;!kKITH?04G{ zy}Lm5=CtI5i#+}uaO0`DT?i_?0_T(HvJ*WdPdKY-7Tw&AG3jH+427qLlmc)D= z8gs0CpzC5krG`&gQNcEbuPO345iDO{zO1gNb0}nh6s(;0{PhB7SK!N2#vo4hoB4-u zFX!Xfsx(YtP(9fTBbRFexS>I9g6-kcrb-Z-M@LD9w=NDOH1$+2N}_(BfD8B&(yEb5OZ#IP zO>vW{8Lspg>c5Y1ldFZ@lpvr11jdJ4sr_fuLdn*D(1sKmaj!=A*mYa`iu69o0%N3~ zB=Rw)uVffIv=>#QV#XhN*Uhu^7pBn2i6uEffif^IxBrM+ubwks#5_Bw;G4rJv~qdZh^2+pHAJc?plI<=HsE1>W{9DJc%M!PF-8B1 zyo`m+h3^>`$len<#zej>Y)`)|eb@|O+^_o}j(bG8`B|ufmG11kwXK(S+3k#m9OoZ` zt_uD=R-&S%r)Sw#B(hLkk;Hrw8k@!zd;hL^D#WGZkPKaFx7K#hxwlHMu2o41Pg^s_ z1OKi5Pa8e>){Q~nTylWpy4W>fG$lp9Dz0@&*RJlrALregUj|}5eGWknePb?t;XB$1 zyWMb!j<4QK&3JIZ-9ejsnPxAft?6v-c~AN!QmnhYnS;|ky|0u>|8Y$w%S3E3n(S9B>05~LJ_ChRz4&_>xU7Om$(Ima=Sgc@mox!x2nvgm;O z!~lt1&5)226HI7)_?TDMe!{5?RgA~5EWICF4WrSmYkS)1+)w8w{zkaai%JLtEe$ZM zEcIHx>_Ld8N323WhR5Qx+}O*GX`;y@yh-(JRQ!T_d-8*09Bc9564dY*yHQ+`L@3>6 ztrz>lauu{r@XSAZrHAf-2MVJvHku?tG+X~Zf;@*Y$7NZ5dxhv^R-wxdDnJLF;Ck65 z7osX&(qNPcPs=ne^sMesd*(i&K8a9#I)@m@_*)~SW*#UvnbZ-}G0o;-wGe6$!%@Od zE4bJZs^3$+Sp&C{ZSlEPlej`-_K+}ZWk-{ z(xDql^HKL4(XzC%BK)iT3PVvm@f|N`oThMIcL!*P5CnZ)m~V`OW~*EFVmyQ@3FJ%@ zP}_k%@z`&S1zibW4q4{$ae2Rrli?MHB<4evvJ|QHuiLy_oOdqrnk$`S$g6k5=oV6T z?{9)~=VA4rH^6yFcaGdfhiLbNi zesy*jj+;;XFZ@8Nn9O00M8n^5naw=ROkzQ5hRAI~dkP4}^6mrf8XBLIT7L7A?+aw!34wNR zP5S=bYQPWQ)N+q(+bpeayBX30mYZtC^|}{x#q}got$N;7dAE~9 zGMa9k6Tq0#LpQYfLQbf&+st7ya#=*Soy=+y;;EWya@Md6|EM;mTwUf;)`O>I4|5tG zib%^fvQ%{Fl&UcW|9^wyf4^<#zdAN2y*dOh^j*I$6O8rAw%!sA9<~1gc5er6-}lL+ zo3vge<$v6uZ%~k^K)p^|1>C~d>9r9BAe!sCtyCi>=zWS6LTu<(%L{}~LRpSUnQIyg zJN1>RcF<_&HpQ57{bGf$OuMvZd7`D`*~RjPF;vk8LoL;XZEBT6BMY)`V{f3 z2rZ|dZKGUsfZYbGN^6t{L!j!|00A0Y3q8%D{Amr%)JBu3Pdzrr2Q2UXLux+T}8 zwjINcy%Bk~TM;BuL-CE{St6J-iW@F+3d5K?&9P_iHcv;owdobul#sZBqp*UTSiTIQ zAn0i%^N|-<3!asWY40{#Zwn|_Ou1wiqw||nWB{I*0Dv!6uxC;hgSOZ5&a|C(r*W0t zS=1=E+9ar4+niyRG|he5HqrU)?8!2UTprjk&lm48UB>}4zNhDW zt!7gu+oYB?#Cq@??UhP}wZmQIqbBVZtJ6c-wb4MF$(&%ZT=7{EH?diKuwfO;FFzxZ 
zP(O4lOB4~?x0sc^XMz|ekJsQi#dXgtW%t;=p=n~mx z_HDDF15z%X#R36H1s1pBBII2EdAP2WQTh{|53#C#{!o}2&h6p{ArSx={Zt$D>9?o& z9=VS1cQ+Y0T4Sa4e2V-DHFSykok#Ry`^c{8f0nArL45y~jw6SqLXJLEjp&VG&X7>YweK;G*2P*Ax;V-=k}nv-yx zQw4d~Ce}EjU%k1#ry^{F&?k&iVW06^0dwzjUg)OWL}j+E`uP4(VqYTh$9SQs66jW} zvCZbD#Rs$jJiOS1Iz-Rddck$N{yYJA#S84(a$&R^*`Voc|6W-?yz{#Y#MUY;r0hPyi4ikBFXl`z0BYmxL}R1bju3ccfD!uTD%BWaiwDOpYDK z7`PsEWfjf1aU^)L=_|x?yfRgGB_HvFoZC%UA%}do!-1qx64NT^m|rG*ba_Psk;@`p zESqo~^6wibIyQ!(k`WgjOSkgl}fU|7?eMFVO|v8Qp2X@JQOCdG#(4? ztsqD>f9;A!WmGOX%jalcr5f;0@nT^8y_$e9;(s13u7q#1wp%0Y`kC0aTIy%bTFxH0 zwCm*B(j_kjHy@yT(n}lWG_lMP=y6mIWgsO0RdBt$0HabOa%^WmiPUWy>ab|CeARgF z;g?u8gZADTLR1DJejCNlJ~qVo$Qtw=BL%_T*fJYH`lss6ta&Houu{@AwD{meNK_l!Ca)o5YuuoXQ(UVNZ>lZib%H}ColU@HlAtB2B$Mrw1ak3zAo(`B(Cz2y!j>=m2k>? z^sI{;I9T1JR$a&OelW9a6x&NagDzrcKVnarP;i#7*mz)AA5h%&Y?<@J^op9`{?e1iMUPxseNF zLYRpxz6kWXiqxUiDbf;saAPa1T6|IM_$*)_2f}w2dr`3@u~h%bT>b;BAnAmX>9{}unbsbnUy z?jg|ksHN1nY9zg-pd%ZhxM9j4ze3o~!-;RNy!S9o7;zsI$y$UZEZv6u*79{jeAE7Q z7&!3gG(utib{{y3-S;A4e_JxP7)$?tU=Ojk-;)o+-Es#G5AN zF2p`Ns^2)AWVK2MD#K!->;)&Eu;_}?rgJ8&_)_q{wEzcH&|~KUQb88kIp02tf#h7O zlzBl=S*k3^g41g<^cMNrq>!c*ug@-<_EgeYfov8~76%7X^Zh+&2* z^~KC9f3eSV!(VSWgviM_5+2Jm>HXH({VUvV4P|fU)L5#9fY^g0?+R)j;f#AG*;*M* zh9jBAcl*Z&LF50sIsNZBbKb4|RSz2{{CdS^0kRS!&99da>6T*w!ojkRFu3iyMGNFo ze*7p4l5bPb+J(5(&UXc!65hOsw9G}~{#tz%urjK)Cx^`J{ZVG#9qGx&;s^C36+{j} zXqI1zraC1Wo{X`2&DoecltK~oz35MZp}6Kg!NJAC>i{Y$S^UB4)>1M}!3yS~EQ+=; zuliwFH559ugI>`#rdNk>DQ_YVQZRU5^bX8Et{K4!d_d5Ov`j_$jMXF;>48}xD8U<& zdz_*Z#@;%QrU6HtxOg?Z|3WQBv3=ZdHT)emGLt2KrSUSgA{ZqdZhH6Y;*aS5p_8cz z3(ch{G?OdC%xg#}Q}9#{Hss3n-=u~2ZK2F`RK7Gp|Jr<#SP@M@o2zW?yBL|cpsASG zswMR}5hm1EvCN=?XmUjetOz`l(W6mcqqo;{*8u8pq_)IOGguOGO7ACAwvxx0M*uyi z&?n2=J@z)5Ebh7nF&G&9(n`PL)m#ZgfQHBtJ7 zdTnjq-$vgqICRKAN@!x2>fy#UstYu14dVedcKI9?#J=+4iK$1Av*&393-m;9G7fGu zNqjz(tnUQF3P$h~ApqN$qO$PGA`f6$Q)8gxy4u9%D{k==mpCs#ygm}O*1M4AZf1It z)J2P19yIkRKiu+7!sfN0^ntfMNJhbS@hHvT;CTF5tVzH4)?5R~vvZkQxc~Li{e>O) zbziv8@o1fcr=U^N$FozKIdgj)c!&Mma{UF;T`pmO-O)pVUw-_2q189RrMuO?oOE+r 
zgM4%06ztz_fzJ%(FGm^+@!PG_wa11+@!?)>V55)ZDWr4P+MmJ6}w$5>9 z({3XXU2$LLP6O@cVDa0!V)QlhJW%)<=gM+Y8Dkq88%s@-ixjz!O6-6A_I#`K1=sz7 zKCDh_^P40S+sF#c3<)J_ZIgLPpkNVWiFT%&rmBeG;SBl4M&vY9TdNb#{|3*48JAlv zDd=ZTdQW-Cs#LULiyN;Q`FroEyV1|f+aV0y z>}PX$TEt3~z1Uh<8-p<^T!sLm8fy?vHl38!mvDnQvS~eHvdw1Bsi$k2HaQ8+q_YxI z)gU6t_`m4{42dAqt*mbymAvBRbnG>~PiAx5H0q1XdN~m?Lcly~(R<%L`Go%amN-lg z$?(bMq2e`ju+pF~L+X`yhBp;9OJ>>nC>{vNGntt)Eh7jHnc1$JP0N&lm&eVLy^BABFJY`D5-e|fiv z+NJZ^HOJS4u$nS$)&6czx)y|C<^r(JOA2Mx&Fw~{P2gRl#8b~6-zVZgM3hbKeVuCf-v0O z5z-(`Xf2>L?|tjMTtWv$gbZ}WbrdQIAeDcBEmRer3epEbF*`#gsaA?>*QgYTf}cbd z;}eoloQAR((r>5=uP##5xETzS5O$<*%1-sKB8HvqHDGu)6%j5xATcl0_yrx<(^+N5 ziGfhg#-rhNg+#4izn(?_FyKZ~pbwLr$GYHGJl0<-aOi{>WyHyAXSz-bl|VevxY505 zi1UJzwWN4pMz%$!q!=Y|?WYUDXu&gsqy0ko&EIhcx%Ss&nFLxW@dMXt`@VH+i7>Pa zJD0G_GBzg)N$b{&3NousZrvufZ-B0hf&{;+w?gqUdOQJ!pZB&F`yA)s>LV?<2bZOV z%L0rlG}-gf6?kEemGOw0ql4zIIO)Y}jlubZ8NOkN0uz4lJd9~kRHlvWP{z7OAF+eW+c zbophQr!8(Kpp9m|P?I1Q1`Mu6E#_-DUPjIFdLqfd{%0&nOfLd`z1?2upI5%}^Zf=_ zteVnuu3mipl(`g|h9|vfRAekvZLxSW$S~lmQVLYK^F!*JA$kxt;xG(jCh+%v#;F;u z_+_q((1b~n(-EuW23h<#B3nmbhU1%OBpJ* zc9x{jX|)&$5;>QoVj)PyE?Bn^gQQ}aknJV@5Q8-i`Yc6eWm^CArcCNtdzpKXk++8& zb{<}cg-e&&w)W62fW0oEJ1CHiNowA)gr;KGmMNPMqrPyDq0=bhF8QSwn{U3lP`QRg zhA9JNwT;tOhdHb`J+G%B9 z5)=n~gdL@WWeT8BY)Su+H4OOtm`r#W>^&yxMx~MFpyxh%kNX9gGPyHNaOP0-;a7Wo zjqlg4-PG7Pu#4n&hLCu+uB1ti8N3cT^*6s)iLjR-e!TCdCObNn)T}uuRP@p5;O}aicTYdyBe{+mmF|^@KD2A^H>IBup;9(ip>dH^&pSV_r z>&zWC8c)S!nI9(yA9kK^Gnkj?%4Pef3AR9>VhQS<{hBQ;$_wIXlEIp04!QUCS3#1i ze%_$fe>TB!?zFk(7XE+*Pk*Zc7katie!kg9zHJY@Uw$4h{DggUk0+D;2}b`bBKljh zI#aN>cL&yB6c{}}#A~*|Y}7M&c-7CP{MY7swSnjdKflRH9_oY&ATj0+DT<<)bBA8> z?up=EgQlTjGenF-=;V2n9duscJ^V&R)cLhPvhYW4-^gK0A}TF?Q8jA<`kF&**XB-S zE_jV|6hSl%?@Z89GOBIj({A|PAWR%2au$!9cK1eU61E|z6A(MfO8z@rqDyp_T$gyx zo>`?0v08?Hxx@< z;kwBgvOb5ADF@L*BUWLqsN9!=Hd5Mu;2<0gr!}~!qUbwBNf(4uPhh~IJoKJ6s|A*M|j9vATr2ZEB#+1oH zsApn($p5^1sk;`b!^H`_knAJK4A-XPfvu1a0G0*0GqO3IyH?(G_e@`O?B3w`dn^3` z-vHdR1o}g4SU1uiZKtSlJ!MB@))OEI^jQ@7C0aaLky4CsICHQ?TA^_$5?1{%RL-Pa 
z7e8UXNEqUPR8LsL1@5$X0Ogi{c^vRHnAtpv$V8)&vR&wx4H&(T(fWo4Xa1FoJUY-O zWl`34AUg)3jh9|(=slt$Gl57uV zFcOnR`bxnEDrHsjzY7@SQ9~u-e5fWxt8pVRs_i6Siu5G-Q>}u{=uM20ki=p@&0B9S zE`3}y^9|#lCf9MA>u-orn#&?K4a!vic3#b}P53Qss%FVWuykE(a7yZPS<+gV>V5!6 zeu7anxl`}9od}`Z9rd$vHXLqVVsp^NA$&M#IT|i6f&_HTgV6O#Bt_ZR6ctAQ0Lg(l ze?lUxRR%+PEO^V5b>*x@)}d%mun*5pwpzK zSxZ8Ut5-X1a~_R<$)CD*72Qg$luJ!Z_Csu47PSuRuRyr02tACZ>K~R4N^E(-(9n zclThcnSveSuhYV^{Xzf_#`Dc8y_-U0iE>C(>R2oJcDKEbr7_<+mI68*l{M$EAWe}h z9S8iUhFI3cg~xY#RC*V!C-nQX@Q0PY|Fht5$!=4%-;l4k3OCPYqIE8#pN?4`6_rYC zGZZ6dxjXiwG+>s#`7-7Cq1&Dpi+5A-7RL#P*Fsw&Qk8#yY@YD_8Op&qwpu~{->*|7 zhxNVFdqJyK`G#w!K!IjSwKxk+<7Zl%C=U@_-?EibV&b6NC^ktV^30Su3tP-G^yo(# z;HdW2dfKp88QZ{~d~q=lZEHFPS=AMil*5wG)i4n#Cf~dJ$s@iPtA0qyp2R^sR6`be z92sC?njcl0la9@i*yR!1W}||HoI|sRWs8nT=IISTts@kxcWx?h!JGJYvAOr-f~_y* z-+gGN>yj%rd=1!J^N{-=v$x8G+WIIjW_XIONL1&!q&_l#Ofd0gik>iUY{0#+_@~_8 z5yk`o+fvY$bJ*|u@m}Bl(??KGN5?tz>{}M&Gbs&Ss5p6JjvI}EVaav(6Ayg1+=Ocr zj3}cv%xy;nZ}7>H=Js;N#H*n)JR89rh1ltzkwc$gk(-=XKaI@dq<~$fi;0(rwyu~9 zK65Tf_WKFI6Ozp3RxLdl)vb#&r2k3@ysE!Ba2kP&6K|I?Xg4heXojRxX41ggp!YLU zo@X^Pj3ZeS zDNV>wzj`L)ZwT%tK`1)5GcIXY8vKx$^IMpYlJ?3(#M-Eo`@H=J$@j>P>U^MgovvcymF$44y6_!n;I+Wz)`3~iG|hXm+J~_ zkAFsvdaz^hX{xB>RP)^MUf`@fTv_7N`V0Qtu_T$7hKJQu{7YY({R=LIda_d+LS-xs z^sMX*7HpUrGNo!LFbecTIwl$Pg9Fa^swWKK0tNd!Rt=iLR?81<+eg(IrHTz|heJ}} zUm7&qu7Cp0_-7>v@zb#JahWab*%F}*gW2po3RyxzEEoV+JMNC;CdGxTxDD5H2@6|FDH2$iX)}FZAV;wPCYsr-J`t?FMoEKmikSxy zYHTKjD7`H?Cp5j>tY;Rj98zca+U<%{hd{mypgk53iAy33==68EjZY6qT{Am zYIVEBgZw&}vgQ{I^~0*A)FX_{ATl0+AcUe*OOig)Q_-vW&+x-8oW-FE1ZkvSl3AHD zA>3j^GzS${S&q}k9#|=Umi{pBPPP4QHNUYGFQz2vJ?%-!ee+V@s{5&Ki3z!Ci;eD8 z_0bIxWR49JrRoMt4qvlu1c)wQJG|C@$y)LIJ@Ia7-|LL@r6MNX@8h`I15&aS=}S>X z5S72F>FT-PMMukGdEFhjH=bQqCqfq~GwNwjCLlJ(XlyrlHqWnV&LZBulFsi4k^YGZ z7q?z&z2UK(U5{uZ_X)5&r>V@&_m7OSMsi;77%ODe<#qsg8kY4mB-HtGF&whg;vw&a zo+vTbB)_%|Q&lFaY1c-CxU!yP(1q`C$FpfRzz22AV6?Q|T48pdQ$~eZ@m%s4qAZpb zq$!fpsC=UiQV6;!nH|A~Jeueew-xai&$}*oX**$#N$lk*TK6X9m6Fu78rt7Rnu7w2 
z{K+L>!*p30#x0g^_QT~uOnf=9(xh9G!H}g4IjqZI30~wfC^#aaMZels$J?H+;mB#s zf;MOljZkf$wkWa^FvK7rs9xl6=cqbyTftOZiXc<8xTT8MD_*NY{tI?MZ3M=+Xjv}C zptDHeS5P-Mx7XXDme>6eBT)i%-j!0;s|-dchPF~XoJDv^^>;GqWh4KEv$K2fkW;W1 zPtj1qQ0MNSh*jv)+(RPS<=#6deCTaxb^geo+s03gV~3x-3G3($4ZD8KyJ*!Uu(A_g z28Ux_Z9RSL9?Zgh#mov)YF_oD}rN_AFcxH0s|Vp!S!3sgjx&wWz{TlV!1Pe z*MM_x%1Lm&5AUBKNoJ|P&hPwhQ8ki@W)lAs7RmabBg6&9A1-kJ8M~hBJu?#Ldza%y z-FDmPyYvsJ*av*hCkAfi>uw>L0Pi2+1LiaaD_Tp^20_Ck&+J|GRd6*=uU1btkm3_=n|b&0nuP_}7V zWFM*RK_x}xNU@~k^~+$H8ZpSB5nbC@`mXKc2YY1FmXD17B9d$6g@y0ZVJlv&kB7(COT^Kf{lK6ejjhBT4&%U&8MpkMJvZxO67nGqt zJc(lj%t?mViElXDYr-~U!zM4SKPe%g*@0}Q6dCn{XYX-0D!1D=@0FccPR3YQr<7^@ zu?l4ocIs|WE~`C-DwPiM6~~!Cir|fCEC7XbLgEbUMaY78*7r78`AJyEf25!E?~hTI z9U`i@vO(CrNF$2KZ^wihcV(<*tr*p5nk7ml%VPAAF%AG=ODIeaIz_FFtghRMf+$6Dp_DD5<2oUT zydQQg#;{coF@xJOZa7(JmHakzX*5D)nS+$^~x zAc~hv3P=|NcHdm!V^Tt_W!u_3eq_4Cv*J(qmQQ+P&GFU#sim%1rnOo23`U}l;>Rv7_R=@~&sqOek+p)YIV(Fc`u#Ruxi<{r)w zs(NZcS4WCRtva>0tW42D#;E^-f|Vg^1Jc)a$&yTxGQfTSKs#i;PKQfINMEk=2Ui4?f6->f%JmbEk-*TYWj_z0b6nxR1g@6NU(WCJ zfkj?dvIY@B;YLcWfdk`kyj;`R$s@Fxb_v6t{+3NOeFvsSn>L!7oODLmR2qfpzesxf9m(eZ09^)Ei(+T^XU;gKZ(7MDU;7CTLPB@h zqd&o=iW@!yuF8M!>~8SqKv*l7hVNtd73n5=aRk1+=hhu*SbH9Hw}F8T%Prd?U32ln zw-9*$v%4VxGGY;kRc0yl29EjgCcX@^^^h2+GqxP4Bq=DNx?pp!3sSafJE42(qtDXq z7^v?kXdofzlfBJ$Z}6VGK3V-$wuVx_bDp9YdS5(Tmd^1qu>NMZRAnm6Pmh37<0#QN z#l||Mw0Z*BbR?a;`@=A>qzG}9x^AHK(IzpdlM!D3{8#Lv(9BC}QB)P=QOD+SjsE-@ zIBqc7-*ww-qQXaqLvqN)LkzaHwHcy_$i<{|BOsL><6c!p^iRA$1wG#XCURB4yrmLZ z9&sYkq3|#?K>$3;k&YD13*l^PC^00=-?nLbIfXj;{79R}>grPlI{Po2}X>x0z-6St1N23&KM2xTqkUF?Y z)U9v>y~kmZPbTe68ro97j9 zy*m_;4P*(BFoG4u14-6tE(gkBpe&B;bL;XCBSGqh;h^~h7fRj~urP@HO=lBUwG+;`V6c)IrzR_RT)}FxTf}K;$!0SXHu32Gw96U_nq^$j zeTia`<@ks!Of|P&(b^4dZ}rORk=S4BFJAES)JXGRpGuzy!XL4Pdx38w)b?^xn!?@n z`@Av6ptD>x8KN^?olPJlmaXDj7b|eHay&hBKUGOZnB096nRX{<_g{=YFql8zsXHh~ zpc($XoqNb+y9G!|VmCH|`HdOgutxWGP_v0o(0&v8J2q5W@@Q)PFSEkNsZfIfDt~jR zO33q+JJ_49^1$ca_yqzxBcI4WU;bh3-^N888G-`4gW8>Z#%*OR;#1g2AMw>KVyU%! 
z5d@tAJx8Z=eK)oXMDpUT=zG~B=czYo1UGuV`MBJ)4aMswKek*!b5$XWY~_h7mBi7p z&jKuNt!Y8P`5k_(cghu_<@~UR({IaeEbL>(`bom89B?A8Os|uf4rKI(Q&CLt9-xNz zphR4Ickf%yJm$5{5>62=OvWk+Oz+h4>ckvtzalN$dlY3V&f6oH(&KW+8d0Nzt~#fM z0?MpWqbaBXC9-P9BBY-|V@a&Y^K-&vGAFJvD>I_@ai{##!)Ms;F6dLc8OI%K$O5nY@DEaR|0&;mJ!zA? zh-5wMx3-mCWuv{-E8GI&g5B|5*m@6af*>VxOvj%iX_s)Py@c9Rnl3|w2M6p#sp|=8 zQ6*9FHcdwkb%FcGtN#$$-Ts4LN7mcU>3_b4J@}e~wqD18i2ZLZz!$0At)8VVUD$+N zdp;N_Sl16o`USdiay{;m@W15X`!AV#<{%mPu<)nVNCRNIo7YA*F;B`*!+Uyq+KAswTaeL{LlPAg`l8g$ftFHrmuHvGF9FtMOT@xd z2PM5D!rNPP(E>9le^i3T$|DD)tELm&hocp%Nc3wXz1*U8N=+!+i!TCI9b}020Oz1uoUJ?!A_9t=S&C{8SGxzJ{00KSvnq(f3f+ z>>a|7B`(=i2=hTXfU4@fhmkU<;S^xaS_u9eJf!@oALquly;0Tyea{tY7gf*E9K%vG zXz3ha{-mXZmRHSPXG;<=8KY&XdMHUS%moKd*R1A@cZN`yxXizf5bxD&;%s|r=3$>2 z(d-*GUPTto0ECf2uB1yWuT#ETNijVlV$FBv`T`%)YTtG)me`puUnK=mC2&T>Qy~UTV-mtApmxU~aqzlv z^TNrwX&@FGo2Q`R_H~~7dD>BEY-k-lX5sE04v2#K2Zy#VW>B5cj|#A!KFwUv>VaesQ8?#zTntl!4)zmI#U9KL|M-YhEq4*N}7lkd+>mHg;8behJflg*bZ@g zfhF8rauV9L-{9>d6!*8b>bNi<#^^HzCeCR0zTGp+x^L%Al{_9i4H~j@j1uWyiRLf( z&+BXFx10Wh5pTTRbGQT-ma`b~dDj;|<=Ti+r3v?A)JU|UCW&sF#?5PIXF+4>r;|9Z=b-JgZa*m|qmB$w~@!LDiorAHPt^&e6!z~}K`j_=hryeQz zlGT`N71D}YfL?rRwN;*i=~S*N2XXwca;GBc!c$v^NyAudCIj}T>jg2O;%(%uHx^()=yplK~ z1(LJod8*wKDR`kVOGMNLMyxeb-L7AtS^iN2a3?NnqH~H?pFqz_K z;bD_-0kyrN;F+Z{vv=hoN)?&4P%2vl1_Mb5sU$`+3>zto6+D)|j1|3hkk2R&=SnU? z+REPwnpq{6%?g`?+xDcJX1GeRaS3%Dpe(hE1wH+x;?f3k+sRxH0hRn-&yjaMHd;?E z*1ZrDY@9hAqVVz6jarXQ(n2*&GqP}0#}4olHNH@}J9->mF{-FGT$Mqmr>Ybozuei1 zNitjie3UvFBKS%|JIRNHXG=-qfy0>zQl(Ln5YmhKyxM2bQq9b>6Pv!mX|N{IXzy&C zQt0CWF>)$1r}?@hNu(_+9|(EQ)o#WPR@w-1T^VF052lFDqF_t`**DKAKA7pYK`bEU z^Mdg-$BH7`fR0cv4=}PS@@g7mestK$e<{!~_WIXzxJB)2Wgc20L5Q)0U? 
z7=2G`?f)n}F9^3#vXKMk8Lg!di7ey;hT!?Uxcw#g)A6dwQFiNT&&~9$OFct?OGvZ% zdj4}y3SnO`$8yZrWnFM!WFE2i{Q}gn_j`kX1Wf!YFL!#2DAdAJ&V@BRk1vciw0djY zTtDemrJ=3Oh(bk6`4S@Gsqz_M{wnZ1vN#^t{{b=g89f+v6_4Sh!j1U231#Nl@8B6c z&#`C8x5t%tYuO`721y0FUT)kN<){hJk$p7#qN#YIt7wj_9S$-_lZi-Fc`5i-On83OXE+((}eX~!yy5z{!UOXhWC`ZPgYO8 zXy;GQ(SSacb>JmDb$H+76$aeF27H_=-Hc#6-iT#kQAoT(()a;S*VrwZm!HMk|w@+{nyJ>7>{8S9XK#>KAW%tPml=oKlMQ zh+drXz;6`xNeE}3ZUPD!E$x)MPaq>dukhOx_l=MAuOWqOL1#+_P_FMz|oQO{39Mq3y^#tA@>7%L`1GvFK*hrS;JbXNdF z6vV16h18e~1}FKi{mU>eo-CjF;K)9z^N<CKbBh%{lA6QvyGO{7Zg=VXdMRFv(rRv`dQxgDI268^j~ zKckAx8j*Z#fFqTsHWSEHfu<%?U7u5@)x(_4^A9qd6FU|GPeb2)F~}Ss&^*mL$TpP0 z9-NHule31_O9O=mG8SS&isvo5I&G%fbE(&gTBabXoD@jAL;Tsd9za<$3WEY& zpigm1^qyVj)*X!8GIqd^ri+>ZHm=8UYKjiS40|LtWe;5<7Fem?^iXie^BE!88&hLHDI9n8z<}VJe)1diC%7>e*lQCu?I?7D`=d*w z(A?@hpK-#Vmz2{6%{Sc+7}osh)`;Sw28L-mDq+Sw{>KAduV+JG2)RLMaO~~aYwfaW zlY^!m)5@sm-l-eT`8=6Vl{-xuvEnq=r9b<%zm2K_^oUN`=YSTHG*McI4H3r8v8!+B zQp@pX#Hw&_a=F1dP=J&Z|GD|coV~J#gt;3^YE9I=o0!s)S_M1sH*f}BOxW1(gL0jm zh>5vTn4od}y3r<@LD`PC+d4p%o_|$q9I9qBE$Yvho5#wPr~LzV1o8@RwG(FbI$Aq& z+Hv7987kgZp}NgzTQ($}Rw*NQhi$`68X)!qzK5r$|Na~O{A6l$0MIEst;k-yQf$!0 zIFQoz&;(L_E9lG3y}AC88$RY)dq0#dD>Ze@D{d$6_}2>(C#=zk%i*U;taZ$+2I z&YkDLtL%F=r*^v?tNz7KH`J+@=KBx^8VWkrq?;3%wVYSEFGMwqwS|O)ZijwW`oiB= z^J1JvMSljq50}V$umi^T{XV@SH>fZyllhmdt=7XGg1C_2u9AN=HE?tB2uvtAN|*B- zOl!~63b6#TG6g3gWi~t>IUjxbG*MdLo^_P^L-43tnn@{n|yNf(coznuevJa`t}fqL1NL_d+ILfp`VzJLKTA9xlYy1o7I+3XqnT`ECcOm z1MtF^0`Ot*7T($OnnVlE(s=(q(Nx9umCgPOiyrNQp~>c8(5^E(qaaTzYGaYy#Rh%K ziiTAaFps6tn}j==`wGQgM4@nvbmG(8mjLY%{NW`<0k-;mx*ghD}g6D@jJZ5LQWvCIMl2Fw+@w7jVI|KjQ#yEBU#q@CQc zZQDl2wr!go+qP|X?4)D+PCDw?w$U*r^I_h1o>}XBI6vU5z3SAix(e)av}k>adIFT9 z;BVxx{}TihnjybWbg+;+!915j4Ky(7b=o{9w}vmKSS$HO|0wY(cU6cs6n%k_o>@lv z`Wa*DJ_c@hh$52bApV}DVFXKC-u&0BNYriQf@ehGxZpiC6O{$2^=D(&!M}Vbi1^XbdiI~FnDf*D+_9G&dj7`HZ-n)J6fcQ>hpz3t5~2@eas1o+zAxV4-m$VX z=HVp!d-DDbSq;!jnj*1mnOLz0h-@k{!8UpwJ-+LPfIF0*+xg%6!K1yWL@%E(o-h7E zg5pu;cPL(U5f0v!S;Ln;Dju8)Yo&Rv?m>3<+p+jlSI$sT5?eyL4cPD7#y6qlS@K@Z 
z-wd3Z5Ow>JZ!7!kNwr)3`!BD%2(O-_6=oFsOEQL@{A*U2aSD&zT^@-NAufe#1gWr_ zeV+5Acg1FV<lil2R(4^Wx>}6tIv2WS$;Qs<&fejmL${#iZ@e4dM(BE*gA*U2m-THY$zB2x zg|BuoJuc`~$fOssYkd~iH!tA0x2I!3(_w4pYL)F_jIGRAM4H^PDCp?LfUwy)39x%! zCxMMRN_ZlnzKu{H+D(m%h*17<*X!WOvu%{AP!*qAy{A7ynzj+()(D)Uw?~?aS*lWzV6xb`@FN#=u862M^Ixq=IDO)f`_*0RV+#s3F0Z59h8$ zyUiVBi^#+oyp5}`Sq%6fGn@kESN+omV{c4>?Bb|a@&;TBK2w{IWR!po_Wo3L$c0Ki zNP+y~#0CSVWO)C2?)zXvE)M}qMbD zX2o7giyUZ>=!3dNW;p~+JCQv@p0U^plDP@s0`O6BxXfoMp#pt6V}8r6s>^2EaZhN5w}$8&S-}Us%Q9 z&H!2p^|e6J8A-l&viL59dVNy(>2kKK>jZo=g(YoxEhkCmS#DU4Xq3Xgr*4?2Lp3c& z22P}>6XcuSNrS&%cotc6rjzWuPI|09V@?0q#tC!7KIWUrqicii6NKzt%&T8EbWKhn7aNwuu|RE%^pAL zM4?{~?uh9YQ?jMbEn4VFptlJRRo%T2KV`Y9XXe0TRutNo$bUvpCm4FWW{``O$aA^hIv_;xy++1Q zl*`F9k+{pMG1aYso{4TtzGmn?094iyW;)6g9mnDd5K%HwXBA{tLLa4MJ>+m12GyHL zSur6#ZzTi-?{D(^euM|mU~Enae`-KnFxz><3%m;C2rk~6$ z=<<1W3>`I$loxF6HqTt|XjK-Xi_vC*qc9pL$AZ#lMvH%nqpyhp-^brd{ydEcoF|Lt znsF3tVw-8>npSyq3_>eIT&%LnUM*&l()lW3fzV91LOiw)&ND~;UScl1pjt2;T))@d zh+r$L>OUbZQ(16iKa$gPVe?<`=`}4@!6l}J>9V+6x&1`__Y4#3kqf%76b|5B-aWjc z@qc)Dd!G9j@qBYfQ|EzI!PC@$ABg}#-R`3+1qmrg?IKD4x|65FY_lIYeQ#FU*Tg#~ zmBURQqMj<|9A-T1$g}JDMI1ULMO!IicU{)6Eg+Vi;)kcrHrK%lYM{x-baKwF_U_VR zIKSoX;}Ua!8_F!_TOf|XAuc9hN8_VcQ@|I~GIDaKCz1g_;f*V#5=?9yMfwd?%`KGUjr>-vPQxrxE49#dDT{2N2p zJUCqv1zEBw+a-6N4Z<1{8!bx>lq`Uf{uiieYAr?b6!8<{i6ttcQpN?jMo(#~=tSP; z`$>M`MBAa|w1;3HeREGQ90LD;8d7~9uNttisee7PW1riU`X-saJ)rN-<4Fea9BlNQ ze*>%j7ccLWzK5KGCreE9G!I_bun_7xN}R*^=I4>a71jWfNlXbyEucd}FOp(fr8X$H zaR;Lv24C)CL?&FhdsQVEw0RJB;TUnHKCi3!r-O%6H_ zY;=(e#l!r`xTmCj@wu8o-Ly>P0 zXkaVge4Cq_E^W-9h8Io>wvp0;2=WoGYpBG3nm3u~qc=qK0r%G)=_phFPZTIQX;_2A zOTLU`hj2;_ZGe1;`pQsN%!=Udh-G@zdb1!lQnC0qZg{kR$lZNM1p_#77}!`aUesnRfujaZhQ zjf70Skw@hun8lu>)8V)65#xmFa)#{qI}V8@+?(IT4KB`qdx*&D(|EFt zO3ePu5{ijAz;I$ZDM7ioWNk$dB)B&JFdJSqZ*8b?vFr}T?IxW z)@1~GAE(u*?YVXY9r5ctCB3sDGtWQ9KJf#NCx^QuGS?y>xdkbwUXz5pCu*p}*`k9} zHH8)UQ4 zRC5Csqp$F(=z|~bgq{vBXWu8}Fy!~FH2+>GFW@)I_Vzq$@qo}pl$dk>v){Oq$sj1z z%cJgiDqy2M;{^sx9W&`t+asn1znQ3h0=;Bm&(@-d#3q@2foa9XvkyPN|VKAKV 
zO9}b5cd#KTw<4L#{$P&@1T$8Hd!=bJCgc+tit6ab#{oL33xJ#X;_MYBK{4JD*Zd#Q zL>GoTZ;FEb)zy~mYgfui5Sx@By563^t`$#8MyaHMqWduaM@N$)wag~5C#ol_em8!s zm@-grITKl_G2Gxvk^^tWlvRw!$dM3gEbEU&ezZF96*N$+y+WVjRnchsJuzsMA{y? z?GjUSUG^dyVO?Cm1?n59-C2f`W~^G}3}IuB&4m;dD)9lGO9ew+pys8*{%b=qM6{TD zUR7aiJXFAC#UKsquW=y)Bb*5!hU584{|fKuT1KztKk`m}Gdc{VpZ&jG>l$C}`>?Ya zlEQFRf6!JlpA>l(g#lhQtE{xf zi13CWLYVN`nA-T#44KL9Gi(jj?i$TcJ)=I1Acp-n<2cyAuu2rhc4*nn;6&09@y{59 zbYNqM7l~oOqd>tgf3<~+LS!IRIG?Fs(|4m~6D0PWPi=3&-0oA{a2FJMeZFNeH{3TQ zI?{uNNa7%9tt5&;#7#rVWUR zJIm^vj!RS0glu^H&s55%O*1n4vCB|IRZV#al+nJSt9dRfDx!7aI-+CSkUY?Qu@l^C zy{NN^Ld{JkJK^Ig>A>$Y0it)%_m)r3wA(R9yDnjvd6E^^kB^>OUAfd#X%wl1!-!-(48s_*{KBDs9rRSVW4#Mlxb3 zh@#pmTftV_ac)rmYR3hL1gD|uz_L!Z7ckvg1t1C}|D71Td=VvQgr2>w`iIkZw$QE~ zyjrA&(-KlqLrjugTx1blPK)a*Fy0Q2r6i?u z(0vA|kf0Z7sXXKb5o#+gfr!j;e0;E0ru_YgfLpBGr@QKaXNY*i&0rRND%*_5Xg}pi z0mb8~bZ&_6b-1#AF^fnl#M`=cM7PbsGz^0Pr@>oK5#g%hD)Gqtpqs-U%4$WGKRDPd z^?r?0vOJVQ9*wC_Y2)B^n?qZQ!^a4u+fbz_LG>urL28GR`+vm3BOGWi5Pc|PCr@UR z5R}(pcKlR#ryL_py!$JO{`D?2+yxtyIJQtlFOs6fu_R6h){MdSa;9>SYGds%`vL~5 z)%H(6VTt&>!V(JnBB|Ety4!uDhx`+&D#dXcK>r$C6mPVGuZdig+Kj!mRw_JqXAV2U zRgWBwulTJ_`7|DJCd%W`B(*aIE=y-!qMuSps@%*i?9msYjaH4SaZua9A$ssxS`$&k zuuZh>a$M7kNQ;ZG!)1m?8bfm-1f^l2 znd7KKM9YU&RcUKS^&R|A7C?RsO8iBt9OXk)sN3V_DaeIS4gm{@SnrtdXxx0RLo3r) zDSzt6S#{s%H!o)RpQ`EAq5i)ublvmL*N}pQ7k;J-UjOoPW5|CnT=eUZ`zf-+@-eQs zqKJmLJ}SJ4 zy|qnrZzTn$v&r`PLL|{ga_bz0tP%y?qtXgQM)7U}*TSSSkcU};qKgoY*-^xO-ER;= z_MxDv_yfN8wQpz}#{3?e)1M*|5=xq`4>6zXsdrIFSAIWq*|n(H?wH=$ijrE^(I8m| zBT*-Tj%-m$_Hioy+nK^r>V-y0FNHxsfI_&PlX+o%;1|a_CP&(6SC_W#py2RkFGO~y z*nDb5HL9w5F#_l}>V>IQ8Ux#=YY;Y*+=W?bhq{_}c+V7oq68tLUc_kQpV(P!1`zF+ zvxFrBIK+Fuao%}TA3j#S32+2L4Va?n7nCVrjcY-=9&99^yb&# zMKBS@TQ96^MU!jkB}zhE!s-e5y!E-k7A$bcpn)OJh_YC#HAs7L+C+l|q%tAXz;Afr zDc|?7u&RrAvuB$r4IB!4>^HE`k}p>h6x6Gl43)BJkF<;I=e}i=yKU9kD7hV?osT1$1(GJ=g!j4t?KWesAzp zD)(%c#!UYT%q&4ojiXM`KTHrO=4H@)7y*8HlZ0Jnl$ZO~=&lphik(UhLZ{%&x+<+GCW2l5D4V==CE7JBwuO z{a*Mo$wGd=IC+I=?ep$LVkYn2;Ev!7c5}D?zWyg}OBX}-C8U1az||*0Z?{)m&OX6< 
z=^OzI*J=RlZ#h#TfoN8VR5u7TGcmJNS28@l&Z;4Ck08qk+iveW{QCUv;rsLQn)vmf zKdd6VEAUlP0`o*yupILQ&bS-!eil897yJev&R9PeIV>ws)f!2Y%PTT|7lWrFX{IUc zvgT==SACOF(Y)EhmWb^!RTrVRZJ!{+{2mIJ@i}FW#K728<=w8JFlKPi#+^-B*_xfy zunxLj`$bHI0bELECFr-b)!KNPHFl8!TojxGSbw!ngxuby6+dP^bRJNjxD=gx=mDiIGCY zirJY8ogEs8$YAeP=b9U3!lE=cJ?7Y06a4FShTF6LPdn>8K+Mk;u%9p|a%M;h(R=Fa zZ#X>q#>(u!?!(ka=l`&$YDnOYU7$y@MS$Y{YR+vDI2K5Qct?4|_NSN2A9SgdOIO&u z8!mQ$|L3HxQKxbc__;#D!b@H$8lSZz*AbztS3KNZ_J9i|1X$-lU;+0e3C)}dh|ZPP z6X0hm{7!m>Nt94O#vJy7F%AVti_*BNkO((pD;|wr>rD_OevV8S|2PTf>|V}yE)-KG3bgpye{eH}0D!W$yQHSiB`FYv9z;y+iiG~g~H|aq0YB|60tsmdjU@No7 zm6;-E!|I>#DL?2?cL-B+YZTaNOARS1)>Y?)N($d02$V>Ii|T2?V=^N1$v2)wBu#JX!N z{GRx9&@LZp#A@^;$o<1f)%1?`EaG!Z_t8>Xk6IUB>q&AA2pO{&NeY`pE=qdTBDC)T zZoETbrK&CAM3G|+4^1X^poO8*4~E0h8y8uspo=-%vX~0F@1Y;5Gl}77_{Lo4X$jiq&55L1o1J|abel5ruX$e3UaB2tAu#nEMq@~fe zG|BvNCvS@6DY1cTG2h$ySpbOe@M&IivcWnK3rovQrjfzSBFOjZoA2h$pK15w+T)VL z-?0y^DDdttP+GRUKq8#U=U5?#{FCE~i2S0|C==#;2m}CS8v?7E@z=X%Iez)IUrTOm zZS4esSVU{xKvY$wy(BK{qaTV6%ROkdby|eZzMFrNacvOIJpY^BpQKXUHRv7eIAM-Y z^-5GyOA1+n8Fx#ucp26vNlbUyO2vwKscs@i_?PfvU<@9Kf-TJemPM}smV#+!m=@U& zO(BA<9L9Swkq<=JvMpXUgE1a5lPL9#4H{ZaqHPqa)sxp263EHUT`hG*-F>izGATB9T0|8kfOfe!q?G0XzbDN-xuaU{Z&*b8$L_^Tqd+Z z5t=u%y}xn&jj>hQqg~^VT{`@l*~$^rPR?64K4#2%Nni8HF(|Vfp#^|LN0XZWO&|Ik zaDS%yiuwe*XgGw1WLP&|MWm5}Fjpy+GAZaOx3uZ(YJD=W*c!QI_`z0P1{Ek22wC`U zVpG+b{&ct5{bbSW((9sdaQp|`zO+tPI!+@&du(X582zE9SiAPAVQW32KA*&ni9(kK zxG$lSJzFxn;vHiYM?0KL&AGzJc;72?FyQ^$#H7r5(b1k}Ihv#^^tc}9;@PY();BRK_6EG@~mC$6v+^n#SAm{jRpLxbwFcx=lcwr1`# z8cE1uma>n)_;Mft$6vkWs>YOkgmhq2ft9!yhy3PKckh(Rm7>fpF>auYFw(-oPsG!# zX!*WRz9e+~iC|B}*Y=BomndXG*snD$F)~*R6H@Rx@rHl}CCdUwHDTC%1Lu+W26wlw z5OANy);EcQk<#O~aJY4FS{cn9V7iUpa64U9sLM7u<--P%xF(OpV}M}BIT&SQ$nitE zu=N+P>**Pq!ss&*xeGB;DCJ9)i|`3!K$IRd6cG*R*@m+Khn{mt8d_YTu|{UCoCQx~ z6}*U9A;FtN)id7F;?o<8&y;=G^F1g?L>WSi#vFaCqtVdQqLhCJPt$k{P#%qwibVw_ z2M8ZiHo=O{^hr=ZN9_dPF@m?#2I!7*BFNz2?pHZyQxSthf|aopU50|6t@~#ts8WO> zV`~hKT}m0m0B=X1ZBOahtQR`EGl|6gL?KSGd0FuF6q$!EtpF 
z-*cNgflXc8$0%~@>eXbNIBbh-ZHwlB*HFRLAQE`!y-c@mPw!Afbe_XJ$kF-7VvIybPY-pel zG|*i@9j*GnJOnEBjsyb*xUb?xx1%yI=hF*(I&^k-8iakfiqAcm-HT``%VA4JTD3JS z(RKTVqnl`&_2>uw^jkDAl7$7~A2llO_bM1NOHUTfz;H&pY3b{3Ma0QOim4wZw(<*B zQ59yB3#&uy^V?W9ZnHC^<8HEZZLN>hju1{i#^0Fugw0=4FnVaNxa;b9v@^xxuk8?Ll0Ln((C=>Osx z3Cr#q+3Ks7T_hwon*X(;;LsC~@z7Zv!;qR$ZmOQ>waL{%TbqQV>{Ckh%vu7!jZeQ%6GJG&dZTYus#XsP`A4XLn9$hQHa1M=;-!sCBmu0T z!K6!LCN2F1JS3+&|7M_+m64=51ZITFGsm zcAZoL3+;u*DpI;*GbLyPP0FpB3>ucvw*|w#qq2^pY}^00czC{C?oR_gx_}GSS5_5g z=)37F`V3*ps*I!e6$A9FB0vdBUefWaFWAwP9rkHUNPs2YVSKJB+2d5;Bx*j~pDR13o>MW7@=7h-iC8y!og#FzAND;nw|_ z1jd5HJUe?@#*TqjI_F`xNNkj8a=ryma4Rvep~nLTmyk3O0ltU(yR6AZUYM9OBL45X z+u5`$>U*x82r)B*7>Q+*t8WuQNU&#_nE|zU*gZIl5f0QyqG&lY0vi%{F6E zU1g%AdsXSUsO5D>t2{V42I6 zUx*qC@p6AV2YWqoqKEZd3*1;0hB)NcBY0obH^2TGWg zC`?CwtQ+Wj9Ds)%sm2}Ag{pZi``2#w zUQ|b)KAAo9V^wl4`#fouYApXcNh+$a)g1Y}PB^IOIL`i;Re_RYmS{JP?{=g9c&RCfqGCK=}~ zi+Ux!U!(m(tTI65ZT-WkEavD+a`PG88+OW@*&Bv{rq&AVsVf}n82rS zhU(~(s0zYkHG5z*`OcF4fqy@&8q94(l9!uU(1=L;* z4z*}T#oV@y@x1Nc5E;VUmNEEN-D_<#zr_jb`&yxa?#i#Q+JA(UFv=3?M%GEY)=awQ zrEyw5hrhzKlT0RIid;V8QTOc?J^`OXYR@_?oe9l31_ZQOBu3<;W z+5R_kwRcBI>8Jm1ND|B3l*92$u#LA-&D$MY2e4k@G=%)VC-Ci$<$!p_?Jsc2cw_Yz z@K;8>-0oYD`tFxZ7fwdS0#i|txI|jhx(G{?a!n`JT_s2op)87tU z3?(Y$3|?#kISRs!U*e0yz!-!*7JiT)xMfpIBBE5BZj&0K%vN_*GLa^JY{+P3t{}1x z4kYkgwnAMsFrIPQwf)3ti)DXFB6WLu7T+z#g_%Vv&|@&CiD|TpYD7R*vVgU&L zTehI*Kc?S?Rja5OR2Y{eu=DZy3*o;DV!PL)%h{B)am4@}u2}Xp#W}8#q8uF|M+d$T z;Z_4>T{gQ5FFx`Za@HG3<4O1I+hT&-r2g-@>&Q{1QP|Mz zIhivy2e~i5d}pq?9)wUtvQoE(T2@NhuyIT?pUzw?{`7Au)Iq9*V@7$clGAN;kWBe( zIg|c2@KMy8>7XH-q@y_S%lqzKh4(@CLtz3{aN@hQ17guxh(UN_Hyk+UL;gI~nf^== zGZS$nbr`N3l-jie_z-STICT=SnRENm;$$Q1F)gelY>b(#e4h4Z@OT$WK)oVZ#$JO2 z_BgpnSR`#^4@L?mJS$xg=xQ6aRz`nczf41D$v+b8BP3;MqEoqy8u+oieLG%qhIO85 zEf-cTP>e&xC0|%lzWhF}{cC+a0ZA;6iYm;6pLpHQ%lil8g>_-@C8fHu_KVWes1?89 zK(9MOQ6yXUfIKOD3Bm~x30Hf&wYpqie=WIuoTDFKAhvuygVX4q5ETODL+dl%Ga1wV zppyiH_h)7!C@;WDJ3-Rh0;^o6Unoqa5oh>}xd9W%p*iJ~H&0M; 
z(}TzT%W$7^Sy+5gU%4WN0ar=wk}=Klk^9K@%JUd>Or#q^uk#hOr+FdS*TROuE*T|XrIG|CnAm~djLeeiP=jL-iK z;r+1S17gW5Xx)Q|_Evf58FE#fm|5M29O9ybp3njxi8r?DK(RSYt;#_gLNb3{2NQ55nLOtKxI{CLuK(Yh6}~#J`}S^`4)QFksi~mMtR0zIEIKw)KPysoC^pN48^R~tm2DN)@V@2A%Rb*u$}R5!h~#; zh&=|3n4YhzaJd44Lv?Y^lZh1fL2A~yK}jc${PB-SPQI?OnnXR(K1f&l)0bUZwaYb1 zg^;mU9L?qp|0Y9tNX{XC(9Y>TVo%={6<*LH`FJ~k0j4%J=+S6m^Dj1Yb*S&!8%8W} zTm4AQq=H&c-B%6?d)Oi;qiYB6ibeE5W5}S|m@70f7ggxHo}Hpm7pP;5Y0Ak4HBA1E z6xWD{>t`w37uodh=I^h?Rdn3GxcneX*)G6K{ojJow$9N1V-Ou%b@t)Ie+9uaG&=F&8HvwOY0l^;IUcnzDfE?WRw?}@y*P(4w zCT9S6d{?}54AIAW-!N~$tL6&7;E;Oaj}4D4-`Ehzwnq5=i0uy&D8w z7b&{%mmIO!qz04%*yv>Xg%(Qd!ygEd+7){#UWdCN;4q}a*?{QQ-i{*6Fg#D_{@di zjLGD;K};)gIr`kk7kEf9ze^F+6L!-9%6OnR9(T@2O&y(ahyUY_Kn@p!GC5_JyKj&k zZaNbrq-6Mq`NIN=;*OsJpcF|PN@&Ko33FA`PVn#?GD~2kq-zE5aNtLPl)XPrD=W21 zG9xJ)PAwlYiOD~4g7Y&b2el;Ci`Fu2BBn|%oMpQ*v4r5vU;FxK($Rj{5Kd4CP?W`C zAydsOW1f(2^fRW2dz70|Qz5V>tkuIBG-uUKXzL^pQ1;0R2B%Pd?LToHi4Kk48+QF@TRD(2ko-m1W)vhSbi z8c%o{e*6d?k4n>=3f-{?_|oLxv&a^gd61!W_7yk-5RnXzwuhOJ=N;$|-Bgt7)=1CK zLkYtUy}%X03db+uxRQ_12{cc`fnmr|cGx={V^!d6K9F?Ah?V2r60l7Wbf+*hj%FzT zL8x5}iWHA{bEerFd#|W)yYB>AxM6BG|Xdsb71kEUYk86q#^ z*}J*BY7s%TPPq9A>UfU5>qvxm+)(ln3mJ$3Z?qQ8}kIDx`bs0p(rq?>&UE!;5?QG?7^xnPFc{;X?o<<0! 
z`!VsTtC{6Ph1Vn(n?*1uoTpMB{wWT`cmI2-*l+*6w~!Y$3|soDTl2jE!M|aUw{voU zNJn9}*smcAuInTrq*ffO9wE1W8+29={0 zI=0w*bIM%eH^hU|aFUqV;}3tzMXIHcHgY^5>k0Syp9pkNnZ$29@_iw7>?VKlIWcgF zl?RZKU^_B8Q4=cAE3vB_h0*xYjk4_8L1g|JX<%NZaRvq*rIdQoGQY^W>FSdlPg>!l zNc@EDL&Mh4TXxso>F0nqY)^TZb)ppLKC7?-v|243}fS}@>P_v?ysu4Q1IXJQSrKCRPAoga8HI_k^8z-TwDSooAjYt z3C;wKHdRpS%!tdsY)jqeh{m6ibM8Y7`Q#0fL%x4c*K+EHtWEhJiof&z+ zrGv=K2?cd??h^W#EX)9KcUKQ<>wP!h@V?~G(Fad-6(Idn%!2HeO$w;7D6q8ZU9^mxPA>GD0#^oDE>;C`J2dYh=+;L~PN%jZ>d ziN{UNetv3q$z&N}CqWI#R|FLJs z)*Ew79}ZYi$X;Smm!^iqzYoka5WG}XF%hwFA}PO~Jkq->KhXQm>PMDvWGF`wY;CaAHO z&ChzprbO{RjQHvY{bN3mU>?^_%eNdFhMv;)w>HnmLqX^It|H|hkQ8oNS^W^q%Yx@T zHk4%qNvNCLn7h(9X$oWBs$2Ap7bsNxE1b)YNBXqaY(O&$w1*Tx{GTiU%%Ba?$dVv8 z$wT}-K<-MJ$p)VYpP74LrxVC?EJNMdQRhAAYLLo-Oo0^|U}T;fs|ECXk#*Sh5qxFL z&+`xe{$8-_H@ZR&@8G9X{=q|pF&ST=QZkqoC@+dn-#wMl^Hg`B%o;hX6ftvWtdu2O z+)*bjtHa?F)iwltVg4Bq_)Pe^O>la;fJMsixbgJEa?5dQH@NpYp$lW` z&NaWL0tN_;$>k=6fiL^U8>Yd2;v!fe2vaqYebTqdZj5rh`iCaKS}UHnizu3 z#1dh{Mb^N=1m?ZScXIp-f^W2==8DdF49;1=GhIi{;PBc!J-@K~KK}i<`6XH3f5FM+ zLt1Eu*g~T*u8&SdbtY1tvJQuq{Y%{dT^4*eJny1UEVjvBeat1JbETmBoB2(gG=G76!#{KfiDT#ql291WViA1Nl>Zh{@(BDe*sm zVZ|x{p_^JCU8By1u2f%S1H3ih&w73od|O@PB?HP;XG-&ShI_UeYrYLGHVw3qy}&K-J+@*WW*(sc0@19H-_4oep9UC8ddkK zSW`mi?QO0m|EesyOJH;riLFx1hyaQaVGKu-oc7?~Q+hFy1Z$*mv)U!#Qfg|ysgFFC z=lO`pJ?n$s%>TI~;aSAr`+)eD5*f$jd;i3pQ&B)V)c4r+L+7qMK~pDTj0pZCKq4o! 
zay}0P&~yo!1oGx1HOWzI|AKJo6=_AdF?QLueQVf>ErK-2eL6|00;l&)rMEbxfr~CL^le<%9IX7@)-wedLrD278>ChOdD5{eg0^78M5f zlH9BwjDjq5=>Uyh2SnP8{oH$qwDUqm3r3NJXnrzD*2$HPXSKEXog zkNL*gsyc&PK<=3UW<&IU4$tt>MNaQ@xkT1Ul;X!wvN6kx;h5($Une#lDKz9usBxIx z4+iFhdY~cWz+mI0WQ>fCnSexvPcOqbkwBSCB#ZQ%O=?+$9V~)afefxLjdMT1AL)Qv z%j1sY$t^uesgwd&V)a+FkFB#0(#iRuRwKnTa5_C$sPR4R)UHHiFcimx`8&lBwW327 z;i2e)R)&@msX%OonKii#hf)Km@)I_8q_)H%H~VewbGXS1 zp``=`M<{4wBdaFpRQnz~ag*FfFgDZ(-uvghRO?@`q><_1qZU!wGsq@KZoR$QfIE9W zIGC^ZzAvJieb=BL3EueG5LL&PRvHLA4vxmC`DpQ-Z4z$2p;8AU#e>t@)5#|;8k05s zJIr*e(2j0V>Wc96ll3>$Y4~s@Sy)mK7qQ(~$^hi<0gu zKl)VC>-+|L;xev0P# z9dAgzn7E0H&|R*0l=e~{q9^#1AF+*v8j>n@Ls7v!8LBV~_8mw?!vl0(8}r7rH%sLp z9`OwrMCOswsGdBlEDuQ_;o*|FRKyfW!IzVhXgA~hzKM}RJ@wZ!rg_urRY1(}OM9G? z+TnH)3||(A&`Y^1mO1eoc28^IK5=Trx?L?EiCHjNenO?cLi3(LDSAV}<3_5cT*T4W zrI{f$BVm38UzS+Vmk=ki7fI<^oBFkjTb}VbhZ&!0n>IF>jM`u!%V~?%P&eA7ZTjy5 zU$ZNI(ZIiPaO+dxF&?QQz%g*~6L)ekSslwwe;|2nGCB_{|F^B-b$;ig2Mq4OZtA=28o3o(9-#o5N>n$+PlEFetvGDdm? z+=7qn^TZ8$@i7J1pF}ePwkQ-={|B>G8rf*+SaNOsx`}Zg$T}49=qxSXz1P!%|9{=` z-*2yJHD5dMu?4@Jd%w$H;t5aHi9xrM>T$6`CzFJ=nOlyDS1T8cf|C~SIk_x;UDD~w z;8!U-Jy2Q%1Wh9_FG6kL zj&@{(?@OAu??5WgV)93A*5hr#=^v0#}HbR2be*I zVx^5TgSeDzTz~k=cln~C_wSeVz>-*yMZ7@j#PW&)EaQ@qiw%VSUgxlr@UVu=k>Ivw z-*HDSMpd&XuHRo{qL!S%78*BoBnFD5r!G&IM!x49xzlpd1o@X{O99TB9QcEFW%^-+ zElYj$4SiISL2(1+kSw^-$eMqnSF)@;PNnb+0rUkg#~#^e?`HNXsXFDhrCt{&p8V<(R{DTZoK2t4CnP<_Kuw= z5bYOy>=NzHeBEvDoJy9rcL^A;ld9OWoLcLEQqmv=vEn5sGTikT2xN-MADT5lU}hSz z6o)H0lcitaS)$;^IDBtwzo$0rz_X|@DT&dgdV8Ve;=1^QXL)4_NctH)e zh^897>qYj9c|k@OJ6|Oxb|gJ=ZvCjGR+cFfPX+NIB_&-KjOq-&DD)*E&HZNMCa>7!Di z#uJd*Q26MqvB}jq6E#8_c}I*2)|Wc-o$9!~P1qXS{v6rfIrEAr>EUg6dlPs>bQtP{ zEUQD&4-?v*&7C0y-AZm$dh{xP^J?@*6_+um;0@GSvcA-uc6aP&uf(BM z<`UEB1AmZc(I_P^(t`)=S$}?Scp0cyXkbL>uKxTN-3QyPzGq0y{*XK!au87!VjTgoeMxJ$mMd#IfpGNkK5h)>|V9^fS)kAIf02 zmSV;^g6tr}GV4K+@@l7|PPexp4m^|*(=bV&BCBX@XFU1x#kRnnHtD@`5m$LR$0QgS$FAFi}w>>ge;wd#@M5G z=+Q3Xf`22rA8rg2(Z|E)LY;17%fgP4lu?lX7;)RiFR=Euy;y-C@b4`@4H{f#ZEf$p 
zYK*tQFMI=!{yU2&&K@`Rclly5&S-d&52uB|ZB63JBF;=(nd1al7sXRo3!a&5__!SK z0^F!sJSFlTb-Vu>N>VF3%2 z6QZl8RT3#=3lU3WJ81&R(VcanX;8Ow6(c3iJ1g^@q4gvQuuNVXF#@NCgrmEB|8eMh z-Z2@8E2eMfRoLM9;(fu4A<+`sr5g0rnGghn4 z8z0m^TeoaII*xo%wOQh|6X%T5sYk|K@wSLb8u&3!0JW+5gN5CU&uW~7S2@BBd9`lEz;Qhp`*Iv@Cd#ycH1fN5SJ+rzVsl(m48J5fSQ3_WDyJOo{P`sU zz4=+;sAYk`81MEQ=+;NhbqVjfaSzv^ZGCT8NC>YMeZt)QJLx}6T@RX|fzlMyV9BGP zi%R7*NA3C_?l%B52$dWi!l%W~(Zjz*UC3|bjjE)qg%Ve~z|X?-lyK!SVpBa$!a5rz z1{+|MWA;~FpevS6zC&t)ks8He?y}nfyH=#oKba=G2Yl90McV)3>KxoM0lRLWZQHhO zyPj-ovTfU(Cf8)wlid?1+pd{xPR;3@>wC{R-}gt{`&#$jzqM8p&2TMeBVyww%D2sy zWvsUZsZL@bE=Tr>!$S`$W|f>TjsvT7ve8pjT5&oE$VjM1+sOl`EaQ?I+gORft6yC; zqtxNJ3Nxz%J{u6Nn8)X>05Ka`$*ny!9b4A!hkSgVMh_J@0^BV*7u;!~ll=yOPff`} zaotC61f5AoL+ecq+7(LTU`&2D`&gH-u56CcglT);f&%CzkHXT7>(m4Slk^*&=9#h` z+Qz)BBQGooWdTELnqh~DJD434qoTc5nD4#&EJaX@_gpQIAJc+a)s8*^B`-=F#u<3z zewFKax`qq4Yf!Vk^w?)s%;IM(lsQSsscyY5`sn^1Wn~Me$%6_(aP)#^#2%8_^pxfqu!1;DV}M`qV&%nu7Rh5hQj=kWZHYS zu^GY}DRY|Ko7BX_D;Q{Yn_VIgz?2q{BLzf>mV3lEF~ zeFUcz6T{SCURAava~8sqN6$92!v@KyKg#sc)j5MJ9$l!G5gT8RXIMkv`R0qs=S4$P zC2?GC3Zoqi1+15FwIyH`y)SMnUmmEI3f_^B6ocdZ`ht&U<$V3iiHM8k;43H#P60io zUUD`B7WPMag>OB-b4YDT$5eX?u3gq}{)%=i8sj=1G@Fg;g*|a@$rV~QVcYEj)j|n| zoQ78TYghv;7!@+nu%<8o;%6ig(}*d71TS6(o!D)gZ_cupZ1>-2zQ|g9JcO$%E3MpI zdQ~h!Jo(hxh+I4)x=u4qGqJsx8mzxGjd=ngkcGT&bWoD~*1&CYw&($8Mlh6`L>STs z0JLi$c$Ja59;Zg+YD6m?7|(1mm3x>RB26V1-H)VDrl!d%G#w`J{4o8Cxnv1|PtKox zTd?}D-!h@@2<|G>@+Bb#c9~SCi}W`n#VN%}Kef=5R|LK9MxmUE&Z;Hy8Oq{99O2JB z6&P;hjxnlHnw%&Uf7(WaIj#tz`N-o?9F3{?)TUoq{Qaz1mNbxzKz8U zk*U(wq{$Z4s>&UUiFLNz?0g%I-~R}C!l`_V2oZ;vnP9PZ_YCDF5gq6iZ09e^3FC|r zveiyQ9>kHWE~5upN+}^zu*_hfJ!YiPlvEy&nWsl{OOgeEq-u#ic>6vL#*KywKWj+$ zy!-DRdp^r8fNYbt=*=E66+IR;OlxtoERa(wa^xsJ*6}Q?lc7M_d?Ecw7P6Z*b|n1; zU(=L6ii3{fkFl>+W8&(~VR8XwU29XXl04@P1FgP;9V4}Uq)JeNm?g`e) zw2VlNzbUFF4&xbZ__H^zK1=ash>rlk2}19^>FkMn3*N)YDa^9F5vE1D ziFi552K%h~S(S@PqR4H*X+QE~%&M3*eB~A~I9(H4G~URJuiioHW6Wrq#TRRU_(K2G zPseyW3+aPG1TURIz}ho4bmd~32E>otIR&>iiiISS0uF76s@WJOa*psA!;Km{fzN{F 
zn5M}N-G?<|u)IJro-9tH5f%A*P+fdSHN;HdfZh&ZhOb6Q7aBa?uLFlJc09tS1fR+O zJ9_sBi^m#zsElYS6Cy!?4ea^gc7X-9)R}+TBJmj=(5ZH}bnIsFotE0+ai@+77XBM< z$9&j||0S;N88w65xLUk?7mVO^dq`Sa*YVZGK%Z!P@NFLDSk-N z^Ll*O=dZepzo zGQ6&bHjB)Cl62Ti=?QhbW`3AERT9=JRsRCrXQgGr5-xN!{%rm_GL;1seVi>iblUJg zFd9@a`g)m9k{f|?>=~Nr2qw4~p)f)}$f-S` zx}J%@|1>jizFRQ5sKdt+cd)>L-pl@zjn|_nUl86^MGP4s5jA?(%{5lTQP`43AW@05 z&MdG4i6a|wTU0yeJD`xqMK_VdgmoSOe`p;xRf4+EsiI8>1%(Me4-`bX`j_t<1WE6) z)vK~c9lG7j8)u-%ud|}$W@QSqri{h;*3}$R!IPXnq7qxdsJGOux9!gHZvqunq-L=xxAMRs5+kp0JoYJ4)LH6;mM8) zKsef*^_Rw2CP2q;I-hG3eAr|*Z?{;WcqYcgD;xiP7$qM+zys)Qb*_m^kW0XT=&uwK z#3Zg=^|>6Z>2UYRTXrK7Gg{^gzJ(AC`WM(5IKl(vU||U{{H@VHlfn30*h;OD7%z1i zB@pofXwcKqH2`}Zl<>th!e!$UYLoZe?kV>??SUsH#q_K}f+W&cBI{kbc*h}DW_k>CYnNzN^eI=mAoVFwSjySdg5f#!mOWV&&0jUVAWlrTHbofchE9){gr+ zSSTc+Jme03K?PEAa?M!?1Z&DvftLMzQwVh9SSE>5m8r?G4ui?+EVjD#XUzZa9_#1N&_x2yxZ>c!X&AM zmO9c2FqilKS&-+swWE)#?CgvmhSNu&|KE@@{aB;k&kiL6Zi;BePR$-vqHqu;aTwqC z+(H6LcPxkcQo(OyZ4`gHFolLqI>6wHRrLmCk@EB?T!0{`J{icNx z?2GQOEc^vkV>nngv!@X+HcI~|O`o4jNQLU`tpj5xZu}#Je+{=_RQmT&u?($am!9_I zSrK8(U@M;sEHb(x7;pfyUh+st)`jB=@;XM#L}o>1C82Js0(IaT#Ss1D6ntB@@_kq zw%(z6P%^pks}TLmA{0ibvm`-TiCT zfay9QBv%3liQ;QfKF~;@<|vz&Jkcd5{ViiTEPqsE0zbXqbUU{M1#cqnt}E@t-q7D@ zgI$$OyXos8ii@=khK(zTdH5nDc@TmR!HaR9@w4D)V%#~vPF0qz<1FklYcr@}D9I5c zG@rJmBzVnC`tZ%f(M`seNyUX;YeVv@k0!3tmw|CEeo*eTVWR6s})uzAKx-?)$q({^|JgTq{xKF_d6R zzYdD2Lc)Ztvdl46Yn-cG=>O(~Mn^*X`8mqvq-BeA%j?k|TD1SWE{}PzG;7-Ui4E(_%}z9_nR6#B z2CN|vo_l@Gqkwr(;>ICe%8qX_7Qtpwc#KBqiykrvxcQ{*=(*sdv~czf8A~<>9m+m` z9m*lSB1PMA_K zW|6yXHvHI<2js%!zUN`9f5uf9Epp1tVr&yWq!u{ zPi^UQ*U`07(4MO3mo_|Q-XMWH6&&zd<7Es&dy=I|u#`TM9F_hJAm#FT$&~$l?jCS= zfEo0e`CQvE4x%go7l^i^9_)HJ$=-Sn_mdRVrnL+TXbvU2`Ne~M<_WnJ1rHA4T_w2H zXykp6^a}{Ff%0;`F z`-1wkSm4$gO9u`KF8q~^?x70!D5a$SARX7wF;#MbJI-sgH%mXrEDMAZ zhcB34{GE~1z1sulAxEUqCn4928#F2{Q+X8Uzws?Ou-auz*8U_`9Df%38G}&#Fyg+`NzZ`Yl$SdyO4FgL_2s zn9VxIvd>tuFB%`bIQ3i(azLcgd{UnQs1d#(bLSWL2=}0;*e5@6N8fiM?W;+W%zW~o zl|d5>(cTjY6NTt&8cpN}eQqnPq3gYZL|EV+HTY3U6^4LMr4e~gU$PcH_X9OFRGb1L 
zMyQOmyJtjSta{ypz6A90F}x*q0sDppgET8q&l4 zSI4d3fRN$RKuxXWjrcmGR94G-ieyGATxJObEt;WBPMu~DYVZx^1u@CteCe12q{YZ8 zeADiLb+W%~bVqx8bVZ>9^~;U|5UFYeBON=*1a{qx-N~rgg)Bl_{Wua07vtwu;_2%6 zSjC|X_(_KMyi-GCFQ06GHxxzc81trhsxX#;Gv;M6ZIayCuc21Kfq`7Wz(lRomI~8;4ni@Dc!=64P}O-pcaVNewYC#9PrDZioXOv=#W;ix^)at! zHGxtv&-)6f$c@7EHpr2@Jxn(hhUW)a5X>b5EJD)6;~nRO6<^zxlv zea7)Js+&Log&7&7cit06A5d}pWgt%8Q3NT#TaF}}DB|d5i007`OMSWLb+g0ON(Wn+*>%J)?(HDG#JAPEHN@i zY;%n6n5s1{9!*GEVw@?_W76%N=aO2h%-nHNtwe|qH6dQkIPMc$X~ka(0sabgOP)+8 zLpfsOZB_c>j4l1c-5Ssq;q)>yELMmZeM+KITcq=JW%0EOtV}QWRx))ntCm+EHi8zX zTMcDws?Gn~?(j{kI@{XxujKNs?D0)Af%qc&95cz8$-_IV|5pEXP7hLq_3SZ1qi32Lg%;lQs!yX<;2l9M4dV;Zsdz5W%0tJ z@(af3q(Q&rvm3Cr0&-vI5DZUw8yZF`!J(33C1uJzT&$2NujG4KtayYoC`TORv$zmy z-{JGxH4}6napF!RYCS@mJY?-hh(2KL0h)$N!nw(4oF@%Vd~Gabya42!w6pwUorh@#nWrtx|X;F>q_zjv7qJW}5c* ztM59F<+$={pAj{ykaqxT5(%YB>cZoFEfn15*%}uaXSWC_Vk z++(?lVDI#ZoHSPpn5Ye7qn5ZKJhY@g{nU*eww!lhm8QQV@Pnox)f_rmnluq47&DFBMc_3HL`mr;F~D*LeD{Jk zX*u9DH=r`e_%R`2I_KJSOqhz-C9B5E_pvGU2LZJkB_0&^Tv#)u*zYv`9PnXjIpIpe8X2Cq4xWgPSxcbYQ9?}Zoc6yiR z9rCB7zDUx)FGfAjgR(w%f82XspCpAkf{~bhORNvAW)!zz6Xwv74`U<|^BZPAP1IlB z?#q(8ihbPX`|E1j1B)klO6(1E7Y8~6d!F7bdfsO=VWh&kH*4kw1u-?~)w*TbhzA^SI9*LERc`EX_6nr@l81D6z z@jUBrtRg40B|SN0Hh3l|0JGvK@-_$LZmYuRuUMK_fz!a4Z4h-byB=$2HB2?kZ*Pc@ zMedoH-t6N>H*(3GfjyL$5$zZx^Jnw6lisxDB}%lCv<`FCkyre`erX#f)FRi^?voR- zR?xDo9HMENDc)}e1p3Xm6y{NlPgug3{CZkWwRG`6XhQp{Z#;Y>pD(Cca{ro;MHJjV zSUIjA#KtTaWFYko?Z2U&b|V@->BLkWb{2X%&Arm$Mx*EGC<;&nyCrYegIKO2eCFTb zDa;f7kT9CaHB->K9QZp-$d}(cs4AEa_<-=%xMtqyo;G{(-^8k8d5X>jewg$Q{CUMU zFitJhQC3DEa$t=hjhuH@mNJ3#%{0?)4t7uxybxI{61|@DjK#LHgqM*7;!@>N&;k@U z%n*sw8=`;W_s+Pb$1Q)@NWf*(aM!t0@V|b7xo-K#w_zGVhFR}cQlR>3`jnNCVj^@R0W|5J&7=H9wxfr{bv-`19JCbf`D721$r3NcPY3BG_xT zER%`tsr3Z+yh6CdSVB>>iX=?8(;8pQ_Yx>OiGp?wbNt87b=V8z~9G+yzO<*KA+{p>*P5&^BOctT3V}$UXn2bFUGT6k0kWA*S z>cu-x9IvO5x8d}rmW^kS-xI34Ex?jnvPw!I)>8sh1OkH9=omduVd3#;5y=pOe_Gg% zqtsXN6@~~F>5)_91C!(l+OP>W{AtDOz6B&M3eSEwPk(fSpu}$Gt3g)8aEk(SpUlD^ 
zpP7s26$USs*96BR*>>cqtQ*X0=ugsW5koMbQJxrzxpYNfu81JaYwxIy%Mz;8Tnn=C zQSQp6YGXg#ZGm_o#J6eoqY%r2gN1E6s}@3sCGJlGH4pOky>f4LdXok}e|X=V2EU1@ zm|Wi+w_hDTO(1~dUTDmU|0#nh8cS}~i4Th(SUt6|QCbuEsOcZ|{=7-vdwTZy_aLKX zjZ)RP^8OCpSu#zZ?yU7boPi@}UY@*;k!;7MV-0DzT`{GSi-;FrAYFOOd6y zY2FFPv4I1xPDhW>*wr0TCi-$NIb-1xyQE4rdTHp?vC#c(wb6IoL457HxyN(U-sn-C z-(AGOZm?PWQyfo?!_V;Omr_x~CgP=K>*)JIGJ{0deb#vX?cU#ZKkvCJ)cUV~6 zdhHJPa~$vYbZB(?XIqm$n+5QqDGYOW%L&U0T-l{3wOARtf4FEel}O-$Y18UT*HO1t zWNWttVB3qaaJKI#GZ>*w1D)Nb(8#ZpX^+^|2(foWwckr!fk=(i>h%2DFlh68<0loW zl?fJBl<0V^JU+Oomi8m2ouLEOot#o&w^bfiEMxK4%#ekEK7AKW*xlu>Alwfg;7ro-{9~#wMA)WkLG8~DxHplmXO(2^~jlID@_KsK-Ynv_BPudh< zg&J>QevH(QuedW>Bl?3)zuZ7(nCTG}>H*?nr%-YRPFVM93WL3+w2ts3l)2qUc) zlsVaHItHq?{4H0fHT<92{)TF48ej8;3_30CYVEif4gr%4UYT{qAHm`)d02bOr=Gb0 zVr>kh8M|JIjK5g1pfY?o(Q;SY|E3AQpOcpSy#ZdV{{4^#XHvX&X$neB8G8U-FUJ)w z{)R0TyF8Uo$nGip*b3h+l87o*bSOu=`TKxWK`MNzwsz#FxpXV{k^o~cdVqe}O3AAi zQ81%AJqcwxVb+{0l+;Psd8#sTSuWk{q1#O@aJMH@E|zQsmi+O!zPmy0@j&AhE8+vA zye1JR)NCX}s?UT{M!9I&WLCo4VEKqPJ{rZ8cZN^^DGDuAfnxA9ql&SlNCwGW>(mbQ z`*N=~1UyNoa0ww&5G658K-&Qs@2z|b1;R8zOW}c+lDtrd7 z_OD~DaUk@2vhkqzzrx^f8&c6>0kEkYy~zEROcnoOuj3of$UPmq!pD zc1Mbe;~mNMSH~qn2NF7NlaFrp1$KU5T4cbsS(nm}11&AF$zly5MkrnaRpgu&3?<4f z!=c`CPLC0_`r-HRu#y25NIZZ8<4>q5+gL5J{U&Ekp{3#mW>q%A8Q0vu*0*+Pt|?=! 
znCd@;#%*u8j#a1rx$W(JK<2~WoTd_8Tm07VZAIj7cUzfF)w~rC&86Ts;47c?4zcb3=xj#YHYjm6N|B9sgwnq*~R2YJ?REVABcQ5(d*QHOCx} zsy0i61&7=FUXiy%g)FVG^V_SP(7K4d-(XR+tsx1JXPM9N$>|p?;idC2aHP4jJbNP9 z%#UO11Ap57$*J<+L=aanYhPej<`MS@f+X1z7_AG({E~w2FQ2HvA)Ou1jf&bc5b5@L5G6T>-hto)K213le{2uyS?J4kMTEZ zaK}Hi-Eg?LDH+9awahFNH?YxjRCr|TYw4XfWfg=Ot+W%56VmGw`T8MjY%aQ(&jd{W z4IH{$qj@KpmG&h43XSMXu~D{Vj+g-Z7^}3Y%rR=Jkj&l5wHNq;=N`c826(*%SV;|FKO!qaJUq zD5W^;UQLxZ^`qIrV9bul*|kGpWcdoABD-+n()o)f(1O<1vyHPywR(B)c0nXR~x8RD_NC~`mezKc%Q|HPU{p7yberCAX+S;D=7xQP9RbJ}HBs!dx zX=uK&oW2;F&N9;jXwZhN7QvQgPai44Twa1oELGtY_Z8c_Gyj7-{HN2`Gx1N4Iiv5z zMs!xNurQqaHLQX?{hw;@W$1iz@LK|;oRL~`UV~z{psZl!tif@@qoEHV5~TG4CRlNa@X1ZCYLUL`1~ec8f|~G@R$$4l4$CYMxzgGI?)WFW2lS93i7~ z)n{b28(itW3jB}*hqZ<3;~E{u6rz(Lgx?DWd2MCg#iqu@ryIC%%J|H>YaymK?>ZnF zIbw*pVglK&W+Ca=Pl_^b>68ac>z1Z4xE7{6MMM5s^(hlbG@#=x?&i+?Av`N3=L@*# zVoEF;zD4SHhPg9)^RX^*8~24$q_K9a<{|i#!}WKonlOV=Z+lfNZ?^Yku))B=`&UYE z(UawOY4gXI3sd3D^-<{QW)=68ZW$I z6hCUPRBVn>WFB7?Cn~sNDF@d-vS#3TpNzDL4qYIFWA_buFDK6 zNyC2-ZwL)p1UduI*Owpp^jU9@Ws{q{GUA(t&F9HR3ql_#g7DtQILGD<n{<~JV-2ak~?=U?$FDNueK2YF(8JUi8HL&MCkuuz1l;}N)zYsi;lLa zOEiaP=qf?@p&ot1d20%JZKf@sPy!D>)}BzXRSP0-<1;M4LIC2VrHRZIStuB^>|#m57&|795pF497=B@@VTsNBPVF}j-{jP@rUYVA20Qj_ zHlL~1B36c{HEGO8PtBKMDiA<@AiAsb$2JM`=&5DsAnV`Ei%I-*v-8!5HnFZjb=s)N z93b0C%1DErJ)MTl5%6KfG0L9X=*CFYpn7v~2eF+*fKQRmHG@7uXJPF*8a!Vak0n_W5U2%~|Mj3G8*b6h?oLQ85h`U{}7^B*jx zIlq}4wyDS-{tDi5dW+4lI^C5T=&lyTt8wahM8B4<%?G5N)^|o=sGFrXI>n|)Q-cT1 zdv%1 zd4{D5l8;RdwBJ6Orr0jZ|f^V9wIfVDz6)={(R2)*$9+uW%Xpa>z9W9-+>{a|@u^`Kf~ za2baP>QOSu7G)^=#0Z`q%7-_&e0_T0Vng&;UfW^cj%j@O?>E$eNDTq{`TgA9X7{CB*PQ5+qXf z98<@g6y)R4=4Ik{pr0Zu_5uV@6bApekJeu|QQjrh?bDH#nspb(Vc`cP7z=c2&8N`f zsF(J3FFRmt_bY^O1uJ$PfwcOLA}qCdY@g!O2ujA$pwmNQLhC#aoT8oomO39C{Z1J) z0d^4LNLUx8-c$V~p-u1P4nfmnn&LF>KF`}+7yK>@BA4w5(*U`k={ z+czeieVyS?gtBV?-9d_Tqkw^|B(aWkkhv0q{=-iDgsUbZ6NkN}u2jvxixvZ+L@FIa zWhp$oJ`KNetRJxn42cEsOut!PsX!vS*@s>yo6+c^u7+Lfli2Y%W3*JJ|B)ASs9_`- z^B)%A3UBHa!wN8URy4`q@IguCWmGT)(N#oa(M}_8 zW9ie#iJHT79L0q7uKUBS*CjY<>>^ 
z7u3EPHOSg5Vn-*mDqYLMqBd(2U|aqyjU7)^xeX0pRknL6Sj)1S!)({oYZ~k}1_iG7 zErlXt*jO^Q@7cjug3L}C)@(AF?jpgl{9eL%ochjCtFLS8=7X#`Gy_?aUOw7kCBp}t zq)r8W{u5g)=AQ1)hZ?otBIlZT-@4^?Ghcygo8W1awixb6Lu?IamHCHHXgcrifRNOZ zIHK3Ojeu!)-^kgzM(qr1lR*E#+XcjV#i=bT50b~y)c0lDVniZ7CD?>DwdIaV7G3dq z`w(vEDGZI$ENW{`@Dn8po=!U%ryq}3M1m`cLB?6J} zbOV&E`BT>zVlt=0xf@j}L!l(mntYk5_+JhOm~HV( z4G9@jF=qdx0)eIvvndX`J)HDQlq$$I^|(5RJ+`ifCUb5UQei<0o*YOK} z)XiOf1yDR8C6kDPZhLiHu6F`^-g`~PF5eplG7({6TmK@hfCcwx-^4T2Xr=mZ>a>s} zbiUm{AxUhv9Yf@bsWM1P4_b_ujfV|ycAZU4qU_p5z@(^PcHP_#t%9H}Hm*eBzJv@8 z&?j5Q$IM)&&ro6+;$t31p9j~x64RD`8h}~w@5j1SZu4BZCvU2zSHGhpw^q`&3EB4W z?K77k^!l~wk7)DOKI&OzC}e5p6i}vI4FdVQ?S(te7!hQZ#W+G-J*HOAqCUFNq}q5O z62_UanrBZG*E5#&jGf&2Sjxw|=}3N+^H*6d_(Vx+Uwd6BZ99yR;F&8Rji&Li7IOo3 zyyi(@mA}d3sA-RnAde9N!PnbTl20SF(YVfr!r>xyvf=V{b5=cZ?RlW|qa zj9YIr!bla&)i&IdE9|+HvsJmu6MNI;)8EXuYDks?Wfb z4U=*{n#!NiiJKBU$p93YRmxUmf#!Y6^9KBoZtCiEX$sb!>6^l)=vgW9ufe8M4}xqC zS&8u^bXkA$+3|RJaf^OtB@}X&hMsb7B7OWRVk|7XF*`JPcy%e|9(DlzZkGq6do125 zQicP48lm@SEipO~O z#GM3#Hn3momR9<1#Jp?~v7g1df5{w&T;frGLTvbolBh%KoGG>inQD%e?l$Fd&djm` zxFK<-C?m}GAldbD_<%uGYN#Y2wOA{3$mU3}YESrYfXJ)+^*Z)Mg)I2C2jLz}pdfOEa-LLNs4jrC->O0z zDeON@-U|dDG5A*8vqBs@g0VQ#dLBMFgFeWVMHRm-lZ5e9F@dRHUN6Z z7^8;Rx|P5dUo}p{AzNB(qQv8Z(CfA5mlN;4B#`-F5kn0J1JBc7zMyxLUPH%5io+Qk zU!s>0(p}4>SsU>^KKi~ow1;+RVQe(a2j$G$JgMJj-pn ziG8&j4BdXUWmWWea6mL$D0+I{vs24nIL)5Ndb6_W%;~g+1Z`4Sa3>iyoFumnR}gO! zcoTA2eAbmNB3Yej9bUg}`kUwX_tW1S%s*-X=)Nb4)IFCbWR3Aa_;0ybT=FIX<+Iokd7WnFbeP z;;j?$2C>pMvT66$l(5jM$`c=-rPam4&7L|Cvr0h)&OUP+8pzWLb|ODzv}@c!;)>TJ z)|nAU+sIxOLc%8bG0{mnJ?j<7_S?tU)bpD=l$6Y)BNlcTKHW`5L1`e*4r=SN|Q5%l!LLokd|u zc#bnc8vvvc%Nzh?=73X*V=ZT;WCD$%>m7AN2dQRUBky($SL#dT&Jh3H?}rq-_!p`i zftLE>9JkUZU-}&c|30q6T<1Hx0?!*M8E4q6zme~1j5L8It*{mv43xWDR)3p+yjREw zrdVD~>93lDJ@k`0Q_6{~4N6|gQ;h~M6zUxK_i;qjcCU+t(r%3%^sxiq=l_n!TFCUQ zg@@_N{{N(A@EqP@uS%Am#=9FQTc6K6MRd*lzlW|>@8uGzf3f!{<8_Q4*;88exd4}? 
zxNm|17+|C@-UNd?Z^U;=JjU;Qy?Vn*YL$+9$lZx2s!mf zCSOB$7gl=26U43H53Mb~N0I)uOQ+~5?o@nYQJUhvfn?umc-i1EqKJDoMVJz&=HAo6A;c1Xk` z5iKE3exF4(ZU$saak|Xm>Z;jgBMIGN7FKXOc<{7D35Wl|@E$ zhcCnD4oC|9&PV5XKf|vWTp$v2xK9te{bGk3B2vM$xvs*saoPhTvxa*jM{J|P&%MMJ zAJ;DRfvFojch^pHGK#*E17-yF{G8sI(SdN_j;Oz zbxqh^lo{Xt_yLu}yDQpnX7Zd}QKBsJ2yV>Nz&Lnmaa7W$@XRV3E23EDmuY!hu*pGl>Et~bs@?Y-b5Gc6Y} z_>OV-z^3288I+Hg{lM@#%4~F$d>KRYiTEk6sVYjyH@9fnE3EY9`I69%(JC%^cziKtF!hd>jpTg~@=pQf6MkSkk+_@pb@MF|j&o5pD|g5Jfd>_R9Q zC7y8C6fQYluqe@>0eJwH1G{K)%#|XY%~jJIzUs1BUGe3Ir6|z_Ro)qrHJa!+&nv~U z$|OAMc?3JYf?pm%NY^_egS^hfhsz+Mv9e#Y2tiI=*3lsG|NT|} zuV1@KxwdhY3lI#{VA}1q_>)jg!`yKDmsJqRwU&WqE>2V*pw3pwA=j3-${}Ar0&m_a zu2r64g6=;HYu9Xe#xJE#(s1s-^JhlodjINg#AP8|f+ZVZ4H9U=rk2Umb-}5utp>%A z1zp2XiBZ9TL$@Z{oieEw)R%?1MC1F*DXsxC$KR!s1f6+79V1QkxRt8`m2@CYnO>a&!t4fgE7S_^Qmzl>l4i z^Bqc^Ke@{*r`Ln9{d#+JyPb6J(J1`h=Je%Z-lCR71O$>*y?NNTxL6?statFk)}eX? zzZI}Eh=8G!NeI`u1nikE$dvyfYPKoT`d?I?V|S%tx20pdvSZt}?TUA7+eyW&*tU%d zE4HnQlL{)fv*V<5y8At+`wu)H?lGP<7Opubf-OM!q5x{70;$!XU_q3+80OZyUJ%6i zg(?$i1WHaH^AOyHn;!9z4Uo*92dpIEBH(p+cDwBdl8hUiTJ0sl?wl#DCLO>mhcLUt zv4lbfh3c`Nm7qF*$`@9?pu1vB`+}MgX6a)tnoy+!%g}K5=>`KEUofzVBF~+eD4GKo zj%KeirzJd^iVdh{Tp|v~QCo@=yMoD{Ur_nd(cni?$3=yBd>g54X~(12Zlt59;}$g; zxi&N~A^3p~_eqrv3#{W?&jKxejrh(MH)YrgCwjF*9%BrD8pYclDsx045?KGefIIep zz^J2nHfGIz;f0VXc@avG|# z?dTwJkK#leMf$(vhlLjU)v}GsDsDW1?)M2og8TNZ)p%elIu1H* zh3GFx!Ebm%RwN$<{ill3W@y13>P8XpNX=7sHo$}Cd{-^@l)rar*Tg`3Y_%S-sly6E zG1IqyR`g|ev6aCn7VNmG3PW@5Ne$nJVKGJYnUAzy8LOGCW)2^fpn%$i5w0^$$Kh$) zh}-c7qcGr4TcVIk1sGf!9IBQyi%B#xXt=^$;cuFd9bSDNMD;>=PF@7pECxFZcbpse zGts<9nu(hfn`&gyW|yuu$mKr+94u36c7BfyAg$%>DWv**;i~udnAMvwN7;xqT=X2B zC}*2kcP6pg4g1j;16Y!V1@6dUd2mgP!WPbYPAvTB*!Lh^=Kkxyuo3t3NQh;HV`8kd zZ4BYZHsHt`2M@XX5tvNx-%+seusOqCuj*)F7VrzFKsres{VToUG4xsCC2a+JNKAAI z*VKT&=zh$%1W_A{JN%+l6X^ah(WshmT@ITIDi&w zq`z}NVsV5SI@*gwWJ$^$;?I8s9F?`GYD@hkjO zU%$Jx_TDD{=+m!s$GzBvgJx4_ig^9?{O=Sa|0$VjuT{-vqz8@PH}n2-ZtQniHQkv+ zr(#XoK%q3 zP%{;|;F=O@8BEaTKQRAXv|%Je^%H92H*^PPBBs7+Pf~VJMoIR|g$v0n+1Bl3Xm!Xe 
z5z!Jwt;GZ(LQJi}X3-zaglJUsXR@& zs=5(fd=bBjDqOy95SSI={#VgZRqI~aztpCRNFVUqKU8ILmqw)75a9IAE6BR7pWymH zBKLa!M)D~B6v*mSB7p?I@o9$+86X21cfeM-4Se7qen4hXV}DtGqu$)i3Jnya3J;tn zvp-SQZ(_p@8PSfBP{<`jY2Lb0_bpU2MrvOvH`_RwmPLW_E7~SHljul8c$&Oq*I4lC zNFH#S(pwU-iS;?9z*WDn6>yv0qKqy*vADoZ!KFfB3|6fwME*-29svwMyuwN~-L8f- zN@?6kZ98OV=X@jC?musULBXfOsF{m5iZ88-FzY_hOIt!$=T#A5Z(99BFjtys9?2t( zt4C`qq`J%$u*K4x5;@DRT@NpiErELiF@^t}zfXjEbBQl_&{X3c#GJ3URE$+AZg*eH__#{Da`| zLX0^IM^-^aGDdG92x5MXF`Qvgqpi&|c=E1XOJC?8Qmk3-{-VvST{?p!C7f|Ta1-eO z1Xp!zWR#7`D;ZfljPZE+cRnUawUHpKZz$~N7CrjVkRfK5!BLfHDi|q31%464OYUl+ z?8jFc8>F`W4KfV}{)Ra&SjY^ml+AMq9J_qA6$0G_8tI4vpa%ND_x*$Ei|I2_)C}Y{XsSy98rbIA zVq57zFZB6oSUU&Q)f|MN23j9Gz4*~1Y@UtUdL(%yAvu~RbG{)lS*5IWb$G}Iy?ff< z{uHbVm82_*#$04K(Yy|XnpzC$N&NHB8hVTVaT6;F`|eOlkLcaD^XTBRkH7KjDNTAo zH1ed7!R3F6R6mgf3}C7ErbKxBH5|`Y`y`0VjVc7cX`3{1T67p_Q_Z*|E+8|`tEF9W z*{J_qjppMPvAg6U;a?X}V{sk`#Wcf}f`Ty5P1r3vL2qUJyIi;mc2FxeKdlVnj8dNP zzVy|ZjZ4v9iq~X8kosCgwlaqcRZC=D@v< zvo(oTEYKN0iof^s35umc00Q=;u*0M7(IEWRz|I59@j!T^GqKs}* zEJ&eNfv|f=dTX5P+3VkHoAHBQDhEq}#Eu#-GmGKOyoa=%aicBQsvWo0k6r!m;SUxY z#l?{BxL+GFXqLrvC9`-aCpsgsKp-{_lxeTu?UQCnPAw;_33b3{5Iw&1Pbh4RnS#<<^klIpK)YH6JJ< z!qcf{&9QBR+mKuygiSGcd*or!KL{wlWI6 z1wk_&m5dUhDanLj)t0hrfyK!S=@}my`N7LRkIwBayBj_p(0es#7jEm`cy#oe0GXWm z=g_U}_cY=#doCl+jCBPS02z9{AU(TxO$+s^L#u*Lo?)$1&*ttoRdYAOquuk4IQU@) zG?w^&3k`C(%s~&z5|{{tBpu1RO8ekLDCP94mm8wFoaKAZ>yeGNfV0~5g@@}bTr>Zr zH)8EvmaUHsdj-WfW#T#iSBT@&5 zM#aIEBAFp*HAIBfJ0DAkLHhoE%9Zfo&PctYa|F%XBcQO6m-^2BJ&v&7r{JOkxDuAC zIQ2esn8X%5uYxQ68u%qxRE-gag~)W&cKYy2Uj^zBRB&pQJNJkWKM-Ow}22nLCRi88XU0RI_~|%hE2X%7>AbcL`08 zCG2d%Z0MJB=D!>fndr>K>YVA(f9UD+V8YPGh+&Ld0Qj+<~I7YPGa#$Dw@@`;eHmk8TornfjLhuB1N(s^p9v@MG z@tTeB;m?SJ=SX&882{V!;}B}E6>Ll+V$le`s*^Xp5*+iE(nU|jf~r;!FYb^()(W)%G+dOr-x$F&dQUPTlmL(G9ECfJUUD%${dWvjx~u%djK>vz z-|J+;L!zhkUzGl@56!Q(#{XdfdY- zbL%tOHWy?|HOf1ci>HU1>p7eiE#gdcp#2j91v%X?UxMODsq}X+tuwSN3@hlV@{lei zlHVGCFsyc<3dj;@Y>v5sB;NuxQ=q*aVJ%BeMKvru839yE&JTuMlR;f#^MU8u*Pa4X+Rby^j2hJ_-+L|a85*;kp@J_1YcmV;y~bf5c9*7!CUvwn 
z&S78#Pm4^@V)Fgq5cs^^+*VOcumLa?7ojV3GFmcHw<%nzFnCybT<;zgni54M&{29x z>Id39iR$&!@pPPgaiYW+eb&H^dpolOGG)>6Pp5 zZC`99BB6o7eBX$8|0nVqVZlKWgOl!6MHWwdIgM);C-h;Dl5}n%3$o+-q^O;Q%QXYe z5`X5l#DoM}?~q@rOhzIe$85d&XXHNnVUrPcA!(3)Yui*q=2s#m?NmOqO#wI?eHRO_%nHNF{f3}s^S^M6=)doVC5(7t{B$GGz}vt3 zi5g-fKy`@qD4rss{vyDd#PBg7XC2ZRrnffcqI+pgDDbAKM30@DWFz`BuItDaVxyxn zwG)sa^ml~~R^{O{x0tu%BckcY@)>y!H}WMnVV+}4o7S)TtxGL*5f573suq;2PSW{1 z+OB+ihbM1TA3v3{3Gc3B<@{u!=(z z&CDsE=T(D3>(MB~_S(}7G^IcPFaRiX+9}W48;5RA;Fq)IbEar-%8%;pwTbUJab-o8 z6*o~Bt5rQ}R?ouHf*u}DakDP)oJr1lNT7m2g`%(mo02UjYAh0COuvvz^+>3g1Wv1l zy7Kls8-`4{MlX`TF`VK)%B7d$P$E(_tr+$FjTRFl&WG!((C{ZXM;UM0ktRu^v>Ia@ zXoIQf)>U6-ImlYYJCd|dTm&3a#<3jUQ@n9oxE=_dxSL1GpYrNf164vIo=t+jckSOO zd*|1D1&tMOHrI^X*cmD7E%VP7=x|_I&I!qJ>7=})^HVuyQO^B^*V@Lf@4=r>G*4W| zl~r~df6rOud$>WT$BQs4+4(WiS|}t+hfBr~L00gWvIE)uV1Z%44C=r_`4SR9NW*JK zLL7bo+u$9HJ;{m?UY=Qv?e}x)N2)l9*mecsg|IQv{Lj3lIKKr}-sVD*mmYB-kYHEw zPPF{rwOs1mD|W05&K>T(H>$BrRqgQz-Jza%&@7gbS$!$pSkZ#AeeN^RkCd(`XuI0X zgZ2hPBC)&(tMG~=dL$7q{t|eB79Y46E`=u~_YNF72d`9P5o6TRVkL^G+ zH=c_2y1o6WOU_Z7fZ0yR$1FF|JF|%I11+t&-12ThrP1u#kUN+!+&+YwCpcP{$8?Qq z*coov?l&R2^ZbxI7XAM=hbtI3;n}}tR~)vW{4#HEvx&~`R&{xsEgGFT)}LB7ux5!2 zcXw+fx6wq{!mBMJnlKK!hGK%}IqZ_6^bqQ@Z7mx=6Z*_1A}bby%NP(f=rTCLu!&%K z`;uo{zJ<7ecUEN8Qd}LVR(%l8xljdrU1PhTaWenlet9NNOU|BFzC<%x@}H@;>>@V& z_7BHSR+&fr4x(udBbq#Z?|=aNh(1Snd)K9D;B*&aDTe3-%i<1WsR&cqO1Yj-hOZ6U%0?v4ZcD(LG#YNIYPA>Jy-M<9wBHcQwR7KM<+;Y)tY=- zQ(SZisWcum9Et^N+gHDSjphW&uO$J_jcZ3DQcs~82{w^;u>K$$`8=-Q_3%eu;Aw>a zX~xkPmH2aC{O!J*_@ATD8&3AaV)9Xvzb2*R1zC$aS_;+y<6_{4%uL2|E1tT~QW5af?~|;& zo^iQyj9;rc6JNMKcXp(Td)Wg)<`t4$8eK)+RmF+mScTLcE=0~UbvR{Ex6#c{7QeyueD=ZA@P`E~Oahux_7d;8xCyj5!DUg+TYi=^9if5H!_X z*Zsmr)OA}`p96Ib610fUtmw>HQ-+?IayCz;I}BN$8GphoV6T|YqlMRN(a@HuhjzNV zYB*3W1V*zNgprWc+$HskoKrx)hCE1v&q0@Hl-I>vC-M57Z-gYDvG>&`hdGTT_OUdo zvtdePiU#nPpNah2Hv8QDf@qj~dR#_-vvbS_kJG$I0Vm5 zerq!!D~cL}$65D0q(JX3BFwjGao5((^({E-JJk>TH6sK}8_brb@2fYkR}H!K$h>p8 zs{h&X&m{x%(aV*Cv#L944&0mZwJmT1JF@nw0@R$0mbup_yb4;9=; 
zI#_+h0Kg*|^m?JP6x=XYsor+dkwayI5qL5r_^HdJC6jw4(v{oSe#ThzZ_TLVSZ_wL z#XI{)oF!&v7j}M4mK*jJiiKRHx%Z-yB*j#Ug47|&_%W0dG%(U?vX*I9r)IAq# zVOM0~Rj*qXJKK_V|>gqnBG4J)H2 z$yC7*cIhDrCjKTGLWqy0HP<)c*}Q)Sz<8Ro?sv16e!gWLp_zFek{m@*FOa&b3AX%oTD@Yh@e+lM)DS>k`H#1^XmOKbU znc{o}gb&=18i3u&^?^a)V8-nI7&U6c6*sz?%QP}l16=j5-sV&?sZ@U-|M-cr)kS*g z1M^zdA>!^6xsZ%|x#e)PUo&8{VAQ(h99Tgwc)8gdy%uc14SbIDQ(xiI1Ija}2{b`- z;H~BjWoeyzLK`e%jKY$#E@r_Xc&zqXP@C&|yZSiI^f^^}ml~NiZjn5NREeFOL8p!` z$Fa)=Q(WKDqvMn02SG!JIFRun_-NJwof3Z4kBR*R|E7-9;k2kHkGsSUs7_A-PNyo|9B%4SwIUsn5x zwdYz9ZBPLP?k`o85sCmDaVdB)AV!#(56VG$4pR4)DA-#CNDAD7KBIz-l*44gVR?o5 z8QZN623o=Lj&~N)EL-ix07!L9v=+vU>1+SeW?tPp10aa%viCTC#cH@3dKnGg3K=QW zm1ze~WS@=&?=Bg{_T@|dYp9`TI6~}Ex?4kPvI*;`Vcn~@skAy^us>SRsX9(T=qICx zT0LZtd+0zlA(8+^DQMk!!!$q@=O?1h7-=ioEDXN;B(%>|y7U8NgV62ln@IkcZI{a7 zz|^w2qcjScktiz~QK-5@b(B`dswbSIR$0Y=pS&ZC-z0$&qezN8&pzgBkLtg0I7U0m zvM+k^0>c8=0!5S){@sLVJ3w~?L`-5h8>p)CVYAeqmMAAprNHW_VtvsD$W`!tXC>xo2b7-$$xEKFjtO?%Hlb#5^j5X7l7p&Z^vACpheluUiDCepfUiGCiYGKRW3f}`{A|S1e9|*_@y~X ztTc@mJ5<#kghvKEDB0zIYw?zBr871S zifZXTc6{q^`jb+3jW-Z4XGTHTn-mK6Hq2Jce42;by>>ot4|m8Pb<&nkW-FzxAN)v2%r7n?n|UiLA>LCtycv@ze(#1P6%+lfNVR^uoqGfRVng zM?_iSDwhRCOBt=6bC}(Y%J|Bt=+3=KgooQl2fTdggtr4gXrITinm5E8vFIB$~wk*`^@uHUJfjmvL3-+lxo+`BH9nd>!yt(W6tN%o(Y^hepkxlkTHU#cOAj zhJkq^(P0&#kjB4~nE&SK5FDL9v9)@Mcqr>XunE!M*m-UO#Kom$%pxu}2i7AkyViTf zx7d5F=sSs2b~#kRQS1&JogzYhzFF;=8r45YvJ=a~!de!Ds!#IQvXsv~pnNB&Cs&TH z-RzQ(bvaS3g$?!j)@NH#tr_otQ!pi6%5sQ5xA*6)XktQ@!JUR66HLEqs@#T!jH8Md zJO}!17izob5&!Ao$0w1H-y`JPgW(%mz(cvP$g5qbjRv_>bM>m0GBCw-GI6ZMM5Ve7 z+1?d_@AhC$&J6pJzSRYKXmvMrcEm7o;)oQftrlQY?7lgO>+D>*i}Y%E6kCwn1(PnWQ&xr1*NHK#G{PS>YhAWThO3lz-Ra0>)v>KPIJKnE zK-5aroP2Rv;KT_nY=mkJat5O%PnBHGlm$;M{$Cx2dK+&&J3NAEY%NFK8rj$e+xkt; zRX2VjTM0!Z8y53l@oafkIck)gK+txlUzDTY-kDV^Kc+ZyLFJ=@6V_d(_e{umqTG0O ziG&NQ8XwM6-s5-Zxnw(eV@tJvZC%^3eLdW)R54qb$xR}Wg-9vf=C$zorssitV%Pd$jc`x7Q0a>E>6+@4YAJR z_%}0jODVZ>GEseeaV(Ju*3a+2gL&#|wyfSU#l(R>N)Z)xbW>-z7Ad{z%XW}j>40Pv zgCni8!La&{92#XrX5#a2K=C=jQr7P-LnK*HEz3otWR26&5cA8lm8S)_ 
zW_w0ER-_yKvmmG!SRk~nq?H(w0L6q3<5MEiSa-L6#E=bR7Z3REWD28*fP_#-0i_jl zGN5Y`z5j1c?;HJPbQ7DCu!V~yoEA5KOGQm}5PSP^gSA1S?YTwLiScQ(Lx0ZryV(V*P5N2+Q z#&!KS_egTdi-Eyhb*D#fO#Z%H$PH=R^O?X7?INV+Bp)GkYf)gKqTq!oPIcRwTwK+B zrAWz{^1PNo8jV3-vG+*?**kg)k;c|)PD)n) z$*>`os^YZAMU&_fsjjf_7&{xn@w%z3RUB^_6_)d}-it~w{{s6y(q7oib~MjM>}XY@ zikdSN|5Fi>q^6$M;!Io7;JSw_JCOmvSS2U8eFsJT=TAYEM~5fz1>T#|m0#fJ1-0;C zzzyi^Y1GgsWZaS;GRE~HCH{XHO8@qCY$}$I+Ox(tqStGHx1bq*hAhd+U_8;@ zAh93R%Jkf|R0CT6zo54bfPk60#SVh6&|ErY$mf|2;sDU%PEIx^I7mbjvH=N+h-z+O z0IY}rbn@`XIL9Tjjiogcx1&c}z~*I{TsTOQdTb_Z?ig-shmbB4iJmM3X57{34gK`U zxF_mw(mymON8C`IIHA3@pC-C9PE(J9&QpE|9T$fUr$4k{iA~dn!Qy28L-0S#cs-EO7=S2Lq2AKRX;aof@IF#X4;Jbs0 zD_L)U>FeTwvV2!_WWtAi)zm%|CgRm8?KB&(;L;~A$`4-b5scqYp1a)^nLCJ@s-&tj z;oX5D3co8b9-9q6Q4C&2Haw5Gdt!+{)=+OgP(Kjz1-!%d){ubMDr!7-*b%^gH8Hge z4fHS<>?#AByd`=9cMlF$!;d(-0>N@)aV)0!k8H zGfcgGfRp6PJQ~e9{g#Wj&-0*((tnvHA3mRvc%>^ge<8@N!jWv;aNs+fuXR_Hud`%h z$+AZqNgS7LGsp(>MB$~cUiVMLVDAUj?*D8m)?2I@u)EA$l;38rmZEr84< zQp9wovqCOM13Jx|G!xoRQ>y`4MSGgY8P}IP*B58J&n#x4&-LtRzR4H^IujfIEtF&q zYOabd;xcrl2A!Oa!+LDjUjkip0x{RJh+WK<;8x&jC=n{bvAsrbnJNsOMT|@j1Td

dShJMZ7fr8)DY8Ni!mkERgZy-Xul=|5TcB!t_<1+L!lt1q#yv`P- z0#(H>3Fg<-flnDox9I*W@BNowm6keXLIR} zf$lhf?~cmx7445Lszqyg9qlTg*~g=t9^XN5%~OyocaGK*GQkJ^8S(6z%1hk}83viU zbm!odkirp=yvqC>MF917D`-4APnIIfQNCF$#vM8f?wz*2&CvA8E&O6My^lS-BexjE zRYP}NV4s~+dnYGJ#7CKoO{X4sG{;FZ0M_f|#5}9q)g$4@c7#+Y@li4m(!fGm25@`h zV{GzK9fkj$v!1?Pba=IY3Oy0~!ack4RXZ)8u1)CAI(4kqbH>g(FNRDskX)v{9-6=& z&&Hy=XHoqv|B4#kar=15%eYF7McEt4`RY!hd|_*&w3AkiT^(=FO|xFe5o>uHX+6Kk zh17oxz3);}OJdCq1b*Ihqh9j|+`;?5=5k*@UTqGX*rnC*__6BzQv5&a#{U|?-`RfO zc1&-%FR`7>ra-=j>oh&IL2L_>=)l+LIQiozq)zr%&WG-Dwc*ehc^IZeB};P$(Q z_S2|7B!N4R-%FT}1R8@xyY>*l7w52PXq?=^HZt8dP+UP;pd94>c5r2Pari}PqPt}1 zFyoOlHE~PuatJJ#ha7HH-O9B4aVe9}KsK=*4dkITVYj&}@^IvDf#O&ys+;;w8GMBP zHZR@@XHs-%2-D}lX5;LDx&ni_(}npBom$9ZiG)9j{^=nYG*GpyaJXww`L8(OttCUG z_|@yIU{8N;TAf~}xvvlF{15|f-idckl!QhOirJ?;AQht!nySFEf)2G7ZPUjZB=fMC1bQ3=0H5q4ENAvSY)XqaUvv|1;yucq=R^GC zWqNgkyktA1CuO}w1uiU=+N2+TN{!7GW*cYRSc95R3xFES&Ix7XE2WuHGj}W zz4Izu#vEUf72q4X^aYusy?69YO;yTyJy9qy*r%r5A{p*UUbT2k z1sNaiD*LA^Sp88qWXyVY@{D4di`}gc^_5x@q-+}2zZu*^dnF0)8am8-2IC@hMJ%@l z0R3HF=DP?30FvbXsA*9MAQuIimdlX7fk)qj=WJeR-Q@8EW;o1+4eMI;5T4Y2tflT~ zWniL~iL6D?55ndAw$(_0d-j;@tMSCP z=e0JzBRW`mU+8_oA+GhXzS3@U{)7SoNfU3hx8HeF!MQ7$FusJR66fErS;?_#`eqN&(?YVx;u9^K$S_HS)aJFP$gPjVmb;k?Lb^A|nI|*vnRROAo3% zH_4kQpMzfkoGU~#H`^blKb(HBezOds-S_$L)#ZCq^DUM2mAk!wo#9^cbM)%|{w(^Q z=aY*^iUeOD#NNLwln#QiV?xK(Fb|O?8>gFdh^A-4`v$XT7wtg767hwmUl`EUX~5~F z{_O9QfkVD3do%AFZLU{v4@DNyMQb%?*Ttof)Il!>+=-&#-h*~9q+~ou9r~uM{9@=f zl^{;TGZQEtq^=eA(k@EN@T2V7dCO0unx;-~=5H>!TpLW_AL0w9t=98)cvF*Jx9J`> z?)_jf!3T>|7SWyrx1QL2L0%{y0yZ9@E2jqTAu_O7cU`D9^1B@B$-&N%GCQN^i$bAv z(c?u?_MT#7E;+$DNL;sfaxJ3kZ86kx^aF1Dmz75*L9 zNLU?O#$G?d1j3P${=)(|5`5Ab_}n4;zWk$pn$Q0LBmUT_bNbkaa8)&#Dm`^mEEp`% zz?|Z>7O~nc+Y$2KN(voG!*<{FAI*8?I#8E*r&T94NCI& zjjgXg>*~E)$+4;HvG8_&{TXar~Zlg)5^#y_?&biY4o~=W*#J{ySgUoH}{72 z;Rh;EtIk48JtyF8HA}aAr;C$gCM`6@o2Y?vg)PitwaJU~6K1I}TAF1ooH2nMUV7o; zN;=EFr-MWm$NA-0wB@U2P2rxBRL(2P4~5$ql9VXkIE4lgQfG=b$9)x{;@F`QDizUS zTBwXl(=D!_Rv}N$g3zNmMeS7wxZA=IQ{BeXT+evNA0@(Rjj*2 
z_W8Ssp7wjTf9~2yCnfMUBn(*ErLoLTE-j;2&u2poEu6cjOQpD0yIV}6A`)C|W3-6W z(Z%Fg$ZcGfDXNwZ3bvW`WV}%>bL}>@D){K@z1@`|1WpC^6$q13B06Hqp9SXc*Zfo$ z#8n;t2vRi5P=oJxal6WS58FYrreJ|m8 z7x=YF-g$<2^B?6PX)s2bLHS)#7T0Lr4j?;81KvW&)tPe(vQ}t+Z)|DSmXB@xlvcdJ zUIWc${CS?qMhUn-;r+P2;ms1gJIFbIh%#%3CuPb}^11nq_(G8Tv@X_#?@z>1HGTdF zp&%)cm+q1p@-ARx`0iA(Jmzt5aucjmM%8#Na$+pkl|aw6yR~K2Z!Dy1)VWoVL)f4nAOFjf+x6Od(gwnNstoX&mu~1E$i3y? zHxyb;6SL89Tl5I&)6-d$LMN8KCRUM~2!zxq9ek@8@)j&0T)u8t&WVXBiEvQ{)dxEH zF>?c_{Z76cA{pn{Nh=p!!JKm;cj(WCJ2k4x?L!kY4B(XLjg7Q7A-iasa8-L%EAC6K zK>G{4@B*<^p}Fbxk6!>9sdPqAODKptr_B&r5QFe6lx$*Sh>aU`zoXdB9}pqBf+2uz zAXGtgQq$(@og^v6b6Z^#h#u1k%7~M>!hf@AcL^^<8C_bBkh-N)XS}_Js`WWN-l$hO ze+M@ldn1Vhj_{vW0`C2M??WDyQBk6qOnS&o3Z=UqHCReq1~)}Yw*HkXT`thTPMJ*0F+TR~f z$2g9{p5Gb(_<}v_O6vbIlP3iMh;5hyfSTDCy#`hF`pM5aapeZT~WBI~9iO@~#PG^tV zCvPwADzi2#gBPYcbj9;>4Eto6lEK&fWcW2AW8yV*7P_%F*&VMs&9qzo!{$*Z%zA!p z<42G1>uJ1$5Yx5;Btceui`8Mj&M^qhy9ZtSQ$3AM+hv*gWx+aJxIbQY%wU#77lAE~ z4fc2j-09NNHC;{!GD|tQyN}d08AtbBo|i9V(2O8%WqvtqDWL&gwS{&w`y&ieDiGzo z3uBJ_a)hKRT+GjuKVbumtSZV*pp-^elLna;Dx@M9wI!%J{H@qC&))!vn&O&svR@mD zRr!7?NGNo4$0Y64ky^{ImGl}D>pMTP4#;prMV=|d?a*OomrZjxr6kg;|7iU{3_rbq zev{{1m;N#`*CG`_&_bei5|1?|2~;^8EL`iy2KQGf@U@S8iPf!$KQj50qKKSb>A=Ru z>b^EmDmYm5pYd3&10ik)=Ic4!^u8UVYxw1#!^$%vsFJTWfQ@Iy)=r*>nBs*#)%fQ$ zdtP_?q{*+%x|Bkq0Fbp?LVvM7U+t$UlB&7wdja}Eg)50fYtNjC*zK=DiMQcb!+G=; z`5R~MH|vk^)8Pa>E7p4KG)GtY@Asr1@uj zU>>;ioEB6w{Q_~|8wTsx6{orF34d1%u{l>{O;gemdqXz81v8s2udXh60H&rpTL8>L2e$L?NG?AA(b!4x3sRVO>`A(Ba0Y%_7V@dKX z)=M~Nr$ZG0m%_Z5-!b;~o|`P^@S-?v@MQCla89ycT#Jy>UrZnnzNVT6Wna`u;)REF zix#Mib^2H<0%;k722K{V4rSqG*dUmapso^RAh-(E%zd-_hyep1!&o!0`5_-(T(NYu z2D2R-9-V0XCqV2l^a2S1d?QqX%pV~h8T!iQG_77PzLBBgbU}$uSSFi?Icylc3b3&3@YZrwDHF<5Prk9Q2R!~z-<j*0<@|;_|UQx=SkQL(eusqo-I!xQ9&Kmk;{4XkLJAlzy8(sY|)CZj`o7^ZGoQ-!4h@kXvcj6_72g-{tF zybI;O*xk_FlVAIQ%?tlH;UTv~Ro8@H6r=_?s*s8|p*x>BT&%$;h2 z$e8fJa{PoLccm$p;Xb60Q69YxtYg-4&^eX0A|VjjQDH8S$gj^heOF_DMqGcRle`wY zmS{$a&Js7Niv`j-|L#DPrF3cT>o+J@Z$Odblbw;7`xthU;4J8D?LWZBAG#taY;%qJ 
zg@HvGYC)f=L#vG$vpS;@2Nf`i?(d588p%vYX|66<>JV7Nf<{iNfmaWWS4V5+rA?*F z+!V3TsjVic@_RU^{Vmb8p} zAC<2)lCo6qP9FB{QTPr0qrX_`#?b%h%=2vj`0DLV_`hu+%0P~#eNRP8-iC6MGo#j! zY1pd7=fz_-#haH}y!HOxkxtL(DDl(1{=+$Mx{lfcUynjiM0Ef*#v})^2 zdN^o4+cR7>D3FY9bM{T4dw0R_I~MB;=x)O0gBywVbHGLYX^m4sqT`+=Ns_jhT#X2d z5KOA2!sMsuy%k8yAQ*%@24rci@4FQ3BSZ*fB#$?o!IFW}_rP2VXZjYD9$P)dX?K;T zo3Xz{dYcK`+MPoZ6W%RuLIL}b%TFS}@uUmU_bX{5dnHQ)@B@S9Tt;OcymrsIZ(mkr ztdIjZW;=om>ADlBrN;44Yee4_hw`^p0-ui~#YY@C4vg)H&q<#{o7aGvih^K-b)?VO zl$@HtbL@G6vC%9xF=^9O9!RlBiWli43`&CH!vc;Y{EVKIcN3Rr7C4lCYbsi+?l2##|;8!UJ>)}IOho~)U|7xA$7h26B(u2^C z{Vmg?Dhs`X5eODjg_I7CVTULI#$XP`HzJb|V^lVo$r(zwBF< z&P*xi1@_$|2i=@phIf5Q%R_{rW7i#f>x8GOBK!VUp{ttJoEkpE&bZ}XCG}4J6%b`9 zf?%=VH3YoNLC&{zm8uba$75+UAX)o4cgAYD@$D!V3e}Hzb2VS^rnQt|#|u$nNVKDIb{Fg;y6s8i4T z8?6?i5+aaUL`MNDkfGo5mwEginZh)=+I+7tvhxMN^HhXj0KMuJhPpyY1+^GL$a~`{@H&H zRJQ_BN(aTSpwBa(&RW~c4dr1VuT*?xJt&9iqLuT0Ii06d;m&E{#}&{ zK7P0!*4h4cTZ2ZV^+wi{%msIF5< z1Zmd2OPG9B)&J|qU?}lA05vje%6@Imh3N9T#S;3Tlv70iWzVQX^Kg^&?3^QvU0&Th z_x=N|Q3Y_}!G3aS=%H&UhnZ& zWW~wJhrQ2x-ZARK^9P^7QK!@d6IRW@2tn?U$#av!&dAEf>8MatW*XGEm)I)jg&Sz)N&g<@^(*1;_s`OJW8Kqffnx*3{ zMrfdbeDmXRYv326-Vn^y8~4Yts}Eu9)+e@nId_DB0f!r!a6NHY_zeV_HkNBSq|CMs zRv)j*LyI>FNG33WOBvnNHmV>P8;^Ye(3NtWBv==r_MGG2D2r4ydVnnHq|N?)A0!Z1 zPEXQbKM{t~bk*itWRqrP1h3vGs(;gUxNwz4!!IOUgKQ!Nek?Dv1CWNS6VY#23WGAe zG<(?>`}g`!zO2;+aoBO`<1(*&H)53J^JDW0%O6={x7Q<6eUCLagU)h(lUmY%&HeFl zv4q-cg;r>^Fo;UKG`WcqEM5|v1ZEWCpzf#ABHpwDTy3G1mbL4013$x;JM=3P&CYww z08h;)#YgA>imk9}q+0-by~PuzdfhYWVwI#TMcUE6-D5j81Wwik#Gfot^70|=b(gU< z_~`Sl%lER+oSl`kvwMQ7M2ki~l_LzkTWX5PtYs2S|KQ0PBU9^*>nIJ7blSwa$?ci8 zxY%Pe-$YSLjRKW-GC5<^DVdi8a4}(sJceIWLc%OTQAE*9HO$d?4&`t z8p_IvEac;vGbT_WYDW%F+(i!0el*aN7Gb7xi=PLBd9nAA<$EX zsXI)GI~0u(5}vkLw`RWnzW4XwAAg%R`^TDMov+y9G5(`z zUr{%hYgOb-Hmlco37D_BTbNs9`8^^oo-roM zi$smfx-}4RZpSEzd|K#gQt6)*f87eFaWwKDu}%+KGt<_t*j#fG`UYM6>$%xTQJj~_ z&8OLbe{k^cM4P6>fP@yTDN)cUfPh8Xy6vHId)`vBNqQUx`ff3{ zOtI}-r)}!yh)z*_nohj4u@HZC62^cR3%lSty}5mc{R12q?|mkN{i-Kv9W<;cK_frb$$?1CU3>VEw<29=kDGq8m0&j4L 
z<+&gY9f0a%p^v(Z7EVxT+;uIf1!ml&G!LknbECyFet6?VvpD51%OQ*Gq3*V(lP<6M zuKlNYKP{+Iyizr~?DGgU0e=^CSQ2%(n{wF!8c`D45j@(N{Me+PXq!{wLn`@($t7XUE`(SX1tOWa-3WV?3^vVIys$>w`F za{R!aDC$Y!^1O!?d>$j*h3R^IUcPhzWOv;yEH{pb0;hbFZap$E#!yLP4wY9AVo;A zl6~sDz;Lt+Hi2P02&uC9Hk}4q_vA5*E&owoCZXD_J;c5t2s$;gx+6%_b0UaH+0ZT_ zxbK-v3C&;4uCyZv4^#U>-vYhG)riU4;gd5-n{AeYWkh&yDC}GFdn0kY@1o!yY|vU4 zv=5hHLL2koO=qfi;+z*kiHj~cP8gon2GODtOLCS;PALl61irXebEaR-%coK}q^GV_ z7H0F6a8kN__;_hhmGSMp(8lk2ojfgQHpm(q533Q2=#LQb-@=|@*e~U131Nv0#b5=! z!TCXdX|eb8k^=QavKB@Itx;4y;>L^lS`!~gEQ{2WFc%3sR;zCw@SxS%+&*+9Yidb+ zW4)#4GAjM#SJG3d5v@^HGkR|SC@o2-Bpbx~$j2*rwzjFNcY7O8Pd{5_A3XBWkm`VV zhK3+!%|ITi!H{ZrI2lq)uJVt)NvSO;YoS9{BEd>lq!>J*s)oFL4;(0-fpeP+&{>s+&p zz^{}j-i}%M(3FZ?ceyGCi48~gZAGrAx=EIkkiv)1YT1TLN849}M`1)TlWP2Q;Xp72 zrg&!ozlW3AzfL`p91Nh}?)rtboUHawarax(C*BDO)n@MG2)>|oyau*7xRhynM@!#o3r=Oah(k37O$zJ#VzxawIF zRUP;DnOwmz(IrMIQxd<)2Gx}Ttfz8R!q_jC$9hZ3@$SA_Y=mFxO!%!ygvj8$6Q9;< zbGokY!TZDy-~6pjcG!h9`A4##w|R*5a+CTluYHQ)MK?Tnqi#)JJ5fDpZrn9YM0V`X zCL!cdYu231U9tl70*wW)MaK5R<<#v@N#Eg3*6 zWi)yk9BY^aq>*q42}Kgxc)S%v|3D~zeM2leDnQTm?sR( z(>SGo{{RwJuD&z>;VWxJxF$JxB|7#dC)`&OC&MH)m4MBj!K@3M>XM{QQd@jW^YC)G z)+}AIxi1;2ZDc3F<3e~yWj5yr^GL~ey0+p%qcA)v{;UgT{~iN}Ci$rzmS5>apmZ#8 zUO%qLZz!L&r_sX=y}F3pZW>^$v^R8o?wz!g88tHFS;Vt6b*f<=qPTfsd}c$h$tF2< zzT!WPFln7bc4-v;eQ-D?)i1S zwyjO9#dIg_kEBaimQwi3>-5u%vi3Vm&nC)5-9Wd?!(O*3g_d}a&ZzO2kk6z|csl#e zLks*-xB*$+hGE`~IuuItM9Y$~sTFTx-GT$5LAaQOE2!2kb&(!)3U@exHZ-O}myFSck$PwCoHv@{=) z{qANH&N_`ua#J+yKG8x%B|e*g2Y;b6uk8=VYn{lBzoD92TDzMwy7OZqczAdu|gBz^rA%Kmh0lDdhV1E>{SB^^ycy9d`4GAJaD7>DXZk(L-`3>A& zUJq?0Q8d^)sbfp49hw+pBS%Lyz&qsvzk9Ec*6? 
zbI!oEaIorp^grcAI!ZVUxp$_vsYW5upeaM&P^(>W-P`3uz}fjmQ+%N_QTr_lwegBh z$zTij`V@dvs+}xBqT|*bfw(!B98-i!mrNps9{Ort02L|9N+S~_NT`JYtJ4o zFpz1^iQN@MY!*hfeWe@{;7-;5c4A^G{`2*&iU))2yA*AMu@`^Q@IJH*1*^v!0Uc6O$j7cd8F51)@93&!bbX7yB!Sw9X>5}kvD4`g=Dbh3DGCh9n zc+5yA5p^Z$*GRF>>&+4FQQm~sz_V*m-IMc%=nK#w^u^10lK4bh^8Ng#Q+jyM zhSx!Xf|X4wO$(_L8Osv20k&AGg<=RgW3@vG05zs|%iW;HnK5xE90!~GTTI3xkyT&8 zOJ`I0Cv=N;*jT6UCileiAzLKzg2yT)<73Zr`gwt$!o^A{q+vQEXvm?(VRogH)rxGg z%I`arYws7*a29$pDH~FGx?!ikBi&Pw;AWJ7Xi3r~uz3dB6p_BINuPckcuFj-Vx7b% zJ*dq4s3>qf`WK%4Of}$W3y3((ORr1MokRkMcyy z1?QF5XhSzIz)3ufKEaxs5)i(+`eG4#@I4)U0yw|;vM0ubV*}LT7(1SVApWN^!B?Q$ zCrl@PXV!0aD89QM+-C0!9K9|OjywTgo&&oXfs$hr4S54+&ffc!Kxjwc9hUr3Db<)E z{>;*S8f$XC$CgAFaP6f@@CFWqt6jZV+9qFT2eJIxgKjiyg?(OQ6aR?uY-OEh9E&Cz zi!);DV8xQG%#$1tO~I=n`gr!p4bS@8zr4jVoE|-R5?QBcDrykt&K`d_f0boo;YPs# zyIm-7$@W^n@^j|I?QlFiLMmFkh^IYaZ?i=f&|jMCKBLhgd9Aq7z!qDc-5GDekTaI*U%xfDUZ+8 zk@qA;E!F?w&gV*3%@bL|+bL`fUy7~Xb32|P$ll`)58I0M<8Hp-`}Wuas?|GO@BBIuW#CZ8nP1cbqnuer=Fr#1-*RItKbtsaWferI^(va5fSu#(Ux5i#$)1Las^Gr||_UWj4 zy~lR5ed;z?6vZJquTrA*>MON|{ONLbbhnW9a9eoMQB4|A^jW`OM zz54SX(RRYOlB?>oA5`9Yl2=VvmoVF(&7PRm)jV&QTOb12j2Bs#rM`I_7ZZ*Dtq-Du z40(h0v*_r#zTC_BV>{wKk@L-&3ywg;y{r!SZS&y3V?_Zj#yxdEJ1ewk@^ zFnK}KIo zifCJS!Vk~?4$Xh1p0d|#G2enuP=UV;i-?4%Xo)ls{GO@9{R;5T2%9`%_`q4URd4^h zMY-Tk3q&Sm%sV-p8(Xm!;a_LLG2q$x+IFoHE9kHAHd?)V_j%jo$H{vdasAit{UlSk zhOAFrGFI%#DULb%360$nDm@_k#751gchf8vO_?pNW6Dcm-?zA%P>fMxd_FRYB>sH% zQG}1HU0P5o%yM9F(%6U!I-T4#z|;)MzNc^(Du#u*Jp3v0dTYLUUQ)_za2&}`wx_og zg&KfW#^DB`TZ_D4s#M{ErRsPp%C!17U&&E2hnYvPOE>!iL#lZ`xg}tXld5cw{Vrd4 z%k%P1ja}jbw6lSRUR$j+{;1^=S=dTbX-UE&sUHdRC+@`wSq-(8lRL4GUusn>euy|B zwqX1?PfLa;(43+_)%@f#Cn1HTaf zFv$TlXjOwAZSkF&E$TeDPA=Vor@@ESI}1(oU`#s~C=;e?a5qg-g1q%@Scu>lsxo-FXH z!MUl@)uul6B@4Y3yjpSWD^B`9B7=H9>A>dqgyIx z^EmVWCb2i@j<2`zPVOnJCLfb_KIWblGwxW_9Wsi|9s1?Ex&4kO6;9JB>-jkGs7e0&Yllf7 zI=CQtd|Y7aBnIKo3LtH46F?C$fNaRe zl7)8PAsQ7HD=#~8cOs=(u1wA^$20oOM|x~eTPbhG5?D{|MlIz`VQDl5cc>)psz+Lm zykpBU)Hg%zb1(tpc5n>-aVsGhR)Prn=sueJ>Q(=@nrOFFA&h|Sl3)buAUVaKd2z8V 
zFFZVSRSL=1g`cvU7<1li_1^dJL8G1*aYw-W?-H9U9MeJ|A)+TJbcwU7?N{`&@jeB< z)%1J=-t`Vw%&|_1g`C_!C7N0C_HNH8s~wQC#l{tqFZ0vpdf@qz90Muawcx!~QM-&J zc<(EHM}4scwi-(ZjgeE5Fglzb*7Ms-ow!feR`EckM%@lhV&|3fdZiZe2#<}-j9X_m zNO_)lTw$qS3c^hPWr@8dH`OCF6Zd?IXd6$|r zkbes6=n~JwlAyM$1MTHC=%(X-jok|?i!HHC18dTHS~SxxUFB!;g|@gm4-_^k(-0Cz zpHzYNGgd+xmKWkl<3u%(MT*lXj4;ms0-8m?=!dni`3K}vGD1NE+ki^dxvnsU|RxF76y0^03rG$f5U!4UKIESVcyh1;#CP$A7+i42Z7X zue*C)A%6UkHG6&1$ay2X)a~-Wdd8)wv$7U$h}_l==L)C!D?w1(KqV~a zHcer8Tc$Wy!KC$>Z|t!DJ=4^S8z$iJHJvJi*Ifz^!^udGa7s<%dJOL_)x4~lL&C-bu6=_;!(;pP0KuRy0!G~ShmWgaA0wk{*w@Wgx+{(+z z%+1*9v+xM&{LYn{J>;bvk3nW8AeSE2i8>+$(Vb_YxAl<9rhKMU&-;DV5s_%yHDmfZ|nCmC2*H0&QPgUf#Q4oho z(Ud4*6mMM%{z6#|YdlhVn;zc3eET}n72Ggi>;+V@EuLL7L{rVfjrRMgO%p%ogQjX% z+!=W#qK(K=C;CtIo-i>6ZwlGHGOwfWDl2o__bF>?&-VkBuvZ212g0Aptdhz9GY-GY zJS=?<`hE&_0RQ^%bCTIM_I&kScP@5&R~vp>SG9zcsf(=8OOlY;kT>X(G8BV=H`R*A z%Hkg=6VihjP!Y(fIMhF@l;Y+s6U4VAHIx4ehYsB6uD3qmT&{NxD%HgbzN1lizM4~h zA5&+REYR;nfhZ_XY`>9lV^`ewHPQRPPjLch1Z6#ra zvj)^XDkcj$a(hWDxw&_o+HU-{WbbgKmyn%5JeGQ@>c=-jUOkW2dMH4rY})uF?)E+l z32cP3bUVZRl$-F9^9lPDNA6G3t1f>YQ+xIhl}**MK5*P;kbuuH`-X~w4n$T8hbvhE z*RL0rBS0j>7JEDoYB0D$&@Xx2u|GLztymVXBZzrJqxwZtlh>rOf&pO!GYLR4Quhgg zV0u`LsTogP|0f09H2XteQoLpda*Tc}$8NlN^bs`}g;4K0@N5@K$7OT?H@ZF40>7K>2 zfDIMe|LBgN@8N#V?bCBJ(ouoW-`4Y1i*4SndPk;J0h+WsVb$nK+; z`+f|aA-l`ZEZYO;H+#1V1>6H8i&gOtUG{{md3Cxzq4)^0I|*z}O!a|+aq^7^s9pecc$daqUdAcm2S8IPkBv6#+G{5|7Md$rs|E3X^{DYJ8HYi;QH4%6;tmVC08 zsQb;yeiX&}YZW^C;dV9SOiQi}iF(F{jcm}7`|qHFs{HfZHm+IiqRljS%27%?+w1go zdYlb?jIPtFZBIER6ont-+xXy}lspqmB9;<~p-W5F(BI|&$AR}`1A>JS8W+tLfA0~) z)fCwG!G7oYoB|bP`F9@j;3hO8+Kq}>z+&nhMmER zib8v}@vL9V%!K+b<14Wd=e!@9pb|~`WOJRdsRRi0+4UxG-zTQHn;o(jUPjl|r7ni0 zl_Wv0fx2jYe!U$+?*j~*Y92k@@j~7a@vIfd4!JGq9FOW*CxEmC|NGj9n~-_N|K+DK*M9M&xZam%R5 zn+6I^@ej+w8=tE0HlE9D?wEQd-i6AWit8f-PM1e7QfyY zy*fsD$Ge}PtwaehaO$vE{zUIdt$Y|LnK_q~Z;`uY8<87u^uBR6ZAB~aJ4&OYY_Ot-#KD#Q@jf4 z-W}Z*Ckd^M(#HD(f|4EZpi_JD_eCJORAiOt4p4W~A|ea2K1{X$*|gIp@!SkNuW8L! 
zST~WMF4mU#V?LMqv*qziECc0=z;nvvRQpyegA7hCcDw|z!kR#unM4D^DZ=el?f<5x zP!Dny9;CGwpfsm`;K&|*Uv>AhR6r(hf<0p|_VhIWQHl@kw4rF#s#ZkoZ^eKctC3zI zk_MI}gWPxTPNKxl zN)~=_*hoWE%p0*=9XMMQ`9##2)cZ=zqbLmu7r01x~VSwp~On9~mR^r+iif8|Li zAe_rYayUmF_>ukkNug?wp$h6WSNd@IWlCh7M!zeMDr*wC>M>oxQ5_c7#4T0- znGmGWGCIQlogubG^Lpz3qi0j%gV8Et5@Rx~9{uG6vcl}qj!$sV!B;4itv#DH9ET6p zT2V23HTK1X_s=&~Gp+n|II{G}rL!Mn0}%LWwyu`vSA+)qq7}(9lNDNzEIBC0vO)`i zbL5`3x?Fs*A;$fh99D`3gw&9O1=o?1E12$##BgGJNU`mQr{idM1l?X#0Jq42R)8bg zjI+N+iuqaoc(l0F@@|%KYgBs#F*4yIZRlw1Cf#R~=9rLJKryr=m7wpd;phH5IWpaYC{|zNRfb7%w zLH25|LOLmgB^>L9*}km!D{4u?Pelb}j*Y_{?=ey^WPD+fo2|js16dKf#HFLtJ;%AB^$$S%sneXO?uWG zU^OggOUXcYW_agn;u0VwoMarNOJ|mjzJfL2DNeCw!&eo>R7+>M=^HXEps%98XUN(! zH|N+qKRMG1ud&L{$IjbbE>*Y*yZbm@VR?A_O#xyWS_6) zb0Q$Il7~pi*lBO18GxCeO>VB&)~UT~%l}F8;UKsjn(YCQ( zJr$j1Cpbc{Qz=={6fuX&J-WXYJ!C_%y$#aeI}?y_e3oJ%n*?j8%OQHaR1`}bgLOT< za`@J{#09alEuF}8sk4Tgjd^i!37dgNgFn%oFz4-=bF8ERE;yI2!S_G%mT&@T`8=rr zRf^wr%KxQk#4rDMJ<@j?cyuFYI&dv8h9>J>$d4>M_( zp7VTQwu5aeo^AE9gn$O-GXaCCK-1bkc>v56S-Z@a?Pz zj*{=xGd=H7r_(Hm^9AHMKx44Vd=CH~cQ}cu7!3)L(aA}I^AYM-rmG3KccceMC?a)2 zSI1~-KywqN``jNugm7r9$R%u<19(p-x2U-^wsA<*5_SAVJ8MROvmn3yoD1f&Kc2zm zHey&##!7m-1t*J{fR48zD_RnV<=-h8*;OnHIFZ>$6bgNe;iICWE-3p-et4(VuxoAh zzuv}jzET=O&p0(wj!YR5>UJ>T`j@7jn$-o!gt~9K$|ETBS-~hknKh6`uRrBd3aw+< zb(rwuFq2V3pVVy$@Ywi$f%gh8N}`=F9n4L9nF@fLP3AMx;=udEbGE$aEJi9`kbVk0 zPmRZbf?5RPTo{L#&h|EE(Y;7PA@#JCBGK>4Ye zzkZ2Md<2ntuz;z3L-NC1FGA@z+Hz54b&*8bD8)4KCa?vHwS5WJR++Oz5#=-K`gS)R z!0hKdfEyapGt1awMJzDNKeb&9`UXuJn^juCgIS(Ep$?6lMdAC%Dw5-Ub~}S9B9a-U zrks3jYQ?l1eiOS{o@!E6wZH`G_$zXz?cT)EK?DnpWJ1_W&=JQMdT|oJ-TjdOsD?VZ zn40&iVxY9;kG(_P?qZwW2MWbQ5bFRGlB1NxEDM9dwiYs2JX}zws&=v6q2SQH`Y)9x ze;qBN0o&)Z)C|r5B5?oYlkHoSF=Z`jFJ5wFuGhci5;>S`8u`ufH=!5;m<)#%+@yy4 zCR+IpmPoRz3WJF!^YRp28cV5{6W7Z|3f-LS9uluP=w@!SmJre+?xm{0$rCa0C=5(9eX2$zxn^#V{JGakMOAo*mka- zg^Y}3zOWa+o8�HR>h#`~|8NrNDa7;8IBuKsjifO*aY(`VgqpI&_b(qF*YpL}ZSE zsL4N5F%7AAXY?3P$C#3x+7SWDJauo9d#&}L^KtU`Cyn5#;KWvtm3RE%$Vop 
z`S=mGKcRh`A0TemODx?eY(A0rTujOwLnpljDLme&V`FU9AWNB{exJAh*3uU9<8v*g zYf#xx@cA5=Dx2=)BIyvnAhMYFzpt!c9}-l*SegD6CE7-IV|TcmdCD>ja4;F!v5C>z zwl68$th~{Y_)ks;Ql{8+pH6zO`#$p|FJ1LL{8(_BGyn)Uc%u1L2 zM%_t|-)rUrADE=TpzZksmVd*NJR+x!wmRR*j z@lnnNA_bfewbvr1GQ-4497QX|nG&=(r6%K$U6#H%oe}HuY9n#zM5wz61d3!h2nZ#o z{iJTpO|6@YVS_k@N16O9W!WIPM-trOaN2{CpI(4Ds~3L$bu zpGrD|;=cnQ!0fND*R8-Hkbz^7j%enmXzZYf+TB|nz)Is)nV{M@D(z0rWMxC0TXCE) z<9Zs-5Ni+)t-h5wKUeS5d1K6-(C)RViTC^Bfucygpf${Ea&984bD_|`c+$0eFt?VHY`#~6V^amNxc@Y*4RqW_q5u=AbvU>(d zeU2wq*@(M}D~#s ziQ0Bo#1hv_L+5)xR+NzGgD8v#Rx0Py-!0Y*ka6=@Sd3=Ci`*~LKw!+=3Lx}|(ctRZ z@0HqUSuQfrBV;8Ib-mdccPKK3RTcJ_46gXM;&h-lBOXxpd>94lZGSw1zz&F$hg+jV zH}j7k@_>#RIgK_&fEyNhi~vjYT{LHZMO?*I3Sy1q=+-!AF>D54>8`<&1VJwkrEP4Fq#!2*@1ow!;2q}+4aiT>R^lU7_EwgG148w0R}ho45Z zJatXfxkK*)&fztK{d}*BQtZ|dpYE=Fq3i!iv2@TKtRlW$G0+y11Fg~`9tejsGWuBsLszvmc){2KK?g0n%B?P7SOi_d zFLR|zUB~MIE~{dkMi_WSZjcJ*@KT;Bj-Y-Xij14!tjWr>Ab+Sy@4?5#BUbO~mp<#O zHKm{b1}yFrS8hX-3YWD?B$gi7*pJqwYq~Lsj@2VXh(1TQw@=paFn=|14+2;$gL5B? zf6~8-Kr{~;5BCY!u07g1rr=Jg1=yWh=k^QDW>%7wXB5zNKmcZ{ie{BM%_(z!eU1Zp zqF3<>5j{A+;5?er{@_xJLddMe6Mw)8hfxnYe=DUCkJ!NJNUVrLerDIP?f(o-nc7?P zy*6^&J)yiw+VUpo`n1gHy>emtW5wanCD$iZTzJjeb4pPI-)2jl zqHpEId&G%?0GZ;Xh{`7W95h2S#X%Bu-AkgQHVJ42_oeF}N;?~DpIZ9N?-)n$%NlSg z5PTKXo+hw(Gi&rb4GXei?X6;R!#`=UG3X$NwvE0a+>!1Ez`;^zC2dEf?W5`^$hN{i zpJB&|ZH$GdcmAAML8muS3j%jW3ai`T?e^jsI^9GLymN~Fy;0k)wXpbwzc$^@JCY6Z z5a{vHHgaCiiy;tMawPFH+IBaniG{_Ac%i((`H9R8nR{h_@@CS1Lj3YBlbN*>O+@#` zYj}e`-|ameg#pR^?25R&sT$?Ep2B+T;$`qo{5_K-e}#?Zrd9^r2G;?5&+Fr)sa z6zbbVPGTAo8XlsdA)VY4<+(oi2tj*V^P&Y*v}@<66Sr%`P=N(ghEVdnzG%#8Rpw z$Z3|!B){%4Y~qX?a`AG?>nBSsb9Tn|C}G7^;=C%w4~MO>GjWn;<s9!$h+f4<wL>tdvQt2jV{1*&PRkLJI!b5Tqf8q=a${KJlU zNS?qSHEbPRmIfZkGX>kJvaSA94bQ5YK>#b%#=OT0lJNJx(iB zb+f7q?fU2>JQV!wSn|O=LyEBUxr^kNn0IW=V0$XksLB)6-98d{^BI@=sR{?EuAAV6 z`2RI8lmP3h-3l1T(!IE!$Dsm#u4Bs61C{VC$y;b??>KnL?y6`BTk?7W?Xm3#BtDmv zk^Y1~_3cbUVyI^74$oZi6!k}jrx1jhW}sdqo;ocQ57Zz=>bSj=t$!7v9TINC1vrmJ z0cD>tB^0zwk{elCv2veQHBC2@Lzcl^<~yv!Fcg|~JDYEe3^t;zb0Z5W$pC{*>Uz>l 
z1KpmQDMKOEBweiEtz7|(HBg~u%4`1Jb#P^9KOLMGdQvP&rxZ7~6_gn+T-=KRa~c6_ z5dWd*mv?JXbt$(;7RVC;K+fd^?A~-2ABYV`b@2I1WE5tJxoQ=py+NJb&@%a3ofFJG z;00#ht)N-n5|!ar7{zkNz%<;!-`V=hqHC`yILjrfziq{#ae{_GFsC&i>AXi^R@GVf(+Ay^U0>5(L6 z>}PA|xTg**#ue3}U{XEnI{sy)inq|vHk*NnU5Fd+g-c2+7I8+*$vIcN=pY=#1?d+c zjo5-*`+EhA>IF<@J2i1#5@OJ1ntaYsWx^k4`z64QbUOn*SzU(iuL_&9P-7|+*WfIS z=nG4D9`^4l<=zQdpl7@@NmrmMviw6GJXB>>;&NQ6C_L%DRhS7Rr!Dthh4P%%J(~5V zfyWMnx@)YRheH5`EOH2ejlfP6q~Hz2k2$YP((={3bap!L#!HQ#Z!QVcFr#GoW$pH| zG&NDNDQ4FTP{##Nauq$;0^d=+^hUVBQ%P?%6$+Kaw-?&JouB>A5+VC$G65_5FN{!( zKP{hFD!Ev=OWTkoZTt4%uG#tZNAd>beB&M+wJIt*9W)TzG-vVx$51=eT;RAZ<}6^} zav6|AR;*bSCeVg51Rn8Wwi2%A=jbUwnkIei((VSUwPOfTRe%G=YrJQJ{gVCtPHU3> zxpuhqm~)YyGaZF>&kRX9mt9LLDqh}|4km9@^ag^Ozr)NOKMTU?XX3aFzL6faenyk3 za)Pj(x6yRPWcr)ba^hP3%ncI1M@;^K4#2=6;$c&_;z)vvU(+CU!ETP*tU3OT6o{$x zBAkt&%Odh9=G|uADX%3RbR-JVmyR(vTAQi5^=MwKqSNY;#2RC>*&4E!@mR~b)#tf+ zr^g3+G6G0D;{E;0HdXl>2`}rqH|54BTc|mhOFHZcJr7Sm z+P`5k?`mopIr7=R?OZzL9F*M`Ga=AAWicu5pZ~BpB{_o@#5X2A&Gnuk+b*&mEMG}Yk&M1`Sz*@& z4T8_9U91}f{lO`pHBb8*L75(7-dn@})AL!cNE@`zCC(>)d|>!8vNBGJ&(d!1I2f~> zBbD}l12=hpN5%6#z`5*n3M4h9yz{1msJVr>^}8>pj#&FUQ%90@uu~J}uFjpw60>j# zcOtOdy>jwfxnS^Md&R)^6AX9)-n!YNKv}+bm0*>OSA55svM;8@ODea2>nbjAtZS`$ z+IQwont5x@8Cd`Ty!)VKX?dB#joNf!4Nle1EN0wt$2$1N-tt5WH3mj?B>uR=n<#%d zg_EQu1VH`KPk^nin9s^5LD=pWQh~&oE7tXwn_Dt)4aZO9X8cwJ`Y=_A=E^Q`6{V_V= z67Lxr{2|VA$|j(&D`}<=nnU{??5E=N-FUIeIs#%17ynL0Wb$}v;1gVsc+{*`ek94O zH`1MZ$m%)kupNn2gP9HKC8{tF~z}O7SWW1v3%3%^b^i^ zlg{s1!?k4+f#BOH&()4f;bA)Xe&6kuWkk$y)bUXuww`N3;dOMp0i zhS83po^A)L&J?yZTgRymfON!lyo3m<8!YZVBL|24wIv z$AghWixsVGX6R}B)E3T#ka|c~HRJF%H%J9caBGTZLdUa4V`w*-a+Dzi(vHE+Yma4txkH@d*M0 zcHRLGK_c0s>$#A32FcEfNNV{FQGX{{Sn5NdRTi{x;qz)q6b#AX@_H5@Q@dG#?a|`@ zP<0Mona16}&UQ^bPc_-LZBBMQ+16y+wmE5XlWl9VZP(;Fops(H&RXC9;9hI5eeeCb zuDvdJ#IRd#o;T#Zxm(HD8NFf?8KuE!R)Y&7g<;#LS7FER*kOO+EO|oLB)DbCD-VN^ z8XwoKZduns*!5hGrH-Nuxn)!SiEbj5~V1-E=~WxOYQzpdzC;v`&St5F2$b7`f%1T>AcXTPen!cK3F$ zl1@K$X}+FeIOg4MdoTxfsM)N}_aENU)o#+#)#=-nIL+yK51$G_iN=1kCL$TtVwH#t 
z1J+Sox;LFsrgL-$09t>AAm=R>CLySZ`@x%ku-o~Py2IWx;Hdah;(zIjw$Max~ z;mvAR88uInT`hn_(j&Mug^?9(5pGWAp)W|>8XM0j3CVN(A~!^+65Q0K@L3^me(ErN z>K_-bI~XvfX*8eN91%dxI@HT~4=y2SUS&9+c|n(uP)IH5=NDBEe0Yy{vxU0FPUKy= zznyrEq?&177v=4sy)|0nr->lC<6*z08@flxCxBHV;Cjm8m2AS$Jkj{##-;kQ9~;XgFd!m$188_7Fax{ zU~;AOZS(9u+QC_uB7rPk%8Gv=Mw#WeJs}#TaxDzF8Cm^ISY02wn0mG*XduUv;b@0J zxHObv{yYKwka(*}t=-4^m@GOE0SzSHgthnJQOm}fjNGh2#p~(Izxid6BJ#Z_w=g|s z(E@~F-A{Kb;_#njdtD&*;Hm&szb*6GWi$KNW^U zJMk}zNH*N_O0X?Dhaz3GVkE#@>Y%J0)};fLzzK7oV+B1 z;3gVN?Q+UvQfMMqYIj%9#qAE|ho+63|Ifq}mW|IuBDzmCWo|3}x~mXO2$}tQ&z4aL z!z=LAJJ{n=@ct^1?8)%&5;Ol6r>b-HQ0!*pWM-w6Zu{g`e$&fX4WEk2b#^|xifzQj z|BANWW)dYtaddS}dA&8^*m>FU@>j@!Vm2!msm8#O%)m)SR{k@a(OTNV8Eo|NlVQ@< z1H^jGxc}$Qj!#s($Z~5(U^ihxH{o#mTr=fJbXcqhiXQTFpLb8{He70S?BoSg)sw#F zSL$HKOK0MfS+1?D-KrP4WtZfN4%U#x>aT3u_dX&Zc6GXZ_S60{rTad+rS7NJEz`2~ zs9z+KLuk*yZD`VD8FIZ?iLB3<+))9@0S3cKJ*+xy_Mitgwjl#4yaDC$haH3)DM9-^ zl=iJV^{coR&Q)aFa*F+dNaxq64S#2;=KD>iS-oX7K4+0{$82R}tIz!_at6!mX!v7e zUcK$Hd&T4m_Mx#7sCi^;Cd&8}Pd9V&+QiGBHp%CWbNZcVYT@^Lt1f5?20l5@_SKbh zQ?^vTd;D?JmH+a4XM{%gKLmk~o47sayjT2OzYmY~7DrKtgi!FP!9dn~G38l99$RkO ztD3iz?!dg));v+Y=AwsN1wD5ZLT-1YZr`JnK1{eR7YtE>JP3kNRl`Q}qK5}U{q8AX zwBTI;LJ-ejX^vy#LI~fhx9@IE&$+HwU**Rssy-N&4rEGLW4R zujG4h3dsF@jJ-L;>5fd&urG{MiLJ)hjR`2mmcq<6;g?d;2`PVOvYp*PAQIQdXG3O| z20mGymJ+&fv^@a!V2!JC*167$E7VQ@1$%_*|y0EVo)aVV}z zQpF|m*K4J%k&QoX=8SmlAJhGseT#knA+ZUm?IDdYCY>%AL|IjUr^P|BuYq&*^Eq#K zR%PBY3%OJCM$4^A?w4NHS`P(y4H-mk7h$0PEk?{$Ezx!xgq%SWGv!|hv| z)si(rJcdU~pIS1j`2A)OgxVa=cq>s7%bRa>RLz zP0RLBTELV?P_`tZScZ17tjrXSKCu}Cs;@5{8xX>#mK*;*R*dO^JCZTBRoNt*0>c(r zreQ7zk}C--5Z81}&|3YJ2a7-lnC@3{hN=z6tJH>fGGsF@`m#Gen!@OJ5mJ~0nRQxZ zwUHo-`e#u+oJQl%IGXy$Kd_S*siej@k+6`CbzAEf-0cDbDPzY8(nN(SIfjV-Tj;fPcw z{@X>3;I8d2ZzR4lf>7p2E1!uOjS8zu(OG_S8cro6BpveiY^}-+^&)B11I-|WNa>*0 z>%$~sCCZ9*9XZaZB75uB7SKL;9mctAL`e7x!&_*;d^brXi?wT*5O$Pd=9iJKHNCMX z_9-<{D6!7{TAuMKCwx;OA(gZPBqXj4zoaU>CZXSQds5h(8_r?N!?P01i&C%*=&noq zhk8yeb5Zp5FJK+d>};OUH!eWM>3Qf5ucuBG%I)+|L37s{X>_TLA7Xv?7&pxZY 
z%{-f5^W)^22}VdXAWe>22AxrjxTe*cjlb_5vd*|SB2^DMMQpwO-WoU(c;)s!P0AZ+ zvR=Br9cSFz?DUdTwLwHcN;QNwsMFku(tpGFSk-^Ma?g3X^ZO7~xP$p!N2dR?V{ z);VTPsF$=Dn94WA<#nDVYGC3_zi}nG5he7mp!;daHako1hOqN0W1sVBasu$b$QZ%Q zgM>RS`o!BDd=Qz95sB826y_$CA^*A2RWbn1x2Q3p8zJm6~`&Xtx>)d z$fSRlj*w)wzj*k&Zo!p4!2Jl>GIe8n>sby(jIgz&vgfzQJO_*Wqv|I$eEgUkeAU-_~PsXL0P<6Kc7sOUbT=xftbqmlKgQ35{d^ z&NE~&ymDqo;54_j82~%9w*H2l5#6BiGX&lmIGN%DKLz61xgqB)fbl!AXW04s?6Btq zp?erFY2g~$!j8V5h|`Cb(u8xDU&kt0#)m9OJ$tcc`Zk1eywF@_Q?k zLG-kv863la3F<+`e`Nv0g?5mO^5YJIlJ`{-jb+%(;hW&ErIocdsYk|+{S=u>$pWB+c)g$zU+}ykVzqc4aG_Rc>RJNzv~KZ(RvhxBtEU-940n?YPLX;NQK|4 z5Dl9`xqp-tjSxTKhq7X>6oEMqRgHwX)RI$o>Aox)N-|AoBPZ<$SUY9BP)|L2|uGF8sEG~9&U zVf4U;EOviU4o6lrD#R}?pjC)_-24hhJe|Y;EnP?veApC2E>AEq9;4UcY zf?g&y&e@}fJ1t_L#kZf@^+i48RW95zHf)i1g;r6O#vH^pFBXY5I-j7Aw6+%oMUiwS z?qUmrl7m}uc>7yWkGHsI@U+UVBoWbqmToKg;-eox&fE$fMV|T%U9oIgq-^CK#B($ zqP`EZ>HIZ;)Z{RKRlB=udL1Gy7w%mr*JM?^2Nla`%&ZTPnQe1?pZO{Sj6#63wyyg_ zeN=;^nNaL!%4>R>$1FQ~-Gzg(3(gPaZEObLC&BC5WK8&9} z?iBXg8TmpG#WE2N=-J@)OZ&{E?^4ZrPZV_W%1MDz$aGvhJbjflU6Fa8&w0a)pSO&A z_xH)4c8`%UHw{3sq+B(9Sqv4whnf$yw~@FvxLo($(Z<7>&#On;grSm$AXd2d{#d5Z zpW_*Hc#|mB{OVCCpWCbKy1DWh>~t}$*4nI2;ie(;}4w%9r`T>Y`JS!%xArD zuGk4m;FGYQ-b3O>XR_9e+Tj~rs>&YTc<^`s07J#Eh25^TMGqynek1tt&!J7X$c`pe ztGsdq(_de*in>B&^vkCFi^bVa-z71g@aMX*#GJ7N8Hr@87EB1IKc$-13nwHcD)Gjg zd&G%(o;Go@#m=0=;yx71@v+l@zKFZ*Dsw~&rSXMwCRr$!a@PI(ymh?b{DCE`d8o^? 
z&qSDq0MhU-2(dv=ZA%G>zn*P=a|d4~vIEZ_)-Bedo^LQD$XNu8E2Fz*jg5X$G>n`& z-C|LRJ+CMe!;o3DQ!p&+hrjf1XUdx<0qA+h&*;q4Zt!s2OfKqA3L`H8ngePgThFIfY85&kJ_8H>R^+;wzJN3cn=7)ddvkqOTC{!x6M`DK= zzvn;Cyk4hhRPQWakj9?pH6g$FZp$I^%q)SBeBig2x3Y??tVU6G@P2be4LuW^H`{{M zmiyT{tudH05AC{=-WC#FLAj&DSJHh;?ly8toF)sFH6AZHJBrN<0*j7gDr7_t5uO7^ z=kHP8!K{XgAfbqFsu~Uv#1A>aC4NI|+$elTh(}tr30XKNz;1ZeoQB-3`c{LOYhX=b zQ}$(^-kzYZ(cayZEBLm{NG1%ymdSSDn*#P}21zWu`cF(`-2L7f0j%t19dp{qUO-fh zU9-kB)x|nIx`;mn(;m3FbnPP}@17EKOYWnDhD_kj)mk4jvWyfugV@SOFXPo{hMD0 z;zMLo^>B^%!=J?Qh5nx=<pr#EcwW3S#Ye-CHcVGWmY2(or^RlRgwNcu7u$HD^mTdn$P^SYGP!oX{ytAg`Eis-Wkm zC&2hYA}igObMOwf`zR%Sg9Ql&+v$p_&dB{#&NRyRBv_*1`yf2u)JY)}K1^8S~d2 zA|;e+1qQ20ZgvWaK^365;AK%tzdHXdiK@E%$^S~ObPNXuZqRXsyE%XaE52ep!EcZ` z|EV16thvo{tWsIbkXrgdF{RAAwQm4#I|{kp;Hw`UYN~y^WL!)~H;`O9<~?7aCv@>l z)rmw(m6!sdDVS5JW`lK$z~JsApAG!7iq zgD3WS^tGd@gb~+n@Og3}e791x-9!X6zQZOO-?E=-MGfcrU|iTla)P=8mhAP0KhD9z z?nK#ae$d^`N4`-Us<6SrrW4@j3vihjTP4mHs6tRkpka}Rc!mLPC=tDK#p z)CC{|iHqA%ZGU?{u&+u-{p9HPY*K}9AfQE%&6}(}_qvSo`iAZeSwT!DT_&ceWaG0$ zI)@`(btaE?Eq%C?&8(C^wpsYgC#QF??|Oc=m&bJUxiGFxP8)@|!KQQ6T!|u$Qr1Zo zsx35|-08+4_V9gr_wD0*$Im_N%MC$^a8?*ii%mGSel2oQMzhpH^P0VU$FdQz4rgi7 z)#%GDtLpV+C#(~a(MshWiNJ0_lib_fO> z0i@iFjNqt09ub=6JCOfn&iERD#8$&tS|~Cs&#enn&YNkaN^TgmFz2i@Y=C6#hX$Vh zozhWv4UlrDp+4w8_HfU9h4%3NR%74x!GYz_X1{PhlYD1jmmhmt(}dbGvi#!_d-Rut z*E|U@zROS|E9#Lm+1jeAf`QS44UYZf0-0H1#u%D(AhLghwC|kh8pPK-kndBx?8o(x ze9dg`pFBsW6eP82RFr(>XX0;9=gD?4)M$$aFZ2*}`!9az_S>d8p0yAOpUp)vwdsCg ziuH>eo~>Ae)a#lrom96`N~iGqF8=GORB4#WIaQ$G zY_lC)#4UTcS&sO2_tSd^`?7k3Qg-PtvQF2mZ|gauy(9>WOuRQFkC^h@D{Z3v+dbn5 z;@yw*XP`ifAJFgit>LL|oUo^4)MfqqJdm=KZW0i4jY@YU1rA-gfKbzMiy-iRqOiB* z_r9d{F9Hspgj&W@95=1ekNBB_h9n4>(ZV!GvhlfQqGy9=J%55F_);Vo+5O)9$-$uC z#fLCdaQ5Ch+>QDAfd6@anC9T)`iXk6z2^~r>bqinXflAuuo}ckL!qiEls@rW-GMhW}~aqM2{PpXk#rvR4skPuUPCIGx`Nl zMcZ|LH7Et`xlsu?BL=o2V9WwUI!%Du+HcB!!N{OSi%T~~;sXQaYv=&4HDMJ_+Rq|N6_s zk>guRO>a#Akgv4ZvmupgE0@$t+Fh5VwSuOnp^_8v#b~BXC!HyjHf#DQ$b3hiwSseS 
znaime5C^aO552Wi&PrZB<-*tqMk3}_c5g(&3rmrZr%#U<_S;`94&3*F3)&XXJXZgXGL;jyCl>{C7j?w!b<3>!>P3even+!}CSGMd zAZW@<1vab`P8*xN`Uhzq-pdMf2dXhq4?9rHcH9>us}kx-dbXA^p{ zhVzS~%6`^9i~E~|y?^DbM)4a;O-0>l*siYRt=JxVoLLfZvW3M3xgm!UYoO1@O}Fa0 zI$CMkS-WR_lwiQe7dOZloiN9}GhY==`obzA|nFF-g(Pd1bkwHyh4);;l7k$LBg}e3YeNNF8m%fpx!(Wwo@Q&u{j> zfR=6Ui03e^)YU<<~<`}0>t>@v69g-#dKSgU_7P3zaQ|=zjB>Lcys_VeAJbAI|jRM=(Ydr*3N*YLyXcHet+ zWO1Qc(5!w?R_O{CTAld!B#~P7`YpAwh~Jt1>^f#>@5=k1$6NE(JGyQ~<^M;b#p!mH z0|926G%!uskWsHS-2uFLp9t!1mqm7^nhCRx$2*xo2;k48z;GOw4Y1sfodi8RJO#Y# zB&-THZN8kj;e<$P+Wyfj?}hw~xuxt9RVo$`nJQL?S@P6fFH=@bDqeqsf61C>Jpm?7 zHCKK&f5&I7GC}*rUjx{4h=6)|-xsPV&y?vBI@2{piH#1tmHjBuk)fm!%HbGTxtSqg z2W7^euFB!{zM^CxM|HRhzKKw3JAIED`}Qgk81&Bniw`i0hHPNdCAlczv=k_{tp6mqW=7aV6q=A#|>xU z{MB%O>(JafiO#pFLCffC>_zax$A?XRaqajgsX z%uaf_JKTfWvGkswicjRaL)iJny89B=m6PifjQK--o;eqSE9=&&yRwOHG!TW(`x-#y zYP14tfMDtD$8F%g;Z1~AcuViP0SfWKy=U_!WihJ zKvro^kZfisIV8R3*0wUoE19ApTEnB_p8``c;F{+BMKU-WT>O~dKjI8q9lt1<;*ksY zzXC>0y0vUGDqj+nO!9ak;g~{O z4Fl;JmY4xb!aj`mU4Lp4O!KI;Hl5x_FEVeX6W7ya%xSnWmeaq8+}IKFXOmjq7CufW z_J>IpNrt8);H}uB2lA+ITfpeoqUQTj4ReNSg=|eD^ablN>%yHFrrTduv54UPI-_8@ zQdN%8?>*mMyPOsoSUK6jU{W6rQW_)LlKf-c7?sT#ux(X(snW!_wAHPX<%Y)KpMc3o z#&MO&mcMf4hlpC4tgn#V`ue@$SA9~M zMXDNc(1lvuCUV(JWXa^nt3Q~|SYkdTCHN00cT9)X2k=MHX%KmwKHort;8Umj-Zhv# z->0}LD*hqm_kX;#Zao#+H-@z(#}ls8rJKmGB)r+me-d(Z;0Gcy8h2nvu4{X{#V5(N zt%Axh!M5Or5U?8%V`S;A2#Y3)_+a`0S^}d=6m$`l>e!}l>yJ$>Va?8d4jS7khlBVPA$%em{bK z8wQ_LvN67yM%IVu_#k6bAc_PCI;N#mfFRzASDy1kC#OIahDsYM#|%=_#b|IE?DFXE zBb9S_o=O8XgrtGj(@%sa6t6lF-5M>c2!B->=G`Ube+e43KN|St+9Dq+iHDkOtaQrn zF-Cu>vc`H+h{ShG6=ix;OT?QAY|A#Q9HoKk&j5V+)#74#;3+AAVfz6}vRK4=wi2Gx z`}2-G+i$%B48Fq+Ek4ct@2{^dzIWKZkIwp`kF;!RVuJVAbr3ELK=;=2*+tCEpK2%) zuNOoT`@*boMi3jwPpUnG;;9R#qxTvrSUHJLMt`rM)@yV)R36KfzHKkMa^um;np}dZ ztSCV+T8TXYe{PcJ4!9HNrUhllNv4phpd#sXXD5KeETcfkT60Jv5d>AvED=CHP-@$M z-Ex9OwyTosdI(kALT19dv>QwHUkZ=U{L_j0i*B33IG z8N{Jo{aXtsOAYkDzDvC?a!QEerU#0lZ#7<0Aw!M7F#IC;vd3O35EdDh+)$<*s1fBp 
z_Ay_q6OEcKv#5^!k5KKeA~F=I!+Y_MY)YWN?!QC~}I2Pqyj1f*6`65qudjxKI>;ZxZGM59ewa zWjuVAC|zz>@^2g6uWmO{-A_?1KCc|+>gnrAF(rux%MudFf@{{Cd9DpM?L#`>R{P)j z{hm;7cB2?~hyC7$Z!iUKFmd;ZRMbl{02RxW-H_K`Fht*Q*W<5i-pA^_BmEC7-;2pK zu~@A~M>bfsx*-eI&$SU3DMU^3EtODq)2Oh3PSVd4JU53AYu2}YXNAaSBIwG$<}nNZ z^Gj^1QT10RjxeixHM)zAl)L%;Y%HjGOzSJfQ`_G@MuHaK*y0?F*ZoKm>LrU*H@J;k zTS8j^YCI{Yx%cj|RYlYxQ7Jk>*TBs=VfS1}ON7o$y}AYG9QNat?ro+L)7aVH$8yyv zAhrivWztC89edfpr#l+p^Tq;ZkY3Tr=wBx=%PFeYCt#vNUrC~$;3Wi|G`t_j=YE|A z*51>Gt!v>Q4^_nOFD!ss2C`x3kyM;fk6L&Hhe)(w3as;P2IC4@8ZacDSS(&K2rdJ; z+9vz8x}CivMvnr)L*ItnxH#yC_CaS7D)Ga0azgiMBRy)bC z00*%w!r&mgXn9Fa@b54|0oe5?L=e1H9B3Pj;s!wh1p8q79CiV?I^n${A;mvaU=UR+ zJf==!+<1=8H|iEa)>l54UW~L2s%kV!ZfVuGov)V;Q@M|izaM-iG1HdGRxp5Ek7Xkh zsX|H-v5dfMM5WWro!;W0B&0;RTspJXedk~tG&yPZdD#H-ctOac+MQX!i0(c_MzO zVD+QOlm}kPI<<0qs&Y&xT)&qgjr@DZ5Kb4=mMaeBY9PMJR-i=~+I&BZVF4gl3Ny3p zxnF7bu+y1J0ue)p&TYn)`YCE$!C3)@E%lKeN(DFI$K;U4iCRW;O^(cXB*C1LY>Gpn z6V|8ru>!5Y-e@FhrcpkjQ!+6ejS;vWlD9mSTUjzi1kziY$!ybN1QI!3oQ91;u9zP| z0wy#}M1&N+shiS1Xe0#KhaNEND@3g*Vm3OC>M&-%pNfe7#gWYy`A1@Akh2%N1OOVG z1Z9dF!o=f0iOZ`3!$nwdawdha5p^o*r?PosE-r{Wc!dlP0SdUuroQZcDeO3?` z;y}RfqYrD{F&<^hCMGnpRb>*z7_=g`w0LI0fNo)i@_4#DA~WN0;-s5g;gv=LCc|k$ zYf{Z|A5gDe#L_hJ2MAk|q8&>>!PMV7T@tB~;Lqc&w1i1936pWlG>jH?g0Yjxxz-q0 zg9Tz(warWwflNgpH)Q+5~+He$Wq0q41g z3=k}nXH7<2(v2mEtQ;F;4FieNl`)+~6rd5Om{#v*me@47V}r8*3YdJDr+T)x)VosC zLXjYB70CNJCrhyx^=Ag;+-!LRJFUAz@5@Ka#m}o3(h+`_^0qlXBx@q(>2oNYiV5`i zN;-8jcqq(ji)mf!|8dOqVd#38=v=OVp=zrdK9qO``x-6=zFu_`cHiK22gLc_`*N(P z8^w+wl9RZ6s5D$8jUc{1lCkWzJkobPh9KzmL!$Ee!xDOGr<2Ra{3?c{oa!mgfT zuX;Fr<~et!c(3Gwjmbj#x3PNw2$8Stb3IY)`8z|}In*@aUtZO>%F_hmMJa_#LZtH?pY?NYg(7z44xybY@9BQ4pW zC@+MXqqY$9+Y28(f#Pj%ud)}!MOIpZRJC!@{z{G4y}Wb$f*?^OEXnr=VVQRaf=++$ z^_f=ytx*e>f}HF+;^}lmo8XpdX4UaqWj+v11AyR$U6?C~%zCaN@qpuI+n*E~{zMf0 z#j@U(R$p>`(A%?V*}sn8I9HSfd6HX4q-{q)qz8-?g|adu@*RV;7i%pj?x@9tDc5oT z*&d$19O*Zq$bx6iOwY_2Q>|MBiY6QR11DaW!TRNnrVq}`@9V6!UdBkL=#kZf#lU`~ zQ?$?3pMXE1tx%){7&+^)LGC~}Z4oh0^dJ4I`w$x}!nlkT09)%{O%jPd$tQqqCRHNR 
zP1F(Ivk57Z?D=~6bO>CZST+f+u#9SRgzs;sj;W$BR-9y{BPWPZ9^wkz(HSC0P-cU6 zTk?=GlWy;+1vK8-nN5`*3JsR)UVwLu+8s$x-^$M$KxYl|ag|#E= z@s4|QH$e1duF;hrS<+gN5Jp~Ui)}=#Cfxld21u(XNEhA+f+q&j6u)fiy1D`xUvHat z&oi8#)=A@b%6~Cz8`uX{=M*x)5M@{>6WX_POOj4y#rKLluKrjha0Fbcl6LsGNE4;u zr^Fi=|9l9?U`#5c(X6L(pTdU~cp)GrSR;>>3}<$;tIa<2tr3RQ4gv{ZnLzEk5}+$H zp)oW3V?SmyNYVZK+4ov@VMhC!UiIDR&+hC(NqW8c2b_mn3crW~<>Wr;$4jJ)vl53Q zD1hujV(*WoHb=+L(CtdXd9{ z6}d;(;(c_i#-~m4Kh}f6z>i+um&)Bjo66v}>G3m9koH!BSuaQMimDJ?N)I{*P^23# zJ+ie0W7?`sbMM!2YMP2uK`a3Ms*(qS&!sPz#J31|V|nF^;jwcE7JJT(&sqaUT_z-Z z=~wpz%!wz=k$RlVzWXzob-m~2$3KNEvA$HucS0hI5V!T5gSw65yMuEbq2Mio)Y8AI zV7%dbFpKhQ+N2=xjD^W8c*iQ(Pd23R{#S57 zo^$hWmd>4~$g100N`u=T{n^nCW9}2c_a;JcP?+_b!Y_4}z$ixifCSU6SC6QizTIDf zH|9@yo8E+WJe}wrOsP;q|6QBvvlSx=pI<-s+ClYW%#@$FY)zP4C5HiaEQ2A_e1V;k zc@ma1a#SR#6Q=NRCVmkjEHq<8$ZE;pGm4n@cx0j-5F`|^YTvdKuVS{v(I?dRrj~Ba z6FqJ}cEG>F*AOTuD9;Wf36_hIa&n`kzi2=PWGp-~tNT6D|D0b=iv$V5!C4&+Onl}( zz;$l;2O!-(IyuI42A$obA(9{Ao?M}VN~x61nR&AdzCbktVIYmzVJ#%%$+1RiNxR5y zRLbij5`fw28x*Zs(Y{cb1Y~D;Lyh{v?%v?}ghIs`Sway}iN&d`3jlAL)`J|suaNnA z_Yt3%PcB@8sL#F)Sdj=KiwL_^vO|EB4WXx~db*J%-WdR?QtgJOaRaFsPXsw*njCFrE z+Y)nvFc84@r|fbp-ec%K8aclp&Etn@LeE|;6wJ5R-1HRmiBkk`{+?p%x(2@yF%5n| z9aKuD2DTB_cf9f9bND1&I(b-bpyeEXVqBJ#XvT*of@@~dbcUq3fI@`{;9;%QRJpfh zqMbM^H+q6KzABQ4JiQpQUsLxuo&tR8Bo_%2N`i1mVl*;8^MDo7HAltP+x|Sf!nnpb zWHKCa$^{(nycxXg66(Cz0}I;Ac-`d=SyN9h;7t3=GSb)p{FKv}YgAe8+q+FcyskE! zKQa>-wBZ=C;n=A{Fiw_$jnm4NTn=9dsiIF@AQAFMs**0GtG%W=i-%W@uR{85Em7QT z6Yt)zDj)%ghQ%b4fuvpnClApM&Q2;x@@t}R&dA2^W66~V@9xG`pi1<+tN=Q*`HWJi z=7P~5kMN-1kkJ}CE=7_2ikjwGfc(AzFBTF`%M3B0>yMF_b_$dS(;{pJ9(~07&o-v! 
z`2F0NeC}Va1_F7z+t+Aeuwei~0;f>$(R|GeM#QWKp9k`>k#ELqDns{Oyo*gIf_aA% zMaC?Cmm`EPoPbYklL*~0?E7jr`ZfYhV(VLOfzNz`0^)ZGpFns0!3X zvViaSw<6ZO&$_d^2L3}((%+!ZI--x3&6CJIPd%P>%V2f{(C!4rui>1+qzP)za4&;8 z;^_OhC^1rlv9q4>OPc`k-g(|hokK2>TvvHQSb7zCU8U1nsT$REo`f@S&olEH*E1h< zX6?|0ppiXN^6~tUp{0ln_*)wDxysIw`Es-*jQkyLA1(@<%XPk}nzc4eL5C%coi_*e zWe4-|n$-7q(T>met6_z=0bXvefF3^*@IH2E#cW@}*s5c_D_1iEx{s`JSv~uR6$d9{ zu@)$npnb|mF5W#2zlSBqFSqUb&o$fS%}tc8{QdHdG_$y-`k^%Yjle9}v!}cMsAbz< zH%(C)dNnRPSHJjeaQGh0u*#TooG4&qX&5Fu(oD5d|KB?ZzMaDW4EK(D`eV8*MJ+*n z&zFjN8xvL7ZdKq9Mj*$9{oBtjcLJN(f_~e#M)a!>7L>SFTdDw}dZM0wEV{clsPLB~ zcET|lkUD)PQJMK8%oE1KEG1E4OU{y5v`8ahA$Yj%#t@KfI7)AA1S5Hh zAh4kmy{xd^^MC=MOu#`GdDNcT&7cJdj)H5Pv znJCMhZzUI~Mootb^J@+sE&|ux=YO|6r1CPAEh)vD%&t32z)=1`yO&UwFpV)_Qjs$F zjXYqbHh_d=zb`!0onm4)veh$mNeT!9xyuD$phzh7e zfw)Z}OB~J4nEW%A`M9XJMuUc;Q@-SW`C7=x5 z!$(IKE8dde?$Jn>;LdT7jghZ9LlZ0Wcr|e?K{){#_Rbn)fQu+ zhqH;tTVLbP>gJQG__DAboiPUKrZjPRT_V}>_(8(BhOD?xuxQ^sL%oLarn6)6`RxEz z=~x%~Tm$m)2rnrEnPY$O-{ZlIpY%e7Iun-R#}pp^VANLVTPKNa=kpV!m98JVF_4>I_p`0Lox|{rN9tC8>O0N@zYzY9qxZQ z#a&caUsi0b9^)FSB627IMTvogxHTpTkVgeNJNu7h4NSzOP=rdiI@61yU_)NksM2%M z>3bXc&F)@rdXm)YIWOT&FB9xobH5vHvUmKUR?0gaV)4)ow%_H4L3fffnX%SQoNLq? 
zvO86h{G7L`$X<_C{_?V$v(&RtV(z|R`!&o)_0u{MygEKDcoGt-b|d|E`kWY&Q1=@; za!X)=+0(OJtdug1#cvalppLb4oVY;$%xbm_o??^vofj ze3Uf!qEX$7SJI${D$DkTI{z_i?-8u!Z9nFAkt9YDj|BWnG7v_5$ zc9Z+q0UIYs5RS;ch!PwqXw=v##kLPMurLyZL&D`>L^jogMb3D8C9||^6ngFM9ggbv z)c@22E8rhS@IrUla`!Wv*Nfv%)mP$&>c$)=^VFo-T}o7}v`W5fdXo z&XZ3%uin|UChH(SE$O=A(urIr8S{TC+yAX^1swxWL$esYPSgngXw@x``*-g9q;r-^?QLC?N{Fy6U{zdglCa!(|XVK{-D2OrxIjGGGqU>FS8-qa3;%$11uikLH* z85f32*@!%#aQCVz1XQG(;}eRz1P z0#T`74=Py%Mgm=U(naCL8T&r>a>-;xh9bbtBgbNZhD2^Zsm8L3f$)HrYZlf6CUk_R zj^Cb$oh?uh;-|9y%nN3YXT_O8X%>o~f1G+xFpI zvyp9WpRP>vR(=*+HjRQ%RueFsMrm<<)9AyR3~wmQQ#kI-Vdnz6X0pejz$OarXqVqmP;(=RY?Uc?v!i3w_KaSjz1b}u1D-0z<6mb(lwORekL zofCQo+x6XGYk2iIzm6iHJo>M=1fH1SzcXO?GbokpYRx*qNQ_@h-G$RDrLfDK61v6| z60kTHVYp~BN=%4gY9&a?La#=_nUQ*Yj`%bXcbFw+@a2O!o|AeqhWt^^r%h8tU_^LW zn&C0w%WUg+@rmr54+GdYIaETAZ#`EeR{=AULONJxRR!YDCBkvTEt@wLejr$^>BaQS zEa7Dh_;*mPB#8YTGYCOWlzdS1C4jz_Is;Oljo>#>F#6h8p(pu{`%OvCAff}>KC+Ly zN>*f!F#$ZFlw%f*6P@%%4mwcM$phwSm%}KFTx+l;t1(6p3lBF_pf`T{KB6;Sz;g() zm=19;DsNl5s^F0Mz8Q%8c|9Iee7MniQO(PTC@;tpFZKN0ka0m`x=EF9+BB7r!Nh>b z#K0ZsylLXas`cuk(DSLnB1&F4Fit&YFxml=qhq^VSLJxeN{C@ zFP{FoSC26yx+hhZJ{aHz8tuCSrG9f4B_$gOUG3g8%)EO?!1fQ~`a0(?62%+ybq) zHAcFkhU{S6{crSoKU8#FQxfZOf34YuuLZX|E;#Sky#1jUeNIGH7a|Mz7}r3@{(z%* zQmo+)I#HeNWsuW;JYhSe*D}6owjgb=pXnRr2%^hzmD1S5&`(UQvT&@eV6DIi@8Ds) zEc#@ysNj$#r;#>L%A#B_W~tZL}NTXD!0mlLz;fIUFDX*H&`dzW%jy7$G?*q`DJY zy~K24lSe|^$^$IB8?&gA%Ktc{Rczbnze&vH_m%SzK0{4sJe95Z?S&YM>tb|xeYJjI9#!=7< z7b#L~ka2sujZhq&b}TgZS*e-edX@(e|E!*EOS5lS9PUMqLS1$e`Y~DVm&z?J_pxjp zZN`NAoI7l+6{gkdA`jQix zOE$r1!H~?984$?&NF3GU#z#jVwt*fwS872wKITXfe#-`-;zIvI{)jN0nSu$8f zHN-@yrVX};Y$+9rq6^|@#Sf-=h7yX}{XQ#|(pad4jUhf^At?oXjf9RsRGy{#DM+lN zM;ykg6n85Y-5w1&2N)7z#E&_*zKO$iJd7N{0y?V?@L0K9#N=4QmvDMi_~yd!zr#d< zV2qI-!aVy+TJd-cKj&z!hXnEaB?IN|Cw&18y%9g)40r>K1rcq5THnDqX0{}a)~CFpvJk9CXX`1exw`6Aqp%Ky1{Lw;$(BE*jv%Up{Csz?-T zbcu3t&Y4{G4LT|dGfE{eOTqqmyK6HPR}3{IL1FoG*M>ups2tbXVl!8{f@=>v=Qxr% zvq)lpp=+s2MrDJbVZM3YN&y9doo+lx7JVNUMo5neML3&|kcWhtNwOJbN^!TA4mEUw 
zH2;-Q(wH;TIB%HjVKPC_e~ftkgg!WF$iKRtp+H{)Q>4+4Q$aJyI#hgHIZJr>Kd5KkWFo zP)Cs4`gwYDla#z>QNt3~Nkm#DiTLTAG`KnXt3j8*x~r_9evH@C?qqIr;DysO`61b5 zRzun`w%KHyD#h!;Qv)CT1ugAHD1-~IyoQ>asGV^mMR~LtlGnyGzgNGBw2lXssI%iv ztCh+eAcx&(%78E)d=7{8w2TqmvyWk&kFF6RELHnL^{a52U;QDglg)(OQbRb(?=^Y< z*6H_{@z_U5gOJfuL#%W0znqbg2=ssGs`bR1-u@r9i|*QQ_3}YoHryHR&OYO<-WUDu zzU)muG>GwZhLh4OI{qFq-tKS~0cf`&fWPMgmp59}al5dNAhpqH>95!y<{s`(0Z#?Wi z)oehxp=*jOHz{Tt=ZG94{kUh|k0j674jpRHi#Uv!L}Al{pp9AO%)>Y(e2F<4`O= zowzV(bngJ87{-*tM8@^lI?!=v;|nn|85U-1Fow+KrLz$kr21fj*JppT`OqZJibEZLJ34S+;bfOoL($H8hA)l&V95c?=<8*! zux%Ai18a}8tMHSG<}0WNxKXJ=puhE9T=O+(?c}^eh^*(Yfst*kVsseoxT@|Flc#=oY)O zk85(s&3h* zkkZ15T~JBWCIDf_8^G$(h+6iJfaWmC^>zvG2lZ&-8g z>^3b2`>I=d(2aV~$!(>35Hx3td}E}UMDK{rkQ`r!>$szMXS$BpJ(a-G+v0otg zonP89ij?T_l#q)jwX^fQw7Z*iwAlod$Q{4MIt4>;W*J*O95-398fnjyUEY#<7#ESx z(xge$S?5328ZlixT~N@)?ceeAiz|t@tNT&+E_7`1`+}47erxyKW6O|TT*%`ebW4qn z1g!&al;M|5|f8if7Jf1n*1Af+b1Jd>+|o8_4n7kh@w2J z@jKbOk6j7)NQfx-Q(Md%TZ?rAUyf~iyKN7{YyT@-*0Je%9@3RmNt>26AW}tT6Hfa) zI{)_G{iM5K+OBz-RxpY8GM+~$n-HbB%*2|+5_&LHb{$E*fz;}gGK!J@K?}RI7zpEs z3G0I(NfcONx?P0vRY6+?xf%B-D|}2Cy<9O`aVlxCM(k(x!XL`8P8H+_^-R+_JtLM* zpc968Bvnaw<5*wRa*Lo$p0^NUtii;k=i+hfbOt!g$91~=A{YXYIgLmSJZi`}(v@fU zMKAtjMQ=fcM(smMfI;ZNWcp|c2}5Qv(9i7$YxP~4W18*R=SzRiq5CJznYM~TiS%eHD0EpiZq2Skq2sr3U0O6xK zEc4(=a!*~G6*KA)m2EbeuAV`EvYL3Lc9d7{l!i;(XouobPx3^FnyK2l@6!?aD35t9 z_CAOV?S7X>4q+NNdw{3D50F})xm_^{9?_IS2uqEjiR-cbP*SFFQuZ8nTM*Z~RB)pL z64~XJ8~|$*{Am)85d1F-(1brJf5tiv3|cx#q`vE!T}%m-LNGzbP>F<%M4uGFuaut^ zY2D`o(yN&JaqxOP$objrNHFv(2gBv_eS<5vL^u z12a5lZdRUAuJn4@Qo~%Z%xS;#ZhNES%miJM6ejCOP1*0`f!=%RqHUQbEt@Bp8f;|R z1-lQ}ACQqTD&{(xP7)!4poK7rJhDe}&)W$)7EQXr5VUK|!%$Bu$w2?t zFk&&cn*aA2>5gfxDCc`L$%vpXJ*ORY0P&`3k~#l;UG&}rdI7hWn0N1{jsgSQALa8w zz;@(lWYQDT_9G1J>iFg#NpOkUjgBKycB=p2ZzEsg-7^}NN2qj-Nh8A*+R@U(`_655 z2iO$E@tcK)-qnn^pYKX)&Ra8y6&Ia_KOFb5wEtgickc{Oru>Xl%ES)>AMCjzBV zm2OeDZDxf|FEvptYKfM%f<|N2D|Yy$^aVoS%S+4VQ!>vIbF9)$OP9$@Z$ev()1F*m 
z`6n^hM~h~YeYB|64wmoekVgEA&o8Bbp#s=knN)I>fSLT0tJPcBRO^sf@!TeG-%Esmlp_|+ia&xr20$dIW~JesN4BzwN?jf+pu^S z>1OuABgVb!D)+w8@2-tjK+)!D}m zXrQiFwHET-hQGw?J8{Rgei2vQe>h*dt$lbcYmn)^*mMy$@4mHlVd=BCDczJV!9sN# zm597vSQmCTfIad3e#|SPvN4qyQa3TA`@fADVGh`|qbxSlo~`a~PqGv*GqCerUN4XL zM{=7hWMyfv^CAUy=jhc)=5%@XJ!WNb;}-zsT<3&PyUXcHUR8 zn`&N@(X?ETUo1g00x;q)ORFyIUN!N$mN2e4lwDV4(hs3UdZTKAMP7`_B-Umar;`ABa0v*XaGRaCp>aV?*ePf9z%kE(vvo(=Pg@surq zcjG{)Y>~nPSJ5b0rysD6K&RK77P~S50d<(keYX2!<<~a1q-3Cac#H-a@VcUR<(Anl zcX`NJO#L1h2SC ziViW;Z57ux7@#xqF(3dGRmPb4e!J(X3W^L&GM?9yScN}jP*8H|DS7Q)FVitD@UcHp z*CZ<9ftZ+V`=`h{EKy{&wQs{*CC5k|d0Z3mJ_V`Y~In zf&h8Wnb=%i-AxC?ZeK+XDeUPI9#+7xAtXeSXkY8)d=o$!fR|2#KO7$xQti<OFm(x9ro7i@^EtV96xDfZ=o@cMWnAk4aB{D`6j94`OkvA#%@c;L$O zd=#?(2ny$D!ER;}QvR`dN;QfNI^hee=M4K5Ajb83CwQV=$h#1~4=A9jy3Vh&-%K}? zY-ySS;Xo;WjfsR>oUn51FCjCRW-JqmNXPf6r-h$-@qe!A78~q!i*}pri)ZuEtGy1L zJ@DtI}bi$^6An+U`iG`P9!$Q$a2L(1u2Phv2G8mpWlCL+R>Xs&ZJ68 z+K$1<63K2vTUw5=eL{@T(fcXAB%gYxpW@ehAt4WHSsISVC4TsMB@d`y@Bs}F z)y$G4dR>5`%N|-2*giK=pqzraY{Hb*k+v`cV|I6WvYNq{2{9d<|BR*iDlBv`j|=AW zR6u%tphf*^W#`!~efNh;4^mGc4g*|OM zwy&tJ9(D_w$hJNr@VO}$Ip0Sny7=uPzejV?99`)zdie4c|8}K#!W0{_ns#^KU~Clq z8kG}U%VgX9|3|m#)=OM(Q}VTiPa*Vy3{OPv=M%x$#exN8Lj|-OmT94KFieP+zaYYJ ziYSj2QX(bIjaUGCl(us(5+{*;;fA=07BTn>Z7~eFV(woOBknjt+{9^RECp4fV|_f~ zxG2OuT9V5vFu&DM4(tX*SLLh2e=e%V*eG^q+j?x6b6+^-W>SAhQ%bii7zxaJVTys) z9AGW}%t)P5{H+E)V*DF#kN|94IcK_5m2%hHIuhdgU>nb za;7NDf_1oh?FoeRTFwz$h?vy64&@PM4j`yWL`b1{?ZS>!OsZnEX}-EOvDUo3xW{^a zLbVp3%b4H)St#Jxie^3lQglPAQsd;Pga9@L7ylrrfb^8cFUVIcxNv5HERzxv7FEWI zl2)5oGzyHCxi`(tZmCqWmL>)NU7~(afi-{ z4D5}OHt8(Z=wNeGt0!Uke;M8+4bB!#cP$9P2}&{IK`(f=L)fSQCrb;*sFkYVR(ll-!Er@{1jWDB;_-0jxO__67^n3xVU{94AsXeF$)&mm(%ZjVWJDs?`shb%EjFH=n-w z0oyN$ev34XAQ(LT*?)9|fAq2d@6HgjVO#%px-!H+Y%kVndV81kn?HEk@qT(O;R_WEkf(z{V*lnSqig;rn)H z?b_=hB|br(m62Cw>+aQ+P}{LaJJz~sObhA`3F2gD8>H&LY=FU)dm|X?L750lcY5cN z`CM```*s_;HTdTpatCv4t}L3Fz`v7C0!>bbo4D3qNzaz6d^O*t=NoMx{UzQW_@cKv z{^(bck2+xBONRHIc%q$n^_eked35-e z6I{It+@z@ihuTM_y--7#2t8%YxhPpQKS}CcA(8kP 
z0b8ac)9m9WF(7A3D(_dWM>?VWqP*?($!mY)nJa||5~8fyL`(DQ*Sxp9m$gyPV9}Km zXx!WFRCS+8_YFHaLK0?U%i;a~uz1|tYxk3%Ve8k>QN%G`(+Am9>Ew<1E^e{DD|ysN z*`#f4X1p8eZ`0L7($?$YRDLLLn^ezq;`hDS+s*z@&Ha>;Q>0lbu2(kjZ^P-njO*9P zZ}{z#YzxT`%Xg``^Hp z_BXnyBLrb+8;NsXfDz~O`sTyiwIJ4e1{v#LwC`-KW|p+syGpnTR&>IPgAqItX{|x| zS+D@CG98U}V=Zfi%CmbEg>Z9Cb5spqdMY^7i1UJl2V6^y!XG0C3D21Nai>`8IQ*kR zx*D8-X%wBBh`Wx2S4T~izXco7%u1&%CCYhqd_I2`!c|I5M&{1NV302gPUji4>%qI! zQg_O8a6gZCQR~7UQn|@9W$<*B13WG6%~pb7*W>u^2FtXi*r3RGXS51v$?}}W{%Y+U zHpAV5xU@m5p7kWzRTjM!ZO9`WemZ_AV5AaVSmKsdTvn95lm%B-B%C5s~-C~;!~lmv{N zEVocwYBvxlO9+8fb0;+tJiS)y{={1iX$1qU$?iY(i}N7-epp5rYNRp$_}4Qk$?|sM z00u9sML>Y7prY}R7f1G*f*YM?M0%#>FHhSe3BD38-86iq`(&CXqHrvK`p`d1MW%?{ zJL^j4{L#>F(-(UGI8y{Mp=YRaNHwm5BuopGp07G_cV1*5BQ@D| z7^pUdF}OdGMx?e`p(6OB@d_w^fAB(V8dXNnfQKw_p-Te%+zw2_SzW_UY2=D0T;B?k zgn&OwGAU`4_!2i>etGp;Gms@HUMg0P4}4_a+D^qt@7gxxBDMm?iiuYwNCF6_yA>ry-2v z78#69o;v2Z(hlE-vs=P^TefJQ;>WDAB~+iJPEf>vjEy^4pxTiGZ*wh1UTR0cb>UVw zQ)o&eiS(j#lP*!0bEolOZ0YpQ#kg7lZPJX3O&f_doJ-toOkqBrndZ?Q@rFaQLLOB; z`(zXw=2w+yv^N^1W!brZ8807Bc!n#cTg7Dm6yFAO^ySSt>(q|c1-bD5vs@Z@7#$E^ zfB{SfZSM=y6x*0cQj!@5qoAq&nsMcU$>1<%P0Y?Y*d|9V!|Z|tj^*N7H`_yD zt`!ddIU9eYKC2s$K1%9-m_eL~B)&5~s782(NB_P?@&L1jEc0*xHom@f`bfi>W5xg3 zRC0COVg17G`Nm+KbjVpX!aM#D8ug#-%sy|w3;+I5bfOIEINbb2xp?#!A}7wa49i(Y z#;UZ)#O~9UiMWqnE(AeHij+6OZ?79W?cGo97x8Zw5`FB=e}Jy>Oq?QI^nSlWy68&}Q= zETl)SQ2-Y7*8ATQbG$9%F;U3=^5j2sV<__<4$jI!lR*mW$zzBPP{y%lW4>(n-@F3S z5EL=&`w%-=@j!-nc(1~w7{%vP5v_@Sn}=9w6`i0(@_!-DsQJEY>$qOz++fC!LD@(V z?cx~1T4iRj8gKgW%x@fNteXmajaFmNGJ>EOk2`~$~!8!{!oyK*(eJo-#)+Q_@ z>7+A{h7+7KdL0j#j$D2jv>W=i@q9k&d2QIQ7-hxWTP;4P-4 zGasu0Y2%xH0e_Du+W2$|JZ#}vEp>XrY~EbhBV1h6DP9uuL`QJK8kWo{4+aRz_GEen zCgC3=QE=rIHEE~J9TyFC%4#eT6F>+4ZDtF}lQoCq#oXO;=R@F4)!yyCT%{NRH<*9C z<|lh90<)*gj-{xB<9Hf5=pUPEfQ;7l!V2)86Y)aBh|OO^(f@k**};lUaCja zY4az2H3vtiAn}hRmsHJY(JNcJP9a|zBf=t<3Ft{gn(jKu@Q*W%SWZ%ZwML!fli%yT z*I+3uUdGI+v^KRSoPoN!OexqEG%2FhPyZ!Xu}5c1L*iGM$TLA zOBF^l#33w_$pL$S>%%3J+rH55htOW4WfhWKv=modRGfKAX*rb~sy++R%ipV?x!0Hi 
z%-Hpa_}AgRIZzexRQ}SVCwFr*Iwe8}W7dm}tBJa_0;l``Ed+bNCU z=qvl_xt>$iF6hwvXw-)gKdm+}cw9wnz&O36i1iG;4GlT%hoEG+fkgD3Olsz!Q{Q&K zGqq;W`QWQhh;)RGt^ga&{&3nLNxGb*FJ*Fx7*szXht=o5_cuD!O(6rT{$j*PvUnY5 z*idiaGvI&8n*}f?q!7s8ow@K`F|j5m&LGzsVruDBU z;qJZCW{mwz;RBo4I!+8Qn)kZ&(<4NFQ_gL~>|WveQPB|=S~`OK-T|M+HcPtK%3%c} zyXiaUA43QA4ln1$@$eiII45Tsu50JpTT%G5Sim;eO^Hb|!fDy{7pZ9U-6l{+r1o z{d2PW8y-3Nsq+SpG%jo6mbcv6r%vHVA2bdTj;rmGnCNE*laN?Sp02lF-W$5Y9zhCmDy$@l2jgc~>oGO2} zb&=GB*I|{bgX_~vJl-tR3WhuusXxuo!4{DIf-@FOhk|vj{l3CT9<*|si=bZ>GeOD&5=&5rq%OhRrPdm+vHfbhI~idQyHfV3##Y;T$#a?nXw;Z)aKOte?LkTEOWSy;(j|LUeMj|o7MuFwdOk1)i zCO79PZ+_dyCG6Ts6XVlf88k4Vuzafmk^*qgvS+_h@EOjb0A5}V1R$&?xXZQDT91^E zJ=W<+T%EA+RFffMzAZ}QeB~7tWoy%oY`2^AQ3Uq9sRs($M^Qc%k?jreZ-Rbl{Wh>atG&gEn2>@!^NbuxSi8 zg|$MeY~` z12`)vQ9WC=Z1!|`bwVb@>Cxl z@Xc6d2mL~iT8KsvV$EZ$7FJB7qxQwTr8df3tBBfM`6bXMuL6yq#VVg*lnD(xd+s@; zhcOAV?>lzcayXRjxN&^i@9W?LPwBIX_j_$k%BcEWa4*<@2}FX*ja=Fsccfv%nsidY zZK8t#m$;C+m>!$BDYr)s^&?)+A{47J;vasnXp0;}R7}f=QOoE2$}s_pk!K~&ew8?U z>K1;Xhc3Xfsi#?iBY{UIhNsXNeTSh#>~4`LkoD*a?#BOQVR8AM@1owCtPnNolN1t_ z(Lrr=u?+2-J^Dgc_uYnquC=8p+u0AsTUm$^B(-^yz*7a`I>iF1Y_*ean5|54(N>dn zGVrg;8+!8$RSPr~Z2Iu0rHUF}kEv7VFWKa~F+J%2-8tU%y%c5~;D$8-w>9Pn;ncM* z$b_Dj2LK`&@y^!D91{GZ5d@f4s6>@hLYmm;i`hI_M#v?OgO*d$7_sO#i?d(an+ zI{1)1=S=-hOiJ_)ERNk0Doufo6QOv)$n^{4i5*5`A%(qCC3xRZye6UVeG1=89PoytrG$=1_oo{5%5 zyVIO-&+;SDC}7)s0v?BCV+#mZ((5CS`;Nofr5lDX+K-vmaD^!M?x(f;<@^`DJfc*D zl8NY?j1m7uQ=)3`A8b$CBd&KCUGa=8#y10leEETWlo24^^|#l%oiqdc!OpvNvW`g5 zTa1~;=Cdiq-I-|bS4|Gu0+YzB2y1dYE0rw&h=wlF%x&8yH7B_St3Y+!GP_#Thh@=Ws_Weq&o6e$?d;SZu$Ju<(&uk|wOq(2a5%xrZ5AKvM2EE2um-^r9G z_!*KgOX#$pPIBW5I{W%DlDN)~=}2=Sz(}VIxO(QcLyWb1ocT3Y8>~C^mbgIWq`1!= zgC#tIb{JUJSd%$E!b&bx()0Qa6tFT#D*LkyG@9N{!DRf#dLcGgyk-7U2xKw$3?(uq zm-Px(8$SMCJI*ssi{83ZOMv`HU3%ogo3QAVDajsa&DH1IS*6=DH4~I%Q`7#|`1-lh!6z zdA!$lmNA`3H@FZ1(^Oy*46lc{F64sCKZDj5n0TwRu5e??wU(HqyL(~+%<+7P543|n zWY_FBUrgYsSy$zyxpsnV;6dwe6fiCWzFjEsy9KyTI45lT zf%<3h9M{mdrtw}uq(-=Cl87+lFa;p)Hu;Y?c~cX>?)aaIEuqpAUY42&CgEUrjU}~C 
zDmysUSQYRW``b!7B_wmQdK5a!-q3kp5-Ps?*2j@eZ}p4d2-+m!q5~&5>~fV@n1;aF zUuH+!ZF|3aGjPvaxtE{lPhv5GJsz?$K2l$fD|2$A(Bp2{l#1}Q zZ9eyQWq*xW_G9=73*0H=JnAj>Ew{MnKu2cbah9VA2NcPrOO`M>N0hb#=Tn{h(Evua zA+psZ#=I3J^bs1XsfnKXoHUgm{Zk*SwDh0c#s`{88HTvp^%ea56Ms3<;lgR>#RT_6 zo2dLRu9oNlzPhL`yYwIY{>uUkCi6|N?()r(os;zT*lG?-68sun-{NTY{RBU2^_NnS zv6G8sKK1oOAH9`f*+l9j-M6|kQB9~}Damm{YYE6KG>7Y_}^4_4Iu_HXDcv9w!WMR0e6B! z=nt&w0*DA6(PphdntRmrBE>{OYPMnG9PiD_oF210Yr#a==NIVVfQ-dWMoOC2eA&hJ zv!kwPqcA4xe-G)y+Qn1VH}Fo zVtvD_GX^W}xHY}y;ydj#YkZoto50Mqb>*X8x*)$zV{^#2XfOwB}$eJx=233F(3GnWb9;Is`M2B*c-LS3la9Xo4Q}70r^#p!XmHvFyPA5-WOLj zckx<9qbLAq3kW17Q?|#jr;Ix^m$is{C7nCkLbC1EwkyNVfw{#;Fa)h-I?>$ z`{mfl?U+~26~-%;x?c!}5=LPBB~QwlAA%Ji1Ye$WqmJ~ox+E(tamZhF)Lt9_S@Lu@`(~Jd#*jv@ziv!XFXS5`E+DTIe z&Il+sjR*rg3GI2w3(!jW8aq*}$G`WP^hf%cb87}TjL8F_$Xo7BRnPhOG6#HWWx|*K z`?@%}2NRD>#yQOTfhnMyr~s7Mr7$~5YDHPC5{W|dwf@P3Jl7Wv{$l--83Qy;u+3K( zkbq|{(R>;n*R8N_^}-A{)RoR%;_OyoIf^UnKOft&Kh>_caxg1T6d1QiL=D%0^g1HL=*8xY`QmJgbv~O< zLortBUCrd>yYW;1JVyhYZ20p@O#=r;g6PdlY~LiVq00%-M1(SzN6rnCgH;`fNv7O+ z!ih4gg}#^suIIqpAT=iRDgIorTdqr3x$7ID9W=0tPl{7Y zNR`Qqr%24^voAd(6W`Mkx9v)m++!F4(bjDvpSFNp&$U^JWr0RsnLPn@U+v_WSPoN@ z)%dM|D+>4sRMbE&w3P}s)sY(@R506)Cpd*5`8nuPXl>X{d-*3001%U;*!o$w5+!k^ z)QNBj6AAKeC1W*?gWqu9XZL$bRL+Q!k3Y4@O+X8bacAB+CcXsXBFOziB8qAM+pcT4 zbgvL{P_Y$)@AtAo@_cV}*3)Ub+S$q8nHD6l{&M<8()~Q%9g~|&A@)nQ59q&3LhReh zM9*$G(WRRbDXb`b#Oadgoug8EzfdY3>hAMzX=|bU!@-w9jPt!4vKl>1r?+)H(J-TH zHXln=_$hy$LiRTw{=tu5?9GF3ryG`>Gs(CA_q(p^_^xzu=XX84aieJm2N8Woe1m(! 
zg3;SOZg=vf+MQ88EZUv^;mXiL{0jWOvFgVlnUY%P1fQDl1jQ$V`3=M2vOY|$T-_rJ z=rKJ;YZg4Dc;5mP8l_NP(M_p-2byFA{!D4X1ObxHLB3f=3e#a~_ z0Hz>BOjg0pnI4TnK|nn(*ffq?C><%erkvD^Fa@Nv;BQv9Wp-wb@Lt z^R%pSeocTm&Ra&l0aa9NKrc-}@{ru>L}pCI5v-e=yVJ4Cer#5a%>zf(6NOn=zmN8ImtIj`H~66Su-S=&wS0Q zm0zP-G`n=ykH7{yM!A;~o9&<0BA07II?IuyU5YI&{x$iiYojv*cNvEgP%;Yc=V~dp z!yb9(sz>;R9h|{pnZS~|eZ|E-$Wtr*l1I}`8h=y9E%qVD<)*QK$dw)P^Uw$3Z@FOh zk>te#Z<^~mj}zsR!rA&X?{I_(N?_5X%h07RnkH#>-3+luI;13Hjb=bUs~ap=rXX?k z#MfTAL_~B%%UDTG!X~tZcT5-m*!+p3hCi_HBC&_M*pHrHaqxldk$chmlnAnD6pq>X1%QGHjF; zqJ)nxyz?{9&(0+O`jM!Vr`WZEo(QfRdUu;s2-L1)r*g@pJ5!X)=WRtdUkeYyI ztDs||WV#zNsE80|6)-3n3|>cw$IOiNoKw>nL|Dgdk@I7_rd(m1NmVPwJ30Ml4$7`c z!A~RyGw|ia0whHqTU{wgmk|8P)HHpdD-Q(E=><6u!G4nu=cuVVX=qG|Ql%)|9m z!w6V_kVdty*6S|R7abf16C#eTGeeC$wO#Mz59p)B!1et+f6K&p+Q2!h8zyh0!{+O9 zcxhQhtL#gIp1>p*v@w>7Ic6Q9baOtv)WBClA4?{tfWt0_zs^8T?CQzAk!L3*XDABR%cN{v1KZPTeY zr`M}@i~neUhd#Ehn zFfjN4E8g`Ka<4^+IqoG}Ww;ojH@~^y7|?zUYpvqUFqG8sKUB|Cg)l9_OA!Ub))W4R z#w!?K^eHmj@qs74uH4cZjTe879WZJ<*^0;~z|Y8`2`2R-t~dLjkA&D3MyuZGA*8Ow zJo=vJ0nby$MU3^n^RcVZ%YbDgIcQ6e+RBYa)`26$!p7#xjd>^-LI|TjaljeCfy$`K zmC@0HC6TB`zGQ+>d0!-~q_5wrk`=wk7;w3Ue!BU?_4r_`D;L;Is0Ms)Ixy2yM1$?N z!1qrOzht?agJ1a|7e*;Fw{ar^ub@&8h%2Mimr&CGO8F^pI%B5bkvq4e8)X1dz5&g~ z#tv@kFFwU8zLLRpQ326h2+@y=L6>EZ{0KV(!b}@;u&cdt5i}t=B0r}`66r`!>In$D zKt;iMUvFgsJ)pEU4d|@{>KGv4(!$QZ*?rk4wgw?ag6dC*|K)b&53xUpLQ0(l9cr4y zrH|S8r|ouHOY`KMJ+b?dh*3s%(IX=){o%&tr{rS$uSD=Am21E4Y6cU-j@+GubQG|m zEy+@=gklDTq*w?s^NVxLH56J*M~9>+mC_^;p_c8`Zty}(JN{NW45fF!aBkgZ+wTdL zIQAk#C17t$H0EM>F0Bk+Mm^(f9J*S1V!)a{6|Ua7VX;?M?N z1nJb0$g!ZS4A9s?zp2v`j2`pUUaCM zO>iLdo6uqPtZ*+t$GSDR-i=*5;yqgcX@&zU_GB9P@h@$`=_q>fr_hB1`32TbW3C2t z>Z`Uos)?3n3NOPqa_aYkHeEj#<HW2^9;CU@NcXM+)XJ`?7Of=<8$5cS8W5KR){ z{i6+HbqA5xgrujRy^rN8fBcUBwY>FH4XsZR#30ck&3&?Gc&J}?8S*1SR8(+sQ%`-Y zo*WU(d_zwjN>;)iLV%@3dPPg1&A)==;3Xy+Zte5lsXu#I2O(tL3-uFF+zRXo3hvB& zA>^W^;fhM|49uW0!#gy^4IELibN+fr4A-9xNmCDO;~HWv3tV(()EJYS=wzNBXu)xV z6bQvDCoLyy31yWIAzPfBon3k4Ov7tP-AsQ!J!;$zP5FD^0rU@!52>oo#no3LwcMTI 
zHq4qJVo*Z-QBvrFOG9wIv4a`|lTzB+D~Ae64w%9?xFfApxtl|6t!UEQcK?x_8Hu?P^T z;yex=8CwLFKNHECzDtUTDd0x%1SWnOMnLs1UB!r& zaC8tWgQ6c34p?o1VP4W4C#RqcT!`!h{>o_S7NH287?5mgJ%x{WEtxz-M~=$Z^*Mua z7OhR-T6XLk=i$2kreq4PxxJd|8R-^j@SGQXZDon3jQFO>CjO{M?vLHcdXOliM9yGg zgoX2uT;2vRLVm|wH2127)Ez7MhM)rLX%uh1BuC$_x*z2YyY{fix!MsIb2XBEqgk`>YP&pBjX zBz+OByuiZh))m^ILE#+)H_jmP&S}8z{9c_$w4T7S`!`!WXZxLE4Y3;@AUDeI7-uD{ zAFD7(()1yau>qX@#}q`o&eUstB4Dpc0S9nCbGki38}H4&h&yhpR6u2X7n3(;^M_4?ip$?9~f@aD!Fbm8E#B z)knzm=#=wOM~mEBdL-1OHQ=mN+4;aTtMwCtiq=B+tokZOka8%&^~?Cq>+t)-3qEif*QlK}QKA zbV>R#xHftL@>$1oce^jTyrLExzaUY^3|tBSh6zDjEn=V8=Dn6jg{=cyF_%BfDV$3W zd&-^5!5LGDrXy67QAJ9R?~+!UfV+zS1*MSiB!=P;Nk868n&T_!j^5}cV$E~wkKAwe z5Vc{~Qh-hl=L@r8ZTnrT{Gr|(gINZ+#MO2}Q`1|;)Bkn?qmf0ceE1i0rf9QL0rl+A z`1j-l_vycXiHl&kC%_=C4rL;0=TQ;Wjli?+tDouwvVW)!+w;ASn0==2e6Tr;JBWv; zfQOEaY%x1k$1NDqoJ!Tv)9u%4biz8H{(0PQg*$Ea$@=sQ_`q8m9-D)!b*sZXR);3< zC-)^n!y0zFVDyN0J3r{8(UW1XV$q&mgjL3RPz7R`;1a(RE7gnAN{l<5+K(W<>!Nkr*tTuk zc2coz+qP|2?4)A6v2EL~sAB8pJA0pdZ)@`}%(2!SqmTaFipdaVV1+eGII9gTKS+Z+ zOiZf9?0d(#H^%?WTM?e0eq`f{wbjNfS%v|8{r9s512bP(HYwQZoC-#MeyD@w@B^I)f0>F*yM?4ZiHekM7KA-!(6ZmCzp zsH%CITST|C5ZAt7Y@P-q3j-Um@2<9 zh$YgY`6!XkZLJ+do2#s?8pyOU_N6z>=<<lb z6f&r&?acK3FQ8tTD$eY~3cC}R83r9ao#C$?2Ea_-=*ZdPrR0D-mf7^lJdJ{Bu<#f% z$sFG9a-E+GQ7CZ9bSl(Ym;=gaI#`~LmwY1Q!Km!<54G3@+@9#BElmPu2< z;3|eefyd6FlHNm@w3ey>`7bYUt-<^G>Wy(Yo@=b?j}3<}iLLJ>#6yMezX|Iv-2|#R zHNqF^DGg|&=09(BdtWeSXI7JZy(WOh&eqte1mW0ns(OiOeZq6$^in08^t@5mf-%5; z9-jW$#6*Ro$h=Lsi6j~<_IN1enFiSO0Oxf4Lyu)dS&CbXhglW{ELa7@NG%Pn9{Uyo z+R&k7Wx?O99^^$1LX+xG@Q1beJ$BhWHUi@4+WO``oH$G2cGi`lfPF)cfMgROyhV-h zsDAAtx3N*?->iD6j-j{PgjxhyR7S|15yG<;tQA-Qf$-JPngD%8zlpdrsq9U4#g*?^ULj8<6=~cP0n`pb4!;rIyB8LP!C*@|#S;JY<_;t`^{Gp;cnRgJ zw~F90w~0%0{3gJmzqm8OL?ExQda#S{ku|mKzz{^oib4j4r~ohz&55Xe!`)!G00jHF z@f`WQ!zGDx`s$Bh^?MmtvfHj^#!rNZtf^6PS6H61!?Og+Bp*Av^Kzrq;SGztco*Fi zV|+DgRYg^VthAv<5NExZMyq80UKZeHl*657cIjJ9sxe*9*%L|IiMuA?Mp4~0^HB1b zehh0+LH~40|8=z9?LQ|)Myo8Sat9(KG89W|YNN9|NG&b(*v-UsG(@SvagNd&&vDz` 
z>IV)1xP(XZp8Zb@O!(1HICPjG$N;e|%|qjVt>#PT7)$)@WU*#5KA@@ltM;kmkKas7 zYQ_Pp!0vTf!0!9b^Fh!1zd^*$x!q$w@Yq^J2!~CsgzMfb!O>2_SJJ6*ujxOW>$ISB z3y9n}h``T9G zgtlZSzOh58{!Jzb#&nP7?f-0(f(%eq#gcUTF^#J~jGrW--sScKsgw|znw*Pqgu>ab za;$Rk`x2(2CQ~+YvG;aC*@2EJe8kn}h{t|fnL=vB9TG=1=VDcIZ8lwu>qPft(vt-Z62R4Jd( zty+^=V)z{DoQ4-9!SCvnuU$66C8}&yg0l}(meEkvI%$}y*RVqZRrJT~w?s0N>zTrv zQ2~jV<0Ll0Ni@4n(L&g>C6dzj-De{CC6_GMOb)*lCC$brX{mx&EYtH6Oh{0&U%NAz z62PUq<9p*8&nn<&t8)j8Iuk5B@+3E>l@Ms-Ve;5eUj?Q|V0svook+Md`7Bd`Wb)8I zZv&Q-^2e3vXR>$>*o&FJyL_j}NQ9qBBNvopQmlBN8K@>0$`pPA*h>po4*1HQXdU}4 zcSpP^NpX?@S#NGptjtE;smv##S5Ur?P3L`v@t^Bv)w3(ynNTuV`ZZta$LNd+*Rzgm zt__S<=HmYD7fB$BA^*aJ8f94~vSNpr)@@>^&b)fUnb)c$UtWE_Y3%g`(ccRoj&!>m zkp-Vj1j7>oJ`Qu3v7~*VLGCXFmk^0=*YiF~Y-2w<5@=s>@x_CXz$|gKE$n8_LsS|(VKE!h0tpK z>-Z^KMh)mMX|y2`lsT8Foc__g-q7!4D7f27Z*T^a|LX)b)6y3|)&x1!{AFFL-H&TRSy^FJT!l=iH+Kf@y;>b0FNEr)_wjw%?UiUZu1D}< zmUq0PqgJQiUGv_()45zzqq8j%EDQygeA&;D2~9fi6lpgAivMg5*B%3|02;TBI}@nkL51T&3zb`PUoEZO=^ zO$H1TZ2zpzw!kQmJ1pG|t{hc%P{65mbL*$AOm{@aGv}+)W}%8e*kTtjs$A+qX)&MI3n4Wnq*F`KJ!wJOAIG&xAzBt9CYw5}avnZXp?g}zwOfa}#H z)c>?5d+3R^Fb}9q1JCfg3NrYe{75d&VdQU(%Oyqko6lJX zVcB3MX)RnB?9 zV8lzZ2aMFT?gVReefa0_`i0kUYTN9z$?Gk*a7@0>32OQMzU6i1nWK?)jb0r2)LOecS<{zcKANVir0bi^yLy`fII-9(awP{~} zqf+7UP!uxIAeSuKk=sjk9D&9Cb5ymG!cRhdjmOd=aRTkY@@N>bpfJzL!yY8Te~F?q z=Ra7=a}7)TpC(?Fga7zSsLJYe*!_Zqd!8>=4c`vc@ZeCrY5OH4@ZW}rAgthRPDYU|w!gTSU!cb{Od*HX5e}NzsG!9Gq zMcRM9wdY7UP5XUV*gIN+*f@B4M_>~gwjr7In7s9<^^4er@uIaTL&~&PL#pz~U-PEB zKVEQpf1_ZiECmk>hL%wam;|_IMe-%WV||$w=uQ7RM9ki+B@YLkoxyfH_H!Flx62p% zOLiQlzrqA;nm*Q}c$P{l>-_c0Bx%4n>uAv}qldNga-fMxhTBvtWm~j+t2Ev_<7wTn zoEROJt7%78qhVboJSboywF>k#(JtbojR^Mw7OrGu!QWiTRQ?orCN;0Wma`7S-5K!$ zy$!*;&fP4*sww7kox!@%p1$Yi>*d0|tw#Ov`#Hu(x7oDBzA=>LSX@Ha@m2_D6EOu( zbgtFcu6}m1krl-&X*#L*Y0Iz8>cUhzK?@lnj@7|ubMvMU(JylJ@ZwkptKVKO5Ap1E zndrJ$`>RzqsQ#^Ixt;IR|Leoc_oCIGRz8;}jPSjI(8O#z&Xx`0`E#Ch6n6Q}M8=f& zIKg*W!w>yuvzdXL!W%|<|9z0roBwMI@;eRek0hzqC#-yjrXc)%_$+8U@W*P5#tr3l zM_Ox~o%w;HW3&AQ;D3+7$CVc{mJ}I;DfTL)?&p-rTEG!CLch5WRF$`@05IOk8ACxJ 
zoj1hwa%53rl|BhW_AB0_WoWJnZ>Ft1+zP5XIYi~nqgh;0RoEESRMnWvk0-o-oRgnR zE7XH(ZJ}VPK+Qk{D{cfU-<3CZmj)5yI!E5)d5zpperZ5T#ebI{_ zrhY0RTF;2Y6buKNwZc9f@LnV!)~np)&6ma@uTsW_N=CUMTb6V4d>dh9wB{OL`vjn0~gvJ_=2F-G)K0NfkKvKc<4b?l-{KyRQUKfY1$ zN+V&u=#eZ_wv}Vpye7hHAhqX{Dsa$Abi}y{ZdL`W99nP8P|q@*aG5yO7mOGO$;P%z z=DmA{rZlfT+|npR;^ZN^u!DZ+^Wd+32}8YZvsvv*4uRY<#m}mlX!FIs zJA+TjyPmth?gS=va!W>~q3xXhf+vp;V)!m=^3qcF)~H-i;jX!#`piC)1rrGaf88T6 zGj2#x66OhMDJGc4x{J~rTn1v^NX1Uy)5`d4fC#{zQOd`AIyZzm$- zs`T!)=u3CJuby3JObS8|-l{M(_E6^-YS;6h85Vl_BdhOOz`MissNl~NJ9gPhdie~2 zmNuS*vbMa7tE%SfGclM3SMuyVQjz$W!A^>%Y8?{}!J?KaLc>$gVwhdJdNCUM5@psr}!YzU87pigcJ4e1Tizpxo#YN?wO8 zkZuJDyl@b={8gTc80SW+H#=(n_gwqg zVI_a6&h07Y8NOE(WYeG#5y1uNn-VLvgDgt~RWTxW1eRURbEuBzg7e`Zm8Nx{*A)yO z4J+w@;%Jh+X>AMx#AEIea3gY|T6 z_KYIfF0yds2&)j84MAUiyVNZGVnJg(+5T&Zbd$BASeQA37(R=qsCOUL8n(lv){9Iw zLX^ax}M_N;7sKXT+3&Y~1g!Ks+gc|J?1B z5a%80$HsNv7jwHtDG*PKS!cW{z9e5Ay3r&4%P>4BBc74&ZKfwJW`ioZs*EK&}zJ3LY2SIygGSItHn>_N=i%^YOWw~MmHK+X* zL)N|n9oJ%}0S5FDs<7%Pd-kpD7l*l)J#b@gi{13@J>9!I$Ob-&<<$~KJTl|cdWut$ z4(b|VEtAG9t4qz2-ce@+$F>9~{!iTSxIeWFg495F)2+p0r?b$Y*(e#L^_l>U z39B5iP>sO@R_;$N%{|<~>;>EX!2BlB|EAmtO!#_=xYZ&2pqH(~H)Pou==U7~nA%d7 z2ExI@(Tfs4#+ltj+I8nt4OVCOy>SnjFdczq#Pd1Rd*YiwICG%^$4DpG|F*Rf>po&H z;I*!I#7S7vpB5)QE98h5W$LCD?IQ5$h?s1rd^97MnGm_77fT{d*8m$s(~m^FR<>%K zL#FN>jl@<9d=kFnVtd*6{a=g@chB~jW`7GJ+c6HQd}JzIl&u+T;a-Fj5j>oeS>VJ- zN+g^81|HEPY`)&b+4MTD+wW@*F1Pe)|>?x(+QlV(2=DS{MX zqjZPB4OUT9r=3^p>I60TF2{5bAab|Qz*{4E>h*#2F!@Iq2F)%L8(k9q= zSI8HLTkSew_dLYijr6(h#`M_r;}n$SuzJW&@VhjruIN<4PzJyoSb~_i{WH3cZ|!-k z>8f+^zyICj#xN(o$9w(Z{IPKyaog}&-eZ`q!66Bk)e)b%bM5&?qVKy;7hN0iM|WaC z^AFRC-S+>So|jE{VkV%8mlrW9tA<7&Wc$yjUR0>tTTqFJFmdKUm@5Zi>E)=S7 zS*M`+$iq_UPC60MB_fq!#~S&GKrKrmXJKxvQpxLL^?@gxF{%KI82YMsP6ao>ku&JN zXs?Y~7bh%}p0A3dXSb4Z53j}d2CT`B4ka`_YUZ~TII)Jo-Ng0w&L0R9j$bC1Ej zk7S<##U%P^TQ3L{(oga~o_-9|bY_X7FW5;*ODr%Ye;5_sURU#-o8k{|Di0nVaxy-9*ff7;2j_?>GgpH2A_8YJ(QIK{&%)zo{b zt^fVn|LF5*a29I(F=XM#G3~4i=g=cPoxb%1&YiXGHT{G)>{ZExj>8UyKH@k__JzzY 
zL6nO512u@rAnY1q_)i`wH^V$<29^HtY}$c(=_!522j#2YE>ncdTn00#OY#$yVWm2IDM1U_bz&vzOa~twmVV8;)z=1&|Rij2rrYcSO*0&4|e&O>wwd zQk=&jlwPG9s-4MoER+n`$tM)SuOJ+}PLr<^gQCy$+gKR1Fj77O#<+3*p+g8n=ca4} z|HV=QdL^Obg8L04Vojc{dv1}fQx;9w6A%&nXkE(4J@*^xNI(h!vy%IF&+#g1`?gPG zB()eYa+2A=i)*Z%uViDUDt{c0S;tNj{_)ur{+8dx_RD3Oq*o_&pY@nKqm*!TE@aar zOw~t}`?e-P^B~-qhoPDO6^+bJCtWiAO>9N*PC2C%BjU@{&)U+yJ^{!6+v>OaI|mcg zTKGLnEFqr+*f>vd1Q*IijyI9KZjYRF9J0!{EdOAUBOM=Kg@uSs&hPMJ8DcP$rMw`oacxKmxv%2=#`77#-UiPESZPVc|G zoRQx%f@7=2Vd0D4=1tiCm!n^ZT^D-2*?sqFv=v=75U!`2pSFRn185v_xOl$8w!uSuO7`rSKv&zpWoJIa^QEa%|s_6zox`Q3K~PiH5` zON*_)6zSeU+t?slRc|1@huVA{9)Uhpc7Vt-ug~l*%~m&^|8w4p=kr*DoVLVDb2-}{ z#wC$X@kOMCH`p=y=}L;|HBd`V+O22%Y<~Amm;wkA>RVdI1K(_UEb0TV5UIEtkuE4l zOT-KJQ@}}K{g7y(-sl=Rj1^M^07+prf0{c|fgXsskqZB2IU2&EVJ0u9x{v}q_K0NC zL`Nlnv<2rR1~zwp`(~Vg&xaeK)a4fo2!(nDbJNM-D2s*c)l3a zqBh!*N@c=b%28}K|N8FNTPOC0CS&bP!^#65u0zZT;nGc+zhBIFD8Ki*W?0`R68kRI zkcMk{iGgyo&2jgt$?)i#Godb-uR+V)f`04YvhdyQCm79N0>=bqcQ*{_CS9^aSFi zly@XITxG{fE2=7b<^>4#E2>>N9+5E4>Nsh&Y@8D}pQfTrs8UL(!)Vi6~)LHw#`80WWq}_ubUi))&iRizHkHr8(*`AhVgf1AdRkEd9^McVJM zcxH`}Wguh82X|)m#fyUT8>_o@xjveXno4tL7CxkpBj3|9Q#kS`ZP=Stw=$2IrQKmW zVy7r{qgXrdN4X{7R@XtibJh7Jth=?FQt`DjmWTkZ+faHcXfAUELLgh^_jY*y^ zpPHr#Vitwg93x9BNz)=-2i*|K%jgs$E)R-O0`%2>l|=5zOb^`qg`p#qcwogTt*GIc zTz&4)_9w6F!JD)r2Q%aH4u^EY=LQdm=-2Bdt-c2Z-8j{Y-DMvWO+aKMpok#QVw1HK zoggEU%NC(jX~!DvwbO|BBKTiCA+Xj2wJ{~`Tt;73CwHTsk)1fjYjLLZT`8Zyg;nYO zb*5Q*r4CEqJPH}8&pfJ~^P$1BT4RE#8llpQ5Vzd-8Q-9hfi6#=PO->*&J4>SK3(z1 z^e6JE(k!?$hIiYX0sps~pzntV4F9&*Q1G?yqcr~_0_{0|$+U?|Dm$evEwq{mzrLQk z0KeLvG@b5&%sM~?o-8_}(1-%tp}^MT!Ls&jZL+E))Z5B9eaEQKyR9X4a}k1V;Fy{SmKd&Lb2%>?Y&{PsAAsWh5j?o zN0#9IV>KO-4wdrLV()AYcL*w_Jd>4^s?OEq&#-wULhxh%;qwoMs^q#-DLrrSQT?jl zs)rxu|3Nl8J3C(h4(}Ykd!U4xG^%aP))9_Fw>a6C3Dww@L?`qPuhbt)3f%Y1 zq>Rzo0pCN{dl~s3dc=hqsl}r$7OXUyg@Z~u^3R&k=!~8pG?eH9^_?-D?tnG>xl6j$ z!`c7$fsPr#8{Vgx2UQCwmWusF7=YF!$v7AhoQ7E9S^(p$5qa$>n@?xPb2;pgme00N zUQf=S3`!7ddxLx6veP&Nh$vk4)-n= zZBa)usn)LFY0?<&Rm1SXFbf5WLf?*4_Cvg?w(@Jx!HiX*T9vD%D>ie=c-MjZQNY55 
z!3~XWc4fApXobvsS{1;)6c}PH@(Wjiu75#@@R3cTo9=h%2O&4PQwKJJylDr!ZxW?F zfKkmEcdm+qsmUq}bpTHb2T8&T9Z%3msfPoBJs`pbbw?0Z+Q&kLf)Z;)jimYG{Vcjn0m{=H1;*zFd{{J8gxGch zne0m)KrSY}sGoM65!$Eb9Vx}3H-?7oTW_3?JrsJbHg~t5m=&dV-u8cKyO4p?i z;%;RmgRTK~fTToyfH2yQQD}3!;G-WJ@voOKWj=CQw9rbCyJ4A1qGfFY3o2u*VIPk{ zV- zFTswu<(pe1DehO20PPmX!-4Z+wu{el1AlvCe2v8HvV<9OA@$z!X)iS!|fL3Ybk}Hh(n(YFH#~g?lkuhomLTP&56_9!DmM_#`nn+i$Zq z;^04%ies)dWQ_s6VQ#*72UTEn7B+SR6>0AF3zy5I4+>#|gebAefUVk`&*=%S=co*S z%k7!J&GEG6h4K4L(B#K;VBr%nYuBXt(`SEG>Y-B}e{yV{>gdAf;?(`cy|$nO?!#mj zw7nS!_{X#h4Mj=w;2JMOzC>SP5CyFAOME&vdfw_G`qT-@2}sk*UZ^gc13m@qa6I&i z0#)y%gs0arBrXX}IP+j2@^2;-3TT#9wN@yETqMGwz|j`Nb;NscwnS1IGGr`` z=RfwpJ7z0(Gw|)i4NS8hStelkA&;gPa#~VX1*;NE?P)4$K|wRgFwYS`4Y)#*Lur%ihfP?d~m8>ms5k)|kWPhD4Xw!+_D8uzb1zDGiB z3Y03%!g>vXRUxbmqxxKFtHYy}1(kgtCJjW_IBjmuo&&)Ak-6`KG0edF zl(f2mc~oJ^2)h`mXPONw&%0n}vd=6|$nwm8Km0b!AOfp|iwgY=t>Fb5R46v~0AGGmcG>FO+! z;3jR>yR2naY3Hf;zXDv+-HQJ&3*f+W<5XyeAmi)`J)ANuQ^a9V3ouC#+|P;~i_Cy*KkFU+{wGPPls-y*K>^=0mmkJWoPt`PcC@m97F*282px z-G*CzgYYJtAa{LYUn~XT~5xp*SfF#10_6K5_T4$aS%?-6SNI}aY9ubx|t*2 zaoTB)Q|h?f*BuJRwB+K~TU{hV#lljd4qGN6ou|S23fZoNgHvpcW_B2h-I5_=nqJRY z-5>x1_*dGza4VyC@fuoWi#Wa7$^bO<9QO<$Yj7V2&pu#0WbLz$(($-5W{ts0N2d)S zE{;KaV<}o^n4#wR8mkzks4D3np2+cAFN8&hmV)uGHVxZF|5-5i6V(NZnp%+qZr6bs z;~=g6hp{YF3&kWAy-?acCUPf*S~J1~<^3FC;2HTZe2c!r8_j>}pj&C@Wi7vuBD3R* zfx%@-!U{WD;PKex+Yui|%W{kJVig^18{r8-p~S~e18HIojSHVdbCJx22eL}DH*F}w zz>h98iy+;wJnAm){7qeiK$K9m%D}=kFNJ5=Ldb(grAcCB&Wk0?u?>_+l5n0%;use) z-c32N28B?5?z;&biHp2F@HR-yHZ@1E=)4a+u=*)}%X$od_-Q*`2Ud>GRINmi+jWB& z%lh)6>ORWVeu98J=&;SGr8G~Pe%jYD9QPS)PuR(auiEcEpWrZIlCM=4A%vJ zbb%>F*xu-v7Zcd|&6*fSRD+}~PfoWZYR^9IH>Pd=#t7r5;5WhhBKC;exO$@BDyyOr zvKO-I=ii8)BU#lHaof9-8#YQ-7cP)$-D$8iYPUnEf4wjXhM}&U z<9pU-CYIOb1Zw8hv|SF?-W)pTr zo%TCFo8#qjhYuN1-(YajO9h;QWJVfiGzfish1Sc zzuk7|fXglZmo!R2|6JpSOT6v9E7JpR2;UC7+pHK3r{{KjYD?|^U$gj2s+p81^J4eI zOXL#01g40i?XI12&Y*+B8oK5MR$C22rL=&~Q`B6_h(q!~zoAXV$`Lx$z?sDV>C znhJYa*C6Rhqz@T24Hs>AcSL 
z5C_hTKQ9(%+~d)&Yv0RO3u>7UTBoUhT@NPg1JTQM6<0EZ|D>eHd;8I>=pq0UGBGlK z!4w`?KNKL5JqLi~8SdW&p^JXxzp1+beYwU1tjJ1u!b{5c{Nw|BRXgrI9?*>$1?SZw zb3X|FzSfGB+?h{Q{Ot}L=~!@epL*Dc9kX-M#UVfwNS?4#J9tQ{gi)pRa2y3?94&p# z=_gU50B`V|{}QF4@*3(-;1%5I*C#Cm%#Xz$ZPU-DsSy`cK|H7kK8a&y)AZ3cY&Wcy%D2K5(93fX#J1|$Ki^w_&Fx^d=InZlemxQ7p`w?JZo$-~W;gYVs9^@BPhA; zNq0gm8bQ-E&J#&jc%0tN z5%iE%s#tXb$M^Nit-I2vnueo<#2HjFH*OUO;~sn$vr?Iz4IdM>>+8{>ul0wsF`Q>~ zWXiq!Oi0D4X3@oATo$3!YbwKtgeke`T?X8?MdaKQ$jzJdzz#6)`rvH$6#y^T0I4YtgB<2#UE0w7jH?P4VJ}MoMxEDjy31il7PoK48`^7QzFQ+2c zs9rW`6x)N-sEUgQO7M(yV-?$@(Fce?^>n8kbk!wZsE6mUfNNK4ii7Jd6ZlWWGXP*nOBcxMu20OIvn#fbU5dt zDHGk_&7Sa7&X!+zwxJ~C=+NnruH#c6xEnI#svJxN9^NrCOxrPiEM1@Zid}$W`ZFC% zJg5IG8;h7HCir7&bCC%Cg6T>VFuX6%>Cz;TUH%t+Oq}~mJx?))qb}WAj0$}t;`osz zp5uz(=~QD5pvU9RNVL*nKgz~))cg3rfJ2YR3@)BQP4C?jc&^4i4;#_%(y4#?>SxY~ z{z!m&>E=ZAerFSSfzma4p|BgWIZ6r1UIF-Tb6)W4P;iC}=vbO;c~r*n*-3auGU)l~ zIfYW;z^CWb=Z5h6(C6m#`y+pJJcXCXk=9>+4G z?6F>2Pa*Z8i){{r@rDn1j<*bGF+K2kh97qT+_NjmAES%Qv-l(}~ChuPycO#XFRm_zB$s;kNaE&mUNR7f4h6|jW+TLZi$9@X0 zj^+cse4=te(?MWr9U9e`nRr zvfPF4#3h!K6Bz*~G%T?TeDZ`>Oj%DY*`aUuaQJ%HDy=!EHmE|1C`(Hq3;(wsTmJ01 zON+99WY^vZtOpNdSdI_KhIE+GlN`pLz{{?S72B2(8+TYDE(jid#Aqcr%;{+&;}-;n z2Vh>#j2Tw_3OyKrBwS($m#VuTZQqh1eO%%t?h6si7*Yk==B=w!6D|i7id(g(VMnfR z@Y^7QLD1k0Pmj^GoGW%PL=F6ut8yQrhpf8!yyD!X|4K8W>Z6R>8JMTWF_MTKN0@Zd9t)*N#>}gtFJZkUE<#@E zyO~x3nW6rB#A<=%byFS+troHq^jIsaMFokqu$5B&zFd@iH{Xl=z0ma{o zGaTA6fq#C0j+Og>=6@E|q22qGPflDYc|iz4~8q!shu7bqoQJr*w{dGbJMt@WuGR)F zcN|@BOLm^u$C6}2HxT~cO;%+uAAPpr4%rNfN=~T9x@rdXa{n{=dk5Vl_J_4|4+LywF|Gbkv{+Y39lsy;s=$4l>Q+baA1`eL zwB=-a*ohV)^~u3V66!MOOAPSFcuHMxuJZHtzivSqH)8UEzbc1{yYxW;SWpPbn?hz0 zm%TE}p)uT>Hb`>{X|YEE*_Q@)xzZD`WQow@@nb9$mo1`W*c$pW>jENwu~ebD=^(5Y zB!TP;7tFlvq$=7%6Va;!>u2$SrVH;>nxG<~=C^|9fVR6NTcIvprhubeWq;kuWxyqv zF$rGPW#CLLR2~#D%yF7hqNCwy4fL=t>27OK{UL?Ix=xRd7C9%D$gDt(c!{|`ST|}` zIV^jFqu37|*6xZIsSjbh@QrVYFdVQ2<~?h^G3Bu1IUhj8m*Y?(sVB{iZT^ap7!i1|6{QzBJYT(SDq#kBtjrQIOOxaiiGSei+ z7tIcLx|is|Yo_xO32E?VyybF152p=)^r>*`8mlo5mkqvfO>p&h83qYIe?Htv4AP~p 
z-(WaJTC~yCofKOD7nsN1V=RFQi-|EISkG(G{WosNyG<0%Wyl51v@d|1 zyLxDX>CH5v3W~RVchH@>i5d6nkd^l@267B#U1RQCDBUnr-#>59^OT34V7eNzS*d#Q zb`P${SQ(+B69Cr*D}a8ss79N=?P#qL&Ku7Ry5X(W*Ea|QLDA7+t{UxB4`(oa2lp5z z24@W~#Ytz#mw9wxR%!8tr7BAkCxP?karegJW{grWWcTHJC!(XGzM(zpwO?RYFcl-Q5~ z^_GW25uNFZ5;-7cJTxGIxhZKfkjLcoHE&d>{FxmbsIzv|l}B`wqk>8?|BmfIam(;1 zYE?)DlRZV|I?9?M%b#Mh!^qrH2xt#fSP|uV0I>ah|9nFpexSM1qh)gA65WSE zEn;W-&v>a<2<<(GiWWRs=yLgJP6^X$eQco4+qPCx+9XJ?n;1)JquEm5zHE08BfplQzfKU5Qc>)*$f-aR+%TKDm< zJt4ow1z#y&BED}34^;BsmWT&nXz#;Me~~z1l)8A^2P*g;9D1&H?u0qKl{p;f%G7}N zt^bE@JpZ}b?0s}#z5u25*n4n4&(rd|^ToMt-@!DuOHi2lN$D{rsgli}gKUItT8|qHWtIUu-)S+qP}nsyG$fwrv{~+qP}nc3z$P+IjE(hu!vEYt1ow zk8!@^gAbN)aMAi~rmgXLR~-D}1gXz97AMe*K&$7_&5=J^uz&(oO&}BBPemg!R|x1e zW?T^v)UY1~jqiE_b|vFy=UyU@;DPs8`mB#t`glZF=-ldt&m$`&&@w9%`EmiQb&ots zL%J$N!UCC(xBj&06^i~II(gXcNli4z7V+)o$1(+RaEO{NkFo~)`L;tSd=Buz#+9Ke zP^?v2%qz&HF4TS(JSdnS>hQDETHws)KWpGrX@GvCSnrC%Lk1oBkpgi=@I~iAb0s5| za<~31$6%h-s5FAjYb zlh~x<1XXM^m_mQtaY1rr#@5BD2OoT7ZzPqRn>mR440;XwIXa$o@H9E%=Nj5q9TnB7 zl~~CysD?|AVe0vr6koAM5x?1)RTxY(?($jE!gz|<(fsvns$WJG!-+QdVV#BO$|`R< z;Do4J^e~zeN!jiO-HHc;*)2xbyr#0DbLPslCP7hoR*AhP-)k`WhClEX-C-7_(aAS0 zG5|t$4we=1?qPA&@Q>?ml$R}c45|Ao1mI*8Y0BI&e4Pq5xRqZUc7y#G8hGq6-mbV3 z&}wp|my{PerhEBnu3kzT@P>7}bphKV1m*P9DhG_?cc9a$)+?eF;AVU-+XJ&C(_Oa% z1s4s{$~e0YwJ9h2J=#n2|H!)=>nB8G;@_n(2yvGq2rp*HKY|+_3Q{B#P z11p{WJMtqj759ckt(4-A8iZA=lI8PM8PO^{`$$1<`7?jc@vbBxtW$W1q);Qkd;j9? 
z2nRKMbB7f1F(xE66+G8(=l9+k|9<<>&R2dR8ut>&8};F;a<@oqc3qqTr9X#AtF4 zb>=q68lzx4)d*f_;r$R-vIu239|Z#`Ucs*QGmLg;6e}&brnQ)Sa12%D+S|41I+nK* zW7pOsuP9apQ!se$TggOVH(}gIo|t-5v&k9yYv#?~g3Deed|^e>WXm5c3&a&^rj;>< zEruuX_X}=z2_xZZYtG$qw$(*%%xp$z|3hMwmAQ9|rrW^rYq5L<^mM?{CIWlqHu{sd zgh~P3G6Mx*MY1a>4CwY-jMREH#A$P+Gn!3GMy2@{#Y95=R%100kz6j5Mc_v$dxtml z@cWv;*fS1|fQCDE%t2_e#1ThOVrSpVN@kSp5D;G0GZ7qiF{G_z^=Kl#2t0DAu23lM zNzJ+fgD-LI`t8e_#`+->QTkGM#_HN=aK}ca^sGx=#F|oly!8Dn8pr zSbglu>-?|WeT3HNz9^G%b8(JaDK!;23VVl#lV^D5@GmgU2B_u>54^Vj!Q z&xfnq+l2n~3=)>$PD4+R@94Do)5lN0{ws{zn6zGfQ4xnFjzzTCvP#cuWY4I{l>Im| zk>j7JuB2@g!WZ>+_Rq@`ibcOtDRju53S?~`p36S28dvoMxNnze{PO!Cad-QF;)%Za<=Ym>95C_`C-T20jest?&uE*GkYDdSNMRHON822(|0 zfTt{1rizKAPJdeg>-Ooz^>yERnmvYH>`DG7PHF+}SH64}Xpj0h3!pq=( zgrrU1MqCtyi<&WJCIida!yyR{OO9Y~$j&hor@uApD z{F&xr%a%@RBM!JTH*RiQCLe~`bS43klR(;=(U`v!we}7;-BqkntpSx(BJH!4$aO`Q zPYDCor#lMCNy`2GRh33HfYIUZ{7AkT(Ldd8A<@hO-zEI0e46TZ!-9>Ikpy4$sWbO1 z-K|1nK~9wCZe=Tm?>dTGP!1Jgbs==zfGKkw9!I6#tuHLGnq6os)6dpTEvl3u7}8tD z2R1Qm_jpyj31D2IkXR1T&~h%- zuEjX3^$<~E%WnAY6#MRUw$`KPjoaDOPGtO_)tmcOSrY*n^{D7O%Vbj$rR9(nJZC?X zZ-Js`FJ^))>^cvI^b7JO!xSgD335gly4)}nb%|ksdI3tA?4hVC6Qs29AKfHeJ}r)> zE&~Q|%0caS4H=+@-@NhjkpejFGL$$*t~c2DBU58RF9DFgbX9gf;fvTrGFjU`$s#i< zn4enJ5^T<;s6!nz3|wUMR%H_yoJFuRpdl}5Elm1E(n6@!CQ7dWXVPYQZ}~%o=n6=s z-rm7bW!7xtOv(-)kMF-I!2-mBPmUAkdB)n2>-qa_GPo?H_L$lc=4(V+9rF3x#Od0& zEXh_u#ep&rA>8a=r*c#DB8@{_~@JRM?|gny@88<*hPGgH`&O*{Do8 zt^`Q^NH?*aDRnLz<~aJE&)rUQBH8S|kHq+Wf4|oM+Vf2)1`VF~sE5O3+OBmYYsg3V z^?a}R0&#PfzOt5>w^-RJ7;UD>iS5YlZT8&4^!Oq8+y+*~wBB@oxM^GC$okiHba-C+ z2wvrPUu|?pOQszxVz?L1c%Mn@NBF+W|4@D}@4RcjChNbU>2$czYW`J9YwFAo+nY*GOJ=?^!45uEKlV&l17nRF`P|#~HYjV;&er(IfQ4U`RZ2sD|3< z814jAM;>>ZtY>jUDUwqZ14vryWBMH4n`=Zn((XuN(Cd=21r2*d_n3Y62(1^Y@!4KcyymZaxw}k4jUvMcAq&SaTyq zs_|8mr)ln2JA8yQAY8&DmlPABX{rWV5>B(d^3A1XJkRTX*^Z@q%`J7UUeS__^-PRA z&*tAd`Nq^RO6r=`<^JJLv@fuKB7dVIC3RMV=5T}ykPnGQ(H`u%?l_a{Jr{T~f(G+H| zQL;ggzbpQnYAm0QeUGM6jCS;32hxkF>7i!55#)K(9|U74B!Aa|c9a({IMh!w5x6Ez 
z0T`m}#aW4;96YNBjf!T$%=atT&!fb{?E5}`1urEWm>pEF_)I2y3f*J39kbk!8YuXR zvh&>>Vk=H)!1y-r$yiFR3pw`a7h?)Xr3MT%05}6^*#|0G| zA17+qSnttRC6mMVf%f&L=S?t5K3l0ki^6KG0K zWHo7$lq^2sDB}%r{B0D@o5@2iMizhZyx@q@h^O4$H{7XAtoT^MWTgUz(yITBqm0zx zFl3QBF(Z)+PLt{i|I*&=W%(Ei^>?y>CD{v#X8AJD*`lbbz_HzuAvvJ4Mlm&`CmGUm40TWM zYH&RaO}eOC!^sPFvjp)q>BE02i~)MJA6%^a_+t`C30_6mu#KR33vD*mlaCy=L{`7c z4yJsUyq^;FGma)qiErr0jO#{MC_t>CV&OoWLs`nwcP;?=hKyna4diTPocHodUzq)d z!OHBkGN-=D{?mY#4jluuGgoi$~t-O57 zp?+@7d;P<@v#8%cjN?X;6}~2VwWl+5Xd5-sBC>0XGUJv+jt+o*Qn#DuO87Ue=+=l_ zs;Jl`BYX;VD$zo2_yO#Mu^3iG%3EY%iUY#fu`{{& zwrT@rEvl@w`Sh;s%RM8J1_>~YFb@3-!X`t+{LiEO!M_>{|Nq3V z@!w}dIWnPo<45kqSl>5WdcLl_Hg!#hWLAEDyq@(w3*~#%V?0HaCcA27Pc|D%(iYu7 zk>sys?X8+Bf9$L>)^tUgw(OEYsL-90`ei)enkNV?eerVNI}WWG@d=-DN0A#( zcQf45r8$+6vz|v|uefmQY-AQtV_?Rz zu`m~k91Ox_ps#ew%Xt-f;R4)H%=doh<3pF5kn>-gcJ~_Rc10;`gORas4faZ_Ct1_Z zD>@q`>lA_8Z6nl@t}rJcIA1Yr+nshmdpGoYb{D!G8szt7okI>`` zv-u0mQ%*EV)nJ2D;3r%NSJA&|kXdx+GXV-6FTc<%{0n9=hDw|iCGawG)l7#I`0psH zdmjQPZh8x6jD8&nCe25KfeoF%*O-_T8rB%h23&6ar{fX#<@;&VIk%J~+WKk7TC;CN#`4b@cFLWIP2p&0YyP-gD2&<6etv3al;9GFzMVva zD%U_~=mOSQ5}9JvXbpXnzJF~(aCr7pz7X3Q?h$@!G}d%Ca)z^@FBwc*$$@vim4En@ zlqA&6910RoIU*Fp(Pxen5IvDYSQJ$dsBCgar@=*-skAoM);+5`F-B5|KhZ~ew{%Bj zgPdxaxQ}ix7?GXDPGFp}ArKHLzG&q>>8FH%i*WwccD`{D(kY|ACHgxxL?=(1AZhq& z<`jd?$-lHZD_it?4>y!p=|!HySi4kuMI!>^l|pkWqx4&MAMHym8w0aH&NOZIB%*9~ zTy%?{n)}~n!F7K;3E_%wlG$QS79r%A!{0Cbuh3q%fjz-pmFl&eYlzAD7%)`NfEz7v zo@6YIckmD29eh4MoqWlhr30WWJi>&d0!}<^foj#S0bQ~q*i!FUF`hW&0HA6)D*5nF z;|+K~87PIC*3ZAHB3izn}^Q)&G*&nV^IOhA_EVZCTyi+_$=2zsxw)iYoev>Le-l=vv_*U3zuJ zfRLtL-;N&$pov^j^PK3) z7}rOp_#2)J5d!~9Z|K|QaYpa;y*Wj1oN!Gk)%CGNR|{5c>ueMjwGH$EJX33o>bOUq zhvA#qrwzKpR&7nqDafOfM4~mKkyu$_94B$=ORd+o|5lAYC($TarL&{%B5vN952hHJ zXo_(X#>30dyJBF}iG+5j)DRiB9p&+AVzQNgDareq>9YzCec$|V{9YsgS*09%3JP6^WMcTZLXZ?2P_T+olgn9`o&>6G&$`y0f2X-ye>5eW&Aiv;8*nnp*LK^LbMDO%9y!JumfvR{yJ~%T+@!@^Kr=9~Fx4 zlNRf%1R=KjJCl7+mE3m7_L?svBB4f`zzHNn11^3U3u63#>|+5f6# zhiy1RMjen7vG`kfRE0Q}<#bQi 
zSS{WnW2Tc`G3FznyG#*04I0mZUS@5HU21!syYhwq;~h=sa>x4A?MkW3t3k^K+ixPy z6H!DQ46dZ3_k|#URSK03dDD-CH=pbu#2(yFV{SaUlNiF+CzSPT2=cF)_JQe|k`jr; znQ24CLjAOfoLWtqewd@viv1?cb4!AjstCH8>=vk$?NX!r_hc%;0;m^FjIAG(WxbuF zbh{S7{1Q_%YSc91t7hO$GiJ*+oMRQTL}OJ4o+r{$AuZ^%FLr@DSLP!lFn7Ol-dt0) zqX&Kl$wxX6fUnK%s9;bcXwcT{9Kmdu8OuTN$YOO%yUgGy)2J>8i##<&ssUN|Hn=&5 z#mH?96UX;NT`=|6ZnhQNvhGKky&^_e22_bD{ldtCq?`N`J|GRy=T@w|{Y75`I5q*< z%kPOyD0@LRkdeaf&l{h207ITB7;!ERn7TYG_EVlVjx(10P>q#-EQBWi)8#k9fA4wf z5exNkV`iYQ)+WC~uO5%U5e@p8P*DDO+Ssloh@Vsip4_`zLzVG}-N-a1cy|gmX!$gTf`V#QL`dhE@9#{9I z3b|0>eqP!|M3*Rrx;e7IKDldkby^nSt(m;1SrK#^1bNFxk;4-=Bu5`nxQLp2b3Z>C zNyBt>YXxil?D7RvlQ1nz3{0H>83r@()mKL~6K1MvlvJ=NAz+S*xG6$6Fg*w?N2AJ8 z$0+1(iu2?Yu~uxA_C!0(Vvg7@{E{9Z9DF!W=L~c+FfNzAq)_nS5b_*SJ*T^^ALAJv zkF|F5Z&Ne@jM%hQ=)YFV{mS5jL?f+*&W)0kPOCgWyHWc``_vih}wqO1y z_lE-PW~F<0f7cJ{Py2cqzH+w;^ai_<;0BeOY0sncpmn9Fw`FFsb{9@eAu|pcX*Aet z>{(aVOTXmp0K_~8GTN+d23Rc4Q`s_=4=_`ak$$qA-nHm98y6MuaS&p?CluIJ0vseI z-^ExOe=d+0)&vB1+0k}_b9i#O^1C*!k0zc{0hMsWc!X0;F-wLSl-@C`7bd{Y;n04g zE$r+XRi5_=)zp*O`j3qPQx3r^e6)66`H1<*FEQ2FdDSwVXoZBM-y_ZPV+grLeso4U z(XXF!rOM!22jMxijFRy}#&-qVz2TN9I7i?pSOvqkl50}wkc+fZOd$m)QnAc9+&r#r zt*+@%4GIv=F2oGA1O&VqbwLL?7YRzEr1ez8eK`};X9^~W;M_HSCO4wqg?Zl3XcdY> zR3Ok4r2;M<3O{)|uynooH)WQD%AIix6VL&NYaFZIw856)8AE2|vl19S!{u;U>|n4= zOO@T^@bZ|L=Y^O2Q((F-J*1=uSbj^8fRuVKlE+y2Sp@gtzq zgqnzL3*JJpG%RN`9_@elvoq^=kD0FCx0;`M2=f#7t!PS zenn|@u}OWeBF8gZ*5~kYDDNu`}KML z_xs$0_sP2ZhJw3#%p?H+dk=s3KltxDrhQ`kx6$~&`diyGEpuPD)wbT$-`6`|9rv2= zoH)Oudz>~>_gnt*`UhX|-^}>ZO-cCXXd{O+R@`2wyWBaS%t$C2KH+B7t^Ak0v3!M~ z%l$0(Bjb$PA+Oipr>y>aPf`lnkyis<>oKtnk zL4nL0Prm;H6w^)AB*TSlZzTv+c^MSUbOs(#XhaI)c{G++n=Wig>Nr1}aLaVgevuE} zFHjAngrI)2W`ru5SUsms6(OG7#=kyeSB58gLY^;rq=qkm{9n&|-AbJDTQ zesx`d5o})J`Qp60d#C8M%BNYF9$V}A2K2GwQpdP1#8`fM%hV&JAyldf3dNiZS_z2MOFw{ItCYmz*{KS2!E#f&u$4Tc(Y7c2hqJsWo+&#OMq(pu0#j20+|#{O7c*8 zWAE^o?7LLG2YcfQbE}VaF-ORl7e6+u0s5@MWo>{w?h)%$y82eXCV*|^G>sM+ftF9% zclC^|Q;I1(+NCL79fs8N_?p z(h8lj&rnpJ694t8q&bJKVHUePbYI4!6#dO{Z;J)4om92*NW 
zHS-WNVJ@BzW6Px|Vb)_pp#3y@<_Xx2zr}f`{!T}%XNb^tq>R!=4 z8kv~ml&8!ZOmN3XAXw>qjPTIN&88RJ+46p0OJ^D}WX5>JtUMAi(HKm;r_LD2?nq7e z{wJ_(J~vlaMn4Al+T_iQ*wI7l)8R@4^pI}HL}A2QE`M37{ksUSJkUSs45D!_0zPah zHAPfFg^Z2bq9Lx^@#S^60!MOuU>l^BX(EE7NYQ>9sjP0U_8*&|6k0~LFIFJ{T8;b6 zYT578R6Lxgl3yEt)N6G6`YwS=<#Le$fe{oGW>tUYqQf=t3SEp2Xa9y)a6VG{z|!Tw%$$~WM50;fc;6q z%6E(<5XoGyW+XmC?#Vj3dE{dAczUd(iSn7t>);aYY3Uub-Mge^H#R#U8BUZ%#UZwi z)e!3+7a6ON4+711C(jnH7w>R-LgxE8HM@iv4H|>2x@P#&bVJB7=jpF5R)f6B!S?y0Y2)$5wJ% zs?-Zs?|ENx8^F<=B(x$&n1!vHm3R<}YlRiq#|3}ne$a~5a zL#w?Bd|zSkKSA~v9M!JL2@a(FJ?sZEU>uc?yxk_7y|>glq+5@v6!PXZ{p&DBgeaYU z8Y(4T$zjWhyJ~M>yF)uWi7?v&7nto#S`VZ|jjCGhUyO-LIAbFp>M9DC@2r_t1h8Q1 zGuv^2Sj+@N_x!bS-g&y+!C7|C-18!Nsk=5bY=W|}LC(0A6?Wpgl79^t{{xC)RSilK zx5^wY7$;zB=|{oWs8T1}d-XW~TPI6~p+Jfg5ou9blQwV)I9bA+5~6)p)R86ufuAKc zAPEr+j6ojPMb5S$yJfYD<@X2CIW}pf`r#&{pSfS0$4BH@IRyxKC<>VSlF($e${v_5 zY&@8X3PA!BABC5f70a-BWxw@$e)XzKvZTnfH z5tvr}IF&-2Sf!8=1)I2D2rF7aMvOw7m72Vw!hjT9JJvY@xvp4KhJZ+rCp+gMA(;~N zHLSv~X0H_y+{Ei0HR6i)QC_AkBGv0DQB|N8j8(dV1;p&uvYAxx?;9Fa7MViPMQg;h zbk8BM&MId#L#~mE3PoVRUT{{ZpBGX_JO;E|$@$p@jpcqt0>v@K!mPed&Addm(ON@V zA=)YC+=ERhhgDkdZg7Jh!)52p@v|WBp(xSJ^P#n?Dx}}ofwY31U=4nA4pD9HsQ+xH zm-*^5tV+k+M5{4B^8<0iW}JXKxXA`8Z-hOqW$0``yeN@N7*C$1ty~y_!l0sjZZNpG zVt#KGa6@1X_xf<}c);k_MByr@zvP0WzBN2H9}6zlrYCUgSV^R{y~o)Coe2=LOrJwq zuN$Ej{NU0SoW}i}Z&k%_DP6;xb3SU& zQcq@;;t^qlu85^L{-5R+LmzhxoF~XF>yYR<%zs;NHV)IfW)U?K5_Z!W@n&GlVI&{3 zf)|PUOzez!TrN5i&Y+7 z;G(d6<5dQCIO|32)~{o3yS+{KM(+_U(3KL;G&W^5`AG!y(_`8VVYMO11ItM@5>P+Z z{61^0qc7}a6fzz+yS^bBuV`pEA~Lj9m-EZBO#2PmmV@mNy{K5P&wCfX)_~G`h+vwrG3E4<1o0)WS#@~L` zWi<714UalXMzDuq8?tRjJrBGC)lq%xEBcM@;Ply9dgl<2{WIRzjxq+T7yLU};GQ!b zJ70g`lS+WBlmdUDLC2u==IXhg&6D~d-U*He|5F2hCimQ@-Uaf%2ClAi5vDkrB&wml zdxxeXrY9aH?0=M%ADYzn1wpORtvzpe-Tfys6Y9^u@})mDU4LSslW!fB`x>6RYv8CP z*?I8mM#9&VZ^YXf2ha185#HE$A#?kmWm@y)N`>`$gJ%GA_~8I%(c?zbOQM}WWHcC7 z7Siq0!2iLF9Qja|N}yTK`x#yLe{Ebt3wa>`!qmFPy_e^BbiALOdW&(Ed6ArXB&ySF zzTZDCQDsCH1R=BWdgsk*8u 
zkNv@o(y2ztD*gufzhU52Bbb_!P*Ev~GQv$09LeJ2l_za|e%wMGJ-m0YuD|p14)HiPb0PZXpWzcT&;iSlJQ^^vwB=oN! zamdPTf=Sfd65ov$N5ZNK8yO6vw&~j+P0;JBR-c{(w+k_iS>p~qwCXNb-%rl3`+s|; zN!nyV9+Es@U{{Bu!|S?3R+IF_d-lW}DVx>SHAkqGP9+ya%Rx+)V+~ew-4}$AAZYUD zQ*I_yB(H6c0da$>-4nykmMsyAU1)H3MR8adM4)d>k%@J6mzU8HQJ@v5#UhRX6W!yq z5c&wkBw{9f#x;pF=mutFi*PBN);zUsCw3NcI%QG!2fu`pnXn}#J6L-rQ8npovumAK z{7#%eWc5pNp!UvaMc$P&K&3P;WCp8?9ZH zwWOn3G!RgMjyesm3v-cgX}8;3ffxkAFx*FZMcuSzU#kF9ZVFw9onIA61Lo0%n_1~y zMgKRyqccbjc{k$gVYP=NaElo#(Ka))WA4g@5gSRu1TvE04^fdLBKDF9YP<-S!H?~5 z6SK0eY>-e(uCcRsVuO0h%5q&D=U{p=vj%TAQ8~LpKF9o3}~yTNiFg|me0Vpe*8EeCw_VvGNNJK)d#zI?i+fFfy* z^@s{x7AwO+;5;YS6jx2hGs80-O~I+Fpnba%P9|uVM8IYie7HHavYKYXOQ{Gfsa+OBSK+tA=3IwQeNLiZ^WNRqW93g9^q9 zD*PjBYSIH-L8V#|J>_Xg^}Z0Vnj|}uoO`K|b0gJPZ{HAf+v(wv=shQS0?+mggO~uW zf1MlDwt!I>PfW1NkRq7xm0`IOt$bKt@Ig!sqY#|2JQ8j2wX9UT`sfg)(_9^)WmzNz zb7xGPEdD8lD2d?ws3*Th%85VLl|FmQMGSbcBI_C6_9=jYfUTvwYN@>RS^E#(eyZI| z`tR*hTK-;qbODe4fmaW5lMYg0aC**BO-dctE9cjI_8&;T?@-^MXp~Y;$KltgP_L|P z#qj0nl8dUWNy1knfqsV*F%-&y>fO^m%RkYPn zjwmmBW`9V$44k3b-I_{Q+|vy_RAxYi!xanJ5bo{ zR*wAJkk}Dd$YuD#w_0?5kE-;U4488Wv9YY+Yz+{j2_r}jeIjT}l2R!GW> zUk(A51^!1(Xf)Xx!*tUF&%sX3(Waa6YQW>7DAVmRpJW!8lPqG?nWOBU=3`vPu2;j4gFIT%l0k;i zCl<%+jQ3RI4oJzCc7)VpcPR>n4%zPlad`{SbCxZP?973@4=}A)F~0p@2wS0hgNQ>y z{s9uXvpQDJ>6igh!C^GjlRmfORFoN%^u-mj_&O_@ORdbP7W72Pj&GSM0bP zV|nh%><0{GX5LYyS8e3hLr_Uy#qFXgn~vfrN7Q~hXtst-o8I{(06;oVOq(c#w=NvAvEo-@1VN_>K$LLw9KXxQ z|A1U>e@+~4SQvpnVg!#DdXXHd34QKbhS;i!fLmE$+ix5t=*h3Tr1u%Wv-HwJ7RPKs zL+XPj21NljSWKjLc1FnHB}de&t2G(z7pH&Lx(Yuv_%@t|d0zfz8F(1(!Nm^HI~dp$ zQ=B9xAr^N2;NHZ`UPM<1qn5T%WT2-wtdpV~Xt`?zo8&f&AkIX4y~=bc%SVOo4u*3$ zxmp{{_4YVr6|5cAC$Pddj;<;l@U?%Xmoggx+b>4*QgGp-E;_TlxGi+eCd?y|!Eume zq&PVeB}lAQ>r4*+8YF)tp}D|&`uQ+b*Z?mdM;JMzTQ(}0vV1s37ntd4n76u}>Ajy$ z)&+x0*5rS2)*01`?@AW^rqe7a9PE`btwF$MQt_Y^(Qh;g84qN0bnHqTG0jUKOL5f5 z`*w8TEF!vkgWA1*7H!!1Rg9|)0i~i<%xpsj_5wa}9}A1&HWpD-hbMMiNGWTbG8sxo zgB!DX>kvD`E{yfT6yk=X+nBF&A-Oxyzxj8{+!J7_X3C`1RQw&iUI$$U^)yMqcIEO7 
zR1E6oESVabdV+H}w~Sra890dDoV}qw2Q38qkT_d>=S^uNMv3L|4N8A^btTFBrJML61L&bet`UTV4;v%>u_J?0c*X>R@{tuBBoMP@+Ul!bQV0}l@6Wd zDbD|gE^1={;xeNh^#d78FLRD^&cE~ywsw{oy?cKL-*L5)+&Vm6{mH74gr*)xSqEGk zT&AEOX%zm-2lXtRZT7VkZ{@u8hFLqG?)t(lG=-(HFmr+`O0ZbOzI>AN<`M~Ox4!Ml zZkf>fZWVBHT)#4-WS)_FKvJhz_)41I0TQ-`?O45hN&7S(rj@zL8QOmPI(_cY(9<=v zN=ZI~TO2K@Ruk^n8Z_w$G~E`Vx6C6mS_}kzDvKeJL5z$$@hS4MjNp1^aTNrGK6^*B z8el!d%#XtgAUYJM2-{@6$+wqn|8O=pHH_&kJ5`pF7LecON={KVUTx?bXM@8@vayD9 zjAzC&x^$4>W!SH^5m)Y+1=nXWfRY^A9O*lhX&PWclsrr zTP^q%A0fpsI;X7_yp3y^&lA@(*>Km+_n3GT%-E7T;6*;@l}MT^Hh|+extE#{@}A)_``dZYsRW&S>NUD;*y<&ZMO?RV%89vu^3b=;*a4X0ZMU)AoIhM?i{ z8zKP%G$)ZoMbGM=V4i~NSN&SbPEjI5AV$M#YaPUjI_UaDV~Zr9#?=X03qxyiSgeC6 z=pyR21yiK+iU69F(TLD@kpMxi{=D1*bdog9tl`N;$GHwmezxUl17k-toG!SkYJ|9_ zkWmFf z!9NT0tEz~Q8nB=dve8~D25*m4DRMbM3LrX74F1$~JJD}z;AGrLp~3aEX^mK@E}4`q zYpRSRb#yPYl~|2C2sQvxuOn@PuFD?^5vZ9S{B^x3h%^2CHd< zpEVP+I8-%*OUi`MOhpzN8`jWgtb%yU^P}@_F|~mLQq9ytd03DnoCJ>1!NFjk*^>&y z+En!%gi4|QIA@YzV~3i5sD1~-1i-~@Kx@(wphU=!VI22;TZhD{!s3(>-*MqE*&+%e^TUk&9??*U zvhG}ucoxIIt6|qBq#!U=;caVZ{sv!BQO;-^{moX500EWWt+i}A*=Cl ztRXnJmfIa6k2OFM2PsDVb1t-J>Q+@d&-wT6Rn`o}{!Gdr zwD^9lC?4EiM@bUEgOcB>LP_&Tc84l2^)$2Di5MaG=%*_3}qXq3FT7mn5#yZLX+u=oK8*-59A9>Z+4{S3}dMjw%uEH_ZV7K>!tD&)L zy?Q&RxL!jUI>rFF;S-Y=MT`T`h~wcER#?fPi|8IQ0Wiy%?on;*wFUF-Y1~3#!wd1# zvlC*c@%60u=|m9i&47;=UY1-e1LNB8;wQAKDmr85+JNh0vW#f(^=dY)DY6WE5Z1@| zGHmR4s6f*X4pJltsAKI1OhV!1ncDjL5DD?s(G?_CVG`#;&;mxj2)MrDt4_gn0;4l# zWiPW;hrVQE#GQ>_d-*JzRl*XkJi#*?6e5PGpar!JD@qp1SDyU|{-pMB^47iEFdFeD zk#vw6bZH%|Co?kKt0KvgsPFRNY_`PIc_CoX^>aGEfQ0>Zl~WC8_I+5$OZt*2*oeoT zP;Bd*W03FasdkXF{%kJBATswThiv+LL!>83b*l~h62Iv^AE%0_6`+Gw(+|_I>l5s) zkk7O&oe%V z;_kKTzFBt$dlr|)iBj{Kj)#b?Jz7|{ac8o*;2A21-!!vTrQPQ_X1Uhjy&olZJK^3L ztLSI%c@xh;?;v~+)2jAI@IH0DZQ%cK|9ge`H)5V;SDL#zQW+N4n5(xf zG1k(Z!3joPGt^qXRo4MX&&oVeLN1CfeP?9)DwIf-{Xw}u-Av zTc|(3bSyMsJ>sZ@g$Qz*+c3&@f!FuI;ER^oM&;HctgfgToBsAU3pD#tH#n_EytvF_ zqh(84-1XU|&hzE9qf5)kH_y_xpSrBHRx}_J0F)WW0KjdnNEsEFt94>$J`G++=`8>ByP*K(tU6I7id^mn-`ZXQxSp#DGCIi zr>nL10HcWQI~CpfZ 
zLA}CL&=Vw~-!}BUENMUjD3ise)*BbKWP`HN!3UgKk`*->5KE;#7|tXFwJB}yje{U| zIPrnOOwCH|4xUyDL?h#@K}$gxjT0ZSV3zDFaeFGfZ#JpRs;L03olPKev_>`6Ly&c{ zLeM|Bfe&;`Irz)tW5`DH>j`tJ32_BPHHIAL#u9z=Mc4*JftLkDY8qp;eqkWbR^$$1 zam7-&LW@>12B&3yn3NPoQJ7h9vZqZf`lEt7%g~~$b)zRp*@Xdh(15HNKsPAF)d%Q! zVU;N5pSsDozYM9%fDs=lg-ApUI1^EXh|8ih(3$A!*S&EJnDMH z%-8{VYS1Khd7ks7DOqqs0xZWt!x3&20?{n#YRyueg~)#e@ucTR_Dzo@$4Z|(jc?xyPA z77T3vTTW_virt>wPcp|Yi=re1kzFuN-C#zC^yC;>I=@X#Db36=f*8#!QcCmOk;W{4 zEwd)l&$KG)gMg0H38c^n=6=S!8K6#oQZwkZyT7n^DJf+P>Fm$4eDaf%qK%63YGTyu zOiD?J0>a~*R7C_rfr6&J!>up~PB{AojbDs|A-Cthxb*Yo6c94fB*|pUnx=ad8rhy* zDr)?prbbHm&kOlajg4Ewi>H`1)-;C4&wg7kE-O}&myxnIV$(KS(XtehZ77QpiO~?L zAxtivFq*?LuH)wStb=4=heshz%x*$Z@MO?LW?_`GRmh*QFg7!U^!a*bC8_tWeh}_5 zH~8J%S~%ifehpIowVp=bun|f@x2Zw#AexF3yzR|tcGq8K{67GqKwZDg{>#DJWKpF} z1`wpPAv55AS{NHdsuqrgA>N8nj$&s5hEetnI-If)^0Qg!>=87sK^F_pjAdBkTmIH|kuNP$*#u zCh&X%HV=oDE!oH@rgz%KZ5kM41x^n~W8_5@&{x9Q4Q->$u<1!|tn%cADAgJKUdI)> zvU|!!fy8%r_OfAL!A@po%t^BpfAmLx#CE7pXWL53Z1;0DL&#(xZhuUwucr|3_rCYN zc*#p%f8uO7I<>eYmH$%wvJHAcF^oYO?Cz&u0ST>4ut_3 zC{KVEf<$1@mLyz6AE!dVE_#To1ZOV(?I!1y$sJo5lr@U} zeQq%*EGDQdAnRu;bx9B{Z>Xv2780{+7~W~L&xhhmE;_Qf~b zLKzxaE-=aqbbA8~ha>cc1)4HOPu4N&kJyf;Gf{&O^H^V9!EiXhnbXIxy1I-LCr@B{ z*CHlo=234?AkNz$HFw_C2ouTK7;z0KV#QVjl-&WVAgQ+^Ebg7du$SWaqsK9)XVICO zK)R7)YGwkXK?)fZ##0rzZ*-=rPD1lOeu(>4`*s{2o zhro2@n7NLq%1+A`TFUDY21ACSFwETC|5Ff z@fXiOkz1Pqaptr%HSaG12suaUYMa}8VZWRwX+yhY6>CU?zaaAX;E%5%^K6I~4G z5co^B719BRu>Hr{SyASR7^GF{@I<_w`W2~}VBX_KvO4dWvFWU{Nm(V}GiW)*BBMHeCR+-5op)oir%m{C* zrC|fvoo$`4mQvfbXUkIASC$Lm?_ULb_JI{q^(Ms`uepV7TpJF-eU&`YsD_AtzVPJpl$a;6Ov8=)bC45?MQ9zAGcFEZgw9IMEW zJ>$w{98l&EFz5}D4u-rvl-*=PSqFTdD6E1mj$#JZoAn0oW3$mjy;(yMXtt0#wRDEn zh?0Z$&AoxftDs^y3YwX~KlHKUM3Ropv5g0uL;2 zSSwyXt1T54j{nnJl@;Zb*gfM_78SKcWqD-owEkwR(GDXlgiYfnYp)UNMHI6Ana_OY z8Mp9>q6m#fgEJ>utrkw7K8-VH&hX#17H!;r|NZ!-U-~6hO|)9gC;gpUv2wE6d%V9t zm9`x)UnM7Kt^Mj(zZ!3U``hvU_rIU*ys8AklRZD5|NQ51>eNXrF77s;#Y0|!Lld(Q zC$?ltD(#DBo-Xp@?56qtPpg-QM=567=KSqANbR(pw$panPTOhUob7S-l_dyeD^&&~ 
zbWbjE=pw?vR~MEEqRvvmf*9`I)#7?xRa~@=50u4$@Sv__T{SX*7k+IknKPo6<{eH9A}9Rzwm2EzeHX^xGJK33K@@$iFp zYX`srsl9~_dJqjgj9@JJ&`m*#I*$JD6pW7sdS`J?*oVSpu5p$P=~6( zYmGX3UCJNewYaTFwiT)zgzHF&z*dhs5<^!ON);O#dpyfr3gHjA>pj&p4e5Pbtfht& z7EBhlAHssso(1rAu}%@>DtUeAG21+FcS4#ejM5SJDGJNhWSt=#0Avf#l-;(%R^*di z$}q-YHN)vgH?Vf5K&v*wwFjqh`N4V2&Bh#lOZ_CK&tnTDX|1u$u~W8!pK>@Vm&_Jx z5qJd00Z*pOSTNTSY_3RH<=!&DRT|SD38-o(5pKHiYD_mL@mGKSH#m0uK@772>ccMP z!@X$7QwT-KKo?y@9Hj0r#R|yJI1=U25GWT)&PXA5s|Sx~cje z_xo19!}m+7lbq2203ZNKL_t)0=z{>cHee`w^>avVVEpC{%*M|N3otV2Qsj{JI&^P{ zyg#6h#kNslqsbswk>wn)OF+@;It93|AgUREdCDWG)mn%eHAJ)y&aKOltvX}tpS(bk zNw%0GB3MMyT9{ZsPF6sS3iVV9m$tKl+P>+tj!lK&8B<+&8l#lzh|2f7L6crZq4do< zqQck%W@Ia<1!|e;JKomGo{v|VJQM#nFJBTQ_Wx3dA-(w;4qepMiWm2EaeI<#XF+4ND46oq0p zTTN`Ptph6NYWWwu=-cto!w=!+zy2&{CfXQoba}iN4#*_?9c9!NG!ZsN6ev8|*_HA~PuKA!khN$ZE=jvX&e6Zx&RRQutSkihr2!G@3AAzX+^$&sF%lSeM)#TgEJ#@g{U ztjbl?TXjrKOklFnK|LhpnZVraG)`_bu+d*<3ro7J<47XOV^CCdNP*yNizUi_p~?&q z0de+NjK55AdCYfB|wN>vZVO0e#{Y>Xu2;Z&67lte`**GPkPGdXW*E5|V z4>V`P(3nzK%Ow+BFW8Dr_jB~u`q2FpEm6bV?p>Ih+k;xtwoIuxdu-Jvj16CTos_vB z=Nx}3l3_#Em_2eT5}tqfydud#=b;X;ch_FraP^H?SzE#C$`Ynp9lkG-VnkR=B0GWO zycl;NY5NM5vVv=;UED_J{p%anK1!PFLaA{=%aakd+Z2`J4ORKs8~ACXzSw%dul6Cj zM)ox)F`LxA?8uQK$BrGtkt0XOte-9gnNcMqsGt7AFZ=@D{N^{K-R|%Xo;r1kgSCJ8 zmwy?f(dexA{>h*G3EuX$x8Y?kd+9~q&Zpxu#J0b0%?cp}#(UoL9(?qpALadjypZ$r z{jLWdcz_=Z3%knw(ZH2cyU~y#hBk|Z6}5r1YmeB_(D(VarUx8&F#0VIQRSP7WBs6$k`d0;y7ic`Ua*5rAE6I zqp^Dys@p}AY+|If4Is;nkVuJ~0fU(T=79K|LyqZlxbjmPQiJU>LZDHb!0yEv9N4#r zes=?TO2N@NhJzsnqZETdhW;=`uQ$T-+B%k&*Rk|SAIm3}v3lw_+7nGQ+jX=jTgaLT zSH_a!rQT{Fsn@tZkb zVL^Uj_aTtRo=UOrys=O-5^!tJMAz@sHj>H*z>`md6dyW$0Z0b z(h_^Fyb}NPS!0(Hns{ypEt@J}4v`Oxy^jTuOf|6v-9`=_*o&+7?t|ih=iCIj`YK}W z@oee(P{m>%F8e~4)*oX_rfrA{_w+bnQIXo)Cz-EeJ29)Yu<9|omW2%?H^*K0Rb8Po zNZCR-kwXqMaA*qooR2cLGa`$(Fw2o-DQe`!pXC^?t@BN{x3&tY*I2pLXtfbH>P&6~ zSp%Y|5z@yw7&(FMfsup-vQjfC%QDN-$GxnQ7nQxQ5``t?xuyB8S&|fGZipQX#zIIc z)p%P&scna4Fs>FM)OEzVz_33+kt&GXRx#6P#QYxReOQo&uA#6DD0bJD-;9cA!&Q?K 
zNG)ujPB2kJd+$7YkDWoWl5uv6N)>XMLxogfocT-|QsQy2y(7g5{Ck$JiMR6!H}_;NNNecHV`&4-jd1OFM^0k!`@(kQI;a2FnuX_j4OzQfU%(T zc5K2bD}*61eT6%Wa>GL8We~IF1T{oDVO2?)VPoJ9UoWs?atf4d8#{ASYP9HsiWsxCH zVkAL?7zv^?Cpf(gHYqMCK z--F4S8N^A=pChfz!xu4B3Oo4vtte2pYh@u)wK@Wo#kkct@#h=1Ly8!UNaD%^S7K## z1%G$@mr#r9Xx5w7I>wT@Or#5|gq2KSa+F6tLXRd#JMH2&A#iLkGAf8|#&}1v+8lXL zShzWyA5+~NsPY)USy)no#z{+HJ#S(UI+P_x} zN^8vvX%tf4Xg4nDemJn7EjKTqT?jZ%ifbYhE|lOTz?gvJ^Vx^x6Dzp&bGPF5zqtdq z|IOFXU3=oO>FRH}3OBs?dOZK7&%+hZy#lpHazWd7sTv)>;}4$T{kr?_@5X)q_`q4D z#pAU@Hyp%)Yxm>I=Us`bpMMqhUUk_&TQHZ7<%x$*;+}uFmyhMWx88?iM~xvqI{hOy3%GbsDoWnhb7IFBdtMUBrejcv>_UkY?H@Ray_$+AWR$zFb&n)h!`hT#zj?wBS z0vU28GJ{F%TP{oWgAk}-e%@zQ*qfOLWQ9PI)X?h>u-Wfolq(MUEeJS{V$_-qv?eCd z>a;LDHG@X0j!vu1b~O2*&*XuV>1&rqU@O{%_v)`K;B}=$$ZSEmkG_<$1h=&hiniX{ z)p3ArOvVvBgIk8|C9W)p#(vnLMAEJyj07SAdUEpSq>yE^@Qi3N7?>(dWf!-+Knx_? zZD^NmphTt8E+SMM^@c#JF~NSHd7ASoWiS}A3L{MmWEt6FWK3vmtd6j{+QZVy5|$o5 zfz?{b15KDD=*&!_-fE)J>97ZK6ekFxge&o;CYv~T`2jp|-w_;pS6BLj z!dUnl5g0)bR>Ci};Y11xo~05PO7zzS&K%#s`tljH>IJ^zTleAe%O^42jw#=sEAaDN zAt1F4ks%D$*$Pr+h%N%}3J!jDpbWf45uEqB1An#N0%E}T^RW^;R+bdLf;=zG@7yg} zHjx=$u0rGBf!+AdmpmVT{kOMbetrVe(-Vlv>IKG9E2pYY37Z!q#!du-7==PaJ4=v` zlo)D_4oS|S47eENit|AV%BN8ZvC<$YfGkC_UJv}usYv%4y)3LlJ*f9EQN^EKhZjfuh! zq3+l_+aG@*mG-}RdPS533s)~U$RoeWc?sFZfP>{>fp9jlkv0&=d0Yp~ni^2oXK{$} z&o3cOE@b92pXHEAglM*d79`T)8DLNtJH%W-3~~fPW^!8ix{9p#nPJSTpS?{dd7M8! 
zeeGO+-sjWA*6|erQ5c%AVG5lOjHQsVOEcCswR+6}m3e{QW)Hh&cH<3i{2~1IZ~Ynv zU)Kn54nhovLss)?t2W>)mb!v&2_TWif=D=p61?S<8&-^EQ{iooT#^ zmQLaL`f;o*Eg@8X_&Mhk%>;Xtv|HH=}6Dn#~KU6tmOh0zJ z6HK`x0oxXKYHb`obQM`P{ftXVB*h&qM3jPNZJdyoV{rRc%5 zuYBbzd@sNjRm7!GVUWV>BOmz)zW@8b&)EC1eQHTyl4bGBUiLD!Q+oaDU(fj$uF>oM z_HX|dX_|9U&ZWRF#@EW`Ur|x~kH7ZZ>mK*_?UWcqF2DA*ujT9tQkguJ^Won2*kg|| zY0qU!cHT2965KGcfcrPjn4*MpSVmpUesH=!KtIb_`AIGje(m@TL}L8K?|cFu`?Wv5 zp!dA;&9A`Qe)-2PDY%@CGW?hK{2~784?cTA?;{ZK|NKusi}qCe313Tq@vr~HZ{X-% z4_(lA{OTuu1=qgtnsff03czmp>s#^3-~BX$nHSpb`|ADt_;3H=&l#Nl?jL+9zV}W4 zf&u<#TD3rZ`PkQw;eYWBA-3{SSQk(_eYg$C5}f>TmR*Mrva^j<5L9@5K$@e%;e{-9DMI z<3ihi{P_>?)_fKU4#GSEw8|f zUiqSLSn_hGQ61|xgA-Ly8mXza7#LMy=jfeUWk2>pge-+;cu5%0&4UqJs(DZeadp*) z(h?#fj1VLW5e07!GoC!xh^UyJ#?C8zBy2rr=U3 zxr)&#>8B!20F}mR47AdSw|KYisDOoni${b7B%x(?k|^ z(4Lq=7&jQ~nVsxlZE6xnzy2_$8*RvT2Z3s^y@8Z5TZPcStSrbWl+s#YI21%w!Y#L0 zJNSxi;VPV~%X8tY{!7Tf0nZXro^A8vn#ZHPiovL8Trr7s5)L}=RDqpoq=Sqflampj zhb$c-ie(uDpOK@DK)P9=yFA3{$JRl?BKvnw;PBOL%+EJatEZ^RoCAUln4~<2Qq72w z1at6ro7V|zaf6_7#LideWF=!pvJ!LDr13(6c4)4vO-%mh0Nr(WUDQ|xC>KGy`i zP`|n6@FARBU#H*+)M^2rhQivR(B?&S4s+pcw90GSfzxIIOJUwR3DF-4EM9dLhPRwS zjfl!Pf*OsGgvJ)Gu;H>qg2oiIOV_ic5OIMk_U%E3ZuZ#-DjOjt-|&L2YsLMWE1*5t zQU$AozjmwkNN@zI1#EnOs1U7V3qK2vic07;tH!ARuxctq5Je1d6?Ra|JyIsh^X9S| zh~VSTlMJ8|nc@1iv{@h)b+EzT3US^+oGUc49BPmu>kUwJ`xp|T(H~%t4zaPhhA6Sz1%hURgo!M{{Y4OgK_GY@K#j~b%fK(A>^8uj~y&U4rtdEKuz2H2x&oSN4k}r zvkdcAHZ7m#D|El3&zhK;X4M5fNKrwB1+dqn`98_p>FwSuVG>d{RctE%gFG|WePIhU zf-*13`^4J~;?cn>A+#mM*@j9Rx{v-IxP%F-p5wg}j+{2CObX3jAFi`%fyd|wtQ3ig z1W^%jNO!B=Mmy=C6}OPc#DM%FMie9_j6S#hJeEGzz=ekUj3sxS2Eq^KQo8eDEvPE} zRg8g9_C0~)KdkDM?-Mpd#@cm4#%7-d&<+}yi6>BWattuUq0xRU_m=VS$%nDNypE)w za6s&8xQu44iI6M?34>BGkH-mVGIV6@7oECG8`4Kwg@qgUdAeb(JpH}Wk33tbSy54j zS-ZvRg~H=j$#QSG?)}@$-{_FjIA%n&$kHJb8KIqh{9IR8?4e5Gu0Cu0*S;TA z)fd(A&3u4zcER?&tuVqh?fL3ez|W^?hA(`f1jF=}`_I!0UZ1^or6BI_{Lb&JT)*-x z@S1+V>85YNWtUxsqeqX90s3T(L>5>F4(uNz7cNGt)EkX8f4;OX?S<_+mY!p99$Vj| zOFOwO8%_7dFbwfCKl3yA>}NlVwY4>SY@T=yA}qmQ&TY*#efw><*)t&hbBPFWxHaQ% 
zFzhIxoW())GtXHc^OPE|iE9xC0%$2c*5`JrY5#dNDnR`5r@o9o{*6C9@9Oy{Y6MPy z^249v$G5%eTk*!9{Q(@j;ov3jQ`C2LiRgR~c`QPny!SzIq0Q9f?e}B)9xy$D8nje1^{>A_LvhyPKcG~&3 zi*-#w zbByJ_+EP?=C5$98&ooyMlP7*H%&~iJ273-1z~t-EqAI`J0{Fgc;dx65Aj^9I`KCI2F=5`LZ573tepbc zAm~srkWQs$#33g%b1s}Zq z1=qe9jcic@K`;T`AtYjFZDpnYg47>bj=X3fSF;!l_h5Bn6C2$=R@T;WW_2B#y#baU zdxSyaR%Z%vqm7BFS=5sT7H2wGc`U%aw|^DSeeUy7C(=kuq%=`Y0%RdfrF>wdcuWvz z7;78LO_<6BU;8+=04aea7u0Nlh_T7_c%_%#W&z_rPv|XGm#*s9P0CI71+21yEMx|o z1MXk3h#(8eRu*}`Kon2~s6ajh`s;lxpWMLGu_a8mTR43E91a|)q1~1gZ(uCK2vpJW zm~z6^e44$HoOhwEvPOC}NbV3YdtKT0DpfH8aiG06{@8A{Q#r2mx1dsSDN(H;R&4V? zpd}p!>eKCZ6IWk#IgXrOK~W4LQDgfIKE7llm1~5RAV^fLf;0g#Ia^NA$xgLE5k*)y zbS3Wn>OZiJNLu7vna$9XG*%iTA_zk>A>ul&VIK#tJcP^U<~SUFl#Ljml6L?7;N%wB z)+$Q)pU1~ASP)uOR6GvPb?-T6-?6|tflqmEY+hs-F%cufkb%#v(AG=XR3H1QULU+` zqpDTlKGaku!&=Y;M!h5|)jSMx@wp0_Tq6Ko7C@3t0K*J5s&F2q$ooAEhC@CFq@W7x z)^0DbpC`r-h;Y_wK4jT?pjWRymr(D<9$o%kDbp;8jEeHicRvekx#Op7#*8ef5YJP( zYHq$x1AEO&fv^!F&<*Gm$Tx?`@{~q1qhK24rYfASEgM9v9HJ;}#lGcEjHQMyK|MQ; za*v_@M8_%-2{PF?bJtopZBUJ5%nOG{4L6kfk8hI_Ss{YlJK_EgbH_n7bcp6|geU%3?z-E$vg zU9kFrD{lkqt1mQ0Nse%$Jr;WINoa@)xmZPVv&IGG2oGK@<>K$#tZ`TK1W(6BCQSOfn5l0E4EHi*nVZz{P3{u|WN%+8Pzu7Ju zp}LpCjX%mort|jR*}{$7FpH zQ^_;^ar7GMa94qR!DAu_nY85WAWc=BFN&3^|?$)v;&SBC_KdXEqr-J`0sg;T+J) zJ_T69Zzsh2Vz*&YU_fJ(9agM)pRseRbcb??)uoWOa^LdG3#Y`Wj%5O4-rv*~<2Lps z6a2HZ>U*xtKlUgddg$zG>I)+-F66c2Wb3P5^(wU6Z9i`jer;r5EzBBgZf=fk1!-HA zonNxOX%MhFqgnuUF>uN%c~Px1arqQZHA3@GvVFSw=9_Wrt+%qe<*Q%)Dh?jJ9It%k zD_Bi(!womEJyu~At5wBGb-!Qu!WZJEn{LAAKKHq^{&pT)?3EVO?z!h4K28*r?)HJ^ zuM2U@sC}i&Q!(50IHT5hdkP0zC{rrXSLQqESQfSz^ZdSir#)Mn12RAM>wn^_;4ela zko2~jZ^x_N`bzx3|M5CjXIzT5zPy3I_`qM{Gr#{=7hGNbRGK3*2*~}ux4jxK`=OUT ztASh^6TbGPJMjO$_oH~=wy$53?<7^n@4frK;J^O+Kf@3G+z+xUV+Ra>>WzrSKYRbj zFX}ZxB(V7>E2Sv?Lh4$4>1 z1nU%Gp0+@=a4YZ%f~&36!BvEMYJ@746L26Vbcm*kF+J0!bvDvMquWo>9}Kawx`D@z zAH&kh8cr{t#OV_cqmeY&vLf%D!P28gu`)l2L}UnR4di(p&6z0!?rYk2Q?zWaCF5@Z z03ZNKL_t)1;;WWCg56kP!=w#)18)TaU6Nf@Zet8!a8*EVarL+Z$yUHmF!1E;tvCym 
z0AjLp$~3ZG#$K6Jy*u12&?cK8C0J2|RI?@5+R_Fdd+;P=k>T*c8C-qEEOyN&sMSoK z1%;17$E;Fvh(K8@TtY}%J9f+t1BKjuz>?MY%sYY8yz_9KpRo5 z%<^wEpaWTQq{w*zy0~i=XSzL%$fk%wr-KAq1_-6VFt@>{E6{J>hSY+d;Kjj3KU?svWQHTJ;T&tX?GYd9<{u>eaHXjj25oUFlB zdJAh&q&$Gi+f3kCE9NnZ?1Zw-6q1PPOtLb=va)1+WWL*y6$TV9(Ez_^4srCujHzZ; zR%Ih=62a~knzZ0nIYw(kw)Y|+8w1D4C?E;U7)k_XXg^L@5A4|2oYgKu!Qs=^gbOLx zHY~bABzww|$au1vies*_B?7IpFo{>Z`n7ofUB8A=Rwu`3yX_rHd_!u$j@_DP*)hrg37m->Sdy4e6h6tZLQ|s5Q~VbYmI|bBnm+=pDH8*j-pYxrC%vgNPy&8Q-k=`V+ENhWB$GhqKi*8BJ1D zsvqqA(eEig=g4rY)H}Fh|K(Vi*^8t;!Q^;u!@`|qmvG#is;UY#DyQ!Jo38Aft_)PVJmcl4 zX^$3JXwn=z%UD%1-I>Mu`Z|ZLlhqzoGzeD>Bup)ZFAm~he6VdKc1)x$Y8&PXgWM?3 z3U7s0j;UKoVC`Sy)U*}K)ho-!ZKRyKhI$8Qo&^D}YtJJlTe`|8(Ch!|-BelI6^qGyI%;&Fbn3{_!8biVu9?1Gws{tML5iKOZlD`OEQ&SG;2UoY@_5B+ZZg z*pK0hU;HA6m_HdH#(n?MqYvT4i4)kncW>ppfL0hIWDCl2UoK_Kl)@&@&owS$x7%so zK#d9}zxd~WgWrAEN6stX;;A+QO9?oA@Xn)n`+I&ImtC>vqJ0-#Z};4CFMj9WeE5QG zww}&LK<)?s&4<}y=STnTn{eYxZg|!OfG-_i#vlIb$ME?-{la;?`sr`fH~;hh{d>6i z<9~y<{JS6JjFlZQ{K>V)zJ3fJ{&)Ww|MWj^yCmO7-$R7gkNoQ&#w&j0d#sbjPJ5=? 
z_8Az$`1RAGkgYx_%<0T3RByoHyBzW?&h3^l{!vufs7-~M1?*{p$nG3*!gAO>3kz7- zvxp#$kU?=(aiPq_m@E`%;jY+zKH|Iu0@Dd#%B{tPRt7*8?DMa9(Q258!1^^CP}tZ( z2oEy2;n!Eyai6q+uW){^RyDyS2GH9W;q<9949q1_h*qnKcDsRQvyQk~!*C!G*Ci`4 z$R6JVdx7BXRw86@7S>i>IIyM)3RFDgeI-y8IPi;TS*{p?(yoc0b^h_DQk~2B0tZ%^ zfBD!Fpin5(+XNfEe@1Dk{Q@ zEx_{_(NXDIWMf2I#2E~V?JiW7q8`M!Y;GP8udJ~eB5Z`na>)GC7Iq%l2cv*F#MPtlA>=0{3SEM>2&dN+>0q6<9BP?v3cw6X@ZH4kTMk zNe%?g4ftp6gLDOm^LR`ZIB~mP!D^g}$60}RH3}-h!0s?9#kOmOwS|c|55?Hc=rn_t z1uG}mg`HFUD?QN%jk}E#9w>uVTA8CM{dfDIUCqYntcVdc1BjW3AEPh}Lu*Ip18E%# zXZr@pf0W5OQOb-k4Y_9Q z;mm!`0k&M3D{OeNt=y(CVj3IMk-+AuOL9}h9pwA;)sL948-B6;V+w zDsycp8%S^NH!cUq1vi&xMO#wFksO}C^7E1Lf`W>!%3S|+U;;T#w)^|Lt<-nlO+_7K z1Ia{Xfaz!k!E}U1t;N|T_ny2D8>f3vMTpL12mNS(SO;vYMKiP5O?hCl#H3XTDN)%* zq0330CoWL=*R#4~Vdo`Dugk)bG1y>~Ivr@buc~2=-s%RHk1ruaVG=IY!Z@S_csNMKwmAi$l zUf0SfXOggvPNRb~?ITBSi$RQCnY(=nSA?RCGJ|Z;b_k4%+j@D3 z6A8tZ`?@DDwCVq285nl%$@4K=7;$5J!F5q?)OQFJ|I(Mf#K6|Yv5Y!*Y*wRs5r@pxEYo#88gt(U4K zFsP1&y?Ze=GsXVm1yx7J5rd{-6nO=N0rCXmqy|mZ$Wg*ol9{l%7?cIUlcj=zd#c=3 zy08Ib*0zN{$1b>B=pa9@O_jBt0%za!vsO{xL!w^8|Q(xPg^$4b6Ij zMzev=)C3TR9BN!|Mu=kqBr?}u6fYd7fKoGHLS2xmu>fqO<(;ws7=$my2S!jl*_ zFA{$BXsZsDD8X_Mo;&*~3tl*T7g&Xc$-=bXNf{)GKwNJ?s~UFiYU8rqZKS@o5YUVO3F5D4~3vu>-%%PDr+)(H1~-f%K?U z(Rt8W>+!!;Ne=}dO6{P)Dq-RNb$}pSQ_x8#>5WqCTbnE6(>_1h!AgIK^-&5L=WGea z#G$KXbBEDYqY9gsDOO9-|sw7se1OeZR2g3o{ag1+!@ps|D`|iiE*F(FRzz7UA z(x5;x+hoFzbEpER4y#t@Rxx4ahkWhMpJOA(0$R*{I;U$A8WWQpwvDQ{8Xl*X!j%{k zd1lm8<~CROq6TfjZ&nT2?T|LqoW>;@2XY%+TT(6NnhgCe*fH4!ylYEhxG~&=ehx@1 z6_Wfz0hf}?4;mAxGWht(W1Ll03u@RmaTzAtbEwy9a8t8pt&VoRg?f;n5hj?dcMu1Z zf2CLv2Wt^%ztpOhgcl)rn>csXJMdD=F>K#u=4hvutS}gSJi>8XCbH}lwj`hPzGTUU zX;E6ZIF~23DI(Qg7-2G;Lez=Ts5LM(F^&6#va;D(*X8Ek-l})d?XP2y^=$r#uoiys)}U8`&eh^PTTxB7z#J1m5wEcW^kjQ)H0M;rKnbyq*soI>h!;@Y8PX$E@wzxyX&G z1r@OUYqmC;?|$Vge}}ic<*j(+kw?bV6IC^ZQ=B+^CchU#;I6ywVkO2K-}pwn{q1kZ zwbx$j|3+rv|3ZN6!kjM}Nm5q&(7kh@|t-_Sz z^~)uf(CoBJ-U#6T#D_lljF1ov?J#C*$`}S9V@ZWjgE~RDE z`L>V$r@z2McRz%mc<Zu?xLF!zst;4ks#|M4%rp^hVM3x#nXz4IXs=ck;E 
zo%Y1-$k!fZLhqSWBasytwH*@UnQz+?7@R91rCE3jV6-_v_t+_G1tRRCOZn=h+3Bow7-)7CgOyN2Hg-{ESR?-@O>PGToKlD}RihDc zGCetqdQe9?8gcbH+29N}1{idQ$Y@d-6bK^nG0jnr6lPipW)2^MsKp$}DD5Iq!dY8b zB?5ybuENG4j%ar(vI64zKu+t|O=J+C zC#f(sTZd|B%+4OdzJ*B~x_lAGA3ceCzkUp-Pd>=!qea2xg+La?e7>X&Zj&}}lNa0+ zw(b-nZep+9guIeXfD$+xs~oHVM%$_{wu1I&cS!HmTo|)y_NJ!F?zNRwTzmCikpAb6 zJ0HfKx24EbhKWuc6ZH^>4=m#9tEMqGTStS`HK9>l2<~qo%j)TR2+86?q!yr)9?H>Z7(>X7_N1Z4u^=E5rdJ0**hm&cOv}%t&N*14Ct=73Q9AuJiWLVQLBS|V}&ysNNpjR zJ27Fo!$B8QGZQ#`#X+ca1d(Mv+k*VUX*?j+6|8ktnaNN&r`ymN)qYe`WR3_d&Afbo zsr%g$GR&`rfys2SRdKXn41}pUB+IA35sS9+n2WuowI#832EvjRipCKN{C6SHf$)G2 z`}U?eictog=cZ4Q)lDclG+YEW2Zz2uI@2BD2{LCh<^uD*Jzil6AR7)Zs~DdBiAg7S zNje*yEpEkQ78F*w<<%b+)YhI+vfF3X3Bm>E=L2iiC#{8F;4gAd5?h6ht6I-Pfsm}b z$l;&7|A!iCeTZU|B2DK>cpr!geNcfdn)9h3o~}gudK)r2G;J1LW8H9 zYyqavtRr6^U^L92MwG%q7FL-7H|gF;PBx@63uQ?h`pU@d2cffj?>XAmx0m?9wxnJ8 zqAJmQF2rTaR@reuSsOY>SXIYh>dUmC;(G!qDp{2kGKrB6GhB1cwb*~~5bnP1ACW6l zaZUXM|~D@)pzz`q%vY@UI;R<#@3l&_K~ZwZDxtT!5%nw~|H)H$oi zs4l3&xG*+RY|m!w!N?NF+>;#O>J6V*B}%U^_%#9NV6e)g$Za`6Iqn(q9o`CyQ-+kb z=9>01Jy0lYptNH(oVrA@>cfOYo7a*0YWdU(P98f2nTNP>&vUSE_kK(?XPKO!vC!q~ zkZLWo#0)|BkuXY8-hv=1dxa2XJwpX>td8XVT%*LW-Op7B%8+!z|5mCfEzs`HqjEJA zB{5W;R|!#)4J2Zd5f7exZUWInTo2LORmW63iTfTqg2zrB!&y*1@`VcfZa2@Ie=XhE*HsVC6#$s{Is;0ByMx9ZR!(wqMYlmm6jp0I61fe($r76p@6+E=;f$Wcs+3XJuAaWoBf?9C3#; z?ETjN@8RrwBO-^$p|T)$tyqzD?rZgJBk%-NhAj7FUqQbvS2ZSQ`lCLa^O(T~{Q4q`+@0$&Uz#Br7N((HU}k0P}tG}uC{8~f{0PzkuT~s{`cgrDU8j`VDJ9@7~l0U zzJ1RTEG|CE;P%+$Uaqjs(S|7N49rIs%#C7=q+Z#Q8R3fsZanDfi~sB~y5n9?k2#Vj z43!%OX$c|t%bE>kjS$fvQSuCUcg9Q<0&_{aqa#auy1D*vnP-9d-h&2jVrI% zi>a|XT1|m8g$;-=_&ggQ_84)Lvf`(p;Az3H7*K0qK_fWa+gAu%^#t5v%8?i_mBXHo z(Ry%Iq!cQy!k!%GX`(CHe7K;hHW6mg2)@syC`@2C?OR!5qMl+ok(lrIAkqf=b9+Y; zC~r$*+>3+>(=T%lyk_6*vP6+7R#P>1?m}~FC$gm#)QNaxf`a;kWWu5qC)hu;6H_B? 
zM0(AG*$!Nj7QrEc#00Crf=}97pg6El4lZO})v&Fe_V|piB$hC^wV}tQ9Fzvcqp^{*fJ_0rRbm)xoIExpEp00f$a~A_r5er7gAR^nwUNG-Sf#o^fdM`@a$9} z#%o_rG*E9Q$WAVyJhg&;r-Yzqj=lH6`8Y9TJz~(#Tyx0;h<0GDcK&cqd1#(DGPsp1 zPIf5g!430DS8T5xFDp=_LYjqD-sx}EqK~AGAn=ulSYhm89H`?q z;jMM(JZB1xw25wS1+9@W{M#4)0Pg(Cm!ac`)k<{!(N2Lng=bXN_u;X7yBTH2Ch0OM zbVX)3RL*A@R|HYL*2HLg3~AD2<$$!uU0IVEWf^Q{mA2!a3v)LXV(wXDo_&6J~E>e0r+&ruSzTb?knW2Ll8DEM(zcvTIY%TkcH6~`2Uj1& z;>r@+7s-~f6{S9?lR9URNo$G8|KjJFG-?2MJ^a9ipy!)@CQV=$q)nRFVemI+R8=;4 z(sRe!7Y*>-jF^5UvAWd7s2szM*IkF1@q>_Qg6ZAU7-^64ypiU_TA8907T-FYO@#Mz zMKAe5KG7BFv=#6Au>8PY7Kx$lxcmbttktU-Kbf{yk~6cKX%ovLLBGt|E=<{?A0E%C z_=T+6Xj~U$#n?ON{oHQn*5-&{cj{Z$XM>!J&>nMGeKGu=vu#b8U_7k_b92)c7Z)*i zYVML>!O$v+{{6PMy$xUc+Sk}#>Ej>&_<&W^dh!DPvuT>*iBEKj1^<_IHc!Ltbhl%u z#Mn%!LHl>lJ@>G3gw%JT+5$t@$L;%oO9HU&c<6l(Km0Hg81H}o`|-m+{KI(Do8H9s zSvz;`BW+@TeH4c~bCu%n>`OP!ui16*k_|J5UGH*1 z&81~iw%bK+cYpaF{O$WcaoGU;L}GmS9sdn)e$N{*bH&VgZHqE5KKLI#a#@oX)SmmY zXJh|$`!9n%S>Nc~ecD~DG-+Dc+dD7LF{r)}{t>5Y4zyHDi zhp+toS1+4=BJy-<&ko%BL;q$w%j7X?@kW!^gh5N~D^r$^o@6pX5di1crNS?GlyI&& zn!&&bK{davGEOS)(*%8TEh$Wp zcfX&Z$TIYMt5}#lj-`{2Vt)2vl)V+~**S)54o;!noW%I}I7@ZO8f#>vfh36`2tCwY z_8CyFa?#WvR#kwuibT5#zi`qO$aJ;aZ)ZsFKrXE<@an3JU6q}%Rrao0u3F6VCS_3G zT2{&r4hH5x#+s?zj}yqOz}W6-JfYFT`1CH^d-p?l_~GN|f+wt`hhLD3frYtt+jRZZLRtoZ2CGHr zn)cV$pz;n5UcDEM1ZeF@G5MTp(98BRP)RmmwIt!dX}3>N5Jb!wl|XsW001BWNkl!@q!Y*B9JS3cvgHk*^2l198M4d=&3PXcL+FHkcj;FTIwtx`rt)kwWO$05ycVDHo_B zz4e?g0uy%3VxmMCdnx;N8iykKp*$WUFIKvc6mBkz+N_k?hf>o~vn`ni|Ae< zvet|R+WNC-YwkzE-=y%+d1*3$O6~1JNDbmdXO7@-041ceUxM#PO!!)hA!+TT_)w+k zLm62tC1&qP-Ng59Um?m2vMH?Zjur#42oxsOiKsL+PVEwcqYR|l!G=^4L_!)r|HNeakUd-^nAD(t#g!&@jA5k%<#baJ ze^%n=8=r)0uD>3KA9?`IdW2k*NXF_Abs~~VoBe7WEf1{+j{{++`U*nwSuU(06nIW5Q#y#A*dmtaX<-w_Q@+Twn|0>#Igfjy^<`LOH`HT z;Q=W>xAFxBK8wYSea@mQTg%YzoKH*3M%&uulxP;loWFr~El2ZEJJWbw7O(lJVHVFG(yRuk+!Ejd)WJe!@_Zs*Zf z^2|;dgE^kVlfA9z`o42M{iqSA7>$i}f(X9r)TvYGQHI%L+)kW0fj|7iKeYRIdLkff zWIOfjXFq#jS{c42Tq>VBbrLHptE}$$~?#+_$kSri9iPn z7RV@CKY7&BS{7)w?I3#>vRH^x0x%jV`_iizgs-Fvr%%FGBUX_Lvf_h59zX?w` 
z2?S%nMHdjkid~1G-N1nkp3C7@fr;+zgG-X(Bi2EU-SBnUd{S9Gkd$!5iZ-6w(%5Uz zA&tru^+p@*_Bh(3hw#w-4`XTm7@A0svVxO%yG<{7TJ{s3sHmkNQEwH}}MYPD@Ez@0r-5Ug5}7<%0`ALRV1&~9NX z%0r)LM~aa`feBhk%CpqyI&8KMk)y_QO9g@4rh40f!~V|mwrt_qiCBF`U?h!oWN8!A zF?D-TojyP|8)eB8K^+^9bB|d0u=`;^%Z_J5t84Aa6XC1=A&i=e?*o-Hi2<7j5%-m; zwv~pFD1`_<8&>NI27^7pTN>*t&wjWs78-d`+A8meZOK$oKy|tXgssJptvaIEOzzq8 z&-u?QRl)59mgfpGv3-{A!$bm;Ee6z!cpjbkHPi};8mTXeKA78JMMcD8m9g})L=Er% zU_D@UfRXJwju5azARSvOkxsV}*Ba<2E9ftE(aU;>@|f=vqspjp_KELjmYda9aiZ-r z(q)9K>!x~Ok1Arq_H-ZKkHI0q@4g@40|LYSH5?;@v4!8FJiW>Padv_YyViC;+&vTC zVA@;%L@4qS5&8I2Ab+EQ?|??H zq{!Xn{=OUBQ(*^n?e(cuaR*!=uFYLx8zM_ksZ3#YX$^~~79olV*I#`D_D$_YD;eSQ zPDGis9$Uhm2R9yw;CsS3i}qbS87Pf zscqQ5vhPw}tqXt6H8hQ>1rbqwOioN;Y;4@9KS(hg8w=D@86upObGCrxQWb3t7Ab9f z{ln^fH&5{dle9tqLL!kVn#~sCwMAB8$kgP}mAdlj7tY2{o?TztZd-55S#IonT?+!Z zJ>9l}716n(%F=F+`tNkoKs!$TnB=xjq0; z9FZ5K2zmL-Uyk4Sjo)B$Wqf>$zn{p9Pkrjs_|4z^4JM6-k8L9Yg7)t-pZN^uU;OH? z{whXCN4Ip9d@hI9oVU^I_c@!%e~$nf;;{|xU!$VNOPs^9-L@OGfBf@*!c%U2GM@Q^ z&)CxW_{u-rfzSWVzg+g`^7~%(eC)e+?`5zjgN>|i|MrhR{{{4UY zh0F5XZnyP~vN%5ahkt|D{pM@8^DrKRwvNE4YOc)Ws<*U;&WSlw&7qCGhrzUTu$Xho z22_iB3#4i55l^$TD+6Au4WR9%mE;QAq2R9oa>Y5A!r61_OL?3=zkO zYbnxZ1G#n<8o~qCLL->OL=5z@x4f-`B>=80-SJ8Wbg72Euod7sj?v2$kSmri^_G^g zJbM(gM;^lTNF7(-bO8HzO<~7a6RjqJo|Jnb%>GGXJ)X6%%&tyw1@;Zf4KA;y{e<1t z$da1IE@tT`t_j3(ia4pE)@WhJjw#%C?}J!gJOW*;Lgo_?jS-Zf5ZBlQRw4q$WH4AR zjV+iICJeH)`2jBT(_o0gS}r+oP6a}gkdi3})G7w}6O7UEHoUcxAutFy>aDJ#PWBGd zqiBsp9Mo%oep_+R)t%)aCWruF)Ir$2-A5+IJW8o-enm()n0y&Ww0Y|&6%ZHxb;mu} zY!?6ZLE!W-p@G1Aecj^C`L+8vaeGdLCs_Cjlm%*0f{`@gg+M3oGq_IbtB|a4pLMS> z(Wiy?i>HNKA%OPOH0l#O(3yP%qYPYU40udUOknTs8KB#N$}$dzqyXE=Ap4(08pVgu9vF=*k;LjDvglKl8pt93+S)fU`CVGSe^sKp zvJADn!WLC=qsBQnWCJMb#_mPCvS>IdoUybb;v*v4Kq4SG)K)_@2xM#^O{byK1lj9fc&kLSuQWiSQ|<^HiQ;QVXw9XMl4w^#P79gja$CwsW^Dub$H<3Z$Y$yS}QjE zkRu1QA@l|ul-xepa#U~E5hyQA5kR#s+GeX1Wrj|7g|8JA4M-+nWBzf3GG|*zYY}FH z-?bHwImLolcZAwLgCvvMY;mM3yC6BpF$Vi(e+xZuPln?%5iZo}B-;(GVe#Y=T4@_s 
z?!OWOE0L$P4KDN6}c+uxdi9}Md`1OEqsCI#*}kG-q~D?ga;#TYpVRhw{B4m%%P}HUXGRG+wD8AQC0Te z{_!Vp@Roy^+&OVh$4Bam+yC|pTRta^kp`arf~VouS3DP2-+UG7%{u$^ch|aHxJG%O z6gvLkHx4mae%I%|#@~6a?W!kUiSK>!GcUV)%4LUK4!I~>ZKUG)$Z!8Ce({|@hmrA- z$LB$Om$lFS)j#8D-~ZGtla||U>uo~?hIE!fq?|al0JYk+`44U#UsY>W)o9gV=CL34 z>NJ)pwX}vbrOb;Ay>6GQT1!~ZVy=#ic#tI!lP1wFMAv0A*wSM2i>lfZ3X4vWQuT+@ zD9B$Q6~H*yj9)%1E$#mWpMn3*66RUqPsAMM<2Wo{kd7(a_tqk z?f~T?G&lnzib}+?Ykab!l*4wl^<8$JobbRw@T0@((#y2jP)s-TcEAqrHCd*_5qF?b z*+QdvC7SITj@)-YPMvrJE3eE$SR)}xMV4%sH)lmG_LT~ zgKa@ZhV4}rc!>N;%$)Di1CZve5noeBg7CVE_=p_HI(;n7E#TmlGZ-ChAR(Mh8tX8( z5@(-TkD*mlapRc}46OTohK2iErMmF1xoxXzVhx*#F+g0v-(=?dhAW9I0b&3#;lIO^ z7J-#ab;^Pu41+#f1xCrg;jBBf$!JMLgpoMH3SAe0&B_5l>|912UAN2x?qYofu&E0I64i2m9V z8bu32S*sRWKt~Z17@|(c4@0sDTY)cy&#e&NCd&~V?z-E1j|iqnLbM{Jaf=mY{kc^p zpL+c@wy;W3XS**=VdJUU2Q8&{83U6fYQw;J-`6+zw;v~l`tADOUj5q~jkMUQw#qZ@ zt+o8U;Qy}qypv55XP9`<$KDTIbzb#pmv_Zw0cnIXQ{4CZd4|!63H;!TUxMGe`|IpG z-KZr-jb#mK!h@=S-0HZ)P*r~{qI#t|0h-6Hyq}>gat3<@XBm(cRtc3<+vh-bTa>^8 z5F)0fIRV1kI2sa5hcg@d)sfH0u!=W`0rEmP63H>8q6*nJV^5%Sqkj{*R|+i7FQLEG z!>)+~sE(0l8{MqW&q+h}Rq8zkTGb%9z1=QOBfA`DeWqSODkLv(MQ*dWw-D@}OS?0VAST*9Z%^jZ3Mk|az> zP(DNj=C9wDd+m3B_jmD^fBBaK+jL{43)m)A#IOC@uW^X>u3fvuYn2o}~&t=Rv898OOGr^!z6Y)S1 z)ukTl)(T^nYN`*m$Dr+YY1^Hjy$fIchdc0sfA??C>3bje`h)!0t+Z#o_JJ=%z`+%byPA4iI1%QANJ-lqZn7v;#w- zg^9wj%8e*8R!mj3m|)uqsMu6i=o(wHh$!dDo`_Q;+m{w_bgB%h>ZK|;Wdb#Yt8irX zg{vaBp|?c3s0{t?GS=p1F?-|zv~>@+J?F{TH$8zyRH6~*5Xg=HIsu$X#Qy1}sBAXi zz<)pJg(Sx1UlAf4VPqGs3;^gu{G30%X zctjy-?tm`)C~6V%%m9m;0EYx9W5LQ73P&|SuCPG1v&)d)j>#!oOxUyZ6~J0JUvsCx zFxe+8qEHr1Ci|#Pfz|m%)FOeI9W68x^S!0Yx$e~vX3~`CYG(nXDnJ9Spf;#%4i^7L zQDN=^r=;M|D*r*V4dqCW0F~4g*bYl(_yS%Blnrc0SGgAs>vOg_8{ojij|A|HnC0Ci=9eU(KxN^L-L^LMJy2J$Gvjw|-#*jK-T zp3YI%0{i#u#`IVlMYjW?3m$7ooyFD3VVRo8T~3eqxI_0qSO743(!#!`JlUfvixNR4fTYf3K+(^ki@wSC z&?5lJF5}31!#gD%cSf=vq6`Mg0+&W-MK)v!O2q9Nn)`R6CX*fXI zf{>9&`QIusqyl_e$RIVQ{rR<|mJ32B|9T<~b0S|OMD-?`^%VL13bLhTs4|Dhr9Gz- ziLP6aJF*}_$fj*^F&(JLf?gev;X%K!&oi%(9Jv1jYeoMcw;}DN$(a(tcqXiUQMDiL 
zTg^V0Oe)D1QgpHigt_GzPsg4E`*44DKc-UBB^FHXi^$y@HUr6T^E9N67B*)^xIxep z3}sfJ(3!DntdmfZ>@EwWwHi{F52KC!m{EaPGM;so!YV0D4vM+Qls~1yk)HN`k}e-0 z9Gj)AlyRPDa6n=;o_otfyZ@A<)m!Z&U&}B(u?q)w?ngTvWpbVF9SV9bIU_^yu{mPI zOsxj{tP4SBwbmKBP#Cm&rZ%tu4#BC5+6L_xVQt?kRfIMKuuL(YPT+>=Ct|cdjt3VW z#L<3<`(JphzsdklBBZ{g&z}a1f{g$p% zVbvF9f{FGN(ztHM3Z?D5sp`9-s?hhFOVo65tV4I&Mt7g>Z<1BA+RpLa+oLE!t3HCI zXc3(v<9kf|P!D$#iA@39?RMeXTA_GEbJv%v^(|37taNit!4<%4)D<>D?^XD~z@B=$ za!b|rX(|LBdGwJBdTcIOI|TQ`<8YwHK1IfV}9N1UWdJV_we7V zY>M^g=Or(B2@@D}JZ>8s5fLGIqgY|APD@yvIR`_aR;yLTjzh=A1P21L_H3RzR&jS$ zZJ3R#>IaaAFNeip+XK>fUE6!j9tP}hc*gY%-tXK$gIc4;YJ?LcjAkm{1Tq=!lz+uYV54uPxJ3@-u|sEf$uN=sUO5^e*Gs{fpIR4z;Oy3r_aCW zH9vsGlZ*J5k9`4u_n}YW@V5_dxTVWgeL?&FKmN-{FW5CaaKi!I_9M^ZzH`MB_HCMJ z;z*!x-2N?m<}d!~tVy6VwZD1a$8hi|*WyXfz4?M{@4KbZJ#fR*ujli2^-WiC$hNx& zPd;*rNvFHNdJn$#xv%3Jw}113os~O2^RG;1IdX8jU7EHbfdQW6lu%iY&g?w!Zk-UIA3_x8BILZyhwjzq>#wCy;7ph`vpHl*>tCVL_s*?d-61#K#Z(QLrD1>!j9mh3dT zD_DE<5Ef1x#mwI2i%Fk+6aEq zYKa63`s)HHyHqK;jQ-jRZg|3OjI|QP5xuX^-r44}rV^Z(6+vG5Vh}#p7UU3qJy+#g zARsIT)f6F^801^5Q(QR9iQv7kWQPOr{A(BhMF#@BT`;%nDLM!!Rkmk3ve8$H+K^HP z4GX?5BXNvOmdMH+rK)=+jP@1E1@%0&kq*-$%B%4A8wmgD?B|OCDFb-@9CEmzl814J(4m zSa;CAa#?!R&3ciWuyS2mB|#%)uXw&zYpXnl5^1AJa#j`)83Dp61Wnk82#%a3mA1EG zRZ*_W>V6>N*Yt5xwke~m%W@>0KH}9blDI@v6ePvt5!VDPmJnqNf|5|ET3_1zjm?EC zh}?o=b(SN_$d-wO7L1p*Vnq8Uk*ETzM^B<8MFf-DmWa^`5uc9)vMvnJBQ3c&P&B9q z?=G~M^b2E*^h|@M&?F9Ks2mNk_sGlQJDWuFfjGo3z8QXwuIW*+2zo1p0 zM)mK_abw|c2S;1D=Z){H1KAml)N}`2+3Ps+k`x$Z3*|zeg_gm#*Ec#Q%C@M*L`ai; zVuDV$i{^M6&wB22ap?BLT>UOZ>;Vn)^@9ta%EHDScc9=@R_WpHIesXZ{i5&ADV#nw zmS#jelZ_Nt*t_f}6+;qG1ET~fgNxu4eoQi1VsqUyQdVS*bYx&a;K23~mucYkU)wwY zGxssXJAF@1mVC=yl)Vx=ckIIcsRL+4O}-wK7(>q}r5%W)y$Z%BSq%e>ehk-DwY+~h zu)PZA6{n**s-e=o_YybZEIy;boucz^k0Y|yY*Ez0fzkaKtG6-U8pT6PN3cSqVp?FN zSwckxiEa3j#hl#`qeNmt(WT_MC+u}M*GhWQU)YnTgtL9qp43uSg#$Yj(6#KY^ssm0 z3XG3VShmdCpvo%mqPjT3M=6~z!aoe^Ixz8EYtJ`v001BWNkl#a z2iwGZJb&wKJ&y`OBJj@F@j*?z{8;fp^k*B~s{eYBUCY?yvqCU;Mi-o%ded_{No!=Zhp>9 
zXQh5{B+&O=F@xJ)^*koU{_OYu0{`-{+s{i0GJj$LfBn9X;lTC#2S_m5H~Kl7cJ%&7 z@Xpu&F6T6yOMCNsehJTi<*i%Z_s61r&uzEhWk35;RzYl*MM3vGHM%GN*EhWi^T+4$ z_y6xda)!lLtkl9r*P8D68=rY2l5|s8eY;)Y77xSdeo+JRj3o7%U;>O z#M#zT4o{AJz^$z0=%qs5>)EO<0Y#NVanHt_Dr>bs6J;VGVy=qk)u*-C(b9SG+Jzy3 z%T%GL4Ma%9N0h);L`(MSr5{ax#IaF$uv;uw-nYP{z;quk= z5yjaa3?K(2mC)L2tmqghy0#%>%HhRwSwQDGsN*weV@W>|0I9yux_z|O&16g?8g;aG zOkw5tQB3aIg^@IcEIULSGT1}K9Ha;!(7W`k(_p_;j^MObaB%%!IkzhFZyqOxax@&Y z>rR;YoHTYwQXubkQExPu$S}3HL@XH91%am=1T12dE^yk1O-tVwnfoD#oJa{_JiyC> zEYeJdhR$*n%WF_;U4~O*A_p8$DXRNAT=c;%z>E#m@I4v=nIf=1fd?PGO^{Ey)}a%@xfsgaEZk>_6ySu7xBs(Y44!ADWYZrk;*v)TV;KSJTt_`XcMTXOm4H9N80RxNSJ=fE-Xgj z!=tCPimZt`(s-JcR4Wh6BJP$*Wr>;%c-G9)w@4}gnZoBs@ZKwBRVSfZ#=@1J6f&?k zlhxd<6+~kEh?1mz5VIYsx$h+`d1Ng^VbvGaqhD3J zPP*qv*-7tFWJOtE>C_ThqK)bC8PwCJ-(TUZPu&fy5IonaYqV9GXfr?m)4dk{`D|@? zgv-Csj_?#rZpI=(EpDKj_n2s=u}I01&u8hzp6!a~i{6TL*LU;ldOCZk(>13|a949O zY1lEldY%#ZKRbKE)=r$g&C^D0VBV(`e*gD>pVerU?V#}LMz?RBP7n9re;@9-=UzPc z-~&vi96o%QiH)_jHC8noIBu>tAH9?RR}dGgm~Kb?CYxKN{o5SLeV z_;$NAjX-=-6A+MjuK5!LL=*T)N{ugk{EPU|J3hjB5$Dss`j2*EWds z`tkqcf5QuY>;+o}py|BqIl~!XLf$k2%v~tBnBh&p!8acN_b9I3zX#1!AW>_mQ;q|J-4TDE*^Xe~hO4M~@~*s>wuce4 zHJTqdLyw~Y$zNV!%Tm<_f3|_6G{KjIb?2Cg6SNx*jMpVj90LwNdOwnpDM;)j`%kzX(&4-?-7uhAnTS~ z*_wB9ES)-miB^gm4o;()N{E6U)`FEF#+=;oPXR#gfd7C*7*@}4qd^jgi5#g= zsIgUnAu!qFT}Ku)ch|iNAN0p)$(fQs==}Qjb6OP0msXH3EhEt;-$D4Xv{DxcEkYc$y1#6|`T!v{Fnl<|V(7!# zkSfA*rrtV??ytH8c1@rptGsLt(OQl;D-aQZOCi|(92hGhnlYkQ4I+)qe){oGBBIc0 zuHHBODw09vf1#l|#2R)WI;(6|rMeMRXB9=a4%ulzj5Z<53@Yy<>W@IQ8<5Qgt1(ET zB4cYKk28vaDnfU=@i5M)v>{dLc~z^$i0cX40+qcSdC`YhD-kC-;xy&&6*8#8cDJ*? 
zUK=3Oo*;9t7!nz7C{d0ksbYo)s^)b#uvB0=Kg)>*qVbZ#hqD5`ZqC<`3l-=(QGbi1 zwb8PGzIGNBwt}4PxnM)?&F5%*$cqxYcJIR#d-mhO{1990&{(dmozjL+y+E1fW);Gg zfAcx8e)`gmGqhi{-`#E({az1^P6Oj(V~FE~Et-^Y1V+TK8O2qOIH@B^TrhELL1i$2 zJ-kD1Xd4L75!Ki^?6MRdi?Q=ClMEs!`rRIumsTK4iRsau*g3WfO$webT&Jo6x@~^H z=S0G8b53Bl_TA|5(;TTWU|S`soDsN?XiLP>9jpaLXVaV#h2I?+YJ&PK&70`l)si}7*SsN; z(iVUOq~1B#`fs;$Z6V=(x_16oqFV1wC#`j3`O?x7lNe{SFQ@&MAseXAfBy5VP}skJ zzt3$53jkmrb->dOIEFNrv15)jkZThmedkDtY9e$CW;Wwm)6acFMT_Y?zr6^CykT{Kl8TN;+iL2 zeO`T?#+YZn^jX+{{eHaf*Z(X2^`GwC^0p`_dvSL0tO$nAN*4mU3-nf&?qpZwjHxBB_Nxt+tIN#(KKsz!tpkq=~jwwb-x*)}@2L^_eOh*U>gZKQO$ z|E>qN35-kFHl!N4GF55xmsc6wE2ug;PRQEV280fJ?i=e8^I0vbicM>xZU9LRJ1meD zJ+>-ojI@v!Sw&uGV-=x_9613_-Xp^R+0Oi+w8%9tHi@7hfGakY`Ao97ya^&fsKSO8 zUXcwa)uvjSE2+6UTtVeI;z%Itta6y};-g1V&OeGxNzi@HkMt5MnTyMTCl~*!m(pW%08~$zXv-f666KgBb!zS5i@LZ zppe!B{aRIh-JO#Ga-w=os9-Uv)sR}E%DgaZd+k*j+?24@;Xx%>_*wV5s=!?H9kR;6 zaN8Awi%30CU1oq#U)Arj99S`+7>F=nJoy7io=ro9q_ETRO3 zf_Ho9EzSd-J{rxYw_##CJ7Mk_v;7DrL1K%|W*-E%Y(+A=^XE>KG+7bp-W}-l*06AD z9%H3IqmU@qI?&xN1SuriHMV(zu0zKH2`SqgVF$_mqmTyrCiJoqq=DRC`E&IIvhJv)3>dUnS@=S?<_3C`yLcFIAi zCz#kVj=Z09h3&~xa~RDsG+PZ;kdR#^1+>;0bxTIlUS<^;V9{mnQ4&5hn=gU0it;3C zWt&#naJjxy-CnMG*LjQ+&|Ty?SJ!2|3$Q}-iRC4i%;3P4Ay&uY%&h| ze)%{BXS^!NzhG51jwJHO=7>wz&#Pyh)Yj5SY1B}|&U6OjGh>(@oy3vlNjx(DDCQRC zu&}&@td(P=-C`wKE;6KPgg8o2vf9$9G)aBL$~bK@H`qqbdPPcYh%_cGwZPo*d2|-K zxZ#@XF+IN1*rAqYUgy-P-fGgd$B0n!c9ui)f-`)`?yk1M^ro?PO&PUhz1BoZq@n8j zd$l6?Y^Kw8yDZxIkP2a=H5yq+`LYe$p9yq#6_CSgj;(yR`wS@#?!5C(R!?lS?dtuo ze;<5}zYwq&DX#F_4w>(Kg)JfR5bVPZ-1Ku#2Uwnjho)}LStqzlyug#6jJy%zuZ zjeo#I#8w)GN|TD?YzT~gug|tY=i1W7`GL1*(@0hDmjCigrl9FEQv%TWz5U}~-16sd zdHz%I`v34V*mL!Tx5Xlm{k6aGQ)~gXRbZSH9Au5Pt-`1p5e>ii&Yx%1#f3U2M1uV2 zFZ~Gr{=nNmwB_wQ@U;iZ8tW;Oq?fl!Oni~S5dJC&uL1`*7BO|3>SDu2Q1Ofzh zCsk!O+m2W(seZSEwdF;wye;z{6jkAt83R6Lo}tX?cfkM^dAHM#3yWhwhVeJa{@M}e zT_gZap6GXEtfSc0iNbNEW?~|O4y-k#$-hw21km>SNXiVW#}1=7c?37^9>bM0;~1&O z5M`h6F~n(N0CEC2sUn*c7`8&zf&IeTlfdOO44){tG-)0D|AlXZwgIQF%sE|i*$C3@ 
zzB?{0gC9{Hjs^X!P{ObdjR^5%10#FJaMca_@#LpokNx|{AoB&}3y+|G;s}cQITR}$ zWD9HPFRr4u+(pqTq5Gtq0E#X^yjR`_RK{W%lsQ!;X^{I2|KtK?CZYNfijF{3#?YBY zw${b5hYkU~RXp+FUhLg9Vyt~A)H4G2HIs8-&Hkdwfx5c6Q=5I`t5ctm9C_S;Eof^Y_kNbcBy4E%~M*9SA z;wju$mdYtVG*{Nsv6Ed|nh+tSIfR)$AZ^wG*bh8ZnArWJeo&Td6%bKsU#m5kN;RkA4(oFz7k%1}YIiwi5Vg~KKJFuquSnaQ}I;!Y(k z>=;%L9Y*(o!{|J482ty2pgeRG`p_fLkIn-pmVi?oU^xT2>sDNOKqQF0%pYI_c9 zu1R1l!Ps^CFnM4nvZ9aW`8i}uD^N=vsQESMMN)*6Ts2MA@Xp6RR8tW+DYa9>NdAG# zgmAADfJfja6H(NE$#Y*p*CgVx1`_%its!c}Ox_h`pA`UXzo-Ir))~fV2Et3_f$_n9 zZ>$uZZHQNE1ZswX@P4}2IS`w&q9po#jeZZv`V@uIW5Obm3A8dE_ikLM%!iEGj!|z& z`@AzsCJ#^$nMc10fhS-4WVGTDwoWp3SaMSZL3IgqMc6=l8$s8fOvaQU-x{qZCZ={^ z?^Rde;Ehkfo-6jCUuHNtcM@}_PNK8Ag0jev^?F!aT|u|gLDugf&-zTv6l9BM$6xoo z&SDRhrpcXci;YxZIv?A+|ULjcA=IlpQ{`zG3u zat%?#-sWyRdFCcO>&j>1X$PK;J2l4W3{u2mF^mkb$!)EfeeKxkf|Pe zS(nGZT;(Y259h(@g2$Asm*eEIljyJXaA5ZV?4H_C81hapJ^=Rz_PbqH;h!WhZ|1t6$~c!^aX15gYNoZXDg{$Js{>Z|`*H ziazs`KlziKPZ26N!XIi^UU?;ce)O z^rL@`*S_|3OkThh7!F20kQc7edHbPXcm-bd##e568(;qPSFtd= zc%gUgaoH$D{*CW?18#cu6E8TSLyG;Ey!ORBrfju6eD6_oS9{+b7vnN$8=JjRMSEVL zx3FTWN1~X)O=T;a9mD_+k~vwt_7hsKHDK`2_+*o{PZCE=RCHHYd2+)J!;;MaxWbY$ zFxZlb!oAA~5e3VVjf=7L_dtmApy%-DSbD1uuF$le1oCUds~2@-njJ6{K2n;4u#%%#|f3hQoTsuz8mS$LOt}kL0huY3-9Gec7rnU!6pd3T zM_Q00ZN$5F0wYbt?GcEPCeW?}RBc509W^`lMv}!I>{CyNoG6I#6k}KI0(v=??|TF* zy%J-S0%|3JPN`qEAVmYB6&s==($xt>MLatpALpA`iXNWSRTjW@Ft;-I5=Sx_WuQzW z2qNM0f-@#~!L96=YzI@Xm9_*XggRifC z8@fsAm-Ms~&Pv%JtwP1-84&^I&a*HC235Gn^%!YdGZphvpfxsvnSDF4aB>dwWWlk# z!bHW$$S7*{2ATi(IRk?(46;O(3Mb%*Dm_Uk4B5a|3RQ)ERdQ;^AhIkWTL7Jxn3|a4 z(C$Q}th`90l#4G4${Mos8DqgF?Xgx0ph_`<4dw|$mCi%22zXJ}`Y~09eLfgN!)=_! zd|}Homvx+6sJeiE7I?}?I6EUl66>Nm@7(nwgJ9Avmgp4YNKmV{Ffu)kU1Pg&a&jLY zo;!kt{v1|{70$6C3%pvIqLHRZ;*?1z8h^=RO4@4yrz*=!wxR4Sb`kX>9Nc{^uG)Jo z8gZS8KeA+VsIFimQTs%5eO_G}0meGQK~#UK#6tnmKkz&F*g*Gig#fPG7$TR({YI^h zrC!8#HX^E&S|;CTo58r)Es;R3F1$t1`eqW#$PzCvRn?d5BaLjH$UNp-Os5kHjEFn4So|M=&hIp=S? 
z^Rsv1{yXo-Gk)OdkJBr4VH#btpMCr5aQ)M+yJ+7**XpIOdoio0&Ncj;8Y#1v=a(@$ z*}f?Ix7`NYh6DzOvaNNw8lEbNqbNp?Du-!e>4QfD3x#3HuwTSge=EtVCz3o-D@*dP zUNaI@MZt=a!<@-OSVlD<$|1@KWnt`yOnHU*LPscw@ViJRtPO7M!V}Ftm_=@+OvSj; zu>oj95kxOTvAT@4*<-k3tcfS=+JT115!DhCZp*S*6VfZbx)}JE}4v@JVo0KjPAW&CFONMqgt=AF^8Vf1 z3xew^^FnD44#MWUgjLiC_z!3ufCv?-;eVt0oFh3zaKBbR(jKbQPN~4P!@P-sH0#kt z_f9QJvDVM|oGAwR9l%ssZ3zo?xEu(k2~~lV+$5AjBaPW&hvYLlD!k$auG;!EJ=57_ zF_|cI?#TW$Dh7BQ#`{ zMPjSN3ff-LM}KhgK#GI0Tsqo6!a)uD^;aMmmqoVs(BQs*|A}C1ydmLTVA9?DPDT0b~E%ASD}*ysJT_3 z-9l}wjc9^?MoiE*Sz5JHB+V3YqsI4{VxU_guSKkgXpA>8e$9SFt2tI5K8dx(WwdBv zEzNjQi|IMivx<7*l?@JnboNn^6yEB{Jv&^+Ni{y0&yuQw85U9-5_yqn_VDkl z^>O0ZDYTj~(x}G4*up9v@}gjS2(sUk(%lU9c_qzxb1D?aI+|`QnH;X`8lHB|Q*rl~ z?_mOmgLJ|hTs!j5XpVz{jn+pU`;Su)zHk18@lo46{as~wT% z7^S@~{Om0$hbzWtJc3rzz>dZQ_D$@ye#;=wf5)+l|$aTAv?)eJ+hg>LeP@3<8#ymrd@^2GbK-vr#`h z+vZ7nJ06i5;Z1LP6F%^P51f;tqZ-@R|4r)(xF_P}Cw}55aOB7lzUPU^c=x;CjUQNAIJFg_(eT7>OYi;@rA$p62AWVZ=Cbzj~_mPUZ;0P z0;5*1aq#%L+DH)~zH|Q!1LWtM1@L%l58wMR?z!XME$ubg1ra%QsTvVXKk|kj!l7>- z+7dAS^?&&WZn^C#-__PcFMaKcIqT+Px4qZyWm~#K-~5wv{w4~PUplq4or7_SS{xt> z)k2X1xt7+DFRvLOR2bJ4s>GDaW?+~kT#so0#>1Ew*$f;i4;~sd1y!h~NRtE!Eud;G zbh@kP^?E4E9$F(QqNsrC_gRw8Bn7E83S+UNDQ|*mQRAv&P}>zagQm_V-TKNSSWr@x zvGJ!j?jyXoCOe$0gSfMVUL@cLuj}~fvIleNL690>;(d_*DUTGNae4c2p12+ z$aj5xI4nTwKH|<%0D$>F>58LpfTt&=ln?RNN_e{skH0un-hVs_kO~UCjd`&Z)ip0_ zXe@4z9l-Q{;P~+s9C_pzPR<|4T4w=Ktpy#WXpT;he-~7oAWI|O2B|UF$%}!t!c-#D z-cjB_nPcJ9EV9lL#ztb?bi*~+zpur%`BAE^KY7ko(%J&M!mZrhF<5}d5E2ed;`(y0 zhVWt9e%>yudO>h0ws67VI=83M*ikWpXw^IHi|?xEw6Sec?)qpu@m9jC6I>-KRd*V> zerdo9X@N>z0Shij)~G^^w3@A=__{?LY;3m5=1o?%Vt{&K?_RdXAtePpfQ3L;mgo}T z&wyEM@;CUdbm97@0*f*o3TgUjt;{i2DzrMQNc$aV)kiK(-=O_rD<77cmlXk1m5K&K zQ+3~aQX=TR0~nyL?lNeTb)o|Mr-&-ii;}NvK~`6E7K$8_G8Y_yrAlLyW%~j5o1%n_ zggu{j45U1PM5EBgYLD`C=sU`87x~&MXGx?);Avy`CDMe?y~l=hW&2byO!gQo0qHeu zVHLO^d(8OEijSZ=&l6rESm@dJGw3fUv%{)TxSTFUylWDpqa$eT+l|hNlUO}Ai{(=b zP%CRdW`nD<4&+?K)*+2n8!ZY?9vOjX)*)&UyZDKE9eON5qc(%d=>%f_1QPOm>laXK 
zeH7gsNv@%LCBH^oq|8e8f$eum5zs~N_)&=Y6dlSi7-=CLYaa7)g}iy1h@v^c|nX zBfVL~Bk<2^CSokXX%JP#=Z%#nHv7Y#8T(u{j(29vwBwmH0sEXaW1rJWEkz;|H0y0l zOwM5EsW}FhmX;Pdi$jbGuW&H)7Qsb>{bo!WR?!e^uG?Qf|5qL7;>%d7j!qsG$YGUc;1O8U8Osw2~RPa<|S;zAt4mm1UBpUf1)bh1 zvNA)DLik~ZcTq}=)Y_P6Phj89eb~|7f$_#TlNk*9yKEa-70+ObP|< zDT(U3F@a&nb-rDsndOxfPp)a-d+6nTWGX{9H=N}xtkMEv~aAm$vr;7e>gvS;%v+%E(C@v zxDc>@HdM^t@YGSnz|DnR_Y@dH%5<`!T%leeXN#?{IT1c<@=H-|ypNANv@C`8VBk zlYig&s!rU;@44q5CNJFge)z*5#;afbYTS0)^O<~bN{#UA#>Xc(=i+Hkd+JbjMzxLx z6XO;6u}%T?=Tms?06C0^dzVAn$&xLCjyHUURp4)E0r#|xcTe2rR`N*m7 z%4_skv{>6oo3a&e+5#ZOS`Woqmu(IR<0AhaUV78Qy0qTZ!hk`_x?)tG*filYm8}fK zA~LI)B!*6#YztItMX0w6EX>WJv$~A#YM-Y+EC~^|Qj7=;UQ{O>$W3_{R4gA`V93?P zE9)=i$OMVdhQ(CIW(>3lWpk&MgRna=!Js_bU->dlWe;< z7CLypsDNK7-GtFDCfvWhAG)j-*}_)L+u5}Cgo~*Vm#)eaF6`NoEy99iL(3wwYqF~D zWXJVtz~S0*8h~mVIxeA-8i#FOD5|X&Y)%$kh>Z|r(YKnp;P2oFT3+j8?KvX8k zm_TI@jM;pMO6I`-FKix!0k({fy>jpahdA46T=G39fLz7kfCFVnJmE@0sn0Q?5i!We z1V$NI&|ex5T)8t9Gn8ub7b@EyrY~q82(r)_wC)W6I07yMK?Jr(Hl!A&(!X#yAG%=R zjz|!}5QqWcNkvo;U`wE!mWYKyQzP9It0du>c=RkUI z8x}Z6>8c_hw!%Bs*1pP(Q;~*FQ^?UBXz!iCX!iit=9jQ~Vh-K;MGp5SVk0gxWT%T-FGE84OId-e+e1FTiemm0;#v)9tA*O=7_!k3lw(cEb{p};DD=cAqA^l} z)c9PG^-wfDhOsAJ%lE5usUYagKn= zhtcS&%#ikE6-73FspLTJT0MfUOQ?oKpN6^EJmVtjLsV!hB9w^H6ytdE)lb2r4}A({ zQ6Nqn@D(bq!rv+&FWhKmD?DAF4s6ydF2N8J9I(u!M$81;=*SpG+9SCC-urO!)GRAM z$1!f^4$>$f;xxA7qp(1gQ(svid?Wi20h>(%Q+>|W?utngBH>n+*U*SsnAx!t&9sH0 z6dZIPTk=G%8?M~324%Se@6gqK>ws+6SKL@)KgZ*R4ppYEn4y{%IPjlsmz)Kmw?~x5 z<;=N&Sml6I^7-%gHim}msP+kd%pIta7;XcM^YB!{f&6mN>dFCOr`c}!ci~Tg;IWft z3`78?QG!O)z*Kz-<#=Ji{w_LM2U*rfUSuXCMQfySiqXax#+suH@F${%NF>HO(Riwc zoYraESywo_{T=`=?dM~`B!(e!Oaf0Q?_jN1<=;!aMOKzjh6fir&`)Gk8az+7g9|-- zM32T4!2#sWxP{T?7{*&;7^}4r%hV{M$%?g5C^M5C#dokY)_aod0Fj`c)IrooSrAz( zDuLuShIr9iA_oEP;iiXgt^c3BH-WM&Im^Vph`YTtU#_)xS5>d5yQP){5(1+Q2^ba+ z0wj$sumsXM7_(UHgJu{GX7FJ-#{!PPHjH4#$k>29mhpfzGk^dIYXPCA5!zbnMYU$- z{`U9o65;dzvE2KzvX-o_>P5=vlbu;0Mr{VMx2B^;7DkkT zQCXt#bMCwEJ`SVa4}jsGyE_n^teXD(&;K0HeeQF8aep@`@MIdZKx8t?1eJ#Jgw`QuGi`na} 
zE3cz>9pfJB=Y5>0Yk~*DkaU(R0EXLd(lKqML#wO2@%Omu`@5W^ncr$l{3DCT;JN{m zgZAufhi-f73c1wRp8vAvaaO{G$1d#axx~hv(%$A8ZEk4}hi^ZONB`lm8#=}hyyt`X z+dusn7kU558@~a!-E-o)ZT6mxZPXv#kdlGy#=ibLU(EsAPj#by^pbz_V!lp)^Pm29 zU++$qMr-HRzhnW7#lwrY)GBUwqu}}nzUuxP0x(v;uzITx@1WTh`pgEmxUxTI-4U~M ziSeFMflOI8mY!X#?6prR9mAcyhu|E9fIO>fH6V_e=|~>ohmI!b?bNZey@_7GZ!#@{ z5P3@A#fZwB#-BSdSs1XPVQvwls@rO30pUG+ZBaj}VNAIS()xtw3K4Bsch*tI6sHa^ zp%Id2eTFcWC6msbn2L}e6%B>9|8qc!*~qI|Fv3@!10OTS#U>a#E(20jxsX+MNl@+t zwXmV#rNW`4D+&Wa@ilS&pC!e}DJw+P6Mt6$!C416=60n*Jj0;zKks2JQNEs-{iiz{ z?kRkJ1LK(-()FR9n4=yiI5~F|hmI~|V|^PNn>}o84bdM?FzAnv_d`}?5Wqxs4{YV7 zO(9>19L;8gV}~2K>&}yy>(nqemrz$TV2kxn*Sij;v_PulR8+tV=t`l{2K&m2#gkC( z8Vd_{fPo70$D9=oMzLbm6jUImUQK@JI%0i8mI>r4L|z0)rv;`dSwARbWM5H$e`feg zna{x1BT^!Y>qt02)NC#TwpgPfqA*RTtOhV+Hb<_eHk%|geX*Lap_~m>PzF}}(xPS? zD6(M-b!Y%eTY&W{2R{oat4pxK{m_W1&^H`G4~Lv*!zGu%43cwqQFsh)0BYUAuBJEf|Oq&7oFUIQ=ZdbvYDj=8Z26=Z9mi%6^5wv zIX|)lyxJsiV_G>@yEbYtpN}HSjjXgPI%_?_?n09BSCcb9Zy5u!UPFDMfyT*0d<~2K z2!qXSh|!c)8PnbX>2QQenjufSD7FSDrZE=hB$DI^WFtl{3rJclQW2c40TtnG@jfJg zfDmc27)mn+E{Ze-vJ7c&fMPO5O7F8XLb@|V-k)GFO)=cwfb4e>ZqyL9T1eV0gpCI3 z%Zmu+Nexnmu0>Eu0e(viUTqbGSFOI4?p0M?0B1>%{S_6MmzS2&c%Y8Q9)A>P&Yi)aKY%2Apdc`w?c;zy zQ)u&Sw4GC=0|sW+0dD@V#dKlDD1qlht{_LS(I7=6YdE@c42@cguTSVQ6|^HT?7qtO zrwVJ;y*Ga@i))JV!v!D|-OshOG6e-_SX2uxpr>yO-LgmF}}fE~{fh4zFE zq05y?H6sC9c-*x-rgB|bvM|cW`|DaB-t6h{K30{5=8Y7kyt>U5MD6)E6DouCbbab^ z1B=N5O5KAwut1p5dcL2DfYnkwc6A9N2#4U@XxoK)y7Mh7iWs(>(>V$zVF(5lNBIal zlO1diw=qZu$aIFBND^$ZY36d1F=y0s+I?=T023TeE|3v`laJ6HZDS*Apcyo9xOE6~ z$sEBG{F&)k%$GLkM`TVZ*`g&u9SIVQ^VG}*!mLle6quk-cdG}p`AB9@baR__>k?Ab z5;z>la_`6ADJw8Gs(EW)O2_fCm%SVt8=JhJ%a}B<|K^JJv{w72U-~7sX(AAVRChP4 zJ^%U7!>eBPD*VwO{gGEZeBu+Iz}J8M*CUFe84!V%SM;}cyyG3L+IaB62l3DU`A=YJ zY1uC~OYmY=kx?BERUpIt`~uYW1$!3!FCSZ18gI8`914LG z$B+AUs@s24t>EHhU`UH@m#X)=$KCAXGy4zvf9wIb0}0qcyE%;t$EWT)xo-f26ct2j zxmK$;>$v@%+wdWL_=b;{)Chm_JMZJK<98mz*ZkA}0x$adui)V0FE;BT%D*7h$$s1O zU-lsGe*Rrg+i`c6JNU|Pc=5gg3~HZ$_|d1-_UcQyksaF|51hK8GvMsx57xi8zFoEo 
zbS2)&=8j=5MCMhvh!q|AFOABBG6w<6T7T7~Q!uI+l zbbA9yOdI*tfG-@Gkn`_|M>Ejp=GxP>y>~sg%>5kAFPE5g6rJQ|Iiz3vTO|;t$tgOsq zX{m)qBO%u*wvwSuKb0iIij9MBmX%(H-j>B<{#n_=knL33T1GJF#NdT3M=SexW$ct# z>A^t>xebhl369TW1}?Ikii?uJPo@RBg9-XWDlke+CIu!`xJ?Pb&|GAt!V-I0GecjS zOc1ij2tp)e%N9}*E<~#pq1BF1ual~P6dW0(JS9q8e8- z^H3qI5e3nN8WS|dLBOOE16fEFC?;d%qY;8>in@^O4=&gm%u{N;HI$y|gAOP%Dgd4z ze1EJwUb}Hwo)h;$d4p}yXAde>aU5qMCq!xe#WY-7RF=jDNKrv9FmqF$<%q~DJqaLd z4am6&og)j7X%02$hyV*0HjvEEAvnAUvCx8Q z)_87+=i6xCb`%%>;WIcSjBmTzCc7BLwnK8U0Xbi1c`B=*9P3%yh#k8LC#x$5K-_4N z$mgAK-AR_A@(#|b%&n~UvVDOslN9K8dssQTWbc2{7E7eR&qzZ#XA2mE!w5{KQ*7^a zvA(s9G|$mGQODxaaYS)w)B}?-M3J*HWlVt7V1iM1h;eU>bSFimVnn2O%PELH!En2Q zg{2PKX&Z6SV5W8u38OR;3TaWG$O{Ifk2a3rXyXtbpIiW%<*k~pzx&x1->YD{Qw-?K z&a@jBMlI(2`b!YQy@ImAsv+hVI=DB!msz%F&!1%wgVYBEUP{WR2_uA*O(9LcEZM&$ zSW&qse(pplnf2vI+e1udgVC++ zR!~w2gkPpu*1L9qrbS?+D9I$QA3NnJD{I}o-yWAXmWr~P6XFzL_Oo-BQspXz0aiOG zys~50XF`QcG3&Ro>w7s$uKLe?H8-}Z>qReQr8z0GMR}({HVP>rWMxd2ldV9>3^xxE zdmR6DHofa4dA!}ILy3H#Y<2~&lQ>X6$GDiFJLzF-w9Sf(X)%T3`CED0M>vaB2Z&qM zv<{TFyW%;Yvgx9LK@n0YP4ooa={CCQHWq8kIMg_d`EY^95mj$_9}qazvr+6MqJ~&E zFd=oRT_+lgw}S%Wt!{JCE<2~{KkRt)q>m@jjNPinVq8aD$slC-LWX$m{m@ zrVqUO)vxAI>~?z&lga+9KD1vKl~z)u^7zT0{7L-5AN&Cp78d+GaievDsv_cBzU5o+ zCx7xM-bU&dfAJSNko!Bo^9?)~xX(n&i=X+KpW(;S@BZqq{)(-w-t(US+pH^W7TPR; zM{D~re*3q78;1`c#_L}9Iv(4*Ev2aa_>cb>XG{F>5C1R&>{r`s5x7FY4!visR^wxH zf!;T&xVYZu5sB(WFDgT2-P8J6hOKmBRO$BmT*=L8zXhHuz%MFyd-pd6>y<(?FDwAR zua`e)2knZDG7L_B+3onyAMYPvNXO|lA47YO`@iZwW|`lJEz<5r#r;3{m3Q+Oz2<{2 zeI8!;%ID*OuaMNo#Q*>x07*naRDJ+QPaWax&u4SnIKRPp1N&_+c*TQHi~6G4p7Y`_ zV^C?o3X)qL2!GIK+v)pH-_Up@aE-3hms%#x)6v4+@?-ix6*4AUeK>moE`Dma8QFV5 zC9B)c$|ljc>j(xy=!-0T$Vy5Clx2|ch9{~g2$C2<6k=9eC;}U$U9s2kMhaQzJKSg7F;vGnsk0%A!iy4N9A!09|~R>$8p5$0?NgpE)qn>j)w|_v_aQ4dqT@<4;4P&!YeNv zY6=5HxMH`kEN9-BD;x4$xM-6~I5eXag*|~8g9luAV2~g}o|DpoHXI>({S@0fBkc62 z=nW?5_Qx1cA}G~jwsc_vV+@OlHcb?u&*RYIjjrgVjinov(1S2V7!^p80%4qCWu=Bj zQ=l^!qe*~HNZ?!ws1c-0%_cyFT!1jwT-tmLt0oae*)RCBDmJtV={tcSl5#|1+aIO1 
zyOLhf5*7s-d5VZ+`NKYRHZ|Y}sz8zO8^~%;l;lHOb3Oa0|XSbkQUHm1Dxn_%2^td!3dMy5R+aXIep)r zVCP~VX?G3r@&Z~*OF*ZCBxzu2egXYB!N&QEs2vLs*Xt+-V`LZBQNuD~0@c}mC@@MC zDjw4NyK`!p+@cy+w9k2O#uCO|eFmPq_tVVds2^}P3p1bv@=1<7%}m%Z&E=M1FRW#h zW8E_rl#((a$2d!|wzY*zTbqcQ367sWjHRO;1aW{|2_!MuxHU-#$JU3G^P&rdY+PW} z9b@h68aB>vVeQd%sDZ#-JcsEpMK&#XuIfmMrm7)qO3oKCgPF33XdEVJ2Xnal=)L&D zxeKMO5XyU;8|1St*`;MtWyDl|w!lSE_~2|OmUBgY%|Yh@t1;+z&1MTHZ$HVeaQ4Dk z&Z!7&o>G=hjhfE_|I!uyR2d*X`~LRcRWOW+FbFXmq?n9y%+(h$*IGisITr!j4zbdK zGG{FDVlR7(62K`7KQjtS2m0#rHtr6~WQe=_m{CJWY1b6~JPSY1azW0M6wHeh4gn$1 z%yx8D0asl%4E03<&-rr*lik7AeD2`fT1;!3^;97UxDVq)^Y<7i?(hCw=&L{e8cpw- zV?9qpz-MU~P@-~AQa0=z!mfQyaJ&J8D!ownN%+ZGmQ-&H!cE=M+C{nqEWreZUf#vp zcpY2AZMMBC$VM(SYaRa2sV?Xa#p@4q%>a{=#2Fd{)H$mspP^v@4k?VHs-N~T>W$Iw z4{)q?jDd_Kihcj$*!{u)k(3u#3mO>YeSe>Js*QtoYuiF??_!3o^i^j$bfZ99xk7Mv zYHRi9_9+_pny>j9y!53n#m7JXaaLQr`OR;}i(mYetWqHRr~S4o74ARwv5(=SAN?q+ zBiJt2-#2gUv!!Xqdx+P(<~8`O-})_neR^*?=I{Hy@5Axq$MDjZzRX)zQKr&|Km1{T ze6$xifBrnqpTB_9r%zRY>GHV#`maBPAN;`|8tA$I?4P~f=VMeG5fa|=mN(;7 zulid2>aYG9-v9phI_TBpUTK%9ps2wzJW_u4mo(+&fqy z@ZtA<n!$}vjZMu)8( z{7(AnD`NjwSF|E|c}StR#P~`e!URbiA*TXDAh6On&S1vc`STc)&v(CvT2kjigr1HO zpoSvN_;tv8o)?NixlmU#np_t~O$+S^)2ZURUojq_d*N|(f(&;aJ&Jg`gH~dS4di)G zKHbXs%Ivdc0k&x+Xq*lHLR*_jFLn0=)kfvzmlv@^!4CMg= z3)#-?cj@h(lnpKw_DK0P9laP;$&)Q6>fH>Xo1?(kEv;g5Ae}kO0A))CX@feckT^}b zP|m@V5l{m$Wn(V9x{|lE^5WMiGAff$t$bR$ikI^=~ z(iE1ZWrHTX$q2Hy4LKSj zEON-uW`_tpTU7G--1fTc-rQV<#_di-#O&`X)VdirH&xDA+Gf)j)r{Z`8CgV)$*OFS zBB-YjaRZ8ScZOq(cebJ1Ee_$9al{!Qni%ZF;!YCOq88iT#e)&z-VlONQwrXx&sye) zszq7lBUahdCCS@bNL5+j7C2KCn?9qeC~Q%|p3Fj5Y-GUfa;DA(wn9c@L>HA=Tw3@^ zSdVGVsyXsK*Y@9JKc=bROY;h;^C~JOQn@js9mGN`EMr(3=C1QO61D`Qjf%z8RhvgTWY^+gn)M+Qj#V+%wK8&b;3v~WrGNUc z`br?K@4LPaOB97-g#)!-x5wEIfsh`E5^xGW;|NCGn%olUem$C|_}t@X#9AOG@PhaRgn)&%7g1$-MD=?PSkqe!P{Cmozvyc5Y6KIF*ob~Na6{uO0V`ei42FA&4d z*It34-~;;Ui(uNfQ(%%JZ|%kq}|SRnIGAk!nds?4A<@ zysx(|2dX(Pd#a?vgk6LV-3MH3SXkcewOu#x+)L*zblEut(mX>t&5)@n+YphZP_8om zoAUQT!zvD?v0P%SJs5zd7SvIX5+rn=7C{LA3V+y-a 
zWG(DH+~u8E07zBtNkVv~;f%sZYp*7B7v+XPj4H6>4$%T5+9qu=c&Uilyp1!xGuTeI zxlb7tVeZ$Rlw!pS9ah+YuOA&}o*8A4cHt;C^GDdAZR6u_GP_8HktvNR43LWgYom*- zSQ@kkIKFrsoqF4Js3I`-V!7f_dMeBYqkwIxV3)rKLAtlTt<;iPaC|pt=i7}BB9}i# z_nozLh=+O?@#6X8-lmr3sk`pF3-5Z@yYQ(`eTt9eg)e*|+U+JDee_W-uHUTI@AvV& z-}}9+!g$F`zWTBy-}SU=c7zEW*H~O!U|T4%k|OYe&gpnO#v9)7op|LdU&)_`)FcEb z?Cg}W>GT@(dkQRfc^O1N3IYRyk`>h3-u5XE(zxsBpuCDQWe&7Qiz+eCMUo#*?=hHowth5RXa{DXJ`UPJyU|8I{I{|Zus|6W zfB%7pZ;A@zN^P(+!2kCjKEz+tPCn-(UisRWBU<>}*Tw`5CPR<3d+`@G$q8m=#WBPj&8Mj`OQKXth%uT2@$D z39!60&p8Zj=l+*i4H+6wKD4~6cu2~vj<%_u#|1G;gVG8&&eXH zjv4}t#}jPsbTN^A1g$n|oesiw1HoJqk}Qc@P0m~h8a2q8@hX#)?L%Yi!YZbtGf>mQ z$aI}0oy$w9GRP|DLE9`0Z}(6EI?7J&t=p|tS%vNFlw8(I&Hxx1IomvCMlC_C*f3%c z2K4~C4xWQu2(qvW4+<^ktOxIhFP%e?#YW1of41VWKsSdd^Fam7HfIr!QV3nnk7E|w zImRi#lm(jI*`fa;T`r7+57mmvngodrbV* z0@AgbcJoOE2(FUNeX|MzMd~XCD1m;rkHv*W!)$i96XC|Gx#ScnlA=JjJH*=721Fym z-M1aa;S>`5r zY$bbhRw&1rcjd6k3C9X1i%%9LDl_(IZp%4X+$aaMbF(BxkIFICDJDgV@pQyF3X^Pt zv>-5@`24v&B0HZ*%P7`vWR@0F^nOu6kY$7YWs6Ly?K($rf24ve{Y@zY@X;LNvgv2+LM=Gt zSkiRQmz+T-O+TRZK`p>=(#NA)pTks5aBBG!XJgQ06HvcP3+~@UJc{xN!*pCO&(!tJ zi3jb*+o5{m`}x)HK~L^lxVtgsoc!MTzsEhzC3M2F08j6I{P=O4IB{ZTo!@S^p0wp` z_4R0-apA%RRy4f-{eOW@XMSJc1yCyVjKyFnr+m?iz5+k=LqEh>8}2>V*4FTcfB1)Z z_q*Titwq@t=cfJgOxQCaR)t+VJ} zbnMs>eAjoqfvv1oS1<8vQJDEd4?ToO9{CKq-R^Z;{N79j#);cbAfYv{`&@QSeP*=H z|K|1Ju0p85>^xevjhq25b{*^P*;8DmiY!+RM51ZBJ*IpAC)xiF+Cifh*KfTg%!4m` zkimra{=5J9l$~?3V)}Rg`gi&3zOQ@^zVS`pfERt;SFml?GqI(U6kDs?`#!QGr;ecB ztX;<=Z+#;>vi&x4;vhoXK|5#%&9tyw_>>#;;e}0RRk8oRV{~b!96*&4J#!lU=39#j zfKV3TtuX{H!J04M9gH=v%wZzbqGV#J(=;SkUtu>T%4USX2F*zaq_`r+72Mu$G^FP8#kD{{Ug7S1 zx~Ow!X`vir&=qVATifitt+T$*2+)3G;_-<}xws5Mu7-10axhTo($mjrv03)+j zW+rHy?5{Yx!v(*RpF0KQ3S#Z1&_QC<7c#T~^MMI!R;0j45%#-~{cS|~l%J!N3G!tF z0#&^(%3nX|iuBtT@Jk-&L9T zB~Vdb+X^OT)DMsq`?yL$t&)$SrR&qy-ht+cNrtR9;UH|8j|sk5D6fbhRx0D8f}zLq4v2uft?}Th;RgEF7^te43V~Q>n@DHTF&mldFI0u;saN> z^F6{7qzx9sKbOih!?tsHTRC@dg1ssbwPP$Fo(E1I$K>Dw4_>WMGy6ANpcj>`xw~i1 zJjwWrQ?b4n6A+c7Mwu+GxUI~*NarXguu1`gEX78*$C(##J;3cJkK@px7G%v#>uk*x 
zz_YRGTE$er*UlP-ZYu?YMu-(M0-z`#f|x@`W87Ikg|M06(;s*YYmcv^*-Egpkt3;v z&_NT8r8q8TlmM1jp=zVjA?Cg=kmWfS!AI$kzlLgv2~ted7@1Ca?qF3ceP#k18OVvs zs->{u@DTodx?`tJRhEx2RAb1gM3e@ojT6*^8X7?pbF~&)af`#?dE5qN#Y)&YK`R%~ zQ348kjG#Sv<=1YyGHvhWDC~^?b?kp zFpQPb>^-!)7P=W37|Lq>$dd0ru6-8Yed2|9{@hUyEQ`uqUELR)0HQ!$zd3XEruGB( zKBQ=P=R1F!vm!$4=dR1!#&y2dLNByafewx3Ceg3Wg?SF}{d&8^Qdg+UyEgo5T#^e%^3S+H?9~>my0B$ zy11Ne;n~j2v{a?dgDu!g1}`LMUvSwVWK(($`aT?-;DY}7$DY7OcMFSpo{LzCOK0X# zx!8CU+v5=x%OKd`wSp{0C{R>uY^-0vN(~HN)bx~$!nuGIaLA@M7Ar8)U!SiC;TPpI z%R*iGobD&>>|$Jsr2~9yIHol0O%%pri2f}_jll{o9?;2Fkzw!`65~<9Owo)&8i`4l zYh5_H@3d)r@)O+@a4bYVleP31OlZ~=Qw=Ss3dqQ9fDdU1%gO9O0f4=|bllQh6I zr?iTYEu#v{t}UF3gatXuzjyT<636G;B2Rka%?6iQeo>8I6q!#EKf28QooP- zDc0AvapHK6BS&heHzI^VhP;>}iX;OKSxS~FH7;Zo9NwAp_#>7wbBifNodeQCQ?yg2 z5X9lo^jork8|)yQ3=pXl#OWlshS~IvjjV0C-diMTUpOlBEy_MI8y#8sUa4(~Nxq7bWE?@ zszJ{+Fd9#7!C%;3VhWDcIj|y@vPvI6K)juo&sl-(bAcZMm5Q8wJm;|3exi&5LQD(f z-7)fBpRZA(&3P2{2-pl!Bms0Sf^63z<{JDN1*tOdIEIU^P{R;#;*m<((i}dD${yEUVh6mkBbE>UuDlpA&O-~ZL=sVB0rBB^ zh!ZPl+l8M1@9Jvf5 zx_YhL9JpV5J+C(KXw~IshQ?Jv87(5jPH%@#cEnjwwl7e)JcW2tsJv3x8H{muV+~`W zF@L0uW5+sJT1xo7NTz~R=Lu~`qPZ}zsvn`evkbFGjZ)8!RWiDdtP2>u#!`seA3TO) zEbxip=Wu@QJdR=+$!3HsQi$g3Y%>;xG0zW^{s^tOiTTzd^7ThpQD>|>oK2&)s+|&W z+{VghoL`1>oT@MJ;S#aekRWW35fjMIdcxx4XtJMvUfArw+V&+1r15 zXT?)30987Yf!G7#v#{vCkck3QHAZjT$8b8pI2~b}k1*9!6m(5vcGhGoIkpG2wq)8W zKDAYvxVacU*~`q^(*9n=8X}g~_Dw?N0;7D2X_{f6M(B>ZXeVvVH|No+HTm4Tz*Tn+ zghY7{ywVy%Tw$18N88O!sGf8o%G-}Ha)irsxkWkmz^!Y&io=e3d4=nEl{HaGw9vj# z$=OVHo|&jAR{N_sw{;Gad<0Q5HksVFExm*amZYTO??vv5*bLg)4@TFURT|EcOqTaW zH-|d429Fu!W<4g2)fA0?w19Ll!NvYX$U>kN$Ee3O2o+eXD=@K03Tr`V8RQ4;*0#Ag zMkEB%T{(4j<5lhIPp+|TrMd6-@+to1Xa5xMeBQU=P|`AU!;HmR*+?O9Q{H~peRVpW z;%#sHDFz6B;wOHbEzHWt-gwQrUa#X{{EMGrkl;6d<2UeUfA(iMckUeT!|41HxIz7! 
zGAUfhHvR20pZN^F`@6r}+d9$T-M!4HngY)4c>CMmj=S!<^RoB#OSy`oa`y9=kBg!Z z^Yb0N;uWvp+>tlE=}q|H2S3PGQ}2E6duJ@JZU%VaUc=aJ8D+(tcirj9DzJ0wut@Q# z-X(9%1-nj^yC&2t?HR@H?^k%7zxT|XAY-2>rv;j{E@z<0fO}G2uGlze2kn{KR!*$o z`+xcS@asSNKjTw>^XaGMD9L8(_kQXB#2@|Uf57Yh`|rT3|Ldh(MpgncX5U!rxVfSC`Ei}c=l0B0SCzxz@4VzJzMXULnWdRq} zmDS}7WaoH<>W14FgcSoMriiDEWdnB-$s2xF98mCWp^$_DPSo#Y%abP_c?6r?9pkwl zM2G|Jl0*OiAOJ~3K~%LuO*J5iiAKeHT1pWZLd6{pJG7;2s)DH;i7GJNT7&Mc;n;19 zXo?hKGGd0biv?!;7i)Devw#x2KcU!jbIj8IY$1B zmRNe$quJOCudZ?RgIQonIPRZksg{Ay_EK0|gBc};E_!vKe#>0Pl# zgJH+JKvpWO<_=7BHd4a$X|_P&KIpuz%_6=~33Sm!(aq8Au3~kKHY4*ma=3{orUKlQ zkIPtj2o4c;0iT97TbPh^KBI*_zvh@Z)RWoN3c+*&G3X*n$)+vkKN5geSYX0mHw$7o z>p9P+7k=pYYd4c!Vl!Rb@2kp--E6KZv)^?sVtgw*2B=UzEpp^Rh;XTi#zF_Rm1UsW zfTFO|I7X0|+!Y2eh&{(@uz-OK$`pudu>m9+F@pIvy2O;o3$)6^vY>8xJ*$(4V^CTj zI;;M1K3J6xuxB;Sg@hY6vs9&LI|Z*6GQduMg#LJftWY%LAPOYnC_+efQjvh7uT~9m zc@AQs0WnvDSg0eQ?^4*1eG90RQ2<6gVNnI|&{9430;Z2vn$&)p+!K7#yZ-H+%)EqB{D+`MLr}sKL&uhI`0yN>&A^EGWa+jjxs$3AymK-acPh12M@gea;OkD$i3Bp084znO!08tr zht2|g=)E7n#d}ke}GSWTV9ZWMPya#%?V{I#tk_#Ia*1kx;%yocVji*CP*NjBzzN`M7hzJ-Q3LRLxc zZfU?Hch9V-4Jam@86>oJq3K<^{kIDlcNV8tqN?wCdBipTe)Z=1vS-c_{1mvk*%Z% z7`XG!I~j~1@Zv{);_s@HM7Pnrc z_arsM>C>n2`q#f67cX99yQ`o7`JcC-W^u#kclVx;KDWt76S(Kzd;J>Tt;xH2iq8!< z{5>Ra*Q2`jz7lw$OJ%M9odftUvx^e`@s(ei_Y?v(T9?mQJ|4_N2kqI_PTX}IKmNb| zDBC)H;5{FF+K!!ojDP!c{~P}E@BJy>@;`hJUih^yc-94eyY@`Tzc^?I?V#Pf77A-~ zplpG3+8sg-#x`(MvRzTZlv`P;IF-%S)OHsM-OsMzB$&c#0$3@CmDuJ&<;KIGZDRt? 
zwqO+mF}``D9pcET+j+xv_VLHDy1j`b91^K*X}jRfI~C+ewZV?w6g10r8o4nTQb3rF zaOsIhaC)hZPFO%pdPtJML}!o$o~@rkX0l3aH8JxUt2rL-Gy6qvrJx{CvI@2Kv{h!P z7j&LUF_Q3iiV#Sr5?j3?(!m&`?Ht>i6YOkHF&>b-pG5fyB5t|Ffuo7gg(?YEA+_ z$0;n_=7yN;O8D0>T;|g1NvjBjj*(76tZz*5=;M8?ujLpIV~jG1ya@OR*~)_gH|^%$ zokML|^ZeE}KvZ-1W??y&;Cu$HE0!DD6pRXDXuIwzj7bUuSumdH+#f7>;R;P`E5#NR z1ZLH+y4J&Fx{5)69(SBxffO+`GPYgI3!4HR2%`dF@Isf_8I;)(Sha{YV}~t1d@7Pq#-A63?$Vf!uj3d%?+3U6^DqX_j@ z6MA6|Q}Vd)^)MX{A=37Vf$oNo|=6ouSVzx!j}P zWy6-1GczOG9A4#5MTOBUDo@j*`kES7<34D(D2(dy=<*O03rO6q78!}`LX9E8piReo@44Qn4opaQ)`PHTpO z$q)pj5@^RZQ?Pjgl##?aP^L*HWQ15=ScbS$;xQRub88D5>l=Jc16ET|)7b?M440x&x?cc0F#{w`q>am^)?ogdCtuUn3PeN>?7EXDP zo1RR|g~EatL`1P_O(9C#H0`o4XpT@O--*Y8vP20F(3x|=3SL>>OEoLHa)iA037~U@ zT<6@U38*aUz@_velFb;mEgnZJYVcAwi4)G08t2m+0(1`A{#!E)(1=3x^TMxt9I@x- zHwR+mOg?+KcM(7R$e-dpmp+MaKk@*+`OrP+gvpEo;acA*_^S3y3yucNB!pzd>#lWA|T_(e(cBa+Sk4ozyJHc&p?LzOgDNR z+#b(?7xX^&-*>+UzhTez#c?0~VrtGG444m37%CNK{&g#_UtLyS?KnL-Y0t+92J;$D zapCQC4<@36_Uvn9U-Q;q`yssO>%JVn|3Cd9&OdtLY5NTH-aq$U|07=a*6+aU-uha$ zx;khF?VugBr?ZusDqBPtu5a57kF=it%0;c(kmm|Ly3%Hf$h%mz!ZNFABP1wgG%p}% z!A&w-@>o~aqne7W8z4ZYC);awwfbO;T)rVTAQ_U98Scu(;B|p`#77mtusC z(BxxKGBdNQNFr>!ludzkmJxX+1;!0$V(>{VY%#V-fV{HZdtyV1vPw8nHlGSw;M7{qrz95hzY55h`~@{b+wB#=XbENm18&%n5I;` z)5wKpYbLUIVTL1z(AuJqvY?(as-ev0VHf}-=kHuPDNNX|bcXkq?yh}KrYHxRDjBq< zSYra3xu39g#DvFI633Rwo?v@B#jwADVPE0o?JMZaH&Cb^w4NXc6;f@-lBt#%8!2Ir z%i4@cZSqqBV$ss?DbFUGB8Ea>b3`;-4jJ%GmWi~MeQnt7)j6bE8}ggd3UFxZf|Wwj zj1r?VW^viCtPg*{&i!)fo8FSgu?S(SuqG^ zz=zpFpJOunY0v+Wy|Zn9odGHA|EeiF+7@`FPp=W4;pd*@YTkuFC7s5c3j-Djll8&B zH1n{uiX_TNAi#zu$|LumMtyD`=RW)oxU{x|%((M3qdV{2nE{{ zv64`zGMh$JZg@AX8Jnay2(Y%^#pc>3j?N!JI?W+!5>u69doaNIXa~J?iq4^V9J_r5 zjrka1VpEs`UCw`6p)~H1Wzv8!eq8LkZ!elroSC!}#q(ETPByWu$&x7mCb>dwLE_#Q z-hs1^ox$p*b<~fpAga|kK$~8JvLBK#LaqhoYx8Kt4GfDhLdvV4dBWN}^7sg<=g)yc zC{yb_W7VnxU6IxXfg3QxT1e6TKd`^!0Ct-zAOkkC12Z=yGJ6ddI}4C^O4J*5_B|)2 zEvZH3I&C!Sb)@92O=H{wD#X5>jA^_jXw1!FO1UdA;067i9xH6nzGUD?S|!PDCaGne zioFZ>+5S{P9js5c(ana)$mcCE)iy_VP_}68)-BrR=K$rL 
z#DI$fj)W?)Vn@s%izwemlqF^uV4Us=p|uTx?Ps*MV^iY{u5<$+V*);4%)t3Gcun4t9nw&P`PylTQmO{6&uYbbRT;E z`seYH?KAkY<}zN`IcgqfFFh$4sHlId8fP{RNBCF&>SvH7HN5V1uk&_N`_06FwMnWz zw@OGT@Av#jF66)Y&EJF9zy7uOyT5w~4?p}c9)0vt+;!JoZ0|((k_R7r5Q~e8HvHQ? z_j2{cep^|eH!qYVWkHdY61UxU8zz$ppL@^G1O|%2TcfIbFfQYO8d*(TN-Xk=kqTmp3abRC}(7p&75lvtI?JvXgU;cdj zKmW@gak%$>auaTLd-t#XdknineD}}1>1hKogmrD%Z`&Yi{DXGT4%!z@vjB$KHLA40 zXl;{!qpXL#vIWX2rCC?`w0`e5Tg35}Ffd_+ya}h`9OXX{ctI>-R%8@5i@{Qg8Bj3A zGD-gIQX+{W91d?ov)RI>M;^myr^`S_RurfgIf6Ka%xb7L4IrUN{!RsoG{bm%8%2K` zr;jZm%tiux1rgC5H=zb;hN7z7Y3R0eo1%^8l zT)fc5`h@{TJwRtMirj>Dhe3=$l5coS6;VtO8h-fIxlqI>X+JXtUAPF4(-{K_M z#bnDUqYzpuP!o8uFprRw7gT^@C0JK~)L5M>az+c;YA!avrX8KwO=^Ie`4ZPElsaJVra6V0u^_R=%Rq(YFyB>gYSfetR8rv!DL1AWnJA0KqdWfredf$HMelr8^Sz8Sf_))(N z&GS3Atstqt1RI}x6vNM+Lp&LisEBWj#deSFhnP*@X+qXx2+D{FTt9Z;T4}deQX}{H zu-9GatblNo-L$!lwe@wZEv;j2v4cQM^zuHo^FBl?#PO4dFu&YEqaCt318kmUU%<{E z(gh6&;Xz+tq+xV6<$JGDUPxA+fv=q=Cuz1tDhL>pHZ(XWkU^qzw1uOmkKycvhcUO% zL3_S#)Lt|$$f7U^`MkGl9n2*S46{B^FRVSF8B<3dy=E>~heSJZr{T(jB7RMxnaUteihUtz^#-%fbt zgv+Iw<|C}{TtaWM4N1x$p*-`M0<;OdP*#yiYY7TyCs1QNo?tW>U^p0}-|b;I9AZ2i zVnUX%;}k_!*nA7k*MJHmQ7uB!sG-qnU~Ykwxoy;2HPq?}8jYIi(|K+H`$Dok7_BF1 zT|)M31m>=GFQ6VJIJR&Efe6{3-!Ur=0E}DTVkyxqXL|!)+<=wqep(5F8{4-~hWxt; z&%v`}i@*fRl^72DH~%=aQ;Go_8yon6ANT=$^rIi;OpH^fPG7g3RJG08NnM$oRSK~L z=H}+`%2&P|uXx4FXC5oWa}DXo<=pwT!v2ow%E!9*3-tSaR)x@4H5cW2L5#gDfnAmd zwfpbC9}DyIenw{CRREtJtZ~l>Jh{&*{Pg9oec8{uGVb;|rrpoI?9m#iDb4oNQpzWP zf}cT+eE*45anQarTW4twZ~E!~2H*5O--ti|gZJZ4|BpY%`nmO|Y;0kb+01OM?LKlDa41}1JZVheX63_` zqS2B`?7ggQ5yzD%xfo(A#?D4c7K#IfVe>G!X2IHc!?-790ToRtlrSW#BeoN%VIet; z`OZAn&t1a#a~HAI>miy9(VAL!r45Uzluyxdd^$4vB}&@=o^O{_c{S}1R!z^qcXk)ZMy`Ftc8_Z7Cb23SAa$NK6R z{p|u$*AWIyX1PYf^)@lHp*JsJi}K#OVpm~h#!tunu1cFKeOVoTA>fKJmB%`( zDP0LyRxU7GdFwMl_Ft?{v3}Fq3)rm{4Fk@8PL%Z&|{H zGJ(5XR90*QLBZ+-C>2@C_EKcGLjUpKtO7$+GBONPHSktYY`Gy#q0s!EyxJp35gFx- zjQDr1cEcM07LXLI#vq_2mN5d6U@|F?L~T6rXcuW(;5qjn#@s>;8e>RI5Matm3=X4* z@i3>u3fYs}_+;lYMS%(0!x8G;AzMi)_SUxA0U|-?nsYfqZPJ64-GE99D(LFQ={oiw 
zGuWxW{CiZWW?afl_&H@Wkjf%zHW4BfnY_B>3D#PYiDus(mB-H5f<;d4To125g%QKY)FVmgOmOP+n=bQ8gQp# z8^7FU&q!(Q$9!f`Ij+#!>^v z?l^|dN{#byq|J1qYtMyw*L7SdzCdaEf0<{In7J%`4^&ny76^pSI!KyV4cuv|1BdTD zg!!`_96h~)Mnl@;w@NFr6j4(Sa_&TJtWGXK5wMxm$mfa2mGo6+tJ31RJ+Osy!=}`> zGUl>*oGM6L1qRK_Iptqap|uDYa8XjB5Cx>n%lF_9Xx>xUu^htm`BptZa;T0*V-6de z8`#>WuWbhP>$Ms)(*uO)4+dx^^Jq3&hTV!XGlbP{(x@YaQ%8CXjqf6cv*FiW22=|I zo2G8>tz;)ifDs+3(stURP<^2=(GzTCJ6M}+VrSap!0v*^F^^m6^E=E4QXV8SITNMg zg%uP++x3}w59RKu6=j&Ycc}98dE$y@Vuf)RHQ*_K#v-ydgND6NDk4_f(48RA5VZhV zuCbYRF(!3lTVWwtK#0iPUs#z4Z~4S?BCm_~OQ#mQyLe^rUH&@4W{T|gR-buH^=~r> z{1uL2_if1C+pJY3tB71aluohUy@bu-Md&!=b5&TYG*VblpO$v1stdE8p7y8MTHnU{ z#SJdvj(bFo85nYbf>CU+!qw!56`U7iPk zYH?yNwaO9e5RtMrt1M^|=uf&hyZr=`R*Z$l91=TDa&=H_dny_cNlvsIc&OK(4R*N2 z&F5j*K|mQCAM2jSOXg3^aM`H8-*eABc<7X_MTWJ&qepbJ2_EjRnl-8eBd(&%cL2oof)B<(aI0&pFS5b&iwRRT0{u(Cv7|Db} zK9^@vS(FPZ z`oS0tOYibQ6;i1+NZp?z7vt@P$5fRzo1#h$MZF46EN2T7_c^o8pi0EL9#X z;c`9OV^~(3mUi>t3JjLfYJnI6Jje!0hIEc|q+HF<&r(x^GtEz zrs6`|6nQp5QH+u2V`SNg*_AXtxKN=oew=onT{}Au$%P;)j!{2#KSx2t*(#LMN&rS6 z6TVgfd5njNwdar)=!l?;keTIzS=19K%5flwLWR&O!MQVi?q39AbQb3@nT#QVF)Iv6 zB|-os74pbyKF;XmWtc`ergDsinqYZ2LUX%^kU$%z{TgOBU1xeupscluFWfs{bss6~ zI*uQ2*i|^Hy7ovohQBELh?wm*x&mHtCJFWDFhD}!MWf+8!c`cS>~tE~WVazrk&@>r zX?u_~eTQuDNbST`ymZP%aSA-vi9oo}1_q1B216dZu>}TEQ-rWVk*Hki z>a)tnyFmM(e4sSvT-6GwEJM^3XnolU#O)e3KmHk9JhO(yK%o%?m~Ln!MF>gYcq?Y% zBB=w-T$mdWi06}H#@@gMtMgWSuY#VtpSU7~4e2*lFJyD2aIw3FyI*hzHjC%tBOm%0 zo*13S(vc#dbz}hn;ipvV3kOfO zuP77)`*Rv89J+f6_iW#V&Y{HIkH}h&=06JltkqDAGPHsQnzF`4bfbnTEOa1EK^CP| zrMf9uL&oO`Y2|Hl7+X}zKyGY-m-G9P`ptA2Vf$Knj%a=i;sik_;zCWk z-Ne@RItIgSj7LMnQEb3Zr7_p(AhhA$u+svMSXtUdiRvk@_E8rCZ*3FJsG_+%V#Nr7 z-ZZxd1gPc)pdlg$t(CT?U7R1TVr{a4iI_sxf->{p&Z#shh?y~0QI*nrh1N&OUTNjP zH5cY>*ELl_037q$73Cc8OkYZI;mQE#g6I4|NrcvdjQUXsYn4R@5DB)eRH;UPI)>_9 zf^G{eCJTrN#IRtT$we{hp+E*RjNt2PVh<4G8m&|t@3ZNv%yCy}hCptvHLXp%!#>V$ zok5BzgO@~VpuRw0K*|aVGp8taXw1>w>EiqsF5rnr9z%Dti$EFMwmRil5#dB@G?IWb zTqx5b&87_K(fVSVO)yPUCMnU{a*|H5xwD1!jSZ|EK7^Iw5|&mLu`u63qn0p88?Xu_ 
zH|k$IsTU$QeRAh4lIRX3wK{`(`D9G;fEzfG2W`LY!IcI4zh_q&P}=txc`}Uwdgbjs z*QaAEcGlKbNOKNE)!yiq97g3c(;ALKte<$|3H;`7{w7;ERR^bAN?vK%GPBIovw+8y z+HRm@Rw?l0GcT^zsw$DKtu6fVAOA6bZYkw8_g&ZeJk{5BUn?ssc*#p%a+x~iAA6VZ zh2fSbN(o#^xnj@JF7QI&a#W|PvZ(&WM?PZLQbq$0v|DBmM|&E#UKZEhgFe$6XML%s zaBLs6XLO??{Fi_23;2tQ@@F48hfjXwllb^w{5?MXx1YJ?fCjz(pZweRF^JJw?mVTZ ziaz_|k%fIf@7(9laVYuXk;PkbT>EKfK6`fG&uO&kNS*=U%RxJ6&y*H2Dcjx{hnqXd zMpI8TR=Q-2R%?Q@1u46!_0-bYg@Sg0mCnO}ihefCn{9at!zMF%1IpS33G+MQ?fy-1 z(uM$&7jS!V9*rc#(qa>vJLl0KPB9n_P~?3Sq&^6zNV^+2)(H_J1&WMWmL>?>1%?a% zS%k~1aO_mCDwr(&ckQ93F42Cfrh$6z8j?X=`ipgM#qo}6n zfXspzc{W6z5BaeLKhD}o!P;E$L0RyEvQG%KK!T7|H3U>d38YBahKv9V8PpJ{6rs+z zutfcU0SrswbXH8p(x2?RB4dX{R#91mi|6}X1i$O<6)Y_+AR{&~*+Z~ON*RVNg}&#R z@x13E7OAUJ1mhuM`n^4IRA@K#C}B4r=DYBdiy3dy=f_;B2Z+kQ^bDv9RB|-zbr!Ba zR9`}w$IxaVNXYj)Hp&V2H&%ivpGV;k1833ifGsUtPKp5+jN4OTSlz-h)hP(N-Q?HI zCMl0e*v-AD9F!>Xg@8aS9_JeI?T2uL zSj2Om$Kb*irsE!(^*IFn0fTQMYe4G+vXekkCW;Dxw6Lg)xgf&o(rkha|4YKD6sxUL z;L}FwK=;AIf>m4HEv%eaLevg${|lajpdNFU!0o3_U}3qyOkx>r0U z6~Nd3UzRiNl}?#^nlgdHA$jO`0^D}`1os<0b_OfWaT)O#DL4W&=pL9`+X(_}EI?cK z{n`0ocfUiWs8ap6`EJ$|#!^aq)f!t%Ndvwcn?h;rp#0*cn@`t{P&ayN&hX2D<40d6+}Q()2N}marCeHVDbRpTC^K+(&Y+UbMO! zJ1+^BBce?>y0h$?={7=mK%Cg$(!?O~J<00JcvKZ++a*lQI4SfBYmF7Mk_$#96q;uY zMnhccU4obsSgbD~(xJ^;D=n7jxEO$Z7Ps9&|H6c%@O>l8Q*89s(H-`nV})Xx8t^BO zmOrC;eOP1?!`=v&E?&eJ9)Ap%&Yp*!nRRwo zA63=&C8^QTw2%-2AtVGd08ilHZUd@BnHD6#4x*<4cY+{gAlXg5FiGD zU_c-Vov77!SF5`2E35L(xFg=}Yya==FJ44tRaa&m-GcPC=*)_Y;~oC|xqsjPeV?E{ zMC4JTt}eGg#9E@xlVkJTqTBDH-D{&0bP=QhWT*hLZli_3576zm`8AZiF+Dkna=E1L zGqyS(WMjt)MFB*ikMkGiQ59v(PRycMDq+1-hvhim)aUD)rQJ6*mf2Ch6wL%CCq{{? 
z3z_J7A}~6g&JEwHC!Tl$k38}Se&H8>0YCYZKMB`Wz|Up~g)37`Yz)v}>T|9~qw`M3 z_$PkiC-C{te;&D3LfdV-&OYQnAr$GG-4ZWFr$J(~ND2)PT+qc|$-6kr#C6QU?xI zy9)Z^}pD=LA<(8zh1&Vbdi0 zW|1ie$cNllp5;an3MC>^T_hpmI6xfx{0M?JqPWXH!?1%O>?%Toh_|$(t)p}$aURsm z07fI8rgK|{W7pt19vp|ri~>Bj4A&{aamdP{3^y%vfV>4FHe6O+2oW>*NELf@{%tGG zKt+dwtL-9!euVR9+i)rG0+SdUn?~#_bqQISSxk(O+9v=g7a=B)5^yLm)F;2=XcJ?{ zi?>0bufYe01HH}lomXp&0!D*Aqd-uupl0)TlYB0fvVp3^tt10d2@p)PaYE{gKmndS zW+fuacn~=doBZlTr3^Pt6xgHlRn%ppKTliYm`jGfpWS~_ne+V@xdBM`2BE6QGb<|& zk6m9JxIX`r1(icMGx{8Mf=TT8`EN^{_`AHkZfVdhnL z!8^N%^<(qsH`g&q%E35>6Zwc`3AR;(M>d31%}zkJqX{`pxTiWeIn6?VQw`VyxsIC+ zee~U|yrW8I*Wv1D0ugV$?N%ns#wMz`|A9Lp9H3Av42XRUR2S0N`CacO&9#7e z)Fi?d%^h5&N)(2qQ2pGI4k~t8DJ@fEL?!$C=j@-$H(+H>@J&R2*sd7(44 zD0_&i(m5ML6YV^Da#?>mjD84_1N(K_5C{IaoR8r;HkyqV=LJnpOu}^wI(KRijGck| zT75bw-_0o&bg;Ljv&Af}giy*GMU2MEn-3NN+X%59wy@&YvEEsSPlXsBsYorZo*&eX zsP9VUAdFoNkx~mxv{jT*zDbq2K;sOnKG@zcW}+a`k5sD@Gu5LWUpPUTbqkh^j!% zg1*0r+D3iV3;`-AU8#}Q+iO@@xuEl}QdY^)eIkh|L_cLaF&Wd`Ho%z^XK~{AajdT{ z@t9N~!XirG2UHvsA_!uhvyq~Q#udv=P$*cOMNqIE&LtQt*Dy0VjYhYLdb^HJ&||A9 zy7$GsJ{oIa`zp%V2%{LYGt;P%&7xF&)Pw1thn$6~?Z0&~LKxiu@At5^A?;&xMn!EI3nyb2Uno zms30S{AsqG*}H$wH~qeBx9i(n&&A#Yd-?H}ANejOG|s+oj){z~eEO?6`sA@|_Wh6l z-4l52TVIXpMD>QA)u|nuCn46C>iEXj4&%Us`(M&AVd?A=jy!R6)9)kFrciomg44I# zc6&KCCNK=K;5XXZA_3Y~VAK!zavrPZb#nMQU14DYu<5&)Fdei`|p@S*-7CfN~SNgKlA`B z$yI!e5E<+_ZJ9~hVD`lU%(NJHi%1fx6fC0Ck8tMrS)4uALBHi7O)GFL4;EDtl6eCA zd7J9(WDtQ)#DG;q3RAT^0BF@hHd1M*LRHvmBcPDgsD*y7!N2=NU<6%+v5zzf z*_ujFz&7!Cwt(w-3MkUSuWC~j$wL0>`t;goi29Qw``}a@5nIbZj!#_%3f*|7-l@vi+e$~P@0I4VFXDQ zEmTdPXyU>U7;<3sqH-{Vw)C=;R~c7!YXXb|dSe~TtM{EUe9A>QM07;T-i<24$*)%h z@|sdks>b7-7dMbhtKESX>uko!|5fuISc z3B0w3ZGlvFlgS|%wU8QAH8P%V2?SxxU@*PE)9-SnknIUXfvTb|(e>*L?4U9=BveR0 zXE1F=A+v=>+e_Ojv{JKBbP;wUMXYEAiP>egxl3Kn!JvR=Dp(La^)RIZqq-}Tv@%>h zI$BL?sy|1`i&W=gP!2~bZT-4b+hbW7$v&Lxti}$u@sbu>B=QI+)pHawO@w`=OxXZq z>U;|g#+W(I24j@T!H^kwVF(OuTL@jGkqShVYd}O75frvWJMCcAU&E5Wj!xP`>ZyK} z=$sHGjMN}iiox8A_FtG|W|VGHTRv)XCyrFs5(Pn1_F3Q~B1@q-K^#S_98C2+(qt{A 
z`bg3E=4dr5Wn4J6%K_+2Vz9c?h3mLXtb4+PXStkj!SfoSCY>B6PQ)Q;1w5Pgr^c@U9*(?dGV^_JI{(dsrKi<0wP=-85)lXA1zx-$V< zjV4Y!e+EYnA48+Qj%ulliScQ8j>}e|L8lKt?4#H55mIn>qBKPg*#+uMTbEQtV^vJg zPGfv(9D8T0m>8Qty;H|}tB$VU<8w=4;mvwWTUsWAYph&Gk!=<|E`%T@1{FeBu7!HL zfu*$-OwCMkt_EciY`5#*r|5C-)HoIwS4Zm{u3Hnib)A`SCz*zHc>4|$Q{ZR_Pf_fp~GdE%wt@8eUS z`V@ZlXMYw83k%BXIoBr!@42$xu#s}!tk$(mrX7Fg1nzmw-Pe43 zw%b4LMudh@Xng-qyaSDu2A=)WbNIraeF8Js_Pk#0(y#D(i#ng_?TToq#Mq}sGUw9T5PF~pbr|r6R7uO+iow% z#v$GWrunq^Y40aOmjUZ6n?Wd0frDD5qmNiJ@bn*<48NGA)KyI{ zbL7QPAG4yW?5f>Sep(}<5e>0y&Ky}OjLz0#e^aReHZsWqd4_M<`Wf38YLSE=| zXg85U1pY7x)Po;1;Rki}`l|^1Iy(IZf}q3X1%b{s1wPvj3Z$5DJgtZrR1FDZ%OS0d z$RK}t>KYVAA2G-gv+YzA28g0Q9J_-s>cMj=C>DGay_zC2WC@bg7(#hk)A>{ONfrZp zv>iz#5fWg>wo?`KyD8?+Qjm88x81h`c9Dph7A!Yp%aADYnT({W=8{NJ?1zwk3l=Ga zuBifJ1i-`y0(pt|3|>EMr()^qWExM%>V&O2C}17R+k?ndzUFM`FhY}mp-?A7&QKrW zkYNS@zd%60ZWA8MRLDBA%51ofCS6nR7yQkZv9YsswuO~dj+xQOn7m|H6@f*@-i~8( z66yp@sDw!mD{G5L>;yA6@4y6kgcdF3FjrJ7Jjz`Wjt!eKfRYHd{myh82b^P|jwg)^wpKLQ7%PsU;CSc}K+B(-=IqMy8$M7gy{L7^2G%K( z^Sc}sp05FWr3O*wK!sOZIW$OtUPC~O22(tn)Rid%x^bAIQXYfnlyv5V7*+-3L&1a| zE2NUjh|G;r+eVpU0}e_T3jDJrt8Ws@Lx>wV-@1UcXdQh!f%I%vClDx|aFwZ2TXN2a zQCeubC8M~YJRM`D2fjZiio8f-M841G%J2K=w%TYnn&>n;=y!WeV9v`8kZ%C684?lt)%{iYVDd6s!VLvNx0ttCJj|^rCbR zIx^F*f9%1GlBCyyM%Q;$8)`50UcO+J>Chfec6-9>vz zANAEH&Y!!0nVBi)9belb8lL&3UN`1au;`2-@ zXxc`Xizft!=C3waQK%K*(RkQt{1ZE8FViN0JEzC+cS}nH(J`*GeU2VG2Q@ze%_!I) zlv&{JyYI#)Klvv&?ARF+BjUN1YQ0_$ANj~f@Z59H;l1yDFW&N&w_t8=U_-JIp^&eI zy7)V;?KWR3ZzDA^Y5H$TqV8vB&z{9^{^ozfhd=yb9;b(MqM+1qwyi(!@{@v%iUaa% zH}G=X#vOOufmgormAWWM&zU6fTyq%E;N?VO5cs;4cJk0Ewx8Lyk=brrZFC(>&Q0Rm{>AGV z$UlGlJpSar{uw^|iO+566RAHQd+PWNCom@GCa~xB-J2#bh;SkD<_-Vy_1A6NF4tD( zSMZO2^|ejEkFNOx5AEO7dnhx5Y^OHc7SAkR}SL{``bNIZI_?eilqV%e5g!gWqy&r<1TLo588_biZ+S4h`_BoFO?{i;){tc^o-+v^D3K9ZQi!2_g)0J2a)nvTKYKu$*)vC}>i zKqg{HB2+AoL8~Z^GH}lC+cTH-2kp%-Ne)Xt9( z#!p@i5jx;oq&lV|&8oS16KEl&7n9&~twX^xLUG{No7q$ik-=hM3uDGs8V6WOm9RzG zT5B27O)#@(4%OXr5N-nLro?mdxWV7yIeJm2QHL8JqC5ppdgwB+YOJ-SvN9U7AJjQm`m3^`Ko>kHmBc`SVXkpjr{w~@ 
z2kMMeKa|~JYSc@XrH2Lm8Ab;wrB)fLe6T185%?i1*J`zK&L2t=$r%u)F)@|RGg26e z!tgU@yi%VlSd~e!E2*){axBig=%;+qP& z>d$N*g~4nkQS%ksu{hK$)|Qlf4}s1aqk9Aq6a79D7R`DC_2o6R)>=$n^x7TxJ)h5~ z%C}Gtm<$Rc@#!uRZt;S zAc?`)YDTDQL8$v(YOIKK2F!A26(y^PVyVdeCe?)p*?4sk*m#|aBR11E@kM5SLUSp~ zzPPY_0i9l(!?t-0(&IC|iY%mh-7ZcXIfXMP&)~Lwx8TA1AH?mq9fU1htgSZC?*}M) zE^5^(4=+T17U`ZwN*PNa3=%E!Q_4TSAEYT(m+M$tU&qefJ8;v!y_g!CfH&)4VQmSE z!8(tPNtmG9?(iC+RIadn3=vGEu;DqT&cUGTfyjP8^3mutIbV&&v+Z^Rx}GI)bCr}a z7J9L&mKWm25PpU%p$lNX-$XBtFy^}1x{n@2U{JpyOQ#oWm%5fWqaxU-8B*hGU;7%q z`qi(ZR;%HMe&~nr!4G}_J9q9-_I*;Vg@+uJFSFkmbtu?E%PTE5T56AeJRQTI`I(== zU;Wjmv9hw9Sw@Jx-ahYSAKW-c4P!L)0!$17PoN1!w)9U_Az3d-WPh&mktMSFy zLF)I8sxR`NZ4D{)HIX4MUz7x01i&*Vvfb40@f;gFN}f^+8uH?0+9tLbdi=~6o?-9& zgAX0JAv;Ey8|P1)zaihX-EMrN>xOKm{@t&=5Bndu9slD$eSAyd->Vl^S;b35EH|o^ zYh~Pe&n@`MUw(Dd-}r?;`yw8A^gbqXZe*i*I;md1@z~)_e=DhHc2bVzCK{NR0SZOqgrV5fDsww_*F z#zIO3Z5p6>6aZx`o=6cHaTub?@UWp=e@lp?B32hWIB|3ZOY>dGqyh&M5J>@P;=pxP zH8lk#QiU2#h}o`%7K2$ejm3+04o{}60463Bh?BCJNz67qvDEnqiI0dta<(LjRN$;i zKW9LM=4zCO$w8VF;7oZ3q6F72!xlvd;li>D5M-w%ES&?Q!KIi;p_J`Q!XO2v=$I7Q z-@W2ZVEud@$LtHZ^PwFmjC&0H^10LYIzl_IGxZ{Ln6?z79pkcHNPWU379M(`ki%;( zdN(ijGy5n591dq*3=|h>MpR^Z8L9bvnG14O{Z4(Qhi*5;vEvJ9x5$&afH<`gMoOte z=a9}<7zc>_g!3%?a1GtwD(a0Tw7bhlk~VBRM!7=$!R71}%B&y|Kx!)>OZaTZKz+t_ zEcO&vY77?%+Yd=?@4mT*Rv>I=75V5>#co8(j2_~of>MEk z@Cke>GN4B!8(*6iK_>yL1Z-%}cG0YjM7dSDC@S+iMU>$O{lUa$(d*(Ji3R z3sCHL;iLg9CmqV8$Z{OAv!)kK0@=?YnM5v!J#Y1vugc0Q!9;Gn$$|xqMT^X?aX!~{ zJUEQh0>V&{7p}@IVAWOTT`$;&ULlNzK+RhuXi~|slx5CHw`bzh*p->IbS9`Q1;83Q zgiH0cN3>bI>FY~qjaberh%2tif`*k{lz9>OXtdUVVvL!+b12PCKsYgtN#M~r zjwPH@5rTr@i?+&_u@x}QU6gaCjDlxSnQzD`x*tU`jU5pp8Dp$gVQU>&v3i|7usPCP z=AgUehQ_VIDI1`|xn}-ifDTrPkzy%lTg2Pr2zUh-X^=7@E{u&4E4s*{um+Dv^<)qt z%^4WWN`Hsnie_I+k0S8-ixCS>0i&Do*wb*O^GF zU{l#IvWXPkmlKsw5o@A`t{c8vSp!Qq?jJf6LZ@tG6{^`SD4#zq6);U>I%hpN7E&ug zFYaQ+U%{%simvRDGbUSUswtESI#*THcd>`0M4+&p{VZ=g$v%XtwK&=JC8X4V7IN7nKMEy#R~n z7SXMD@v8g21$W+gfHPVAUVu}lPvYc>ljwH3n4OwI>_-q$fCAH9MU4`Z%_ZX6HfdDpqar@6jO_pjxkgJ#^vxaSTn9S$aZbkZ>v(eBp 
zOge0h#26C$jqKJasX8Y$j(-?n?xHZtVl|K(481-IUFD-*=mvr!?#-~Hid@!XdW zz5EW~cDtsH`qFp*=y##p>f+-c`h!h>^vd~FgnoEK0dKm#?|bB4RwitgbfMgi&wk?b z_`ZMpy|5ko3f^&T8W9(N{d<42rO&$kzS~fns@;(7Ja=>+{f-ZJYMa2=ZZGfVu!m(3 zp|jM`s(}LgBIhe1u4sa0z?ckq7%iyIU=R&oYLI~mjC7DapaB7)vk9PWbIeLQ)xbo; zA)Qha0bp6Zh$6_agYlAphX5Yg6zD1sVM6CbK5%nbvTE9pziUd9r6Me69DXCg{JCWu z{l)^?>tyFL4$q!I5*HZhVjV~nBee!quq^pitO!r2sB)IWGNG%eWdI>U#I*E_eMCu* zA931;A9dk}ZS=`DL4Rq@ysqL6%+QcSzrl~h?5 z1tF;vlvP z2rVD|Fi?b762k9=2>lL{xB{72C=v+gQc!GJ*?_U!ToZS^F|o3vVgDo|HVQ0ONc+IK zlZz-5<1tyf~iDKRBnYdmGqWY=@{r${m;twS7ugmdH~{aAS2`G zcWZb|8m#{Gn4t@gmCaDf0AdmZtgMLxAMJJ%qGVzEra2U*YYZxpE{C?0`h_$~5d$>n zZHkaHjikaZxiEqhWfZ{N571bzWB$}URxd0g?M0khFy@v~nVjMM3;KP| zOz`_%bi;)20i4y76)R|h#dZ1@p}_dvM#nTM^0-^;R8A^<^xcU&P{t3!JYo zy<-~VlT(!Hil-z={B@$Lf;PE|=Y_tj%>V5-Nr-G7G=D*pO>DiYB z>RznxU!#^+#9VIshJR1d?X~$KwE~wFvbuMYnsa?+9ld4`58d++?!4n}e*XBeqd0c> z1*|VEqwINj#lbsq@b&{Ze(V^IIyQV2wWci8l5CbVARM#%t|B?;9^P8-;7ocBQ5a)( z*9>+|?F3rDa&ujA^-;q8s9Y&=6?Vb%l)aj?*h0?XDjKQOyg{D{!mU*QZMV%g>Qj@R zhnZp#OMZ|orLKDuN*Sn#Tc95G(Fg0lNc0yPS#O7ckUdrwx$2Q&{@TN(1L5f zI1p&=^QK*FH#SUA8++&c`?JRG$`Z2Z&_Ic5!NIs~4GIY^QZ>JP+St?>rglzl$p`-* zKKTW_@=dS6{oi`ub=x268(;d=m+{FD|Ih389oy~Y)dFI-*))xFW}Vkr|`g|_it%ix7&8RQH_i(*dn35w5IGVgf&>`Z?q8`UhE?G z$Rm@3zZx{zKtZPJ4l++%-G^8Ne-zdbwbd?RN00t zLoT4Uv|USnV>ZS|&16Sy9aX4DAx#*|uuwyDIl!soOE__41wp5Pk~fVsDj|p*_QWQ@ zSEgMuMpwSp+8V*sm9o7I$@|%tHDx;x3T`IQ8dku-cRy;N)m}rRy^OBEj%IrmK}-SA zL~8VTpH(=i3IAjb5(-TDR3|MYj3!{Vsw|#QyO~ESS6y(&6xJOY{NJ zR!SV+rX(vG_M8HmjU|*zHB3!Sa|I?UHu}93LEne#lsO9~wF;1KfoTw`Yz=L)oC(km z`v?N^sO_=RDJ^&?5=mK*s;}A#IH9mlb#cU+#Ire2CNUMssleKD8z+wg_uO+3E~_|5 zX%s=o04@<2{T?fNhOBb(=kTI<-s{)0^FDf=)0M48$t4_6q%FQ8= zmGTgzpldJU^95bC!JucVPLkmQq*jV$0p`+p35m>VO-fzetceS$!8;hwdXXDXO{jOO z$XI15mmrLk^1|^f%-pgU_PBs7(91(rg>73};bl|>$d-^wc5nkuz&At7DYmRk=_0cm zg=7v}(Zw`!lW?c7ql{8{H+IdAqkjAxR$n-a-s&3WCTHRF#(}XaWTnKUB2ov!Q(?#I z%j`i%$s!m81KNp5P;WJ`ddo??8)!wZB^r_l_MX7j(FDlYvv)+1nHaEM9%hN zgZobQck?&B%@AhxVH6!A*9p{h#tKxvv~{i#kpWVoLJAuq5H08oAp-&%V%FG0soI3> 
zZ#vH_MNTQGL$Lcvh?VXN&bJrPmMzFa3ag-qP3X_EGEPP+a>SUnSeg?RIxB=MI1O2o z#AvOq;mnCMICK0AL|+Q%6l`fDX!THET*k`cGMbGB;xK|_Rfr-ll&?B8nQn*~ z$zJ2MkfM7~ZF~&lv*XygZ#Qno`UcD*u8%@=4w;$`vgjNd5)4s zF&d2)DwPVV6|yX%f(#1!oG1)My23XeVZ7(V~` z&*S|0^O;o?Z99=cQm=Cid9fKX~C+$PH4MbeOo-U#5S=6u;0i=!PYzX?btMd@lv(g zWEBs7+XGu7FsN_;-Yuq@ORAu0nfDnV=jW7hdRAWPL%gmcv zW=VZ^f4YTgX$~cC0x~Hgl@15ta^-pwu_}UcJqTowsg)Tc zQPm-l15}-AxvFA65@h?+MK4)KyR(e-#yVRGv^(@@vid?aBT1Ne8vb5^Nz6#XkIi6f}Vfik8X}gqQ$^PyU3IknH`bacS1PKy*(Pfk2eKcqm$${hzXpy| zWmThmRFXlK4{}v+)_}nfCWKp+&JR(!Lcd3fftS4+8;!z4WFRF!Z!%dDnOT}~#)*}n z*{LIO670DBCRkPKKcO1SY%!=nW2+tQ_s`#DDHU1<5dZTAm`pbmnNmn>_yU;)sjHt9 z)-B3ZnOMWli91l5tf6`MG}fET2;v@!@dOG{4M|Xh42rOdF6=@X zDH_y$Rgo~1JrzVDD-?M$Z;R}j&EM@(7?psrrBuWWSA-h-H=W->_B-@cBW_@&yN-p{ zBAQVPZq?&(WUj#{fm~+OMQRpJ>=;Km3z<@nhzp^Q8RvEM`&c+Lk3&y9jkOCa*f)1G zW@qN;7J&NvI!+xuj`fvQblPp+T1VJfB`+y7Eh4PR+9?MHG2ouWoSQ|8*>2oMx8BCN z(`Rt_xo0uAYbOrgcL#32Yd`kx-i_U7ci`z`hp=#N9)95S_(KeKcDQ1p=GkdOiQ9 zAb%o&f-c!W`Z)Idanz~{+y`HA-~E^@RZ&cBjBzeS8J$K8=Z>F4zuD&uIqG9nAOk)A zTiPmZh^*8_8f+&>ij71d4r81?br#Z*n7erfJEvw5tS0D20q1p5!9=~@z}Q#~Mc0E@ zauD08R#2%Ba<;-s68KkP8MNK5uwSKvG4eIjQ~2n~)2yh;tDx2OyKzCuR$CtAHbe%I zt}pafGlhaF3?Kr7!2fJp^4mW$Mew*lf$m;6x z#~;TNPdtI&_>JGdo;|zqhBv$suYdicxbMFEv2Wi#lu8AimthS(F1;NC)drEntinrF zK^hT;Pd)WC=W~4MLm%ST)A8ioD@`gG#f4Hm4&R^6x;B4KuB4fuZ`$8oyLREOyY9l_ z!-ui9wl?%`vhaK38{U`|eVNux6MU(87CvR*W$Fo|?m$Ms^I9tRMq}-YT>lw5P(ugq z;@?%jQfg$H6VsS@|5TOVYuiSFVQrj?_cCZy8GX~io3^|mWb5?XANVMK{Fna?CT1tD z*$$EV?r;9b-)=bzVY^+MbMO}*`AeL5_9TAnm)?Wh@4x+q9aWm^dBtxY)g)chulkNx z;&Xred7OIw^rqiWYKV{g+<(XWKJ??b^Hq0Tmu+mWweZnj{5^d6FTS$n&v@i}U&o>4 zn{8CYIJb9pOW#TMadd1Sdc%WT-tO(T-L7}z5bwr?RfsTQu$(gBE}cm5Vr>|%+Xx`b zLM`)?5ezEh^HrY)Xy&4H^8tA?=~R1E2NLiD!0)5t2oz~?&yqGmnmnB^kyBY*!aBu} z8w^FHWds<*!kJ|pdG0h?b%|Q(CL|H%7O3i4RxglYOWaji6gq&BkDrQBV0@Hz)+soM zrH@4Th={oGTWI&zFu#5n%k^_;G@HC|_q+n!Y8fSa7R6Exwo``dR@mB0Se{aP3{~-@ z$_tfL0cnId?jw%+2>cfMehceS4{Iw+@az@T%4adx--YS%y_hKPLaj8%sFShyFz($9 zG=vda>n%*w#)-;BQA}d_>;e{dwXyHsDIKbra2|(xmo;May@{3s%r4a~s$|Rsc0&%T 
zZIM%2$XuV2nI)M~SI8{bJI$*uQf+6(KP~k73C^CXL&_3YGLj_|^Jqj|aDaEj)$r}^ zI@as+XtWlQq+JwB4g+XCzmI;e%LJFDC)I{SBf?9ju~y=c7{b++ktqX|mKan}I<_L> z$W~&x-a@k*V6NK4t#dnZ)2=Zz8$S9z*>j8`kuIV{Fo4LmGgKKZ67*z(IPJiXB$F2o z+aY=EljDe@wmUOmhou)J^tuG0^f@5b5)@z`V*XqMm9ajYN(pIP=W0(G_F%^WTmtCQ zB(sqkoH%T3Vt&2a21AfhWYA{RSJpUOvQiy`bSb;Qf^zZJV3}6sn8XSu77XE~!+sg4 zbnI+#Y%1!JUyEfZn#cf1VP%~_KW}BayKWfsIhbK2W3<^bi5r2;aC{I zb2lo}Rn+H~v3`0HX>$o)w~dLh8I-F6wksja0@AaTqm&MhW^!B7bs1u<-(cW+H%&Ng z9(uT4y6Sw86Oo6ewsJNH_BWUQu3_<;%q*iALD%o692wd_0u;h9RAIq+uqzcP70a;N zdVEfESub+%J<7h%)|`gSb}9oTbWb*RaD3}A?;ULMfV9Yeju_k_WeI%`&e)eB4XQai(o`vPzOnu*Sw_hOO}~l7?h@)z1HK!=DY>j( zi4D%|}f|MXcmM~#syPP#<^MOrR z%@bN7*86LiDvx8*naCaG(f)gxs_q+Ajh^eLL(h+9Uu0$1;JHDo-O`pZ31p=6TWDMe zBF>yRe&hr?YtJF-C#(RvZSNku{&la#u7@7RShdFFQ|gDfaN;CR9ytnx5sz0yMvz@3 z7eYv7_i33tr~!P)a}rkDP@fYjcZ;mkmgX1Wc);YI8O%*i;q2O?BGD6xey`6YMzvf- z*)1Zql|`D#eIlzdnoCeX`F6X`{f_6gY96s?yo!C55>B?e7xy`N<-qo4zS}e&BV>w` zVS}GBQ!k_Rx%b}tar*S>%(95yt1Ozi&nW`{03ZNKL_t&>2Jq?M2M!#-FaF{$;-QBg zVv^wNU;jGZ`ObG@b#+xM!mjM@MxSX&71M+sxAH`Qv27btI5N>-?pumvID7UCKK8MX z;&*@dcUb)}J3GsEO4HNR7#kbI%*+ht=H~c!nokn>LKb@y6BGQCNSXEZbyiXkSwO@H z{kzp_@y}kb$A6=rqhNcSg|eGGrD3a-M^-;ZDY)nCQi-~RSY zQDce)zWXh2!Opq4?7m6wBm3GTtyP?Dtqr*~So+wB+#+>^H|2RkexHW-V|2Uog@=O= zv30%$6-Z2_G3H8MZhdcMYphY#&C7+zIPlQ^E&2DGW&qKD^Na7kCQ6LubIbVs|NKYz z>?b~V)t|N9w%(3Dd6dsBDQ$lK2md2}_&@w_c=P}M<{MH8PkCE}`E0gvX4*@1U6GRM z(RY07mIw@L^uGW6Z~t$+=Yv0r*M84yu4(xH{PFWlkUaJIr?>nWl=JbbH@#xZ$Cs2F zTN!8Q^&kJ$Kfvss84h~iZrknU*qor#$J(h2EaB%0Y^gJ%#V}~W&16GtgmvUVI*knH z-e#6d!@wDPaO#@HyfPvy*Jc3GU<|tABvqCksh*&wu|!I?Oo0z8=%ZTl;2_Xd?6jy- z6^%A>*%!#84TiKZPY@d7ux-%Mqo;80_yYW%16xcWPD?;q0O|eNLV~Pl(ufHJvfokR zn(D#X99(s&0+OW+5v3iZb{{Q&1*gv)!TjPG)ctkDRsy$BMUATIN<>(cUck<)_eSktmxR<=n1pYe-%!NhuJZkoYLaeYP#~`wjHFC$QR`N6)XL-dxA* zL>oJ%Voa2Q61(CExVFc~oo%|}gag`WAHa8&ZDT$A4DVu5gNUI8|(FD)Emo)(=KdJRgrdDZ6pkSk%bJYn_>=O78*6RbO$Fj zQ9=nSn!wN-rbrB_@;0~+Q#Gt37#!%uK9<*;u>ED+deiNgoSKE#5$N}Qcy0kM1;;C2 zEQmxAE@>fwA9oPO>-<>`S)ZY#h%pY6c2uQrswO1lF-v(aWGM#a4uOdL>#ZW1%@C(g 
zFJSimeH;WY(*Pp!VJ9&M7!wdu5`(<<`Vq{2G|uDMnThwF zoSNeBQ$GyRZnfdKE{YQHikf5)A-~qvfzE`?rdF(E^hU*CzKly-Q~mrv?O`MzgSDCZ z#WF;xQDVS^x+-uq)jMa&d`N<11x>r%K%4}azIhkQQ&ogX7v5NzRT*?0+R%RTx=#zs z8i7|uMl5a3?(GWNMfZ)3zUT5iAamf_;ANZtv`aTD8(XwvA%Pph#3idEXQ!IH(AWV1 zys3zh*xDhChUIb%6SEZ*_spPqZWY~yE*k6W=&y88DOX`ll-aM@^@<0H&}#d%GNZ+u+!3c)(gTzZXY+vg9AvWu?T;7K$is3rSCyK@{Ltn7JNU^r_K;8s(iyTTbzxV zoFlKpQxhmlFdCC=Cckao-nbQF;rKj)ULX7Vw_@*soAJmiUxU+&^EhMc}Iz@4|g#A7aA9?}{=T9@>O2rH0A|a+g(A*+~##Wnm3owT#+$1>=<(I;qdwPU?zwr;Bc76DpqaUb0i9nrJ&lvCKF?eRAoBI|C#UQaA`AjeN}TJ~5k(TNEwcI*+P3$< z|NZ#uzy37$PtyoIrw8?6aDhn`zWy~~MC8kV`jubd*Co8(j`@DwO%i-z3SwFHj{068lRYuTeRWSD5himj_XCsdhE~{iYGnO*tCx{=kpo{zvcI zQoruCd-%-n|2^AMbsC*5Z_9SO`sVin{M{e?J(CyI=zRR<`#*vwKKm5@<^TEpxbxvV zt|`Di+Fp3#=$35FsLG#IMK47o^5ivdeKo%L=U>_~i9t$?5B=D$;roB$`|vOR-S=U9 zdhDw9gj6(?A@l$J%I|ME^TIUB=$P6$wdMEE?w-LdckSa`i_Ny9Paeas|Kxwg`+nue zar^zZUDbYUx9xU4n$uZpqPt3y78i*I9;Nnx7P^8VpJFC+6GPLKpR^$<2Iz_~#@wdD zJ}(!}+}O3TO8zw(xT7GzybU)k;|Ziw0p0}US}e7fp$`#t;G}(2JprLZc@vsA+Q0fV$=vP8f(Ek5}J z$vTR_PSRu0qFzKyTEOX|AFg0=`7B;I@f?;`&cpT`6e?q=6e}o}N~&^*J~OpdxGzt* zjkQpuY#rdhvSS_E=`h$~Ssr_h)4!?eo^l!-X`v`A6p4rkd+@t6=(bOw-B?C*Wge}5 z4}RE3Z@h=u@!L>#p}g8jT|hQENZB^1(`}N9WOE__80rx$!m+iR(rKm_{ zUt@@-0Yo%7*XHM_KIfw6h8FpGkPa7<9A=@CYJebb4`n9QQUh8Hkl768OJ?Yj49te% z+vs&ktiOtg92+ZZ4Rn2~mP}D8*_=%glNA*CbcY>;vV(eO1?#Ov1W602>_YZq1b)Dk zT+~f0R$mAPpeW;oteDtl0jg?NDI^3kXi8UE@ivee!*RK~Ezybyx-RIY#22mi+Gw3V zjN5Kbv14K{XcNP33lRm+mmCfTPD8S8sdD}W_1k98M-=xFhHY4)0*ll=RGB7AOhnOj zYw6Fo$VLS*j}I|fsU!)UVu7uy&M!7lxn&QEmWPszsB9F82suPrhyj^o;6naf9YAZ= zIAU;dU(_=2Qs%~Nmu@W#g(i0iaEX!pACdtDUL z1fDFxW!x}PW1$Y?Ni68R3bRnO74eg(4Pv_$I|qibpu-@Y)^HW1`5_0s_GZP;woja~ zZL#GT2j(XUnvHeDGQ^IXcfy?@QqzY!X7e%U&yb3QCU6p2CA;zVG313lT3H4MaybjR zlaRY8#nAat`6gK~c~;A7O5O%m0)r1qFM?16Hkyd%7TFts_5+e(?1&)}&3!e?Xwc_1 z)#@UdDJ0a`YG7_A=1rDLj<4e=1IOZp1zTj$z1LDFj_zGk)frfc#X;V}b(n}} zqU>&@w$;+9=bNjdH`54B{K;WPX5_VLMn(<}O3KPYDoTjs5J4F7{nT=bOz^5ua+QT* zYl}ZS8`GpBFY*>&JYHD5kABp{LVpQMy(M&$E}WtZxN6SD$45wWp!jli9Fg5uDL07- 
z9=f*a^+X&*Nyr3-3==G#n#Xhh_!Meh2|Fid(Ozid8;>2r!rAkjJ>m)r1*&+q95rW= zW)7yZCG1q_>s;{M0m4{)D7#LYo22^QO5aT+1|6H!SE^CUY@rtQBCKCn;d@2w2RQhO zyYSE*_i||WbwF5WN`Tx4!`gZodtUwN)H? z<{9+5T?n_#_obMvAJs9kbDK2)UA6s?p1(qKqSe5 zw$idW7TE(5sZRt(uZiQW72Gv8M@uR--y#LZJ$K`Q`|rn-PYnRu(X53B9(Z6VENHy}VcfAXT4jn?j-_KMklv{G(-~m>fM4Her1pV>$Dn7e- z1YsO0WtUMyW)lJZ1#&Q~Wq+d{{!7h&Jr1m7oVj%+(ybOE@* z?P~n{^Nm2;54`I?;uUXv2yg$fx8a`G+>K)S@?bLEQ!bobz?c8>D@<-xr5 z-QSH5|NH-TRiFB2|L^}~;^A%o$6N93Kkx>O&x~*C{h=S?*)KhZKlrsj!m+21U-ce7 z^7hx^k+;3}s_q99a}zlDiUV6BFrZ0{pZ~#Mz`K6x2k>40`a55AY}szx?M60d{`fhh zR7&coNh5)kWc}a@TjPd_3g{BUlf^v1#HLA>rAZ63@Xdi`Cf_0hyUaW%V-OVCVXBQ& z?M0pBxf2D;#b=bEToBEia@e>-3HRsXo zH_+&=bA{>bm?A_9(q(|(V!vtyQu$#Ip5?(U3V5Q3BTt{g!Mk>&I9min5lC9FJWCy) zWb+H9%yuUo1Z(F>kL&>C$qkt&#N?0?!&CwhATl5^R0XhLm59%T5FcL(L{BZYt{^)n znFI_1Q-(=Ep5g5PEuV4+Jg#aa#Sisw0;5tZK&!KgrTRQNK^=}0avp`GDyvkYXi3y6 z9z4fFKMW`#1=97Dx2-60pN?X(`f(LZmtMT!H@KUD4vi56_2p1{h-?SFG)8^l3?77q zgVkLaj}xr-I}mOK)A{#>d{EjX6|W z44dYdMGt3`9AfX1JzMNUCJ}qIvqC~|2a~V1msP2&?SZmZ(-}S{OCt}$4sbG=k)Y6(v25p- z*cyN(_MeFgI>LjwZ(NL%?RCg8-7rS zaBY+)s;EtmVdqV|nZyu9eNUn@DNOdtrS>a#j`GJw49=j?L|Fb933JAT%&Ujg6`+c^-5#oqyYP@|4>z~=b}gVW=pEV2_I+sq~p*$s5a95V-44=k(_l_`}(i0DB84dp_R#gRTv zbR6jXaG)y{Qpl9&OPpgs!h|$o!hnbzGgkM*9?qo|P)94uKy7zxD8e$lueY2{{`*=Azs2r1H%ETg1l4d&zo)YA28+vv4?wCinDca5P| zsiG0JIea{hBlLql{Lp7b2CJK74#1`UV{d)oz1^;?jaKI0QmbIfbFt75w1Mr5b?&Qg zX--Yw3j6qC`y38V%&BgzrvcSU6?YuG15Z8u^hNm@1}wk*_JQBK0s0BtfA_oJjop^S&yZo3U9PMpYWo35ng2?ZMOnaEAkA$zL)zFbNvktb0$3f{gr zBWH8x>QV$n{@Z9t#5rR_U_%w$A#7=L};**D~j-rcf5nu~7B7`1f7jy8T1GwXr2eISk9oTitPX2ql(Z>9-^EmbVDI9t7*fweLGHaxS`G-$_ z@v>GwU;V3pk z{SVJvRn`Qw8snq?@niVI|MCeu@aX+`?OR`s0}t-U%Q#M?>$$k^b@$@Wf9o%{{28PK`tKk7C_eR(zr>sW z)ti_|qj4plmqN#4eX))c&z`_t-*P7==O(XeySLkRyOQSgR-3$7fn(_~PBoFTGB9Mg zZ8WXAc)l_)X%JFqOAC=z=3h}W))je7%oJ_j`C1d1Vb{gw34m@ zilBlEUDsw{JWBhJ1QaI?tgoKJb4R{}`Q>A9iV}M!Xi`DGBLU)6f!!P!8WUg=@WhcU z1!!9dP$5ErCWk~UF_4fdxX{uBhEpiP_R4VF8fxRaP_FGjxjYBYp)gnq{4PL-MO4ar 
z*`Jlbh9y#0G?uZjd>)8AR*(>oQTA#mxMVR=f{JEK6-XS#9AqlFI^V_OnL75>W>{4~ zfE4Qol0@4(m?iR{iP1^=(9c1oa9-AY5kP4c2N~ETwZKOKR6++}nyd?xqXH95Z!#`u zTRFc&a@8*qMZ5^Tu%O8>scpL57|W}5#L`6~)%hi0D`xFP#7YBF)U?~n2!c9f+JY-1 zj29)|_^SJG|J?^MwPOa|dL2(bcL-m5=6S4in((X=Px8Z zp^$7pRHP;Qsc0nv5t?kjWib%M$R^u_39uKCuttZmOEzKhTM|X+CjCAv@`&zq(C#)+ zovNZRSpuwB*~wVC5}p^8rgsdhDdfmeSKBfYYAi6|0yo&uAu)vhhh?0zG+=Imu7wyt z#F@Y&RV0%b4HA`N+!FpO@zoWS%w1H%kowpg>>0 z?WrLmCR`?}$9GIa_9cA3$C(N=mKMt;xLk$LWsQoMv7l{wB*Z57Z$J=^?&AiP;f4_x zI^#fU`z)wTAc=u0j0p@&jYNsDERuR-hbmwd=xuYZf(jBQkby2Twl%X{)QUA&(q?-? zYp60@hkobAc9OYqafBEEK9>sd&TIrP$7V@oKQ!Pq^7bM!Q>4)Tlj4E%FTx16Q-b3< z*}gIWrP8xC;A;Z^)trLJt;9B@Ns4BF4U6?ftdhNo7xUbNt$pZHCz~A(AGI^=P(-?26Ip75ZUWym6x@=B6)*~@GKE!a8AkFzSr+6)umKEUNm(j z`y3*1zU`53$2+Irg@WrM4HDG{!U$d*BWd>$c6_wgS~&dN5!BaP2*Q*L70huEX_Cu^ z$=Pb==2{TOW;Lgp%N2=GfecyRG}l|G?yR9!ts>}!=uwsl5g7eGy8Rx4C_ts)se7QV zG^YoFz1!{jwmbk%zmF9>+&NLjg6|L3azsF0MEG5cZZEV_Z;%m2{rM~HdHk?^03|rt zEtb0YqpyEGKJh1ioGB4ZGo(PX*}O=VLBz(vg9q{8gAa1B_D10RXvV>)1=3Ib)KB4k z?|UC-Rh&F|62J6Izl4u}{No$iKW)_V_EVR8O>QnsLP*k%gel74?p}czVkc3lgFw=TkO&&EJc0%uTzJVIxJQ9 z4IW3b@b0YEZb*!r)#GJTC*41JZjf8uNHr%hQ!GoUTqdE(b+)~86=rd3v4WDV?#-qu z{Yua4OWmk){o(I;g3D@F!qU-1%|N3kEcq!W3-t$)6`O0e( zEWZE2{dn;858&x9Jabi_L*&fw|LPy|<5F$kUHkAKKJxFk35@M_Lz)vbb>N$&Dk!q* z4>J+D+KIqM;N%q}Gr6!$45|@X=1!G=LCg!~GzcN12!#rPiy?ysHiuo4ue4;_4w`(C z-IS%3Zc2SYmO4d5X$cq3uHwiuXVIur#o7#HTFiiBiGhzc^!tq01=%`q;i{f+F}zJ- z-Bu5QY~k$UAsjyWI2zprlq&@kx!RB_@Isd1CvhJ#2@%F2FA9}qR64M;$SMm}RW4g% zkuyK#R3t#$7YO@g!9a`G0@hd0p;(&6_~c$pP47jmHUrP8A@wRqlOpVN1~Zd33U(DM z?g=#3E?{x(thS^IFgtcLD#bYtDwm!>!O>P8MDm1SWeZh<&Y!-3UAIr5P_%W{L6U`g z%8fi;a8tq2P^eaaV6gNhF9b5 z-}*4x%S&*>4hkZ|>tAs{W~Qg{$A9q|w2}}B=S1Yqtjwy+tSmY=MCH8^aU;&K&pvyv z^{=(0EJ2!b5u~&UmQdO^TnV%~9qhp&K0oJ8Y{sUsaUAIm5cmQU4Nb_P9w0%8*dZH; zlmp*0Z@^XKfgfnlnF@Q_?Y&)J+wUX@E2&%f#c9?7baWx8Er+M zvWrNfA7~-uxyU>RqUN%tmm~#3P1~{6#~RqYZVH3N6|BzAq1_ok)qol)EqYN&@c4mY z0~}g`>vfs2Zj!8~(Y@4GqZPR6TBo?^>!uIqW})dih@&oAtBVLG0yH;{10lUdTedMd 
zLL`{)J|!rBh#;(v$(4^9GBVBA001BWNkloA1&;vQr%+Wi$A=`W#| zc94ZRpHvoVNlgx54X{O3ZhXp+d~_dUV2+Nb(#q4>N1wp%dmhGQ(8P3o0uSDDKX%>o zAXG}39b=q>BuvOJTU&8`N6z%f99Uey(!wGGjE!0y z-V~9@9_MpVCK!RP(vhs-p(2mfsDreg!likRP(y@4rT+RTWrjKfLxMWaRW@2#U6N)w zk}Ni!hz>u1$Y+U*=pa zDKxPrI8jT_p=7~al;dm3UsMMCVi)XyY^=DHLNtgF^&>v#jd~M#EV#fXNn!>*DeJ|CDBZM1@)e!pqk7JTFIkxJc9&bDG}xGg>HR_*!rp0lsFj7X+vcwQkc zqV0R$^Pk5cIjJ9LEVI`Gf#pON6L1b34Ybe9%-~ILdK2!x`|cqNr(>;6>9g$X6axN> zzxaz78`G7nmt1lQ{^*bXh%+p{{`I2+i%0E|Z9CBbag4mTsxlcRFh;+3t)HK)oh)%N z`rgYgzns+q=3ZEmmN+1Ep=sgdJp}6p4^{@s@EWP;5}fvmyV& zHVS5b`CDFwZ++q0=M)kD3pXMUzVW$#drE=sRkDgee9BcJi>V8*x`5Rg8+UAEauC)! zZ0{i)*mVGRef=K(yQdl=Z`%uAa}B09PMsAekHB*xF+TR;KY7~w^-Q)mzUK}2-uJwL zi>yyYo7ptY3pvEbr~oNGMG zl`eG8dCBUqDu4oh2dixix`(m4ycg>?ZpX~bHjIy73gJ#ep$6aGgkX9Mfm_E|vx&Lc zhcMsT3ngL(4usQ$z&LwQJFb~9(gZcr;EXFmEG@RtY4s6KjDrIM$lgLqV*zvw0^?Zx zMHO`@6d2|eRe_&uC6uXdWM6PxvW)0~wUc`o4i z0f$sZ{T$L4W}==VQz>#m${NZ|Sj0;EFl4%nrXRzV16;gy2j24f*P}PP2eXgw!_>?q zV&BK3kKT{1mp=zHlhf!fce!en7ICbqQ5wTI-U?His{U$-m*l`%L;Y1` zM`lrzE_RG>0Wt@3{T{1y#_A6Gxr4eS1xAV_9UvzH!|(FBO8$_IROU+Lj`rb4;Im3acmbaX+JEx( z_JTUv2M%Jjy^2H%)N_5o5jo-H2I3W-AypmIS1ad72@{TM`?7RPDk5S*HM2&c z-ShJY;nWnSH_gDU(_${+ECnKAbk&=~`8QByp2G<>Rl&|MY(rG-g{mB~%Q&NAe}-8P zDasTIVUI^)qope2zj=>SdP7x##%eL}=oKMGA4gU7J(GQ{+yY^ORqZxQUl~P;?u)qz zwKwU``o30GoLI2_BzMTMOVc(dbZ5`(J# zrKo^05=GU!&JQu51-T=64&QGH{3OEPGxuX(`0(J4SlW~TYSmS4?Z9RjYB?mQPaPz+(q+$2V@pC7aK~RC5CN-f|aq-SZGs zl%XCrm~drV4f-CsCmU-{mqlW#%)l^nB>LAg`98G&G=K6P-A6n}Aq-s3yV|mKGp@Mm zDvVD}<11ekxa0mu_+H5Xyvo=niteFqt_O=j) zquam*2&uLNqjN;p6)A65R#wp5I*sx1aVCA}9E4*eg>CMjN{>U=d@R&R{?q$B7RSA2}j~Sru;f&Mj39Dhm>)i z#Rot5LHy3|{7wPl(>Q8#BFO6LSYZ2m-t!)|k(!;IU2}g%i3j>z3jhA-M?cEf5;am2 zk)nfs|ARmH1H9uM@4zFEJi@P4)p%=>7$;)Ugvxbg$r;)wn&(^lE4yvSJDw-fPIP-l z&;RJYz2OaS;OFf2eb0@r#cOZ8u~>WYeMjkeT_SUIREjUoJ){+VmHR)HDEK*wi6w)k zW;#>NOQ|P$M$-FZ4-5)E-G_Lj=9i0 z3mXB=uY1>P@yY+=Q_q@hKG#ldyYJ=)p0@Q73=v}UGR{P!zI*L!UwGCKhO`|oyXgij z&Mo2p`>&rk3%2Ow+l$}uBK*)VzWtPEO}yxJ*WyL5zYe#3`SvrjBj?(=b_SZq_D57f 
zY>KobbP%PjN<50lc#6#yT9#gy$N#KyL0Vco=y{ieJk2Tl5N;-s_9KWaLER_6W)0v7 zyRAywACNYmEFcI-A#fU^j=-NnoYk=Ff&F;!fkTkl1j1k)@|?nPeV!2ICcsvEQo9B) za}1E@6hMkr3rGx-0o*1K=Pf+C{|?;0`&$^uC5%rsLGpzpQ(P&hiHk1sJc1B8;gRrt zk8LG{QF;iaeYBY-F=fia*uPM?FokSO?e4@m!I{$G=B`K~0>%E_-PIMu(IIqKXR&EZ zj`4}hP!DIID7Qm2F*d#xjWHit-ogB#$FZ<7hq^xl&zpu2WAJ?eFE@%4sqI4qSFrnt z202#d+nCur2FG_;y(4U(@<|h2RRRNssHv{{EdeoH&8Wk>?R(4L*_FWn14AAh%BV!R zj~Gab7VfzQL1dN};oU0tu)N$yE?rG(kU9pbR$r(H{iKBhbGsqYgOptenPIZ#aV&v)bx~)l zn`EKT0e1pA_sVxLfW$)7!Jb10FqZq+zHTde(Ex|LE2vFQBUTyeL5Q@LbI^F4kqXEV zd$P+J8B+O>l<5NB9DT)>MON0$xR|-VeDb1=V@H*JIQ9PW~xAN)%!Qq zvEJH7#}!-=MPc=1>m?c_R;j^njAMDvKFsgki*0I(fjQR_I0V{prLdMpQI-k^tP5vN z71CM?fl>uw`mvF=$TY@kYYA!A$Cho|;Y|ccRRkw+;f4;AFbp~q1CcvrUgrrmO9WUa zq3Sk{5*k(|feN8fy*#Z7qPlCPGD0w!!Ik^U`mWpR;5DdhQT{VhoL@A@#xAe@g{Qq8 z^IHr-A(hTTatd-tY2e=}vfO!$r)!o;F1I|U(u8R{qsQe0T49}6oE<&NQy4e78i*q>AU4#rIL_;9=C=-BFYM3fw5`)J}S@a#Q zO0E(mRev@5-ZW=&bz$}}?!5VSO!`f%pIV1qcRhdyZ@(Xba8VD&3tL21T;zJ+`2I{J z1?6p+YS}W#+bDM^NSg#@u0TNJi%Q`LiAJr4iHQ&|xb8*RdD%`}vhy-bu3HBm2_D}! zi}rx}u+Qf6hCZ@-cv?FYr*@*KtZNme5A3fWIp{}NU0ucQ{$t2fg{_-6V)OcGOgHJcD0q1W(TgDmITq*UamVeq;-QCk zA!Zd`z}Hu$h#ju1@!ibpK%hA6QBH_>%*-;`gt7BMNz33>t}!VXYuJze;RxD?&bGe@`8v3QUDyQ?bxvcH{Em-KK8MX75NyWWP^R4K8w@? 
zANj~fuwlc7VjD^QK|no`3Rho!HGccIf14E=RYk;!T1p*H)nFTaE|Df=k#(v-zT=S* z$J>_C-|6q@c;9ft4g5P37>+LJvN>8z{$>(>BG5@9^z#gltj^{4Zp)XACrJ%75vibO%Sh0Yr43so&tI6FxVviy1d58sI0_dbqWzjWJKutn$E znQWAy@U_nztps~EGz!k90PVA>(e?CwKmI13Pf?imv#DM4s^{a!KJp`&+HmR>Eu_YI z(~o^GTa}$|bxL>Tso9a8YvLI<8#H^IrKx|_FJLnAt1(^RE6#wV1(u$(M^5?&M#K|^-ip3*9e zjEDwhCZjHa%^kE>V)SB(ne{zv-E8fH6N6}g<%M}1UOo)ppMoDW z5z^-p;Dmsy`RH&vx%L=Rxr4>|7S4+{BdiN;i9sQ>PbI{AD4)Bc#HeCZLqL^PS(xXf zD!?sEW)y&MflZl8$6Og-ck}r~UeIwT-*N(ZSdHW%Nr8nU9VoJTqA)=wj7WhIW00<5 zrL%ygl{t(xbl(Ua30Fy+w`n7ev<7IUA@Wv+#~+`?b#J}^;lviqF5L@R2V$wMqbM_k z{l}%A;0xo6e$7ld%T*tlB=Ukn>r)P%^?ghbP~*9X zX6Kl^An+38yX)#eTMf`_QlU^{IJTm-QVeX}m8^Y`We=0)#5X2|zqH1$LZfGGm6a0jft<43iUiW$d=YqcegZmP2`o zNV}%8oroV%kes5(5wN!$Yc0UauaZR{4rxaT_z$_DoCPJnvapL78Nw)|TF4T3XL}yE zv@fa@kGoGR-@}Hc+v-M9vFFliL5IWPHOS4jk%JsrCq=wEfQ(bN6H=zCnut9L91kWM z2xpoI8ak|+088nZJRv*863HnflQ$u8~tO|9GkwgyU@n2yY5Gj3S7GL zB0TZvUfgrrU64r%F9?}1V6Z3EN>ozFIYxCu^8`}K6$D`ATCyrqP44UcY}S43*uEJ% zFS`hrU%nHUTyim{rq}WJkRtC|%6ac~N5(xLcTg`9IZzG8k` z_cLAmpevMZfd3G2q8hVKHI{>{rV~z4P(}(pA~`P%ClQZt=XFAyCg`RS z+JipkRu-|m+Qv%MLqAWEI1{Wgo2f-Zmy^5mp1FV=!}Xiq#9CRRBdC=0wMTu z8v&O8?)~3~JrC?XtE{A6^Y&MBIPW+A{#$3k)_ixi-S<7tpz^b=k9>C1E$p=N+5h&rvtWy!yb*cv{*V4RwqA7l2%+b`>}tII=f3~6t5f>VV*9Rh?b+3+ zE|4oEtbIkAXQ)wV?Pzu5F%~05wO|nrxJKpikS&!JkPg4D@hb*%UFDmSMZ~>1$-hyz~!xc?d zCAfab);{dqL@|;? 
zA&wOCg#4uiYV`nmF0HG%wWWqe@m%u|ra~`pJ!CnBiF&*}VXc9wna!xx>PT~i)@m0k ztu`-&$!|S3Bm((XQ>84aG5YZwItM!t&H!6BUWT#8W+>?~NZn{`#n!Dk23?@n>tUgF z0QIpD&8CMy`tWmY5ka;DG_Pf)gL1L5w2DDDLO4+~KBmyX^|8I8j?t>2?Coot12S7bQcZ(;oK6S(xe3$dY5$Kl0;s5Qw# ztbx#R(WnK8vxEsRQkP^|4BTHbGD0Hx1!6$?7H9i1waSR>(z>?bbS21>`a2$O5$BG)o5CMGz$ zz$3_Y9o7dvdU1?sxrMaf!&n$-sk>dI8D$a!lCCOkG%P4Yi**Z9n(av0FW%SRO<}&^ z0T@ZYi(bEt`dEOm^^;I4;RmTN2oRO9rR4hjGuZ!BTD1aKmZ6D7QL}xm2+FNmor!`( zbc!;3Lncx_5dq`G0#b!soN$;kErw`eY@Oe%Lcmu13Zsmn#jn>4;e=%E6+#5AuDbS| zl1*`hQ49`~7^s9i+k48Hnq&{*lT|>PG0NOrPmaFchwSkj3>!Bd-(vCjyQ7NlF(yog zFmf1tBmz!QSBoXGPK>QTRo_FbX7gtWqh#T+#}XuVQ%c(p$Jlq$`N-o0hsn}zVF}wdZp8B8Be?TBx1!f- zW6WF;6>LyAJ@I;8f@LV3F|hj zV>=>N(qjEIp88`sUSWfiFaY6K{`KtCEF%Mv~|*Szq>@LYulcin~Mxh0HEj{(g9 z&B+FKY~F^&&Jx-yD_B}w#Q4Mnl6(R+)VM#fshP1aql~a9jkM(=6{QGUTqB0rO}kTt zD&yD}%L$DO71_?QEL5vk?r_z`@jlqtE#=yBjCQiW+UH!5)m96=)ef>=3+enElGy{; z9=rn4^d!1VUEF=w1GxF-Td}yXfN*Smp?cxjqO$gHdIO}*q~N(55#3r+2%d;oQw&41N?L@O@JakHV(W-t)xis$e+Rj@#(iZ)-Mi`P2mNTw2kp zK&g&SsAw3_fx-q=}TXVU;WizWm05Kt0{;fn)^3?<2RV_pg?+52#gb*GmLI8 z{Y(mvO`A4h%a$#iX>;h%AYIn8vX9+{Ni*lS~v9|69&wt)Ec*|SA z9}7p8&}y|2#SsQ^jGP<+DCa0BL;1;~)HSN=Fs$)7!q>ea^6532|9Zk?!ns;?E6f^} ziyeq0Nm5pN@b9#gA%#{h*^aIq_t~m7Tj?U~$C!z7OlDd+xP$J+LZRyei(|li6S!pE zB&KNH=V-SCCQ#Nm4x`8FY;9M)M&Ziu z|L9NP&p+}PPdP~YOg94CZ~DnMpK_a{Q`uy>Dca^o#%Rmrm<%GX#!v72w3u-a;2*m1Wa5I zNK6&f1jD$jtY~6kVSq<=E#c5?3J04Y)EJF8E*mQn^)nnKNn{)}j zi6KRd15Z4PUAu2bH=4)Tln3Q0#PNV9XlX>B78+k;FrcGyWq@9z?QksMVj`Ud3?M2H z1O$X+*t~T!F1zAVTz1737;B89+Z*7ahabhw-}*LsgP6f0{%cd<$yL8D0U13+aT`y} zWpF)uNQCY>I8Gf zqRik7_FM-tGr*na49II+E;dp_Kgy7+5J{$iJOc6aJVqRK&|O_Z;7O!uq=U}MBR0#i zXWxFjptS^F_AwUbxa#8VSU$KPhYue_=<6!g)DcCkyky0U)E@ZqXezbFqvBhq_DHusVr}W&=}=0PSuYL9>Qh;G$Wpp-&5!JkwyM z%=CjmB1tM9W~;cGQ|Dgf26(g9U6O4^0!Jbg5^kQcnuIH~pK646mAEK6z?1xmt>!nt zZ~v0%dHEl_APWr}`V>C+svmZ05Rf?ETc(+wV>mB`_Gy86sqdlUS z0S(xz48ftToHVh?-*pPBfsgq~1}GSPU6cldMW?jcJEI?XRQ>W(S#z86dX9ThPSGb> z08kVT8zBV-5So_$B%6qH+X@yOtw#wtV04XJyV6*@r%?~tiLy{6<*k@7) 
zw0cWe?i@ic??O^pNZ^?2bln$)DSA+&M9L89GP0?ryO5P6jlHeK73|yfIOiQT+yMXc zJ734rp?NgC1_C$G1rM~XnXdeIjEYkzLncdenaRq5vlY7hO2%`j$#paMzVG{9yx`jB zVatv!@HrsEg&Oo3yy|v3Xf3yJ*PZv`(MNY{+aTd1@N0Z8a@>?H22;APxjt_CcW*?R z4)Dn1_hDjY5*udf5DgdWr)RKkVjBCG<`~G_(CZu-t{iL?QL&k0%LJ;5ugA3=Q$iZj^pBcpUnPg z^!tl-xY9)A=(((hxbyx=y~dz9D9=!ZVD-zDS6ziGuDF7gCwuoD;5iZz5FX{g`2ie9 zTfjL&D~mbv(DnE|wB4TTuzeKIX&i@D7_77~nHJj0NfhUrR!-Far~r({Z&KDt&X|&1 zz(dD`ioRCbZM0Te=&yEojw#K4^220KQP$t~Na3pSCN^)VBM1zU?KmY_YUb9q0OxEb zEvn7>_a{-xVD>W!xbA$xrTEcb{o%7FocX)C5vcs?A9y9szkCNi{?Sk3?r+?4R_`tK zji2~0?>((bfm7AU5}K4vq;NW02_jOm{P3^5lLOe#MAO91+tFTHJtes-Yqi7s51)~J zIoHlLPnpSvUAS{Q3-0-4uyg5kP4nFn8^gbsKN6Bd28LFXZl*t5xCIod?8?8eve2IHDOoiIiYwRK z{dwGd-@l>VIe^AEyUQTXBq9n+juHl{Yc;Bpr0UbmRAlNTG(}Q>yU)S~hYMDMc$7^b zQZ$=QY~HaAuY1#*aOLG!VZ+8PP>DphJHQ~4xcNJG@?_CcP5OA{wqzPY#0Y{Exm>{P zq5Ij=V#l^|}{UTeY!`G9+zi{R3v*6|QW zq}qr%RGrizxy&@MaMEkn5#8sAkd!hK2qUO#ZQt+wq;0(7;_XdvarZw^jBNR;~u0)xX-X(G|>q`0}np38yDXG zZCrKnMx3`kz=lZ&58m?~tS-&M_iCJzBHbD!2VJ{nA4@xjV;wpRhN#NVCjJ!}N^sjT z9`%{$qUSi6@Aa|o$Q-U5yBJ%hC()i;gz_VVjV3EF!hwe*)|HhURBlu>OjaO+>soVs zp}o|Fv5Zlgs1hb`bd<322554*0APzy6vRG z(yQB|Dx{tg6{5`+0~}4dozz(^a7RO0NNq>OWJ#uNqhu>Ve|8D+{3@J&1~o8*i(e3?&3B_Dtwcp0d>JnC?HWD>J?qxg-IL5X|k4Yxg#IhF!C^yR1j%=|# zCX}1ijj;dGJy<$;7?)pg3HI&Yi@C=Spzefd26Z@OfoSZZJPz$PITwb+H?@S$DUe_#$z9PA>^#^LxL z&&ya1LwQK94<#fX-@O-Mt&a0{T!=>=eE{u+HiGqajE%2Dt>$C>^m@!LA7%?ns#e~( zZc||`V}V9xEX!!VMMWGQDyG;o#R3`4$1=rg6tXmw74of|?g*{) zbWkefD#YoU{m+2Dn#0h4!>hYGC6_g zjE~_yQfEJ9HvGh!FctYQ#aD;oE`IspFX8vE{9c?FPQrJcf{-9p&$@LpSigRzxFocd ztiFokJYyBQ$3VJ%3n}V<^hbXbciwpyKJ}?jA&R2Hs_A7fdl~-nFaHwTckJLXTA&24 zsh@vl?mo68df)li8Y^L!=iRify6(E`@U^df?YP+%$7}T7$;nApmi+Wj|1`F5-(I|% zlpsWAeBu+Iz(Wr`q!p$^YT_sVo>8TR{S2D}LPdu|3Jo~$d>@T^6Eo}AF zQWX<{VF?MgtjbvRLiT|)?zh`*R;gG5Ny^g7P;iY*is>y)Y`A<1LBmHti+(x=w0+)5 z2Ed)AjrQX;Z+{j4`%D3@1W^Cf@4O$|FWt_##kqF+8(q5>Klft%r@#1ReEAdq!is^j z5o|vbHb}R%_1)c%ptuHlWeM(P&9IC`iV+ffb2-w&O zR1Qq|Rh*_=rAOhLlzHKL)7ZEF5FUHvAXZx>@tuZ{b;w+2N~l6tk(;u7iv&4b{Vd8l 
zHU>vIoX{0|L9GTz#Q+cQy%(+C0W`-`gf$--RhL8=l2ow)dMyZyA~!Xe4W{CVz;7or zfGc>>&w;44MFg5dalqwymzg|XeT6h`V`=#Srlz)GtX|{5YMQ`KPEBKG-Fo!;v*<;AEVqxKQD4M(eG)-X zhbw&!5hYZN{J+UUW3|SKrggIF=`oagjQl)6|CNq&8dao)K7nYdNf!!#7 zuaXzi^e0lMm}4WY`Xaa1Qj8*K1qNkZ5QISXTLhd)ZGn=JwGsu*bywlYoUOnpJTu7# z+%Kma6F9iMicf#;Z*jx(uf~S;8}JWb{CnJU-^0j*I=X`xsXvZPXqAlih!usB%nUT8V64lTW?J}0!fyk-|8ZCI-Eb_GEn4|b|a1n)|T3K zg)&yGM#6BEdYf`1yOBV&+QstH5sYnUz!`TqjK7Qq5FGzY=Yl~dp~o&=c5PKTM~~e? z92?R->b<5bx#k)s1qG{oT-`UwKTePWuHQ#OU|Xk$yxl>6xdRy`T#4^1z2N0VFOLT~ z+vYH6YJwbTh|QUJ(OEF8ASw1ogCKqeCU}UvVT_N0m6HTXH%7G5K{z>%U}_BBL>*p( z@_d}aO-8B`z)+SLeI{>f7Gxb_KGs$sRtcq(B}!DapXX*wfX=!=q>!yd=pR@@G`E6U zB2be)5CdMU_}VRr3mDm#;@kvMJCc%yQda1kG#w9}UWAqVXR$iBjP)^?>ly1w-vss zEEGUFfbvjAHwC5b&scdPG%?TnMBDGW4BXQ5WVz89bkSOEp}pFIawNQ(qrnt9_?ZTd zmC1vE$ciB4qbmOuhQ{=vE<*RCB*VfJhjH(%_hIM8^H6gG?0x7l__>QlSl78!iaj}a zTM6`Y3p+)lzTsm%{P}h}90UH_+=DEY2*U=eEBoz5^!hyKWx$_7RkJx+#zBv#TtN$o!}CY*o!joh_{0<%wK2r~7)ytj5NsWX2z+c@zYb$_ z4bD^O6B(Lmn>l5-i%292R44VLnWOU9ULk=Bv4v1WqMGvjuF)Hw!aVo5iVaoJ7nS#y zzYE(-D^=!t3L=?&A~Gx;L=dUuJSh=Tb_G}+xx0H1ci(Y8T5ZbYYQXnGq$zpFD+VgH zpeE;gj3&$UY?js4$2bV(oT?--gjG`)#|ULWnT-s~b$~7zD^losNcTqqHsKBvvFF+| z-st?jVaFCc+-f6LXJ%<3t#vGUz2Zo)0^;vKlp>#v15CoaIo*E`_MO+ z_u>!t-Hb$Lc*Dk>*cwh~Rf?k(10Ve02U#VtXU`t)-zSrKL2FYYLC6A&0?NrgYP1od zam_W?;K2tUEdGvwdaJ}Zkyg$7uz$l!L8Wv1C@h>;n!blk8@J#kH@pm2Kj*nvw_yXu zCnotCwcp40HKT&1=e>fM(8POSez#-2BY_eNAH&lUxS zZ5D8yTtC5p>%qt8&e(Uq^o=jUkN@vKcG|6M&b750RmYQJ;QBYe6#w`?zKAb<{L5#J z_0gGD3Y^NuWd8Fm#;^bNf5zYbpP%Pr`%E6!>u$UjKlbZCitU$dd&ahb&f`z~*Y{!T z#ar<=zyFz2dOWybn~Ma_S%{u%r?+`FfRlYGl{#xWod$GE0IINdAmxKh;L&0>i{UEr z<0QMNgsbyNRw|V+;8=hp^|5dF3Le=-lO=&heUeof1V($jue#dUR!TDZBD7_JT^TE3 z*Ktx;x*3}SA{hE;#O49z=MV@&G>mh2P$JP4@E%GVESrI5` ztY4J&d3Ma_^YUF#fT#z*5#Xw8uEE>h^+UMe;`88A$S_qVCpr4P4(`48PTYU*-6jlL z71j}Db)s1~X97MN@SHmS{oCJx<&^|?-MI@FU)VsypJ9@PvOK0|)?u}^9iwFBr6|RL)&eHnt=Ld&V!6{pH0Z%^j-eJd;Kks~5620Z1Y7Z!E`5qZpwrf9Khshc(DIgoP1T|l<0)x~TY+51nGf0G1l{=zPUswXP@DQjRKeiUK zoy#a72qkx@$~2L1dFD&* 
z55{;87YjI1%J@%~Mk>0Yw4k8SnHEIs^dltm^H8l-3|3Z=M=_5%AqD-L40PXSZ^Pt% zaQ27Csxz(ja~U{byC)uAb7WH6^RXol?T2!hEOnGHW3w^Y^IZ6JiXvJhcMz}k5so*} zoSA?#QHPlHiEp#Lq5$dWoH^o|ysuTr#>#UASXjluV%RLR45#F_uV|`snUm zfLw~uNL1gHP8{o^dEXje|A6R#tP{JzukIdyO%qP>GJHj_mb*f(K7o3qspV z%>9CfWUA2RA_*sCFh$uu#MPuJ2K@x>ZVN3^T@dl(=NibN|I?r>bwxT^NYaJD;rlk5 zYe+$b;{JTB(USfwi&7lkcL<15j5V7$vi~spD;?Ajz>^-6@$~f+j;=_Rs!~=->dsVN z2i1E=_2}BU7LP-@z>y;@eC5kuW5wa}@&Xp-7ce_JhxTfhGn^y>Bst}8>Fgj@UZg29 zCxYvR@TEYKYoejo?c>Y;{AFBx{#JB55u8xM>xUS0x@b;LqF%3I{q#BtW}TZDdyki&dE?$hU%t7liEb9wdT1SVV$D<*f~bT^xuRTd5i{PV&i zxUfEjcW-|_wua*cOJB|3p>ZxHb*oJI{lCBWTX^@@YjDHN`9nlL-F~L0r|}EF@C!^l z*x+uue)8Pxp(wAsTb2*t*B|>T9`7#lwRKx-7H{2r6~AT)#tj=b;B~Ki9scxB|MaK; z@8dNB{-68Y=i)bi^EdI5m%M~?BSteSETQuAKmYUi>}Nl#)qAdc!rwEReNjATvyohn z$=`KbHeu(^op{Mhug4{q?1blqtbz-}kk^rxh=3u6iD>cMlBlo*hU@AOcCSKWR71(D zghp`(HkON=6_{#*g2p8xBXaXRWoy!Nq?)Oc7glv)RdfywP5no|vqy6^%I zp061UCN4aE-gS1`Fd1aWtzsO07V#LgA1{2(3vkhMF2pB(`>*k@pZe-){XPOSfB2W* zfg9g-;~B38Ki7_K8@8>-kA37v@aFfu5nuiE*YK6U{^~Op?!N8ft$6o;dKX^u#updk z$FsH7$LdVNkX0LHyZqbVee;a&$)(S~gmYz{_loD871tSsoxkNj+=S=e@ErX0?|mA# zf8~x-^10nskM9FiG<~j}Yp1h$h4ehv;eW1kJUQkGR*Rg(1|R#q!y(>0!I64moYE>f zBbJZCgB3zwSBg-O7gg$dRF&w}(Qfx~_;4G$_vMHq0`F_cGKb~M!l#MVKzr4*SFo#r z1+HLs`2h=n8WO{9D=j<|q?~^dr!gMgw+pH4Vj`6A-3)Ogk(0-FPV)L1nzaTL2FMw- z$+)VWtEh>J=A`bm6;ATzbi67@OLNn{WQ_SZU9}nN+Be&6X6Hm>9?S z_`>3 z0-dQO>N9~sg|<{FMw4m=_kubG6lxjNAnQKbdB~IMj4aM3#}TpIIYdt6g|e0RsR_MS zM&olVE0Lr9ZiNMag=QfgRjdmc;-RgI$nwP%=mi3Y2W?!`8b>p1GMr8XM%bi`lNvlH z1w;+P2^b`YvH8)*Dr0quf@fiAt3}e}pUfDq_7Jd3&ppqQPY6rdL7P3|w&Y>gllMJ4e zsFCu7fHcP`NDneMvBZZaRUFRAC~QfL<(0B_PmVqw3`-Ujf}Un~jGD-9uVWUtWM4;6 zU6u%-9U&$Qv~~|Z#^F!Y+3KJQpa>n7Fn8Xvv~DwO@)Q75`Z zW$m{tOgIDw0GdE$zjl*7O}^Ym=fDEer7p(O0A3;?D3>G?P%+(CV^&+yG0WJJLK-4h z6Gx8J@T8DBgqnY*Y!f~Y9K2fy_I&U^85Fc}8$eN9GXDXA_3gn^H|m-2v1z&Zh+ zrA4GFX#_QTk}5w}j-9f{rm7ez-l|4E8$};7#*CHsI^}zctRy{t(L92SY1w02F%grd zF}kbE==XXUq&;MQ&eu9wX^@o#Uj@18ua3!w5!UO^DnHclj@Wyp!mx_U477A#U0rJ9 
z;QlADWo9FJt6*;bEM%O)7k*(4MBD8$IOV{j@l2WfYGEoV3<;o$(Lz?R95hYK8_v#1;endaEP_W)FuAW1TW zG(N@|laIBe4%C5l>!&f_Ucz9|N4MKSI+G$7f)6Fn=Xf}Axj*LS{#F2ZMPPT8Jvf0Y z<~VAKHOGZDo*e#&^1K$dTon`2LNWwxuR?Q!M!gQF<+AdRJnMO^baXaQMi!&FfJ_D3 zcBVlC6pSQevUl-01F}+>3WD8H>}6s%pKy4sj7Y0>8MUGYmmxopqD53 z!@W1-?#>*3eA^50oQW-%@@lA8tba)9{JFyqvhw0kw2C-S@i&L=#mm=Sh?mctkDB8j zwcVblL7)PdcZ|wTDfDHE2RjS+&yRi?`=b`>Ucl>uuP*M!n>Svr?K{h@W);WBKK3!) z|3~sEj+Iq$)m2yF;~)PxTQgZT##$gBy>`I`7vS1!uf;95+@dX<$Z|K&PvCg!c?=N| z^!XQGd?}7B9zn0)=R&tF+qUA>-}4$=vU4Y9W@b>U)#3Yr&dQS&)r9#@&t_S$B101t z`e(rp&r!wScooa2wfc&*c8ht&q3rZCt<4h?)=m3oUyrkt$qKTJBD<*7)ec)oX?Fz_ zV-yz-jAdAV@g%~g&%-*YN<0Sq6lX}%?@V5J-VljVlCR$#ZF=K0e&+Xo3a|a4*D&CE zssKjY2+)1QkG!6NastiIwX>_yIr*Vqcst(qfw$oPZ$HTPQuluIzB8g8AQjF{Kk;VV z_|ES^W4!TQ(=%v{xbUhA@C*O<&*H5=brZhu=U>EkzIe-NJ*HQ__zHag2j7P0zU*oy z4bG-!?aO}Ue|#9X|I6+8hd=vAJap^XVY79vooi3lJo5TyyBJzjDHLE-j_L%i6vCIS zgiEEZHZc{z7C^_EwS{ZgBvdNvB}zbAipoOYK1WsVM7HEHlpG)kX@R9>v^mI1gsR-@ zIJB7Ii32NGT*#1S8vvzkV1(JWVjBz4+8Wi{JOL8`3vJ_)3yVbP_e3&57N(f*k`MGQ z%+wTWB&Vk;6qdv4eXlj^94bEOb$RhXpk)CZSX&2a?M@8lF0_gvR{}eCUV#gDUI7_p z=;sN#?PVO?yAKcFbq{X&&K+1-SYaR`PBJ*ec;;DgZU{gSCL2QNbOLxzfcEMtUim#Y zVrsg9R_8DR0^ys}tPE(@o2ZYkgCDk#q%9^d`f(d^)0W zBXh@fMOd03sMiq3354tG+zqqMj#0~qB4oIV)K!r@r#PV+^e|l?<8962j7c^MRHVEZ1)sN%)CRBt%cLe?GC;Du77=fxF3_19Iam5R zd4lnPg5V>@Ykg<+S!dSc5bnM)t9+P1gj4IBJ9$`gsJ%tdtN}|EZ82Ry`$# zKBpwWa#lfc2{ZW!(}I#LlE^+L(@Fs%MHz|oD9jsh#~ZqDn(}TRo~i_0b?O)_~BY<`M>=KB|PNO+ z(jYpGn}*V6FZL`ow`x-7;!>zRM!RNZ94kh*GBmjO zJDYE1JE{3>8o$Qt4Y;|U=ke5r`kT;}D>N4&MW6J1o<~T!Z@3}%F}_Way2+5fbbO`x zp4`sI?RJ<+ud;-dV-$i?mB**D+rs1*XaYIo{>(*Aj<)mSB6mYhFt|t{qR{s#A}2j_ ztaBo{)RP0W=h~@nqhRyfFTV)?y0C~N)NzEpU)U1$(B zkMCJ`F~+;;!~Vbh8+qtFPhZq>p#r{nz+^u6`Tdw&^OpwzL^s)sSwx z)ds+(OY@=55q#*Of5O3NmHQ=KABj}BrF9UCNgrEl<3n3Z0KAKPwPUCToL8gjc-c6?otKeiAz` zza018dmldanNM@x$MawK0)(}ip7WbL8XAr%bIS4c?~buPB+FR7UM<*9Y6>PQOs)lI zT9hgadhby|;i|NCwn{}yR#?@A{hh4bDKdBsh4D=u z8q*Uz{7^Aatsd(6p6|jBeXX|eTqZGW{srfAIHDjiFiK>I6FbglW25sx0O;k{U4|zf 
zoyEU=@?Y`w&wk@+Wh9(`#rb%{yI+Tw-E=)B*H1nxkIVl`jX>`gzUCT!(0Si=$0PXG zKitfu#;I0jJn!Yt#cSX38n!w5ZXzy58(mif-2dEv{~)Vf?)v)O_}XW_!35E%Xrx?u z)%U*=H@x|!Y}0hMt5--7Le^z3e#48f=YhSfPWkpf-Ez|Xi-@lsJGW~q`E%`DJI&2w z*j76oz@wmPn!G5%6)}LekC4{trdasfDi~SVw3PU>sl=)Z=qt7+MU_NlQc+fpnw$bN z`L$q^t&(Ps`Y7XtTTWrf*#M0RAE}Js*C!$KCX(F4{Cpq#_pPGcCR>vkRtgXRE9@6b zQB*POz2Vss1HHU2T#cU;p>zbqC|CQ>JJR7rb?zzjya=~EdMiAygN-#8sia&87irwp zf!e-{`nZq&pvRSju5yawB5h^|flcNBGEfI;!qmxHMPOlR5f9&cAA7tXJop6m@86HR zZ@VAuRv$^6B2Dyyma6SoX~9^DG*!)JvdOzF89b6m@x<&Cn4GL*%hpLe`0#$z>vfhC zi`oQ5YoInhji5P&{_+aqxQo^9B4)-mAgN7o#sW=*GsBZmg=I!S7UiLnO-xaRSg}N| z0MEsGX$Y&A6~#yyz+H47RV6A=)XwS|utXUIYx68h>nKqW9+JbT6(e^(1J^>rAa+pvjhmxansBIgC;E6R82|)FsLB{!~QD@;%xYN-JfLT96P2DyGpV$!%qnIaVen! zdt{9BeTP9MIT&yziqohe^g`sM7IJkK3Xy>X6lIRk01@>~wv+N8m4}4CgR1ajczJ{k zjm^l@9vsg{O2kHzz;{EPfnb4gvzMYocZx-y-7{r!A*##Lg6gxG5hEm+GKv8PSVlr7 zERbdKCo6l`0p7V8lTv-$bFyL~pw)$sJVakzk(-cTDpdRub=4|if!wEc1?9d6L4n%}!oIs`##y8BsaZ^JgnempuCLvjk zA>52f5UXq`22w);viTeVPLA$NQdEdhu4FdHNNJKb4;<$41cPB_@t0)CyIo{Qme4;i zi)3j9K_W5Dwbi;ZkNce+m$_w{6l9{y9k%2v1_KP$$5$<`3cAidN8$KHfw`$!l1j*#P59vCPoGZLF!By^6;miX-60}msLopO1Xowgj{tb&}3I(R%0#3RFx35fUPXR7Ko8*^D4@0 z!EU4YBd>0%Zte8C=p_U6lfDL&l`it2en`XtU!!!KDD2xYc1O(N84%4@qU#_GCSdfC ztx*2YEdV}Sr7sMjr-^+BX_V{C2zaQ6Ayy78px5dY!Rkb~xJJRrA0Y)YWBJ8I$(C^|>GbD2#H9D4_i!cb0Q3gg5bN&=NaTP@)nd#qdVV_r=&!KV1_M=jj zBA!BszvkJiO!;V2WuFqo&{ptoYlVjw1s&$UbezZPbp)=*fyHF!5`+O0%F4-g_7BY! 
zc+Tj-r*kmSDo5_yLb6&aBY}6|Ck(9%CNuE#r~ivYA7O;lc{&8Z1)Dhom%;IX=AWpLs)Hzy4A* z37D?{)J2V(QO=PK)vc|A_^sXFz@eyT)b%>AfryMFNe_QNzY8BY|GH9zrWEH`+<4=S z_>({Rlfss2EtM7lQIu)%$xnWg!?6odow@et8kvt)G46$^yS|~@_1~R;{`qV(MFfT= zI;y~Wl{B#`kVd0{pZVa=;63koFT#2gk39AmoUn$s{`4oS19#t&Ks&!k}oe-c(!1)J$_x=1hU5#=8{@Yjo;Iw`Isccui=nDM9;m<##+dy_- z*S_|J{GjWxwa~)udmqQ6cRyDAdotwKWiQ;x1liTsKL?jzd)aBpxHub*+wmHac(49} zSMj5_+QaN)2l3cFyZPFhdwdR$-@oU$e@~!2g@o_C=28y9C+jx~%6>MEH$(`N)fGJ` zr-S;}6OYYe-$VOxXzw8$+IRS<_tX9oX+eb0mJ2rP9Kho=QWeqTY;R{`{5#j4Uh}lQ zhN+fQih%`N)mf?nlmV@(0BXhV!Xnhix?b&MvzT}ezp<=0FBhV^YEl|28)G3wUkS2Aw?j(e0n8}MASyb6{+>shwJChzl+8w=6 zxAn{-2Z*`&WLWOLSCpLwS_EgqJY9GO2}blMyc+}`8aa+h_ZbvF9@HED<5^0 zF)hj)wE%KCV*4bToRggrP2LF@BK~V^yop{XXK;r|GRa=9u@1UcIun5wTWoh@CjY{2 zzm$asd|`=kl(b;9TzlksB`N@z?mN-~_oi<-#>1X~wN)w6PU9h!ll4E>3m4BY=jF9t zTHnY5_$%$ya)hqVJz#>7w!k%5zoRXviV*Fg@jx9l1`NNaET64<*sf)zWv{7Y4n1pp zl_fE3{sogY84AY?=!3;P`gV0kP{*}iAcTR|}H!3{{>;Aw!}Y&2WGI4Bet#rPypNqMLkm@qk_ zSYhBw!_yYl9u5bJ6ZoI!6#5wOd+B{H_3J?o>B15Qb8|?JEJ5@J2vdog5C|L;0mMp3 zRLggZDtYA?t2U8xW1+&^NrW@0SAYYh*T>xGXif!dDG_eTP5 zqJ<8kg&w?Kj+zs~A*%-h9%!7RzDxFaG!C&cC@Ykhjxbhx^m@_3wEbZ!G6v6cIu9b$ zglR4~KPJc&YN^DLJ0HjTmtO_9;qsh<0RPl=*m94?8Op%Wf%GNNQ0|-Q7em1G$lhAe z$f@4rFbr%T<4{?mTa}2iA+dFTARs2qQe#z_YNesEhO>4W9B`j!Y-Ltv+E|-OYkjB+ z08befr5w7B9+ZYEeD#eLt`JC~2rJ7g+VUh-II^(FnKYC@LL`-{+)Esj{iVSn`~4NK zd8;lR`g&pRBl>d9q_H4#v|SkOyBXOH4{0i9t-U9#<)$zy4O!e91Yc*Vu@(W(Yr@1u zn&g}jmh@tTQjZ%ozmA?9ARQ#glH4c`Q7C1Vu*EFak;~W{36phIGT;?0!BeXxxAUb^ z@ifv84S6UkW8ag0(+D3z;~LpbvdSV)xbFpiP!?hoq?*a0ktWLvdi+&JX{V<)dX7=e z|FYXysw&EJZ*60hIp?m8pixFGBsAbFe2)ux2Xih13tTFKUzjDN#`x+@&3evq))aLMG2LQp!#b;UfAc2vQJO99KN;* zc<-h$KK$?(@%icV@G}>@1lLS#MV;<*L^w;W;7g^%SC=2hmlqz#9ql>n?XPh0jcbZl zT%Lyu#CeXdEbhkpcU-Hjgw171WBYsG`(AwUi(ka->?{NQHc;BGKk2!5zx&o?BD4WLji+h%Rk9}ysUugn_ z)E2&D$VhF=B@4@4z3*08Q4kb7rprQUVeO*m{q!Enx*)}c&bw%_HDH`1$TO|#q>PK& zqziX4MPn*tVuF;KOu$r#ih{fd^>fxf%I03Uh5ACTVQdF0?xoLS^~JFo9Ty@q=t1^R zF!zjpr_XCq+i1TV6OCtmcEP!}cBAt;vv~$DqQ~p6E8auIVbqJbxMgndAzruI=6DXa 
zUbuy?qjP=Z=+>NQGO0rkdu>sp<4MQ0+P>SbFWbN9LCTzS?OZ$Cn&+_8JJS{_Tooy} zio~vShb^XuAjYE$b!_!?<@uu*GorGfu2e-=-&_C-W+`kYM+5*fh)A28l0UGA?rM%y zW;obd#PZ52`U8bD3psCqCSzo)APi4Y#Dbqxg;5ER>h+lf*=`Y_KYL&|kPa|D(}19A zcH>2CD}+4HDJFeBRXtSuZO-*)k-z^PK0Kyj(V-Z_DH!JAd36i zI)f|DtxQ^(?NP&C$0MQJPu}!0T2#(=u@FajKtyFhkLn|rr5Cl-lkTG9N>xm%m9=tE z7K|$i3}wtUlmX#2$sjQ{RzsGzkvkz*;L>NsX~vRh(T|Y@4pzG{6j@_X_JxWeMNeBX z(f=G(c;jlxbzb`GDjWs2J=Hc^E2=E8A*^sp1Hj}>JBSBdNpFM7VS+!|50^kP;c3v= z_DCg)JdlDgvggbYp1csOkWRvQWuJVr1X!t{80uJ+tEW(8tN{j|SeF|Peut5}i(D4r z)7q*o=hu)5CM8XxDd)Y2hSYf>evFX3czqv!y@utvdBn@hn96gEk5535B~@MOcpJ*s zFu=LWB4l|VaoWPfhG~c~3TBU)NK&?+3|S3m5UO05p(Hwty%>+c(irUs6qVBKt1gWC zz;iSoMfPT7Yxe)M_olJ7W%+&3Z>>F@`Oa^w_v+PLUDai~JH~DUF$P0y6I*g@Bp_BI zgM|@_kPt#xLL@j?mcl20BMy9E5D0}Re86%@uxUJ`iJdO@RP*ra&F|fJ=X1_I!=Bbc z|Nk2HI_C{tgS)%B`>fKf`|iDG+Iz3H_FDhnRMb$1Cz#&ZMSgD|7@s05YoyH5W9<+O z@3;1Wc24+I?0;NFotQ)m4(7=_k{0b8y5(~$Qt}lI@O1MWP$wC))s2b4)kph&4&~J%T3oZ|x&_{~p%cmylXta?wsv6&jhA$l?}XU&TfDhRxon z7Cz*4X_7jdlkkYvfNzWWIKD`%jFl$Mk9j^fEL_qpjI}pFQkJ@YJUnw|e z3%}6|A5poz7~Qk{jimvQRrn0YTXA+=)*j2fs^X%yRfe)Xm*QL$x(^i;L_IBte9OvI zQUTIyQ1~CMmmKh-?c5`+J(mpTdUx5~<8*L*+Ai1@LcTv_bKAP^l_-S zGQj*&e-XlYd}?1%CQF%;bc0k}OeCw5GF$Xz^@WJ61{6M{shL~FQds?}eWw~-+nsJ3 z|MpXt@!!3F-Rwsa3;ur+wZ$iBevWehs~t<8&|pW_XFHp? 
zkn}ODOT2cvi(9i39F;R1*wt_F$A*DfBy6M zm0$T4{N``|rUx$w#CGH3bD#TM175&Qi_QA%pAT>2H{Smu-k%=g$2Xty$H;pMr5V8Z zKg)<{jC zSQQi%C3!qrFfmzDTRaTFFbWLVzYAkGESz%9>|-Bh55=H9{f&VN%?bbCf6)mClQK0PgFGOe}RF|H&*fSF@#SwO?@ z$sa;(Gt61Xs#)HhquEM(d1t_u02T`hXqacAbd{}Qii$%ZC!-na<5NtC6;@U_9gPu1 zDTjBbQ5&^R*jB_?GHB1_HXlWQXi2Ht{<)$myO&Ui_Q@_zj_zW+-$I-26|>UX87X90 zf{GZAGxz+)Iq&>q~xq^@lv*yAh zU0W``nldC6YJT^3|4&TkHA{^tn3^&=64AvBGjx(Qw6Zp{?1(W#9?Jw`6y+QpE*QE( zvpL=n0Pw&6NJVA)xx=u$aB;uDMs&q0Q$TM#YArBfl@-uU;J32M4C51Ri&d<|&}^CF zuYsxf^7$d}ptXJnb)BPPn;}zNrOnZt*vqpyikyHf@_tTG(juRklw>z$UPA=dC=N&o z`}pz@`Kaa8Rkx!Vj=J$fiRv2xh`PxKPcOJVt?Hta^kkAX)bb zbEV9vV5I^D`8w+eV->~IfK$qRJo>9YC?8hteV~GhRakiW7;nPQQVp2Fb0A$@vgD{; zpf{V1v!#bQM+rYhJ2gX=F<6B(V-`CBK>aL3)b3!oyN8q03BOjFWk@+E1`tJIu1-XK zHOFXrfYw@yxUGN|`81b^MaKe3ZWG^xqb^^4XIG;a3ZG4M1QU^t}Nt7BC z3kBndPHa#DaN0ZY2JR$j3wGS?DadR$Rra_0bJ z1q?21BkmiYXcmbC?CekT_ICNRxqBDp`Fb%ifQk{aLL~|=M$F~}sO84`gHAa`2uBIqd62rp*d0a?WV5GAw)-HZOe9TE zXsR??d5OcfucNcmN3zpLlu68zh%FCE=|hDVVFN%l9xpe3TyyHNMtSkgXHXpgCJTfN zt++Tt!J=ewq1fJ{$&z5Uvdxl_)_y|=YZg~*^K*MZ0%<@1-ON3UCYu2$U~N~lS+|T& zhA5|VtdYIrJjZl6CPR5HS~*}v3!9PTrbew7?g}CVRN#QH0Q~W`Zq29ig?}%GYt*>m zSvEFBj=W-IYfka1>)I;#vb_@N%07t_UzK-2a8yQ*5=!wmQ zX?nMd33I~XMQ}wdtp^vI1R4d4(t-7>fWti`jBO8tb}V+Q&HGEB)o$BsSKGp9V%Xi% z!y;zxbs(X;O@FFq*YBU^*yulVaYGPe&H8>0fWK7e?F(~rO$nJdWhe;_Q!a8vd^RI zAMU<}zkKEeY^EKas|frfRm3m-(l6oHfBo0Fu)n-^HDglZ%=vTpH-GkL@xn_l;f;6S z;XPQVH(;A50xpaNRAc}Q3sS^3s9RbEM*J8B2HOSOd@3Q_eCtSpjoQM27EWOiW?Yo4 z;-$x$$2)=+DKAKMK`IjhF$lDvU~kF}%=!_sZHe}JhE_W@-~*|9+bP@Uu|U}7ThPx8 zXru{)6%M?R7I1NSC~l9lY#kMX7vC=+#;UE_s;%0pt=jiXi?uC2xeW~S-9mFzXWim# zrx$CC?jeTF;~ubJhqK)5VnbSMm4F%Jp5ep7+-BAS7-W5csz5oPV|+A%9?zM{*P+6q z?wgQD^0KF5mW-_TY8Y~=7r~Op6tJ32I~!axqn=CkQX|(h>>u7nF+IVhi(N$ZkPF`Q z4BBM3(iGEaj+#Cz1tCh|kIB8Z4!(1cse!QaSQGZu26N^Imny3(%76 zhuResWp|LrHDxaNbLvzM+7!P(!iL@#d_q;ilB#1tbz$Gni9c`_G z9D|hYdo25&iulU<1-Xw;SsqY1*4Jp`Ua$=Ym9L1sF4%%5VyAhn3KJ#=qmH33x<*z$kUiSs7$VVgQW97S3Y> zqH+F<4V-p19u}Z7k**pEF{gy_z80=9{Rx{!qrO-GDZ+`z{YpP>6hEpp^NueJIRMOA 
zN)b>&YA*sWNHsx9lb9p54dBK8!I7~fqWNoW03;bOo}!y($QP`)d;kC-07*naRQdx< z4h}KDzmI%6MO;?Mrc-9d5Sy$*feLe!YKo1`4b(}^>I()oM8i1tnHRQ{wczXPj?SMR z;gw>Za7)*U<`J^MVa1GL%|=Cm^4@*qH}7F`Zy$1!qeB~IN=(s~saqS6&$;Y1;GqGq zSfN5TLj?9HqcCACuoiwS@j0z)3!Dby(T#QYdhV+D&1nK8u{qDu`0jOce*Bl0{fOog$)hMfON_$#!>TQJ^SG%0jVx**t|HFGQMPEQ62Un?i4y1|fgY8(|MmCr%wPH!kXI%I zzo^M>O(7K-v$tz3tE|G~)enUFFSLgY`ww-;39aasKyp`{A@G@1sFAj+H1}Sn^Uz#M zjCcoFG}b*!`6vq?$lbS=!F}Oi07#_*#RrX}8k5loWu7DLC72$Kd0t}mN~O)chvs-` zirr2v#z3nbUwogj3OT37^K*o@f&a}ZH@PS7SZr~Ke^!Atr3XpO+QkqWoYkPqNi&2A zrvUIL$Ew1}s*BXLRGecbzlh-Y{KnQ)G&Z8bC7)@y1 ztV``&5nkW$T=_N1a%k*_YNU!vu|8PG4RwRpBHd0Gsbx+QBU)PfAL)wCS_MU3Q@nK= zhcL3ktu2MkwupSC)y^5U>kGj_+3U&g{8;%Tpzz~ zZZa4hU?!S{h6f^I8SbRC&v2A>?x!{SKiYc-|Jjv)89Q;?0ud&p{*_mL0AKpjm+-fL zqc42n3k+Obx%}k9+RM+65V)70;%~n5`}pekKEDs`2;Lm+bJoX?#$6s0abQLD z6F>12_?_SR9kc5V$T)N_CF|b5@gM&u*g1QKe~ywAaY~kL1n}DI2RMKt<^XQaudqNx zY*iKXl?2vO9_UEo2XcY5C3kVYMP;lL-IoI=$R4Yr-_bp?bXJtw?{ygb&*wNDk2&vR zJQ{JHQC*cltHjPTZKPd^C~*PY(gPI?T3A&G?Xg_eMP$_&k;}V~E(P-Oyrfvn5=pm(NhJ8xrNKd#+Ft?&21YuvwHgjJpT^zDNXI ze2F0IKxw~nHLUpBWndJjW)m(7w#X_)P&8@E)*&^ANXFI&8f;)sn>Xb)q>iTARQw)W z`=rf1`$}5{25lxQSz=yIF`66#)dcJ5Cgga|c0m#Q&nqNUY_qmOsw!FX+jwC&Adt!R zR37m0i2?Q`QBj#P7pO>F><5a$CjW1Gf2phwwL2i|CH`@& z1MKw{2nwa)-_#=b2YTz!)ED7a9%_qMs!oU>rA}Px=6hfMMRC zRbH79xYH={VBR}f+Yu;}ae*69WJ|NRhw)o)0mBhz7qo>5ij7#2BYhDHE&!A76Quzb zpslS|ZGJ}pxU&7DG6ga!PIB-sTMEpWy-S{~TqLe*n>nI=A%F^C1g@|(fRKFtcypda zY^xH*2}D{zkh+3wq{6-q>LW2Z_6d3TB0u^Os6%byGM{|g8r3lV?RY! 
zQ$pDB|LFuU{JqiF9vhyw&ov6E;mT%?6hL)*irMZ6U1U&`oGp3?un?rUsteTRobLzY z$pn+x6uSraa5y}`(eRiR7>djoLzZLSdJ35sCCs^r`e zvy~_1aZGXs3Mqhp_uz=1AIBNk^-~#8nu`BI8s!+xUG!K2Eh^N_9Otaq?Apu~xm|XL zvvPs5Bj4GsXe+IA#E1uuNLhu44cy_GQJUJExuF^YiVd3PKyVwD&T}pKGt+!pnG6@6 zYnCD{2${aDZ74fwowfz;4_Mr z`e1_wE#IUCTb!=1Z=v1kGNE8F=wp3t9f$W1`O1-+llvXvQx;gaAHjUC1rSayENk7b zr7-idyOcTrT@WaSn)|)5sy8$LXYCf-xg5{N$a-D0S{Zs>niDcpl&viIEW-0HgPg2Y z`?NH=MqfC;gWo$j!du5fGsE)U@8es2iKXxUr^i@REo2@A%@c8a-rCaZSxYE$Uv%vH z$SOJ;Ysy7sn|n{`6??T3II3s(XTw|gne;iM>;=x_|gbe*{1K^FPm4q%^0;u~}25tadWit}Z-3j%@&UWOyD-RNw$_tISA}K%rsO z!6sa_u@-fC6%0zyzPhY*iOO%nNVZl|aqup+X@NjV&}hoYbTmjy4(cj*-vyvK-S5Y`>^tp#$^ zs;%0pt=g)s+Nyo8wU{>`wPwFt>54kR2Adi)js4~tO=%$jEFRVxhG0EqxC*DUF-DVp4B8S|RWL}watX3t5k~slN@C1aZJ2|hPj+Ci<%~?c zg1~Ls?Lw^P+KJ?if1I><;Z2KbX)`lac#+%_d4aAL7^d|4B>T?kX`D(jp<3})Z+#<$|)1@)Uak~Xuz?XH_L$6{;B3cdwQ z&gM@+z^V$4FHn>?`zqJ~)&}gV+2+ZC9zNJR^g8D-;Kr5;F3ZDKqxg(VIyWUYH+wkP zn<9>TP?caZJ{9SeZhTj@Fh1Qed$LBRuw?zxefEYeQmK&285SHq;>6+>=dOq%WKb)y zl5omAVRe-@IWp}eMb^qNCf`~LSf!ps8(1n1u?0`eY7C>GftQ%OE2{i8RwkrX)#!FJ z)J2ZeZ;EOwOMg@!2-GK5Jz|)o(ldCqWAEuEjQn9>g|}{zi>7ss&lhfB!cDE4i<3a_6F#Xl>TIh zVs?t@@DS746oa#CXl<@T>p5@6=ml7~BOg3gAHuqHL_SvnHtRx~Aot3F0+x-;84>e2 z>ftfw_x3Qka~IiggoKNO5eGNZLDkY^NjMH8gI|>?+D7ge>FrnzXVe!oQBlqZ4?OZk z&P(L89J6_jqRg3*M>cfC_NUD*ZIqeuXO$SSwI7PDcQb8jTdfwdEM-3Fenos5s-+XAem~L;K9S@x)fzD)hb!n#ZiH zg3J~}oFYd$*FKW*jWwpX4kOc>g`;zjEPO*1lh^{>^%b|0JUq4 z&8UT&U;YMO-1&=0x(TzlDaRqTY-#6=2DvXE_L1V7m!TR~Dt9`xCtYjeki1r3DhG zAJ8?*;)H=Tm$&4APvbZz7woz`IUx9~57@QX&twXeCijirC$eg$W@YLYirT7iLxe!s zd&k2W}@1fOd^&)qjQA3JQ024^HioQ{VmmFNw6Jcs8h$9DfL`u#q7y)GAB zx%h2C3uglenLD7#~gn zwJqJ zKn8;>v9pPi4+Snv?nSd65MGfX7mo3$21thommK;C2Cs!kxh-2wkjIcozd@M#R^L(JwQthEwkdvOJb_N8{FfdU!c@Jt~DZO|&oWI}} zbN19LSFp41Pt3fkm?M&@v4c_t|Ey%mEL1A;(}Ixz3_AZ@fFuAz)V2^`F$fx+Mx1ny1Q#XLk5=+)^yuEdlEsj`#OV}KSCRjQM@pi$?u6X7AvX!+ZQ?@^8 zYir{^hcU=EEZ{mvENg6RbfEVpzH~@ZZpNxODbSp2$`xQM8dr3&buXVu;*MQ3CCCLD z@53(QL)tkB^X%N1Kf$j=|%Ur+U zNB6M%vCZ5ZQpxxut$$i;Z!X 
zA(I4nb`WO*VhpUE-8SMbx>m$ox4!rCApnNsp)by{O(T^T6=JD|ts-IU;(QRh1o8sq z-95~%-9~wEj80ynV^vKw4(dqoHLk36k6}U^05GxyYsrcW7?T%B|U) z?dVQVXPD5}WM+!uL>HHaX~@|WOSjWOr`<)n zJ4e#)@R!U8T%lm^n3C%lY-YQvC}ogCAVsPrI+Xu%T3~czkNw=^Ud9Y#{b}FiF5>EF zMhg7>RkFRIy;l&HbT~(GG(}1vWLYzFyDnzT(o}Vh>3obE_io_!{(YRxXKcmQJ-3bZ z zeO2RRIKln5uVeeO&l25$RZE#jkwqD_ng(-0Ggp|P?Jz^f&K3_TR{5fJ08TiTDHia3 z?K30%bA2|EvHv4rjYC`A8Mc;k(xb!hv&@HTI+&A!3#zSR!574XzxliaJI2VigB3A! zPp*`o(H20;nEBx^*!B3iU83^gupPHkLYg#R=y&j1d1a0UB?>k?dx z#*44X!@^=dnp;WkP~d3x0-TQz=@n|HAG^6F$Qkj5=+Z=c1z z`}YvV5?h;F=ycl{jSk97BY+kb){^x?+Is(oAA zmGuFBYHJ(6d-JYWUpy>N?3>MU-|Z#h)2hJVe)o%bse1-bwAK*IW?v|p8}6doR{$sF z4FAKeui$^X_d0gxr#7Tp@%qI8tY(hAJ~_gdhqv(4+s~mYwE@a${TxaB{9pNby!?Zo z!Ixu=5+ag*5FI>T2aIOY{7l{k#4(y{M87+V|G}cL3>3|DXS~$C? zKyg99Ma5u-vLJ^0syv~PzNXVDf1RG5;&ePh9nB%vD{P(bvt60s&uEktKAbxQFKph0 zQ(HK7MfmR!xCq0#VHM^=HYyBveOBPbs;%0pt=g)s+V^&g*`7cc7QNZ0H`XBl8Y#Fh zDmGO`x$Y-h37E5*?OlJ6hK#&wy0Ly5KzI4bM0&o zd~sMWrE7eYW;e&I;E>kaU$ZU4$$W%b7sw(>xx1)Egu;R(^e8e}X(zEM1{Soit!)v+ z7C+c~T6ph$mj?ng$IStX0o_qFbnAzQKnJ8wcP^WPt$V6r#~Fym7EzcnUz;olvKJwL zbXD=WX63UqY<>zOXH^Sd@0!7+T2$zEdxkwuAV_42=a00%^~K@<;0Tl#B6yrDpe!g% zg5R5*kic5XJ>C_Z-BXvq3)s+l%fGdmFR3G}IqC$CXnG@oUmHvS3NhlN8Pksq@jk1@Zsi~Pnt zV1I~qq0qKo(jKU_e)tunJrAb;61dI7ntao#xT{Mu4hc*x3gpunirE~alMxR04sbe} zprXL?l&nYE=&iNU9T*iwW>po$EOfRswcRk&#yZbUi1a+qnbkKr9phwwAG`PWu-5Bi zV||^$j9!0$?%F!Co?>e#nPx^khzWE=^H2ph*riU{9*YXQ`*-#+>vqt6`aDD?8Tgjp zg1-9a)*7mHFkdw#bdI&*&~&2;Aar1Q_XuiIAd54ryeipVNU0p-`4G4EZ{fY0??J3} zv3=nJWY$8OcF^l|*>Ke@xR50Q&)g1H5gr^psC$!6nu*EfPrkzw8e{`DTWAaHSZ~#VGQZq=S{aoqt z6}+Uvo@cGG0JwN(At$a}s7s4xJ*cq&?YMJl>_Hq!!)7+{6saK-$zVD$e(63+#i(wt z$4UeS1!{A>WNm!WTcon(uwAq2^Qv9Hko2>8@HrlSKcLSOS^F8+4^76HRi}6-DOZk< zJSb3Gl7!??V7oGW#GCn17MK-9s$a%DvUG(OWnabWE!E83TL*~ zv9`8`Ubl;k?4uaKFzcSk%}>FxiEkc*uxj6d<}&d9$`cpx#k+f$tB2chK1ezH-fG$s zH*U{|`0HQ$2l)Gc;jdyl?IMMYLlYDC{{aJi-Bgh zI~&$H{?|KS!?WFOTuBB{N-znGtdvI68NTu6>quLf&2%tx0};Y#?qdK$MjntLK-YsB zaf~!J>H}6=xG($ng?*o$hg~T5g6bj&=BDoqQgGIVQC-xv4sxNCS7=Z$Hvt#)oa4hI 
z%!(lfS2A?hdq`Rd1Mc+36wd8b7cTRH0SpUTgy4k%DWnBvj0%GZFM&;z3-0#wst@uO z4<+@i+N!PEs;%0pt=jiyi?y{RFu`YrvuD_CVSz8iFGK?rIF@s;Q4a}_53SiQFwe!p zn>5)3+S+6o3n^Lk27tu9@X3Q6WNNtG6aZor7B@&>HQtZQHhMa!uBk-Ir~(ZQ~kJV{i%s#wuFikHf}5apt>f0$2A$y7=4KddSWnIHyk46 zRKmhdz&iU0uGLkH?IjDO3sx!Og1D4aQaSW@qh!1Vc-$4gzIxpLHUF?m>KS~xC11Va zQj8MtK|hkoHYeHwbWO*fMy$bj3A;0=u*cjowgB)*hLFH4w3m$_+{x@fhKuuYm+a6G zjAa^t9jVN{28J_xe5C{M7f;>E>64zMlng$gyTCjnQ9QmW{oDO#k$=i-Onh4{qM5IR z)R1_CJ4U2xfK$-xOc$PEU#OkX(9h|_WZiP6m-|yRS-{{dS}C6l&$nW$d=+00XcJM< zqU;&f6X-kd_`^_%i|Z>Bfm|mM>8`%;nik}TH%PZmf6d(C%P`(EF2hLo%TZVfq>cLF zmhH&SnFpXx^txMg%8&%n)z!hv9^BAo+t)2?V(CjrXbuW6pgeC~hl#1V)TPlI0^-pT zGC4Xew?@y`afPp57q&EnTa{VfDfAM04c6M6A%5WWJH>iUqLUqeG!Fx>q1e}F5_51E zL$MeGEK=xew?DzI4FU=El(Pr+26eEkqlJ>oC8K*xQF4@%+&LfKgxwCoJ0HIyY677T zi!itP{Bc(e192l(uM5b|YqRk(`3t|AKnMjNq21a%+Tj?+U8>QDI5sMw78{2@yLjtA zCK2bYquqYyuM&tVI_e#N>HW80M+2PC3qh?sC!*{8`poOg)*pL9=cF>hPl>rR*41_4KF(*XGbV7ihP#DzU4ovq*W@~OeZpt1pKkcp?^!p3hsjdJ5sE|GA@tYu)4c3(i| zZ@!#tia3na?}Hs07M1%hA_;(r{f0rT^paq9*|e`UQ%H>Z1M%dhYsc+Lzx*K>5Px`( zg}Qm_kBw+L=xn=#7lr~d?s%)cLrOOW6_Y~GRn?%?6o#Yp70X+POn%c&6{`&1(-S3a z`KK$ur-p^mQ`T8>MaS?C7Y*iTwD*e`iHN69pzm4tZA)82VWD<`+$>U-YC*!PAQZ*0 zKCnpTO(~h+(a}DVdurzmmCM(~ZzsHCFk5)V;nC^o0vf3aS$Z z7GSZ01=3N~GR>c@8u#~(r#7htAiTEIckjl;l7qDXqb3*a2Ac%`H)bzYnbh;*hTfj= zqII~Q780Jewwqy0T2`Yc-`W9a{e^-)}agH0AwD+u~enNPL_p{ol(o_Vb8$N3Sn>;RfiUHl;X+#eHe+Hya>KiEt#jIQo_W0G!ln^&vbA4A4jHfqNf zN_(S6$RXLnn`7ZNH257mIiw_s1;#*-h23XmyM)*k;5FCpTE`vP7sdiK%mZcKaEt2M zo>U)*U=8OSdVk2gN|oQ?&AvepLs8 zcav)9d*xF!C;G(xF)bO?ZUN~1zw@?Z&(7Lx%E%3O|5q7{TZ}XIhoZj*WnMGs)!eB*U$s(U?@J zadyOkH9Sga&}d&^77K;j6~;@h6(>AEshhGr|GtS80m1XJcqslj65>K>yzA5Ir_CMQ zuoi@D!@WO!Nvo@T+I|3Biry#dyfE&)3h03dEP}DnZ6X3JR^A*5qWQtBIwI6oQ z8qvnuVYWxBYU*5^y`yBq$K{y|g}D1gWf2tr7wpY4knpEq9-_n$xK3~j4oOqet-i;; zhA^_(CLojmJL6nYv!R%Prs%v&U|eEf0gKn&Y7$L_<}^E6!bZsiLsU*K7Q=m-98KTq zxYuULUb3H~3HENBKlYnAP{IPqMg$NaP&5N$(sFA}^N77(F6%|< z=M%LCF|MD1F)A|hDuAiwDUxX_K)`u_b$Wx=v_S8(Ey7~h5n|{`+|z;#+!iS;h8|c^ 
zVrNuseyl8mpvRSDPMK{OB7BTijYq1F5(Ai=A4uu@-F=UJdan1q$B~YR7{C5fpR>#u z^PEj;eib3bL(m%xH1ni;vaLi`@MJ=$;+ym=5AN*4r3gNM#Tq1rK5Q-;eY3s?YwlX@ zWIq+Ku=hE+u!{8&kjcyVgOZI+04=w*-4~HKuy-Tm*ltNn4s7Y|iAX$iV9^Wa=+zl0$U z9UX2q6NW%|TM7}~M_};Qw{4-z4SXHs?$$)o+DSNHL<4JE08JGU(RdOX*wy)X;7>-^ z%4%*5$15HB&$x;0W$Hwg`#Es#!PCq|>@oP*@K<+Jgeg+mV5Q_^A#=J`6?f7B&NSP7 zH@zvf-J@6#iDe`Zd#Srw69nZVTQuANYF>+~^wQl=Wq_%1$C8i&w<_OfEt+=2$RZ?i!jCDqUL~mKrXLVgJ4p zD(#S2Hoq^<39Ej89BOSA+vP9`(b4h<#2~nKtr5aYFWpbxdrPv`4+*yZqejJPfe1&& zB=GIbA~VdTx$JMDUxaraE?<)?$`2N!(KOn;16$s_>2|&;@cmC5Y9=-d>=*$uw3>~w zLgqTq=Q8c=zVh|uenNbFJ1?B3Cn)%Rw`(RR#v5>}Ygt!$)8%`;PkP-KBZ-*8NY1P+ z>!EJ$utFb%3dI|$O~COp>-vLWosR0e!BgOMUqsm=^Sj{Lub0KDz-XNu32AseX>*;; zX;0wc7pek>*Rqad2Q4KPc&33H2S}ahLYwI;8fA$-=K5hP_T`RHLcEOjPoUK($UbBK_W>5FPAJ0iffki#F!ab3aP&g z$k8!q?TIeVtbGtH|6BF`(^V({v9m7O#fR>qi4;5CfSnI46A;J~_62)R?|@cyem^q- zNxTqr)U%i30d?7+FSXL%03lLCccstw`!?2~_pN;}BJ(#KOa&n~NIRU`->cKpgt`iV z*>fWo8qr}8GJfEh%yuo$ zW~#9Il7>Tn9^V%HO~<4WDHft2Jc{J24X{$&0K0r5B&50^OecGkNslMS#ZE{{oAVvYBjkyyl`5(-RW zUTmz$*YAjy5W3lR+{zycl zRC4f`Jd2Ca{c(bp8Y6KBuY5pAr$$0N$V6 zg&Wj{jDa{Zq*8;eiJ6w;%NO?K7JhK$KfTij@9Y#p@~AkQQx66~KLM8=xhL-c@4q5Y zPwtANFp0%~A~nnO8M-Y@w6iIZbfP~l__o!whG&=mi}C(QzKAP0tG*AxTy|gVoZqK8um|gT#|!cP19KW7 zpVBN=DPDz&-KSU~dWM^put~EmAy>|cOJHRG(ja^Ds^HfjV`gH}^a4<3mX&1vUpUcF z*sJ{+ug6{7>fOP<4(h0zpx&;$`JaPexD55p81jdVK{Q@qJo~j9m8}%KT6|~_C_3!X zlH}XAjI#>K**a9hWYxs@Lnv+*tEKdBpjGp@?#TrupYaX+yI(&kSlXRS6;#+_s%!8b z5owhkWlT=c)xLg?#Q62g0axM(se?obcjVLQGgWDMgoB%*unuu3!=Ib_67-4(->}OU zhoKgexsx!k50nP;s9DEZ9L8T!Z3)j$A3DR?OqKJhxi?e^4c4u9mKcFJ{%=1@C5_6B z+DotW=~CYMp*?HZ>fnY22X_YiFG#OUL~=}dL`J?K7xp>T6cCje9D#H8fo4~zl4H>Q z$=u=lYYboDF_Es$l*hhP@Y9~0nl?1F0jYX76Wb|@)7hXo%FyeSUN{28FtQCw?ckVMNV!3>GZLr3P|x$xEn{yzZbP(@4#) zK^J%Sa1rx|lfxU6QaEk7q{9=E;e<`oxDfVOWnXWa+%(`#+rFd!%~9ANzm6owc+zaW zyeaJOCx4s3+H=T1`%$e4n`pM*TaE4S^Ed-L!$*^RYB4(B(4FZj@Tmt#yPuJxOjEVE28n_X^ou7drKGGE?ex{sIeoeKiE&_?#Z zSb%H`bcX`_ZoPm!BIjnh%CqH=qFXME@(%2K*toY^|zBxw^u%Fs~ 
z#9Ht!>8cyxQmts#y;#rw>J3Qqj+}B_GYW)Sq{OXfh?mT-X-O~%Gp$w0`JGr2W~Z7i6;HsQ z`qRYm8pS2&a5@dq>-vVJ_u*LU_Nj&tZ!yxLt-!R?6Ob`0KdU$2AWe-G{YoF+1b=n0 zJEo_#J2EvVlrY!xfKcmq#d+%J;ry7_8@-T&E&!#iR9b$33EU=ex$C?F!vs)h(r-A2 zJTi=)jvDOU=?rJV|D7OMoCH$>deR_F+|w8hjc>V5??-?ZM8AXY*C2WKYKJ>j6S!2z7RUFiTT}h9sdU2l4s(;2N zsghfprbiy~OnmPYVu|~b8(5eJf}%fjcp8;pozq0q2y7H7FNaT?&9KU%W9%m@#ZBys zDX6Eta0fTUGkgEoV}Nvia6c26^hgo2LU;d5+;ZJAMGb5P1MP?Z;U*M|T8g;D#Huwn6iHFY_~m=iyrT91Tj z5hn~eq2Tt=CB1YpsE9&^#_9*ENnO=~k!fmzlkYXo)pl3R*)81i1k|Hb^qm_5cb@CY z7c~MY%uiJc!53>vWY`r2&$=oNu+AIfA6b0Cm2$QC+bGTq^$wi5+V&)w#?6*pz4y%ML4} zne8q{+an!Smg|pTC50DX)_(W?%j>D*q+w;VKYgpe7>|-tJZ6TS{hS5SVIk&++wtXx zhCeA_uup!IlhF2sHDFKbeWtE&;@QarTQ}+1>TSm%!Stf5WgC*8#i8$L^l=d;IqfV> zeZdCk9yXWTdeZ^K)RQGQ8#pH}%9uxzt0$(fV9pj!g{Es{;%rq_y|`!I@`S=kPMeXh zv@eyy|82l$TfVq1iOtQ(CL6c#wd&MUT1a%CbKe^|`;8|R80KbB^aI%KQ1lBE;!9&Z zRx&NtIjCvShruxKaz+VYszgLpTidM(iuvDj;j14brO$aDO-WO3B{>4tl+_wA z7oK^FfIw7vii5+U0vsR3T+&XyL0NI2p}eQ2P#<cPRSE_af;Ho45&f?4q@V4SV!50Qfkj+Ny4{NQ5#6z76oTC_XdzSH-h*Lcw0po^QDx~ zvhI1!(xfHtJm97!!x(1yNW&V@JLz>g0NFnZOUzMMv8y;@k;N{@G8$NZ;f!N_ZulZ{ zUy3e(?UiTM>ud438B=j45UXyyHD{e)yq|q|!84WUU?uXoSLV- z$%Ka3&+tq_6uBXkE=C049MYe+XwAP*qpVY6ucKW%byG}=$S%ecE}cvwE5Niom(-+J zl|ir6!27b+X>bF5BtHN!ko1KI);bczsO()nkY&7AWQ<$_sm`X%hh8CVt~@`&7@Imu zam(?-)JkQQ@4>LoBbnn6prmq0a`$@b`htB@56`~#p3@!oR>+-1SJ6s{W&jh}k*imv zp$Vv=b)sK7!4phlcDWJ&?mG?{z>m>RMvM!}-Aj-9hKAsK%elRSj;6JXx`NFsvMBX( zIXpp55#f!nsWwxpbpnq@WH+8!)A|7HYyU%l-_5LGWMwk%8=brxw0RfR^bQRnBeFWz z^0p-{h`;kkMs3yFK>^SD+RYUM&(!bDg)jguoo&=OSUPhU zCzO1@CV*2yJ&!@s3}K~=>=F+^v1ZhlO?>#Y%xEt4ULE={@_E|A(^k<0;AKL4qH6Uf?UV74_{CZeQ$B3WgqsR3FT! 
zr)LrzhztBcld$Zbys;| zx>Rd0G$9~=uZbs^q#Wrg2@VT404ZYH3mG-UDhCRokE|Sm{BXCTak%gzxxX1=(Jqkmiz`Q0rNpiY`MDRPs(v?+T6)tt&JmZ5Q} z_`tbltubDd+*3K&Y1R$A0n!TLPyKy5k8zp$tPL&~i*cI8)xuY2w67Fjjl{@F-6FRf zh2J<-N2?s*Cye#`81#m%f3dASmN_UTg}kzHaKkw0^KXB=7UHM|cc*i7+*8nUn1Z$w zVwAi z_qFbXAH!fd0u2bDwd*`H!g1masRn9ws<^)titB2V(B3j?x-ES@kQ`1XqA}f##vp(u zA6Zd-pCvTRzu9$h`l@=Tr>HmpCnHNVf9W!syDy4&ulQzw$VOfgH&!s7YJP9zwx#9# zpb!hfWkL2Iig=tLL`y{K^nqAxr-uK+IIT7bR?a|IG)?K)rBFNGcrozf*Jf`^If?Ya+2| zb8xFQLS$Vw!nDaJ-;obEp{}H`tTz+Lb0#B(MUr5fYD+2YzM-9pOV#et_hNg{bkrz1 z4+8x8n*oThYRLeJvSl!!R}g)>lVCmkI};esM_!divcfZaX#6a~z-{IS>TZgoj$|{L z_Nvcf{_HN7oW~hv6I(kLz;kp32Vhx^g->Lw@rOjO4=c=JXiEQfpCF%Qha_VE#?vaC zsC{8eNEoTh-2FUyE-OGRnuxq2pj+cEYudUvK`sIEqk5d18CRnSO?p%{WGQH+4# zfU&Vb`4#t$jXjWNps6QR$=x%=?%%UG1)T!JfXyUy$o}ys$#L~zEJ%<@$cS|YcIGA){EZY+v3;-9h1D)cCeBU^7y zh;*Ng|ElrQ9T+t6w#v3G2!{N9QY6XU3CC_A{e=5loUBE>dQ|TU&C5?_sYbk4;2;lq z87*b^=TRMp-Y2NcsQ>RQriyhmN4ygsu_+(%LC>}xU#_yVo4{Z(1AgL66+y}^*5MgZ z+7i*bo%XMvUmf-q#6Ky(=s_&G!U-q20R(gUy&<%D?XjG;C;Y}KComA9x>2g?bQ1%u z+p-5|zojBO`21s(7aE*XE_d|Y?$-gw=Rlg_T^+M*oQ zf+w_pFYx`G2*!>!pt zr+n1k^RFJ%qa4J$iRaHma;B`EM5Ucd#HkBWM(tmky1sP=Ia;0{YIs{Por6sKlH?>y z(XOKZiTvH^Y1L!ox_Ftt8pjkCX8_q<$@sBS%}da%ZLAL(S0xBOqpE5$M^?u$VX9;T zKuLoG8$rpF!3XZ9=X+XuqFU>ZD!^6pfHWL!h{27#@&0tHg}a;mBC_L-sYk*Y|Hfns z<|M%(PT#;fw|Kgk`c_V3#Q9sl=wD~_D=>t7zE=<%+0^97WJYwux~5IsqO8`Ng|CJ7 zsqh3wxvxKt+#N2@Qp>;czqzD;vF~*LrP=X6L{@OPBp1mr<@sqQAjJN}DEX6p0aGQO zoXO6K<*kLVP@HSE{qYGfP3v0hj5yc#ASAF|3(Y^jm6TnTQ8QCvJR`TQHOkJC>kBco z0AIV)KG}gc*Y^U^d%Ij`Dl)fV?dgSbp0=~w~z{pr7 zyYWY4*L*ZvmE|}LvA*A5437j4uO{)WjFsWNsX(2zKTIl@C1@vN}OQ&jaRoT|Ma}6KZ)qOwYiND z_M8~ig4HaN4B;~>ebJ(U6rzPmgiqR~N&$G~mUhDx_`kHi6~1O&7Rrt!mLg{2>Mx=0 z#9XyV-Z0Rg6!PqErOkBae<`2VQYBYm}Y`CN7}v?ZWJ2#FOJ_ zBVw{%S#3*T!lH)N_&kAuyFh)WO^NSWz6(GdA&NuOS8<`=Km8Yl2LAW)c)PN928K>n z2H{^L&JPrtWTNlmeVU#*&sLrVAC0)Y)Uz@`?JUd~5mZqbiK=wt=>ataj{nM7#s4J* z!w*nr62E8C%C_qs`*192*sRDbk@t{qF;Nw;X#1Em$E7nW2EDUyJMDaFcJWPE4nFz}au$*EBt{Ez)4a2rTp;vtyKc 
zMO8y1;{>T;!eut$+8B(Ty0?)_#gvR`)KBOl2wJ{HV%ClwqP|P>s@J%SSG3(=h&|8p+JzT+`VU z6`ZQ>QI8d3eepx!8whV=uoZ!H>py52u@sOwa{B_a4F*aO&)ZuCk5;$VMZWAaFn8-j zs(*seqo}TKj>Wocq~^77agUJ4ai4h96VY}8LO6wrpz!T2A{TTRHNRQ0fY~+|o5bL5 z=qjN1)2L&<&_nBPlWg>mToGMKBsp~l(q-U zY=2c)Usa{~0?OpBnl=R?cgEL4o5ohAOK2FyU8GVZ#qfI+?5c~sioT1r57)BT|M4&^ zSlb@@-Gj=uDQG-ZNhRm4RN3-a(B|5hrCqTmb#I|_MF9U=!nN&S3b}MHDGFo511VW)|r2px2 z0sS+N^@ALSV+c|CThw|FW_GtT{#WXar3qK<@bUb!tf|8)FK zAbV!sCZN^A-;~!HSc!^utz;k*i*+ol5c|2pAUzEUXCs>QmoyUwq=??`ja`umPk5cr zbDon<_%#;I1adU02> zNfBv%Bea@|UC1!ke1Y`DYk1(a!^Wr$x~4E2N9`TfU}qlt=*RJ;gWH8iVjIF zlX76v8b{1q9sP-|8zzLQ8tQj8fx{@TFq#^V?U{Cd9RP;zpVSo0^34|%lVImLak_+2 z?eMLz5enCOXl+x6P}wK|XDEf-1NG`4(=u<=$}7b7+DIxYIrzIS;c^M&oH4-cxSJgg z`ysHmOOmBX{Jv$+76Y+dQlbPxU09-ZWn`U})l?T3PifTaHciXva~e8(lP@CflO}w~ z3~7wEngD}?h<9MX%9h(UQ=p3C(DVDReqD^iKA$7%_tC8DJ*qk;8uL|y%JofVq_zYw zvKRh?xe9rvlWxEA6QP6G1iyEVIc7g4Q&@6VTtUJ1T4 zxs6hE4IVX7DPt7!x)FZ?k^AZ(no&y;dd&?ZX{VFsQl6_2EMU>s{Ag*Yacbonwu==i z7Xuk&f%Pkw`N5cOx~g~*u}kFQGtIAXi27P^+l|xsCH1FOTw1Cgmt{O}NdKuT{s0dC zPO9HdOyx8uEJHPIU+203eKq;M4}4zK6fOGd%FYpsx!nkeY>`EEbZ4)lXwd7Ct+5SA~XzP(G#9b8@aMN_PvdaN`w zIHNz`W2XMa`p%^ND)tA?`Ypwf02cLWu~Z*uaMfdvevwhk+PD$5eR3d#@Rfq463>To zIts*2tv@2zyz1B1Qc=CWctsqaf_Q%NTsZSot;E`XLiyXn8Cis4(Oj|60xw!?zXC_} z!PHo7=XFo5Ual@g!?7{nA0J0%CLQq@t1@>GZMa)ecq3z#(5&L6tvX6#4Cle-o68i- zchhSsNGkB@LF+o0UJ7kz!T`6TFO;x0Ix9Fk?_MBMDj$H#p!r89{&mI|ujdVXQ>Mjg z=z9k`ijec3waS_+0{5SDC_wV=x2l9x{I=nrEVP(gT|USWbp}JAIL>yrR|Ltnm;x{d zMH#RAzuXtf;VL;;G63*~o~WJ)ytgt+g{tIbTsht`wJE$bj^{{O0+nP}2nsXUu^oDF()~*e+5{+JyS5Y^*+G% zCD^Xk>WrFBlfmeaYTRdjaW(VGS=nK-06t+6=|DbluC*ufHfo)g39tRd@oUjQ`=CZJ zhv#j1uf1H-vLGA7TOv|)Sq=JOK-}#oOYyAp$J5*CCGf>F@EzCr>LBIRllNxhhtbf# zhv$Euk%q7oF?d|+wO~=lxztDmSCF+*KqgoC?X&TAw$rEo=6OMJoz>W2-(2D;!5$wkR6OcvxS;z{V@z zd8>-zNgwPUbjagiVO#<}=`HjbgYc%Z}c2H*W}lw`x$Ghj47?ud`9tme>@8^U+yL^WgKIL5~O%CtFGv3|kZ zsS?qiGctm|M)q3#Gj2;^Wwx+?`lIHux$}>aNKBf>oBc`Fv8adnW+_rtqvSMXJJEeM z{ZVd2W@R$4TpxIOUPzV=^f<}DE8^rDnPMBZ4aRhxq=msiiKFu5FHDrc+?T6D5+*2+CC=lPJw<4%hYW+-J!AmE52 
zH;r=DVlggt$ySi&3p}I9_1%Zo=lci6v&!qW?SH?c7=X+mfbCr1yX*cICxq4nOEa4df-Wj{buF7H4866EcM>kl5*;H+T?@eSrZa&= z?^vjo(w5sxV>eWv5t&WQL%H}5B>>ugg=}{)7<-CLx;fQ$e+fs)8vS$IA9h~bbRwa3 zP!}Bce{Vq_GtVnJOSa&k_{Gj0#q6da7Z^;)NZ2NeKz`ABtFT=|LFly@yN-Z=r@Q^i z|C zxSw*vn-5kjex-9@-C9r&AYqrHNpD<3I=&L-%A?R7sTurt@Rk=8ruz=pi#Cg4(h9Cd zH-Qt=y=WU%iKUMgTLdLHqk67x#eN8CK>LzxT%mPZ<2#$}u}a`SvSE{2*A~lF zs2$(eDMgfm=>}2}>OHRyj%Tv;=lx#O<_;?4e|$A;uD#{0XV1C}uM3pdi!NgoVork5 z%gK62*NEZHiFAPK`dm=~*uuVtk3eF?b(hq+e#qZqGM?CfN$O)YjJe z4rllpx6nd-FV12RG`y6#F98#XqmhPu#*+I`*O^5+U3T^~viyO1X;3y0PxiJTI41G* zY0deK*{Zi6KYSn7Xum_-f~qsF?%7X$-dqfW`FGP-z$77AO$b&4H7u`dd8RsbIk5l9 z3hG zZ%(o+qmQRG+v@f}^wn}a%)CVO`g`{)1DucP*vs-Cn33N-a`~gk3eU>?rvBKYAg_~8 zV*>qp20CZPT937jeN={CY%9+QzjhT^%09ZY73Zdv&Kt82m;DByu%4Gy#jq*?l-uKp?~ee4bFEpkYv8c4qoIQ8F)>WMMjLGO4Ea+#8vDlg z=pMqhhL0tDJ4?byIqPHKPgZM`Mos)d-DP;ea)tD?hfJ7w@fLJ-`6#%Pkrmo-sY6$iDMTWSJ${I+==nuGxQgtge!ReG4 zkofgravRzU+sI0E(o5%^5z^4V6HwZ;-Nw}gVp_6hpZMj~FK5@AXSwONn4CsODJ|OkJZM4m2o*jHm1Qbf@hN~(O4>kp=Y19__9RISj+SDuxbe;k!98L*T zG}4!3tv_h#zeyDd{*o%&mShzfxGhg8S;qeQUUO>i@`Zvx-Y{&QPoz#bL_IJ^8SOXD zmph?<3w}}^O&t_kznMFjhm&V0yiG4cIFRP-A$Gl~aFLw$D%wV=SMlBk)*lFZ*ozs7H}duv0(~s5#&+S`VwwpJEN6i~fWOLQTYJBr>9Y2TCd&RUMQK zz(^KDv8E@gYkUDX=^UEpw^V{1)!sdGDah}IBCbH|8`pb>uqqs(O2SXIPmPkV(cvCU z+LO4sn@r_ou!Q_gh}c>IRR$0>v+~Hp|~o0S&~@H#^%owUeVl7o^mD%YYT_&0GZ%s9oTZH);y;5ryXUyPA8v~LIQH=Nh2eJ_(ERwQ~T*HWFcw+1tg1@ zp=UiFD%5y}Kw2<6Mb)Bd#7XdR7ZDMiTPhD<`bk){YaT>M#ZHR46koS5QYES!>}^7Ab(%{7Yr_%|$9mA$s2J483f-Hg&bYK9?svwT;WMi*?-b!rcvQ zh7NSi5l(d^xiEbjCDfNvB-6Tj)!(+g^A;i$XI+o4pqcSX*7QkscIs?UEI3hF6N&pl z$JGiA@1NK|Bqk(X3f23P#(Fp^=(v{dtbOFu%(n3=!d@oeR zKyoQE`2J3^qN(N8q(CyuU%^%wDbZ{FNfwlG2aLbEHa<4fn(_9K?R9!4S4HdVJ7snU zZ{DP_+bwdH>z5QF8lt_6S+b^(sSAue`~<%eX$Q@H>^qvBZDD+V{Cm5;-5cfU>7x)2q-?`805A}3#$@uFe z)Ni*TrexDW6~1fbOiFp)Xo0nF^^9hkYSz*dOZNEkH^rqtczV{Q;-xx%eEz7TJQvq| z6%OEDroP6X_~}nedD;D*6Fp>Me6!guy7`NWCYNuDKP}UCGEP_~b}BlsljTHx1Vj+B z48jLgCyReaAkmw$)N;N$n_3GvCs!KvZ$7{Yk;V*|p$z1CsUTcE5{nc!Ag95SL^))= 
znvWl^pHqSUZvTVu$x=t>Bnlx@>=|G10(oMfyG_+trQ{pa|W!Tv$|+Lg!?1616;HP8Hv+Yr;!8yn%#2Xf1GX6i|;aVah88=!0axU4=>F z*!j*VNf!y&rLw8;NQsMt1w;G({nlsEz|cl&qCJs!H~%EzvGvBWC!xU+o_~mI@Ow)>*CpId2g>yG+A!d4{;{QebW{3e1yTNwM4Y zNq^}d>21L36vvA?R!~6-RXIel3*2|;nC7mgtz5!N0b1NpFtMeOSY1B+OR@!Y3N>yt zILhG)mG4crygVuZTx$a`Xwctb@S!IOx~^Lm!^_l@Dqi$f`bpD&L_DI+YWTHr)W{J} z(NLLe>12mQM3clX?vlXA0@vYNFf|J#wBzr{wzw>KZ{=IV?s7@S_k$1{_7wJNEFUQw zO$uR%Ie?KQrFpzGk%P}moOU7t!@B)}oUWHFRzGk(&ue2de`4Am5j~Aq8Q^z}`@sx9 zBY4S~v^^V2gY_WQWtAdL32nrMvssRezQFp3>yNz%zCU9x547Jxb$o>IZ)~k=fsJXW z&|c=j>{-DBCwjl(!bbfpVU+WXMn0c+f@i6JKC)O?YB{y%-Q5f0vuT&5Sc3FWmND+y8EjvBj!y?XJrt zduJ=p=Nb$iEW-HpflTZ|(psFB!Ya#&i@^(Mv8Xn}OKn;uDQYxjaZZY*8ut(VK1u+{ z@69tH#Ucm+0YI22_|2DAQ(G$ic(LJKKt(wfa8fz1-|uH4c2A`_6r7MQ%e_uk(iBUX z69zDVgAGV7>;1CD2NKaBY3qggK_KS8&J?lf5A@?!*`0nI61@(Hz_hiY z?FL3_{wyN*^K!PS4B?;Ga#2!pum^703hpf{4h@DYVi-^Mo24Hn8r;0VU7 z5stNU3k{+^lkP>pqR!VvSRLUzY;}z-2RuqcniyXg5CxAPs>spkZebO>PaZC!hw9iJ zh)s+Jxj2?HWH3OPN!96$Ir?9<-5mA32{bl{Cda*K?{>Ju2HuXa9=xba_#ON$Lko9T zDVw!F6-vUJd1Cqk<)VuYzYyTQxJ-u#G4M2+W4>heM9D<4aT%QN!8^uq4>0}uDX)kd z4@3O2IcjTrOGC;w-)3OmqYQ65b>aW~ZOCm;cZ(^}b;_Xw9%2;~2Bd)pkKUF?Z10e0 z80?)H{o~%+@WEsxw2e%_$hNL7+_o}R$V5<@F}Wn@u+sjk-)xoY2tp16v8DcB&Qlv zeYdbZrR)tX3X+=Iv;Lqurz3EE2Vmg+aRZE|)P?v581}NnNtnp?j@722c_1H#lqinS zOIC>2c;K0#mLPCQ*#6)L%EAP&`p$gMlpIrlMuH0Cw$N>VI=J^b7rf5*e1?k%rEJ#a zZe_0KhNNkzKu!E6K+ND9t5umo{CdQMupVk?eU1NqTyt6J}*uc?b^QQ{4FFe)~YmNOG_C zz)?NnhR6G@z5#NIIoJ@Cx*47u$u@hdj&n1$FJqHSawa)64n)N(orgpigw|_FY|#)G zHJq777eFAhb$^MY8MHtY(-<3^WUvr0&@><+A|hMg8E{%NLu92O4`CDMP;!30!nty^ zKb+14bj2?V!K5~3snJuh_-G6r#VDI#q24}S@-F0_9lZXimFRsE zAi2EY*k_O!_(L*Bd(M_(1jGvRI7YMBP9*1iCfJo|+m!NjCSd@xT#Ri(PhZt?zU;8P zS!&X%Z|)BBsS4i?^}l!~Y3MOZ{y$8e^LJfgyR~DxvC-IOV|yprv27cTtsT3u%_fa) z+eTwF*2($CdB^vTvHpSe!&=XC-}9Pt4pWIugtg6hr`cAX+?un#%iExvh|pFWpB|^_ za@q++A054$Q}lKJamV{Sf_2D~dS$Unb{J$jBe^kS#QbpY9En29(6oQN@^! 
zKYj9C6Zt)B>c&xS0cnFOQGtNcek32g6KPO|)GP}PJpbXG9Z4Xz{R??VyJS&<>J?kR3f84}i;c_hpiV;Ug3Ewq3k0CK#sD{2R;xwp`87x-ePzHbdJPIG{ zyY5RY_zpv8==>6&4IMHGo3B6}+)rjbiMODA=jnNk{j!gwV0CU-1no(*aWH~pp-{lE zz~BaqRk2QX!!oS`(Z0!47Vj{5&D*1Mj#Vfg(n&Dl6;=I!*zxiI!8}!B-_!H?>plk`5E{2-pxemD_TV-CG=hA@^ z7og^7;p$qO0*VjKsuJAP+Jil|eiCfIP0`~HAbg(R`5SgTMj`>qFRsSS zt;_ZVj%0yBS?mh}kx9E`zuH`3eqU}3Z-$4X1@>&5BRs|jZ|Y5zvfw9Ifo+W!==Opo zU}NoUJQ!Osw)`b*a7*TJ$8Wl!@oq!(Vyfvft)!7~4Qh5z0E9GVrL{(VZgs-qQgVb@ z>smF}xuR@l00*e?S)Wg^zo$1`vcfzV_l=)MAbhgITCjRb_u%7<_KOYi;<`@8&Sus_ zy_9peVfD1~4WFoX^S^Hl>kk3-qjTRjtX;M)lfn(iatJd3UL*kE(8_aig;n`9#&hZr zMM%Q1aMB#!Iy4RcHl|C!JNaOAle(iCP^#T@&GHlwH2ZF6%Kbaf9d!n1%4{wyv?h&x zjwSaz7(%pp;3dNP=v)%TksSB9%{RUl;d^F%- z>AjumtJjNV3fb`VRRsM^#GQ>3A^#w66-wlXFvt9Eg=r2VA@M(!l~t*e<+fa!1~xK` z&~q6MpcplefZ#yi9ytoJV2C=rvqFGaQh0u{q@A1#tyXLme;Y} z!9Cxl-H)EbvozNBj+o9Z);Vv}urSeCqvVUTH~p!P-sBu?ia=3N^OcdfX*I0xYtHhG z!ROJH*l&uWcg+4-(6Rj~q&SVIsTeu(=rT5$Vhgd6z27&5rWvIN1-D9u0jER9oPQgv zS}c9$JirAEuG!+cF1o&U`Zk=y_xpb*H8flsUZCR1L{*@ICX#g;FECsZo#{U;Kn^0^ zbR|t|)C-hRG;IhCNo69ld)Hw$3^|2bm@pJvAs-i$Ue6tgKf1y8JAcb7FHgBq^AE^{ zQT-V-4f9Cy)n_G5IWCWFC$Q}CIA@E+94&onfhB+XsJ zeDR}M;OK0&T|Vpb5YP1W^%IzLNuWy_lcAN_plJnYES%w1B~bfvccPE{$cFJL&aiw< z7r2CVv~5g|BNj1Mr42n(^)J-5NxXHaAN?QAJ|Vh#AV6f8CoFG9@3b-yRUtqRvtY?O zfA(*|lI{Ln3O$dwXKrOSS+#Gq_oZexR?rBZ+&;SK{h>bfhi&gl$@dxPp38Z z=$kp8P&j|Ocba0sii_-Ycd5|C$0+c)=hAw!guK!2f@&rB)qm6*JUsx5zFcHk41vm2 zMyA92&{#`hE1s^GF6Pq`{Jsyl@y7!;_y#O1ZED|yS$`OD*mDOS(J&%P|PLI;Yu|3%B z^+EflRNECNrZNp+S{TSKMwII?z`A}GBPyj|;ZMm>yHw!BZyShy=lenqXKcB4!-;Fx zE#psHFl;!WMWMfmDlJ+G`It$}_bEFC1~`)D$VqUi{3h@9Y6H-|H4qXEg}tiGu)O}RYO9rN(9bpNgk&uj$|^o+mlyz2 zxDs&i(84%5GLg4j=giaJ?xn<_i;$}r^+nrTA_OR9IC64XmzLKxwd_R8^awTxdy*N} zCosh*l-xN`y$|6 z`u^`5n6jvV){@mfi{c8)H!Y zqs{6MA(yk6%#ZGIklKGgO*zHNz`5K`={o<=Gd0j0kl$Ho-Dla!HhbluE)x*J)&Bfh zaUHgLKnut?J4f9>J@GEZ6B07KB_G;((2KFRC-Za-QirJNI=Z$yNOh#;j!Z3NlrfTr zxSA3S+g6|@BqH2x*7zBq)g7qV7_FbUw^<13Yx7CGHRf~TL4J@h00N_(MzaAnWcZy? 
zu8zY$Ow^$N#gOGN*J*wGXI7=R)icp20P^wNAHLe5H~8=w*bhk&C7hC*aoK5fwn|32 znY{9a`~{ak1aEziMh%G+ZUUvf_fSajc9`~o-E0X%;{T#`5FS1g_}u_%q%N*(vG=9z zuGQn1spWEecr}*Yb{~TVQdhR2gPfjrn%z|gAqFKL?SS6bc;uF>aLj|>pk4L>wNI#n zMCNAKzjR#L)9KVLs#Gp(8&Zds4rIm(BSWGig*v-TyENq>`Y5&X=N&9QjAEnf4vagk zW!AQqoC;c7IEodvD>`UJWo`te z1yNG08U)^wJk=;quWohd{=3OjWRE;gbk0FSZ~r@Wc5vjqq*FgL!%KFbD2~BPq=CdX zwV^?3C>EBrbI^g~m0mL@gA7U`hYOTOLWYa=#ZHBn+q2Vz4ISc0?ro{drB|{}@C4df zvVW0Tz-p*j$J<|}-ODed+Fk5#Jlz^t!65DMVXV>sWiF)2v$tncR@me#Mk+z(D$$u$ zhEdc3{iXsfA_dDs0xj`Wbyi%Dme%KR5K;?mEmahX3?pm zqV-JY8Q4c))U3`@f$6P(6xa>Hu704S{go-K8Bb!JzUZ=mr_F+tB&9-E#wWh^m%xQ5*#>GX`NSH>o|M?ww5bu$1HB^f?Q;po6UYc`)vI`Eu9leNU&3t--1e z?;cNLbhXoi=^-s=-@qn)`&OfZjAi3V*CX$OBG)LV`O840eb&gBRZ)sZz&Awa_x49* zr`6ie`U{z&_N;GQ$L80}%yxVJKOd=#RbFRwlTE{Bjy96Afkh-mewd{9;mEHaEYtZn zghy2KQd(|J7SO63wM)CZFLi-ww@lXee==}J*yg!+H=67V3EAj~NdttgJAyhK9{`~j z)j1@D;0fPLG^61V*E}3cpc8Q9&2inJ%h|KFDP&B(*8>e$ysY%;0Vdynl6Wl~6Vk0o zDU)*R2zVtPLs8KR)Q+^5nd_)w;e-N{DHzUe)Eb$H8zV9?u;;n#{_lA}=P=0A&hV^I zZoB6F882afXc2)CF*|iGs=)@Ms3qNYHKe0`Lk`gSzP;#!0}R4O``-RdkH>7))G7EMgWQ-f4p3@zDUw1kFCln687bi^{fJ6>*0 zYmG>xN7hi)!}3T-ue4JP27`0w5>O4%93?S`k=p6Ir!HADqhukuEBKUUO}T1nmG0tN!IW`b3xhXaWQIZ0UUkW@x4Jv zbiY~H{3OZQp&U7Ijjm3h;!)!3yF^G&F)p{(`*J@F{KFPgfZ1AXSb2Eh=-73n*CSfi z*B(lS$=|Pw;%^*d%+Kg!ir;hovWq>*(Nt z0SjuQnnABMvU8fBP*q)^Fqk?5N~KJNpq2-TG4IJBc-lbCz{#9)eSg= z7PYpaRW#F^wWMAEJNvi_44glr2s~^xbq#^#8%G~i@+oxmF_u#*czvYC#^VytI1hl* zVeU?Sfe|7oMW?U7-ThHpDD`qCjrk@neY(f6R^wm$=HZdRzYWssd}o7;v^uD z4i4JjT<=9MJKdj}O)xU|Jwm`;6cTV=A3Yijck-_fVF+^5L1^f0*GLe05|76C7>$8X z3o1Jpez*K}YHWEtxvQS?-gimp}FOtBY_IQ4uuZ$!t*O>v0t~-R6wh~J_6M)qF>?*-*)`9yJ zM!|%<6YUa8hlaTE6YjUxZ_iJ!%U$9A@2{V<5B^VFd;6?=PDog#GvY(EF7d@4R$*?0 z{~pnEV_0-@gZTjR`p=^6tweV}oo_DzppcY4=;*!Vfgrjo#!?_oDTx?>fpp9*Qm0!J zn#Mxf5>N$<>Fd|F<7<~g6rN2_Ng;5|I04|T+yDDJmpKl0FyyTSw5>YH{GLogCMqrx z5<8yt^zX_KTPt;EONYDO$1}i?Ntn8Vtbl>38ibSrp7U4Q`3Em8&z_3u&3B&+3~Q=#+E2$ z^%bQOd!DdSpszqQJIQFDB1=?&kONs6GgdDFrx zM=6gZyo@4}_Q7w}L_kHLB}u0-1ab1A#B{L1WT5U}&zLMx!^e*w|Kj57f4`yy2m3{A 
zXxr1-HLHk+s0=nYLGzS6l{6r6m<$tFHX?HZn8H950rN5((tu?<_ZypKp;7t=lCFEm zr)IF@T3h446Bt{5$1wJrt)bQLuw~2m>1RxN?Axp@x*#6HM^*t`Ll#`aZjX_+?>M!( zy?4vC`!CTX{kJdp1$klno*|q4QaqRUq`s5OTypZ}XuuR7Q$U)sdSrZtHmnVzji#1n zzpV{6_+%Q(spTqIs2B2Z+tSm7*!+J zzohlkW1^}Ob@HEm&M|SFbPseer&Zf{6`_ge)m9FxJpXQqG&&F>fw;r6C->n|nYh&uU zf>$x@6Ft2_ikU~m&_FNG4()x#K>7m)qflI$M9D8Kf(6`$5>C)v3Lz~(KwNqB^LHGr zc$^HI3uMCrLWNX$Ug;Pr5_~fQR9j;U_Laq|d%$0`VIj3VIts+Fu3NL#$)!?bi_kD| zH&s`BE7|H)Gi*gd+J#V)8xdCLAOk{&x$o@%4c(!goe*Da4TT_O+pnH{fFQ$f=E9HV z8XAR@jn7|zyWtT#ajWlg#=027PfVCr=lmsI-z6AdoPw>^V1`iZYTAYCHkN1h1&Y+3 zhEkgzL9eL(wXi1KZ@%h``svVv^{3wnLVA0J-0GWrwlWYMn4PrTjpLkB2phdtLHM&Q zgOJp5I0OSx-Hed<{$}#4fHIo<%GuGZz4-2vTo?kfe#ym*ayvDO>6-OJF zfP*}}EU}46Y-_b_7VaP~_p3>`uQ_^U%`1eY!-=F>GFX?Ig+Gf8o#TXrou#bm1tK*7 zgyonk7E@e3MUElHUzwU+$@`aGrQ67dflBo|TXjMLD%0aATpJ2YyF2#T@e*&-+s6ZB zYFc=~C~*4h?6|(cn|MDVydQnF4QixG=Ec;&gh>Uf*^RuM}Zsa&XF=OWjI-K4n)3X#0!r29{{ubLIBSP|+Z5$2_$P*N$&z0$OY zyjgQhE5cl+vWCnOosSgmR!8h?Cu5UJXeuj$(IJe+Ih&;^WXeTx5>}!FAH+OSCe2-p z0{6J%*%HzBcU*dQMBE~HcPTO7|NQ_>sd+A8B19uYrb;u#GvpbI zPvYxjzdc;NQT_6IK*%(Li%(R6!EC)S%3tr*ZH$zX@5y-9es8?(r>tTG};0`1*WMkZBMcdLupa;tIPp4SU}9h9ox!*z$&{=Lr`mN=H?lyY)+RgZa3*pUT8?v zgZNaLLdj)Bn$?jZN)$#F4eI z7S;88=0)=U@ejqD$ICM$9QJjjT~EC^1P=F_BsMm)ZYX8$Fk6|URt+a##WH}Ly6`&; zf*h3|jMts2fQeFukK|9vemFb~q+3?kll)CP6L&M>@{#Ku!?fUM?pncExoM%`jAH0- z%8HMUsW+CcHVRe0(^w7898;r7L!lViJZX&t(_EL57POlSI5G^g;lEN^MO8D|qZ)qd%0e#t@>2Kqo^pZ%w{fq&j)Cz z{BV!Uj`3x4BQJ1vM0OsZ8$x&Db3;#zLXVC9sV$0{y^?uhFlr)=-|WH12EORWOxeSS zUlX3YJ<)}ER{T}o_Aptf7pfFm5t#S{w3uN=3oH@6X{6+4rID8r`ms89{feHX@EQ3S zCw`XG)EQQW*b9b5>eLSIZ=s3|r!~Pobt0Wyy)OlmXDHXfj>19G)}nlLZ=(9lKaw}b z*uTP`&GYi70sFdq5zCT$odoTiIomG<(GHONOx3cM1l_1C?KS#yJ{gOs397`l7_%WK zHMA-JZ6D@$tI8s2`rZvZX0a8Fu{BX(bs>fgwS>P^xVSZvclHqnoq9p!$9EqnfD-RU zZX=I0=jC`Gev}%{6gFLqsJbanC5@E~R%xr_D)t;5gZNEnQ*=d`s}d0Bzs83<%byAK z=eE*YCOH5~`mK3U*|eQESgi$X97UaaXh)IZitB@=Qd^HPWUFS3JC1zT8$p`NJH>EF z90J{Z#JcH4a)e_n2l`j!KoP`VKJ(+$x|W~B9_|i?pJm~Ld13aJU=|i+gTz;V)I56D 
zs?&855dWoCCH(L>4E!-F8r6BB@FM=qpWIy;#5QPk80$MVy8Xsm9Piy3CciM*$EySa z^yqBB-tsZo>ksywQ;Bbhr_wB(5^FO?(x*+4D*Jhc2(o$!n=c3YaBn3O> zLIAid7oo}f`+EzeRyMbc0;1uNZVRXke4fbIoEyoL$gM7ZwXV?`vLs?5VtVRT)+-&oB z?VaBf5A%}micp$BCG?CMqIywX-{2Sw1n1zs4nsmUN4#E~PY{Yu_wN*i0|eMkVT=Oc zTgH~+!ssS5BPKv7Cw0IK26_zyR&-obWD(ve@g`8Sa&@40VPtWr8wa1=IWe)(>#kpn zq)AQ5xoeiD7GL!9D(Cl%9!L2lRPbD-d7{`qLGaix>jH=dk+PmF2_6|gevU3anxczr zQ>@j^4pDPK4Vi?jO%vR{!$TeUyic0=t2`ayB*_g{*Jy|NpCS)_m;M^GC!Ie-YI$=S z1(eVid`(698=DJm+xV2rNCx=%aW{Pee0@+i)yRwk_}KSz-!hA6ju>E^{dh-*jprmQ zGIRER3Z6cGfr}oj1ny|w=q)d=Pg9paIuZ!;x2r{3GI1L&RK`-_*?>z-6F>FGL6+sD z6!HCq%z{4fxu4TQ@zH71KO%6r2&G?!C=)PaN_dKH0-# z*L(D0=p_^ggn-jr78KlKka6_&>?xY-1)zS(PagHN*}ZLtjVHqx)LpqS-w!_m`dCPT;nmZ@C?XxuV&Q&wk*vpfyvpIFdtCl;gr_ zJ~FWC<3h<6c2!i+XG@T(X@Tv21Pn(sLF2fhf~X$JOIfKo=@~E#jM6Ek{qlaSOIkQ# z_xr|N6oyBLg(6NH^v^TX* zISQXgSNj18!AMVpt~z=h*XPkE?iSZ;IzoN3y{xA>FiTT4M(=$v)3E#K==AV9GPHte z;HUR|NwPU@S`&;@`AfFf9X>ofy*Ezr!9ia7|0Z#ZyNhj!REB5zC`b-ZB=u}-vImF8 zv|O{;v!_MXb^j3;2~ak}SU8dz^;K`Z89s>4Ka4L%`N3kLl@m^SM||GSQ5ln6hrX`Q z05s)w2$esh3m^TDV&M=eC;XP?%~3NEV)=0VWU8pi1bya)WEV{Du`@coi!4D@cR@c( zCfJx+9^{MU5mv)wuCJc6*NkJy*U4o-GlDUq?Jg$Zg9jnaabpOG#ebo#W{8)Q-Hr0rA^6l0{su>wN(}ov6wN_;Ct~_ewmny)XQ=sn= z)xvb(@$2j&xps4>qL%}zD;SL3chd(A(sq>c-5s|P8A=cc`4)t=2*|ZZN{%{tm>2i! 
zOa+ewq&Dw(VRtt&qE~z#ISDWR)&IaHui!*GFz+8FSk!g7$7fXc+O@tY<^hP|1_aal zAne=1x<>42(!Mw*C%} zj*EYWcI~Z4l#-r9YSs?Euxnk*vc(fnA%!C(oE`EDNLlrIuzqZ&;er;|rD=Jb ztMbhINIj%ADAnFE;kcxTuvYepF`pT*5WHvgJ*?ao_1~~?q(?H}`AhXZ6YUnRa2LHu z%{C!XwOgfaR4ovB({xXYErPsCAeRf{AOBht$blQ!DtkL)*935Anb0WgpKfzIrg}g{ zZAG1&Vlez^k9Ss?1=oo(C(y$)%8N*$5e8*ch?pWW*HbVVKZfquY;O)O($KCf2`p5# zQTpa=$(Hji5~`V0T;b{3CI0<-^^Y;9CUzM){KsF(f=wY73;v=BA#?;uJ zTR&1`lrq|yQ-efI%Sl?jOQeq>5g6Fn5-ThG#=#!T^4neEp!rhk8WeB9N=j{e{f9Ti znHZ}NdPE1V86&KQJ(R$qL+qTi?-_+S=Ed{f5Z2+JJF9g#l4N5jvDW0F`2^LJwe-rE z+%CDLRb!5!krjCNr%!iqrADo=YAe*@qg*V&Kc=vgcI}8+n|dveog1kXZ11U(WyKb7zs;Vebo#RJSIRd^dsm z8GJ!-+Kio=I(;K>eewil(SV73dMDd(h^V2BICe*x#~mgsspd0kNC*Vp5>m=FC)Wi^ zCPiiED)?Zuk?ly^vF_eXgTKD~fLLp@R@*NV3c=w3^$)juwA@^ET`#X_>Kxl9`M^rG z)^`T8!hXPR#K*ApO>9C%;6i66T?fz)Wv~P{y0JZw1>e9mdq*?Ems=A6B1pQwT<+Zu zGXLK2Zxh&rEhSxYn>7n#(pbb9ryL#qaW*HlV2($v70~t+54aUn+Bn`fw|*77bEQ0Z zZrCw#rxI6@tET=t6rjjlek1QEWwy;6fFhtgx4>v=NOrgxqsb$T>|5TfV0%j67}y)3 z;D+Ui3t|^{c8M=scFjZW&O((~1{HgzOpq}m>;*Bdw&HKEAS0xZ&I*d|k|97aW^VJ<3KdT~P3i9k1&__V0 zn6K|Ym}+oirErHS^mSOj5a{MMso(S}#W8F}PH}BR3{1J5G53SWO#}++ivVnxa_GpS z@6Ghp37U4&o!K?}EU=kE^X1NKKT)bPfl@o%lUD%7fm~fd*cjut!@B7+T;^taT8;Ug z(E_wH)|nRz-)UbV;K8dIhv35#EqGdG(`~l4K5Ym`E@C9&K3Xn*b3J|w8Vp(OePj=H z=$~nKd8~DsZ5pEqMgh9b?5R_OJ1l>rQG6+(p~y$lOgYKh(Xw_o>(a z@7_>YS6Y9I;ga~(q=m^Vw;?$_l=jvkh!=H zBF#r4^k3z#Dm&=-`yCDJgT|g;<%-z4@P#=REE?fH>H~%h0X@UE!nifw;mC>$gEgHi zkZ={c+H6Rgzo{4iL0W1*@jk=#7cpbbayGMqo_buUvBPM2G zz}lO7x-7GNc{J!s%%U6jCEJ$o(zr&`!9NH@S4?utN^6$(v4^#3Ut5|ZGr=9E;N5G4 zm2soV^wcd`gL!UPiEUaeMkV5$_(TO{>v`HEOPT%fCr=1(0kDGk6(8`asXqkV+`x@E zLS%=JE|NRJM$X&6jeJYZD{q8p1V0%BsDrp+pc)7}`-#23$X_zeytT^($uudsX&R?o1FK$D zD@cl$u%GPm%D&K}DSp>5#=J1P@hq*bqbX9|1ZB>_h+~1UAqx?;pF>2kM{efG(LrLX zmCi&3nmlkS{NNCp_TPq$I+}f5dkBSdkohKTS1`A#!YLfzmL#hSZSYsvR8LOfuu83<$DU3{uB-IH*E2S z?_vC+ukAq_Xme}*kwFYIOm|@cjCQs#aCM92;K+AX;pTFg6${=G2~!DkHZw!1U0B59 z2!FVTo=%`{;vEVk@50&cf}8FFTZXBfH74yXpvlvdv!dN1r?p?x3jVAKO5fdo1?;+S 
zw}wAAbDl>MZu5BuToKys@?cySqLHO&#jAYiLJbZfQ6Y%OEfk|yL2gwu3$M7d{?tAd|fW}w*gy->35{K=z!P`cg8C*8Qi*mHdyx`Dh zWKv%OsX|QZeQ@(Te9>DMsZFp9@#GVva(oH}7`Q|w5jGf^_Q1Hw8H%zO%Pj0WcUhmf%BDy(bLsEWEOnih2yrvMLmydOz(9rP>- z3$EB1xVJ*)cm^1yl074u(1ut#bHVO1%Lr_hN zGm=-QCx2kzn1<9-XLk(sj%Q~!CL1nBmwa@zWm;^-(veuQ6Cl;8H@w{S&+L42TXJoh zMNws<`FO?}%62#vW(k16PD5NsN_{$>!Y%%wLM$&mlf0#mXa$<~ z0u)B1GVc62T8Jfw1$y(@+f`0KWOflnlRD1LId$#++kNFVznkeCNJVY&s#N&LWBr=m zkfaAPV|Q;9`ecO^`;q2fvWZMZIIWe4zk7nlQz=!L^O(!(h061oEf#&%_W^C8yl*eZ zKfn2VJ#N=eYUPO95ikbUq zV(k^Th9cKSlE$oj#9dm5YoclOg0Ids7KNGWXm4@*9C>jBxD+F-m#%0@0dB&8IxLY1 zHdCE>%%x8*tGFDf7^AHCsGxDOy*2oPlHqm{rtAiHWqCEw3k5=Yha)y1ZYF)Cv1~WTt*u2%cja? z(IMbzajQz-ScveW(Wd+Bgh-JP?k$m(4Bj}**RrHx$V4IjcA^KppfPv-V`X4~nXyJy zF_ln^0Q43uanUi00&|pjqWb9dbW!w7vs$gi20yBUdm}1SoZG)POtG{>RGEvn&k4|&^y$_J&h;it{CZ50Qui3UP)rc}!9Z!=s!GWXnlOK=7 zfex~(Bg%zwd3kbXDZIW2jJ!jqx1J@(ZKNwZwdT4$U(GY`ah)DwTEB9|25zWb4{61y zFPN4DVRD%kAE@pKX%%YLrB~?P?V<%&hx7dLLzSZw&O1lsuaBq{Xen~C%29b1a7S?> znc^YEEylm6KnN~E=9qMM==h+5g?u$4;t!_i@T&{=kHOzv~HcJ|lRp26quPYikj97q30si)c4_Ez7L$P`Jq9}#3$ zF+Skh&Dfcfcmsb~UxEeIwgRP6og{W#>?5ohqm_>!k_D?P%4uG*_4cGzWm|_QbC`-Z zXKhk^su>Ck*{m>a7Yr=7uB8L9gXnMn+Kc6GH@f?pzDpcHp=VVTh&o@5Mj_=_Ul>t7 zY2gx(iIrs7kC~!s;fNUF5@dCKe1IZpV8o-mdPXpPA@s}n1d>h`_)aK)R1T;ujg8V@ zktyDQX~@*t8in9FM9DMCNl0S@CXZ`&)~F47#s-n0Vquan z#wvJDD8k12RcwgQHqDy)sYs&d6t%1u%kA~lGD_vjE9%r$mL#i#N_Qn6y>G~VYR`E|vk_ed`XVlp%_P3Y~k-=?nseo11R z%C9)+Zw-)qj`{~SH)FA^md!)Q3J~x?ZdI8?Zpdf!0J*{Z2 z3U_J^Z(MkF_+1hGLs7)NF7nOXb)2wf%U9YN?O8F=!b%`R=na7~11amrrpc+RgZ7Ml z2@CPKoc?Z5>aMoN*xL+&{;6{o;6^Bnn{J;LyX_Xmrpu>#OImK!IjwO606``3>>!6;Jw<`c)S9;tvusmOSkcW`2?7TMH&E zJi|LrV4H_1N5?pzGVmH*KjPqBvX@8qo699XUCnLb-|Avq#1wFU(^X+4ecKJOGc0<9 zEwBJ;GVQ`rl@aw=2S*qqG9h1iR_Q=42>%EuWp@{Otl;ndHQ}89_Q=9+(AXkC`%uAT zj1@mP&b~}U&uR4n9>}A0Q-EHz6cQ*%fzJ&*Tl*>SXC_jU1FH!{voH-!fQe{$ihRZd z2no$I)Gn zUgv(o$5nOoOpstzhH{^*tmO5;5%sGrDB`V=_GAC+9HEqHT$HHsE&6&>8|2w%XhNb3 zBLM+nE?G26Jbbq`fdKA6)4ROngp5iCiCl=$ebz(b0R#}0n0E5)2l=o>VUmkU1h=w@xz6^tkkaKT=S0S>?{Htt 
zR;;d?Gppdg(u_B)l`%6ES&Heo zk+JgA5a{4);{vEgEt`IqsipVur{A9sU)SR>TqJLvzeFZ0B;?Vfd*N`(ncNXY93eFo zz6xq@>%3|7B1Y$0a79#Av0!)IFL%twjG zO{-M2U5_}-xhTxJVrKNH@5?;R8}BqKayP^kTNzypgjsEJ04_`h4@Wn?&W!=*2oNcL z+8Hgt8ostp#Ee(uV38y-M6!1g&{AcWnbDa+@PDNnR^V}9b4nTc@BSx=;uV&oRDz}^m z(UzYSY{4%C=mg=xeuStnwvQ`X+C92$9wKite~qPO1*rzr@i zTVMNsdu>@oP>J)Ihf0fvTv<-s)q}qZ{F?XbB2S?~59op+Cs|u=bv=amI1zdl_}Xdq z$WoC@x8YK9>iU%g13lSII3WjqQ}QP;K8bN1!^KY%kZ_dlz_@e!Jiem)BV2NY z<$K(2nil8y5*k1^+%*5_cz_IeTAk%)s`CxTHz&QGZWk zNYA&P^Z09lH?_0j$3Ja4#Zar}&X&KEJ*^f7RR4t5sqeE^j#5RzAx`I#GK*5V_KW)M zMmX9vI#&d47Il?8k{Pdo@G~X{MxL&|j50|Ct}(&8tgfJfD-zsA?!#FCS z?gGvidibVm1^LIXcBIeod+0IVy#)H0UGSf6<~$!+<^QsjFc&;CMH@pf=f$drqIn{H zUrquc=#rLj?S@c=tUzR}jm;5VTZ{$Sfp_~<{YyF?lhado;71FOPG~Fm_4~d=6tIFI z0lA#Z=T~w1tjtAZLO;#jc!WC{r4|jIxTRBCm#2_VDj9MkV3}Va$))AZ9aF<_z~f@b#!r@ z7#1pbr>~f)Mo%Y@+DG|0fH91wC@}%;g$3&42D(JS)>xpt*`k7~G9?5n2oXiCq3dA5 zU?o)~-TG3y6UGi6NjsT6-60qOE}f!zC>`<-_~yJXF|sG+O82i?aywyB61;V9I32>& zb!)}I5W0|cJN=@0szAm9XEQ8H+di`eX>5kwNgf@)19w;l8ODl`@5QiQmZb=z!aIK_ z8|52(A7LGgXSSspo59dqKt~t=j`**59n)2K$$cr@c;a2pg<(g>fb<@F=DOvTJVQF} zpF12>MI;6585SzvE4W$t`Qx7-@SwE9!OQpaMJuG;@>$i(>p3y7TZ5bR3SB_iq&;ZO z$|m+Lp_7dkbzOf#)G3T1L@RMeTjO~YdBj5EzMS|AVu`2;QAmxkXgT$$Wg*~LR+Trg zz$hBJG-@yt^Wg?evcZD}3Hjs+ar%F=GECS*~Q5Y3mL(NX= zJ^9BXXRvtkuJC81W(L~dU9_7I*2H&;laDX(9VIwfw)5DjW4YApEucCv21_^S)hjjY z`C^lRo2(GdlH>Z?5Ea#DIK*Jp7LEP$aM0JDS(9=@1INLrTAU|ub=VTL`&#>fYsGiO z8$ur!$+D1aA;N`OqMa$jXEtPp7iSy5`%T5p_Y_aqja1ma!L@893Bv93cKQ=Py83P; z2rb--q;{({R7qF2KN}P(8avXOp$>`a;e3EOmX-EMf`4VJFbr#wftg0O2d4Uick`#( z`&}d+u40?{v7gSq!!L*0payHWzSdAm7P#!Z=V&(4Y@!?tD&geHX_sk?=sB7(x{9D6 zP-P4vCNxx^eiqw-wwiR!1|5rv(oOc!avB*VJTnVe6#OmK&j~cJP{?Ub0`u3!!=zjTt+fF0lP<#{A70jy`m#)Xb2Z zgoEGG3nnD>;YTr@v&F=Zei&Jug4&5GmScP_e#zH18RBn>yL5<@G!_79)sP*&K#U6I zQYtTkrZd-UZl;UzCba7_wzf?m(b(7 zrnb1ebz$Q4L@@C_sAg2v5V{^&50qm*`jZfa#v11es7&r+L?n-*5~8VsogsRtWJDci z_Ef5C)n$)LRP6mRq|1~^&bJStPch%wPsEP9(|EEUV-$&HW@IcX*Gz?HX@!_ZnkxI?43JmxX zRWumDt3}sZaZ?|xc$3Hkxl*SKR&94D`5pF;-m`FtV?m2-o|9-IliXa6<@J>}GBzTE 
zXoZ+`OUm_owd&QQwJuh77)Qt0Xx{lZn-&B-CX~sgk&KP?A?3(ZIMB=G<36WN?_e7KH5``2lnxmbh+yh1!{OqhO8LD=TM$gQYIomeG@57o8Y77bG)*VC2uY zZ!D|(-v@xp{%zNARc`j-oXM{=rBv|t3Lp<+3jA>_hBRoYC2u3+XbHctz;6b|bl$y? zt_ld%Ny>;a+TI<(!@%W^qD~Kw_?W}3EwT37Pc0cy6IK9Q9Gp-S(#Au0dX%Bc;ytRl zZ|4N=XaDe9ADwu0Txm^LjO#U=$M#o#(MIUjYPfdBux@AA_hOK^2|a~N7Ghp(F;u6U z_7g?BYDx9QOx_jrtE&z zG{NBe=Xf$A8z3%3%;41r!g2#L4}QrlcB{& z;1i6=m}0J0NTZ-}VjWs6Ej_Kxo9mn52~4flfu*qs=U}3&FPw*-feGZx5zwlHJ*kam z7guSPab~r;-w^tY`(J&97I5=~fry}H84_c-P>`Ph0fUOLfTlQxuj}ZFR5n%ixX7>n zq3RriGmEx$9ox1#>e#kz^NVfUwr$(C)v;}K?2dEtrYGH`!Z8LoPORpItfP-uRi5J{qwM{lOlmNHMD>DeSpUtK+xaX{k(%W z5nUPC`;4!JJwhqt&6j_XD^vE3tH2f2|3Mznv4#d!1@rzuVf8rYO6w%F5j5Rj=jKG$ z4(kQ(bxPX&b(#GaNZ!B+z4>WLjc&KVutUX_K=A|wol5YXdb3O#WC)`}8jv2dt`-Io z5)n@xxUn^wQt^m3T3dlE*52h7JQG3(Is#VvmJ6#rqXDhq-)38{s==Zgk5j^?oH5HjRkB9qvC%U$s-TUstGQ`_jZP^*oSRa7D}*9tszOP z3DG<*%tDa0&2APGrl`vJc1lyuDwZaye94+bx`(#P9Q7fL!6R?F6?)q}#P1{ip}4%B zz#H!??X^xP+;tAoIjDT*2;^z}jo^`_Cu!R#5}X`}T(1)tJ>OTdAFbAdhPI|K8id4Q zb9>ueb`jpY_C{7lEZL8{?~yR);G9q5!i?$z&eS2mgker{1fI`i1)1{{n#L_{-m^61 z>6mq^*DVXQSKJ5YB6j-e`1UF~Xxzt;54EWGjEzf!yDPNmhkIgSfE|EoUy2ptiHd#b zL^$iAOHSipaQ}f{Q32Ru5KSmPs1_=_Qt#?`%4kqRh4V*E5cN4gtjiuU?27n4{&^Eb zpDe#KTf#EvAjB2EKc2Uenz+mVJn82?d%&=)|4vx#;+-Zz(T~|QTe-{|vPAz!3&72e zb5CNpSw4qWtdNS03M#gI``z%5TsD>LG-b`9Zz#)%fQjT+*kmQnTSEaun5!MV!?H0lHOR51u8b@DSp zYqccnHY#iW6Q4$`E%BYU1`9Mdd^3Y&fnEl?1Rq7R%D1hL3-h%@4$Hla1*0mJe=K(a zQ6B;|B)@y&u+yhPs(PQq$kpmI)mn0`1CprGmt;eIhctS+f|BMW*)VmmpX?b z50lyQJz2Yc;YEZ*nT?M~=|FHp5g9Tp81baVSjB*)kXd0xt%E<=AhJJSAH=BpBDNv& zh21$I_MQ_-;*lTzAUrH?JLq?YpXubI81goH^6w3*ctDd zS|i|Uvf*L9HM>{Wbanxv!;5AV-IO=R*rk_{+V>5i4@&Q)Qn)z1w&*}1S}yS;5H`IY z1vA8+zqLISzkt*!)N_5T>E{CS1X%H9XhvX?bBd)m7`xHrBWpj~0gZF3zCU}x?!uu}n>kYVg|(@o_PiTk+` z@EDLFrdzv<6L6RmsP_LdqpNaYy96|orjrE!a^rE_$Rb2qmq<+{kH>o=^rbu2U-S&Q zbU&RV+Y(&fTG5}FWy3S&OxJ6uz(peo4x&GRDJm`0g|exSz>OR;a&6OzWr&WF#)&h; zOAWeIt!7(%{u6zW(0*TTZFogo)|&gK_c4Pulub5|;qHQpcXR~J$&MWS!n{p__T3DUMFMXr+$d<6$QKmXnhg6f;M)bI`;7xbi~0 
z%g6zR2Bq*H0m+FCF2NiLl;j+MAd^{FA@f^VF<$9euRKF1`qDL7rK9?5Lj zArc%QEn)%Q;)Lwo(-EQ6r#%g~C!lyPzc?A%E~RAdn&fxQ#aPLAo2g3t$_|`g<~MJp zJnr1LifSJg0X*h}vpWzyz;IK*;}>n<2HP>AIX>2b%SK6@lFd-N49oTJoDsrMyYw2v zqu~*2QR(~D91$Ge!VEkkVNyk{lBTCN*ohmS!^}$-8ayIHmnYZyVg2l3MJ62U2LvV< z2w&c%sv5fdo*KjEt;9MXJbr&a#L@(;OGtLSpnxnuUM!}Vmfe)(OvBYf%y;)HzyG|Q zcKe07GZ5GJPq_QLNq=vC2-4m04eqH>g#Ti^Wg@ba6os8F`$1!?j*8Enu9315B&t)S zkAD#NAAbmU-PXv}s{ZHzwHw2lp12J!DrUC%)kx)h8hGT??-g0Xx~tuxeL&j#VxIXQRI2nI7MhzI7kv(IbCK;DQt&DK(JDlJv?+pkNztAmw!)eft}vqA#3IO?746yKe)gnoR<$ekGKW^ZHD=96m;(fpEzAs-6Ra!{vo>BIRC zNE5KFewI*C0!l~)$V~Ls)U$(Bk~T%EDvR>!=$Kc#|K~>kE@2E9h{~)FP~rw(r>>~+ zw+i60^*F{1z+P3MVpE+I69lxlDeUD}#4iy|kj7&=tdlMisR4twFcQVhO1i`<_Ko2> zOcl(s+U*?puCNEB2M|F=!lU{}KM%TkxNQOvKz*%mFpmB;knXn$MQaj`pdjQM-1y>K z3EdpG?F-TK3*G@h^1scf-(9}&s@(7oY!AU(yD2-@2%F%o5;+}rqRvq+0`w5XRZ<|; z(cZW~x*Ph;hq5HlX;q;e#w|$#_Kc5=@6i?;``r}*dYm+8oDn10GyzmvbG7@=)9QB{ zQ+Y!xX9%TcoR#33o(lO7;I99uz5h<6EHU>Db9A~_1hB;OCz9|zrx{RR9h1h!`?>XGmjkuom2Z|F|4&uPFS~I!7K0JI7KA!E=2V$oexDUsN6z!^s+ftx4|w|;FT zrlX_ZlOa5uEHHy3;Nc!MR;-U4hItPNK*5rWo1$Wvakbz%oop|d=G!Q4x9}gbcYRO3aMMA>#qITsUm+6OPE|9^Snx$Ry7x(F?u5(haps z`6_hn|1j+^a%Ym|boS&lJ|u&L=a36Aot(@_vYZ1D)hb&Y*m%nX%Y*;~f2l>a5QYr9 zMS` z=gdJd)Kw+iX8_QydeL?ZN%0SNP2kz!)g!{#w;nzjy)ck!w?;bI z+_LiW^-Hi$dFm4$FSU1sXrsVBRRf(AerFCGE<0O8YfEUC z-PTaknEc%i{mxL>O>dkTV&1`eQ(3xRw4#iej_R}%@2SFX2gBjS=)UsCwk#u~=FpZH z)}PyZ=xZL>7H@laXMnG98W^KuzXf93EMv7W(n!4@6FgBf=E^!a?WdqIZx4!Ip=IHm z+tyYs=PAvuUi1bbOU--_Vs$uTbMx15E=<+y^o2Q{;)Jd`2~59jZj}Z|cNl3jkli%7 z8DaX}ihqOH`93GT%*Xlq-}v2lz3jO4g$eEqO||KbZVzR(dtD&d;u9Fy*&+VelB|mG ztaO*V(!_0RjhQ&rgf{HU1`;{AN;`Qd%bmlj4S(RgTr)^Fx8i`<;kw zZ4puu9R!f4p!Im9yKmBP-7k%@kb^)?cQ zuwd>ve&&aNe2O5DC<>KWmgf~vqkTwsT@fU+q;W;!N^E;>&)pGQ&AAc06N`aS`SRhs zI@iicM(JA(xYRFqsw!f^`h~y~3XLZdijUOut+FvpykJ>^xuGXs=UB1vCStY~_Pd~) z4=g&*kC!lzJTQL%y@`oDx~hwytImPhjqsj1P;j(AZ8(uoeOQ9imA7)v(Ya~DY^tbg z2(H(=`_LugBniNoo~y7S||H_^72#t$zw?O1i0_90msXCZ|Si zYGLGH5H94)62*?;JL+VmM8|u^y1DA#NR&|v03%{lJ+_^f1~+C3Lp5DMbkcCrYsA>c 
zBNp4uUeZ6r`ri2wMBgtyg1p>>-CrY%k#bMv zP+*L;4m>&sIDK7#H%CGgOI1~VR&kaB>jd0&HQ-%b+x>M0!Ij->+&aRLO-xZD9-zoU zP6q=5I0tLNB+8s&Oi(dS6ZGAU1!}JpNwW@}1mT=miHT~(Ouix^vLHM4{QYpCz6qxL z?+h>v@_<~2DKG`hjdP^QJLDPCkWiScz5apy|mX_Yl z9#(g1FOrHLJ0?1-4StMEG8V#M+OD5r%s4wW50Z;h(;1#lgz~U{$H(YloexN$AY7S7X5G~@6G)XUMG`Gtb5MhEcC7nn5w3?+ye-RM zZL{Cp43%BKGvu#(=x^O$^78f$jGCI(C$Pk~s;oj^=RJamJqCoIPrxf~ zYz$ji1~yjspNe@OWiMY`tO%$|Tfx+wm5Q^l?T#~(O+)ht*LFLDwsxS{HIjHC?pFK4 zK>hQRo{CdUL1K<*P(>WnS@nE2zNLp|h0**%BSz%&Emd~Bf2?+7AvJUNC zVL4YWczCu(o*uEA?6J3F-v@{dZM`A2S9hM_K*DY1=^q9D*5ujUNX}wWw&COXMly@h zV^sy{VGmq-DCzF3y2NMNQ)eY9uIb{-fvT0|!u~zwo+17IwVP)R*whr^*_+ok=Pou& z7zz`4TB~L$)zjS=tHFP=LW_~~CtjSS6D$K+(~@`?3FNDuakn*=FL`It+ZMpSpOftN z{QSOoKi2<3BMK^6jd`WNpMfNv)`;h>^Ywou>d^xuz7_yL)Ytw-A^I90p|@t5?rfT9ofFl^9Tmvm zb})NA_G3V<^Sf9FGt;Z}J2wV)X0W^8*XnlsVZIt>r_T^e(g>5&Gv5|QjBR_AR7FmiJhHML;@N! zPJ0xsN$G&2`}>|=Ok8fB(5pd1%0Kjjg(_(6XpNBX`NFWh#?McmVZ?z~RCH;q!qSKZ zKx&e|Xlzz3$4$3~d}YZefzS+82P4+jVsl6^1&uu7N$v~`s6l%81|#NX9}FZh65#6t zVU4HLQT?%j&+@;km`#8|TAmH$ zg90DcKy1Q}lcy5uRXy8uAK)17b$A?QPA}3o<#;N>svab?zqr^9dMg-Umm8>LGY-FN z*8U9^vW<%~I0dgPW7bf9&`A*L73!<+iAI*@@qOJB{%{+d*lhaOm>JV#vVfPg4rhjm zxwgJOmR$`;dCBK~pzieILHrCN=wOh^pcV(XLGfn;E&v_Cq#=PzjWDeJVxFeW_sN5H z!=IUjGQw5DAMbCOPxNcT(sQ+n=8LMjv4oMu#LOurtW zCYy^TYYn}L&}ka6z-!? 
zB<#k9Y@v9HD!O=aAdu=qH)c!Xn9kr4EqJ31UniS(u0=N+bIh@h+3;|{Re6U+pv`wK z6Aeqw-v~I|M#qU}Q-T&XVR{z_U72G#B2ZA}Y0{W|yUMY`@kc~2qsCDd@SHl9Q?7ug z9e1k~Iw8~H7sZPTsG6%IqZ(>hN%-*J@N8Iy3{dbu;m6blM?T(P`z2G$N^u0P)FNAG zkr|2BKGk*HH4LY%Utxy2Csz3y{10IPTVl^s}Ioj@>;{S2GCwHDoB9i z{@yk^V^I1H6h3UsK&B+a<8M9hJ>f-LYGK;0L9y7#3k}X0QHzL7TP&kYin7{0RoQb3 zjG~I5?Dy{;&uR$RmJt77bcgM&6%s81$)x1;VI)JhJ6H48%S?@`1^TgUR_4>w(=~A4 zo|F_Dxai(g>K-SUDOikRxtvK0w4K)>@;qWCyqT`iTyL?>-Tv2w{fQIttEk#TME(&s zZa+BCN*7kN-BRE(B;mBc#T&*Q+KO!DOcDuM58bp>z}@DY`&U^R_Lv38bbxj!LD?~wh&G2dkUU4q?}xn6X3{_CHty(_Udd;0pdMm3x0CVmTRA}tfJ zH$kGrEQ@uqRME!@wu&(KpYD9CR@I~J?ft`fAlopSskeMYCS;<6=R?7;(oCP4Xc&gD z%Nn8~0@wM2?g^@BwMdxi%yeqQBmZAhUD?vweik>C60%RswocYNe7glIb{K`DCEk)s z$J%QVo!WkrRIJ|YPBtJGI!c$28a0CfFe@LGNY$SdO??H>JIcB_BxmvMZw+2&$w-4%x~C9G0*MkbG5wEJdvZkm4FTfwg-7&OWXDoOExn zBO)5n!poaASm2|h$Ja5jcYkEs`4~QRI6bKvzr?ZL9(!p#vb?Z8vG;iW*+;tA|JUn` z$sXrfK3P^?#YmV1P1M?U?q-)M+qfbn&ae0h zZsg^Ko9A`W)b8dS2HFpuU2rE@kX(VHPwr?PdN_^@ER8uJybAKZHdAaeF}R6MVdN@U zJY|iL1(R+@6?J6`iR*^w(B9EMWQOT}GtW6gH?ud97zITzxlaB_C&_7@qP6gAo{f|>;bGYTi>C!2Vfte(CZ1o&AM2_a#p}J(#gdrKLJzHL^#z; zCV+L!A?l)5l1B;{h@ep$$5ma|K+u*u+NiEU!ZJREs8BOXlzsmLCW@v-yrWoKt1nyE zt5NDZT{`)l=(D_F^#N)#2;z@(_1C%Z&_PqJ8@n_bf-4~^O8OBnkBGk1kgq`xQS}NU zJL)QZeCzh=OuN3hU{`XU%fv7a>cf;}#4pQ`C#Q%qZcPi}0g0;5+%N)@J3NeZAYeG` zH>R+q@VQlqr$<$f29jDguLjkZt%Lc%Eacw&zy<+XMjjDKZZ?=rO>bL+l}VP{OQrbZ zK%SwH?-SBA0cPuzwi`q+w(7zIL8kK}ys)LxFbI{CJaCR9;pU!(}YOL3Uh=E2u|3F;oUn2!nzN zUUh-fD$Tr3R%7)9o0F4v?i5zjZ|4cnrcB4PD}unMmI^2vQ?9jigH<&xR+|Y*Vo9$j zhd!5U2u-9~Np~vnKrg(n(m>B3N4IRuAXS)HDP&oKgRNVq3%EqvjDhR1#q>xu(vcYs z9k2W(YxeJFWs{df7aSd5Gm#$Gie#)#6lDb+EO=9MEIxbcViO`? 
zbaOiH#kmlxmAXbsB@J~((jO_{Fyl43VE!%=jF+$wA|FpCLPwfTV>-TomqRL^>%2x4 z?km@6EWU6^1BRG|ATBZws}##Oo;n4~(-lcAy4E&MeDt|(xHjdMm(Cp9jR3t{e;Q41c2twcDl6%7SW zep(6BDw_ND64X1Z3LRad;ejXVkHSig?Wa2yI2lo-cOsn3;Zd7 z{^?6V1j7IO;~Kxap&|79Ys}6lRH-I4mjcCi86>5|uPw^R_wNN;YAf+3-I@V8j@42{ zz^v>$*7Mt$KohNoN5Y*C{hAo@$t&X&9&{920J1NpbrD7sXBEBa6B*qWbR)3FK(=MB z*a~!H6sf5m*psM&CYF`HHG0X#WxNe?P-jd0SS!p?Z6YT1g5k27&Hu*N@0Ld{_u=Ng zRFHPw+)dulJ}|4G+x9(B`=jXxRfXK)Ifv@4yT2wvf?3p&(NVC?_~ z)wbXOFlcN%IyLRLl`$wa5!yKh5;EZ73Q-pz+$N+K{Bo{d40Q2P*s)B(a6`4jg8?|# z-q4zDmm?2MamYG=-zm_0SKIAT%)h&t+1*jdAB^-(pI78kiUux)cMb0w`TEu^eA zIBK*hpM5-&W~i|-@UMO;8y>nc6;0k_n784_xxdD1JKYhpy|3Kve^;zD2(OxK=8rw5 zRnv@{RI(VmGY)Eg6b4Op`1_-R2siTanv*$a_93NYQ;_tUm<0&hvT0zECO6U={yhodXy+7IeZf?;GvjjSKVU$*JJk^Szb`-X$W}ivO=oT7PClX>y<@jg=#sHP+m4ua@s#M<$dw!^**6&bpx`Zs`CsPvmDYX*Ht7V zYE;@_0t`lCfej;2fHa6{gRzp@(~z94u>R~R9v*YzLFgx2mk0%`Fm{nHQd~N}t@S$j zb9-^SJNkyY?RbSB{`Sc9y0#PdnMhUOj0>vFg_y-qB>BrReGYz4C8$Tsa3*K(UKRD; z_H6IWFgzMxxdTH(mkZ#d*u8^`>JXI#Qc5f&A_;vqBUj}V}g3PWOudA9aqsP%v$AQmZ zy+tG>x{P9@Jw*;Wq&JbEcpFot=j7apA3bIS?0M1Iz+_kwU|gn93*MKOfCtG(NOHIT z&K9z<+JqyS1L*0B9r}?y{!%izHn4KS#r@IBq&2l@>l*UlA3h$Sr5#G^%+ND(W&tkA z(=$1(`NTJ_h%Q|BDD4I!mr+lxmTFV{iq3gj&Cx*OzKv1I!6P2!Y(lpzTj+h-Dgr-< zorIahq{t+OEoIE5LH=fR`#bI!|*&9`O=tFjHzH!iCTN0k?2$fh`b@kR)ZYdP{N=OQMWJ)jVS*oI!*t zXXH{e*yU;)@b77FNdwuHwy`iwEm93Q za11{#g`nW!3Cm=8fP2Va5P8(Oqh1JP7et(l$-7-1a~!RoNWjy`uNh0I^x*;Khq5W@ zE+xK&C^(#Xs_8sgLzv2st8Y588OK)HybUc5{#P}d?LHU81iWuRN$c37QD&6e8rG!l z`}zhvc}{%7spOVTL;&CbRAW^In$mQ1^iy$((cqsUPSj_Ok9Ez};d0te|NM-!#+0i+ z*U)42sCNQ@Pu|@q(m8s51JACzAxhro9LP%8v!={?Oep1uQZi?5|C|IOuW$rw)yavK z9yrLc7e35w%9IUx4hbcc7AWiR0X2Y}A~-}n&!H?|l#_n#KCp6-eP!h?WY&k^ru+S8 z&K~9V>Q?ghyU7F#ne&`=dJk%%ts=1=p^`9aAU|%p27jyL6XhCjTeyySFWw}P6=^^i z1`THR!Z@@>;(RY`nn4nUdYU$Fo(lwD4jQgZO-*)|rJodASIvIQ>eFj^~%d z*{POC7Po~KS4YRuR6eoZ2{X}y5Rw{xyF zc2sH%!NOf?miaG^KAb|uOj&h26xs0CpJvEOyebZ#v+(Bn>16j5Fk=x2a;)z^~ z5Om7?<4>LXR*#JejV6qKrNWw10&)rYrzIx(#ntZ1sM%B?yijyhy4<_;`!v}|NN{Hn 
zTzk1VhmL$1TzwQnN1EP;9?(29r6ahr<}2!0S)=y2p^vv>hXtqZgbhyzQRQxqr)Yek zB`5WB6v18_b(52l=(TFK_orpq(38OFUXS;DeSFi$LYWj(=RHIp(38DfYiMk$`wa|( zujPNtH;o`*3&{OUG8jrO0s_0cwreThEIk&3PwlZU3=DKDZrM`G;NO(Lm~nMg^cYLq%Ni4N@H#i?3jDd3>XX4;1cNxqIzA zW`Vn!an(P0L)COOSUK)l2QcVgF>G=zOpaXj?(*Lpk2)&q1E4v5(O|Cc`Bf^%#^rPH zd)cfo{;S0+h1pRk!bt$X=FqH9A3WQwel4awbd)6i?16ZxeUKi49&x!NKuHL&p&uBF zRkVW2!DDx5Cki2~?1r(0jtq=Mg&!9$|H$10BZ3`Xy%J`Cv0i3BDDk=yUAY!55|r`l z{q4RHT{K`}a-#UUrn}CFyGH9eq+*LYsuJm7Y}x%g=--{0-?Q1vN?Tt@+I4r}_NT7m zY`GL^b(bv7^PM_*u9fKvAm(K7)nbK+@q-7Y?p_1onN5hUZM8$zyw9Q?ZYK6lBFWTb zQLFhOWPFmeMJ4G%aTF^?x8bzP15Q;H*!*Ow^7DkPC|Uq$@-Klf43L&Io=x?BAHm~o z1;y+w|M#UX+9#aas$>NUzRmk;$tM8s)$wT~u42!;ifO~o<_1P~-1-Qw00(#ccEl1N zREc}@gmpvTrZ5n;grlG5u-^8KcYTFxW}Dg&Vs!ljP1!5Dn0`cD28kRZ$F|l>t6@hf zrTWKfV3vfE0Yp>KI2Z}Xb%F|}b2N%7s%ltNdfuB=!bjAw`ahz=nhpLIF%E5`*8`g- zNtPHhXX&ioX42%{jnaGiH3f6i)0af{4nfyj$v3?>|m1_RGDge;3r@#)AszYSIeX|nA!`mK^ z_x*C}{yrln;P;CB+b*J*rgJ4e30+A=Hb*S)6bf+~Ot z#LIYA1UEi}5{fb{h=z;mlw_P(wTz1k>(6|2s@e!gnD;8(hAqo-Hb50lJzMB9lahz0 zA|O`wg_AtN*YOFDk9qCPY^_-sulAn;0q65nPxg!hXNDo0etvmzWez)o3ghmDp%2UO zTjrR#LSdGc8fgibZ_GsAmJuJbHEU}*FNO-d1-|-Pnsrl(`HH|`UW?M4P$0YZ*IuMH zM~x{(YZGV?9Wr(|GHMI7CPIgnfbbz1+LFYPds5JRW4u=AYDr|Xtln_{wS5T0W$C(a zYcK0Ml==5(B@A(7qfGS~+g0QWo4|+8PQ+1u|K`i9FgO=~*|QLXk~uxHQIhmgc?MVv zn_26fbDUG{OfImaBM!``xQoCIwqS!07SfftGt=#3_@1Ihz*l zT~9#O3_f)dT!somhYnCElNgudB{T!I86`AlLOvuxTJ&qua;(-WX~g&&73iM_xS}Un ztj3UnY)tSc5v0=t7yKD5EQzm<`L*bxXLtD1D5`&T5ZO>3fCF%dVkkVQ5$vWr%;Qb0)=o;a!|1Or(1AbiD z5t)j&(In|ZuGZXlPb&QHt8|#@VxbAM>LT!yx4nJV@F(^%8 zlbcX_nuD&MXt%aD24d#PSDIKAT~`XVT5z8AnJ3a26b1WxRV>>iEXfH9MkwGl=WB%{ zjB61?nD#3=uYVo%r+u($|G8c6`}l!ei0=;Fc%Qi4u2Ad@;~;w??Xnc@b;Ycs7DNz4 z3@mLFQBh~>>l~~Hp+<)$?-Q$@6!QmQUX=2*+kMf@Bq&2iE(l!nYJLh z7MQs;YO=#FaF8S(^RWx?oow#Uo`hc64;T?u&?^^Jp+vEyrUUAZ?{44FuWJ3TEaiI% z=a3al4+jBQcnXA)B~0vykS!kgcar6!`DbxKv=S1QV)iHw5lE4gQokiqqZJYl{1@yj z1vhA@_Ig}f=v9IlCqo@3)<&+%JraY^z(h0Di4@0_>PhYu-Z0aMc`vya>*C<@ijIX& zmk{-$Wk|AREzz$MQ1vi!Y&X!dQRs$hGy{z2#NAgJpGuh89WO 
z8>6MD-~Gl&U>U^q5i`>8HRI|4N}8jV13874`T^EPM#NU7TFZ4etd`Gsc-9zJgd6hs z{89!x96yYfls-iw+5io7U|sUDQFBN;A47b0yPYxa8$bUsiZLvC3JyMx@TtGQcicZ+ zoVk7Ax`ZFOL9K193w8;IW55D*!MMf)`4!*6YN*uc-D^3TBWxFz*@B{4z`>YXgsRjB zj;Me@V8pv1BqY#y$~EZdS5Q+na^n*R!0hutnlofFNTe;D6QkLAAgwMKOobbyph1tP zV{9c*S_{j*=rl|9Xz?OF2L`T|+*EDS>nY3Qz?npj;lBBR#=NV&CsRz}9Noibc_BxX z*&yViqm||oPu)Yg6QTJX88g>i2uiy4l!UcP9}LI*b|eBewf zXUJgkV02H{mj`Fxs*Muo)X@P$8taNv;iegA(bnhl0FN|V=4|y?RF-4`_D_DS_xw=C z1#~(>AKO!CWFEZ)3kF5fG^^&e19?wha}_8UY9W!#W&Q5G98&j!WTF=J-H)|jY8^(h3<5*Yy@ zWaT@6-GfYXv=wY@zDZaMRH=$aC_Pw}z(|0VfXB=imt0pS&830m!i>vQ_E@KrY@Y+Z zn!?upA~J00h_mvaG-NHO-Z-hAD{3h2IJbkanP|6XP7vV&s}j>iQ-x=qyC#|XPm30* zt(+ZUR}MFT>xPXkzoOCO_XPaiycw{I*@M9MuO={KWlFPTomfpWb0*b0`8mIeR0e8LI8|Fp7J@f{EuVBTZAGH>h zSjQBJFBbX2i7J*#pOYl|6mkuyNE(~F@pvwNFy_WO;%seOb#QwKZ?ejbjl0{qgLMD5 z{{AD02Seo+%<(e*g!@#)^e`jB*3&}kZ?!fcSavuqEL5stV=W5=;??T|8w~4zE)-(a z?f-+n@<5E>c6|V_BFqYMaemM_@!%C#3p%|3ilzvU{4*{MX`%%S7AGf#9B#J~8Lpn} z5vzkaqB3X&@#gF85ZU)N?{+&)=nX3`m*>dXf6D*B3CE2>Dp$F#G66(D|5&8;wyc%e zkBx2;bMrloc)Pcac=wlmH;|quju(W3*DP?(hbO_``qyMZb5i~N{r;i&%f@P7K;PTN zP5vt3Ufg#P|G+8VHzyya@pf9K4$V@v)$dLF5?#fp{AI$c)8^dM&j)&A!&z44=G9oM zG!kfw?JXM*+O_jCg>mHXYviT@cZ7e2r(<;{6MUAjW~Z}r+Q?Xa%P~&)Dzxr)M)2QP znEXL?J^z5d7mLKRx6{T;Ws-L_8?%*PkZWY=bjn)m3GYw}dP(9@ymE;7^rHd4;|Uu_ zVb58Dg{rC96y+WWEb7Gm0eR9Spqe!)^JscQIpdE@GoBu>+sDdbL_}tV zha{p6(4AMl70T*hkx>Pr82!$Ehv9D^q4|H^{GVY%MFdC@c!Hl{HT4)NS2p9kvW&sTBaB_M)qr7_uW)D`s z-Uc`T6gq(80lVAf?4R;u82-*9q8P89%A}B@|K&(`|Hv`csTrz{UTj0U?jS9f!~!sr z64x4mo55AN6myC!v3kC=2ji=1(kHS_;?!%P&*XzIVva9nPInNgbOYbo;0Z&(x};$n zBvEPQk3Fdb?g>v)Jsah`ki?Uro}oA6Skc38j3gzXqe+JPL@g2+InAE=zn)vYi$hs? 
z-9PmtLH@JT(lR#93FSqGHOW~McLVt?Igk(?-A++WxRpdyxmb9X&M%i-x?uOI*K*<3 zu0-(eX9_P^`0Hg6ctz8A-XAwh34hulHw{9YeblD>yi)@fZ1D#hB1u}m# z2c3($%|kAg%`Tuz1}IbTm*3zF4!!=&iINHr5LEk>FQnt6F^O0olXSg$BO!6DNf^e; zBD<0%f*BYx#_mtAPlf@4afq`B=Lw^fBw*|8J6k#-(%2x*pY4&Lr`*OZgT>*`O(B`S z;Wkj3^6RN$e?tTVfPw7PBSe$0z-d*^vqm>+bS1rPAjGT!?|Nxf8EWdRa*aP8=>7BM zoErQRZ#$sf^XNq)Mg~6QICSln%zA~e>jg5g820x?!C3^@7<%; zdPG+)9^SX^a}liZ7DqUPB4UY-qQ*au7}2Zxa;a7e=7{Gg_zl;6(Kck{gn zExRbwdS*-k!d>{(hZ+Ww0x+a*me*FT6H_?@S}VsyOO=k77qb7jNJyfmo28W{&Jil~ zEhA+U&)*YIqO3cVbV@+)r^jfhZ*a-G~X8Wib|N3zp9~}zl?0M;|ODpw}J|WoZgdd zNUb$sy9j@4qc`H}8bcevsY|%FV%!W?s1LrNh55y*@&2%PoYMF1n(uW%{P?AED=4^= zAv=Hbk1#P<*IB@$?70(!=3N0ezJgbU?A^Um{ZRVg})54$?!6vc39=fsQ^kT!DYLSXs7Xlt} z`8Y{5ne`|8c+9_a!f@HZ7 zLi4#lej*S)+8((?D|;v_Z^Ui{*5ohAHj$_kjg%*$8fS|f?2Je!S_mA6+5j!ZnZu8W zLR!FYqQ{x)GsiUn32@+bXfSSROa9mV-*ms~S-z`;{OuoonQ%V1fau)Hux^}xs>3gA z|1hh!LJtM2pryn@?UcjwLp`)aOBdiY6MW}RUX>{D&99^L1~_nYaE;_u>5^wG4?QU) zrV5sOH12nwt=^WnnrV1ANZv;=AB44Hcn0NuXpHH-#{ zqN*yGsCy1~weQ`p_kcUFy8bT?)YiTIpZ`4b{xMBz{Xq+fpqCdrGi;$ELsm|g#{&S zbC}+kY$1n|bo4z#D_y@nQ%NL)za?q~b@72^mUzC~aY~l7OT7aLATFk-IAGK8m=Y|>!VxyPiv^S!?XRn_UuL+s?aHn?7*9oV$TEu- zU$Qw>BM?nzHdKsz2mKcbnij{nDxD#tx;TP$f|L;oRCz+GE*w$~7o=}O`xwd#A&pNd z@8m?AJBtW!9$$%)O3XydYOLGFbO7~T?NU>^x6W%tdCdLDnS zU;q^3b9Q-Y)T4g&RCETVN>!>WOiiT`GFs_zhNt=?-ZSV^V5|*%1(`UIAZm(BtJ?p zZVDM1OwLFZ-Ys8JX zO7Jj(=$9I4aP@kV1ptD*U*zk(s;3K-&&ay9q^4ildb;{-d8`d$K8nwYuMyKQXarFb z5szD+VB2p;T-O66@2FX)1W)s6J<-fQ{~lTX-YMd!*yLWfWFAHYzk52_kUX~rQZD)$ z{I3__S?#0`=j}x^aj<3iv?kw6XM|uH*>cT!s9Sgxg^k}-bS`kg8h^YyeUSrOfi+{w z_ufdoWxIVDIlj~*H*|jYD=t(OgVytQhWR-$Q0C5(QNZ zcvNjpoi>Bo5)R05p}a8OkJ_cnq5V{9wQ%Vtxdab2J(01kGydWw$4coR=_Pl7Z)nmD z>scSgTK=oL)MKTxd$Jq*RCB#-s}dXolQkvHx+MZNAbHW8fvLaiee-3F!v`(SLqh#O zRC_yf*^bR6B^r3jF4Ue+7f0fgB6<*D2%L=K4h{afzHLK$+YD;PLAE)?l|6SA*XzYSufG zS3)9E?73!o^}!VGh^EJ;GI3O~q?6|#)d&pX5 zhJ*~{&q!+Mrj$X)R-0PGfYizq9*@c~NEKUSv|89t4dIw!`pphvN6u6nExZ8AXF4A` zspmSJ17~LotY-f{39fEf~=~P__S5qg{M0{s@2sw 
zxp~ljLqAta>C|N^T=YXqy|{j}FZ_z4|9XranA>mU`Qjs^ACaM7E_pQv^s!naaupd0 z&3^lj&_pk?;AYz&;@aXLS!yl~y$1wPDatJ_7ev3sN#~BnUxrm_RNP*MMXRn~^tQ|y z(Qv8bSsC+B;mg;2tv7OEugRuvqdyqqN#%1q_usclEqo=cYkUAH9*X`N>0}9bN&o~j zT$3>Z_0|T~0?lbIRWDgl0LiV9C{EYkCY>6#H{CLh^HT0V0V_KEy=R-mLpO`?ky@FXsN=pA5C&fXp=`>lXm3 zbC|(gzdZFZV$DGP{b#sfSq#ieEpdI=QJrNdks}rwr~C*{ltB-bSm1ZA!L3@`ep$5I z4VJXgvPvHHHBf0q`LJVDx7y`i-Dh3hp1BtaOb`-(P8=(MDhU~m=oce)M5weUa(tr4 z&)6a!zeT0cpcky2629;{#rSS)0Y(KDMr`X9GrH5U18((=zUR1Y(8Pd1bsQkU1x9_6 z=oreKfoumKxA?tmMd%9+kJ?#XXpgje7MKYW<0JJ)1;q9b<4F1Vp4j5Kvo1>;aoQA~ z2XCtt9)KFRwysE-P_S}Yv~g;{mIohR3K08gbjX8}`%+w;Kt*SJc6R-zLH)aaQ#Mf6 zLivX)HoqCw)hWg`h|&NZ2$-g)8l_b0Hwr>*Mpwun6UT%z$mYHlFLQ~PZ@Jn2rn4YY zw>>bekHdWaXP_(7Ks$GT?TIcohkxMkkcv7APm4KGjSr^eG&}wnkGC87w*|~V4VmLt zdWc|8-w)-~%F3H@mHV9S7jCn7y4?9c5WpRtA|VopqvaTa#jr z=bVygda!WcbH7DlN9cL~6ZiAO>XVsi*{rO{JRdd5M3lDw`hqH!0GEvgz!TU2@^g_*snOY~!kA(4L zea0dIi?Oymq0_zjtjKw)?x1wxH2K0U$}`}0jOQoQbnOMS>Umpx3%HAN2dlImUzbaJ znBT_vj#iw)RZNtuxA19;%5trc9^9 zr*kZY`TA$Lknx`pNbeHmSK9P7Voec9eZ@d#Gfmo~Kg4ppsrKeI$W!SYEJ{-%qyl1T z_lPdnh;?0_-yZe+h_tM%vY-2AnL}uQO^ZwbXlrZljTMc!Kipx?5T~_slQ@;o12;@G(y)*@kJXDUVMInqK?|G{=}X9A zBE|YX#{1OltL%`%EF#0p=#YKC1jEbT;AgMnL;4BZZ0mN9c9D_oY7Lqt>5<`c_|q&E zFH@pX<6}u>Ahbe^MVjGvlz&~iAQ}UgVn8{2@NXzC^0BO`U7%Ra;P=Sc;$5K7RkQR^7F>Rl!Lj zgKQyy-3#-vyU8S zO1fR}yN3E0rwn*E_DxSQTH9XqnIWA zoI`eCT6RWdc5*i&2@=r@M}{9`0;MW!hE1EBHyK+h&4I7Ui@>>6v5T2F6B!k+ig>EN z_%lLb!0jvKOCYxxOjM`{EXy;d28+)hYLHlYd_6=0?E+&Bh2O)Uy{A)CB?J5}{)p3u zSiCQeIYRGbIBfiMyz(RA*V9cJ;*-U=Vohq zK@;@2lihTXWzjTR7VNJQ+U)4P@AiI~d!CNkdIQR4h%{!HIAUke*i)nv6w1ATsqJ%S9U6Kf zgd#G^{fr!`x9mk?j0cGiPY<(OBYHj+rP=P~O(DEJ7nwZ?s#-#YDWTmR@T@3)k5A|)94E|XKDySGJ}rwHgNm&RyYh%& zV^+5qv9zF~f)F7a3<-5EDBe|l4I{2~W;I*{- z{w^jZ4CP7RDz6#xZ|mlv5dsem8tpaOfuy>(jz^@%mK|L_I<^zs?`}aa>sQ0r%ro3< ziA>VaZAV=&U}@rrvcRyJ;4Qqa-(ftL%F;o46;?!i`6*PdtAHC}pged?{t6WziC#nw zI#il66tXG2jPaY`0Yi?f^4;=Ey%)E!%g|U7x&n#bP7uo0ND<6P`V94zOFvd#j`t{m z28(xjlzo3=JVgN_(uy^H3I4Cj5WLBP%qbPoLtJ%Q)9TUk6FhAJ(2Hpx-=k~2Uvpd< 
zvfJ$GoM@XkP(InATuBYV_nKoWUDzN#jJ`gj5e}6jkV$}6U8G$V zsnCE7R4HGA6WWs2=TftU*`Oc+xek}85GjtFAwccxjP)VkiSC+B(Z8aQ@Iqu7A%4O{97#BeWxw_`l$v zpY$E>{eD_}=sMkC#`AUib303J1G_ruq>T;ryHyDm4{2t3()WkS?CJ8~YxFhgV2Ffp z0rHl=QD;Qb-67clPHNfL6!x}hchZlK`2;Y1t;am`;r;yc{+0Uj;fe<_g*h|rnkU-Uv_+IIhaS6 zM+u5sSU5`>F(cBP2yCy&wE4w1V8)Zr25`-pt-i~Iku!EU_U6v8vS5c5xyboFz-s;B zw643<_whjpwXU*`bW|!#lD!6p+10}DCmdP3FO&})U>DQ zuN$3DMcbcgUlG0_JMaBd_v2%sfq^iRz3W#J{aHK%CmW2Qt@t`3dauTOA?HzSnuXAMda2 z4Q}6Znnxn>GMl}j1i6|44aRWe+l+o(TiP2q24g-xBm&z51$Bjidr&n>NiE5$Aiz(r zjvOFOaf-$AUjwDmxZ7RcgKHfFrD=Pfj=n>0M5)t2XQ;~B_4}BMBo5{K%ns{ON}Yv&yM`ae#-z~-dUnN z$de;HD07AQ5>DG~fxc7bKM>;x?L(Lznf+;)h62ZN1K?>YA?dKFvJXF1@T}MeWDpgv zN)t%6^A0m@=?h9(O4@A`wk>D|K5Eb&9=IGXKbUfJa`&(I&AzM;w$KT9Kpu&yKG9lX zTE;#;MD@QtEQPKHMJMh38T}nZ)_ZG}e_rW+`@|7&aoM{+p}V>**SGPB`Z0=e@`aU+ zk2TH?b(6xJy=vpACu4mF+9Xa%GNuT+)L?y_XTy?M7h0>dLT(@(2T3Hyl_?10Uwb@~ zd4VE)7L5hJY$H8USz&FkF27tt;yJR{PzSoZk4)TwhVIa!W6Y`VX59m8YdpjPK9e&f zh+$oLWLD7Q6{Plw25QV%_?6TqdJga~#XYj<54Z}8GRIF(xc7y9Tg!m3;t`n_?k8uX zMfk5$@EB3{V4Yy(Lh^qdd57(_4E2ahpn{WZJ#j|2Ad)(z4RVqMNvQ2WuOzq!pk!t8 z8aJYH$7Ezhs9|*M3$^aNsIhxPu;rP3AJ)aUQJl&uH~hV_uL8x#uHTCWcAdS5orwEva&A-C z=g-^m_8tbVYHetVLd?5}>kHDc{3g1yB?ilhhKtVUE^;_^-qOaGu#wntkq_{LFj+N> zIbytdH9dvP&y>Hqdw+CzdxFDG!@%6f`-~x$q33Q5EE1bmzFP@X-(DM;m%+ti{C!%^r(Vjf;Ak zsSOtffSFNOLds~Q=uG|sgY-l!P4X&1T>@a(J4&_2j#z-0F!AK0!1pS zr=JCnyRC?e%Bv%8^{1TFMu|_Hd2wST)8%Ngtw0Y%g4JfUd6klF3%d%MlQ@KkI;Y3A zGGvW^-$&sib$csabW5#bs!_-)m9ZLF=V)AiL>Lj`Vy@=h8E2_>IET6ZW6kwFjjT~yoNzO9NS zH^u?LC-vhQKN+d9T$e+ucD)iVUBQcl28vnE@Q!a4<|^-k=4s#y0N;s{yE8M*v~!~p zna!r0p+QEd%~(vIc|YSqk60(e!<1i6ScP(#Ok~}W{pj-}{ApoE#Nch0^SFslFx?xB zJuzFjr?)KiM1Ly;vp&1?BjVj$KWJ-rQ-vmP0pA#teS$2xi5vXGkbe!X&&vE?P2q6jF*_+Xb=4OIxbYq@#{XO8 zdz_x+^&z075)_aqN1tn7z&lYnRP0#Rwd#)g=S*1qm$3C`#y6ueM{ov~{v+NS&4TSn z33wCVc%nfRJ)aaVjs*puP=Om4SaXEj18Q{cpH<%M)tg><)otXhISg#xVSTd>_4sE* zmAwOA8gF5bxD2`SJ%>7e5?*?k!+gg^IIp2NmHj20%*C@ zb(BD0%at+_A1RLCEip_QWwzpZjE$qQ%wu8zoJbr|RA`(0lGS%?HM7gV3`f6pj*32f 
zQ=i6GNQIq&{s!$ZHvJc}$Pj+=xV@(>Ou2So;Qf_bm)OYc5^=eWD^Olsi4CQ=F@feZnszdl8~tZZH_b!{3S9;|bPn%M+t*+XEqL-vdk(8|#rk z`Mmxd*CnY@pZqTe^>#XR|Z#|~qqt1y4 ziXll8^_{qfw;qt!JL6cbwa#?9qMP_S`}7BgLz@NB!EGuv1JC3MSLmhe1AMR=^ws0w z({2nQ+)~+CMa|s+dg2Il5nq;=)Tln{))V|zQSVWFt@dOy&@=Ntd(whO@xN$fBFl;k z|B{_3`+X9lM#PLl9nma_Zw<0 zMU*rnXhBEvy{XzKuMPk}J7sbI`p0pk5H!m-Gt)~hNJFe^#^RaRf9!HL@1aF7ZGXG& z)ZW?|Rx~2Tz3LY|FcP+_TV%nSGgYb#p33FCp3l(na&eL^Y)CpYMLf%oUDC&thz&^ zR6tnkj;3c(`Ezivn!#;krLyd?=k6Ok6836MG=%LVZp@tfvG%geWuuy`1G%%1P6^@9 zZaOUXR>rJ5Qg=qOw-iL-{rZK4%(C|re#{DqYl=W6WlVHsS;O17qy6}7C)_!j!)*e^ z-!_w~hA5pQ2L%m@>96ZIIE1sF^QB_N1?|$BsLyW6)fCD#2YfG*ShRo)^{vChkufyi zl9+6FAFuaacfW*->AA15^EuVpetDQFaexj@LHZ7ia8S=kgkecgxdPa@7hqEWfIB(F zqpKoYg8>rb-JH@3&+Q;(hAPbZ41f7=HGi!%ayMe(qf5oICy&#a<5)GbB!BQdBh7?j z<4C8l6eGFDJUJ>4VT-NfGS(wXdsbL3N8ll4ReJf;$8*L{T#wH7Rm2?7ujsZ2CU06l8Dkf=uKz${Ko1k&=KJ<#z zP-9J6={(VHu`5cou~zU_>viZ7qP>T&t8go)^J!whuJECcM(PX;c_Lx44k>X)Y^3YFx&B8EKVjF3V&`p2b|Y0yOL%Xu=2iw|4a`U_H^ zuyABRMWwu9Lb9-@CkC&$$r#0p>ozfGA4yR@$zT!b5dJxq8Yewo^#6?U27~2kUWkN> z_m4n)v!L=(t@X7Gf3%pQ%qn=ZrsJ-D0E{l-Rl&!Woxk&rGlB0Hem*8)4Jz;xp(TyC z4p`RgQsBx%N*0JmA~;1&>x~M(oZYzzR$&F^e|v%pkCTho-RD}%&h~i_=8xKgG!Wbn zY#K2B#Q=|=X|ziI^T>%IznJr7$qZJ@`!IiIG*~>9aS~!|C-`jtOrA^_dRg*DVJ~v1 zUq03&q%VJ^uo#(IV11TAOVy}_A3L-3ut$KJG{2VH-F=ThTgVN%RkLyY6dvdHf4u-- zg?yb?-{SsM;jpA51{jRNXwPM{;TpOd;2a)FQd;bra5jod2;U@s9N0k1l%9^Ks?L+R z`&tR4#u4x5^uF^EZudrcezI0RLyU~jsAp%h?1xx-*694Ce)`%IyWC#C z@q0d@<~_e1H~3sc2OmXQN}N4BReMe~u(B*bbTVrXXl=ie34S!BU2)aDqnfUTwu}Wo zl2aM}Vic|KP^w}l0k=CDfLfR(>y8+x3E?2WDRhLMFbTTl+gR1n8Yx=Y9-7t<&SKs) zKCN=)4rBGngkU5)76n+9Y*1F7hN=@n zq)!1!Rw7YWk%T9bQowY!`Y8itE9J)D@K726S&rOp6qcEV9M;!Tph;sA<&~&>dh>7p z{Mj&3(qlP+nQp87byo(qEC$($KFPa;b@i~y`H=tbJ5w{~dF9EcG7v7FS67GO3N-~ht2+;_G+*)K#NQZdFx%TV$9ee3^^+W3rQSl@c091xIc&d_zOPFE zgsf2zM~v4gAXD}>3;KMI>b`y2g?YS|dL-F>{|7s@Z=?!z%q7A417y^a#Mh)5XCWaV z+tDIfH0BtdVf;Rw{zJvOO-&AwBs#3(@Ut_J+1$HK1V#7ij=9lHR|Cz5FLS?Fs_6)$iBN6?a!nVI? 
z$seUlPIJdX6kLl^T!x(xi6@d3@=NA$S6DuEnuV~urK#W@F<53czc_2|absjviXbK= zu=qpe>n-)ML^}tkmoaUeCJo_2eWF)?D@vCDAtdjHf(fyhLQ5s=E0cZ7GY6fXvZI#i zcE-09EyR}Pt)GR*5o@Ob;K{Vb@FRgfqaC6Ph&wEOL%Wjh-^z{`hEZ%+X)?gB@XOQu z`AR!2+AT?9@`Aclq6t9lWL*rqngot~I|p5@)>!>2knb_BX-qn5Gd*B}M;w%>TP(YU zJr_DlA&bXmeTC?_AaXkGvfnmOnUitI*AWWd5O!Vn)Wjsj`(LjNwPX_9S6vkk#{}xU z4)exeuR4J2PiwQkUFc|{dTGFrm2J}{*gFLIGyi_S zU_sB{Rf-ywbn3qn5tLEt`la9?^b-OR@45~wz^f&eA+mAk*l-b;3cPFSwY4%+eInPeZ%{PX}y2l5F%eu3VC1y`p`+sl;E&sN~5ox>S)*-gn3F0 zI#?xjgiI#4$!je7Ky5wM;^$2Emvmy;qpr`E1HzF9)AdnQXz=VqpreKif_O5dq!F%w z0~k7Ldu~yAhP>v)KswZSE5*;8s?ck2JV*RP#RW_PW#XCOXemliLXecqTW)bvjV7gu zxP?Gl%h;`BZog~m{jOl6ULZTYjvKl=zwyE2p|v=bN0G5)88|+(v1B=7dFO5itO*>$yXh7_Xe?S(Cf_$}9=SO6fl{E(tdyP&j_O%nE^0oTP(PvA&7Ln}E zpFs#dpWQxf^m<_hoxA8F#a8i{4k&@CkNPqvMhW_wDl1@#{!N58(uV=3bR;`1rq(Bd zp3z+u;}H_Me*)vm`{ZzC))58btzKiAvWaLh5Bf4@D!rM z`p9$A58^HxUpwDkTud4AV9|AC_r?nh&UJtIQB34)9(UMpOCKj(q(yY)KR+0I(88Nu zy>IR%47@xH(FlEg!LBkBmr$IiHY^-ncDwg|FjcE@I^02NOu0jRaVXbCW4J*ju87eHUb`V|8J;jZGUvMBmyW8 zHcl-`1|s)MCH)nunMWk!YYf6glr+xZt1fVH_lhXcCXt0!n&srUQ8`XZ!xZTKt%vneR#2WaNNpPOg zdb&1)BEqL+u#pHBlxfQvTyANvqGn`|F4^D+WZLm5C+!ka+Y?wDhiG}!)kd3inJx@R38 zwTyPV^#W5qH%@z`8X$RAnyf88rYC4!szlhT(zGxJI%MAQPu_idV_VqFSDO9lw)Y8v zUy#4At@qIe8iV=r++Zh6C`<(=iL4d=R5XwE0CV zE`ogI7xlfs7OPtsZ@g2;3!!+!|NC zG)9?WCmWSrH5npPBszwzk>3dV+1bccEr(Cy79x zN(Xxh*>q#ZHiXxWz_0kVRAY&B&b%vzz24))NCZ?-*^Ol@DOt z7*blI0xwkI6p#o|(%D^e>P(+KWjt$qE){G9TW4T=R%!4*is!ArM4 zr!iZxKP)l3{&3S9sPlWdZTC9`T&y@@o*E?&`Gc|o9aGBv!cB^SU&X4}G_Cqfv@GT} zA;#CJjK1@Lc5ed|oVM0$5P{up{{yn%d&LYi5DtE_^jG>n%&A@U8tPkVX|8YW%Y)Ao z;WaAtXwDAPE}C$$S8JNkpbbRnlvhl0TUwUw)-yFzp4U+jx`z^u94{o6D0?Hh=@sKn`CX=jkWiK1%a}`a~ox_%u<^)(DcoTDrv(ZV$ z71i^L15DrjFR8d8b6#4V5){Mn^hEvZl{Kn|lCN0CNhjWwxOKa6T$O$pIfEQ|X-0Uz zaLkOllrf&nrFlxnehdG!7%QY^Lg>LEZnj4M$X=9fnImkv4cWwrNwf&Os& zHc*K72~bG(?+19|HZm`yqy}c|CgRgXZpZ6g+l|fI8(L!;U2)?WEyr!~yu(2zVfzCV z$cMHY=&o#ts)@-V9C&aQHi-h5P?0z~H{?fJ##S&m?6W=rv`oHAdxbQ#|F@@VZU3&J z#K>(VEU3)di9%ZV6MqtlQeenp^IHz0h8)|IHzT3=V=rr(RI*$XfDS!MyrBV9FWUnG 
zItlSLp;MrMnlI}H+<0IF*c0C>y@?$qsDN6r)GBaz04?J>aZ8w1+u3rI3dgqJb4k?* zdcm4TYRP0OZEz#wDeRh9$720-7&tcdv4!+`t>bTZHGv}Xjl*;*{chj40j zs-9JC#81eQF1}3cYWUnur|sy$Ka%t6cD?0wxwrEKMvz6&?tP8kYI>?ppukFu*icR% z9c56>0g2vUX)FWzn|&xE0*|a`!%mH!Fb~pxZ)ZFi)gT*>lbQ`)B}*NM2L=S+=+bA0 z7%$HuFchvr@LBjB-j81gCQrH>xGyo@#{%(DtR2(m5nrg!3u`czOd{IKBKh*&hIY-e z6tN&Q8Q2fTVY9P2L(P&)iR9f_&v$qY?^=07?l-k?-hH=O>aZ&h;pN=pxejRf1x3hM z#74^ZDs@~+1r-X`Z+qxcG~aJV34AO<_;=&Paqj1^rNd)t$D0@Ra`rL|U8~y}dFlQ6 z)n-G_vq1V4X@WNrC*_*nU6z6JQ2S++Uu;@Wm>6|8xJlwS4hxneoBJ0WLS>#I6{6f} z$qNk(6aseurBpm*7=U1<)!x`0Qdn{QH0RSig?{YDZ18#pIAJr!Y7u{!Kb_Yg9=C=UNIxA=uW`r1MM2z`EDJy;2Wo@>~-f4{<_knj)AdvKJ1 zls5K?E$rn0SSsfVx5Q}hF(^xgck|!ucUW|a8g8HbJ`T3^-zPQR9#(pZ&#mR?1=ILb zp?kjX3`5m+Ie)wKd6ZhkQ@Cbe_Cx=I0Q-eVbQ{YGvg_R#P0WwC$osI<6Zt5jr-+l{ z0r9ZQBf(2s`B~ghkc)L*NcmtVJCH5e5eO&BfqH+yS*j(C><8610ux<(5L|{O7UC#2?=&V&p@fIP^nsTVey~c*ev>Yy^N>gYA+m z7_3EZ9wyg|plqgT&cF^c+;?BdZh5GJ! zA2H-DBMGiJ&WKuJWA*vpHbvU&S@ma+sCsQmh2)2sif}hnur=nIGoc3O%iIQOY7dEE zBM|2PO!v~rnjb-mYZ(nqdJ~A$D2%M>M5d1Fy6}NDZkgHUSPyTNNhj5eJ@JA1qlT8? z-N@fRHzq=wBAfrfcr*cj-#D1zMMAQl6J*ro(>N&5m{5bS-sowGw7I%7`~k*k(ySj)$?e}UtItMd zNC_F<)V=3E8cdCbmC0CQ2~YKXkxd2*_eX?5rw4Xn`|LCSOy}Q-sQ(jGm)rEZQerGv z0g}QHG=dY6%#fsOZ1lgxs|xDp8WK{%RcDlS2^gjq1!e3Rs@CuXRWdDi0vs&1@epn~ ztmn&WNP9!!K29i?YT%6Vl5uO}?Ae6+8*6YkU6%;=cX4f6j6Xk6SN**ecNbwmHQ(bw zct**9`zbm}#!&x;^k=!!+c}`#9HR~0a5sVOJLMG%3HeJ? 
zr7`y_g(_lJvGQp6GUmxDk)wEf;mvh(zWYDRtPSc8;l ziFalmeaD^#e&8r9k;|D#WQI1dBhUMQY?lzU@+a~{Je{rz4GiI-ky>swq|P}VG7|%& zosL}u0xL2JQu?#^JkiO`M`-kU;njZomkHnsu>Y8sh#uJ&PF^@JvVWd~JzTRvWV$G% zhk5I4OiBKGmac%Ik$z@}MvaL+XJRll48=G2sO$IDMmS-|cID@8ADlGcQQyoCO)~+H zz^$?j$x!MDX$NiYtzP8K3Tp|gWzbR1s-?HTS^V4fy6z+G6KO`<51LEQlkRB6{%3-B zaA3qX^iutxu~jHRU03vm-;2&>m*Y3PFKxHC=bbllEd88ymT7=w!=wREo*3JB!GXa zh)+f8pQ&oxjf^VXV@Z3BJIr5?*je6G0dEA)tg~+PV!{Cr>u#32bEHAv6s<3 z*~u4^LJY-D05|AUDGk(^h+wSX_WDA|8#Z%kV8IDROW?jjNx>4ee^PeQo(cD=u>#an zv^ZMUTr@3<#x6`=74F^Lfj7Qj55kI|8I9w-S`_NtyYWdIvBI=&Li^)dSBZFJ*aS(W zIBe!whYTUF5>dj7q0*Ft{TptSCulqh9|`#cexzkYzd2??t=5PmGvChVH4H#xwa)Do zcr*=A-=Cj}7ue1Chnu&#wj=2ExP!ah>F2+q88%=NHE|EN@{a53>3@$CT7QUpZ-{rk zPHlyu8prUXp!nONu5*98L`VXI_)Q&;L&1n`qqPLkDSO#-uke5VW&OA8dS0Yu0T}KB zB?hU&y->#wp+Ks_uRGM%uMke#8Z?#T{vEDHt1}5Qx9}g87?mf4e6sq6Do2{tQsB86 zj)d_I)7fcs38ccFwTiR_mb6j<&uf7hN9P zGAUyeJ6uA!GJd7fvslVG0vfI|ltS-Xrd9$WsblGNxHR9ag1ThfOL@HH(3))^YNj`Q%os_?W8bCga~H;a zza?+yJ#VYOf(xhR$aD4dKJI-chCBE!)*J%;nC- zA3pcHD^6rB6Fr~ti7QaQIXAHZTkhU?hd}lp!LXJ&ObWifzBJk}vD!V;{AEl-@CL)M zf-JwX6^6Bd;9r-Jq}Zq;wRPyY@E3y)Nm`P|07>{R`~3WmDN<Z6ne0w9mj)trq!gE}L9Po1W;P`}hPp7;C z3ciI#r?-fhh}SVbP$OpHG3ed~@JEjnqzu_y8V+lxbk5ILJ5|_ zAXSn@h5a!eb_nN z{ROi!E?WG;3D)1m+T_z8D}svl$fidT)(yi`{c*=LaphSlB{c4F&u?JG*pZLdm$|m!TYeQuZyzWFCec~zgs}h-T3kKR*~0g z-A~*Qs#AWkNP5V?+?rlWib&>8B;W@w>dscE93pu?G}ZFVq8EiSv z^yR0!C>7v?HDTTmMdtH-#0YXK<;+s>`}FGqD&UL5i`pZ)5~S6emPezhd0H8{C)b*X->%CeSK62h53(3YV(X!Z^{-Y(vb5PJoPcB@(j zzBN8*W7A3pNN2do`NktTj6t%Zy)?UG9@0YFN*#vyB=*%8{+3o%%ydexVon?{q+$9G zRl%31M!VA!Ze;s~b$Qv%fxFHK*d2k7kn~?vodb7fQI~FG+qP}nw)w_(Qn4zwQE^2T z+qP}nw$Zs`-0rV?jPo1T*?aBz%m)ODw2d_Lwt`^MqcC#F*bMc8T$B_PegB!^b3b#Y zWm^UD>t?SFoznj`6eJrP3w{YMv`n5;f{R=4=RS?48-y}UX68UHrvcvyyb9Ldk`N4} zB2mr-&cE~wpW9m)$%V?o<}rUJ!v5Fm0kuRnrSNaoCRr45Fg`OVbg^z_deiHz+MsHs zhKz!+7&znV9rP^4kXgz^vKumOcT5Qd3kq5D+^qZ2gqpJU34|a70c_810}P{apTME9 z71%Gh(S4h&<@eX8Ya8W&ht}i^Ph) zOg$bPgNwM!j(*2{Pg9pa_HhOB_KG@>r!)AhP5vOrxT}3jzi=~lNm&jghG18PVN}}* 
z4yMdXBQCsq3u0_;H}FZDo#|zn6kBIqL!(n?JzikcldN&NgN4Im?9JgZ8&^olEfTpm zve*HkPzH89c7P`N`EM2g z=dZQ(Rxs+T78WY2WkW_fx51AlMwM({VZd{|0lTcJc}?A~z>SYG4MPR*!UukN~A znb7K-nLx>49t6^zaAyw?idn5UNTEIEdn15w^#1Z9OW1!E;vzSK+G0Bn5<@*Zu~g%> z&ol8_ZyCju>jW`C^PU16qX?&HiYdP=L0A`WTP()scY{Q@;gzc{RdIEP5Y;r8H3ADV zYh};&7toUinsXjnFAqmyqqV&?c(ke?Jw&?kzGK(%{G`ZKjl(v?N+o)}tn1Jf0{MKs ze=!*4L(ern0Jex0l8re)g~GfG$#gEd@e(t=``!6i?POvDY3O-a|1$LaMUSzFQgDkU z?*=BCs%~%rl9>uke+F3;oTg&YM84`C&73>UY@pkJcK{{$we)-=_(Ay%Zg0>A+}#b> zD?rLfv?NHHHk87Prk@^#52Vm2`)&=2RbG{;5`7m z;Kirdl0<%3#mo=9GEp|s(=u95^;pgzl8J5t zz>0chTh|^$XAX{n?C*wGgQ{OcWJ*sjV|3XtpNM(x9&82ONy-qrapltFtp#ond83i6 zlqv;*ERNzNrJ(-ZDKIq_E%RNaQmv3?F*Bg_CmzR)!Q)Rybj)`X+-So)aI0eey8;UeXVrwMepwf(!aZ z;oh;1KKh_hx_WxmM4MQl<6q3QlXjOniT)f}&Jy`&XAp$>KSd;ULW%`zXXPmr)P|Ug&44kw+uqMq0GUSpjX;@5TA(8;1Qfa! zl0E~6+L*8WyKfYjjX@jFk*yg*v%E+`(i1snU1p>p)Lk+^IM6=#GfAG4*CbhwoC^n@ z883Tq=ndnS`?LwO;m%KJLb?%eyMpL%Yc}f z#5Jj7Ox4M{ZR!m$$#$al;I|-YqYI&r3`~5=3iC18s=PfuFl<(Qbf@&XMGWC+qtz5= zYWC!1%}kz~`AriDAy$F|$EBBoCT%m%Y~Xh){&CVsxWD;qzgvVU5ZdcHfPU5_U2P|x zZ5EvF46|is8+AYWMMo1<_hmEiYs?KvIH@YWq)Y602t)L?tsMeXWsm#DM+k5aU=nH9 z%NO7pD#qSf-Mnw{F~^led~!Pfp%o%Px_x>@kQbiJ90K$u?Yw%)~uC9z-%@FGJPk#5IKMmKuk!6Te4nqQe>;e@6seyjLhMvEPET~} zF7zV?u~Pd(0UrdilXK4D16-@d@RX=(U&yFf9Td$93jwj{6vCtNulK{52-vez(o*&4 zc%AUr+6_$!Dv;<9Q9ljsSdS&sHVi|X=F1ZS_bqZfL(#iEyp zU7;nenx(609403`FY9IM0V4;zWHl`2#;CI_pNb?LiDn)xIDTVR1*x;EH!+A@t6x}w zQe~fZ4ECka*^o9PhCp;xn>QSa&Re*b3ja!&q~h@ zFFl&a4qci@jLtz-TV)e{-7!T?7Lv$U&KdKXvD6wQcVM8Re=z;{mhXH{@2mVqS9x8b zSLn3Bv9Wv{g-TZjzAjaZ5Vlc$y!igJ{Zx3Dn%NHSanP7* zMo2yK#$51Jo7q|8Nc2+Vb9;0%^KZ6>ZOm}G72|TB06P1>i&JyjX#;_{N8B{Wmo@$9 zP#a){L_W|0^_QPDoeg>AFJ)dVuol6jieVWRs}KDLl?1~&QL(yT%qn#ktO3*Sc+ZK} znk?BAbs@j??Wn8ZQiW4T-{4A`Z?D3dZ+1LlKZ`_}>$Gfe(V~k(((D%%w<7xj1ZV@B z4a)9Lk5WlN*3dpm2EeYz2d5x#_DVGPHEebJj`U$0SJ`EILWrhnZeLrA;{8Tpp2hTu zGmybC9x}va!8~n$b@}%A|K<_-2B4(x@%-J8hC}TZ>BrS4As*0ySq)tuXD*x7RI(a@ zNarpyEm^El7rlWghye!|ba1%tspM{g|2t+1!}D*)zV2-Y-kBD1A-EcD0u9))RzOQo 
zImN*$?4}8A9E>t&Qtgmtt3F8NO)+PmBqc}bJfTXYNEATyof*I;0uVDMoo>6q4vxP} zdWamh9YL&9YBHTI4 zM?%Un1N-^mOORDRk9%C5&<57QVQ4$iFPpGp<0U)nvB*-Dco8`uhmQtkUU19kj95gb z3}QRx0DUe_mjhz<*rCbJ@Fb6PuQee}5sJ;|HE`OIbO?vskDLgE3&e~Mgwz#YhR$=r z$ipvmBy@_{98bu4fB8@(;=bLA+6xi8C>jMlzc-G1zce!X0CzuEe=~0QIYMGiUH38K zkA+rX;=m1hkq@A0L$stU-m?7mtuR*jl&j8TE&Y`)pU)*q?b~v7D8I}>S#gp+o(RIJ zOYjyni=@7}=kW~`WEO+Tpx;MX%^&p3FB);%A66H0>Gn2GFz(bA1p?7*JsA{lUf15< zIZAX6`$Qt@{2j;l}T5dF6Eo`m^{(p`BPv zmf)jR;Rs~2u;?K?0Y8UtW=IYH*C@jw%v^u)bF6|K#&8@7ebwX#p#2zpI)zvz{4Bu6 z3)k^{8`sQW4|ocR{`{`%*sD4{DweR5a6^!CVxQHaCyoLG>2r3{s|qO&}Hb`v!ZHq*kVQUR;_+NXIse>V;1i z4HVV7%g;;ws@USrHqu#2iJFuQC~&rYkzY4F1r*)0a0|u`d@f~d)T^LeYq2P(8Ax`I z7K>RoYg$VVN3MAH#7~^bC=^mpZ@s!N>+W~!DF-j=9(pw@ijbnh8q&j~`5M2Q-J;Ao z2S!JkF&?unrAAH4Zs~QW7;wXqJ|S$9%3FL6ai%d9)S7+I*9FaG$NsThQ@4)yZi|hK zf3B(O+SjHbW8weaL)#PCRy|IRv;N=E(%aLs4Y~TEf!`~4$JbC8R>9%S?9i#XbP(BK zY7Z_kmr9X8oC8Oj)ntVST48@#zUar_&JxBG!}EO11dwRY6o9P?^UzLQ00&&UlUw1S ztqrKf5B<3LPPo?}WCGW`%ydT}THe@2oV*%LHY5aSxzSOJ!KHx=>TXI>Qgv|7S{T&v zeABmSXw#?3sa%MYJP%O{!~>ZN*>__K^( z$thxBy2FcPm#Y&t;UyzPTw3KSB12VDeFj6>6AKc@P=4j2;x?ieWV@sL+8RTHVCqY% z7F)+JCHv2K#P1ni{VC8HZ3I(asKOE8%qIXQ28)FB7H)PT*`k2<QzwZKEB_Rsx@fTdzla&dR3%#JC7bdq2=4qf7K0=Vh932&KUr0i&MG>VU$t&*%jyykIHjx{r9dPNR4!qyz73 ztVjZ9qY8TLtYW6nRoh5K-#xRBgMWg9+h3F0Mr<@r<}SG6sJ5{X;(<-@!5Za3a#QS| z981La%u<9Sfr_s~QFeb)WGulk_07RCge6VtYIi(8c!>vLNM<5I!k!8L`uRksF~NPJ&e+w3INJ>#$4fXjX^x_ksW+a z(Bcsg_M?&Vc{n2eGznrZ?gc}GmQhuvvJWMtZWmYSL+(z`W2lQ+-O~cRQGmL0iwA0Y4 zp&HrqKwHGAw)w!LJl9S)Iu!_zf}x=99Cqa~i>esB1(AheDL(yEF~%kI7if(q;e9E@ z2RXa4p5|HW(Ey5gHwYz28aDWG83rI>5)f!jc+rAImqZ6wjA={N!{kYmdSEQZOPxNI zLGJE}ToN^+E8!@`F1lxozJa+nb(&=+M;1UV_H0EDZt!pUIblHRDb)%8R;I5VVIAvO zGAYL*`6k}vqR_cyQa$x=p-Zaf?k;s?%7@+nw=A?{C;6gG3RHga;DC4T5-JfyktAq& zN!i2ZD=P}T7oyR?QTaO-d2(k4+uH|mKH4O+`aWsk>?rr#yAxrgwA8B1XQvJWQ!ZnT zZK|bjzYMnyCe8L_l4SHJ=fEd*3AI-!=jIWslNE9c7 zHdn^C#RzqEhs~82)(K4(D-9J_hKDPhJOvZ55fUl(B6pZ@$0XtpJq8bQv6e2J2NVg? 
zz!49ZY_^wt0Nup=;3BQ==QasBb_P@zYP_g$V^+*$Ef6{c8O7km-?l_=JQd-NT>?No zLAE0(T)oJ0myWLRQu(eTL4d!HnU?-^EefyOpU)QZ_6^8dj;pD;iBBpIghMXD-0358 z9V9Kko1S$BSKX6Tc_Gn8adIz=bg)_7D-;S!lxn3|HA5~DCWG+Y8(yB^a2`5n;(^|X z%%xrnu8_lVmfLk%Suh0bq07u@KC|<3Xq1zMUgykEE}$#HCcE0-*%d38;gj@R#NGDh z=F*Iy$GgHn3@TtUdMGrzZVgisuw}j+Kw@@Wt3MvyLs}v69JguR+NQ~vmkJcxtW6zL znX6s}S#WUNq%itJKVJ#%AbmcLg0aH%Ez5{ABP{$jWUP*uC=L-!4fwXE|F=^pIH*B4 z1=Pl7S<^mYp{z#QI#^|;6dkJdf)PiEpT{lG8v~nm(|T+$kV<{?gy~n0XFc}lpqm=TgOjNYWovg5(Nc$f!V85u2xKg1DK%o=@a#4}dw> zM5?#IR8*baCp*d+OA1s)K+Q!!k?j{%Vzx+sns|CHq+O7aQOyl}lfgzmtKlb|!MEWz z7;lig&!vlDZ%2d2Wi}yD0mh-}h^LC3RV|jz%H00V4wCzJd(7b%vcP+-;;y0JF6Yf$ zqXjd+{VImF0i#hA@sV3dW%;s_K?mFgVV?kB@56vocw5p8K$lIFjSezi2$HefcbrHu(H8{B*=`(Us)t(50M-4S$SS)0_ zsD?ia>>h}x5jkrxED@cq*4?dj3#ViZ`rkLrg&NnXz86kqa-_02JE|un5v(fNQ?Ju= z{6j7KMY3m6tEq`0aCCnt?LSHJBTE^o>t`#Gvg$Mj(tZ!rxc^j%;wMKFfFfLyvv|`8 z{>dN+Jc=Vz76U}f?w!od(v1?!Xjq+MWMGAfDBGR~-UPPX?&({U`p7}Zv}ee9rHQgR z(~?l;mR<4(!igx*6%SmhJ~qLlS~iSlsC1{vY6)y`-}$Hk)NvJMsa$^uO@!~&Zom$_ zjbSPbn-D0+AugRFSS_k{KT>QeGUjjZqRJe9_H6!^QO|+g2}sL#d3XQhXlNQM*{ zf^5@}0<~W~L$W+{3?#Sz*Q>^tQDvcSORr_{J){0`<@$Cn7W_JP{4u-wF3O8YCdJwe z95xfeqz(9Tq?>~4!Zg`|!iCkH?wF%0>edXiaEQLy9RDM*ndVw!U*xf8i8>^Lx|3h&E2 zC^l1y)K%tQ{%laUo(CmeB2~OB&t*6f#0-&XgTF|0D{}5UiRKU{e%qQM^lUy~q zo;XYu$C$ZDl%Hnaw*h+JU9FPqOmM3{_HJ*(7)cqL(xwTjGIzN7NlbV=BOD@sdz+V_?Qh?WuN#gk!i#?CnHO>;LRwEE18}c~OV`OqkpRD1$$G%gq zvqQ&G4PuapKcHGf;3&({_B}We<)4;o?S)Yy>@eZ2_Y(q+Fv*W$j9KjjICCrq*&J8E z5wSzWNb;Dd_2^B;{js&mRH0e3s-$(gol5~iSz_?vsO+P(saVPvff8{@gO?}(k@cXn zi;~Ulj2L$4jMLrr!95dGrzpn9j93kG3oCK+Y0zQ~Dj<~ns{c$IB zhfZ(5kTKZh_PbgcAu0f(P!QM1%W8!^Z}}h?4Z_OflmuHKWp|fSg6wDrWYMZF$7({I zr&1fWloWB$(lf~ka`*zMMVn&F#oS1Z$^$#tm7zTNX;`V07LzG{)ErI-Mqky| zt7l~y)3b7va?!lXSSsxHqXf1{eI#1d+z4O4jQJPtp7{_lFhp4oOo*6lmlRga!!DL> zwIiLw|InOrF-$H!@)VtSkrtd(?MH4ej4jgcF7Azjftq^1w z?|{Of67VgEa2 z6JgBm^Z^LjP(aN%75lNdM%!KxGB_j~_ssWoIlQr)+8#UD&9vWcGo zqTadD(T83cRoPJJu15ogZ0U`+vJlZQ$RyJoCwZ@k+&8+8T8-SQk}ewGM`Aj5yR z6sE&CK4u6d?0C%S{leuq=UlMguc$0Ncue2vK<{)HjPHyNl 
z{pHQGq`52bPIn2mhBvofJf6Nd9S+jtuqacD;FIF(0Pzdzy6c(2Ue6mMX*Dk$)(&L=6+PEG_;J(OkkX5rfuMu7eA>FNN9OYe1PLC@pJB}Z*yFNBj zO3@H7BUReymj7nC`ilX$U53d`dKm8{85RYw9#AeTUe6-Ad!75XIK9vKHOX0{5nC&u z-+y8nC@wA`cR)yZap`dpAKfRMSP9LD# z`Mlnr3TtMjfURijb@@3H4CneHYc@}tSW95h@yYRr2^ts&g4YNPoQyGWi#IeJqHa%-HCYBjn`R` zNs2@?=)I<*;>mDmYkQv!qAdCZnrkz!=6%B@AsDC;_7Y}G!8fJfZ3^QA)s7lhQ!(Wr zB{27XoRA%G-Qp2(HJ9X`=wQ{l!{9Tn))k2a2%V*|5e{dTC>a}*kM~M*!0W5_|1nh& zBQdxT=Fk5Dsd$$bhJ(fw>`vqwvS2nfkDRXr>DmuGgA8fS`HSB9q#z`|npFeOQ((+o zXaWPP=mAY~XL#qPb{AqmZggJ{IrQbhu{Pq%I+3Ei^SO286oN}i3n;@~P#8O!N1=Kt z5h;yEhlNgr_L|9mY5^i~tiDZ5+3#v}yLb0lQzk0Dh5L(R|5We(n*}f??1YT+#bYQH zc)6DEa3@A_lP-%ogvM|iDcWtVg8F6VnkzRb6gG|{Nhv#Vp?#s>8IkSME}e$?n*VS6 z{AYxH3#`pcMXPC%EGY_-{u?%ZrVGCp)lB)bF5#9u%q zh09DE1*Q#`QmHa!14g0RY}(*$9l#Z!yVR4{Gt{cjr=z5Gs=wlliEG0ucOp^$_~B4O zJj+3fW;$=72ZI0V!Iho|=h&7Uou9I@GU(8vq5hwT_-h1+1nnBv>utL-< z<1_N8!3*hItp_|;MO4=JazWKlr=c1d}DJ-sn+6S{8zW zRJ|n0vxL@i@BGo*H{l$@8w>>dw^9QNd0I$qtUrF)MmKsG)MFX!E?7tihTK47MaxhY zm&b+#s_^)vfZij=Hh~CbXYySxW)GcBLx`G`GyaaKj+L>jGzIXeI>ntFJP|PcqW*P? 
z6uXoBz>3~S)hZ020=*MRFb;{AeS2XDJ7?u(cREUf>3t}+9&K>;NpQTS1|_9r)Pq|M z|J*{-mP_+SPG8B_8a{H3H2j>f2q^0%{M;qnt<3F;v8XZ_cH}b^YAdm(OJnbrP)@eR zRgobG3w^6+PK0RsMZcoReN2&4-v_qdpuxiU;EKc~ckOn(L@)P?1Wjc- zj#Q{u!>`EmgdW>DuN;jOWlcM92HTmqG-z$xcEu@pZzcDmD*T=$y|BLo*!bcv_5#N$ z9Q}>-SIV=)J1gFU@h-D(Dw5X-DpawwiKbx9xmJvgZE5>H*}*dWIg-^hwnC#6vb&Xx zJ&RntG3f(lShDcxAuG!OGEsMT995r7$G2Xen(5mDJvG!$RyLUHgJ_xp+(F%~0o*zO zUyi{Bs#=bWzpI9Q?c^D~L|R}d*iFoXLs6Vc0^T7frWgK>xR+nVL&=QF>UxJ=Oz;X2 z2!7u1m#%&HO10?)0s=$3{h)Z6IOZFBEds`l4RrkvQP8=la)=vCCz( zSGSuZI;vd`Cc!iccOahGXWRcC+|^SCUpgjb9cX{!N~Ew+8kDh|t}(66aU-9RqzL+< z20m-$X!S`fvSzBi^&(4&o!kbzo^Cb46;xJa1 ziB~9sLkfmnW1M+B-)ZR8Miu(7g|eF?fo{mdx%Pr0P>67<<#@`(0|t?3%_fLII<~3j zD+?;BUK$914ZLsoMNf6TcMwd!E0rLcn>*?mEC)w}rn3OAa3uRf!ZPUj0Jo=Ahk3IX z2Hba0{XgV-*jBOg8ZBQ~Y^%El#Dx8L@3=Ph>?gQ^>YDCczuf)1*>T5m<)Tjm+m)f+3zjff1Hsh)B*PL6poK1 zKwie|2V`V_KC_@r^WjOhFU{4XhX!OkyR_Y;MY1XiZ95Xw7lV*#!?dVMh=}aqSRKsO z^-hl1L-r%_1AyiP2CXUDsVY{uxKRzGIBQA%&bpj{Av^v97I)^?FFqLgZl%-GyJrm6 z(5@|KkskcpigViua|$1T}Z*djX?o&s*hW0 zdRnW4pGH^70?SZdBjuHAZeR0I|1&+Skx+ zm%7W2{63b@?=qp3;!sv=yh@dY3nQgqh)sjt@bw6f(yZFK&wXcGDmMApap;QjcK85e z*_f?w@Uj5?B+Fs=CpNShtQ zQAhDy#bOf;8{(O$y54!K_D5;{dZ~7tQ(MWZGGW05`1>QcF96EZjpSZ8x`ys{h>yS? 
z6eYB+U9uHQ;$r6G&k=R`d|D}ongRh zi`3dd+gK{83dC0Q+nO9v^L6d`5D&LvNU+$0U=NW&R_laJyv9b5O2s#?F`;1h-ZO=4 zx@e>oFSh{L59Lz*k^O!dGav5Z*%jy~sBih9bSQ|DfIfC0i3@yt>zuog8{|;e z@=TE+7wJip{qpshjdek}TSiHVnTipy<|X%6zcBCL&qBh%-bQ(!KdUPr-g?#N7(H|` z)O`GtfTe;|5VwpM-NInwBqA_5@VYtd&E#vV6I-b%y7jQH@@s(+|-mpP3jh%o>IT^a{~>t zNzkf^2>76?+J$yD4r$Qx)%(w2`_V%<;m|U{jJ9j7Zw)*e`hr()f3Zemy1+@AR9%sI z!!&6d*Q{E}nM1`yGqp(r#n~RA45ev4kZa9FS%XAxvoXbl*vVpPiUDTUEBds`20)8$nMKf74>N#vn&8(=eRYP^wc z0iZ0|KoJTwxOR0(cQ`%S>KXO&4Z)^kE`1U{A+^fWR$$j|8mds?#XyjIR?$|ewReIZ z*qI2m$S%@#O3sXd+qchCgOja$SJK$E#*b**w7~$IfaZG!?@KY$ibUM41DQ}b+=ue- zzH^maYjt^IPdwX%z?;Pgi%LwO^>S;GckR0-6ao72=9!8hOUOf#TL4eW9R)D5HE~hk zIkr%e=0kgQ-new+#bEBorzA&Z*>qGdYQVhsZetHCHHFU;B*<3D?;INUMn;;KUTxrL z42-{h988dbGg2eqnh4Vrs4u^Ig&?Wn!h7<7bV1rr`H)}aTttxV1@ifor_&MgLw#Bk zT(>+2AA7`3UCJvveT3xF&fVF#X9Rq%v9kp+;~Xr=Hat!s~VO({-GEi_-xZ45KRfsazr0WaB z#T@7KKAaKgCeUARN%&p^Pu!x|umf!B(n}pxyjr4Ivt9?_Z95&YCvBmYvC>1txha+m&a4_Wz@_ zNuMyUz<_=V)cAb>N-YZ$lM$dtvRq+CQqq9kL;rU<h`O2@N@7 z<}J_CE_?UAERnwnL|9qsq&cbYw+J*`D^=%F}ck;_?V$Dxbt;tFHHwY>06jjIJ)RhvK7S z@iE48+}J=;$Kf>DEDVHAU!ejlu3|Ef)iMP=x>z7wd*S-gB9z+1panXo*(j~60XsoW zSX@L$lByq$GXQB9g^2Mxsw||M@0W$9)8Wu?ZEa`&arh`}?%UwBOY{8CgfnSm=zVi3 z391)-E)@UIY+D!1)({Z^)xH7HF(kt59}!?wZ%OnL$s%(q{{)+qoq`*sAiaQ_?DbC!%?Z1JV83dc63GW3i zZhT9ao=zhIUm}5ijV}hRS?FOPRaNhcCHw_);S#`v#xmUjWkC^zF2gfj-pc;Y5TPN5Chm|Z<-;Ms57qPb&mWmr1wBDIQZPr-nZ<)e3NE z{QGqrCdKrsYRI7q;FH#IE$#}Jaf8gAQg#(E0wLClbnhS#TD~AXmC29uFXoQO1(z1y z1qDUF(Sa{re03<2VZtx>=rk+p{7{k@qC4A1g^L7155aP7pHo^tI<#V9!>OgiRpE5_AnAEFQAB3RLz^{X+1Oc1Z zQ&;JCMvgC66)+5WoM*e=^zv5kPnZU8zfOO%`h2{hquEDbz^2av{bCu|-9;I+4E3su zy|4DcJ70dq#BO71X(uLihe>^J|pwI+w-LH#Bw(viQtDLl!cb zP?>$D-3#6D{mo$?@$1T5F!UheCH__wA*|#F z1Mc5`soWm8{z~hP8mtppOlLDZb7~-d$$T439B~6~StTgFWg;D4w((Rk5?NJN4XV%W zjyM|Fy|p3C*Irmz-RNnx$n9NQ{E4F7@I!D+*7Q8R+C?*57~fxTT-b)1`9>d236w#` zF_+*;*o~~EtG>K@MeOZiG#UCq^q04%d#l)~TVBN1SB2U|jSfN0Z{n{!qPP?IhOaRi zL4r>4*LFeq4pH8blpQ;hCm$&PfpdLIR85i#mc!eBShXU^B%EHQu+X4a=420*$G-|Z 
z$!-A@2oHiuOu7*1W^faO-;L?D{o&$hS%6Q@n9-FAx&%% zS5iQ^Ra5!S^OHS5=HCu^2J{8>U(sXQm4qat=MNsFsxWAYX&3BnWyA%FL5%x`s<)Hp zUad!$a5e}LjS}^lqR`z(A2d{GF;vEuPQ7?J6G*t;vpLebxE%*(A6x&DFOGH+wo4#L zg2HaqJ#fje0n@8$V9Nf(RaM08nR?_@_#T&aF}0MF)o|o(K|KZa93;>VDa_Wi@cx(S zxjp(P9Zb?j;HF0C#P4$2CC9Ro-aur-M!|@~Adn}ZaLyG2%HYM6AL+7dkj@}Q{hSC} zr&bO>C11TWx0qJzcSILWExO`e>*eGX-|X=HkfBp9P{|*Ybt3)0bqW4Bm1>E?{ZLk* ze)r3QL4HodO#C!Nf2KrBy}nTph#;+u~L|5zjUyBw^ss?BnV98A)B`pfM z4}a*@xnj|Dswf$nqh$cail4Zp#)fue0dO}DbBI3fdOm7q#s<%;pPh*4nj$-X0q&WV@Z$^$+WzKgH zkSb~5*mypcp)d}$d!R);-k>B_ns6qRahwN%nQ~B2>yO#a<8DjlU^Nm%Ov)L4`9Ezv zjMg1=A@Kfj_ms8M@0@m>et59O0s;j{{U#Ac0yCRSlv`DsNi2&0MC7{o9=x+y-#rXM z`?8)Txg??oN~Thdml7cY3s`{&(q>}qTzcDTmo46JHajY}4-Xb!aUPhA$T%zWy6$Q} znC$SJy3X2v+{j?h-D(~(<-#|~hnT;crnXoqefM3kF%AmBG>)S;qJhw+1*w<1o#BM* zX#S}#-CXUp`3dd~^%5JjzSDHwF@7UmclslGKjJM{=@0kbv80N|Jz9r6T2B&vTDLZloRu>$7xHfFN1?7H+7!GWIWcuTd4+2jqdIy(Do1oMB zFQA4=OThyxN<&T9Nda0d<^-f3)WFr!Qa!GY1p}WoNUu5{5%eJOINDcQJK_!}6^$K1 zMb4Q8S3)RlBnWG{1qY8l!)}g2GZ1Y5ET9rEfk=k0_}=m`bl%~eSumPZjDShUR#LJ= zxI!c>z_FV{rZIPSgk9(O$f6~KT_8e)Yib8#C5S}BdoVh9u8b%lMFN}CCXez0Z$Ls* zQ{a{^LXaZEyU8PpfaopPAwIkZ)){u_?K9B~WYavmwacN2O@=!O&7Z8C%~pDAI4Uhk zoVmJwbq4f5Tp;O9T1$A^$HT$Z`XuuB;H-y8JKCgv$!<%aC7n3t<5 zA*nQBISh-Y{9~9@wL(}zjG4OQpHtP!5smDqa{dC7NeQDy48k~)V&d2S<0du}W7^Ea z6xbU=udu-GM!rguGL`ypdQJ=wZL;KdoZ0A!StlG`Jih8#uEe9%UWR8`Qn0iBHf;9S ztTQ5^E#_z}mAA82&0Jgf`m}TFLtab_{&M$tWU=>uWB0js?~XpZz*FyxH&u#6byb13 z1T?y2&?7~KlI^mS_iL6yh5v_X*~T9)9e;z>el z*5&7oy+vXyi&o|A@2D_IjRyp!F)mL8)DB+Uu)LkzUKNlA?>HUbkzXvsr}~MZLPf0h zcmoUJdn(PcFh~tS%rX`vU~K&>;sT>M5`QZJlR&_qf?g5^PX1u9DwqSV1v7itPK8Zi z!F-q>D{7_&rpv<(VJKE(6;q!Q>-U6V*&B7Vd)+`{1*AFolt)f-$TFjqA%{P^s_3w@3Re^pW&HP}C7sY;HqT zmX%9gA~}F(am3Pln#d`GAei>Gi_R47wT;TGtEoagnODV``-TFhc8F4HG_oZPouWf8 zRkjeO53{Nuya$Fg&o70+<2xJ)ewO(N)#TdddC0?XZP>f1%ebTK|7Pd&p1D2HyzQSk z{TyRq?aarYtru|Vs7J2#oRQnY^xQG0(+-*z(F3PEW(yVVgh7*(C$ z>V|v6z6ns`(XoFOP9f+m5=gO^7pgAW6eeDc<^IucKbUFA-0&UDA|h9orvTibd=U+0 zs}ggQ)`-a-@wDGA^t*m{KjnVOFTC@<41b?AJAP*)GP%PW3+63$N&UeVL(;GdsbB2b 
z<`r|5522mj==-S<cEf z2WTU9VbF4A83h?N3%_s3&(@f2x@90`t*Xy;~|xH(7qSwz%Yn=<@GI0He& z+_aAQd#<#|F2a7tf7$x&*|ZUDje6>uRSe^*63cXWSUA$B${#xA;Hs#k5YIoJK8cB{ z9u$wr_v^<%+lmUYc8*8N!5^4nAnd}DwY8iQ>QjFfY9P5^iXri`!}cs3iFxoW;3XRaIk=)1XRyPFv{2P0jI8 z|AI|vTmJHZ=ye&@7n6(^TZlBtNGL9@i)I8i*=X#`xGoWKmtO#MB_9V5xe}Iaqq(fJ z07M;M5)yLso@U_PM&LNJ)#KtW5Ln)Pm0xUOO)s=(qqUmF6WFc#{@gAPHn{Mg>G|!i ztAEwHn4bVXRJ6|N_`1)y(<}JS!|;kBKW|)c7Md=C!zyHhAyHeR^aRtAX`ANT_S4!I z*A`ws0V0SLHbcsWuQ&f2aOuPr&?Bn>y3Rs2jiP@d{9k0fQ*@<4+bx=`m>s)gn;qNc zitUbV+g8WMif!BII2|V)r(|KJ6Bbu2%7OIi2xnD5!GSp8+1QkivRY>xyKsHu?|6n*-NM&FZYq^V`vX z8cmK?KVu1mTbJ!~2920GtWY!$>OR&oiFjc@TO-z?Qrm#1J!D*^Q_A zoZvNDqLlLh+$Nx*yNbe<8n|r)0dhg7{ye_mZ^&z<2sGJE3tbsiYCEUz$7@O9Te7Ca zo1J%TfsT-Ui8(XbK5IWev+!zijV{eKJJMcHgpNB6Otg^eS^ld9s0XoR=b?x&^jz(a z<+JHf#FC(^g1Y8jmKR&|s;YcD03}s8t~#|XC})U;w!QP0!Ds}(Yf0}`W}GRLV=`o2 z*Gn!%`wPmB&7xwkdnrcIRMO$F|EX}b{=O!j=lb)Qt*%iCk~s6!E2zThAe@%7aq)sz zL@v}1rp0d)S^o(g^Y9Epo!hehvg)AvaTQ3$NjCk7ZImZf-2V+r5; z1ZwdQ=5)F{zCJj6Tr6o^Rk!xW3wr*O_u02L2GT{*QiFJ6~@qg&ItS!@WwSWipY!=PTPmpZNAdYbhyeC zMf4S5(uvrWc&W8(4;4pw{EAsw)6mmhSPQYRkPVLoC+w)d7@u#L+}y9N{r5Q?Zric1 z4w>H3-*&P6h&IQec`pF(ZAM9sOSSWDcxS@r_I3jPFhtLHW!DvX?}!4|FJ)Y_kwQZ) zsW#sLe@=AOwLSXO)G>I!LxjdQez(nSh_VGV$y10DdtoXGc1EWQzC!nCNcwzj(asEk z`RM2>_nLTTXZV~emmv54qZ*`F9A>5H%ZSM#X&fkGuyb~A2A?QYXE7pnb^f~N0i56S zC>|4Crl4Y8SzQ9hVe$L^@wvcTVfhy0nmayHN|oH5ug|)aO3e3A+w9#cR%F?2D!Q)Z zeRO(kS;~1!ISlccfW}6)?3|p&dmMvNFS$;$5-7Q)7jo~U>+wpbl-){Z_;95h%$ z3c=8bFqtDrh|1B78;2m$7tDV*eXr9$O~kGo@>>ID^ntE#FooJwnfxpk@WX`F!(1XzJnGj{jf^>=)dThq4X&elf2Qvk0iKKY85Abyb~Z}&o+cB z=dqej{Z?vlu@|vW1PX$_6GYAPtSNeK=KY~lF@VQz$Y}Yv(5>$+@Q4-Aun8SJ%O-4q z$C61V-^;xW9>|=eer7RswI-FT;u1ShOM)BuBy8MsS>$820GZZ@C;8c>ZoL!1WynDf>Ur z@;~?Fd$EqM<11p=H*lDAgm^?C}xUY{7#*<1xD}QX}e>v)|F@{f{ z_863R5Z`NxtO&7PFvwF*xN9CyE z*0{r0z(7OEsRl`4{7@ONDegkRNUhZhG}6NC0Q(l&4V*JUer6LJE+XcF0q0ew!r>wk z|8AI*WPpunYmDUNSEdJsdG+B%u*cg`~~Akc;S+M60a|Jt?(DeldsP){9glUDm)E4{tX~KAKhn z=M6I44E+%$OdpK4K`7`jH)49F+!uo&U%%hA2*lHs03%kcCR4NO$XmXd>~ZpH`i{06 
z7Rc~8a*Moc6WZ1qZe{;1Zfv`y|Dvq|IGNMLNyF1AWASS=}`n3HEDn$M;Mxy;b9Mlfk%Z}PbmU)bS=1OW{_HA-r4fv^+j zIWyin5m0##uc2#QMsmk)ab>EqWHm)0#*ibq&2kppK~m@tcG(qPsfRZSXGy?rPz+Hz=5D{gmc{At`nR#|c>qCE zm#3H=(q_3B*=7~K^h~nJG&Oa^{N!e#HELg8OiutiQcPnQ6-m-nwP2%a{(Sa&kLd$( ztG6lKz4zs-tHg^S`|I7+&+@prykcq2rXG_Wf{YLUSb7vVNRj(_Y|Z8K{G(2lEt0d2 zIV3I*?5*!XdH;BgEiad84(T`?nTXE|zw(k`+D>OHR5~_26#GY>O}zqkO(S@llYr09 z{_>+>Tad2U_C1#X&xljc(2NsIquWS`OYe!Jm)At#NcJR9qZ}G|*Jca+sh0Ia)#Vpx z0za1bYq~Oz05^iARa7oM4hpZ%Go8W#o{&M7B)#x0tIMge{6rm=iR#hluar+(sf}8~ zYElfeEmjj2zLiXv^KB`h4eE*walED}_>v!%91*ng*Rb`YL@C*khl)Z!-Ja>uZ)>5E znj67mK&Q&EcbdkhjXaW)k%9JDHrIwN;=rx)n!AZgCTeZQ@hV}TIm}8?5f4KQquNta zL)*H)F=ESesV-twX~V9;bMQ;nSTH*kZ=2jh0QCi|Zg zD`Rf||F|LLFQ}GI%Ts&J$bLOnRoDAJqv=0|D(>?3-oHjvvOQUiM3ciq$C`6P)q|tW zZ3XOAvF-HYBmS!@G@#8eGeW9H!{~;FK}h;OY89JPDzW5IquYcsP;-V7CR{=Q@3I=i zW?^i_@r#~662%Y!cdGh)Er`&vkquF%FoXjNgD-Sj5KE05( zZN*cJ9V$`F*WazNQ)hSPFJTDToDBc$LvrW_xoIbrUzf^NYQg~(6KJI)Ji7Hf(X2G3r65*LLXZzqyL=huqw~fxE9UQlc9XQpZ zIe2a+n7ROdrgY8Ibqlu5U||C7VCn^uH-f6#Xq!wKAi(D&pim^6rAT{izQUe-YJC4R zNaSwCs8L6TS1X$%7|a|!7z5A62L=qP(&Pglu*VU)<4F9SZfcll0}A|3C_Hj(CRv@8 z&l|PF=2lUnIZ^u;*~`x|N0}{GYwkB(Y*ZMTgr{rjc7oAxg~u6yO-f9puHcdy$w;tT zg(wBWYskRKJW((!eoM11Lf&9~VfwJsow;SqzdgdTk4sYfM`NDfZMf0zyZONE&Q@yo(ZB2=;r4t(`;Dr9{O@3iwmQSZzkQWj4~}}= z^^>)z6~0y$?&CljmehCUw6-<-;}Z4y!lW}@?5!Wlwh0@gBQsMil$1y<&B<+Z?%+Wdfi49^>+11?;YCHk1Zw4ZHNzD7w+AQXQ<2Y28bQKH^ky zDj_2RRUhTtzA1yY!b#vG_-!`Er0&_9dKWbx`_6AT-WDwc!Z%j_?Oi`^4GVtm2@bDp zzcKn9u6$zUdcI@MiU|>MgJp$NNin9>TGtasnR8OFC^gvK5peOD=}rtl zk8dWZ$_-s-_Psz!|NPtb5~)$=zRx4@v~O?h@d-O>LDvKU6G)6q7t%6fK5y7uUgx}z zXXkUkm^P~`oDIQGLl{X6tK7GM3ZKaXeRb9$qR5&oAAkFS7{BsujW;hs5T7M@t&e7> zTU#x2<`3ZK|)c5@mdf^0sElE)Uk5QjQ3i1zxH)kvm zJz!B$Gun+-mn;<{yXul+3IT9SlTr>|MIynMnM8TVdF zWLI;h=TOFE!M7O;DFwX%m3w1-WOR~ENo-ssQd2zrT2QUPj`d!OSY51N>dVKAr!-ok zSpd=W@zNP;CkDf<@slp>2?SpQAtTx1f4HsmxiMV{e_{+%H^=v+#v)SMQB4|rY3UZx zrIQ2JiHCxqoJJAy%BtX#(MdEApySX`@~P2rGNTRkcM6EHqGrYC|EcBn|4{UKPu~2U z!Ezkrfoc{8?GvsX&rna_K!gXI70E&l8*H_mJnJItsjV9IvH~ti~;dvYDm1 
zM$1#oSxgyK8JtLei}RUTi|h{g#dH(ylON77MlMP+GlYh#B1AyOZdG0r!qvD5KUE%J z6M2neCDvGxtpOI`*}UPd*mJeBj@{aZ{5&mr#C^fAh!8U88K1fg<_X`h7m*M7bo1acoLo?jH` zG1Alp`3UXkAxZ9hT7}*=qPtie9WT>8xqf>*#^Dke+(KdghRlYyj~}1w9HI;F)p9Fk za>!-Fe1|Xg!c(ElWPX*-$l@qr3Qco^Tw^ebQd8LZPOvN*KW#Ob&kU?bAN%3PA7*D5 ziek}n<#1MSX-$tUHiG&nW5Pw=;wP7SZkUPygNs z($FD65!xC?p}ZCb$3}(L+N~Rvw7UZuxy|DSsNPlBNWMItJuvvN43wObBd{=Q~-B8vMD%5s0R-b3?o>Z&jprU^U77X|GOTa0MXT{ zq(J1uGE!A#-!H_hTO{56)e-S3Z0nw~BD4Y$Gh3A?pKPkByu z5#zeb{#S(eB~Wfy&VSZ62LHkc4vq5?74mW)Nr$9h+SvyV-;3(@x}x6>^0PV}7U$+)9bF&MJJ{DndPr_KH2VGC;(=Zb; zH3>blZ?`ZI3nL5xMe2Jd#}sNgw=5~AZmccgnvlw8HosN%Q7zIVT$AuAtSwzY6Tj(G zY1Q$F8Gq@e0zTi?grgYbQui2+yJ0^bN@5cTbyW1ZNYK%gO>PZQ5UBBSe0x|=dJ6s0 zY)jz36dz_9T2fpK+l8XEc8SvPI^v{bF9|ovEq|eF)#HBXNU{yDt7P(VL-Q@w%*k;u zl|$a2grrJn6_C`G9 z_Z57czn^FeE*JeXWyS_3D(^MD(%_FJmBC#HBb*yvZV~w0FF5{q-lJQ(Ka>!4TAdMW z^!lfGIsHHM%m0}rE`Ahh=LjKHDzK#rrNBFb9^RgqeDSCr35`8oQ8P8i2)O1p4vEdk z`A9e*WZ*)oafRpj5TU$t7)|B>+EJsSVpiTdD|K@QTQpVISSZCLqAYaLX=Vq+G-aw> z4Nj6a%+IMn!*s#mHs%+Y=v=WRZ7TRX1$|SPao8f-9oeNIxNOpeLO>S98kQ=-+}4Jt z>+gtaljOIUxk1+*For}ak2xQomAdc$j=Gx|-`;QM9vl^<&fQgdS4A(n|! 
zLppP!44b8>u24+1BBJK9SjH0Jf;C)@N9rgLDW-ib7+sG=`z!6nbrv9h?6~(s({;3b zmT;dfJ#To;VPuB_)vp*H)w}k3R`H9ECzWWxt30tiNw!UKk;7h~ zn8K`g+_)eOEX4b`h}x8S0Af*R$(u&?GddAjj)jHMPQ4l^4mGP_YwTE*Pn>$-}d0k=C@tN70o-$G}HS7if<%`S85_y=qi)@9ErsE zeK|QMJ}!{Goe+ZdyAN*9-lE)e@}Xw@qL$i2o4&Yslio~?!kL>a4IIGp6&z0dkZJgW z?fl*M^m=1)P(WZ+RKVI29hb)m=DCW}Ez$GxuE|^v*l@9v_IFc1T;S14f z(!l5fvYRWkrSy^JHna*C{MZ*9!0x$%>bkw`d>a1TNx#E}EiLnBKgC;OX zWKa~h3~ZDg?9MtP35mhncY3qIlg|+C-;(v@;&a})XU1|-v-f6>{62p{oa?R)URt{c z-g1Q9XR--pzU$4j69;jq87FgGy%`jg?L1IVpw6%to^-eae0buQI!0r9+^zrczL3l; zOP0IgW7dX1l8uT4Dle+*K1uci^@zpM$Bj;zj?ic(_C*d?qrzdM_eZH#3#@U;W@cv` zd$h;f??wJJc>Sg!RVWVp>%>BJ;@e^wgXP6w!Zk~y-P+G7P-&vw0xCT9AdNfjQtmZe zf~)}DIpbKZ5Z~qtMBk~F>mh_avhD39ve&K_{Wn+1P?u}k5W zu8A~jnsfZ&?m~8Psi@?#a`uH;nKTUSp_7uhOy9IiV7CHl8e6H3%pR`B6e?pS&jQ;| z5FskCZdi1<>)omI@S6BgK`T63d^_56@X&h|B!j?m{trs)U($u9vI{D?#-x+z@%sZu z`Ng07Sw1-jaSGifdE8@xceG3q7PqZA%T;@x0GHxK>~9QdC(;Y=TOoh!hs%d2e#2*K(8bm0Gl z@O1k50M+(wR%zb2UF(W83QQDmGm6rxo`6fI^ct=VdiRBoz)<1})PW{F1*EHCZ9aeT zO_0RQ0_-uc-D*Kuz8I279#HY{34#pT<7xdj9JD0NmMjuW=DMD38TFZ_>dmWdSl{9m z93T2c|L|$}}&=6Q=}5@)({2 z)r81MF|}2rkT-8%d&bN-EtW+3q6UEIOy#gX&ve(AI%%You+gm=3XL&aD*VzR4!p`B z2AnYVVCicFs!o4e63_@U91pBU61`k4eZZ{9qeQIPKk4*PSsKw{45a2~&K!V=jfZni zPrf)XED4^bXl8lH~KElsRsQT^VVjg#^J3Q+6oM)ny}!FUjWgPdy`jC9Xev>0V(s3@`}CAZffwzu~Y+EwRE2g7nb|lQ?~n8wD>ROg_DC$N`8Bj4duU{PN55eytB&7_q=Gg4@7+oZ^CzVgv}yc`6EzM4w~{)}Em*Y1qSG8wwv^6VKSb5^c)P$~IW zk)x=CeCWg)*a}~RS-Do@!f)`jJ*O`Tyg3<7h>-SzCApsadAVLbbUf~6Ub&uka2n-J zgWQ*YSVm%yWZ^@yH)et}L2DP4gNdBiacd9O(yCRO3>^YA>38T2N^+zF>Y@Hgh}-+l zKC-4CJd)9gkI9U>@}J6&nragi0q(%{a2)a!ejoatZ~0)sF#QpC-p4Zj?UV~J^L}!R zlQOyCO88cI7oYGtg%z!TS5%#cM6I#GZZG@>U8P=Ve5y|OR{Oa_w4>Pj!g2oZVz>9z z{)2G9Jh=C^^^ zXn2*wy{CPt#q*isENTLv(OF-MY3(@i`rrgVC&`poDM`IznJ#+9NI4pY#xDeNe+VCiU)YrEp(zwy(^-c@j3>pNO;HV-?xHgX}ege>>4 z`+bW8e>33SJTPIfb>opB?4xl^(6EUS#WEd5nUEsXLtHL@Y@#9mQQ8rikcSmKN$$v< zM-B|LWk@WLYR6GxrF6YjQbJHZpYLioG8a#^AWMf!-D{L>pyU79{^#f%=>d?ZmKoxw&}<;C$M5;ilC!Kt^;&+S5cP8fD?n`0#h0?fH*n 
zr(dt$C21y^IY14qQ1m;(voP>V*U>!198aOpEbrOf!%lm{#|3%3hUfOM7t)Hd@}0dC z+XX|p|5v;7U-0-k-rP}D$jE?Wm>QxCihiuo@0a~5z}(qt>nM5NQ2&Ji?SmGQ>k&l>@6*Q$__U%Z`y#pZ9g)6m+YN*YW zYg%|@Kv&A#jfhE5`l>4c*g1>oF=(Tq^Bt790ZqmD>)_XKj(n94W_f^~qty~CRS!hR z9VLgOL(2i;Q*h=nmZ(+_?T~0z(XjwWI*7rOf8mxP&}dS|M}}%5fR-HAs*Usfh1Wta zbBm-wHQYh~3x~wikhoBRr4lbFK{_ns%hgaxt<~im>JM%#Nv4>s79VEV0*hsMp(0fQ zHq1!JPK^L*b!)o2-XSWA(;%)1t`Y@IP`%=|bZDc0E1_3gf&q;&8@M(cq7e+Mp~MLv zhwVX@E3RNZBHf%)brwfmWQ|=)U|>SB<7o*S;(Nco%FxA-=-(*>s|9D%!53LvMLgGN zUjmTyQ+rf?u14v4*Vl|6#YaFt^b%tJAubGp;I0tS>^h`LNu%|QEZ?zGfFGn9C_IDm zK@RNa2DLy7W0=I_c-auA=DFB6*Goutdtjkf5@y}91<7)8<*~9!i4V8vFNYty_kexo zp=D3&3BZm=LRt+&;yKE;^|-WQIDKcBC@QaOe#92kVSXJxM<9DgtgD~he>V|yg&SuW zG-D7Q{>6z;C(4RDXabLp4G8f>|h?O=QA&+Ke@I(!2tdI7o<%)Anld)F+?(iXjlt!XND0TrycE}<< z#@qo7-qsQQUP&~(RXJ;N|G}S~#4-16YjB@vOVqhx;F8&RR>SfDTCxh@Xl-M%TuEOF z&mT%&@Q)KmAs&T>JvSoS4O+3qf`EFvd?`Y-iqxxjKxlL>w!AaI9DSz~>`u7RiQosx z9-oVls?q9N|HmdabEg|p^x16R1tAz@&2OASEx4X|8)3}~ideeyn}r=6_SSp*_sFUS z7iQF({+BfDB;!`Q86&0X`m9dcyEuzk(`;v9HUvzh8UzzUw^k_Fm%~w+{bW z1ow-vEIcHE`o6I!!Qzs!LTH^Su4Z$0So?*m*~9DLB?b7 z^3(pLyk);fm>95>AR7tRR6^zCkdopcx+P9?!BCU`o+D&JO7%kBj?8YK_gLo9%AKMfA5^ksM93m1C>4!oa#>dyFsAg|gfS-H!4WV(%Q9Y#~ z_2eGC1MvMbL%KZaCP=(g0OCa_p$1ze#!n}!q(}e((G;QNmMnHNXI1-<&KSEExg!OSm?!kXkScOL zW&Di#5M3;!v?ttV(+Eemi1rorsbbiwXP`PcT*bKO%xkwfzS{H57Ko-q?HSdZY7*j1 zeO}J0ob<0CVG*qasM2zsa(dvHaK;1JJekqhM8E3wj+g3a>0F^Myt`GEYe*QKv`fY3Q0Z-rOGI>?7E3zY3 z+I*7wHZJtUKF^lI*B?>*@Bf80+p4#2RD?;oTSL=UhI=X36oL614d-HF^Ay)a_!@2P8CZ^ogxBaE%Ae)yAX&k&}j#v!nCW=*@rUwC;?s; z^3h()6OYjH#x3t?q8S2Jr z;=ZAm)OU^54K50n92ypQGmIp(!Yk-r!*>(SghQ;92SS})oZ*KCeob(V!#K(f1speU zo)fpOFPj@pfZW!*4by;0K$7q6Fu(#iQfl}8v1iqNRLDm%!1>UT2ifC1uv^2R|6XUd zu*VU}k)$1KumgRs#SLdUI7X|ghS~TB;Y>0H-a zg1+z+R|owXmG1f?I0?JI0+^}`Yk%5e`NQzjO=xdSNTklPHo98pM8}hdd zT~m6WP1PPAz@b>3)o_81yJmt)f^;+L=ie-Y(OafJ!zGjH?Rltk@&{q=m<`wYt`;OU z(_u4G@7aUJQILB*mNwAs>c^3C z8APx5{ogPi4lm%ww$7!~;M(v;YlNXldq0@piZbagT{~-C{qRdx`>QC>caY&aC=1nD zE4q_jxG~zL3Ked%`KgA>$q{t=!Ce!Af^7;?!rdYT2} 
zaC}_+mPr!I=sjJPKne>~=b7Hvd8WgFl%V|HrKLFUymb6qQ;jBY*XCjIwxy)4t*-jU zdqHE)S(QE&->GLJrZMKNvsgl^x3!D9kw2OxuTu=n0zl?}^A)-Ssya_f4UxIQnB?SInIu8q~EA8 zmROD7jsq)>oS7*9cCK>LC<~+!K9UuVDi>+S33}P+AbO&k>rz@;{fT@%&`30TZ}G1% zmm%~{EomyQmb~_6{G;)5^1r(7|BVgZt?!53M~&w#3WD?lH;0`fH;BA{F<1z^1^Ywz zcl1~38}nzjcap4A?ARixG@~!%{|y9Pt|hM&J*c2sV$SFeDkA(P<_Sr(;Sm|mk8YLe zwv388m!5@3zK0E0SEmvY56qFd0H^nj;T zv@KF(p>56=zZsH_zhG*N<5L&x2tvFVXQ8s2^aI*G5&A2_MjclujPrkz-*x=Ww*fbI zOOt)i!Coi?akMQs5>?zEX-6Vul(jKF`LYZ3QQQnvLP7YJhCfH*vlKukYOAu8uS7Qp zpAUK=<5Kb_6`T!41qRTzxS!%m9h<~6KKR(93JYVgLbA@aQ`MoHe+d;<4V4@WCmB2Y zW$2K2%8CHTC9%qf0BuA|t_?H8ZI{UCYn8+J0PX575be(0F=5TW8K~7-HCyE;TTxL* z<-6*|NOa~@iF8QHwb@G%YH#}b1Qc{hBQ3NNTTGdo{u`|*FnbIl>i{=UQmh%~E0%21 zfMP3NrPMhe87dYsRwg~zr*V5F#yus4|Yj{qMYi)bvYM zkob@~7txOB^ntc~%#b!FAz#4Hp12ho9VldephKlqi1b(3ko$`_0cjCb)ez2&_ z(BLq{`Nj7)V-IKQh*6Iw+~z*1PwuhUH|1@Mk|!OGWqPK7%lk#D!#({L+TDIr$o(H` zNBE;d$@SCDIpW~+uYhgfbpC!ruJePaajq5#x+%dfJyF9Po>C~@_UA{I$Kx)4*T?TX zi3|=+Tb@Pd*&iZ_YT!i&bRh!FyEBm0Fg@4zwR`?=d7s#C>7V!hq1G#7S^6m&3S>Lz ziMraYS|#o7<6$$P&BLCn4u8C3CQ+!zO6^pSCRc=CL!$DuQj6dI z7~y0||2w^%YrHKE#JBX}cxi_G)Sv3}qug+K3T=*wwm;V;-d5;jyV&y#bsoZVhah;vzH#x*(1jb38JHtb-Ea`F$?L}<3kTGzL}fZRL|y9SX&f=; zkd6Hh*tSJBTo&eep#W7*ySl9^tqP$7F=7#s$jHR^nF=TG1EvOjlB6)$7{c!cf-%Nr zhbpPPD+$*jf2|{|!;gR5bjFyK#A#s8zX^adqjlM1&i2zqiUBW_>S8gWP=OnJ^F?HB z!QcJLzzooMwAaN`Ta>Bk$54_#!_j~(pX-q_8#EX8G-!_ioFx8vz52~NNuYg*X}po} zr4C#l777s*BBQ^5;!vI7ngHHUyx-Qk9gmiITp8`4RS~Nnc}0TU;+8=P$uzy>R}uTC zct@Mi7MKoCY=W_svzvRpwPA^v1ROt&CCsGQzvYv_iBX7AnIR_(%PEltG0H}xnlFgI zhb+MbkE2Nt${)MtI@NAKfAuhBCEl_Q6iVnyu)luk5-%IuTwJ1x+y(pITLt^BTek;J zH(@k$?K95zE>;Zhb!AEJAib>pV(z}r1{f&OvEw;_T#+Onq?-qSUd$1l=X?I8{nJdh z+UtcI{^iH!UbWDv4ur!B(z5OjW0gkb-fzBHO+QH1s{~MYs13mFOEcj%lq{AJ=16mh>pxm`E1KfnH~@rlFqc|N-v5;Rlix+u z!ewaBeWY6oZ>;>I^usQ7pD`JSrzI=(7m()o@Ze)F`q`ZM#)6n5Fb*&1uaiDPH|WTN z*W!_v)|MXL%>$)CMWK&+&hHh@O*LP(Ia(Bi^E5qw|LJT$c9Ig6$v_j zG%@w3q&@V7+`3J5xFJZeA!sfqrCw7Q#z4H%#&V`0u9?~;12r6p8K8hzF6`OwMKI`C 
zdnBK86J9x@YLg#KE++N=UX`LDh-PB1VmT-Qb7#0s&xUvZG@Gyvwl=RD{p?Xub0VX` z@ge=Ahml1`#MZwfFMJg8$3b}W!&q271Id7t9o{u^=Jvryg9M#8RSV}=cVZ%XntO{Y z**aIIe@hxYcM*-V4PJZxkW{eSM;Twd(V6dwFh{S{U&-28Ba;?txm)C_T1=Ukphf0N z`LfwbX=TlXCk1I})%Djou@a9G7RF`Q)OkCXpqd&ALUDHQfiNwG7EGj^6=aO zF>|i&?INrbhp;e&a(Sp~TpY1N<@)ewBs;>b{+!NQlXh<)f|{`xazp)4JD;`yE-c&s zcJ%#!$WO5S!398OeebMfG?5&k$(Y{us!DW7`$y8?nea4O`A>ONl0>E3jGK(P2O4H} zyl&*lK!6(ob0z*1Exc5X=Pr@RA8nz|0JUkGR2wWjIV^@;Ar~rjF8#`qSaO^kHDeyV zfOUYb)fsupoa3P>G;DpgW2kmHKKlilU3>ifJk4@h*;D^DH(_@6PX2vcm#T@ox{10t z{h0Tv4UGy#0V9K+9Jx4kpMkQ0lt7qaImxepl==h`J-p@+r~0Swswhx_Mn+qq&GBZs z`LH2jXru+2$)i47Q+VKM!^$?%m_#PjeOh&g@IWn5Th5qO<_KYvKbTxUiwhTlme_1F zY31Ff@+bkn7n2l)ciU36s4a=jN-zWavF87B*LY$@^7+@ zOres1>9k4x?KSn1J7SQV8d9OW8abUA{Q%WQu}FW5XxOdX6+;VE{0Iy<96&HTai9T}^4`twE54jG) z?TN^)vhj&8y-c!h=$-{-mQ$v}9k1}-bQ#^c?dW4*Ku|`vlsSf<1q-+i^vusy#!43} zb+8;h0W>JY9JY>21u#h!v4o>1T4>$Rr?WPfvovalO!)6+F$t^@YS0IrikNLZX|tEm z*WZmu$tai5@&qIC5@>ZhF8je`$MK0Bk{UV78}b@}g#ibAxvaLKUrlZ(XYancn}U5y z*aibnktNIWzcFeJh#$MEm;{-i?^qDJ=6#>9(7< z!OG;I?%#e9+1pNc>sN24;wS%%qTvAKk2^DZP#j~Ma%~WHITJN+~0;MCv z$RLXsM0`u4SDPR4RrS&MmVfG1~8!*3SeRtHW z&Guf?GC4#HIZz!`Fd43vW4_I^em69l5oRMPeEKySg}q;?%;1c7vz%8{O&>5v_S|d< zmDdViM~(T4Hb6Jm);I+NWmOosQXnLuO6;IdzzemR4WR6&Bk9ZZ(F#Co>#4#EIYb*g zfm^nq;xfqD*?qxyjfQ@`g@$Rw{oAaGp6II#G8K~>LdespiC7kQfNht-swbvy@ODzm zqS72DWJhcUE0}^8F#9HYk%a+`vt$?ThbOOB5FjL#Mx|WY>RRxfPh$|A!b@UCU){jN{P7jrMsh&ty7WI%wW#bYo1w`Q>rL zZ}2Bwg3>Yn=|BbXVt!#7Fwpmpq$Ko^hpYP3t+X`YURVNR7@vqGm_dTDqSp(sE}I`> z|63-0jv_RSGc{0sJ?)WACmkiBW{r}dt@ekhsr7vNgK=%&VkD-JeSOY*5CD_eM^uoX2p|GWd$$- zWz*y;5C)+o(DTEQZTo^>6tyI~@{4WC}0Ho$h&~{mpa{ zo6x=#%T~p#d&EXPf?eiR5d&d^2SovYbXjWM)D#8>3({2Ph%Jx@Inb75e~7E`W-{|U zl^QpM_t5*a`7qdrw1w)dIG!wXP#W!(j-C#Zh1?>I+&Z@V-V9ii66Z=WB4xBi+?OIA68pi{_u6X zFI*+C-CzH7OJJ}=C1j}_p|3T?Kqx4!a5%Zb6Zj;0cdb&Gy}r!zLs}{v&f5OK`gq3f zjZpM^xLxgb(Hp7UlD&ioJ}&erxVfBX^4OvkB zEJR$Zyvd;z-$?K!;YjFy&yLq}=(toNOdq@mj-|e-z^{eke4`j8c7)FIWa8;vVeq4J zAPS$Lk}K~tl*?yAP8NuQ%SjBF>q-1uILD{Ki7SpeT8gGTB;df{xV`FNTj8Izqh|8F 
zy_`_j6d;M0L|t|Z7L&HT+*DBd{vbSzt?c;aK3FsC+FhjsQ6vGT;o!~+MjU>6eQ-rU zhscX0|Dwl1K^jRt6mERrLB&lUA!)ENw)`>+1lzKRiv?SwCd9}(?%bY8@XoT}oALMDT!knd6R z^0VAIlwTC69Qhs3xk8C_K!!8K;p@`}iA}ey8vEzrDwt+aH80joMf3z)7p@tPJ<_Xg zo;)2F2XKMfCyu`x6ORMUY{-p+Z8h<~%uQx9W!CuGf4Xh}$V`F=2F*B;O?P%@?c3J7 zqs!}tXI+^$*8fvm=*L>^M?03>l`HE&h1q;9k_1A2{PykotxRCx<`w4iG^_6fZysHW zO8=n6o6JcTFDB7^<#**T|Q*!!zYA|>|yT^G3 zovPU*E&;*8q8Wh;7;lPL%bTv+_xwhi^whcL@Q6FOS4#8O9rjkbU_~lCVF*`4OJ*n+wzF znoqQqTxmCGIc~aZGBPZG6_;yCivssud<&D5w$3v_+Lb|ifV2}IRm(fgohe*A@C^w^ zahgi1s8jWc9>cvtto#64H5dxaClqLPDRS>JzrMC8SbXrej%jHAz|r@xO3jm_KEF!< zN-RMNKiEDVqaDr^!J<2zRRCoro#E~tOW>)?%_&#uupm5xLX0S2OO?%4)rEqvPA^g$ zz{nEJqO&niq_QO~@@MkZedo$(GeI+~?N2x?;ZMo>t$cy2HXS2*!;^Af#v#b#t9N?W z^>O1#YeBLE0`0ZaYHD5gQEQ^ZbCkxI`G56XoK=Oe#)HPYHCmuV7;1plvo=ZwXWVJ> zUlr<3wr}>pGv?L7ubdmzW@Wi?Jz!)r?gj>~GzNO4(dfw#wnUA6La&BvG*T=s5J7M| zFzer*rE44=uQT%fy$oVbGRVRrY;uJOVNTjN^v!BHSTo=`0<_%cLZ;PyNu|@_$xWqJ zz#9eiJu35fMaX*JdUwH~4PwUaGg1`UQ9Vlq;({+pn#wP7 zZ!hn4K8qGQ5%?@t1h@lJrIpzd0}kG{_j$#=qmO;$-e&IMZ`)qzjYa5h{h#;Z*@QX! 
zY`s^W^nblAD=Bn2u*D&?#>ADUV_b)H1U(RXABUNC5k3!OzpV8>hX$XrJ#H)Z0uYD@ z0*s_ypwi(}pU}sDy|IFCMNIoy+s{}a&rpL`ML(px_diTVpI7iapcjpS82U#k@dTWZ zA@vb4FtQ35`iegk;T#QV(uTJA#P-SbQhA&lL{QEef2Jj72CopafBXD}%J{}ZB@G2O z`z+Rk8z`(rayAVw+9=U_%u@O2=+_fOgh8ya!SeDHcvMxTBIN>gM7oBP07pd_uUeCY z&XEH?&0=vfCdAi}WNHamr_9e>D#vD+r7@=)HnCtRhE7)M@K;$?`eiy@sHGX(UtFXu zJkKw41eJPV$Na@c0Pdlt9?<-3cRFWr(Udj>KVlMdi2%tsOG+k|(Cog|@S_2NDFI^4 z3pnM)&pX3tkx_7<(!erf@SM(~`+Mz(8I}xEQ>6xxqob6v0xeu6a80kFn@Fx%EEUEQ zwaCLnTFU51(DHa+sujN~IbzY>u!tTMg@V=OL_^tvif7`dMw4;P+!2n2OjZyWCjxWh zS`-Kx{v~GYnN`{5oHpkX=Ja*m#NkUqWk$GXO$j+Nb8=U8_AjS8ml@1ng9c~Wr?D@UHW^F^En02l4*Aw>l343JU8GO~GkLFaOo;RWn(Gk2BB0}is zN)o6mpyTTih_{IzO#OEl;kjtUmuSdY$+#(r zOLNlE(;d96Ce&}~DV9C;xq{nQJ4t*L!MO~P0`A>F0u>HDGv%XRD zajk|;Mkgn?mhl02=*O%EVe)!BZLAoNs`22I{gPQO1YC6?=@=Qfyd*C|CyKi&Bs2vD zo8#JoF)%<;ce8Xu)2B2z5Jt%>r%HaY^pm;j553VnIxUl5jhQ{|Ww}hM5^8{+3fmlI zrim29G%f$_`CzA?$Cq}M#RDeG^)FFCl+oeNKevwhb}h{y*4dKND;o5o;B5; zI){u=6Of=)&H4Q5N$N@ahpjOBaqHzUPUTf}l{Ko_1q;=n3Mp4%2@!D3sL^TWLj_I0 zsY8sMc1@fVgY%;0a17E&-!=tPfyIEdSe*lqey@lMgG^C%0~TWqKRY{Kpa+($j=muu z`^6x{YP;g!xB+0p#MvcPd4sLo7Uta^$I1M$`pyIP-zWUTKl{hLIlbP$%*nq|vd65q zJoiO$@brdeo$^)XO6ZCO?q(SFt|0OEp1($J4olPZN0-g(4>osS5c*z9{bQORG5NmU z$Q=$hn17aw`RXqovG_f1Y(ECm{qdjiw#Vo{&*=}H^WS;$dOvZ@rbt(G$BY^B$w>j1 zIUBE%uSXOH+Xr07j^m9p%1R_+7w>L;s)gkxOy+XUvc)m+Hl5?c!87fAlmr>4xVlZT zC5;^y@s2l_wlBJS4ShjBz?nvEw}^p6s%ICMHik=E_Ttq=Usfz(JQlDdDkL%eWk+wU zAaSx%POnOQMHozN`JYe-RuevC0bL0>vW_6IOXtR63-h51xlH9 zi%<$^w>&Ru$csXyBF*Xb>6E5rN!i5N!1kR8u_e}4@HBDy_Raf=OAY>In+*GkPCh|)uHjvKt!4ZJrOpkJNp+NwDd_Ma zF;T06>ZMvA=V)TxC<~`?F+_~(Umb!FUztDyOXzU4UVoQFE_{T{+t0(nI|}&=@4%#2 zh&ey9ss$+(;+dumzIombPc6iOt-G4YUpS>{APm(jE`Kv?(l`vU752Z(;dVDoVI`H4 z-4bcGPC7n6bT4(g{-qcQ>z0~!W8RoBtJCd2Vb||(euBPP%j=y9c2pFYUp%OBRSmP_9G1P4oYt)S-i==;= z5CA*<8n~+{mQJm)2~^;2sXHl9Z@R}oqYZn7GhQ`p<{C2V9n^G>8UtX{T);%P4c1gL z+FSqVhiz-o80S}Ynh-c%SR})l%LpfjwxS`7-mKQ}#aeU`yrGic;M0ih4t0NHaD#Jn zH2W~u?EMs5HGVY36CstCvgP`)>SpTcHLhst_1xDWWz$NvkH4)Bu2ovczU7lEn)P{` 
zRec5mB0Zxcan)uw4o%a90%m1$ACABGm}8LcT7z@7ysqVxpGuYe;I^B zOTCZGtiOF5;~t_02VY-=e4oty5yzKEUwc#fqpST-tT?#1`}!aI>bJ2sJ(1aSJi(vA zpp8LQYtx~@p8fw$vR_L3pQ>M|{a@?%HfFG2-u=IC4!w7MVf9}(`bXXSe&YN(z0uPV z5FEVR!hU`%gVvor<9si8|7Y|s*q)ES)@$3lfSQOdTLw2mQ(T+;>?^A${N*+nl)SgD ziph$*JeP?vS`6dabu=zqO2YABZwHt+P~LSp@Ud6)LVdy);-33c?h16gWt+&8e>m3u zR4WOyC}~Mkh}#-5a-pHx4w2&$zO+Q^2dU3aYOE&&=X*YPkN6OABco76UDz%6XtJ9H zcGP*BuRHTJKs(6=ft!L(Jw*`hi{HG+11JV{VH$7|u!52S_9H+ZrB-1yyAe_rDm7c< zsMkmWS4eAfm|!X5A~%&Hx$6z2Izw%KCgh@N5h7;OLpk}xr+~N<2_gyA+>X(xccnhk z&>v={ON-{nN{JomMbCT5cE%GMPHC~VatVf#QX)2h@ehl7L`Mp5S#b{4iV~Tys-xJt zR{raoVFCz}s*YNtSaC@Nr8#Z%V4m|RgH}B%Lq7qZZBBfeaZ#JIm=~7keh%W16?)p` z^PR?CO9XiZt2uwdr)4~ArM`rvbO})|cX^u$%|ImFCG&cPRrYu%IxnA(OVH2xJAC$* zGY6_^hy>lZqu?kiAdQJyL8_9k8W)+V3 z=pFr6CGNU&50YThI8Ibeno#nzW=%bvyVj|)(^Ev!5Nc`SB>5?7y5hqB}rPS zzDf7xOA8Y_=|id&->8TU-JB{BU_P=2#L0}9ClmsvN?xx9y5%KOV6|YE5OCRNE)?{q zF;WHt^8LKEON}Z(GZC2xt2U>^StyN%ST)_6PUuMLF9p5~8JrF@9}K795x8d2i{Kyn zU=20}aOz~rDr-Zb%8HX#q6HVr7>Wg$yK=)Cenf;raoYzd}JKfz9U&9stHVYdoDL9R1Hm9^;#|UA{Sm$L7u8H zF&TFm#%1W#H2<~Z@!9CptWKUT)Yn{KmoBQ?12b>qyGAQdTVT?1T)_F?8#zq*H?7D> z{M&N8Z*r#weyQ0s%_U?eTy+1x+8*BG- z@EP;A^j|L3D>R(`_0>WBS8RWj{>zB|ppSW^yw5rO2S9ejaL4|8PdBH2rNiF^UmQ@r z#=kXo@ngBgcXuxC@H~Rv*xo<3YP4*hfkHUnk~?F{qfDJcE_6?iXGk=V@5gtQ=RjwN z$iDS$TSs$w3pQXHPc)M`4~s=3pnsX~~p5O$wlzk=d{EvSki3bmQY46DZvNl8P9f#(V`ZqZ?2qWL z&_qQEgCV5xv?9t%x>95X!%@k)W4`(90KNO)P&IGsrv_MA;~`Zq zqyQM*jB+zoTdiqL#RMCeZ8%B~E~{l@6X`%a+}BZMwm}X9PJIVcy(?!lhG6tTs!&#j zbtP5Sap>d zJq0u{8)I73A3a1cN}8F~%VT*xySUowF4!Q_D>yN-M*{tr;N@4Ah&byTkw5?|+<6zQ zo~k41s^MQk1RQ!Q;%(IduPWk=AB=yZQ?bEvichMhFR4&S1k$|M3JhSgvnV1N8_vV4 zSfJquy6NBS0vs9qUz@trf!&)wDo|A1VDdsR@;ltWiZ+u^Fy;Hr^ItyjLt&L!8|xXd z^nOzbV!tmkTx~>#s>a#iAUlXMziq) z%7RhN3tdFvW zRRpYK722Ay9huZ$QB&9RF@qR{m1MSf>NjJB-gqp%g+v?8WQxpTew6k0u(*LE^KLlF zqA4yDKv0rgm)Qo12H)ZL3(jBuBq>~LQpBdKg`#Ra`i|q%^x@UuxOAiKK-hT#v-4ed zN)=hS51Dbwa0Afn60B?{@o>9I`*C#p8t)fpp8o;V|DylR{_)oSJw4{-zj4Tum$7!; zhRgTJ6(3k(R|G47+F!y;}PT%_7uzszPxnl3u(bs2q 
zAu?%F5TH*q;oF{k_y_?Pm4@tw>~M?rrKQFq@JDh zBgdW4h2?1IM!rP>19-%B9gF^6KbMdM)e0CP1311Tn@A2?9O{ObSY<@ove61<;h2(z z2I~UT5}M#qt_LN%mCdHZ44@E zT>5bM;epJNT8T&Y|B#4sBSi{M#)PJ#Jba1#mPrh_!&J^QFSIODR?jH>_U#lKNQ}|szrB>TU zh?3&)adGQ0N%d;~q@BeqGBm*i*LhoNa{_{Fx+t(?n73NiMIO8t>Yxw^ zRqaY?2b0of;w{-}V%#u6g}WAT@lLD;VqDw0qGkP`$D3c(>+iHz#@`Z7us*NtU-1FK zj{d)`iD0-&MXh=`xftvG7E2C*skliZw;D)y8WO-=q30pjkBVz3XGQI9#LXS9<*5V61oW5 z6`fHYnQpb-ZbJmp#E$O-rh+ru)4TJ@A!|>Ea7fd%Lux%qKG|doq$%UPt+8AR&g`b5 z7|jny+d=@3D*lpQy0x&^I}rKGkvlyILGOm=^L;{6&&KYkm{<>K;RKnjbND>M$}Z=A zkY~H?#xq36Fv$fB??HNo3DbhiLY@s{%OL_5BmVK+@A8_;#vBOBh5TWHPWUr8E(Jo0 z02hw~cEN(PNDi8Ni2>7+z|@9~DH973mfi2YbQvE3Cy^*MwBS89j+;~TR|BIavV>N$ zXz%w(sLM{$CuFr6%h47$p<+*?t&~@(${k8mY(m(vR(gFPp+CakgCeCjhK%>q-tVQW z+YQQWTg{5(W?KvqU7ujW*e%uzW(?YaHuLPF6i>)T%}yQ7nqut}Zv}9%Z6xb7`cm$G zpq2D!C0U#d5MpF_^xgYx09-^3O(htrT=$rvh?ttbih#GM(a{yd#gAtrsWinnM&Y$7 z(ZE|a_GKR5kYT~V0p1}?GgoxCa=OManWC`0B=~SrxLAaW^*Vyp*-&_i6qJI%x@C?3 z-#A<8Z&xwChQG4oByAZ?L}_N?DXa|Uh@DOG$DJ@x=FO!kz>`$c@Be;v<+md5*!aL% z$iA9ZeMAe%Fq~q*E68_xzZ=ggdSZWN44-0G`x!N&9ohM9KQHCdbB79b;&O4NZKCn} zd4bkttwc&pAkSk(HsP_%lttc2gg;ANe^popW?!eNwZRa+9;B=#fEv603J4C2grG=H1-;C=&r^9(!#pHY-BLtd3jAs?y^2M`pv7 zWnQ9TBZL}I6ROT~;9dEMr~nf@1gZ;#2Y=+A7^enc@|xwXvJy${Z0#J_aTIi4zh==n zwMiSfjs>MKt7Jg@<$+dun1*W;C#5ZU4Yc`ZM5E@2nWAblPmv3(w&;O%~i z=KvD(#eGoRMD8jHVneoNeUVoS%@W>7gjE*aVcx zI>-*E$;k??w9iYKVaB?0&H6T*!d7KUu4p=q{I?qSU)8R!{7nr8u1&H~dtuMHpk3X& z^q7ZGHhM?ATgGizhP(vi!%Y7ICUZ%Sy}S+B26ChxnL{8`!1S)Vub02zhAgH#vs(%J z$Q+1-j1)-$$W%hWwu+kp5f&*8u7~nSbkwire%+_D`B8P*w+0PDk~am{ok+;1gmo$- zcoLlfdM{#r?{+>!bBL|H(9|^Yy?o8l9B2T%vnh^_5LWRDJP0JSIv{WkHK)YZU zBXn^IKvCS@;+4#a1Fr%2_8vX|@W(Zd%!X6LwM@>oYvIb>*^Q38 zwTX$LZ!%nxOROb372+aPcj?f!XAN+E)^|#x-muLMkEB^WpS7rHejY=f-!ss?`+-2b zUYNL@mxP`sHhYnlh!j-7_&RzdERm^d5RF?8JS=$CXra#(jaeehImvU7AH1O`3fyZe zJB=x#*DM-SusD0YAqLeM+?#-wIn20JuQjt<-?ZCw4sy&{R?%62KzApLmu%o~0`y&d zA~NGgVJPVqu=G0Z#;F$v``(@az7sU>>2j(eIv<9B-Hr^j8r_PNkN?sbIoKociptn@ zOvIZn0f$v>3t*681`P+J!qTMPAC0UnJ|ODkvt7OQ97^k=(O{%zL8&g!+C>pMNxH4k 
zfo}FHjY~8Ll`lPKc!Zs))TCV+r%U2GM;wXX1?fcxFj8|!4ZJ(PVCmz1k(XV=^;>2- zlz$MZ`7UUb5^j%UVY#l`qDaJv9aWwWtc!`MF7t9-6Ky5D8f_3F)0zAc1HO?Nrjs(@ zf+jIDr)**&IR-Es`e0yCkP#VoZx(Jt=z$Pd;bam}flf9=iiok%0yGx3bjMfbCpb4Q zLo>i4irNZlz(C^99yx0GM{r(nRiFW`pHZAC{82@fAbYu0l_uBH9N^|14Xug>p%k^s zP{=ob<0hIyUa!+u1&P>ulHgh77IPTz=GJs=J$emXXNWKncVO55I3DBN%FC(I)!(-e z8eL?*+%MDlNP>1y@}Ict`$9k9v3ILoLSV;JlERgQEjc(eG9pKldOb}~N)rYaLulNi zA9=;@x(9Xa1V{ey!I`Uj6@?xBq&R^1b+N58Gctw}4>`!G*uG#Rz3@khUI^GOOzk?b75_ z2tkxvL01E--o29qVr8;BABN}Xf{_NkI0PyOVBQ6?qwo&qf?IZyi~xfrVVm$h%a?;# z)1)r8t)@Q``OAY~t~+5AP+K4|yRd@os0>OGXvjXu>9NS@kcrkM7?Xhw@(RsjkEui8 zkUYRx&-E!%n1Ew~%UIdYPf?NuiaZfyWEc{TLL#Hrk|(GSBEt1cryaKo!|@9HG7Ey= z;}t32&b@s#^4wOeJeN4;vIr~(tSQ|Go)%0 zlQJA;%Ey8HfpGLcEI`$8L{j8AI7!oVwvbNA)DN4`2zBtw{zzaSLCLR=`Nnsufx$YK zR0_yXNntl#L35KYUdW)qe1$0YUQWxEUb4F9LU|{|rq85J^)gs4_n4NtT8SL*-zGVG z#|`^h{w$oUQaWu#a=y)p5utxUn*DUE<_W_48@CFi4my8`0rxoyt95nt{Zs-U1 zXD?ejF%)5p3{n+sd?MCD3aToZnCxh$%IBu4%usl7!_=b9h(FJo!u+)K+up&#F~bL9 z*a0^M16{MM6>6|gl}2bCNvoW2RtIxt6TF8l9Nj&1&HlmMrkOUiBYh4yhwR_|Ie=2D zZt-KpWwBA4^WRyoa)_8RJz|2K1YzX{6?`@`)kGWnUb;1NlF~8AXTd!G_-EPu1e)D3 zsP~b|zX^Iyh#(m&=M#ab!jGsa|5!N8Pa~04n8+<^Nk`FejnOkKS55oR zHYj;h62ct08&MrKIkdD1FcBjBO8h<5wHjpigM7;UKVor=9tSk_dR&_ zwzq-ncY23zzHd>lZ}1v6b@uzeuIO)*)Aje7tLOgpyn?L`l{-AoC6<|EETW=`W17@z zX#9PfpT_786mGFb=P&QXkyK?ggw<(|Rh|M9TeN-U*5pCd++fSgPT+a=mJ% z@!-HqB%A0UJ|;k{yH8?>6G;G*F^;|$U@J_7PBwLPkd~ufaC|};u>T5C_4v<7`v26o zi+J_>dix`^149qza27eDS9ODF6j&LN+pAP{a+5K^{63Is((pfkS(6JNH0huMH3c8^ zNe(7gfB14@gdxn$KI6Gt+1!j@u(>>v?o7`#;qF`UP#w2zD zc2m_bU_z-m2jh+a>8yVijoD7`7t07jyixgtU(SB{$H=>>uO;#h3jrwyZzlsho<`6u z^7sZDNIu@!?SE=oNO-$9%~)bBWtNEFlwbt*;Fn4GQ(NcB)+M~xsjd}@{r36XSrYp+b!9RaG%4NbM38nh$3e7Rf)Wb2y zy;*DXI&_CwFb)V8eFpe55RYDw1*Nvczxgl<&*loUKD(xd8zL_X?&OH6De8Am4g>eq z=pGmlHXZjmK*(*U4ZOKZKbA_b3c*!UBtPK;Ml>GEZ8l5-`n0*z5Or$(6kGr*($JQD zHJA*S$_&X?R#iJ4c`IHr>s03C8w}|RcNz2wyCz2Xnehr00<%t1+ws6ip{6hx-}no_ z-0c&)J`=+dlK@Fb)h+MaI`of^Cy^BBRIUFO=l%NXXN46bSpcmB#|u%}ZI@x1r$Fuo 
zLRMsB1ZfI{pE**x^~u^ooJ2*&lbDwdEAYo6r5juDJMqmA94b11s0T z5${d%75V&Hdn;^u?nDheakF98tPS+k$o3qJ$M5>3Uun<;&cmpIiq~55SZul{QMtQ0VT^Zn<^RhSaGeV-U&N2c2`2;0@d-Nzs5O3h;4OR`hZ zMs$P|&{)wwu2z#ce^mjGx>NlYge8^QZe0G>j$MX-RJm0&tKJ7ID9gxak)B z&d(YXXjhIr2dvpGo*}o)Xe1pyp-q6{;F-th<|d@i>#}x_-y_;R&cDTU{tEy!81#ShlEOq+I#OGe2B5;DzU z2cTs9jQKkQ?J5R&ein!u7HnC* zJPmi8Z)<`k4C6y%_z(D#cSEZ1WaN$#Dd8aMUW8AkawqWFlP#ieUzlAF zsX%&d7uuX)6kLl!IoW5nWB~*$2}Xti!ug7x9QoGn&cO};I}X3+D>UD_o-uPYuGj-^ z9^W^dy#EI{>GZ+W?#})%{oRA>uNYIA+vcI!tNA3`KS-D{Qg*Wz8NuKYXs~r9f;*TF z4z-Qwc7^;=Wi&t9YD6U>!3b5Y*WByDo0Lj~$l=Jvdu zQ@<=~wmCO?ns9PY`p}7StYJifXRN4bPC+ETVz{nS6pcJl$aU4v{Q23{kup*=v9Mro z+04;bhDL>DJ(+Fx8N4^Qx-4&GkhkM?e@Eo+SUnNk*zvAB) zBJvw%LsiRy=R6X?YB9=!jm9@rG!R(>piAb$YQu55d2>$*2(oE%@ zqQlyE#j#o53>a&AqSM<+XF|-F)(bMxOBjGc3cE@Nw)7g8A- zCzmAM`a4oksxLDObP|isnWxH2K9yzYN506!;~w zp^jpKN5V-|sfu_tHD6MrK-?tlrxqGOUK*inC*J)*I0@Gl)}(M{GO6yV+q@*Lx+ACZ zF4AP)<=ZiK(XEYBo)`(=Nk&a2)UJ^ZK}kr))8VZa?XrpuabIlKJnU(bWUBZNom8HY zcR&-><%&>5b@hpd?)v8D_U%Hwqql#=tb1^VW&gopSlCI3Ooy|iFZB>{yHJG|Ku9oK2A##oxHE!jN!$S@wIPi(f z2oHa8808Tq%N|Etgc-)2Z(#g`gn==`7WgbSZ>-==X{4U97b^u~TSj}rG$wp=f`(k>`4Qmz}n zo(Md{fcXl7Oc2c-Q6MutNVsxR?VusX?>O>nu}xd|wJSFn6Bn1_`ZgS_bn2T!{B09V zg4CN$WrRjr-~uD-5tA_GY_yRFlJJ)E^;K_6P$ak(RYMriZ>03Zrq`qjWsqbP=0qq( z9P7?L<;AAw7h@F_aXi_de3S7@A2+URvY1YUjIOJ{txJ~ zrTx|K2kejQU-kliptR#?QQJG6n(_LPUE{p~J6THU2n{g0`)HT7>aRdT6|#J-jwQY# z;!BO{Q1K)Oxpz&P(vSnGb7NXGxkUc-H6c$$<%-%+!wouQDhRgqm&Q1=tF|NmIhi|- z&inK^V>bNC1SB}LGScv3n99=nYO6S!n+b1#WB-Z1-uHeu6dJ^8@0eefgNB)lq|J<0 z=ubL&KhMbH*}{_Y2RnW4cr*os5*cX1y{rkS0u)%i^Qdka0Of)Emk&?SD?=1nbClJF`me6; zelz|TkiA~-pEH+}R;pHBCMTWncPh-xlp{yBZySF^!KMr(k%?bBJfRJ}8byOCAYBOQ z(`gh{RT@r+O0gK^+xU+h zxn>3_=7F)GkC7E}R1jkR6@8xZ(Jo)5a}I#3mz9R^U>etiF$pZC8Ybtk;XsUb&-ynK zxjW?U?k%9m!T5SKU&VC{EToDg*M!^S{Zi^Ox}>n;Ip@UKk$~@0%b7HSK#=+zmWU-{ z__k{Nc1IVC1vIU2pX0*NY*P$)X5u7T0<8(+;O2fh7AtNkF+qpgVvk+-d3q?|jG@6P znfss}n~dpAHoop+%_(ZACDNqu3a+RmAPCSLCfWG*rwP3$0i}<~=5%N=GfN^Sa|vYu 
z1=F-2NOZ!&IAzYC(F*b_Bs`qA&+8GZ{rTCAkVmI+7C8J7Y)Xk|tfaf3Q~iIxKh(1J)qkvohBIe_LtPWZ zo8Z7*_Yw!1JTSNQTS^l*oP?z?Q15zdomlH?hL5NVn5u~VY6iIZjdBLJL>AkF{%N^| ze{?Ld>+o&~C+;d6iM~Sch1x<~4-xBPTE5r4zNxmY*|3JkHV?=S5LC4j`OF0u*5as8 zrUnUoRlXLg!P3T0WCuwJZ?On&)v%;Fv8-8<+>sWb3hsiKR0?lLHpmm=m?*{uM5ZG1 ze;g@qv0%Fx3t^Z)Y$DM6y@A{Dy(2yW!24eT-;v?&=9dkkI3Vsjp=F0x=1zi|DihN!MfBU{PUw82Rf>OM%YRd43@KqvBm7`0mMo7#LOT zPtpo{sFp%H`8M|I233&l*~NhFPhj>{UNVrXvGE`+2655;Fe86l7+Q^hqpOCx_*^@v-t4MJ#>2tKtGrtWe5s z5*? zM&*2M^s+%T%@o{aK!bnyW_#3?k)~_!-k>tpIR7lm=b{q}-pqHwWgc$FtCzfjPS)zu z;Zp|npcbZRB{P;THCrt`DEC9e;eqYrnU5;Qj>mIN*3D1)C5iDh-~xpGqYI;-2y_R#%$v zaB$Ntysw3>Qb@pozMj3|(V-UsP9zb;H^$`oz-Ufo64KpesC*OpQzaTII1FCRNHG^c z!$i3ohQ}^C+`J+0mdHxBNih}F--$TewWpPoie|}6TG++H+lo#3;rX`mC4NM zQ=-kHoY)^f4chOT5qz>o<2OEn%^7FD7}<#5Q*-}F^bE5P*B6-l9RKM4tTVxjIX(Z# z%2d=m8@%7cA^q)8|0~?*3>qa1hs?lsc1ZVO#E|9aKbduE=pjGD)=Qr3bNXA?xSXaQ zvtt*bgnvDDM?DfxrXO~Hbr?oUV!?BjQmsHSMQc0ORBI~lU9OCen@>zlMq2c$ooBPp z4EhacH2-JM^hWns3l;Ns8$eg1e{v7yo`>iQY?1Q@znUflR_8&?4hJ?}6>c<0XwWye zt^Wi9a!Y?(dnilZuLpvLV;q;43Y{0!bSCDTh3UeZgTQfxKkpe|cZI=m%K6Z3PE>3d z4PB4qn_ml_q*d8)9I}q+QGo@@3CCoa;g&cJ%VU2H3lTU5XK{#x;wp&>Jpc5ULe@#r zZniM$7i_AT_BrV>&|U@<3kCyz-0c{#pOb6w9NVfxIPm8q>aNI5o&MhXufBmRzfpU? 
zM@Sqyd5if%XWm^IXLJBj*>g0dYy?i!2wH`UqBUeSB6ijpusX2bazq>#YttiJsUcmi59bp9l_{+ily z{-sclu_iidW6|NN1e_0O#!Gnu_HKH$%iDPWIgk(c0i51@fT5Jfv%`SAU7cWO&lc*{ z$kevwZ*FDTGN*{YnX?VuNoaDsQ?w4<8VVgXH~+-+UW}GgP!U&F>{gV`ERn%I5LE?f z-u2>r+j>Er`b5eQuRJVPWYhpDDknCT!!Ip~gq^IN8fwrU7Qj7$ffSQWq4}DEkL&V% z17G@PJX<^a$AElF1zZvxHT`U6y9f!Ft|t*~MOD!Q*otXodajwl2rMVK46P{J50+aE zcBzFKldzl2p1pXJz%kTUnqcKK)!8$KXv}osUh0?6-m1Z6>B)oMy=d`Pohl%_SbcS8 zm`D8`bQPW7WMXWaj`7Q5GU)Wm$qlAG8hp|V$`pd(cY>L4^N$SGoJsY1ySjb&Fob3o z>AnF+QYYZkLR4q3ZjJBr8_BGwZgN#AR z3##6;xh5*J^O)-;GRFnqL<~Im`lCZr60k)q91?m#p7_koDwu@1r(p@ZZa0uq*9;u; zB;b@RW0=zxLrhUpM@wMfl^k1Ypp>IaO$S+GIjV8VbnO#}g8QI8>^BC0coT7CiTRb& zfPLvP@dQ^OGGh&_331QAmlfnG_5M;!1Rx$+%5`1c$*?#zqI5MW`S1@(#1 znAS(`5M-`ppbtSc<>c0K$!zTF_O+=jrweGtb}Jlk6CLdIIly$2Z7uvjKMP?M<|` z;<+^M@$w$t49C2=CPE;e+_d;@Ox{VE0z9;CVhX}Nqc{N@CGQ#QoC9DTw4hZK_sA1h zoVcq1Gm=T2pO}ddNm*x2)e03W9;3maOLgTqe^hl!$7=hZT~qB%abGS7@e~vdjv7$h zK8pdT5~N=C6@E~5sq#Gi2*_wQZ@jT~$k7yUI`c^Ir}Srmbtjjqg)oGo)>Dn!W$(<+ za`YAyq?KPD4ruM-DFXuYtcfE-P6FO|TY;dsuD@>hOjzHukFu;A1Q-uQo4tg>8UeuR zcEP*p&$5j820N*sMTkKLEM8fpiXx=`)!wWdrc_-ik1 zkM0wRG>9ccuTiGUK*#uM(?Nf)JGlHVJay;jvy^$8`i0&P#%udP`tM_ecXi~AhGaX= zwqh%m4-KEr%N_M!FT@?)Z=GsYOZ~sY%0MoZ`UYK932gk_sP<|C!a{GEq#-+{>-wro zgJaO)^;}OhWPxsLLPFL=B!52)yFqjI2`mFgZ-|n7$|>Z0ZbHS#9CX07LyO>~OCJb= zo)}6M*;mL|m^`rf{xGS`7wUme zP~5VcRht6Rm=$P*#=}$2Vy~H#4TG69t{3wp!ks(@PoB*Md#)GK0cZ7okPHql0zt2w zvm7TgnvYAm_Fz4-vFQ4m28D=1(cuk@A>qxk&W2Oz)H3Ue33Pp?URTu&-$DdD zuRD#{2kBIdf>3nVhV7TXaKg3dY>IQb;Yq6K?lhRa=3y}suHU~Nu^1D7;cr9*u4GW{ zp@3JEUmm_6q&qbIBOlX4yI{k1W})bz?sEXmS!*Pt!j;4#mBvvZ2b8o z9QV(jnHg&aPQlAqu$x>>y(po6^2qF34YJ!E1UTk2W0|#Wr?SC`7!Q~fP(9?WV!uP7 zsl|ob1}1@z`zRC@BFhV;b%X(K8+t_av&5UYiUlm%RJ2Y>MKKJ1_M}p$pPG@15Vd(u zrUEY{Gng(PM3p+f+A(%;*Xg1!b8|L0Co+(J&}%{rSbM8gmbFK1HFJi}g;NY`EHZzK zOBpa&^VEP`YscoZJ$9z&iMv}C zc@Kh-VVAIc?3Z$Qon09D*;Pjyq)<+3QUFiuG3&tCoe!R)KVK78A{61Qi&n{cu_Uy* zZ9KU;ch8n@knRSEeZAw$9kR@iMr{{dzKO?9l9hpXdq>VcF!%ir;Bgm>&XI5S@uUv< 
z@ioPJ9dBa3Vk8NVyp^jLdFKIKq3Fx!L_zPqfQeBGv>I9(=>|=np6kZiupl5cmkBG40u*8cS8?c=2a1e|3m!cS zy$BZbU!8E#Fcd8 zZUaKSm{Kzs*1!`r%>IoIDe9(v<;<7G$A`=yfo=W`BvuyL74k!vdsM$RYq(oSi;1xeGgK zof3PZZMs3=A7naAKZX?}v05H$}aSo8i_CI&vI7c68fdHw5+ED-U14WepiT*YECod4sPCshHSK=?-nX zm1E)wF?ig6t4pI(BwBI)AFj?RO0cNO(rMeaZM)L8ZQHhO+qP}nww;yM)T~+E(|^C5 z=lgQciP*8@+cdLWcC^!4jAiAkT+^*wGymK@8jvvL`%EK^tEEwZDgQXg<%QbcI%73% zBBUlm>v&^*dd@v=x_iVYf~LWGym-)Io*vUK8lLddO%5)MetZm!i2;8-uVc^$N3K$K z4K$4K@?ex&@n|gJj{UGZG9VLh;%PG^Ta%P>T`1$`JZA{K1X^d8QpijPh5j@fd01Ds zGE`I+x6*q1@fVBnuTkMc>dIpEA{J@9=yX=Lp!`g!N=5wXDl12mM7HPS-4EY8x_R&; zx;1T&QHR|KV1_QOK710IC+`W3KMhr;0f#Jx6Rn;6xyPEF0TCcR1_3ukhL|%J81m-< z{CRAmf>jSWKy8NKkY%D^?9VcK0@0O_s^e7s*Cz`bBJ|5vg7luIXH2XAea*fT?9&?# zjQq(h4L{)_N>tR(W`H9;JDA@af1&tC2e_Ndz}p-bB%BoIv`S7$E@jaZ`-o1$aigEa zwiF;d5k=?G#Nv-PM{FYIjUfiNcIzG3d5>PY$#q*f^^8K%LM#RCSUt9aBFZ1HOdc<< z&zGW2dTQoN`bI+(B;?}R+NoDjLFZ*F7+Kmu&3)$Pxu|=n?h*1{JJ?ZMU&4|z3=-4{vcm-fBxrt`uu%@a`S2|AS!eMzVl+KMfGCoxlJ(IHg26< zC36_B_It^kC|z^n3=^x(m}=?%vUOOsBt9GW{;Q;s6bV{XqiX_7nTE{wOQurw_W#TU z|J^FSD&Uhb@HyWdKouv;$5X*cZR0bmA5ry>UFj%RGXX5`@DAmi%E7isKe&|Dx|~rqm107zE9_bZMBX~mCenEgmRNW7GHJZ`817mt+{dR z!osX0Nc`ys+R0wBwZm#S{CJf|TE~J0&~Z)t`3+Q{-g$D-c4vwlbVQikQxfsj4izu! 
z?%R{akkD}IbvC|R7EaV zzrAyCQ?=&)apC05u7=Y)No=EwlCLQ&b6|7<*2X%g!c3Uy*Xy7_8T4i7hSGAh+WDMR z-LSyqD{WH1p9NK|*>vL}N>b)cz=~ghbtlnfXTat?4@2mys~nPvh{0p-|AiC> z5f*7Lfgb18tY{oaaEYb#ORFLoJ1ub5LSRx{$3xlM2E&Qlgm$67f!d1zRZ3DlSYJf_ zG&pXv6MIdfzNUuT?ons{#B&0}`Fegz=MufyM zk=Z|45TKMf?hHSc@reI|v83ki^kK(wAQhZNV63YM-Xu~Tj+Ujga>19!E;31R6*_!s z9~M7d8|z7CJbhmq*dPw?zf=GH7&+g5IEH!YsM z>v>sN-S1taU$zFz7+v~iT5xN>p9`t4!@e)#%^lRAli!^qwLagVtxrhkR)`(AS(|g| z;TT}uId+AGceUx194BeLhyNysc|$AnU6=o3!8^;X_fO%b3N~W@~`E0dfKa9$0HL<#y!**fgy`r&dM43UI??4nhD0n(~kCCQ2wL4(OZuSM+F7 zVDXU5+Ck;kJ#1ZbA2*NJBhVCv15ez2?a8dvMYJ4f^?T{jm!0>grg+E{_Ji@3-xn&o0j~Y^xpV_6<@=VWaLjg6we|A1Zh5D6)#RmdDcZHUA#VX zyl1It;k@@bCydX+4cJW$=~R}9>oJsOQ@baU++UWpalT3VhH{2$-lu{UkE#4!BL9pw z-jclGucYqvO?y}2CVMmcINDsUur<3qf}Pz1kG%(@E;jpI`K{+iO->T+KbJBlgg=Ca z17$BFj8)9uWax#i_ES+8;A~M8ihVN`AU<0!n|(8h!Z(tMpmAOq;{?&wyTxxrBk3%+nzmHiazc{ZO|UP(t6O%R@djE}cT0ky+wwnkO# z+4IbxgoIjDbB~umBd4g&{esV_vhyG#}{i50~)gkB!zg59SI8rf1 zl<2ck|3;(C%N>Ta&O^?gsYd8Ee5W#59I#YPRyYcX!%W2JTah#-O-6!}awPZb7ye0s zLHoIRzk}KZ&lB_}2^*GxY1+zm*fRUr*xnW3h<^&t(n$`>X2U6V__w&dpR& z&ZZPGw3I}}t*F|fKB{RE+}|`guwr_c^$!!DIDUMNk*zrPFW)2 z!OACTJg|c+V>8o z<`eaFg=Y5?;>eH2)3-4oBMioRo(cE+j_()Sx^%MaopHm?~Ib55Otu^zvTdgJrAYWx(M;#`Z*9BSP?SKwH>niV0sPMlU+yq2N5!oMF2F$w0Up?|Jle zl)k!L>dgd1T$9%3S3iO*-tZtsFb>g55lSmfn7M7b$mG6&#CM3aCFqKEb(S!D9nhJ* zoI@ZnOe5G3_5H=8=l@1=b8~C=`G%Y0_ke_YGC(RvED2oKp-i{RsGy`O$bZKv%`7M4 z$cF-(#ehTw!1tD@kt&A zQb+cJZPby?68oJj79*TnI_k?X(|CrOpi#(He}<$zV7j$Nlbleo%AdW0&{pTxv6Y_O zvX{^=7TYQrXxlZ5>MqVVR#i6cW&4*`n!_KVtwEI{KVtCDlKe!iWX0;CuxhKu4zrG} z%&(^|ukwyA5dn=In2nA?+nzPr7nFqscgshQc;sySJYw@jD!b*iuMxEVZ#x+Mxr7F& zSTkB`xuHPUDe_;8UQfkPgJLhHg8?jTsnGtN%{Y`~g9z0Ih79t*fzn)pX_4_TSYA~~ zzrN#90#nf3Mejta;+N*YmHJ~LRL6MV8fOaa5{p#8GPE@~B?xH+dD#-?&_npsLHLtz z+!OBx3a&jDjy)Idd#AC2KwjYRlHsaHMLAy#;j*2zUBwJO=d?B|6|D(B!okvlTGDG; z0?S`@pbrhofmCU@L?hhy9!_YI$dKf$i2igJSeXo*!ft5Wzvd)uf#V^$v|QY(C9|TV zR8pyqu?cYZBCGbEefuEOWv^2pQFwd$d2XOkl|6j!+^ z5u?`sM)Xgav_@$jBLCf1{J)#QZ{k@D6$8KPwc++D1O=m5b(~#mF%$!3*%7rr<$#o& 
zJ+oS}UVob_ThUIk26-I-#8;c*POn3-g^^9cukN zUr}KD8ER6Cv-V(#AmFkI%fL=nvleB47v~Q&pQVNt0c7&)%v{LmeuF)v$M$~#W<4l) zTbg2d8!j2(jhcqu)6_e$D}5pvIpTssv{ofxdzOlYUv$6$E#Oimt?ly81Si?Cn~Ig< z^iUoT?Q~u^CmtKB#bAGA2>-4rRU@*JfeN-K#)i0B!k)?_mrm>`0&;X@WnEo z0~zHRm|+eCiWwm)QM{%xbW1GFh|pBzOVE)p?@<>cZLuu`aBkco>~FFdh^YJ9Nmyuy zWzCK}d*pxg4Y)rxo*%%dAD70YR6E?%DWUuZH1jSj7CTJ@1jK>}!rFdM}($hNU z7?+zO-(!d8Sy+27LL_*JPdEad@Z}EMoT2?lQ`x6O$BfQDrrhI}6Vre}s~NLLRYjD6 zB>Zc}$(vz01I4StxZYmuR8RfMlR;EOmKQ_QIUZ86`bTBu*oJXLn$y}N7J{RizJbG$ zbC7{n`h$G?;Mh0D?SkEY4mefztugf`k_Mm5q+V;ZyvJ>lDintX{yg2u1McHv`+^#& zNTG31>PUb zhrWsMf`h{oi2#2Bu_rD{9R>3dq@EH?8Sv;)0ybsrCokM5IL^ObMy?ov_vsm&l?B1U zvNGKJCsOj`G4t$gGja`%)a9+pYQaX!wtL@4lq$Ma;jqjgi+GaFQhBHUQwdUEv3#ME zTm!?g1_*90cUedZo5J}Q$znh?hOp=2Er}MOd%INeo zI)1LDv>VbhM?KV|=rczd{h!?M7>1>$`0pQuGQQM^SYV^=EEV(mM5~K~Z})lzJ6Z{^ z(uorQ+)s``17a%}nlT;IbDj~w#4dix_aUi7n>2~T*a@T))lOi#=pEasLZh^qXXTC_ zgUqw8vv2YWoa6Ti*!!LW?EAgpJ$-?^dO$L%<{X3C;~Zlw>Gp4U&Z!xw=-e$I$?fE8 z@77Y%&HP`hT0dBq+#2EK zLB8#88lCt`gw&VvhhiY&u%169DQ3{bVG;f%woy;-czQd0`$=VT81gm|?LnquEQ=n~ zIRku3nVnOtfMjGU03t2;m2tdK>*!Usi^@R4O(QZi9gKIBc8!ap5~;y}e4?WBUNk_- zCFZ%$Smia7&||J-%x3x!%$JV!>`v6cCkxVCWlFasrdu53FH%D19HdM*I8}^9-dF8i zo=sbXFo0oUz;63&YJ#c2KidkqvURxKwML_Lq(41NtSLar5ySaDbpD(!2E@o{8S3Gk zwq%fRu4uCTbzxK{IM+1%GMEI=wShNiaJ5!4v+cEz z5ZH(--j1HN3I4uWUblm~Cx*QyQN29fj{P>6UTICt$Vm9oJ#u4T-& zv*6P3;4optvR^KTcS9E>Yh+3PvP)H{!zE`3J>y289nPi=qfpR&f=aE_YYsIhpa@+e z;YAt3EdyU9{E+s1#N$zNa7e1<^)5&2BCQf|I0s~{OGh!_j5wOMy;fT(!7+i(QY`F*4Hb_s218x^yzPnd9=B|f;^4MLG;1VG0T z*ChpCZ&HD)VXPSl+iNITwH)@zjB50>eravR)&7Ud;P?F;0}@*G69+xtp(Db@oE1sU$ zK|c`MFXHBRvEQOE56q*ZN=_qj%?SpzTnzVAYCv>k&*8XF2M8I)3kYR~1F6oD%a~0* zJ4YZ-=!RaUlMuX?c@nRNV5z{JatW4n1KfS;#|;J_h(tZ_Gt8#4ymKCqbN61#nN8lj zT$D}`p4eXm2Ui1NY)yVwa+JL{c&+#s!cX4h01E2u{LSx+bFa2>B*OP}vuo!OW_qR-7ah=UyX1(?Hzq2b$lG2oA2F-qI}ZT%p;p;cK8hdIDyV?m3{>Sf7k6d_fn3t zRQEe?Jk{F3o^}7`A-XM$S@#9@vuUZ_uG0R8ly;G^%HXU#oP?x_PlF|F#KDc8MNRmT zIzFfv|87W=)2QI?{Z57LUqZQkSS|0E=2*|%1)0PElQ}vMVnS`s0)mHNF#sag69`SR 
z8?rG?0T*vy`)F0v5J^ZpX9Nme~bGyvg+8e0kK!*1^czpo~wWy-_0j* z(8w0K7MX*H0$Nn$ASK{{RSqMlLVJfMU{0u_1x9ua1iVA z2@{wzXt1-&gu?1HigNL!uwPW5;y(!P zaVK&443Z6=;I)u`aCQGZn*Q99WurL?Au0uY;J8d1L5ryD%=riH=8V6^ra|wf0by3f zQU*4~y5^<*p`TotLPWI23L+C32R}5@y&M&?s^(uI!}=98WQ@v5b79H@-~efBasvXS4CNHZsd8CaXzV85S_Vs5C2iDE%^s8#aYp^4(xylVDnY5xKWMj7j2qg z&>r7;zQ8Y$C5qJ-16^7G7v8EAk;=)Z&ei~$Lh~6LQ`;}|O*|iVgFhV@Iq}H!xjZMB z_AOh76Dw$2H(*(|LnLrEPFLKmD@2EM) z@i(SXb$WRTgRv{_QI#e(&*tf3xgLC8X@9pZh&Vl!<+TQ&6SlmHLlolfOh8X6PU&i6 z_?2r_mshOzB&Gjz2K&puA_f`F51MOgAA^dqNS|6W@H{cVm2>k@4<~uidy=&-Z;kbT zMVdDT5ZFDDms(j%O#Pj>FRt!^#SL{ z9GG>>HwHi7G4{j!tP9s5naK48Hbjd9WUfMna;rug%<6(!{!7K$o1H$Hj#tiTel4B# ztY!9{3&!?zIy?h?oeQp1xQnlBm@mu*L~2D6syFuoAiJQ?z0;syP!wRv>PjReno$3t z?)@pZHur}k{Kha2z#pHpjjikf#oKMR5;>}aQ`7~5$Rg!%WVKj&z#P61As$}Mc6ydWe_!||;J79cEMVBp#|L)%&mX4I}HQG#jbl<#xL$aWxNGKcjLw3X~ zwjw8RfV8X{unI5#=m6KKuqIGF2IaD+@kA;Y8G_`B)J^FF>NV|TH_6&1beSY}5>fnx zbgb640R&TlLHT4SCYaF0AZEynYS+fa^Q*(0n%1b4tnbz#WLOGA#X!Gk zH(WLB5JgmJZ=+_JP}C@yIBPvhOr~mKRG%MtHLyBT|{HeRYmDdNJCW;LP-)B;-WPxVq~yu zN{FhCXpx~M1oH8vsawaG4uh!K_>nbDT*wAg#dcg2ES)~VhzS{drVUQay^NfsV(p2O zAs+4>o0kSlMil#ryjg{~;}Ldqfn^5u$(Qr?>0MZF4hfc57Qy{4!Ak{86q`cPtF{;@ z|5#eCtzluXs!;IfPpf2%ss6kxLI9%X>?YE4fUs9q5ov;gQ#Nb5&!`87L8eH`>R;R~ zwgekgEz8825-RmZ9}|S<@TDqx#9DE!2t-mIoWgQ|r%pwUa4?_hjljXis6tcA^G+$e ztY+Mb9RKwqTykfnxq|Jvh+@~+6(S#Uz+-7sQsP#u8JEQyH6Y>(`RD3RoA@}Xe8&Is z0+jl%)rn3^oKmwV*W;!c89GPMqHUtEXmF) zspJUk$l~oZZNt6ac-l zHQcl-A2%Lt(?$R!xn3Y8fDlBKI;a0M=myW_#DUm>kSzK*ehjpwfx%wJMnRgla5yXg znFt#b1LnLHTa`EejuG*&DdV87b|DYFxeRdY2y-ipH_U;L=ek46kpntmfDNp|c?ON) z4rzS`AZ>??OhJm{hRg&Y)cfzmX~9;!7ZaOvkTgJ})_Sdfe|qc9dShrZ!C#_*6S)uC zDOBq8WIBQUyCyuh4eWvY_1cPF2Rb&Y6n<|Tr6-R4u9t*5h*++Fr0(#TsfJ(t_Vab} zi4uKjK6jX>=sLT*hb46Sb>!@La-drC$<}1kPIp3%8n0$7Hd6lKGdz!3Re=(7#yKB! 
zso6g*BpVNX(;^b@9DlrabfVEKG17pmW8MgO_)qWcZ`odfc|v++k%2ZENgBeKj?;f- z1Y3;}-S7hDD}9ufWmxW4u3b_3-GK7Rrm|iZQDyCQrFPa(;-y{~|8_!6y6DimYAK|k zqJbxn+;eM?i9Tvf=DT`V+L4Ya>1BZ_MC`+Bo>S*hegq7_!|Io7vV@G}KG{CK8O z>CcacC$EqRnI`uUCc#t24FTPflF~&Bj8`&~GF&q1;IvaUwUBsVXrz>?+B{E=yr}TB z?x}?>$W1AZ!)wXq%p`!Os9$s^~EHF$6 zticr5TV@%Fur)}>2f%> zBA>ubf?S<Nvzdd`OwHiw$eO!$m^S*L{P zee8$)3WHH?W_md!#FIteTNV7K6gs97 z3ptR+p*zkfpkA*>6nUO6(B2D7e#=0?ma`#pk(Ky>6lT#)Isfybr*Nk!GzJ3i=iM9G?U!Wu@ z+|e1zNUUN^1gw#gK{@e_-9ms(1N0~Ij}k!Psj$3z4t zi0@DY1JoG7n#3DtQ9C7;fYMakVMy^;c|a?$p9-ylhI0yFqe39i*f(_Bza-7NRsUPI zQC1A+8nSw8*>-k%WpWv;5F!RQ@1(L;nt;f&A+M~4QG-CD7%v>%+*Z-m`qM1I#0TWD z@ALzr!64606dyi>VHV6t*dnkcl8=oEq0Yv;*(=t!W9ZZ}bZWm3H}Uik?!jS%1y1I3 zUYgfW*jQC>t>1+y_tE4MK51r$x_`aD35!ex>m@=D^g%Lrup?;GHXXEEH3Akeu$SsN z?y6eBRqsb9SK<8rrt$6HE(fU?beNBuOG_S*cL z8CBX8v^r@G6%@=2MEDrsp;}X1$hy8I1u0uY;=r-1tM-I42}v3T10jnge;dG5k0#0S zx0xQd-Q*I@1@p2=qQ|Eeq1f9QUxvF$o6+wj>SSmm!RPmNVVqXaPD&xVUZ{UORPq$D zYOTk8#P}Oc$_Wr^o8MoXJ&m4?)D>k>Gm}}Z+C##Mp>+c zlTz>%%E~z|O;Lh0TXyqOW|R1c^la=C8sFC{8o{KPT}H`Qn~>*kXaDSLYTn6} zpVfWa62ZA8jHYi|9Tu(-*&pp6xE?SG$w~wZRHlu?CZq~u{|Xt5UqXm@*s==D$5X*L zl|!eG={#11a2b+_(@7F`=7RZ7G^wVM_3VXEJSTp8I1w>bC7Up(wy_KEehU!2JP*+A zwCdu-t15l8Tu<5&Dm3ynp~MG*Vb~;`Y|wusg#9a8(u;9I>sVyNOiuQ2I6o^im0edA zNPRP@QW6b9d$zlqPNxYYwzMRXWi_XyHB~YNXIrt3Rl>o*I!q6pSDbKMo1b#cKouW_ zOfxL#sJKiuBx9J<7O|nz4C694R;gM7BSE1T+^`He`}8<+oH*c3(OF`dtA-uxbO}GDi!Q-^{ zFb-*-e|F+ENaf!Qv&ja0p)@Jkk3FAmr+df8BPJgyRexvuURBacG$cCWfxAQUAv&;= zxT&vVrAev1eQ8+vEZs`0nAn!*xZJ)o?^*&FqWq;~S8)D%+7KW28xHh=d^14&yXG>% z9L_nL7hidSJkH&;q`R0uW)PR`4WlX^XoN?FUlgVQ`5D;maI z=_`Z~pb^|Hs(^~22Z2<~oRFMSA2DjvCF0`juH3o^f0c5db#&ji_|JIJ&gJOrB*{cv zDa7$)!aj$9dpmnZnryWAQ}|xiliK@%_Cnl8r77%n^HAYWOhX zyxKV`e$a=;r4$R;MeJbYK1lH+9H_E%+oV5lS~M zENt|yY(thD#y%pv!*;q=V$5#Dfobt@q^mP2?7R=LZ(LKIWzpOf6^7%jd)9AgoJ3aK zFhLa+MmswHr1kiAP5~mOcCfBla)NVPWHxBH=gb+066$=XjH1Xfo#aYKnl%~}NDbd& zPR-gg-2DMOz+_!Ah-AVbyNG+!Q_y(;cH2&#Syj7ft0t zO>K{IpwuAZlJADmEwjp?6+OKtGya1hHA^C~rZ6<7)O61(u?8ZoG!|?oM!4-w3q(`A 
zs;}ytdJPTrx0Ez6_YYN6Yg)cdYltOBb=Nbm=9)O47|LTQVWS`<78yq-*;!1-$OV@$ z$!K6mHI2s3!2;zte)g1cf^ocft8nirb8gD-4hm=vHFG^#x=hy*YjfThg$_6=sX-A% zp?JbU(@lDO5DS}8k&}VNzLrK&Mz$-GJ09=jyKv?oW1D@W4Rz)i(~boS$;EIX;jf{e z=!16sCuL>K-Pd*NJjeWxL!wR9Q>Gg+)ti;+jNGJaE@c??Kc$z^Ki$@pAIMbj-HEJ5 zhAiA7$5mg-3GB~AjtzxB7U;=-bS=04;SPGnZ_0$b-Yn&KhrwiUo z2LcGcq@Jbwm`c7{Kz`^|Kn3 zASy~B-~U^w=Cwzd-GzTqmZr8=Yx3+>SCHQ#)d4N%24(F;Z&{f~$oKid+d{4lD4TFL zZq!k8U*?LsOJu3f{;Y=gaz43}*!Q*v+)25q+5uGXs#(0s}9|LvJ9IF;w%#$)BLK!VbVcO%8{c3VHLl0ETl?s~~^ z%~pFX`1Qu-uCeN!1%DZ-ofZ1vBP7qfoO1C1!pyEJQ~NS{75U9|cWYF_3VhaOUCp zLYx}v1Ip9cj|H?<3>07{1zDEGs>VCv(f-T)FUl;LBiSk}AO57$05ZnksYH`4Md-@efdrQ6<2aEm&GSiHJic-0y^uz)}c_(ZMJ%jth6xz}_zWrwuaj+Cw179!S zU>P%^7Hb`e9#Uz1i%n@e%-CFc4`1sOEmu!`L;t9IoLP{a(1%?otexQ@_30nCznGKp zuv>Tk8`N6)Ovna@5V$Pj7n?Cvq1GsTT>_A};vXB5ZQi!n4}8{;{=TUE#5}_U*@MjT zU7c?KE^8{%0Lz5#$r^=Ja6N>koDAlGpfz-+vfcud#pre*E_A7#r?P|>c`iP#Ye-d>5e z2$aCYrJ3_Pdv-6Hu7GG?E3h%oDXjAO)+kuIH5h?F7zLYuSQm*|xGDB%N{5HkADJy* zLMpfegH0vNN&>9s)N*Ji3VO)#I;3i=P711EsTA-4TT z(hN#Mq>FFX_Wk_i~38pN~`*jUc}1*QGA->D$@o@g5O)CaFlEWsiS+PA<8#*?XIA? 
z0MF;VJ*|N)nUX8tm`aL9LZ?0vuMUM<;OukY@Du0c)${eFX(Q;k0u|$YW3yHT3Nk}a z+B!u)-*FSWWK*c5ALnyg<0PPu<*N{IC*cSnFxBjNM(~J5CyjA<7Ubouzn+M1kU%k@ z)eQ^U9NUaZl3GX2)?G5o<#)SQNxgMy?i^R+o|XGo)-PNl!<(~v5~F-r-*^6MRR^Y4 z3KL+8b!g>EYGXJowqjK=ScXQ9tt@Qn?&V6rwqMCiN{=XPW|X)4!q}V@iFTu47NOoU z*L^Mf)bUldDrA|=$;b@se5NSZFI8^JqqcN4++7deGy(pWY^0=2!dW$nO}0Jx!~C6z z+(v50>@m(rMjfy6zh-18@1~(neV#7(&d9$fB+S`0^!d)i=b3;cxGtchQfYj zsyM-#l+AZMjRjB{s0fS~d>G*AU7nmwvr$dSU5m<{>bb&3nx2&&N|Rz&tG67lrh*0+ z^Rn`XNG)M}WZY*t-+{xfQ%%>c0}OXQj`jLjgH#wSp+hv9nY7U^l`pcsie8~dM1~un8vL=mUi9`qro}lvwGuWF($awK8lxLqXM?RGs z#_3o(FJJKFuM-BCv_*e2+!x0TW>6eQOut~Amv*b}Z@{1W3dSP&__v-QKh{E=;OJ27 zjND17ffI)jO^Wi&mMUwnljqKmIujVgz49jcCzknvcbW8gjpdY=bfpUbJ$^9m)kst9 zjSihtF|4`iHMBBISVF=|%Rdn`jm?SM1h_oLVkutd_&Kl@n>iOw(qhBGZhn*FQ&4i4 z=-bAQ;P|%)QltH?mI-pu)rTT&y0z@`*YxRD65^@w4&Roc{v>uzQCu~+lg zbz`GQ9Gq^h+Ch8sHEMmI7A0C}14b?$n z(PX1anu{Cy;b_%gK|H^cdUZ~Uefm&@PRFH{_rc++Os~AQ>kC%<3h%rVw(A;7MJPFT z*qlWCimtXZ7<(>?RqFN2l(O@#+tc)pga#^{{W&?{C*uhP%=`0{cGvp>>a`5`HWZl7 zVjuxrG5?x9SJMplmQeE5% zv4qpv{i5;2Lijo#M*mgA#*!nB?a@1{aVT?mF*Y2gF^m$cRogA0&4|A(Rs-l**+#)z ziTS?wp|o(Em1|8I)iCG;+03FGC2*@*aMRm(D6@%g#C1+NtoHeRE7EU!l zF9Jpf5^_cJleEHC8CTkcW3nu0;!Grzj%i$?U<$y(8UeOEeFsu1uF{|3D@OH897zx{ z(<;ixp9o9zlw#_fKdr=ylO@1N2AH1P+V#C0QvS4A6;#B=cBqR<(6-3aY01F&`T`4 zC?ZkqGmHt0_pghnpjAAL3EhIDC<%qeOBJ8NV4~uXz^~LK#U>)^YORmcQoyQ#33qfs^Z1PQMp+R-J7nky9achIDzN|5I!od)fh&V7652%M7$dYzs&FPJN0Y?OS5(B#H(YhLN=bm} zG0QEq2^z^SmX%s(Zjf(mA?l^(RBklj=A^-QJlRgs#JTzpX z-;%@tm8)RmG{fy}Mnr76DZ!K71fx@`06R-b71G{hq+~ewFzRXLR`^S22iF3frel)gWh2RecObFse7o)VY#;A?>yL0ULA$ z6djCogw=WmMtcCvP!TkyjKTp+`WE?Lz$2E8;Zl}!>=}^i-%bnMr33$d{(YgX7nNR0 zi2H07o!G9Jy5OSiub=pB0JWmSp^a8Q#hQYjzcox%H7Dr9$8VRKVWZNEA~>VZAGYr#}Dw zU2swEI(4$+!YGPqnzE!dAzUqgr#!VX1cCW*nlVjobz5~2%{qQ(ttc$xvV#zuziNpr z*zb%jNmSZ#R6eXm#7%t=((;|b&A-g7EFLLK?=9JoJFx^mxnzvDdJ*x`vqL{&Q417U z_Kgh!qf>Jnf`EZKJ&m`G6UYEi2#+S!t0yp;b-wKmB9H%l!<#bm1L%DRymbPP42=M+ zH1tP6lD%1ocAhr{CVGiD7q>v%!j0?1&HBzmegrABHU6y4d{sO?y5P}dxf>LUj{^0; 
zUH~IwO9z4ez+vT@ci;z6@I=U$!-OTS^AG<1Byml?m$@mB46Hu$wWA6v1e{m7SPr_Y zowAHalu4C!*43LxjB0gJZA|2P2ph!xB*2#7%2T34n^8;FL4=NV8bx64%bS4dz%a%)x5ehz&dj1IO}OC+875A2 z?mq-Fl@m1reMoGWre1{Pz~wD1(f4oy!y^l>6Ln`F_%xcCVmN_DmarB+cS*Gfxcb>} zDcs7As$)jYd`iXr#Z)m)agoK1#5vk!od`fO6J;7wRyTFAG+KZrR7)PN2XHGpR$Fa5 zCi-OLGd<3zy>ebx6z4FD2H8ksTPaU8`{TReaf^0U|0;j|Kg(`iuWx5iFgMw;VI99! zUNcGB7|VhG@&ZJZrV2buiMat^RyVD6T=MJGfXuseTs)1q1;okvvnVMw*W0aycz4ky zcR}we2O2+V^l0{}1-V1cp&$vTRY9l^AcQ5&J)Q>TknSsLJ}L_e-hORxu>xz!$pce# z$eOjL5ru~Xb(r<%Bv2A?fKQ)9qo}=gvUbhN_two;13f_9bF|jQW1l2E(pQ=_&;$_; z`TZ{VmgEXtR6zPbV$JWkDQceCw|F&5!+@T^EKt9+y${%p{{w>BY9nH*MRlmPsZ~lB zc)D7!^STft&$Hs(@BI5v8bjE>BP@+}d|NfF&1$bw!sygQmUKI3rmQ3O%0ru46fpt( zYHa+b9trH94Ib&x_G!%r+~Rc7W=3rqGUTof&R_A*Vj4uLj)JpyB9IOl8`QoOXaHI6 zbSN;aRhK|6*d17JTg*eXHgYCFmxPz7+tf(yQv$VAG&m&W;p&V7Ng3B)HB^v_j*gCbWxML0 zn`Al*h&yB67dOt|XCARA17hv<8;dXu%=W+wI2<=-VmWW4;Ng2YM(PS`xJr@oTjssBjy*SxQrUAs1k=x z!oo$d$%BCK`3ySLzV#Za;uH`8ea_9E3jm+5Rj8yYZPdmwn_!pC6m)Daj&Mo^B$=qM8#OJ|head&TO`go$ZqWU z1}!8Mm`n5##G0cNfnNj+$zUN@mLk7eH8<4&FO$;JC{v}*!V|}`k7M|#$TXa_t|bkm z9wi};lL&Mnv5b&I&i+-uQ346;6~o%J$?X)t8Llpd0kEqmfr`yuU?zqjtz}wx+ZFk79>PpHBS|aa; zY!viZ+PRa0YKA2-^QVEb@+ielA-k5AWz% z3cX>sR$G?Re$|y~;u|lp&E@-#Q+Y_Wd1J(F-uVAtksn`s{^$`i3s&_cc=rUc*E(3Y zzsz@Q*SHz$30<|1)HlVRW1vBTa=y{DkdWrIR7IQdDfN`2Qs8=>t;<@EP;zo>H1!0u z>?%+Qp^M*CMNv0}C+tNLr;*q&iS!_Xj(Un(rntb!zgLYpQ9<3I>92M7hi7)6vf-&f z4ASpPsejqhun7cuF`D;bEm?aLBM+fvur$DMF;r5kEBjv8qNX!xz%76IK3s>H2$QiM zUTw27#Z-(kILsSdO?wUTmN`o3+4OkScIVh?12ayhUx@=;upOmgzFsb$GCuddcKg79 zPKE{y&qa^-rAaUt>9iV{cDYju;btq>u1ha}TsveP+`p6?@o5Nn47gS^8EdJIVnUxjI3XGWEFjtG)4~jN#ltK7rlf{I+jGr=xN{F| zL?=%{yS2M}B@GJ*PRiO7bb=xCq!FbR3=U&)i@8<2e_$??3Gb`rNqpsHY0N!(&N)j7 zr9Hn)YkzuW1tytjgBGtD>iR1PJ)y(-g;yT10NtuJ+Y!gDU`p|7&{eXyn^vrvf2SSw z=U3Chngigc+Uq=PR6)sP^WZ-Sqn{7W$=olSSpA}sI*WRdIkd@1gSxHdp{4sBpl<$e?fzFs)|#{@GNb4Z6h)W8G#={LlfdOCY7| zWvI~NkP;Ew2LlB;GvO0IQIXf2b+q(kNGVRVeKr$tQ?pfFWQ@v!f|g?>vqwr7rMNlm z_`^d|`H>>z@hoP5$6X$Y1$!V%%!QhtX&Z;#n(ZepQ#6VT1+!#k7=EneNg7?G$yW_$ 
zdZv>wx_aDDTI-#ysEZLF!@+|sbgqTX>+R36Jbu~_{X`d%&=K-i_bSPhion+NQU z*FgAUnD;`n7}Isi|5%&;$*O@V;J?~rLiJ{K0>9#yBma9srOK+nHP)tl^iXlcK=sbF zk>I!IU(Y5Xh{uqh6zsf3MgF>K&YREg|KBb9KUeM7p9c_#VK`eI3?zEZe6z%rr@nT? znQKY=%|8zYD)uBxLiPp<5PUCwtI@f^;~&+=(|+f_*s5gZS;?Z3FfeqSAsJm8J`l+d z@K#D_A;F5Hwib9{dQ|HoQ&9qYe=rf(YV{&hYKmS)aEb;A>yxrVyATUNCSrp(M+nIF zHqd9JdloYq@Nds+w_$LR!s|&V_Xi9f{%lSl{n5|Domd&pRaN|ob_B699+6*JvxNAl z%hmHW+@LP@r-~dxQ6yY>p7X1SmK9uw?!({qhQqKgHf~12-sFAo^0)7`eeK5Mj=pTF zRcH}jwV5#5b8A^l;*XKrfUnO-Z=Z)DuIG1`L3jh@1m4og!bQ|g+ealuxE|(Cg37CYFL4JVi@jdS0pvdZ)ba@pDY`1b^Y0y7Cm<5}y8k0x~PCJpe zDh!j5Kg_(na>p%d+oPGd=YOtT?d~~y3L(cHt6~p-Q77)D3R4rJh#F{QE|!$uW_!iB_474+dnh;7FmY=*z{dx1 z5_)^5@8kAnbuf(z@W84XF&rz4wan$dvC?OBsFo@%^9Y?RN-@sQn|g1zPe>T16|b_^ zlR7Z~WQ0|U|4e1kFeiZn9EKq(^E-X=nWBH6#9(jIHOEB>8Vi2#^=o{EEeM&@=kt8m z)0R))CGaF<2e-M#b^=peYyOsjIZ5%VYs zm{ENA;XOXA`g=MFZy)yDpGl@QPkkmlv5Hr1?l{&8e)FlJuX$#t$|mtOsEMYzkuq-x zQ2YScTDr^Kv3bQBpWN>MyMG83l!X?W*=qaC({{Q!glX7@ia%6^^kyx7vdH<12tK76 zeO0-#uAQlFXonjmP$UEuFix)7tdtI1*2smbE$XZ4w0;a(nR(K<_;8(=aV2mBdR>Uc zU;o#3oudX5bNMTxwpR`5_cR#FmUXRH{J>I(Zna>NnsB*4Q9|%nJ&c z0obN!M7LLgjhN=L!RSyF)KnDpyGJpTdW!xr38{x!ItSF)D=hyly2jrU2Bo89fetHq z3HL)vi5D?E1y50S_l51WF+{ibj7D=zQxOpgK0>yD6fb{tz_@Igs&Te8=QGuvQ{$PN z!LV!G&0Uv(RGCt&Z+@8u>Xq&2Sr+6@Ggh!DlsnZqZdPK~NnFR+D7Lr7`mWGVLZASerY8uJ~st2zsGlbF0wjYK~6yw z-vdG%pi{o(+DbaihdP(Qh)(X=BJ(wcyn%WMbRzWC@=s7@hu9J`k|2}C7o>60^PHqi z7i{F=$eE3Sy?|SX-5xko7dbFA)+B8fi|y3VEsr+HiOf{OlZlwWHB$bG{<3lS|GJdZ z6u{|d8RrhZXS|irQ7_Jep9p1zOoq4Lw7$N?ET*4QjXm45zLhp|1~5Di_@Qj%2QW+f)lS?EoXnH{sz4tq@PkK$@qv1 zB9G<;In+`5iS($XsAGCSGuJJ2rXxeuhBg~{tSuc|7_u#&XR--fsU!6%NH;3R{{y)H z=cV6lN6lPw2>k%5_;oJeWo%biN(2a+iZ%Jz+*bHf0Wo;mOV`^uP=Dxge+X;)YEK_U zy?Xh#Kcow$he7;=+dT19ht6w5tp;@rZQgv#W{)LczFuQB~;I}hRg){nbvcj%ai)SquvwiB;zhW@(&#ZH?Hla{x z%Vx!yRM=9m!tbi8+u3V$tkI2~4(@Hn^ETfTKcvq&FT@gbjwY<40O_t)*7_4fD1Jt| zPhiLO&C%Bjz5+0&p~}M-yIxU))>S(i3vu@4>nRhvSp~bFeu@`iSwR0JM{opu2@vkA zpW&ivD7NOdfOd-hhfE8R1d^TyeW&xHmXfbI(x^f7@~6j3waQBLU3eT{b1dxnpJ|w$ 
zo8ZF`hq!pJKPz%X5SCNjCBI7xjsgOl?FU68(u%PRQF>{?W5xD`1A9jcsDDY7Q24zr zR?CBzn>&@-0*mtXUuV74gjx|q3>iJ-ZQRd|2}Y0P&gR%wS>3?lZHcv2xLUKUtU;Cq zNR)I`3en-3AZabT9t;!f6LQ0WJ;3oWGq%Z^p2k+0Z<`%&K_Z3#)OujV!wziLxH(JU z(fV_@ zMO=;wie}2poQW(!P^^V|e}*KL|AiO#Cd|8d|_8Gs2UKJ9fuz2=Ur5U1av4 zbUf26j`QI)PmVIemYgmuaGflSWwnTI?Bb>T45NCeFNT@-VaGk6A8utj`cb%>2;Y-x; z@153N&YlXR{uCt%`XZ;ooEW|okKXL->u1y2kszk)&%wZhTV&5|=gwHrWQ!auH&=l> zBb8uXzO13-HDTX>RZF_gS^XN39rPORTQEoGuY-5i&%`vjPZgDKBgdTCU;8p88}QHG z^s?WXB;w?Y$TkpaC*fK4sz`&@MZgVB-pZfFCVr`2R^y z&u9tp4sHn3ogv3sZ5(jv9jut#%v>l?N*s_Dz!4x)uNLW7rS79C;qp-CBt1wRB3WIdjBz(3*K(c>0c&)Ohm1M$9+}VxtMOs6r~%yyBinQ*wy(y>o^LrOy{{ zV5{*N-~z`w_nS^*C?J_^*vwj*gBV7mZbjZb;g-({oDwUPKkH3n>q~#s&3aN%gTn|K zpXaeP2}3A0N$@aKlXTpVp5DcQB8X1MKWJJH+k}SGl2?zz01Gae*y=!xVVe_o#Kpf{ z|5pP4v!3~ko@7Vd4;jU39963aQy!QienWsW_uI$z6_ZpFw3%dUCsj+d(YWKdSJ$d< zOX8y5Ks*Gvh+b-HP+;cbu>}>|N?Sw2P7Bjlduw&JlCLkL^45w6F4-nxz>HF4T+Zj; z1dZ{Hj>pzn7-OYcRG3KJ+E%%dD@!)kvvNo>md4%&WmWclzQP7!PSp`ll(dpJ*Czh( zByCKJnYC$SY75)wn9sdlq>WVbDxv&`RwRSIu*XNQ|AANL?-_l!filr`a4_TnQIEzZ zs*qmIUXa!?8!Jrt0HHql0y9?YV2pXL92p)QM*CBlZ8%1W%{U|^k|Va?y0v>n4uKcz zZh!VAo<3V=_z;vZXv7g(E-k4h2zop+GOOT=mxN)BG^$%@HxtO&lu&W9vDFCo2z76389Z>%+4 zunVIw!rT?d=AfQO6+F?vkg_W<0%N}d)%gu@bi80})4O{_p|OUhrK8ZOa4v_4nm{C0 zRe`HD*e5HLIVTRCECvrP&HCb`nW<2w<+(UIOFvQjzaWHJ4g+V@2g8YOD92bQT=r6K zY-uNA$D5v<8?EJ<0#ZC#qivH3_48+w3+Y=Y%hxRC=PO??O4QqdiUMnA*0$Zs%eCtX zSw@)n2}Frop+eEnF$uZ5PHJ5l@{ldkOnwsnJ>Xmm<)0MdC zkNKU>{~)72>?^Xthnr;f%;tWb;x@THn%?yT0@uUD+b*4tmM=G;1g~YdZ!fN|AZs*; zQsSx-QeCxOMIrVUi?bN$2M6+cLlc}Sm96&invjY5v9Z`cyb*dMrY*tRl zhpDckkd|G`2CD(W20!IZ?h83VMfuVArz#=Gr$fpc0us;XY8<`3eZ%`3zIWyWyVUMsbizGtc@W9w}VO!317ljdEeZ zT`V)d2@2!Jx-`@$bvPzPJ-?YlWQ~O}oSCB0Q}9 zRSeQRnK!bMQG1dQTexx3O>9c&z9ueW*H{GDi$EK79=7w$p#O4xtmWH;(-O^u;@r_N z679I1^6v()@a_)~(j_sF`+qBb0e%n44Ky&!EuBys`H~%fVe}Dd695v6>JlnrXb@j- zfN`Sx{p)2x>-(nMEEzTBXFK@?Pw5uvaiiKaV5f0DmEoCW&>>=_ZTyiHn;LC`-5q*4 zAZ#{H2oY{NL)sUGjn3BQJwkJ~LCnNc4BB<7Yh4m1oajZMl8+QJo?F1s^Vxm#V*+nC 
zpZ|D87(Zm1B%5L_RH?B-@t|6lg^!5jo(&B}DR>U6gnz(P2_98T*aX68&e=*tk1Pz% z6e=JRLP?$cYzwJ~IfuF`3p)G&CET+2G&O?wDENsx4W)KGq*%z{CKv$VCt8rl&?P5^ zft7Fk9gL)wMYW7W013&6$Lgt65xcrHBehONCZ5SvC*CiGQ%v8wpO9@D{ohH-#BR)7Ug$| z{NJ#&thePaza~malNQrCp8n(ZN|=k_!(k}B#{U@Q%Lk!qzrUaZQTFudcw;o|kq)xF?XLh&T}K^tNFrxkY-%&@OzPcJ zX2L~)(Y~Tm{UZ#f+TEB;)Cs2YbETbcz7|Yp2$u^d%7+3O@i~+m00y{CKb16^SNo;j zY*akG>f-r%*z~5anx-+E`f$#BNy#~z>I$RRdGhsd0O|R7V&>K>XEom!)IkeV-O@ol zgf3~yz$$qC5;2?PyapBkU*`kwQp<||`_5z3TA$uo{k+1ggBs*o7u332HxyRt#p!?-Nj(CRBu8w`&TcQZ~DOkvr{-$!uMZrq>6k z0$GIZHP`*QGThk6qHBB3-m*$3jG%w7qd8^-Ucf>xw@LnDF+DtnU4)GL;QPy{IQKz7 zlNt@b693$K5>Y0>ul!ND%@(KW0=204QVR^UF^t09yOS0#gt+zsc?dIYb1Nbf_F;Mk9f zS<)f2_YWx^x(9(f`EFm4TZ|MLCEthQg4{w^>^T}-YQ;7WHh<#~T7ORkg@R-)s~XYJ z?FJaY0s%i)|0{V0O0V_pCLkE^migE7yVOXDIja#qD59YeXfTX7?EXoX$5SI>xUBws>_;G`_Oy>uWYl@AlyU`-ufB=`P7Mpv^~ zH0`~KgksIV|KSuq%IPU0#Q?iHO-3oP`kfqVn_vR}8#XST_$=e(Tmz)({|WEDBnK&x0f14 ziqASKcD`aW;X~K5#r$!dSG{>j)JhIv1a2HUcB}!l8itt(qmrr|0~BJqUH^ku$$)kDC+d9WxM;wYSO z_=9~^S!dXLpoLT^Im~Fc)VYayyEozd4Nl=*@bsomP`#gMvdpAU!U2~-ZU5ERZWn_c z$EGbkhrs@#!vyTjO}?;IAgHvWV=+0K#mBLLiNKAZ-zp{Ge&jzdwpB__u%I@hDcyJ! 
z!rbTTNUm<^^Vw7qs#n!cRu-|*7%-(9)%Ze#X2FexpcnNA;u{vMA^TNB=|;IIE745)ZHH z)i^NcU<<<22te~{cs*pH_+KqRA4_2U6r)pW$mmkscwxCE(6LKSM z9Tj6-pB6<)k%LtR+yp4MvSJSGRF`ciny|`A>a5AKU7B%Uq8%TuMycaXxAgm2W1Ouj z3To$6?m4x3nn;zwhN|qkHQnmoDTDpl+EW2aGrIXbh+u zf9E{yX+d&SSt59YhLMhfXLs^I>jrwQVEnen?n5y!(O-WF{d3<QgfI2O`9q5!EtS3#wsxi6gB~m1m4X74ARJ&(KvZi|x~?>S3inVXLfCvD8NrQ< z;S5PP6r^kh7mrY%hxaeGR31}lF?Z<9I_Bowjo-GdWJFwYW!A8mZR`Y{pD#{_Vo+{F z*qFT`hf)Ph66fVv=MKewd-LGIovQQIXqXH$FaC^8CDFk}Z!l|p>mTw%RPWlY`g*6f zbnx_k?N}Y>Nrl&ub82)cTJ?11%(Tld-*cIdoC`S&(--r{DfxejLnJjY>5U2DDYuT; zSInMqPXK+T7dc5hBfZ9CN+h9*Sh-Tc=Kz zNNm%gffq@Zb7)w?BI_mK+SClT!XcZE% zX^vAihN1)qmXTE+4){^+pa;ZUg)F09e2vSe&3R8teCtwJBrb=N8Fr69se}*qL6d}t z=bYku9pqX&dPdkG_`5m`w8$O)=3JF7F>Z}|vVvQ&bv>ZjN3pyz5rj6U$V=`?`cd-0^72`|I@4MD` z4-0LTT2Pl0*RBFL_mStNNW$n$K0jSQJTPI$2>{@ln4!)&_;8i?8CwxMUA}|%JuguB zh(A+!*un@gb|Jx>UEtB>w(%qA4_g~QNHIbavoJ4o3>h-XoXGP(iiB_$RX#OI(I`#O zni76`Ch9_U- zdTW9z(XrBCBY4ZRDR5HS7yj9G6vz!?DotR-gOpmLI9qGV$-bLNh?DDffkg%Z8yw2c z6xmNHW=>}lY&o`SW zs**fK5jj?vV~`yPyc-Sz9ECGyXrc#j-ifqOp)*fzAJZ&e(>Ck>^IiNkb1*;AvrTt% z39cEwE;H9TFzBXpbA0I?gO2*^1w&lQw05H;u)j{&Xho^h^=b;8Fz_@l;ktX|L#S)u z>Fmbz>fzr6MZ~i}daTxtqMrEqe_1;y#VyUrXV_z zd)}Zt=+uhI2EQ5Pqe@TAtXfk*zSopuGcGZikkW_>U+xmhh4*@kuVsUNZAxPjLLngE z$yJah+y5Unn{e&y@tov07Or2KJ!romH*tql68f@W%dZigL$L(;(b@B z9orfC3r`Y32xoU-nKBz)`+Q2~GV?s6eU`2I*$-c)@{+62_0b{?Bqsvk(!ZI8nNdMl z8$4GYK75V%1W5cF++^<|*g}pe=wx<%WYd{PE1nh8^O?%-Vn?9jnRMKEyVO7c&EWZ= z8K*Dj)hmzNgySNRFWcYrJ7Y{j?hsSt?|)qhD!^vYVs@q{ z1Z1NYR{TNe=5K#RTe~$@SKpak(&;c2$c)`4kP0|uE@GbA|1|@u_pmpy#I+U?ryQt2 z2J)%NvpushH0Ui+q!JxA%&~9JWrS1tneo7h=YC2kP*o>$W0(c`GbLbv*|D5B`^96Y z-K3*nGyQFE{n5*9<)tut)*P=EnAeiM#aPjJ8tZ6}q(szmqf=b;cY#&y9+QN+AOP|b zPf2sM(kh+8iA;W0%QPcY^=kQ{)b)*T%j1hf|Lu=aZ2l`j4W6(zs#pNpPN5;VTp}mk zD~J%k-s*XXA0?!56L6EuDe^o2)&z1AxN`zKKI(^NjR?xRrm+c=pbeh7gRI4ZMbER> zAX#OiE9%K{K)hCI1w%!QORJe`gI@-n&U&1LTr^O}$X&&j*B`<(0=22|6Q728^$t8D z<_6X>bG^!BDTNzyPX==cv$p;9kB0CX5dNqHO^}Xxa#z!BW7l$}Pob-SEV8yI@TnWy 
z>@tC$S0WIArkc3hiGt`vU*CkORgSC#SD#)a>^wbjBZC;&X_~aWD*PK6oWn7 zkEtT61CP&0m;;=qniAVy5;kruY&dx}xYluIZ79n9g);|EJB0+_FD=>rl-Ec72 zlu9z;m7&^7MDY2{?1OSm=7=XQU*o$^qT}B1Kq$@ zN*DT~c2HQBH)^rikc!)?8iA$xMSYXygi5UvBn3nIV+=P@v`$(AMRF{M7!>T^JN4eG z#F%G&C@&#kgf=u=`rGb~KO%F49^{#k$TP=U zIcA+@ti<%hs)99Y$)zUL!&_}gy}Nh3bASu&NoF|_y*1P(g1|2MYF7SVqZXbL$c>vhx z6S6;>9y1RodX>yNxmfj02pvswN6W01Vk!hT9ifZ=d57Gu#83BDJh_1jR@J+;He+c* z?KbN`ac4sLeDvbEYunf+mOgB_=Ev5^&bX3Gszm58Ou|h#eLPj;Nj~;aO+*%)N9Azo z27Nm>QbRmGNvtXdJ`d!@6pq}tAzc=(hAOi`@L~4}piA&}??ML7Lf+n1@F97G1a8x- z4jXmk-!T%MSfY@r$wI@I8HX>&yTCKv87gObT3y+?Zg;MF!Bb9p`(2s2;K>=Sl2tmQ z@I2@=kX9y(*~C_vj+yR1ynbH zFlREhnI1B3Ne5P&j}MBQg#Y|x?W8xTHFw>o3DNkBlJf_MT_PyCD!7KA_eXv+=yS}vPM%rEK{+lo-Un)AqB*;&3nv|M&Z0b zC?#1`mb274=z;Gtm(ZUb#&oUGfImhqqtQRT(3|vr2OR0#JZIegV?Ve}yrZnxD=?st z(8J%GhatA2AmI54NN`eSh|lmq--`y1$)*>s&glF==&?NY5GgU}c=)*jmFlJ|DTef~ zCj;tMrTdGjYs!-CXR8+%&3z8_Y*a#Lk)Mu)Y1y2_ScBK$Tyxx7O7he(J)2g{2@keg zXNQ}u`X32*<7z#{v^1w^R14S|nY0X15g6+%ren`Nf%a-Yr7-s!UyZB)g_g0 zS~dKFO^$oZ-jk9|x>4!bA>;osZ%r$;g@SShowEdzEkYAV{W#c=l~Paq&0Jzko2Xo( z`{UuQR8;52mpl{v)I+^BSs?&nbs+wGm*BaJ~$T&wp7?zh0`$$onPqoIYnxFlG-xTZ3o z{BT1kmg$xwKX2dnqq#f!FVL=Prt9w+@BZFP&kP13-hL4kQw@(~x0W-J)^nx`78s91Jp*>yIvb zX5#fG#7~vpC0{{9p0US+x6~34@+^lo7Wf?dfhtluf>YUUzq&jA@`T5u>U(TSvm7r0zkT5dc- zD%7`VbZpc2-c`4g;=_*kj`)z91eBXZ@}f1zU{QRiU4gjVMtxgW-l-fu#fx%3&Q7MX zVEO=v;h1&0V^R>a(X*U?wVW^KA5(j&T^qUpq&SEeS0?*1yf2NYGBp_=8ki1xFkA?n z$_+BkG3fNEGs%R`WW4z9582hSH7ZUuZeB)w?9X6jdl8Pj(h+}MuQhg4*j8_kZ&94- zTfLw07jD_WVO4Nh-_HU~Frb4IuEGA`QZIxo!GjEN-yoS=CnWVFH~>KyAyc063n%^i zU7j(^JGi(R=!OfWq?$8!5)xCAZrXj{i9!C!+YE5~_j{_#CzPUT2`aUEWJ0DWbuU;l zyUo>oO-8}MZa^Jl*qpe;rD!#hG785)0CgPN$S2KTb9uLZ4xT2i1V}x~S7J56#(wqD zsc3lM!-tN-jx`soV2_!c)a|NW2CFf|`b_YvsyJzQEJpnAcUK;I}`w_<5mF ztDP1iL_7Ox`~brKUV80F`6tMJS)Rix=r}JGBdNq>3f(AA!!MU>y#I(XUjjxXZ$KI) zO&o6MMbPSD_~hTsTnt2q-?PX4uv_S*@8z0Z%~?wiK{{Yw^Efm9_AlnNcTrf&zn0HoQ`>ElRor+xTy7du3mox-tOnp3!%+m1z=z=(@;(CgA>2R`-p#3%VoOS!9YQ&&&=#IX@CoA(Ii{OK>CS 
zu6_=s_kbB;fIFs6QWFG{>=GOZZQI#JKNl#F#Ua%?w0i$|*H1u$kjP6s5YT2J2hHeD zgn>VQoxs4}cEUL-TkEA-#3-l-c^A5KWZFWuXDH>(rlO$wb_z+l&RrU9dUTVu6o3qU z99*{Lc0N-?L-#ns8xic6faQsJZ^VP&sQ#@58Rb`RJRa11Y9vGhx5J5=IE1Q%BPY)k z@#Hn?`cVPC%i0VOj`g^P!zS#)zD=kVAXos+h`bB(r&^tQ|F6qfYQJk)?RN(%^Nc_3FImu92kMU$|QQsc)_@(LY)fSh=h_DYn!e*%4=) zp&aaF!)uRG>n4jZ?#|K5vq9yhl+VmlvaHs*D1EtUkIu0FWJf0Gtl#~ z;(u^<3zZWGCchBy=GesOqYroCW1$p&DskOU;jJ1LqG7yac-*a5D>xPn0HMaG{#tUNFt0((wY!(Rt zgKR~856*Q5#O5=cs=1-DY(sKsmm=q64U(iBTc=84YDoZ(v2ORH!Q zy5;ly^^ZZLR*DiX>Iioah!6D=ZICq(WlAG{TQZ|6N|QIo-&vC##bltBsCVq(Niz;q zo2IQgnypZ6IG5|;X~N31d~Lk+{898z0qlj|EBV&|5WuHDE(-1+%5`CQ^3#(qkXGqc zOloek;+YkVA-T_TZAVTuV}X<6S*yD3S@C9W*0T#B+ESLEYF&aCHP(4_)ayHGm!yN2 z;nHz>Q}dKjmk=o^?b}*z@w8=&u*{}yjK#r%LqXqlGL~7~bYrWT!}=AeIbU!3exn>3 zXwhqq_5XEnT32m=ORg|HDi2MJ6B}LG9Zo?0wKQ;kAm)6lsNAUdwL)0a-M;B%tEB&6 zcCa_OnC|z~9TrslNct6_tSD9`K6szoF*b%u615{UYBa?{-I_7EyzN!KG(VKZ4r8&0 z_mGgGYA8!{B^v71*i59S0{R#Rv#>=cSFUmKrF)EX|AOM_iI5NLUYuO}vin^Z(2yI4 zw|?bpoBe?fJJ8ooit%p@;rVNEdM2AVIuH^gmRe)0bQKg1j}=_at2ek5eWB?FtCKvIDf5{C<9YP)Q;b$-_hbh6QFf>L2ii1iHNXub8b=0J~=JjE7zYTWjP~Vu}A4D zSSc81oy2X4oEa>B!f6jz?)^QF_>KV-s0QR)-jhSp7vaCk=x#OV(B+1Wu!$NS{M7X- zmqj+pCi#SFIu%8rS>F(S{BLy2SxEPP9qS^DKZ;#HWI5v|lRFU_LTIVfbv&jC3+VGS z$#;cJr)CPSNgp6TxWl>b(em!1eDEC|LoY8G6%_cn1Y)2YN{R;CDE-fT8?i`sN8F)(nvmi#z7|n40sMHUryKA5*AIwAhk+$gX17GnPB!%Q; z{i739n9~sPu!y^YN=r4Hkr$!;Q0tGl?umq^IHbdjLR=$xMAfLQ z$Zu196ls)$Q5>{)Y$7?3!Pe6<`@tmVqSpv_Xvcl$azqn|jx!GohtBmzk!8M$@lrEV zmtAfmJw^-0m^`Kiji%U@0H>ue9DPl%fm+%UCGei?e6Rj5Sh%kgFBpDCjgt{CpZA>e z=+`|afzN)lP6km5ndITuSsivoZme>m$VBX4Q{$5l zWx(l>6AiE1iB>meO$zQq!`-rJo=L;lMG$ z4sdHsWv$`$oq&MXIkgtc4apjY)Bj#R0eXrCIy-JHf}#Tf^QLC`p42=E44g^221!3C zFC6t~6E4oD$|`@jDMe#|Ak4nZS(g{0V5T*nxAeN#?kC7|bvUNl`8=Vh=(p`{QF>1} zS)}J|Gj}A;k-*HAUD<<@LDeV9Sos{)e%_Nb4Px$c zDLNJxtsd%bLOWd8{^m6>xc9E@u6wd+XXGoDv`2Ey_1tsPCG$q)*zLI9xe$(z->$YF zOO5Y2A6b%>#&02dQxfRv5$^iC+>jr4=q}_jW#WKQ;DWgd4_2mKK%)(U6Hsa#?hM)) zc%v0)6F8tDy>@iq;!pyP?Z``*+B*u547D;PBoSbCTCb4b7r+75h*SCPBQc?BXpYQK 
zG0FWw!!{};)G^q%bT7Jf$hO@c@U}oecy;}<^R^%$Z`d2CN)4X}7jT6xl{Qt#BLr^& z(Y#(X{UTLZD=(sEKXHaIl)fYTP?0PbLu#xKTHQrqxOfoJ;%zC?yGfIq8lX-i65@hE zb)KhSwB=+lA9&o7+VXae*~+s2(ZTh6kS%2Fh;m?fXm>XDOX@6WLgpe2{`k}A7Y1$p@EXR!>T>ufw(m63PfE*z zzAmG&((wqA)2oytilD$_zwy}J8MnZZc#)wJyMiMZpFHY}vE>AHU+Ro|JoK-Sz%l6h zXj~pJ%3X&H2X1@^?wI#?W(Wtw$J~(f`*s?bK&O$7KvlmudC!vwb|#3t(=sVE`PcCIwD7#g})22M<&B#1s)F}St{cXls;ozvq`nzNd z$WQfd`IBE;+DQXQlhu}pL&-%_OZO#7nLPPp2p zVc9jf^?L^VGz0gX8vl)Z#SU_MbU!jZ$LUoF;H@p9ZVV%so5v^RSE~?h$9`r>rb`Tq zqMIh$JF`u#U^S~k5LJpB;IHyBj5-2Ny2=t%O0~bXJ|5pQtV+7gD`j4^^|5rWzs&uM zH_ZGODAxWX803>{c|@$!{x32rPkpBl*|c?&$k;mePcCa3$`Y1HwE#Dkj`^+0jVVLM zOS9_h!CG24m)eQ-gB80aP62NT#fafVDGmLzd-hj5NjT+;ew?=&ZdhFeJtvBY0%9h6 zBFsToiAV@W zSs1-7Iv5Shd-x%wZjRPb5iG`IFek2kS*WNGg5L4yry0>U0usbe?$_J9Z{<(i%Fmx$ z0$0N@JDWTAtg+b4%*a3|HLXx7$Q$l+mDCsTgBK-O0;B~AHhVLTX?2*f`-SK3LZ>#*w+tI!I#iG zjLRagvEbK}$KRhPek){?23p1OE%if0A}$%hvDR(#8N7v;P}@`jJk|Kf5nN_onM{9F z-^W-&1LBy8JAnZ!wj-9%UNLQem;$M|nAR%(X-zz_H)2Uh^)Ww7a#0T#Y?``URfkae ztT^g#5xMiM%+i{hTKIHPW0am5QHKgwN7t|9ze?K&|Cp3Rg3V9KjKLZ5lFvFo7fED`s>#U zB}NvWdaL1HPZG+>LsH+;4-cB;mKX=Qf|trRRQR-k8fiHgRUH8?cQj3b85^+T%X^)b z%AM~18b`YbmiE$T#|IX?lKe^?d)=qRYs)DJNl~DkvnUK7Zm}N*;f!aueBn!2+}bYI z_HsiPTV=tj%=C+-js+xSWpt;G?#wRo|7roWr3od2tk&DjLfoo_?+e&x6KHf5hvGsa zp4?Ja_=@$Ya9<2*k#z~%%bISPPaXACKO}+T&PI`FM8@G~0g>nyb_RTny-}Q9pR-f4 z%_^UvwjZ);O`07jiN`gy5`xO5or_`ry<*=l!hsLR;w4_i<@CjmG;03NsE!;SHESFh zohMiqtZ^X@b)8>d`NIFD9t{*-(g-oa>YgJ$lJSQlg7gi=YP>uV9Qz^Uf(_Nm&-R1jBBmWCm?wcaZlc}-$i z#Bw`JA^cV{pLYM=${E3dqYa?|-NoYPB4>v>q>N$d%bN8&^N&-%_)pZ92?>GznvYWJ zVF?OWUWno2r)9N>tKn8FTDd=S*1Q)SHEz1>kV+?ZHEkSOmVW zYNlixJarkrR8kKJ%p+lj=F+U=Hf<=^dX9$ATlx-5pa?kQTW>J$OFS{jBzs=Q?#JF1 zgnk#H2N8lz2gP-Z4$PYSnwbqm8HIkplM1qSVO2Au1lX9Re3@qwblJ;Y1S%H2R&)GJ z;1a{UuPvHAm>G^si}R0=AI3luP%L9{N>XDz(Yy~O1 zsuMww`%s%vKnn63B@`dtp_tiQX%Wc0T*b2@TXcwpBl8IjDNWb`JQw+=-Z(dISzz@l zbxO9)yvH??ys#0|j|YoumcD9G}i6rz3}*v6PB zy=g;f(I(9JCAQX-u(9W8QOF)y3H_*v#t`sSEt1B&(IHNHc5OL=Yu%r%GaB==#PI)d 
z^^T39Kuxx0Y}>YN+qP}nwv7|x#L0mfs^_}q230GiBT#xI&0QRIf100PF z-IV6kfrKYOf54G7$LD;|NFGj0bQfrkK%1RSbmf~dWL)EpOo@^-)%lpla08tLw2z-|-$lv#zzLssHHD!Covc4O>ur9IAj^0ccvDgar>7Bz5-ST!WJl|qEtWB zwL~T{*r^a%f8y0LgAAk8AnE5c}EOpP~I40y)i-smaHhU4d`!o3HN%(3}|Ol zkRLquZXDY2NU8Ur7lv}BfJ5P_8-=NFxG#Hm(T1BnAdC^ppM6&L09P-xSS$}WjO$JQ z)UUQWNH9HpCwIl8Z)Cg1LW~6N$e-8fn0PLdYcpJv7Q92lDDfqD@>bnz{9%V~-e@7A}$D1vv*mB}_)QEuo6iKIIjH97L{ zS7jUq62;gAEs`_Ogm4jYGL4A3Rvyq07FDErml2i?tIYOd<-4)En&MzYw2F-?$$s^i zo>L<*m(uw(cxCf2u7k!M$$b}I`~r1_)r`}hFN_kO>W|L7G>bg7W|A=6=$>aUOH6x2 zj2XSXNwE~0G$bHIjK2Gr2pNd%T?%`#<&(1dDGzMKxVk3AO&zfXmcSD!&;rwA?=`EJV5Hx4ViQ)N7dJ1#RWt zs$7?>Zn7yxv8>$HDbX)0F!^5&s-Lv?vd~4>6TUsDhXv&Q4)^yKsaf6ft$r6|nVeB$ zMeLOCufJQH6FWJ7lL6;79|)y>I=@W7^qWD%caT^dT6`QZ+AI5(|9X{vnPu?|%kx4>YS`ylc zNM}>nSd9mqHZIK1Hmqw~L)}iEgT?exxo)4|Nu%Oqp}u)>;dJer5n{U6PbAcU2mXi_ zSkaS^xp#o6l4mwSCL#)6mWbWI=8?q&N^}mgSv?9z6$eHlo!zFscuvhEz}Yv`$!pTc z=rI3Dth20nFLpr6r?RyBU4rQ|01JaODN@i<(yKx|Vcln%Y1Ki~iI~+;8#B-7)QG8q ze7MK6-{!q{l(LHXC^9ii;4+3n-ne)j+7o#L(GzO+tekr{uOr&iAzZ=dhGOfOHN9D_ zvy%$JKIj%`Aj_uOUcXCc`RVi2x&}3M%2*)w`?-c%cMP?xme1)jmCr}CivJofp zP&VxJb%gBs*pv70@a2*zef@J{iUEN8+&_fS$skUXMH@iJfn zuNdBCl)uO>O5knV{mc>9fqVo_lRnIIB?QG=WAQS9n<;t1QK&DY(-u40qv$S}?H5(RUnr!wf+#A#XBr8m7jA)13q~YyFlVGR^BUy zd#b?_1WX(3PggP6tFpv58~peZhvvs`MIm3>?cj}*ou2b1R`#eaNq@oN`$(_KB zBOQQW5=AmCUk0?g$Re(OLg1kyeSMWZ(sNf7}B1$EAKaL^=HJ5I`AejTC{tel-E z61)umvZgg_QSWA#{IJ^ z?a!kvZy9^?>^So5QKNI0`0xQhN4Jdr(Y=XjZM&d@8dLQ^XW%GO87}ogq}ajP$jK*& z99ejn5-QEjV6Q{Q8`AJ#G9sXz9^VJv@2{7iK>p7+d52CN@?VA5TyjLXBn@XEDrq?C zc=Xc~&;0%Rg@1dy#?H|edD2+LMz7arHFgwHb(pN6W7sP#saix%KvWKkMxrLgb3elY zp+TSfR8UN@Pz|LnivlZH^NOaP63%)T|$wcO=}!ci_sZ@Bs!zS-WE-+IGzx?kX=g?8d=>4a8er6 zh^E#wJ0qlfjuL_g?@;VyU?zB(mc4G0D4wb=Djab1XLdP8hA&?Qq9bT+TVmHH9x_uR zWD+X{XLtE_FsfJJP*^S%ApH_>Bpue|todC-eU?d8=Yjfw(KHDCErcXau~zLT4y8;f z)Ow!wBF)7L+=AYI1*`_VK3bz!lW(N?eK@od*-ur*=;O6v#ciO<=k_e?$d3}~0G;=U z-AK|&hv~D{2m{U1W~^5MC-~ijx-|`S!V3{7)3`@sFXgpq^52|w3QihlyF{y>`-NpEfT9d*VrV0oQD};5N_gT({|TPg=ckJp8GBRe 
zJ-u9OhxaZF7H7PNBjn$=6ejmow&`f8MO(E03BLMWorhnHCt&!~Zs5s7tWVLK)kU{> zOc?#rm|2%{75g`39otrihlZz)!a=olL!(Nc#U(8e+Ys@TJEqFPj5zhL_L}7h`=fs8 zR!b5Cok>&!-I3B1P%iK%ZW|%zYCVph0|q?uJkl#zR+R}@Jv}B68L4g%a(wLZR?UU= zRtD6f2RRfJB-yjoPw|BW8i(km+i632hg&E2{`He}=MVje?(||E$9p7qL8L|-5AEk$ zw>IE~f(yTruVbb7lk!eWU1sZj@-_>P8;FV+Hu6$QK?9OC;MNgUW5-1E=^|Tg?|$X= zguZmk0j+f9PdUf1Gc+YJN`o1%L_{U1Ncy*LH*xynH=oB@)sx)RUzH^mkESf5eYrqm zBHH0yfwrdwb4(h-@=O{nEQaOt_kP`w9^(6v@A-Ep7NR_cO!VR3U z2!ntA&EvvN+#URpx=`lkRgtRb>74s#PJIhUF`j8Ku;(=02ntXGDhMzjWJN;zle~Z+PhDMgLOr+vVg7vP0p@&v~>d{eK z*ddZZOIpOp19G?b3-0&rR^Hw78Schu?A~C`KqF7Fh5;(3z5Yl*LD__fw%iEu_wARv zhpZ*N0}FvENfLeM7(P|vdyWHF=CI!5(>IQZtW`@XK z!^?$$uP@q~X6(=7Df8FIdsa5t2>?<}=|rTOj?Uhp6Z=_zx7b!v)Ubg~J4P{bsBB6j zu4$ZmzHnmg2-$34JP*7_K4_0??^fgBNQi%t5YQu=0AwQ@8*8p~dH{A4BM#W!&EJYOl;?PZ#QyV{DUcj-TR6Ig)u;~##W_tp5#Gs)iPhA%uW?s zt(=XZBVexPasie)!mS?xMZRVdjo`U4(kSx~QiAIkL(HXxS3ig)etGlKTOf+#{g{Q@ zF1JL=g2z^kz@RvdOvG@EiurQNVHi!*qQS${=gy(Jzs887rj(yttSQ-@I+EdA+UZYy zy*9liHH_S&ZdamKt<_|;RH0h=`@nBlUucGq0_p`~PUc7I=h^$-Xvv_MY`?(F&Ie~@ zVzq+FH}ADn!cHLLMF72OZMGX34uTP0gn@2~fI@JnOt41S9eyUUKn-9l458sx*+XEG z7zylq#Pc=DeMrtmj+h5Gr`=`h%7@<#eF`$Of-V!z8G9_4CU%1bWAe)*(~)K7nl408 zu+Xni$flqQ(wMjq=0nRV&%wAbcw{!amV-^%iDCo5zQNFm$_d`Eb9@^+sj6QUi@$ad zXtoP+lf_~*!-;>DBG(hD&Wnv3q+=# z;A1HHvtr^QWZxW@<&P@Q-JrFne0G(Gne5Jjs>z&*jtf1x1v$~$M8wie)rY}w+&v|W zK$fttuf)Yv_laI`O7L%=IA8MsCLZlHz)i}s=ogEEA);pU5k_0A5h-|rNXZOBsk|>V z?Fcubb{`~cN(-=}OMmYY2f#5=N2eVlIMoyB@z=7t`0V_9-Bth-1X=K?=lPSR*eEGlM_f zNMSaJ`s)Hv+G&s;0#AuAshY@_a}-KNlp+$aVkqO1W)pzgE)sF56MfW*DAl|U5nlEJ zI7FZkCP>djrvq2QuL&nzsFR9r7rI)LzdCXlxW1Zsq8u~So&t+r$&=YC=8}i0gRbpi zYI~?wjS_hpp{Z+H^j@TJZf+%b$RBu|mvuR2om^3N>P_r|P?+EibjEv&VDES%PMBmW zUjOIpIQ*t|%MGF`EXkIG@@&!zTdA>5it03TzX@Ix6{}OxjBM-GmlThPdQS{y4`9br zrp6GP!~PBZNKkWCQL9LpB}}B6hmt05_&H1hRc#sf?c4rz!%n?-ux1dq^A0cuBjE?JL~bvt3-nW)&cxfhz-DctDVozaGk%XCyS0LjzN*_3M!Y2Y zcsfAQ{b~#CDCsCO-q@+%n7$~DRLNXFY>I#Etm}eVrRi;y^Q{!Lw9NN=J7?dpoq%() zjpt3p`PEj#L_K6)Zv4^=34`roagcs1!1*kt1#i7D{TV0U--?^(t}lJV5fyuqmv^f0 
z=BF3NEVz@uQ3aaHfiB zzZRXC;j&Y7yr{OWmlA=buw=9+`CbSj2F&;1Rlixu7$}$t9E+6PrIRuO8ty>-p6E1w z0TxT$aA%&iCV7{bKDffJ?=q0qhpD&dAU>#Is%&p;a%jqiSTqC-5q*H67RtEL?(hH+ z9v;4RYo;|z3nwm75|(Q}-Z+384#q3-7fjFHG;`nnZGal2ypLu@YaWFf`5n=@Rg`O- zO}5v}5f(s9iC6;sJi>bo_kuAjC}cf$UVc-1v>B$BFJhl2Ixomioj>}K(@a@Se&Q%$ z;Id%L*pVq2IC`)%iMz`@Oh^EO%;)`p+3oX0 z`n(Vz%wrUfDUN@;#{Yx&&;JAa^$HIDAmXM6^LAhgklh|W!_FPW~3_Jf8;_;vVx?#{-Vdk zWhY*U;u!S-ZR)|lv({Ak^ZooN%J!>;O51EY$uTw#QjRkKvt@x6&tZ_Q*$8{3@su4G zx%F{DDS;Q%>%v1+N?A!eRwN=p6&W|(Hl>b0B)W}il3e!c=qUp_V_=X3tbBR3rpBT| zLM>Ub`bY|hc&h*-(O{%~k|hQ?YyhCeS~G`jj@5?Ch)b^3+dJ6dWbXu*EepC0{LapC zGlPqB3yEXlr-mwrnOsl5oHD7N&Dr-&ZaIwfdd*Q9$eP#kE021tcd5TR-L<@sudX$< zv+Vzo68{PMno`|!W zMbaBXV7~k`rE+hkPojI>A<8vHp{LxP+o_z2t zzm7g0`Ac7!eB+(y*kpC@pEaG1%uj?~7r6HTjUek|+3SSQ9ax*AYb_uL*X^%sq;&Ov zj*gzRZRj;ex2xS3OLQZpzF-%f(p#<*;9KZR(0mk-w6;Rf8WmCw&DAc4$l1#i5C zTBCh$(!&H6n%>pj$*h z25-p8oP)uG;V>uBQ7F+Kxo2UKY?pOeT-QyLUd9Nh(BV#oXjFJ)?8mHV49M(D@6V@a zIQRklyZAi+IC}oCm=@O19l3!)D>n9Ni+8+kTLu_+hwg)#6Ze;v?18(jmK~mAuY{*V1!*_K zTPw>siU5o`H|$QYCrG}J$ClI0NdPQBfUNFxP!on;{_}5H>>FZ!mUIwds9n8a%1X0- zru>cPQUIhjX(n7|Q3iK~zj8{(gQSji-`;Cf8kW&{?MXQij;3C@SMPYZ8%>cde#r^) z>A+WA`4Y0V!YYk$M(CvHP-8<7HndA@)gd0-dpNDI~Bbe3t*%*tB5gDGg6I=NozuT5Drr|7wGi6spFS?w({0^sx)x2Sa4s zgi&9@A@lKa@N8Z=y1U1JIUIJz1gc;uLhRR#LPKGxSGVvx;r8UR=v*lmUaK`{(UXNdF)Y#ZF!XfXkE?KxguB2L3>(1}seIN%>I~#_J{&PJhLSO7v5Afi zz=K7jmvf|3ZlCXXeWRBSYo8DMm)G}7?amJVeGHyYm%e2`90RDBzPjZ$t?H zpna%#z44nI%=}yx@00N<~H$^65#bX5h3}7QIg?0 zRPXY-%-zz&_*BX$SZqh6z!76k#|()GLEE*Uiv3GI!TTf*8|_r2;`jAN>`C8UvILEU z%ST`3GY($;>cFA3uaIb&=|Are9I+lAt>W&h4q@P$&ifLpFYE7=j@Jor^8NXdR3t-|YU!f+1_3F#R zzy+ENqsWlcgF`69NGKI-h?AswAV>tm{AsdNni~NDp67jYB4ah{%7iK%q*@KPqe{^f zUcNszgqcz~x7UnK5T2YjZro~ym+-(w3kD04>E&iwgv9zvQP2P2J)(xr@&6N$T?-5PoHX1*E z=e!qk^kb1ql88|NQ;(SjWmSNsn$?Lk-cqb$f9!MwfTTsmtK&7r3~I+E;|;kw|FL0D zn>l7969@cjgXg^JOidONYL`9QRgvu_0bLU~#SgV4cslRci&-w5`x#LU7_L0lP|o zlQNhTLK@19BqfILq~o_`BBE_i^J80m#s&i-bsjLUr4^HTWKISW7ODduzsn%QDUB4a 
z!lZC9qGTN=HrIOP+x6=i`3}Fi8`z@V*3sE(Sh07Qv)kKGluWj37;?%JiKt$Z%CkB+Hx~%kK^}|#lDJvGuxsrgE7`7PnDc@fRf!`J9nha-`=(3c{V;6pqf&M_^X&}ya4Lg_FKs(FeF2KKr3 zrzYq3h)S54lJ#uH@SM5}JnA12t;yevkn2BrO;Jvsy=kAM6tsw2e9*e7R|1CqVAMGC z&{fdD$h_ceEkQ>k04T|URr>PsaL6H*t$ZbG+2H)*S}sw5)+1ef~4U!U}{}9 z`O(Wp0723HrDudtM?{xx4S!;Rr&UxCh4BD3quz_Q=ZBR4xfjH9({0Xif<&JoEQn$e z89Y?7WQrDpikZJhVL8j<%kGr z3ONp?*wU8AJCG~e)3-E4+V2k79clukU~o**%(BR9i}>$(^>RG+6L~QiO$jtH(YFj5 zi5e|h&4ZuZn)0EX4kG%ozu#?q6#7f-dJb`KeHm+Vh` zGW8nNcq`PY;3fwK`pLGSP@ zhc+WN0#OmJY6gy5e^0yzPJIEo!-Mm{Fh3U)<@>-GxPT`}(N7?|_ijAf1^o_$8= zd5&$IJQyt=T3U*6>>g3y@r(CcFU%c@Ktn4ZMj6oy5$%GPEV5lSrAr;wHIkt@k)XT%LNKzBFgC1$Ce8;lCXJf0ovak zV4o9jwAZk5)=)F4Tly&rw@3BWk>RL-#uMKb9o5TsDya+47iTCZB@dY(js7cWiWg2& zB{e+{Ep+M>p%9Jc>WZ;P-5WcG94xKbz+S4%4#D8P#v;NXVjKB?XZ?s$ip+zUC?W+- zXylVeXXz^G3oAT*$Muw$vhRDy#xb!-56p8Hx^Pq|CtrfM8T+h8(iU(JFLxUV+|iTj zqk#KIAjOHqhlz&`cM(4DyWikOI)G2~AHUK@=BLb}9Wyx7DS^)X7HHvk(p9?LCYS&3 z@9cjHEh!gg;*($KIt%#}!+y(&3Qpjk$-P$)8y?#ON*fG_aG*Ei3r@W8{21Tl%W>Sp!Q^9Ov>Z=JVL z+nf#AP)Lne61)PLM6uFry0*aAc-I|hSZ)TGtWJ~ zu;|-FKOFn{zkdOT?++`l6L~xPS@t)(-s2pF0DE|Lc70Qea!%nPDmh4y@*h$0!2}!q zYaS|2x&c%N)48JL`kdhAdCJ1spO=|ckwem?LFZ}ybX5ttx#Pr7fwn)TG{xgHe7U_J zOKTUak$>YZdY~OVtA{mwT3+{y+S-u)b2FyHb<|f@Zt#`m&24!aa4O+JI{cDh3CuNO z{+PP-c67LBz-!}_62G?{%*^6K<@mS2gTovIT(=7D%wP-@hlm;^ML3&U9A*nE)%^)Y zDJPUEXs?Nsm;t5@KCw<*l~2<&lLK+CfST)&b0vJtl&SabLQt_m} znw`=G1evsiq$?RtGLwC*|J zk_|^M<-sj`dKU_-JZlH$3WL(eM@wsA%hlxC*$B0yDj7mHxS&MV`E_A|;A=8QdCG2? 
zTw}YhvosOTY>G3!%POdnn1QWAQPaGL^!Vv3(mCI5n8mSZ1J?=nU`F2>aOV7}bBSqR zk2jt~GgUXP235i@T1iX*JV{2WU`z(rGfl+=tt$9oJ%5(OX-)aS_SHGMCq^fhyd_d!hLwSofl zHM)zsvTtZ0NW;8zHU7~bU)rB%^!AMY=XYmWa2v7{&n%5PCjZ1&^1}^xaT2>XBz76F zSgyd-?fHf-c0R%EcVJ)7E3vnCu=Vtn09pU&7qc0lY(t;Dn>8gQofN`7j@@jtd2()Y z{w#7h9Upp?Z2TNH%K4Poe9HCYWu7FviHAlRmQ#7ya5pK?XJbY$TR~*K>@Uoiun#5K zO49S?&N-)Pls2nseIxfA44QEb_3@Bp;n{m^@R^TX#^*P4c*DN+dZl=a3U0|xOyO)N za-E^s-}v|54xp+3(`Qt?czPEg{2aK$yJ9fjRA`19ur(_X^dOoF!KKiux(hHnc&r*9Y(i*&IX`nRkBI4P6Rhl+qET^F^hcfz zmof9WnFgki)J>d=(MZC1#(b7_JbS=NxiW_>4W*({c|ru4Bb$u_oXihQpxTSCh={NP z-A{EuoTUQZu|%90VkHH0m1T}}48^!+#EFZrzp3lkF6cR3YYr1=vFJrJ&PeoK3GKLB z+ZIw+%jGR%iGL_Hv(XU{?n@jY_FhT;f;-Y@kR04--Ge)WM0-=@2^_xdFI`DrA+9VN z5uYyNZwp3ZnJm;h|Jp2Hw7&aUoFVP#JM|lF@zfU#P@T$dYF)kT!NAu zU%sVl3=ur>Q8Oai($~FVymQGI)0-I7dq88% zl4HtpeH+R6;DJmDPBiY9^a@uWbso}bq1_yN&aI>i@hWfiuV$m{UUU@t9Z~-^8+IS{ zmRKpZM6TEloz0>H@5?OMkbiep79$Mt;~;<-ky0jrIVXuboN{-C{qzIY?F#O9&u>K! zpk@Sz`+UPJQQE?U8`08_!|Sppg7m_{{R8di`3bhWC+GHk$y%q!i#Fg~L`{u2II89> zvk~EbT1HKL6Be*W=hD+U;G8T4drC$rAWmpqcb zVw#vPlmz?B89BmO!1+f{BZD7Q?G*;DiXtY#%fT>})Fz#`*(UU%NcPDH5<69};@G>LoZY;W6TX`>%lIOV{jb&$L9?$M5A8oqRosJv`y!^>lA-*Y5+i znFug2P*xH8nBXD?fY&iJeCy7KXKI8yq$?TJZU$R3+!UOo(ztr!?^`viBzzoK_TNyy81=tD=oB?g|KT9Q z%dG0y&mu`O#8-4%E{^a<9L2R5FGKPps)Qcda*64+7I$jAo~H9_eA8J$zIAFqK#m?m zxr##FJQsEoDe#}(b3i{W3;4~fVH`u62<}wNmCn`PiVgIzrf83FE81#|&~k8S&9-Su zCZ|Qhlv+{!G0zY;NSmpn3LGSwiYY0@Cz2WBtFh(9nBfO$Oa@t%V?wHbjhR7iR?Hld z5*;HZ;u_ly>2He!Aq9vZPOiC_eW_?9=r6o^b628CFbYkpbC&psa2zB+Q?iK_Kfz2^ z!i^8#xI!Cn>viO=^HuKY8F9RJh(Q)3LOpG@O#Qrr1rErXg4^&5r?Z=V#f^VhkjXm} zKt%Vuo|VJh-}O{xTP@tu&EL{HxV{FnhX>!3g+H|yTa&ZtJ4vc1VZn_Zw}Ef!YBg~> ze!kM)M9yEg9lW-OlYuCOvm&D>%gD^89zK65f9MYzx#7VY*3+=#jb*Z@>P{Gpcypko zEtC6YJ_-xHwY91#WtVBtZTuOeFne5dk#htSoe|a3CuScY5N}BUS1FGQ}nPRWHJAa9{wKPGA4tdVy!m{ZEyU>-s0- zTvMc~s`mYdNr#-$WD;IN^*)D?c(F<`R)Og%KM#W^BRn$R_O?N52YR%KQq_~t<}ZLg zWpkWF(?cliiMm4h@ORbee|+gHuSfzJe=*qmPd)Z>;ylTNLeLHfk$%0=7Rx*D?CV67f2xxACCZw{WYe;EVvaw7l4q*SDR{Au6y7e`W^<6wF1h^nmC~ 
zjWWXElVImUxa=o}PJ_yf0PokDOXGpze&{ubnnV^-1E6jD2tE*REx=Dx_nwlvvdKJx zI3~VWw%HW=Fxp>;=n>qwf3Sb)tqW&r4M-%5ScLqwe+VZEG-s2qHifjbK*K0}ERfrr zh{mN+=7y#yT=&z66T-99QIhreRWS5XzR5|E&{^aY^`oAcCG!!}_0)qvDWBTtDarQl!i z=plDj`+c2h`oU;?^G8QR{)dxv+-Pk&fvI#9sjBO^-|>@Q=mAC^@tujGJ!WK43e+{H*mSXkL&-{FLBk9sCVa zuNre4KAyo2zVJ4Vv8LTm*J-I&N*s+QI!^?@^l#=A$D;p#wbo?ywdoqv%Ms6^aH5G5 z7xngLg5P`9y=M;8Il^uU%o(Y4V zf(y*NX_s*K?0!Fp*|Ds$Ns+p(?5ZB8u)+i=p4bE*HHO%MDj~snrbCF#a8! z{8&-WFvmDel4C`13`*`uaWt^WEnHo?s46Zi2BuBe) zG#{Z)cG#tp1UDtR%MM#ZI>4>%;IkJwCjO$RZ2mq+-F#s9zFY9FHdzRXP1g|1?CQ$h zs#LQ|pd=(LJ~@+-cc3oF^FRpjkr9YNzG*8*{T~x9yt=if)=S*(uj3s@kJbScTx1%N zjK(Zj9pPnV@Ww1$WU~c~Oq@bHtGmtkss!mYGllw#6RuJ}(%WO@iMQ^-zL6SnH>IO` zUh(?u>qCy7T=V0G@6C^@ALoi#eIdp*n`IBjelu>T)oyF-NyEXd-}yoJmcdF(exk*q zBV`l2hF);V_JnT$1^sEifF`Oge?gA<7;Y9X{L}*t2-p#VCn;`32x;oYfZvRihMGaf zSIWwR=2u^H2k!RQZJIN4Mlvii+f|$f^mLC!;eaC=lBsl5{S??v_ZQm07v929#7CAl z&I@qO2D?mzOP2{4E&fvDZ5yV}L>jg;Qvf0JLol<5DmQ@~G=w8S6G7x4vtGB}L* zCGcPn(KyUCjC^^`Gx`pY3an>B710a;<>bRDE}?NDHdzQh|IS{xQC)M#-Up9wn8%Dv zAfm=NL6>rr=$N$A2k_RcNIpa(mii`hju-31)f5IidhZyV#En+$=aQxXTp5{nu^2ho zauab4@gzV7tpYebCz4&EIO}JAcM$2tSao0o;w|gkh&L37jUbl)^mtnr3#8 zY3eLPAKl3^kpvP02ugJmGSL~VvmSFq#Hud;F0w&|Y5~thG${D+8F9YI2%u;mb*x+zA^-_L zJ0JPT)BGui_2}{8Oft)=wAja2Ujg5vbBH|=p zX~~F8B9tQ%fdemGQc!f_0#gf}I=_4nYd7ok?ipgtb)bE5G*ldNz^qyaT6A?BxHprv2bM4D>!y(&)=9^ZTbH^mO0aPz6lQ_zHCyw4DM$W zVdz1%=6&c8)@yz0C0(8bHRWk)a578*m}6Rw+~J^EgXz)gouL4S48}E_O`-(g5$@VfllItjr11Db zgyC3_DC|NVDQCxnU&y@`XCirA`BKk?{4jG*UY@0+qTfz!`hTx3OI{^%$Qb1K`7+P= z_rdMRt%L0IncT|c*q~e$GhObq?oCs2Do}@rlki0ciQEm!0nmI4l$z zKAVAb+W$ge%B4X4;NW6#sYb`)ladqgjqdy5%jirLs46~|R;8u>;{}jCM=%%mQ8g?B zjoELC%rMN55FYN^2w}oIG|c2m9^wnX)a1c>u*0JW{wSk+0@`k_+lyo~9FEBBSS4>N zHFU3}EQ~}{myPb(+^l#1a$9&{TYtn3ImIlt(XOYlNQw%!EV3}rZdfg_J{ojVM-TX+ zO!&JSxx4r*IGL5?n*4|J?BucQ(ffKIdWgjk6EoVhp(;A_%07RY14m{u_mPkmHn7tM z@R(_hu;-r|ZPQjRIK&oF=ho3pRg0*XXB)5M`C50zb6X@YJb`I+^-Il3du(E4yOW<~ z7O9i05l#kM1m4sjWhsZ!hz)X^;|}lU?!y(B?rL;~AkUmgB&i`X42Pma%j;9C3{}RT 
z{?TA3&DW`Xk@AX~M`j>_tkB2>10U!s^wbOuKGUJ}L}ai|2+YyDx1pWbV+W7xM^`g! z)g*K?>A!6pXr6tQe=mlkAe1;}(FiyGaxkWEfDnpUq&0rcvNFYpDtiVx6V<;H_sXVq z%{g7eVQI}E(rVdwVA?ZoBhT_%wKiXNgrTB|(6FpF;)c7u=snPJgclW{nW<`z%BviK zcK!a^E30hK!5!j@t|1nCXHula-7|G2;C>1l6qVfH@<9X1NP+#cp@4h_-w@99Rn8b3 zn26Z#^?OZJMMo)=gJM&PLpT!*-!)q>fWZ@2@^#V1;ap7T$iNLdh=A3N_gHVJkj2{8 zH;F}6F%{ex86agx^{e^|Eer2gkSWWKV7 z2Td*JO`F>sPj#U~H*Ta=x9W2QGwri|%|8FKZ}pR%5q9}UDblTbpV;kil8cAS!-kS1 znx=j?px%?{^BV8_0s3>{*uPH|<96$p?|eI;?ejE%3m5+DBk360Y?j0wX?9`B(Y(PU za>>qk-42^JR*IS$!E3N*;Eo?-&9)aFugWB?H2P`m=-job&Fa9G5wEYrW*7!UM(ou4 zW6e>Tb**g)vuWq>aZT&C=pZDgBn!XeZZXzw72~8^P}gAV;MgXNK|I_6`QaBo@as-p zeY83AoYObl&?ktoGCm7gtgF-ndlrU+X}R*Z`pIjZ+^BPKGN3@Ab)y>`ggI|TC5|bL z_4=7s3m*Nq)`g`j@I=R?`(mpV4>$WLFiWFX^WRL1YQMG#a}0UyyE&74)5TU9A{@4Ii@*05!Gsd zhOokLeSD-s0)XMc01cKAAb#mrcHktztLyV@d{$e|0CH33B)))hp-36x#C)2&Y3~=GhK=+B}`UBk&Wrfw3^HkgsAS zVdNdDYmj{@T%U{a_}!1V(+uQn|kU8j8WBlueY`4>B8BEf(qQ+qrHtaHy8N6^g4Mr$7S&AC4qrsqAy?OMIR`aAo0&H@}37MJZLSD zWP+r&Cs%XSkgQ$)vRHJOkOXsq!OD{6VBi5NG+m3JTPnw*<5n|+#u zAX9trQ0@p0Mfwo9+qmQ2R!8(Gg{oHa*ytY-LM>N@$-*W^>^Qqzvnvy+9L}2cMFgoa zp@aFyK4y<`($p`Mta{d8$?o>cQlB={8b2d1*+r;%NO|tfT`Fbe_35|+c@M?^$xum& zyx`R&bny1T_PWL$SjAmj%s4b{?6)lVciha52*xCNgZKA>?e+Z@C2wy$+ zlA?RW-^n8|%#I5unvLNxJAl8FVU>{~0^A`==+h|z;kc`Y&D$CI047zGlK+OSU?WWU z%aeD(w1~Lq1$V;x>y|srMM@NU-MgIao63J%o=4*Nu$0AC4Yjeuy*qp9;JsQ@a-n@t zx1pV1^a-t42v`v!6pE1doVS_)hT#;Xg~L2l!_{+?w;Fd5m${|X;mL(ctm9zk^ZdMD zVqGb>@Sg(scaR^ab49TcSOyTzn7)?b<^Xs%y#I4%1(k`IE{c!#1t0FW%vl8CBu=nY z4>7Om1?YesU#Ce#5R3*NMOHfs0lZ!0_e`r-HG|n=vB-8&5g;TXBnEgf* z^8bCOcYyF9I;W#wTW7RD-U#nrqlY{n2E3!+<+r3$tYcy9Dn;lI1mOMNQdQ?Fh+@`N zU5vOF^j+1A9KLo1787XCMwKcoW;jv z@uI3e6iUJObO>!q*4_bBgCo+ef2;_ts6`c442@|KAEgU>&<3XvUn+> z#<9&q^s?wLH?!XFU{m&oCePlrl^oK#%2s*4_n`xAFMqtSS;3ntinyAV?drC!)06Wz z2__Eo6CZ=APf8Z&<#hR$1WzI59~k~;-p6oJ3`B&w6$=S)Y^t9yIkw)$wc9VVIc2Lh3ViY5y$ZWUFU=1HPu4?@vUgvVct zTmO!wQY1tMJlhjicT*4bK^uvH*J|crLTPyY;<+$#L;B{-JJU(!rZBg@2dm%6oG%M6 z@7T7HP0T-XI8Ld{|H>2WkO?;s?we_gyg1S>kT%^zrs2oVaQ0_2Bx2j>V4*OoOe}|j 
zRY5N~pbE*fG8HY7gf4f&H0~^vHNzy=t_syN-4lKGZ3OU%npz?e52VL^MvpVRIB`Dk zT$*GiPbRe7IqIZffD#D>^+ZaT8{6S!Z;HN$7#J<+{EO7l4SF`$l(DF8t@ni{dWmm^ znFvuOVTr_5yM&u(^DLe-cgr$XL?oCFd8OKnWn#c@UHHAR#kCd6C@cTMiiB=$2SLhQ zh@>(^QPw(S@OkHOt2vXZ^vzQxhG-#=|9#}^Fipd`eK%-wA-3X$s9B~=Lk2voR`n`H z)25|xl$=+$24nuwdq{!+_Um_He6z@w{e|N5J6{T4vGb1z54syQ*c1+&HWIcqt*9%? ze~`(&&OSm(o4K-)g=g_J?__~T#6aYhllD>OiV+bSt5;>Vlx2WbL z70+4=&cwSCRp)06NZ2}ew41oH#lA5kG`;iPV@;)`pxl=fO?z=xObvvZHM2h7_igmM zuf9%FJZGh>8SQ7sioJH4m5wziydL0!11ueY56MG(Jj@v#{LGyAqGB6(WdqLb=b7cx znHUMAnfkO{*E}@>cibkU2oaqsaUb%2e@&B$m6?k$S6*-R#v_ZG2`f6Bd@QUue6DUE z9hZt4(ZN76S|r|WuFs|omBgcQQ^4$^qGiU^#9LT@UkLAI=4#|*wD~1ZVGct(Uemc# zCdr=}WLJLn=g5^zRLZJEu-JGFFYKi<#jVZ7C2ivHv6J<_A4L+( z%VsI?3d+&N!4g$6C4CWfDD)3)TvN=N{299#?o}3Q05Ab|<`!7iirT3i-RiM%u9&;3 zIfZ@YJ|m$(qT%3#w};+s+=*Y&_cXEa1&4XIM2v(Du+g9}zhTeb3&%iEcx?Axjma~N zbrfu<;y&7h+7k)J8V&To8T_>%tDV_AxA4^7UEv&}kr%1Y*motYan;KHuTXHwjkiHY zX2)hWVTNZQCnmJ0_T|wKjqja{*Cffg)R&e0Cau#NPx<6ufTV4g_7RH(LGZAgk@dgh zmpsMPY%SgFRq!5MprC85;6(jTeA#c!wTDNkovY{ZfSxq+QG zf7rRP9X8>t?-pBGx1#1)OJ{FtcJ{ZfaBApo+}~|>3NEnNr~X7*lB_n-d_UlQ`+vQ( zp!>Cop^M?(oP5yLeHoU)k|3=|S+70fDbzhFk>~#Rh*hcL`7?5|vH&b048@}DR^|DE zJ}rYvQfZhE*U@~T5z+^s!T#f(>+=$2BA-Q!M*Y!Sov>IaJoYOU3WHgm+7uYUnIs_8 zKw_9LdPAA!o{KpZpPCn>TYa>@j!+JV&g-m%#csSpZ8B#IJ33gtcwC*&b0Y!1K2IG*nA_94s4Sw)BW3)oP?%q+9>yNHgcrLCvuq%`vT^ap25-~x|D zL8!J~YKA!U47e5N>J#@7#IT|DA=%p6Spn)Innb8eXZLyglG4|=YP-E zkHx&lL@JoAnKu6lGy(MyH>z*0f+G4K{-lZ5G)F}r-=Lku-Npk-mY<0P-@8GF5exmw zop{Cp6n>RM-M1%e;sO^@(rM*O9SnKR+ke<@8&o zQn$%p3G4TD{JzuOp=9$RKp_q+i=jtb1OL+%C2>J?C~&Af`hXErPp=1}217P2!vKU? 
z%O#P_1!dv(dzjMO^O2u93I%qzC@2CWn&>8f4@WbW;%=L)!1_!U9QgZ9t62A7M@F-{!oq+**iGBPky6=!Rt@_9Yo`P~Y0 z7K-SZL2o3e*s(bV{CU5GHPoke@=ud2gK*$-KsaF!K63W!MKdq(AElESu8Ua5(K_bZ zi8cq0(iifwmBNEVq|zkyLv)c-)dKAdA0>~5wGF7DP-a@!Wm^7>;CM;J+oa?A{i&N4 zfuH%M;103N`ty{Pwe7XNpBGtQpMXTNe!yA;k$!$X1K|qVvf{Qyc&mTBpgsh|nW;WE zcF2ZBNU~{2sHQx^7=n+4etINH5()xnM&h%y0C}mv{32SS%>IID;@?XTrZINB?C7W$ zuk%A*;}1bPD40Czm*2^;zJxS23c-XC4UW={lVs|J>g7({UwTj5wP`AFKb$62l57?I zPUj;(;~Z_=u9^oP1@E=hoMaV-9jDLNBi>r=z6>i~S?r`43Rd8p&X`R#uTXz@V3Y^iKj3PFQGCS2ki%%&K?-&eGHr&UU9~%)wo_ z1LmBoDI1AfOLE`ckzEfz8<1ZklFbiB`*Y@v*_5qvO&AA2$!`6F3n2ng3E>q<2MaH! zsG-AV&U<~8oaHllDT>;T-?cl{iIZdI|HG^P&*lk9caQH{JIicr;#SaA`qGJ8C^M4hy?cNihEd0-_W)cYC+$V50~PpP+tQ60u_yt2 z(qsOlSG0F2UsWxaTGSNMCRiN-6im8+ml7=2nXqyh9uOp0v?dB z4);@9>tfv6{`aO<^|cVJ+001~!0<=ou)UE>UT;H1Ik3IFtVJMVHw#YG;V<)bD8dN~ zOjNAk>5H11Tlkkv9#(L<3$%fqvd&s>wS+t<;PLUqfu-O zXq(H2_L$_^T?YTF4hYa<>oAOT!cW(zC^_AOFuU%)+gtn(;zo03Eih0>b5yQrW&Cv# znFFO;;Y>+-Hl(r`utzvC?#=B27v1()pXLn3zASB~PIzJxA$Wyy<;y1d#Im7!wEK#z zmx#2-BJvRw1T2`eX~FmBnxP)Qr_(~IBT*dUeGnn z#GUhyv-8@X2KEkLfbsrkVUGImA&@z=T1O&f3A*m9bUU>aeq>Iy6e z=zKxePE;JSc+|ida>fhOQW(5t;ph>Tp!GV2jg+fn>=GK%{ZokDQ23$)nE%IM#KX1a zy~k;$EOUdvW!9|Asdki1N8i#Bi=NZ)S@;=yJy7)UTt^si4P7%j`7hFXdv?P+g7@>a zWAEh^cM--DdI5Ol5FZ`YY`{g%wbIf-BmCE8<`7h4UxD@aBW?IHPrg8ksb!v?>pC>} z={Z21dcxzX!Y;&^eso;% zGHz5rN%7y(es3@RF?Ion;Zp+;L(uoW3M$6p*2;L9=)EO83Tl+99r5b-OX|LYZ* z(ReGm&ouvP5TrdbuR8&ATXT^alI_$H+(+B!)>jo2Jiu9mi*c}hY>^b~d`f}mdnt0? 
z`9z!aH(`>~OP<<)9z?&wMy0o~jfZD(UdRUtJ9Yr@<3r1Ow4}u5)gXgkYWQA>vJGCS z!b_V?lY2$gZ9l{LE&tc;O54*>*>}wzk&OH~s>;V!6W2)}m!N)I-LnEx|>7&kXk8J{?!jh5{&?+l+kUthN z@Q}8p{5NW?-VYStuW3E|Eh{UXIgpQeWf)hbM_)%%tX0IYkuKw|ns72ZbNb|Tvhyfg zNh-;jGN8~bbp2By$uzyk%r)(U;K{{*BJ2MhGS0JVTF=Bw9C?J^5X1PS6WQ9@GRX6W zc?NLoIM9LtM+(Dr{9!uvxN-Uoy=jJ&Y+&|9xBr}@^gT_#eW2MXOif@sdw*{s)|Z{u zOgl>mQT9HUGAn(MqC+s~4U2$`o|8-04!XD8YZMsB0lHGtV!D#GqE!eS@WSYcP{+S0 ztII$b?Z37+-l!zn7zcIsiQ4)6#=9M z7dnS)$S$;ZXc#IA#j;4hU56%$jJC*TxO}lM82I)LtGZb6{5$Ver#N??+i{IuApbuc z?9bdqamb>?eIQqG7nq^g6|+-%A!xa2R5?i858W`1H+JEg09{x6(#Y(i4y~hvj z=_IP>jm<6E-!R0$grtnaN0U(?bzy=&1o_7DPr>T*;(TV;K&9N;abX=?*uQo!9AUBd zbe-zk~;UR*A1{EQN`@(+uVBrrZLNFqv;PX4rZ^GK?EXfG+ z5sm3+0o!BO_{LNffs7LTa-5=(MKb_XpL$B1#o)w@|W-5$+zJg*OI`4ZRwdKUO zEaq2+U>euY=nOvKm^AoE8bTl-2>nPi!YEM+>-#NT(pH#?3Jq7xROKd}8w88G+WVi% z^7akT9ad#dEnpmC4iQkiR>*AiZLE&&Tg#w2+2!^h_^>g!J6pSrn_#unQ8+MZO{CS! zTtbd>o*ANtScQoGyb4^cKo&QPdESt4z!uB1Y->S^&r4AO-7UUam;LM{e>Ny3T7zW3u@_pAP5xbNUHjloMfG$vYYLx| zt1@+ydBYv(rVvzVtKIE+!xS>YuHa>@hR6Ej)l5%(zqdf=7q-&#h8=c*`{x7rdB9&M zn_&oJfVtNL2TfT=00bHV{Qw^dM!_Ju63TYA84jmnGQAg_0miHK3~0nCXrf9jBH3g~ zRCmdYB3i7u!q14Y=D!GclW)b7G1zeXs9-RS;3DEEQPy2zC$CF#jXy7NoLTC(++`58 zz#`4h)YklVU@5Sccd|*}B+&ZQSTjE3>P?2X{zujui0L_#H=a{N-6xO5ZDSfO*GW+D zkf!}r1tom);fWpqRNr3!2577mv_exO>h>0+?^rPd0VX9jgP@!3Zll^0n}x!II^&s~ z!K8IY`!psK1J4N(aV_0G@fLpRWvFl^4fL}wu`B}#^1p!W6OH1*0s@BIq_nE3N}wDF z&`+Ym2|)!v{mV?Ivn+tiw7~e{o02N}>O4SQxk5jMpsOAG?xUwzIV&}3J!hTUiun~+ zMZpSd^|h5yO;A*IRYyJQSO3cV+grI$K=r4IMI|gh&8SE<{0S zQ8ey`0vSuM&;n!0OIvRx-95_&B5)5fH;f7n#*Skz)yc2)BDS_J)K+!$4u18sUs5D}5va#&je06L-as{i%xi-RKl@pHq&77VEt8q1}wC^nl zHJzTRUdjCPOdt3Z%f>;2v!hRa9Gkq{qmJ=Ys4g}yJX)i|D%~2qNddzp;)fOQbLax?*UJ5in|WNSQ&5Wr&_;aab*!_3%sYd5}e zKWBblJFA`#^tkE~b9|dc8_(G`0;q~eF72o4QW-|Vx|7wN>93i?D=aEE^G}smN3Iz* zrLkhW?x0~f>%K|Wh3hpsN9a5bpX@EGKX185Uo^A-y|z^@Zo0Okdh@SUSDMyk^qkK% z!z{GX0m#P)3~H)-^TYZr`ms41|Gt*WP4^O!ZMlaAG5uc_z$I!m97tQO=f$|CUtph? 
ziV?FstOvl|82E;HgOE-|nqcD}ApY0_KI83B&fzCLrxbYov3arz7($+6&LEGsPg+dkRnyzj$^iZP^PH z71@VZS8Rnq12Omb%J>RwvF@tq-+^YP3V2BsVR4=>j5-AtQ3>y0_OhF7WhG=++w%Ta1=REx@rdYuEXi5`o^TPUTz$Ji7Ag2&K_e7; z!_Ab%_}a6zY%>{23n0W?KznB-!J6Zt3OVcV7Cwys9tv^`pjUwE(mM(?Coq+WLG7yQ zr2#yqrTu!4eq;RH{CxQtoZz?$(<_X_Y0ys5R_JAOoDFqW+X_gslWKEfItljL|MX6* zK77YU9RLxa*QbOsp0kWrAM0E*u1n{=l`%Q%8kBwXH(L?kiC5eqI)&s`hnxTR6i2t)?v&w z3tSJy&ls)IdcxAH=**q&D|;ohS{~kQizqqFerXQ$fS6Hgj6`mXIo7X-Mi_LPSaE&s z^?agze!u>4|36US0B-jSHf}c_T=oVjBO`{L)()Yx2B`P}c1+wON7;)YbLcG3@pnKn zF5sSf#Ki@h^@&9XJq7(G@0}0pp&{^pIk4Y*ykY&MxAR`-8uMDS8P7og50mG<$O8tt zBM$v?Z|PqN49};68F1Nkyn*QYRO!fzGrKW zj^=PV0KOL^ION^&v?~-=cJv^Q%bUe^V4sTRed+{aK zlb~knD9#*=C|FpjLJ+7NLp(yuj!zt|IK7P=PS_`p23hmLzQwfRkm1+?m-sBQ@TX8em{AY}f3@X3aj}#VABVv|83ZI)P zJdQ>|k}(3)EQ?SHJw=cQ)bS0pi6F9{%1u+6Q_2pS4iB4KUtF zM#5Ls%d2(BBBZADE#MLta#@YCl_#J&v;8>(sK~fcZe<;c=4SY0IQMAh=#Oeo5q{|94x{>ag^zYOT5h_V?u_NU z#(zuKyqa7UeRNq=72F+b&dULW4hU=rY|EB)37!E1=|8#@egu&@5Ip9NoL~VHL^O3V-(43@LPHO7Vv;n4|?(5DGe$`B_~ z?3+eZF{*I+jfGfTQCOYCSj$J1`#gxep^~7itQ|d&7Q%0jT7A;m?DiVT;JE}Z8vvIf zT4hfV*HpuF`BRT0EIn)KkncJ8>mb|u{n_$*TvdB}99XT&O{zFO ziGVca6?yqEJprFg9t-+feu8{O1vOn_wEJxkH!+p^f`uf`oOtF=CkanJVKPh?v6P@f z<59VfEpw1mp^8czs%}LxgnI+NC2bQ1FPz}&=N9Gg<1hP9qnxSzjFnq;ruzPYBC3Sl zK=oulvpC#Ci(AsnwQA;g2Ua)ax+4*Q12lDHe`_9*5(f$zsGNO-k6*(WWWN0}5gwOD z2P_{JUCt^=RW!}_X}gGmX1EGyoEGr=kHhzZ33*vcL|W%l+JX<}L*{123?Z0eaYVmdN-mSlig$nN zhPM8RhY5$dx>2UKMbXnn`~`4gfZ&{kGLE@@nx2rmrn%>GM_zjHy2^0!(8F`;=&V#q zg0t)jed+*JZ)3qZ>Vw#^Rxa8!sz#pvdKaaVtEnt1;9k71BT@{0aiRyRFoL z4#?>uS}O&{*@3Czy?@=e{)%|cb1?XWkO|^2s`}FQ-7>s~pSM%v5P1+l4_j&A9rt}6 z5%r>=4X+Ao@E{*)q?fMb zC}W0*a=?PXb=aB{A`RO`c8{e0Y)nVYeA4Snwc$Gn|Jr3~lsj#(Wk5*$>8fbsu4@9U zRTvHCZ^M6%$5JKF-~o_A_XN9Q%xMIfU;-@w3FSG(RL=$SN@=X8j@r*6nCY&|MvUjw zP)hMYZLa2kAhP>FIvgsj=&43qgQDS6%0)Bd)AJw9q*A-zr5TQ3d)@o6XW7Y3tOUUTZT? 
zC4%ytkOob@=PVh7#(1+3m;nM~IZvKGv1%Y{n9>Ry_P{&#js+qJ)$kt#0UIV3tjo@% zW*DPa2{0zDQv)Og&gzWD*|>t+`FXSbE^!F~p2mwZ47h`y~6!`+4fwcgyPz6${&9zq@c^8jus*ud`91Z=|l7hKAM8 z6K(D;ls&%Ka^V!uv2}~uiS?w&H*po=XUo>Y_G$hzZp6_rmKWPay8$kG)z3PpO3P5# zovuT5paXJ!b4?!y>}Hh|7|VU51$zrOlp8_P{y`36B%^<7RkN6iA=pVAi-&}{MoS}i zW?`Ni@hnD1>*uDfX&>@*qNh%FJa{yaVe%-YziH7Cz{Lc^BPK|#TGiQH-By66qJ79OCobCviO4x_W%KQZg@v6;C`o>qjF2(xVgOHd)*ZB(&?&5oZ+ z88jX=rWuN}2f5t%{GHZ!#xKubHCk;Pm6_4`>o$kg$7{;G=UZR`oSZB=O`;60lM=Ov zfE%2F+$7C#gC8gQb%oB}JF>0yk2}&jrk#DfiBh+FOzc3%4^!vJ zu5*)+$Ow2>4|FQYii#Ip5ge`{WosMj92!G=*@>8+^&!jz*z|-_ZrJj9`b0z0#ldl; zO3}wWPegZboEhLEd7h~JHCCcfVTv+>@Zb^_dV^{74F*~N2z^Zkke2RMxhII5aO#^* zD(=_3l@T>HynE|>#(dUGt*Mog47%qbFW1a|*)H0#2~o`?b?4lHl*SZepDrhlwNF!H zJ5e+SB1NF)JrOM~sIr2`8qpBS`x>~hXT!aPrD*5+#_1r;KXET+&}L~upc-qTK^e0*2WUh`kPhg>9OU$8>7O=)uS&- z+Yyq2;0eRHj3D9e@dWiV@bk)d5JlHBxN+0d$G&MqIA2)eLP%I-aIP0h5*GG>urPoc zvt!t?K@!EI7+Qz92C<&^VeB(AQ)Xq{9__md#78KDD-r*{?eoZr2XsZl0uBcKdZ%y5 zTcMaYSN<7=TR@<`8CWMfx{4EyB%vX~H}+_Qu_56A75$hb$n+As5lEMABZzfmkumObQATJsr5 z_0S_1B0VKUku%N*2TqvxbBs2|nzSmAi@g)tIJ+Mk^s zL#P8Zor{4H>S1Y#fvsKw{f@O>E%okxRY-GD0Y?(YzC|>Toqq0(gq>diw46yR8uSg` z>BRNU4rstgz;XDVyUBqT?|_zeq{djzG?I3z9CGM>Um?y?V6>GufAlE8bfxGD3p*D4 z^7t<~R4Ifbu>)DJ zP~+5QvF*~!!_gvOX~Oo3zwp5lA~Scv^YZr0DKv?WrwNcXLvHX(ksdhQrM~4VhH1^|VQ78fWUZ zAtPAN724@aH_79)X}S_f6Ot@exO0AM({&a{x%9%n0Mp*fuZ7elTi))+P51EE9xAZt z>$K6rQ)1k5GkO?C3!gN^0Gv-ppXq zW1l^1PsYXL?3JsRJ}AE3v~4d6`=BezoEjPJ^t32sHaNeF@IWh>U%V_<{R$$V-fmi9<=5YQgGiy_I#x>S zqhv^a`~CCprBo-8NGvfweb4@d{l2_9Ye~d$EO+bW_~$3mqf5`U*~_z|tuxZ)2q#Pr zVDh;lvoZtd8W$rYMTB&1)>8pvFLptnzE@aTr0`6u?Ys{jiSk0Vajt9$UQ#=GJxrZb zqvkeHnd7W$_nt8W2gAbc)FIm5NvQGgt}Oc-Fd;ztsOLxQo+dAmW74o*I0TIF$n4*2(;mg3Vur?Ql;y{%cUKP-7yW#F)yB!x51bGih%BoiWtkL72P3fwxyw))mXUZYvuL#0@*^P|l?f$WV*VL`I+3J^qepL? 
zcQfSp#|G>VNPq@cG^0*8=bxW(o@=)Te6 zP0t#YqzlBdK#e_eTwK5}?ck@5Q`J+>ObV%PA{H)`*<|yy_C}@A-Qf!dZ{a>2 zDNar-aBicwRC4E~$S6u6r$k3)u%}eQxhH0%#eAYv*?>C5+Khrqs^Z(h=t=tOsH^q+By~}rC>D~Bm<+Y zl_glGBX)Rh>q1Zg!eyKq0@RPpKB@22Jg7?yiY_oy6N&gRz$7ReOsf2hptCjnEPQMq zC%kIS63gPHg)%Sl8br~R9SSwRbm_dnd9V52q{QR~?`*xWG6~+x52s12?H!G!#M(bD`8)kJ~emF+w!sLQPbH&v3(&Qrbhaj=AxJJ7} zyd@MPM9m=x=;gsoHWKhCh=0YFNdJO&0?93H5Ef`7GR&A40U?w@{~)kU+Gf2_Vzr_$ zd()i1f3bj^mFnY~X%r=K&|zt18!SrtMCjq0I#cA^a=tbgxsz6^#OdpTL$fX^nRa57 zB%e_sp}I$PXGuJXJ7beUCEfUa0Ibhn?_}3ubI;L3YtbILzGkBnr*Sb8xCK*(&HcHQ zi@ji9B00dDL5Ot}e7V{tw6PXyhLMaHgTo_7>FpzB<0@ECOc}oAaassK#nc!geOyf) z_(TZ~Ja9G9m^6tI)MBhkqLzxz@QC0^6eRi*s%4BI--2-p81-yAhE8<&QK@hra$^L* z?LzppioZ|-*;6qwE8@xLa4Zuh@;#C&klmy~>!7$k?~tF45jS~!BNuYry+?)tr5Eb5 zCa0XQH9|a%7J3ASmC(?$>yqV?#jo&CPJ71I%ao{GHQ4@_4L-F8tN0tT0}hx6DdGqZ zwk+GOAr(_bQj!_QMCwNA4E8UXqtW!^-f9uDQg-@!#C)yS zQ=c{H6gc-Nk-bdYU!f2rab!z@~1ej0-stPi6GbO{9Kl+zBs;GNul4 z4M$ZNn|Xle{`dRlIDYjg39qZJZZ1TuFi+XF3IWxAN z7lBb+orBNsz*85jBnd3vF{sxpjkesD4H;*9^R_^&Q3R8%bM-Mh%rKCzWe3y` z)+A?gWWtKgn1tYL6WjB^ru3{vtj#iG(W4xO7VL|<+*2fdZ+D6X8-m8>X zS}$uEEdC(b%{Mg*Z_G7+W%SEjuX%IZ!hCgJA=q<)-~DA{)X(6Ex029z1?>wESF917 z_Mfp&QF=Ix4Dd)~-~U#S1F*1K^(>~D_|CC#YC|*k;ys43xj!+m*E=jNgfC%0oll;b zA(`c!ch}#T@pL}K)5>75)V-2?vx^EQrTa<0&Ry@Lvr?9cnevP|I>&C^0x(PS_8T}Y z=TiDoWVN}_%f_$+E_~MykH#9yyX@e}_5Q}7#pN4bIGMRU!RsD4h>~J<0OWHn|A31X zhq)&s6byOh8(k)=`GLifAX`H&c zUy$@K(trMlN3cbE-UMtXWSxpR1?oNVHlox5aDn`6Me3tdT z{zJQ(>_hprJuYsdxTK5r7ixh)<*0<%Blyg{61@hqX3`M z8?Bl?GuMWB`SfGjIgj30(6`HHqBU(thIyv{*UPf;tk3BgcR*dTa}3M<11a-=e}h)D zSM(_EQD$hpe@m^7y{1_6#FoF}DNYqd=Tld&+*XgJn!G^oVZ44506x<$mv6PF%Nve0 z9J6Hu=jeAYPPloOKNiar8AB2+Ixyiz@B^A}kk4fI?Pq5C@zGzrr)4(_Hz4f^q@VQY z`CGO|@bNUr998DUD8hzPGZ8XXW2fF7ec#7mGy#e}gAKOx4b^kW;!w7%glf^@qTKw1 z!yc|;8>8Pgym(*f=g=%Sp>tz&Nlrv~E`z;X`PFKSpa<&tqbPe7oiIOGqWb*0AEU-T znR@IbbfWzH)69I2@9rC~Z{(3ZGUQD~x3;NN`$Y;!H~yv15$;Bi6$4(YI2H@!zRZ!i zLr7o9ZaQ)3BRbrM!u-*(1eh8WNhH~QbVC6J_hQE#(kQ{iphNQt3~J8h$eK0F@g+q} 
zDyC<<@S$+u-hH9FR}Dk1T?PPnIx`>vg;GV?mTVL<42p^fA~H4D-peOaSI6A6c-;L- zGrS@<*uL&hh;p@hhD^@tH!r`wq)12u-AMa{Zw9w=wg>7pN$;xWPuaQy>g&W!6{BZE zj~#7OpnJ)-!hD@OcTEgv`R*VxH9bldg6x;gT93x!*NFjcgjAFdz3_q@n>QQ}afn6d z#1};O1Yu`(l#pga7uQ~o;1k;n;#|_bCQQw%8{;EmHm=rMKjwdeB+QTfbnF-!u`VOr zjw~sOP%MT|v|}vt_P}Qz26}|DiMcnKJ`p@9D&Zd)Nxc@>lA%uia^*$9ec7FYZ+5Ta zJJZCGMGZlrb6cv#RmS*D6NkKDj2O3k=f?W|r?ivhi|m_drDbIg-{SlK6I5 zncajSfZMwVenE!9=fWsjjcF*y<4S-06nLPMki(aF0n2YD!m5KLZL|*OeGf0PKqH4(UH*TcF~wuF1gxn4`K;K z^!^IWesSD1ATTh3*2J=u(Z;2Z=2;O8|Ca^GJY{@lN(qA!Wwj)R1||qrcddlyw~eMr zkxc04mtOCCK0DY!Ni4PA|6BHOLoD)R5Clfs58V>D|qD*!@__ zd5)iaB5Xf84?66Fyetiv%-n85H5Tatu0V zAl^Mb4y}@b@GfR8r9`NNF;3D!zVf-oTJpnz8sNnf>ju1opm-!0qHqK-NpFoJuiHkM ztFih{S5*@?$mrxU|AhAuXKS2jNBL1 zX{TSiYAeu2u~Ntq5G(N*13|Lnq0pA?FhrthQbY%?Y&_fvL%L0RDP)8 z$S%lE1~Mre%S8|d6QCj>#rE=T6*!b$T;Kw(p~T~#ry-O;8G^vx_u6Z;?9Z?Snd#la z?)%J2iZM=7!wG5<_wR>VUD?$f#lI(F02U`RwAdwUA?uNw>s zoLT$}gI|TVkY7nzU(G2f3`-ZmVHyY|6s_QEjdY-_vM{5}e1&7l+^PXdG9? z2NhXHX^RvQrwJ5KuI^qsXyk$Z#gAW#^!1^xv}==EP*sd-bWC$<<;{V z^!>BL%-+%3EAxc;HARDZLRHgkIMy{DI5~vlt{jmdvE;D9kv1LR4z5RXg9mh5)!v7e z_X9NX03G~|>gVx>9H+}OZ0QkjM6bV-6s#PS+etr~6yyh_mNs{TFxHv%uR>_I`cR(a z3kX;|u(}yns(#aSo?QcA{D1t{*8ab(XVQHetp+y3M(Y z4#%Iw0m+|Ix3b8Rf#Cqy4lM(*naYPP z$YzsrS8;%OC5PZ5a4+q_BWjR#^wRBCHY|39CV~tH;*2XUzc&MY!BH{ehh6(C9G=?ba2~p7_8Qs1WT6$VG?X8Msm; z>NGYxEfLOh2XiNB^LUu~1)xfmP*DPIY8JNgj8U}K5F1!rby_$A&8|a~wj6z;(?sZ2 zy;~~uGqkbyk(Uz3nzJy0dI#QX(Iv{XX@5NYs)}Wrk5I@iVV7~q;oPQ=fVdiI;aWgO z+ozJ&-ZG!o7(51tdO>7D4dwsi>K%hCYoo5+j&0j^(y?vt*vXD<+qTuQlMXs&hdXA+ zwmWv`%U7q~bI$YrTR&H=s#W*A=eWigz0Z3H0zS{MU5fonH8{^-p1r?whZMEg2#itf zRzB3M)k-o5T(}KrU0{v$PXx(G5E{maG<#$aoY(O^5#;FyQDpC@76)tcpNp`C9E{^An zZWdDLfs4SR)!*WYsIM)Pr2V=7h7O9rTobU9EE{4?#$b<0zY&8~+e7|oFlx?2^ma_* zr8NKGEzs0*D(#xoAQ%e@lzAZZMkh9&_IJ@i`G86Y^i)u@Zpq`K+KcMWau8_i`Vf6F z=u2ITqqtQ+B!!2z4@!m$H{V2W885ppGR0PTm3-j_^42tZN>e*Zy)_DtX5{=MMB(Q3 zA>3SewlU|1t`HV7PgV*j=f2?|z|f6B)%Yjvk28M^oL^IG?ecU<#Oe4&8p!V}$dqJX 
zUt>rxH5X}!vjC=SGfnb6*@vyt2`)q|ZTAXAFYnnuGDBy(k!c+gUKSfceRXzvw?YSy@Ue<}@O{|qlR-lkxu=KY0y6e8< z$?NvOOu*|N7b{ywi?i4FhHzUbFoEgi>OMFc{sQLaQ8r?;3SJzsC%z3$Yc4zqj>x=D zTgui=@!qlS_nhr@N+Akb-*?PMF}Gve3*?-YV;I;L0r6HNv%d46I*wkaq-qx5J@|+j z+<9rzovvZkZTUWXG=;fddkdXLU$=<#JW{JGZm(xt{Yw1%Xd3>iuj9tUd3ve6GnI7z zQrKfkd^qSJ+Er4nJ5I1a-f`BTpSX}(kfFU29wK-nuHP~pg_P12DAOOsyJL4rs#yYweuSGZ$*qu2#;e^Xv6(9}cdQ)2l z%78^nLHkyW^ENfxCnM2B+YB|C?t}Y-IE>cJg<20!COkqu0OEkg_%N8UtO%VnlC+5t zCmz|O_b>H+>P$NOp>0M*m5dyt6ljn6Td!nTSQ~-mQ7$c^=G?}S(9uIy$hS7NDZmo6 z60IJ>MYu}r$Q!Q9E(6BA;3$xwh#6RaS>Zi^7D>_1GsIEwW=Hi8SxKlQfVh)*f_|sA zddK&NgD$mFp7Ju=Ly??YorJ0JzCKU-xA$-|9r+mNs?~k5m?siBQSMw}%1EB}e3ezj-74@c&wmOY(hf+>w z9VK3#y~nA`36`g22X6~`EI)l*qK~y${vLq9SGdQ@jNGdii}@*T(4(_%<|9v1G{sK? zyGhs{D;|p;>-_O^tprM95(8fkS)A|ojb>S=k1)Su)Qo^cW!W0wR zaZ$B)=n?Ft`A(o@!`+F#>~_7PD=>hbeVaylFVQJrcSQ~H!pLa4}HMk}p9vQItK8fjZFE$gJz?XNLjB2(tzhldM>^g={kq;IG2F=AQre z((TJ11&*u9;7@h}y#-sgaxvqTg9THv8+{# z+i|TN`x2b^`EXZ8L`9%;?#wn{RI&*#&#Sy-Uel4#EGrB2l%Co71xEEH^78t_zuGM} zXxN^vljgec71VI&)Z*~G47rx+RH7?lh|dIwbS>K=iudn4TIE9N22Rg(f(hnS%RUjO z=#vzZOhhIK9yX;FT;}1r7O#Q7+SqP&A8zhQDB!_RVW~XQWmumQO?7l{|7aNQaFiIN z!`jmyHIa`w=_(<%>%K$I|L?}btBr@0H7~pOw?a~W_7JO=yfKwpy5umr8Wzp5#VqoO zfV#+u94c};?vJ<|X6VI688+3Ch8k_ue195tVsRZ82YB#S0_et>7Awqz|T+ zl;4&Z(qxzbi=pBhzPSfD2^O8elw6YeFiyo1n1Xle?KpDo^x;M*0p@ca!$<~}YACT< z^db(4THVCWnoKL$Gm4!#X|C9fOF^UyfuOeo}NA7V;){ zR){wmO3|WZV3n(S+d&!KLe~@Nj5cKVG7bte)R2AopRvVde?iF zLTq$!4S(2AO0@)}a|+vRS4oJ65!@T&jt~^LLT=d&=JR0QEDn^a+omvc+fyO?lR3<}gJHd>^8Rw5dq(9ut?E*&BDt-1|-sLRaguZ{0 z60>J-@NC@>$ECrb@=^H@{6#7Mh@_it?P`6FI^Gn1-U|P;!v1?nJX2r)_sy|yfW|0) z*W{PP%2U)j@d-3>Y9)D?UtktaOmK}oxJI0t?@;F6&Id|0Xt60v4Yg|Az#>2;hg@N#PY_{! 
z*x94q4%)BN{{%Xvjef?AX2MH9*6mz0QodQ)*e2vyuK(uS)FS+J=xd{dMqc|I5;bEN zT8s6ZQMh9SnH4Z#y!*6J$;%AH_i2&7YR|+Sy+3r1DnEgYjTRf zxu#-*`4$4rd@hw+yMLf!!VM|!Ti!>Ya$YC2P*i{*bBY@iPwG9ROY6Z?%hL5s=UQ>a zBF%r%-fB)`nOgghB^vnkP)A_OnDr(+WvIbN=BCoiKV&h)b)ct(SK^{!Hw9$HX;83F zq52Q>DxZ4XhSUp?*puY`uYX)wb=#bkXG&)B2O?zLlbXLO9jTGqxI4RbTSi-naeV`i zZ!Y`FiD&*!Gi9{$jviOdAJ9(Yp!?|Om+9r723TPaE52M&mYk7 z$g9U_=eb;zHLiYmN<)xZ&O+7eUt2Ya{?Zk%rU$6WWeMFhp?$`w24;`tqp@m*cNemm z6zPlU7`|n@`Xpk4u;f2)xHYP?tTg5Ig#yBI*Yz0NGOcXz#dTeeVji>4_vHAGeYc8a zzD$4hW?Pbz7XBR+yX$J|OmF#GM zhz~XUebaQ1Ik;knFk=711JZTg^lmB>ue0ZDpZ@uMQ1#Ck{=^0%d|9tVfq>;VM!8< zGX|F50xKbaT_T^a zbF#=A0^K!S%l?`+4qZ+qq)K1B{ZWqLceuQHLv*PdYGD~w`0Ux`>m{oV)5=4oT=1Nn zVqP>c!wjjPhA+T>I3gmQj2*Cd0GMU3RS+xh)0nrl z?DvJ$v7v*L$x;}Irk{swodLoCUL&YWwn}h*#8bLq&W_)LLnFq(w-nh?R_>PI!!`rs z%y#BLKzmsKacr$Jyy=w36bItrMLlzjcUUh#8nXr+tm?dH`j9r5A8CR$XkVsMMB9o1% znnXjd!qlLLfNOu6wp{QAmkMDS)mX%_sc-uBMiB>gt3M}}&Nrvea%Ps|Q&?Z2{4o#r zf)kUZIseY`biE$75VzvYtL$*zdh_=!s@VhV;*Wmg-cn0%=}^K!L7D_*O4<4FcRq+I zF;lTtjvi^Gxuk~1By6?`-f(tp;40yq*d2CgEEG6C8n&#*wCtn9m9IiQywz3dnmn+A zeT7gQ`pm0~y)5wD-~~N#Mzb;ka{6wC2eAXmYr7Y) zK*<1$mnb$_il&GDLfJOt-&oIj4?*mIk0$i_s)hJm_zTUzJ$A|=bld+^I?sqv#i*r! 
zw%AasGLymO4C)N5VA9*>_PR1FYs2$R!WdDox<0hx16apR@r#lgwu1o5Y-1DZ@#BZk z?B%0=@M1&+!}k=DhGlZ zYl>p47acP*Ma-;yOJt4>A}Wjo1Jgm4g6aro^;=`UTGM$AVJE_s4rKct(OO?b%1AsI z)%E)v4^s5hLxX+_luTYPLQeA*=0Fm7I$L^)yMKzeVo6VJz)k~maxH?MQs|wNb&66K zikG%i*YAQQe{z}Xje5m2_yLMv1Du3u%O$v$N&X16V#0HcVKc=sntN?}#Q}dx{^%A( zMIZ^mxy7yNfA<-@drAFcIv-Gr8vRqN+twj68esL(imQW^=KINTh+`?DVB!cXIXgCC zWT5CI2{;XW;Aa#C6waK(V7%~hbcux*Ox2N-knEqNi^SCZ19LH2<4`4+!);n%I689P z!l=lPDB4lV!lVQFkTFXv5Y(FW*qLgMd0`Y{LjXccwrJ3165^1oW^jM^<`mN_(TfqK zb;zL$UBo0ko|Ax*A|DePXTD(u<*k_J-CTECuB3ZrF~m?z`wp+@?aw zp=#M`ui>JRlLuRx-ayy6?goJ9T%&c5_^1mqme~qt zFo9y~;}M2Tt&2#qdAVP2A*0aJgE;hbNzfGB9WEq)p@p;VH+4VbvB~@~)OxVfyACL@ z^`eyB50yuV^m-o{r=p*bKtF>92rT`^)@PkoQ=_NEda6T#~zgMATS#sI(ULFCF zQ$dzXR5L?vZ%f3z9uooY;*^4%juKCXc^0 zP+Gys$X)3rb71VhOK~eq6DS)Fhor>g7yXkPiqCIr0{;o$L~ef&Km0LSV$>=5mwUXu zJCG`0OPp6^jZ@jmxltZUL&dPi=7YCH-^l`P_vDMn7GFoXE-|w0P}#fzNTdJGeI*s% zZ*2A#EO^q{)7Pzw>C7e@*b~<8WZjRpT~>=Xa=zfx;y50$b?u{XWKb5sQh$dq8_5BL(6T$4a;Aqz{=-8Oh@{s><0Kr2jM%|rjv^}aT3Z;w0(E%k- z_cIh;4*7I;5fIE)3QYSUBq6rPchdk*hZI%=8UBnU7H7FyMk~vx#&#wVLHkxH%Na2d z1`~5YBNqaG&9Kezx~1C$U5vK9xb3o3LV4{~EZ&a<@ zy&9ve-Ysr=YPkZe^-M9aPe9wNL(mPo;dH>^kNxFRPj3$*+j$ZoBu0WP~n8+T9&093!zu7auS{75Ivt{f8%7iqL) z+YD@xP|GV&=bcbclK|71g85|qu(F{RICZ=oAVsfL^W&xbtpI|rcqE0l zqKh_hcotLBHoMmD<|s}OZgq^WVaEMhw933}W!sJ!Ra!>Ya;v#cEYspm;SBmQeI&ti zR1gy*H!2&F*FS0-=9zVDI$=pIEo$f0J!jEp_P-VLGfPxbd6i??xFBhl%O%vYObieykcF1SFTm;4YPL?{szcL*I~?{0^3h6O8ykZmdf=jz|mUy?Fq&iBgM#%PS=} zI&4Z^a1qotII3ZLC;<6DDXQgeCRJ?J^S;|`>0YmNtmRb9-?Q8bqvJdi)o}t|*>}e; z<8MxU*=t85mUjn<>iL;|@~kKe7!2GLg?sV?9+)1N`mdcC2SF|_hObqM09))kvG0W?VdH41m~D|3Bv{4(F@lT&5|7pbU3g%DC6}X!Bd9;e z4N+^?joc>WEm|xV(givNI>seNz1D%98}399wxtk81gDiJ8%2bX$?|*K-O0>ja*(p< zQLs4U0!JRC?2*6jL!B@s?+WQ<0cYXOh~ng~i?2aoRl1W5k-!y$;h-8EmQ}}gbcmIR zxUL1!!@E#2lMy8G?-0mQk!gq85`M^~CAHh(4C_R1 zrZT@q4!YEM*{a5VE@4Ipuw^n?!i-o2$2hb6@yEm%C$~3Sr=BpB_-&WE%Wr+p9Sg0Eu6FGE)c9$g-&29QeP|J+73H+%N>`HbW98vi$QBG{ymu8wp&)NT%H z>s1~~=_?62*5={hg4|bTVJ3xr6D0P3Pht>@qOHxdXyoHMdz#-G1=1|KTFAgyM%9d6 
z+FoBO$gH$jiAPlXr4@5CXSkPsNSC5fk7CQ#x0|}=ezeK z&VTRszJ8s4aU7lr%^Hz5%V1r|^@_C!Z%>O@4sC5GugDWTKQT$QtYi*{Scew#wgfOB zO%|$SR%kwBWzsRkS8YA1EUrOcJ-K3%qZ9pCd@z6Ozs@i<*>Mc-R6unt~P?aky8fL5eGE&OS)j@)j%S zJmfu!+Z`n&SyOR7r=t8;eZY=#CjDQc*C$oWl~<(#Z}YBOblzkd&izG>p;wX75waz` z>!eEhbfrymaN=peLi7QCV`I9S6&kUeksqCP>ZVYMNC`DNOt>Y?uWA-C%>+O~+ z@!2x2nrX`mS~$7*e{KER0~hx}ity@@NzGk(*{Hthm&abo!(-AXNH=kJtm6 zQ8Jw5zLjGLddGYO%xrTiPMoJ2c2UR{bXxSUkIIQSgZ`wZ4cmDTQoaSs91I7v4N_Ty zw4f+yf6Vjb{t8O%wAY&Sq6*CmY2AD>fQIm5R_@PN)(H`1hk7BLv!u&8k?TSdGH1hR z$B8JQ8P$iIN6~(U-wwD#!mqX~LNtBEMuV_Ejy*^w(@&x){MXLh7(T)=Ycv*X+`nHL zL0oGj>NX$r|w|EklU8l2sOQBCOAv(W+n#H*5mYNy{AS?FDd z3k7!onJMO4R*dNCg^_LoY`q z*aVn6?e}dEp)*R=$e9{|;Tf8|WK;*$s!Q?8Pkow3&KANNGF6}Qvu{TIynh05C~j%Z zf&soaJlx>FCaiM%l`eCGdt5b-83eqZQCSRofmOXQg?^CR-=lHy=W&M=^=CxC!UzmO0h^LnFOTqi@sq1^yD{|%o6!wJt z&?ELzQ$!8KnvbX!rllo=z}wNx^I0>?f;Q`Yl)>nMI3azqOSHaX(U9jO23`_uj1VQn z^(OJ#1OA{xY`ZS0o1g&RvAVky3%jxCoajvXjoaW=oA08=pnYS{rm=tXkxfRYeRg-{ zIrW1gGZiCPBIp!cDt=3V4+;U6qN+cdP4aK`x$R)$(f8$dL`o z2iRog(hXuer4}IF8YTB}to&o^5!85f)}+?yWy;q4a|PCf6KZi>`O~PQEoymi6J3Va zs|*sJyq51i?R#@;SpIu<7E;)7&w-ODw+}u1o~+eb;Gq^_zCF0TTnv!L@VKeonXy)* z)4-Kgis)D3G*!T`5YFLQzW_{)$bWO&eo!!+Rnm+ttHn_wOw{zieEvOy9v=W;M-1KjzR*vK27P;J&i&JibazxSRdN12@=0O*2qEM4;MvEA^4Hm zb&EHRQ`*Ly<|!+LT-BdQAJ>0m_1|7#(o^81@Zy*(spZ#Ojtu4v02Ub_=|>lz+6DMS zuVJV+Q$EFwqnYeetDSgdr6y1Tr-lfA+uZIGPx87_HDSMb)(t>#6%nuI>o1+Q=Al(1 zW02<5oGr!TuSlNMYmOS){OO6NE)-~3%aMv2dl5XcNk3C>~ zzfcT>e8T*fcV7~Piw(JWK2My<#ZL67RSd`hev^`vkDZpAb+E}i7_NR~Kcf6!-?fbP zcT-1k9U5*<=0>%jh#dYbuQ>X=zjR&yeZD}$u7PoyfSP`kH@-%DBCdzupegL}8poS? 
z`#`qmXC)iJ!nJWgzJX=iuU(7axf%Ei>5;;X27PAUHgh?7T5)Wx z)GA=$acFRuz2@JN-v8qJ4#zq1bH$^-!~w^~8W|&ppRKmTV=Z(FcyXHM4JA>%IYO;CaT|^?WsDu*{q==8URs7j} z5<8SPZiu~A(w&QQZdbPd%cc=tGaxH;#~vgEM^Dx2iTXp^=0NUY^b6s7os1C%0Hdms zIYZ3%@MXRgvegoIZ6;93os~~xVI6c7WqaD#ghzaPAyMD!C2G02M`9ILkn-;-pu@q)sfNq!zW`KKf%Gt8o-h zNUOcy3>e{X4t!bG6UcFB`2JSyrlbMz1O2K=|LO|vL;Xdx=~rLtsz#NpzbHu*GF5we zXvV2r!>(y|r6h?BkCPR=ULQP1P0jixN?F}LJ5=_s_>p%lo^@wH8s%omF)&>C-}&cA z$*>us!8Z>ygI~j!^`qB)p9pKsu8ho@PHhDdeq8tUnN_#@r+1cqhyW(>?D3*=6?gH+ z{vgetykz!e{v9IbGELr;5A)ZaD~{YJRp5!$M%v``K;?Kbcskw-uiA||ZXTn8=4@{7 zi2JvK9J<5zACuQ_~ zfO)v@;M2l~AMHW7RNlSw@zcpU5ti&+4S|4$ACfhrn_8W;MkU*;J{eAPqT1~VoyDeF zSrJ7kzEAA&@Gd{F10`S9=0*)l^tue-?A|CKt;gy0az{`q2H6@l7Wh2;|x1-P*V}!~N~vWXvP;`;R#-!;>dc<}kKc(mV7gMg-8My4CXUiKi8?KeuJKx2y;&dq z7%~^pmN46o1W0SM)x$8dlH)JU&XGF>JymS}BP+Obwz=GUvT9KY<0r-#DCi`pmw=}$ zF{7?~CZhXB21;srv5nOJNK!uogZyWTOnR`nxqxNHXz=6egvY#dMJuA3d2+wP_<=lv zHK*(jeT;&!NU6IItBjJ1?g5>Mv0)dV8Ee8f9dbaYeaZ%Y@ZL>*bO{w#Oqg1|o72$~ zPnDlYbqn0Aoi}EX=YgYsH`}<6_$o1GSccss9i6j?N?2Nc6;Ntb^`D;kR19R?X+tWU zFW9*OuhPwR6tjhh$Xnz*A1WSGXX(e!<6eo??1Qqs;d4pt8t1)VKa@4)YBuB#3$nnq z_Z3dyPMm^7+rg{xMC=wZrRo*>ezKERz$NY%@;^M{n)^xJ8$>2*$}_H(gj4eRvF~wF z=BLUrH4I6@mYoGtoeE4Cpar94n1(sP4my*t__vJNpFW-Wsw%NR2p7VQ&_YIz3pl5X zz~ZfF&g9@O?T4^C+xE8QGZf&$mscH!YYvKVr>i$8{)zw?mO-IAiw=q7FHln82@&+9 z5~_chEI~AdJF6(dUuH`L#$K!48Em*6lq&SNCl2k^B<*@Z;QJj2f&xWc#+=PfG2kQc zgCpSmNbh@B&oR}$m62H_@yv4D^Fp(ub3n&XW@&4a_7B590*~tJh7FECx9S?J zXJ)U*PFm&XmBrKKa>jHftDa)vcsBvSBl+dF^OuTsyVWrMtG1}`xJ@kpAS-4{M}Fgi z+gLY1{s<_jj(HC&LW)ZV&aPHhf_O#<`Lf5niAr-$pl!3Bc)vJGODLl3Ebet8Y+* z5?X^^W{D7Erf82%T9EZb6o06TM%MD|1z{jAZe#+t@G)Bk5x4kvqI?Kb{%%r@rJ!#* zFTga!xz#ENroS0Sc<-<|lc!62_hx_86q*_CJEi%t58X4-kCX%;Y;SMp-W{&POk?d# zwpE3*1;0ebqqvY;I``1Q$2n_xdU09&=BeI1wJxEbx!-{PzwJ|B4F)Y&^f71heXGMi z$)1yMGOE-6L9bl9Z3zL!tUFmWV*9_npzOU(W&3s1<;!!YZIF-BJjEn!e$>8>U@}Id zgUgGcMXNRf)WI)x`go++mX&l&M4P;^{+2S@f`7m*ns#81!`_c3l3JY7CLbn)a6h^a#3PefXHMDpcFpZl&V?`y31atvPm({{!6B!cuwxPG8kkIV 
zAgmfca#X#J1-WIQ!JAg=f2ALsFa5JfU<)OrhH!eletzG7?pD;0s6qm@%hpf-WY;*$ z7Qs7mk~j3k<-4+6TVDFn@eSPN0?(E$EV+Yj+;WSyQ28nq!!t=vgXnYm-C_*UUf$C# z*Hd-})|6z4RQDAc4L7ksKn`6tg>xOioGc(gI*!7m@YEWjR#i7II2`f8IQ*wfj)4qy zWrbfx%Ar@Jd|d3hagzGP*=87Wv#=Fk+@Cb8U4<(GPsj2#Cxw_T(Oc=t`5|legMIag zvRT_dSjCSHx7*TGVvM;^t|c$4Xg$s0VT6>l&iZU|Tde=~osdNEAMDJREm83eJaa-h z3zf=|RM)I++hq3)O*K7wOS@A9Gi(vm_wc*q{OS2QmgVo)ZOhT?vF|hLa20)UFu{F) z`HjIXel$^Qrt#f%=%q>0ZhB>ZXc@wzi|P7p_0H0(#c!yO`%Mr(Wsv|J1IvVVnbbyO zZAZP!J=lCT%8#^xoSfCK&r+7XeiAork}<`2R^b@6aow)bz_G*eqLp92Cm_83y3r=O zzcTy2&R=7UNxdh+1tF((xxSut@8*q63;AEqaFuuf4KsP~c}yU;bFIrBU?UTqUw7E~ zpvSu99cIzEB;eWy@PIvV9i2#dZWrJPCklLY^Hv9|;DrA}^ZUFxdPJ^jV>S#u;4$V} z%Ns|P!tp=?n~GVEDnc3FM?XCI-HSNSz-51XV{3;c@CAY8>F-?Vf&EagEt0UUVCtU~ zH#9-veUEd_#Nojs^qNP+ct?y?fqU(|rEq)t_`8wTce?#C|3CE6&`&GlaI%$VHF@NM z<>xaJBmX~}xa;JN(EgWpr5dy$5bpukPmP$!?zCAw+^>P`6=~fHmmU_bE=a31(!!CvnUrH~Hi@FQ%q==ec4vmL!ZEvTQrBDoP22c|EQ(pp4#5H)*} zbl+t95ou6{II5eivJN&<5eQ16GR+HFg_iKB2Z+s@Lhg8LQ6Pe{9$#NLl9-re?*O&Y zBF5(8dy>UPfd_ysR-Kkbskjk6QF^AB!Y~Y8nyyQ@Q&Iv%Y6gt#Hdc&XeNNS&#SfiR|#G!r)hyW#bkQY-OW;ggt^9; za)=OUxS1&0e3cngS@#&tz|O#|w8eyd2rc_n+mdt$-9rg1CDh;SKc(X)o_bR7VrF32 zj#_o9l$e1I%Ih!Ex+c`Z8vMVi4#t3a01djLWA=^;ETSq$^Vouzi9!r0={t1DS=tS@03NkTpvF@#w;-(X&g&K+WpSqgV&w%^f=+HkCAtS~BZ5e)S&y55P7iiwV!ma|l4WD8R;O?B)%@*g!bZd)&c7fftZ zEiGAjBlk7oVJj$tQBA?yaA;F-OA3CvoMOU=bwUcwkT3@H;)7hljtasE&~R|$8ECFT z=dGo6P91&u&G!}(4208BykL|>U0Z}9*?+IT?G;4cQ3nN%V-Ri@*3~*ip7&yD`;A^# zB)r%^rsCuQcJld16^CXdkt>k-0*yy$3;h|BkXrKNw%zoSYmb>8HKZ3MtL4IdjJ+Cr z=fF5PP<4c6LHuZm3+z$8567VJRxxcU|7*Ca9QVb5@EnCc8W5fEUp?GD@zds=ffwF@ zmnGrG$rGdA4`_oYxYWV-$y==I@l2^$**)f;jMkaR`>$v;jf=ksA(E#qlDXm%U>(I( zvgCGvb^)*GlIOA(1yrrWuHnCQg>ZZVjO=gD3*)pzCaW?ZI4{}Pwn`SoSbJ6kAqjiRf`vS>2k-Z}C> zI%Z(RMkqkltYjS$ZMqnH=etFzNnxGkWQ`ucn}+1lg8!~}X_0T7aCD9*sQM%CS3Jy5 z=I*Zs0b%epnak|yWMxOERZ=bsOhR(3@C^?-1RYg#P!&xOHz4nW83$@ z7%UVhj=BnpJYf+dG*H=8{JnwC;IGU3dVe}K1W>SU-%0!{FvmAy)iUor%ax&Yh>X+Z zlcXHu3=~!voUQ$cB)hYnIvOMoiKjvIu-7HBtYF^Q-D+}mQN;q}B?+$eg1gK1ksZ?4 
zz<^(K3zEay5mD^ThuY4Uq2%pZ_q#RRdGj#dE331;sH)zrZV_8YgaW=EMHn3@Ko(FC>R7j;mG z;QvHvDehE57e)^-T2`c}rs3<;1(b(|$xCg9XA(`E)7FrVp(aCAP11vzob60s6668u z;)@F0%KWHV)v=T<=pctYvxlQ}bmc@Z`^~H|2lYA&a8Q_6aZb+8@IO$LonLOuGP1~T zyq8}AQ?m>!wEhW7Ix>Tdw!hczWjpI_Y`*~EyF}vMn(cq+4WIW!(>0OLZqJb1gGlwu z4KF+%248g!pV*f#5bf#G=PZBS{6qDwZz7LpZq}!ms$U6feoAlK@3!gzERh6WhP-Iz z?X72T)RKRvW4{T$ZbFNbSAv;EANrn~c(8_tVaq0_)e-!@SG1$T`%}g^m za=#aije9Ey>Y1W(@po@zCqBg=Rev8u!~1>1HERE3-A9k)nx|)mm6rC--ZV&!S#A1|-m&uyRY+g>+=9N#tKV<+UdXqGep8a8e8USS6P59hW44k?M!7{qLJ za_IvSiv_B`5qkp9ZXk5jX2<=M8mj&KK)k%P5=!;S--Y;P;bxAxZ>M%za!F56H|}a6CD~loHTpY~?GEBdJhcNOtp2(ZAJ>+HH-ZF~Sm55p=7{=f@ z)xD+E51^FKUF4g5o5kdlNrrTu)uQt9&EAvicSIhCz*yYSy{G8Zo%dueb!&n12eWnP z)JsK_z!nw`DGTvEkk8Fun@s!sN__$;z}4y-d0*YT#E*tBm{!%uLhBp!UDq{ zUBTb+{MZe==X>$~|UBK;Hvv@}uG zqQljaO;YLI-kZ*)#C_7HG2OeH%+&w+GqU9X7iC6;l3r*k56HGG4@RxK$QA$=Svkz z%E$>xmlBhc#p)99H{>lD=D3_vPuM$-JMASO`S88>C5KCVUd?RfyHQNDtvSf`5l6lG zX{84i2ftDwgu{iYMm2*_S805=i2KZ;n@P_hBbJcRJpLnTB7h3pY0;K*D>9@Vyx*(C zb17_HUSkL?)GJJzQdx}kmg>x!Y_)eDuTB~oAV*(1qmPyJm!EBbW{)&8;u+DZ`8oRL zPZT^Je2V3$q74Omw?ofpDh-L5tAFVFwUP17U$AKs1L|<95cud>Dv{*oWP`0hu>4d< z)#~@Nxg^Y`CG4qvdB1lL@K=@pVFAtqf4Ai#5!Vy%-+g4Z_xeY-llBcdc4*sj?(_clo=&Lm9T!QX`WHeA3Q7OunyK3sXc^0KNAkPvZJIr&jWXr|%<@DJh%OqDD3No}56{kuefZ~-&z z{HYuFND!J`Aq2_`)JhHmDf9~!<8-&ucPtk#-PVB>6yh@$8*{!Q2N$H0cF6*rk0Oei zpSKD)eK7zPw06@0UWx_KhCBX5_ROpt0_}^+7++m&EE`D9o0>6Z5qQfkchCTvEGG`D z`ui7lI^cEQYq^wO#Jl`@pV-Ln74Oj`^7G~TGsdy^1*3XocVJ)oVt)V^Qi!%;tB{B$L9P;&Oj4!g-%4v&~t>X?MuOJNxx+_RdfAtk(u_ zuNBX{?)qk}4MzSd&0i6l=Duakk;yE%ng)G`ivM+0Pu@gS?qg0(RsqaZA;l$uCFeFv zVYeNEH#NfUrLso*{+@McfGW4lQ*oiL>Tt~BGT8wV{e$crb+CFxSiO|6FT$jF5 z=JN8F{5mwPq7MdJG)Q@Y+YHjKc%V}ckQXUvQ0+RPO0pX=$nb)P3DOSc#-wcNTUT&B zfU^-`x6V&N!A(Zg_b2_JQUb*&0K)`2rV1t3H9h_q`Y|_s9{}e1_#FC;9w*H#fa_u~ zS*-uUS`?<7o1}5+_Uk2!Ha@&VmhH<$yx4{!f8{Mn5`fE78r47aFh*VWMXX5JO~Y>8 zKmoM~!P6E5ncrILFcNsp!{Qiw?^H+b|02%V`B7^?+el`NjfQ*qbLX;ldKx#X{PNMR zIl zP~RVNNHf0=R8X4K&5{EgP{qb*enY{Z96q?ef75bHl1|VHLhT=3#q(Ha`om&)`&ZIM 
zO=ICNN*e;$tAC24Itz+C?ZP8H+sc*T3yh?`-#<3u-hl9PlEU46Bfo^)EBwCCz)y&p zCA>Bmb4jDkjp8o}B-Q)b6plo|u($X3c`id9x21knV{zQ!2fTy!LF1C_sX8in=)veC z(TjF%hfhd?B`F-+JtJEBx@aJ-VEjdin6s<@)NMfSC3Zt}KWR313TusRi{jwt5y9_{ zi#A|HVVfys%XM#J*FOW;U^A)xYqR{extbaKzrjJNj{Zzn#%M|fbJ6Zwi;yssVN)Z8 zLcQ0zpW4Ft#0z?lhi1u5SN_c4POR51O4SU&3!qNbEA|{ia8Z~fD!G{K+T?J#CsVp> ztc^9sZI`N#j)Iv(3ix8Rp?AYr-d9GMG{7LehBfczg4Uh(U{Ku5(eztb(xb6UNWx3A zvhcvtL*{el=R@EiH$<;nU;p7?q&61<;|Kia?EdS6{5%>|J;{Ku&0Kv(l>5|~M^V*m zGs;L8=jTZsbq%{@vdrRBv0A>f2oH~93oE*s!#iVP3eJ9597x@MzM*(E(0h~t#^LhX zlu74uc9~DY1&ypd6IBg`kj_a~* zHpC1`RX}L4{5QbBa(C1wYogAV!S+3m}Vf`9uOns}8W@RxB-l zgXa3%xK%mS8~tvAXg+yZo=WO?%kUTUsV(f&ZDPomUfR5Q|8B_pwjo!cceINDLsB{lk&5o(CIMJ>;S6HqeJ~)Mfj)LBPTFn2 zS4U-nAafxWy{dBisP2M`;~%6t5x=s|Fh}@Os(&}xqdn>U;iq$ z)NIE5bNv+0U^3hNKXT@S=NL1bH1fZRoUZfvNL2hoI^~fKNQ6yB3xiCNFV{>inx)gj>a4>okv@s6lkvyAW&j zNW08aY>T$&w!Q@5*iB8Q*V=E49sl>InmUvO+rjf2*XA{EFfE$AUecUX9>NVloGFHf zYa8DEic$VxRD3Rsl}~Ci*hk;$9G)mH+|WrDt3K2|9T{1BBGE;`ej^*|I*K~OGq8$~*JiJQZ7*H_-8AZhZBmCQp_(ZGQ{gzokz|SA(_eFKl^4t4$5zF2C1^NC& zTVMCPw?J4R)5K9Q+l z(!#h^f`1>$Q8e$fvhI+(mm7X38V+b`8VP3r$xq#=WVvN=j&@4ogkDaT6TNB;Yc6Su zPv%l-OLfk~3asdtTFCp@Uid=oT2v@z#j(B={EL6HNa4x#7f}arQOy=@0(L406J(K& z+>!Jt@#OH7r0Uk>Gi(if@TPKZ9sJ*(sDR|e#!#OzUlZ=4ir{3tkKZ6ZMIQ8v%oKKf zAB>7-m%&ZN&lvqey&p`ZG_XJNt4;*$laqx60#!CiRO>G~fflp-oaN)kSzb7ra1UBPG35*{!kvj% z3&#eccHQ*WrB<~V{pqnJp{hiT?mq(!CV7aCO5ffyxNr8%*KBYCEv`T;xGJ zlb2BH+dB_c%PZB(+ALAD%cB-)bgm~J1>TJobM+oAqm-K=Kq6#>G3}m_-(S6W#`if! 
zH(~1NWm56|eLa&rR0eKtTH-1EVgmj*K=d2OvfyNro)P_Qg#y85ET1B-jaYa*H#K#G zwYA8b5r7C)Dv|Eb#wkTI=H#PfFS7JY5)b7elk$Olze`W9jlvND4=V_o2(%1Yj;rZg z<5x5%_KfibCmE>-5xxVNxb%b1@bv2-HQexEbC$bD5k9XQXrf^ASU`ud)SPO{gA#5;)P3lqlD6@utc;JgS!`Wy%i#Pyk&9YCA&8*{4Rx(?ErvyEpAEue#mEw~1>7y#hjBdY z9L-geZXWdzm6J0P5uu(cPJ!vm{R-FVm2xE3G#{Ni^RiDb(5z&4Uay=%E?vZ~S5r4( z(naVgBH@#O2$NfG3|@5Kvx$i8q&j~yoO0Q~GJ-0XHHC#QBpyLi!xVk3Wen|s=tr$c zDe2S!B{h~PQqakNLifxqV*yC|r#X>6sW%uEy(-q(w#8dfAu0nCvQzO?H5mdhe%rd?#-1_pdN|@7L)6;s9D$pzMpuGcHzhtxc=d zH~Zhar#pLrht*RxdX0iIvfbj7r0PEt$hUo`=QPFikastl5ZZMsZP#3R?+;>TAHv1T z??H2i1t^)lt+q)}A`kMIFa9g8v<~?AswErt_Rl?zDHK8;qDT61T%Xy zytC5>?2P2>ZhP7+;%%GKUAMOrF^qHUEern+UADz*+j?k8Apk z>vkzHJkCWTgNSG(0=s%PGmwP+Mwum_Ebgq6+a%5BDq>eoOsgV%$S=b0OYJ~f{!SEQ zUA2$h?EKQ0StjiJH!gvXo29gEUB4cSqW9-q6BlB(PwB1 z!z~dT?a+Y@@B{awW-!PnFSHFfaKKNSuXAkinw+JUcJ$Ot=0jJAZwKv~lhOIV8cw?f z?xJi__;O_-g`8VfgD|5xscz)t01e$-k{ZnNB~h^Lzr<5YPxPvZ5Zgux+a%q6t<)+Kbd&!a;XtJZJM*lk+01rU&oVOR_IYrB+Qz0 zIeT9q_mn2=@7UomX>Op(T|7kag3NNQ*u~S+MP!BoNbnb=vI1}^NE$#LUvXklkV!CC zEAq!-N&HF0)gM(U=SjY7D%m6!LURs|F{QrR9r4q>&1-!=2QXBI)Jc_5Wbxoh4!jC$ z&ct+Dp}fWr%d9|8s{r`xRHU@95`3VAK-N4z)@ySqDB~UN8bj_$HJBZWdad4gWbEn) zk(n9pk~dd*c%(otV$Udw3e?b8p11`%hX zc~{pCf=t&0De~gVgrQZ~b3+4<%4RFMuoZ|D0wy}gqj5=WST@D(Dp8Ow0`;S&YupIx}J@XW!hWpD6*fYJ*9Ny&8Zngfkn8+XFTpLkxFk^3Nd`4A9TD@|$e20$Eu{sO&)8e2f zub=~-Erj9}FTOT*&?XMw2=FsqRnw0@J+7ls*vWpMVErpZ^O?IISTT>{oUQT&r{K}4 z-3bux&FSB{N;M$aYH}vmU&)p6KOgI+nQ_xP$AQ93$4r#`a`j8~A$!3Gy}Q7XX{wka zy)ErIEiy$jnZj@Y@;0kSThJd{`}J)jX!*pQ{rYxPqB`QmYFdKu_Y5q{nTrsVH#M6P zplAhT*K%q#mjk4=D*2#WjXyjjSDCqp9E-nm&pb0S$}de&c6EabGFMLCL4p{vfR_5L z4I@o;t)-u6xjip?Y2P%;3(@nHV2@~H#crOUz(xh{>EtHb>k$4ksZX*#Z@y4xs*jX*uGD)Cf(%xoLV)}k1QW_G; zDDTnzQU)OEKN*wb@UYBdR&-va6atiRj%BIzU83DGEX1xb112irPh ze^ZI&a(H!A_F*QyM?qf3KS_w-L}*I1`7>I{^OS^BNL^cR8)I9}^lkayM_(pUW`#Bt z;T7Z}2^Vwc&eQ}h*h$@HR5ub=GHG~0QnM!In2OPIo*(Lzmtizrv+|$(4H~2cV{Ik? 
z(vFFou-g2H1j8s89Rc3oGKLa6=qriZu{!+fk2g^9N))9c6N5#1Xzj=6rNTXMa+xpI zs?6Wsp?(35wW(x^d2P`<2Jf}6THzy#6Aj9_06Z%4vLvrm7y?SdP_e!?9&HE@OC-C7 z=&o@S%JC`Xp-4BJL8qZp4Ww=<#nbIRHK*v>*yz8WJ*nF+eB-xTkv0+f!D#vk1_$=o zIu~G??(x|RR9OEd?v{MM?fHPx|F|aje7F0G6@1@~qhB8x-T+VYHSiD?z$K{RTz>vM zZLzSL#+mY6Wb$BkCd_*HxzzI8dWmyGf6aBspBe>GZPB@7+qiMF;5;!p8R(e`Tis5m zUd~!^q4g^sI^m$ApcvbROubgU~#zDLy+gWXqLl3k5x7)^#!$! z54nPF?z<}mi_@&x-7&&y?4NL%yM)cYg{*x6!!GoV1HMAgh8KbfJLC@p9>pg;G7M7# zqT;4*=nw8UYq{wq;Y9q-)C(94QB?JJL>jZ*Q6QuEuZ3|h+ODLK9hOf!n?!Qyn!(%*B9w%T z6c?AL`+FU1dnT?P|Armd5TaIe-{ZVPJ7TxA1~`$_707@-gV?K#tFv}G*8S^&=t8SX zv$Na;ZAq-sKcO{07cnq)cXz-02HR4_ZLd~$-vIMnZVe=S4P6i(VtiZ%J&UT;C<`r? zl+Lx6;_(Su4H$^#sH946j$L7cuA-E# z3g2|rJEc|yH9brghTq3Sv8i06#Q!VN9Y+tw@>=YH-<~k3PcthH56NL`gX)n&0EC zYghg;ep0fc(*6^?eK3qY%B1-c7Fpi3^_Qzd?iV9W9QLJ0*SgATE-w{yptd(r#T0^a zKKUu5akpi*<+z>stiX$U7B;~5hxi_8xU(9|n(y7{CtGs00ooFT)&}<~dL0QlBlyS1 zpW)4n7uDWUZYe;6#z8F!d_Hk$726aWL@FXbt06W%3Jks7M!!wJ($`<#^{GIDv2vDN z_V=)N42vFGu-ek)7vA`K1zX1iA9HYf%s+7%E`5H~Z8h4Y37IMpv{o=L4Er2yQH$g;uMMS=# zM4$Im&q?38^v7SfjL+}KJ%dS9iuE094Sv!c71~!cRvAu)Xj`IYNRIiCS<7Rj(HaxPprlTPKs$!A=)bS z<-W35IdomQb0BfP_zpLotdjI>xMI7?9EZ`B{0K88CNZmolz%iXULN;soe)22|M|?feDP5 zcQwN`g*g69(v19xD6QhEbb0Gz5jZsC|9O>C-Q$$QtWynKe`&~2G6Tm%WaT@%%%LRs z*6R@48w;Ho6C1;FB%gEN_15{(t$!zJsTYseglMCsK|Z1j%-jKVU)G{XWmC|G&^X5A zd`(F0G&&i|fgX#jEyT+$$WE$jbBKE#-@i>QJ^&9yU25Om{@&RCD`Xx_cweQtSZ z$s|a9#%2x{`akpIbC>x%G=@j5)Bp6IENo?>N0)pQvpN|im~=sbA)!)Zg#1Ox!hPaE zasVd1J@+Yo^6Ciq1nT?VaQnVxZSO?szoW(pe8IX9)!hx6( z88@uHbiG-B5{j4F{M*{OnBrGc%-!9?$nYYvpL?*7;;H?1h66xZ)TZn%F7_=G4(xp8#7ItO&dxtv97}KPYFVtxG=~!dV zhx-KRorO#_>@#0`>0S~o=n;?kjz@m9SDz_0clZq_-4jZXHMwFF9XuBAh2AC86?l6T zVqDKNuKw36-G#m`@gw+@232_$=9_4k@- zdyHW*n7*Akom(5CkJ)$7(wz{2s2PB@vh8i$fps?LL_d~*{m zl1?J8!Rj1x(TDCLxujG$e=qVll5TbWqa>w1mn!T3Hl9G8bww$xLDZP)O`3Y`VvV+e zY+(dU77l%9DyEqV5-aGh52VWujP^&Gh_)-=zl{{9_0&FDQ_8`ZR>t#Mx2)*t6;@nH zv9I1Nhy~01!s+d604c1{iwX%y=!Mq}53|g&xLsPk%cDrYtWds9Bd-rtlUDG&wHBen zy~?%-W`)}#*D6|c3g)TkQ8~#>7w+MSN4x^M8gMFhq#vk8TMam0jxMv@;}%} 
zD43a7G&t*QwOt4o9<4UtzD*vqkdshI)33>{FJzxkk^%J+XLS(ty9!$T4Gpe8F6r<^ zs6e19&_dwNsNsyaUHTvGGuU^kFP{EoJ1?068W_Fp;F4%HCFqdZL$j$#-$^uJIqGxt_;5fgqKCzINUALbkMU+KODoH)*Z8BufH1=_-r#5sQvO0a z%3h&2DCN{cVu+ig9U&)^U0Ro!K;16BGXmF6OJ_Q8*?mpe)@i}BzIq3F^1ar5AM%Zp z{1(Sy>H9|f*Ujm9wG$ku+U65}+5`Ct8F`Z8?6G1(L?QCIhfz>eY!uV53WBAga;y85 zv$JP3KD?1NiL*8d<9)d^3>9Ef>}e7o?GJl4KEQNzPXK^Mji02vHkJvg^k`cw*Cbx@ z1KAnsNY~qT@98!Nzof8@ooHane_bDBzcyA$Hejgu{D|$j8%}U3Tlg~qvSBZ=7J1b| z@VJW_6#XP{ZYiy@nQ}A5ZCWw}7G0bf&G&U{gg?2?lCCRP(B+Q7?LnC2jQ;jO&4h~P z6~oVz*i9Ka;D5EaD>*R!f3*M#7B`EK9qYcy|9 zAj6b`0<-7NxAW^>UiT}m-&12=5fCxFEa4DHogwHNrKG{Zhg~@8HAJh7CE| zLcdYyWE2ocq(_xdBjq|;Y@?bjxr-!$s_M_K9isC~E7nUPrGBahLuG#j=#SPJa3jG` ztHiLpX?abgKVBB1k^fO%7DRv)1ti1!&vSdQu)1PoNsK7p-{nkuD47}`{9&mooW^jk z$5*Fs3r?M6&g^In@Ph0$TRu=U;Dn6YKu`!2ZpG}a(RCnXvGqEeD%X_`V zQ2}JMa05p=Lnj4WR@@Lu@!*IU299maK7P=>FZok_Z^hT{^=H-q(8LF7O z9kM)JyobaCTtI8^C{OUGb&R0zwd(sb<0shu{>by9%63-id%dK<#W{C57 zt0TJuk`TSqJB{*tInal|?(^cLe&vxf1p$qlhLtgcUpub5`@w(H1sKcak)X1sOhafz zn&Wh$(zN@X87S3DDKi_0rKA_<`B;@}_U({f_GQu=;dk_w}xLb1e8w z*!^`aFtr9%HC(h>ZQr)O%gN~Wj3oHZ`X1)@OvTr}VniJOt~5|&IE+=#E+t@TGHe$U zFzmonYH{>WGHXkIom=0DGbt8z-I+CwdY(Y@pb$NKi4>yI?|~}|uWx#w!qEFuH|0Za znjT8{$o6`ckM@NVE{4*K3&KpHWe`=CzW)EcZhQ-yWve@-GO-!gQf#y7Q&d8-C7nFM zvTJtpE|rG-33DwU0#KtnC2$+L0><0nhItK0cgbK(fx(x=DZm35VfB9``40aq7O23* z=On3|v{-zhh!U|DvMf*eT~YMt(l z%=IF3i7TSlF9S>NGcW|dG9wU}E`Fu~01ADuiX;>_7|58#mje!hqa1!laO!g4)rrQI zkuydOb5a-=jY2XHkRoDF)*}tJ5-Ukpig$DmY&JS^L@H{~oxWjYV1zMAn|8jie43Ri zb$`g5@dqh;@Itm)U$LgrdUjhcg#8)ez9&J{sjuiHZmyi1dgM>l9i$s~FR=f*BmT zqEf3?@pgK~YY@e!$ZGB>2rb90z$LsTv>8q;mcBCCyGAXxZBu88ispPo=ZWlAF&*myGMoe2`~6Q z`*c(0_mJlP_)Qib*7`=dV=HfW1noBM=2dB{L`k%yPMfRO+M7H5m>#Bb|9pp;ng;He zxc!Jt=gbPPCu2navCY%^tFnoi1%2>|HYRxyw%aN(FCp!1uxysPJQ!UQ?g#hsndg8H z6Eiq^9MfwgIvJ($UX}#g;+oxC*UJqC_!pk5*>G8#^JIMiaCq<>0eBjZW%~yPfa!Ht z5SwB*4{_%){?9m<52S`@dJvpUVZ_$wb(InFF6U*zngxmJhtX;YW=qFX*zJ`H20u7B z8xaf{6w%BJn_U&@30IlflPllo^I1&V>to2#AhT1pSX0Z zcRm+snaJZ`@R^lO$>1+h1`#Lw=uOc}2L2 
z<<3xL=c;BwYBrPm3cdbT(IE?`YL4^r{_Y<$0zEFi#l~KVBO)~Kv!wZamcjlfuw*HL z3FYv}6}?QA)&1Bx(^)cKi*!T?iGWYcsdtH7{o#p08ZWvoPwa90yra2ot$FMJz=K-- zVmb4KtT?1G*s>3KzP43c()!;2xW#>d5YM;VI#@erk^iD40fQ7i#`5>GW7X}sp-h56 zmV6}vg^=D@N0aK5>@6JaxnQA~0pAf8j`Igxb}z$f2N#zJtXc+p-zSL6c7^JgZ0o1h zg-k!@yfXTk&Syw_)HE$@a2;eyvHKAAy&yxFXvl&Yj$2ARoU%VWp&*@-L_e%gS{{Eq z`wEF34@R^^CWYJ+9rNMb(=6`H>U4~@KS_-nRV*+>WM@>gu=mA6S_`Fn>74X)O7*i@ zD3^Mt_QFB27Je2o;Yq*+Vyx20+{cv~{o?0PtE3^Yp#H?C2AWosmcLEpsPROo(8`f+ z-4~poh8w-HsQf96Gxm*(HEl{MFLojrR#BHiMHQWPG#itGrOc@E$|bMX<2`FVe;Q11 zO1HK}6-kPt3|z}hma;>>#;Sra2-ZW@Af>|?u>PW~plX3&4K@-L3qt9xc<$oi=~kpJ znGZ=21_A57J)CgM2s9!MIPfteA}6~-nVR?c<%D5F4t}ci3bxtdY^MotfX_9%my^d5 zD%T-LYh(;cQAp?LpoAA4|H}533DM`%Afq7l5B4 zr#x@lrUmA#ahmQ?MiSIav&vw$W%O&jrNGS;KAhT^PN^3e{V|HfCIw1)f$V90mFS8s zw-4?!gU}+~GO;nSq~{Om--?5Ml!qJy0p2$^k={DN-j+KJ-D<)^qfJ8qC zU-o1|9Yd#vzMKxX)sd5%`C9v@PlD|1ZEl6T zB%g^#yN*W(>MwxeuwHJ$6Ek;S_Bw3a= zzDA!z2LUm{k<%QB+`yt}s-FR|Y~&q?eL(y2AJGsqk)1gLL`63nG*}~bBYd|yVEr@m zU+&c|t4CSdxTh#6hd9ZyhI2a9lpsN%K>`LFZrC+YI^GXpJs*C8BSl}YvfikCpPSq7 zPtV_wl<}tPhnszOOyxDx>?zv0qxW&i!%bc}SJ_xXwC{<4UPG}RNN%(^6p&`-PJ>KFOsGlXv?;v6WEGGN5boYenmRKjG}r1%l}S1t zX9?vUsGH2;Q)r$qQ#*}fc!J3hHRDLn70tzxJJgr`Lf4S9_qeS4f1SX8_XIZrg~QcG zx20zQyeQfVmDp%i;HI$OT)!Q!-T?}l5jww)U={TO`O{xRxqPagA5<*hBml(L^ni#> z-!$5XQ8?p&Mo!C@i;%-JyBFV~XXo=KumHJeWsQd^6I#_n9LH{Nxue=PP$@r;(N{@t z6XCyT;IjBSva~B0dwgxea9WH8U_ZS9*qID6>e60bh6Ti$*FzI`=@i^7Lh)O0rI&@$ z&X@-IorsOo^y#Kwjmsvmp<+p$bpqJ!VN9DXzDPs%j$6w$U2c~lws_;m%RH9ZHh<8# zn_ZX4v+t-z9!;A`G=`bT1tX-7zSd>Px~0+WhdemDe#GIsK1S_a(L~UUc1cQSR_=Kv zppGPmm)7S4-x8&mRdjGkf>b2~|4q}i<*B~S<&_g+3uNe`*|ROlkBzveIRS@?xl21! 
z<5n394gwFhOOs4Hg-bRA!Q2?s&(Q3Vy#b|GUn$eD!)7{E4M*!*@Eu(&Gdi@OZZ%9M z@97{iKpB=Kw_wU9Fmjnxw zkrcm0WD^+#CQ&+zT5Cu!COfp)L zJWk&DBl{Vfq))!1%H|*se7h{K91gsGwIxK7h!i|YiDewSgE0;NlwdprX)Fv-kz~3o zS_@-^)6_ptH|vccMx&t`-t;$Ztc-EX9AS1BK!n~5x5uvY@r_{odV%BnT*czrMtUat zx}UxVe!kXTua3Am%*ETG@ym*MLV4c2Rq)|UqdUqgim7r(4}fk4z-nsPv(F?Z6M%l* z^Pyrg=8)D44*!1pe26lqD2dEIyt!G{3qUU0_sp%;BHqVsH8_5ac}hi#Yxw!Ncc;QTxqyl7Zdl;#Q$W)R!n)ibo& zhKptlRYd^0Q5bI78T2p47Yw}pZ6aLWbhbyRQZ$j1cJUGZrzu zGt$IID;6r9JVz3cTsh3_9Dthys38=s$!-J*W#BP&$TG>6qcDp`+o58yddM;=r54lmGw1|v`rlvEg;xDvTrtKj;@A{(NXfhrm<#y(gS z@{Ikxh~0hRs4wU9yMtvsMLMtUXmf3T^+zdbeEsnwg;4Mn9Q$SIej&MwJm6v=9aS-1 z-9F1?LJY?;)j4F>;=qfX+<*GT$T!!T7rgvVhidxAIP)sjSTbOU&SQ#`z_@g0Bw^Vs zHv71e`G7R*e^6-uMW#Jrtb*_BqLC`IMV}{Oynm>+N-`<~!QQ#-3-wF1va@Omw+@zL zS(w;XPq|$(lM37UDfj_-{(7n+RW-QP%Tz1C7WdB!j6dgjIXMns$vtQ$2}hBxOpJ-E z)p(S%xs~nGYoW9c(QkIMy;wo|>`V;izw6RHLb6oDY;Fy(gAQKLk|CBNt@PHSYU=15 zl{}|}6Z4hjlG$nUu*&+HJfTP-?(ItuN-rYPi!~2gBJIRqY-J>{x&s?T3bFBir1-#+ zqL@C}nN-5c6}-{(B(2kB!76Iwplx3~TFt4g5Qh48MEXG+5Zz)+3tN7b^6?FH7@sKdaCc$=JT`v@|H7eT~1Uq)rP zCEC+rErBH}qw-D^?o}>9pvE*!8=8RIb*G$V3dfQ-my7ctjG&hP!Z<4hG&(27q8VRk z7jOk)hs^j`pnW0NLP7nM_)gCac9nyvCkH=tl9b1o5TirhMqtDRj%7oyjSSL}QyV1Q z9~;+5D&!}O=~BpJKGRZpO{toG!aou;|!>Z$zZG`%Qp>$0ZcCecG}# zn{bh)F1J&BfB_KN(ub9Rg`iAEghXiW2_|AM+$s_1_-|$ur*wtF(P5xiiu${!Sc-LL z67M$yZ_iV!xDTx(r3D^~pcX68u=TVo3JSwixa%-k#2`E8WpN(4QclT>m8MWyhkWfI zkH$_D=ohuBc{(2BdO=x2l)u9Z?r2O^23IvOOj#~*E9ykJ< z_s+WCorGEVLE#Eg5Y}=9&xUMboP5ELZ*zmfbEHm#nlq?w+gTTWJzc%snFlR9j_M&{Jn&)}w4*&V40>aq<=3VA0~)zl12(BmllN$*qKSn_|mw=+_!{E$pKEKyhF z^%Zj(_4FbxB)r-tv~(gGB_TeBzj*xgOo~{FL^uE%8>xmUktRZ)e+?4pwQ*^ux{J0j zjUHoWpLOI+OKVDgo58&U3fFCoXhHRt!<-rC(Z`(q+KHzZhe}(0GA*F#e=BodlH*jv zl;N>23-~M9fvm$WqpdN&Y@PRbp9O6ZJ1AWJxqLHum3zWYD$`-vb35XZ^QXI$U47>S z$4*41(m(~pHo;f8AU_QPh_xG;LCqw88v7R;Lp}!jnc9b5bx@y(E4*5`q2AfT#fz9U ztiVYkEJ%~W(q5vYc8L7+OlLimLFK1?My496=*H%5OowWnOG9p44#gzGn$Erbax9F* z{(~tWEx;vQrg`s<*Ttd3d?J6G53+s3$ihJvU;Ky9d!KoR3kz*UR#bo8b|%{!>paZO 
z;pLuY7~?u=Q-2Tm`QbNQbgWqS%LY4IfScd288_o(4jaca%)yzDk_sG%DZk0WN(}+2 zaisFw_VINfCJP&Z2;!|jx>&YzJ2eOVBGPHiq)8yjpr*fGFG2O3(>m+PSSVVJd^b^y zO4>hEt*%!b?%*Sc|8buJs#f}j*onxr>XgYX2apb@eE=l>#DXc4a?Ir$9%~kk#_9w+ z?r)dh*P5^?BGglpw|95x9Uo!5-akXn+vJyI$5#qrAGVEB`oVSyYb=$EvBmDOg_Rdz>W%~cpivO+)SPz`? zp(jrO_m=M}>;44c?igVucgp|}B|Qq!h$Mz9cel}Rew8(io`5<9W}hc5R4C$pH0_xV z{5jV0EIsF`Nj@KnZmz+DH;#*{qQjTUq>R?8)%4DGITSCH0EJ9(me!VMgpcVZmSC|w zFl~mDHtNHOz~^2`ypSB@TVv~*hqS!+ey1JM2?Yv^N(De&;d3BeQb> zoM!Dpnb9%90G{!3@l3+`))b3hq2tkrUb?mycq%yAKwV`wR2vR6MSBrG^4S;&C8@nh znh7#u2ymiq>?R5JKuyfJrhiYXf<;VZ@Xv*Epj7SG3^8dJ=P0mA{V8%ggF?QEUZ`!U zr>)us%WkmqOOC`xj+tJ8pSp|ik2J`lsPxM3J}N`3FVx)Dl?ZF!K>c(C>H74^FIZ{$ z`DOcytlRUGeJKmG%#DL9ST#CD5-x1OP)HC$T7rko(_GYzO8p| z_o36{uYM;6@jHRIOWjAj&Ds49{*9OrxaDrU;#a%1hYl%?&G)z0m-S&@{2&?e&2QzW zfG5jyra-y@h#A~l=+ff!)aa<1E|Mnsjm1WqADXh^JsVX7eS~HkBLAb9L=rP%#!*Hr zYbJMu0Gequ22>hdSW-aT&OG*!ab_(Ahw%Y-BIS~*AGNV>W|

r2~S5~J$&L%AaV zTO>jlzetZn3sF4}N$L)$z!JO&d0p{dSVn64jPcImB1RDQ{Yrs@Y1h`Zucd&rP_w2! zVgge1(caJmDSzP4EE`Hz2u+S$IaU_?0;lajqmbK%iL_he(ut4iWK{+!CG2fse{UOE zD*Ew^p~Kju@QCWkTgSy|Ec7QZ zJ*g189dR9>1FIS{+(sQ_gRqn>XgO8K z-4*wB_|DLM_n~;>_m%gB<@<5Kd0mB@$M=qI>l8Sw-rBlW$m^UT-l$ibbRdh|;cff$%hKb(ak3Nr&IR6 z6NcyppC|nMM@I1X#*GL3pncMdHvIth1=gfnDRmh%i1>uQM;jD4py>tWAFwXY3tvP? z+$n_%y$9!EnI->*+>+7ww|YrGe46cy7)J>(RvMELp$KDo*&;WduX3&tw129IJ9#=W z=+u?c_-4@(_lK)b*m0U>>ly(vk>JS4cH78Z1}26^*5IcjHmd)bnAP;+#I3uFL&W73 z2vBiXH^!yo$cZ=Wg3zKV>%lX@+pNyp)ij8vguNQ-Q_|Q z(wXJbHh+I9yvYD&{Xe=TJJuT2Kq%a3gi|B;4&2BsPpwaXhfAvn2_tFpaT^I--Fwm7 zP)LWX6;K}$|N12zbsw)*R-)|4QB1EOzSd{Y21jC5zjXV=K7PQr5v`CTX$ zG29lyk3siep2d$upe?pzf4IzFNf8v(r11g8WIOMzcvkM-JXXO|K=i6>*Ti z*Txoq(&hbMEr3E}Tme3umuB5umMlFDI}r1llKuDX1@)Kfo_Rngs~N6)o)4RvQB;yc zHi&9I+3j&ak`qY)`s(#>pc7TMh%~I6YKroVY|4-E!}DLDmj=UfKDFdH+r@?a-QwF* zUX9D5VJ+TC;RJ2?vS2~sDJb@jH~jApU+;puyn^rWes4ry{N3-Xn%J4JhFyJT&?|0a zjXss3CSBASS`O+D_wF`09a*XMjP-+@5oEe2ZrqPO0K3&)C2gsZP9~XPK7BF(bwlR$ zSn)oyl~8orNhLTxOnMGVkpNKqjD^-8XnMs;z^=j^G`7QS=5l?P*F|ymzL8(C&qb*r zZe-)Wm4?-C!eL?5RtRy?KD>MX2}9`ph?4v9it>5G8FziU>Gk})OVC=D*6L;l9+nD< zdaL%0`EsLSq{QQxmxL~F_6{+iJfCN7o^j6~XgckfV6{Ny`1NCBqEwkk%qVjN>TXr# zzIR8S2SG6A_h5MiGXt~77SbnbjBvn@ow|wjBVFa1g#Z&lwotmGO|9rI*WH6jKbg5O z@qqt<(Dy!ultK0>ulGuy)N6(`*)`l0>iyh9ub&gql%>J)r6`k4IvT*oGO1-p4s z11sNaSf#nrO^vk$cp4>~VfBI`Y~3r6jklxo7pDWNT06Q&c4rK?@n_h6PAMdsfY>C` zeyDkAqt@^H&w5l>;<5t->I;hM2!nOw*M}$hNU}#~O+3<>DVU4UJ+mHXiiMClLZl$09T+$G z(C4Lb$2!Vw(|=g_(NXnJ8tEqSEV&D4;+%10hU1LTmx(q=?gq{Jbe|F`nB6UWWVud8 z5J3WH86%#Et#P_*f?D0Fh|h{ER#yYiB$C-meFg8!i^p;kN1YiF3KacAaXoMOl_xsm zLK`uf118%~x8E*f=`NS=2^(V~bzne)To!n~;?aK9B(HAI@SN=ypqet6zhpnwT3m@+ zcBVriL*W!|*ePgX@dz*t(OF6Z(qcx$(U}7M!-xZa4jw>P@-x2SBw!OLoE7AFDvY}7 z0mJ&I!hea&Er-S&e3&rFsr-yEN(9NO^smMd@|AD#48x04jSG2CxzUf^lg{;KF765( z&1SE{@V(a*?CS#h2J3&c#(l$O41hIjq9=hx@?4t3!ojP(Ai z>vcVE0#(*x@Els`y%eeJcR%LHd24c5n|tTprlswR6maBSn|9i?+!Se6>DyD*(|1bT z2oG7`&OIcSytr=4x<$v@$d)GaEd+2kKY;_CE|kf6imh(#zu{t=eD{ke5PPmncm>t~ 
za*DhsV#)As7N`3wO|ZA8(>o-utFw1tx@S0#H!)Tw2E85B-2CSf6~ZrCwF;|e+Pa<} z{GuY&*C#hl&Ox$-INd{lBShNmd|vnr{_^Q~zFL-;O!IOceHQ4%2DXOaGXxhL944l@ z;=a)uy&p?0Sevg!f1Ni2E46el7}QBWI|Zl& z-R~5v?C;P6Lm}96p9HMl0)Nszd-kJv(NFTyX6oMH_rROh9sVI=8)V0!QXO`1Be6TU zikN=-^Fq(n+WBO`CG#>C3eVg%iAVP)#&&OhgQuP3zNDIel+2awWe;-Gz@>%k(Ga1911`cFIxUh6PK3_#fR8;T*e(qb%{+-19jUz@mM zzQm>M7c3Yb^?Bb$N76F=k3FtFaX(Fn@Cz874X5=vAM7ep0#w%W_ztOVm8)jCy=<3+ zHz7k7Vbtx3i`zirBdf)|{d}Lmb)8{qTaMaK!J&OpWd!e3r^vBu)N7K;o3;j&_!Vs$ zFHJsXEyT$v-!`1;_pouFhkv{us;E}F-TU@om>sPid&^b7M%zT(`0M|$ zcFVr&jVN=EZSZqn@XL|KEFxC(mkIL0h0~`rqENevX zn21)#k7l|Cx9nzJFn-Z?`u|K7Vqk+g>LTq6XYsP1#h2uEuQjG9WHKWQj2lAdp?r~N zJlf>Z386Kr2G?*~e(ux%K+)s(h?V?$Xn9u5^Mlda2=vCSy7Y;+MgwP)ld>iETny9Rl?~RXyKV0!XF@ zXV96EoXo(L#Nguvgn#qGdyW!K)tmeGTRU1@+qvWY!J|FhB~erz1ay`^v;w=^l^jjf zzyya{!#$#+iRo{ecv7wpC0|y&xqc{eoglo>vT?_HkuB}xh}K_Ci>gSv4=WCT2&iAZ zPbC;*U`%GK_!e{eB_&Ocs%$lZ*czp@VwHQCJ{F5DaaK(yu*8X2Lu39@f_UU)@jkSB z&H*-E_zPrY1Bj`!1yRS`N{IomcWsNVvGaHI(Xm`N!=sl52txsmot|3~zD9*T){6$V zr;QqG|A(x1?9MFQwzXs1<`Y{Ln@?=pwr$(ClZq><*iOZ^t%@tjoBg)7+c|5UFY^z~ zdCze3JR>XjeI!>TNrSIZT1O%2lqya_i zbEv$V#*OQSHDa!+Eu6Iy(pbwZQZV2j$x5lsMmVcDoS%)qI%Yb)@PXtkFYfXnpr10k ze4x6o`o^(AO3DW{WQKN748~9_njxD}NL@_NohmfQq^TH*!gbLPuLLDbO@zUgV_s!U zCyaCgxQuK>(P!tX66YUJ9O4Yl(#ac(_$KQEo-Cg&FlmFkql;hXFiqT6zA*{QRPE2> z6N8qxVt0-PgJsTUSC^>|_yO$t1@J#fD^L|3;{}@B18Sn`6Q6#x}6ZDR+ zsu#1mbmptqR}E(g6I!n$dym@k3ZXd=5q4r-}_e#xpo+|NIAMe%ma(`*R$)-;-z)iLQ=3j~qMd{=h}oIzFV zKdEX@K_|LE7HAlKX!A-hYaf3wU)SipJGvM9!zFX}8jg(Vx|pe$CT4aIXchxG^A$#h z7=4v>&Ey$#CHkenY--bHo4PLZxDWsxrZgn$1;aa>K)kNqDl$3J2k6EJ(R8b@j)rC% zU2dI%=00UIki%|r=``QZuViA^vB!TZFpL$Yg|LN|_eB~6p|1u$-26Rp#)T+{(1?>ty5aqbzKh%SJzF?7sQn{c2^v3kKT-D#8rO zs`!@vGjz)4Gj(r?_6}hjSkTqu$|RIbS)(R#DH@q`vy_Q`1c5{n>zkkkz#O12P5S!d z4uD?z!SU_NGTzy5wYB@#71EYKUL8^0FJavZA5a*~XUdp&Fyt)QVq8ibx|eEN&iq@$ z=-Q9!GC`OT0XGC4rD@~FY4z&GP5hZEnS~8^dvt`d*9402eihB)!>o?6%!@>N-Yihn zbh~S^eejojOfgFuA9J!K6oqqBRZT{+1(R9pc4w#z6FL@h3!Ddvdsv1*we0un+VQRr zx9n^d%GjpLsyv1GnK_4V4Ak6$P$#f3l70WMb3!B#m-B%TS#0dNop?`1*tf>z 
zyDSnBUG<&ei0z&{w+|b22}zPEzZ0%M5;a=QcMZp!Kt}sgDh7Tcn4+M_KTr}q5F{K` zMFRLmVKPbj$UbtS6(r0#)Olnat!f{=WM$Ec*pW+cub7`K<+XjEMhl8M?q9;*E&e@x zZynyxVYX(RaOvimT;A+mZy%s||HW}UmdsUysC!zCYeYiP` zfu&~UMfLkP?7O|tVFsm@YQ!*G@gHj8f984hkA0r#uhd8t_dG;$zX#4sTx@3xiv6AY zW6Fd9D%4bxEg$O|sy0*-!BtH;YnTEsV@Z7w$-ehR15!%?gJa>*`6iaE6xQ=0Yxe%HlWy+Of6mIgGCUGuV#v6&qi&dq)@gs4DK}y>T)q7 zKI}t0PO22TPG%%(B`&OS(=~eOMpE=>*42qMpQ9rsF@!+QiqL+XhoXhogt;OQQzjrQ z+_-^ptPz;`P!|_gI?!BFDA~(BD<+CSH5{+l@8@N#s*DC2iuxx_y}6l#I9W_7m8LN~ zi^hALyczl8q^K#4>U>}w5K0IDN|%gv#w=q!QI%vtZPjHt4>Qw(C|k#!87j)-D43KI zr4{=7(7y6~`=TQboPa`Qb(jAaIBws4MWN=;}_a@;ALzRhxe_JgpeqGj$Ks30?fd;?uPOWkiNmj5d>IEx0aj4kwm3L zWC~TTGittQ8qcJlkW@pL5OggXmcto(1W&vPZp9*cn{B9R~3il?HfgVw8UjE4@NSK!TzB8%dYFjkMx_az1_&1fZ{3T zSvDEWZ9rg5?TwUw19psZe5%j|32!zMDw97z*C?Mmi#U-OI$<^>_M5eX1nn`?{h52f zFrH!M;t+Q8Ib2aWL(ub5ftB#}O80N;8jh&|d^T^iVgsrm9+WsDFkuvw0XVnS5i^>B z-R?POgf6c{sJ*FI@St0pK%VUu6I6;t2c`hjY&_oME}UJQ)5S3kgs)63kW433RRnR4 zgQqL*LRy=IE&~zk6lL~{Irhu)n|!}r=)s@VrB5J!^+3>3H~)bf8fhM1HEQ0EKw}Eq z#N=NMopIa`@Be?O@Bd#ijb?)2(AF5A9UL#%^jdQy!0JjJhv%c)Qz75%-R>fII`!K^ z$rGiJk};%)XF5ConuMWKIV>MUt7m-?Q zo9@>2FNP3cyu>MOf^Xp5E2@~X)|@d{_7t%)T3`@9HX2AD>=%pLtB#7UoU>*^T-Wa6 z1qPgy5k+Axqi)jIvfWZW85>P0!|_f-L6vVG&ZN3E8d2^Z%S)!*tf;Gah88;Q+gGJC zSI4yi*1hny!dgZufI7xK$98_$hlX1$9eG|=xy~_FqU-^f51>KK6RLzD;RZ*U^J*0s z2Tj`JdxU;UZt&(h)*r__MMs9g`FX*a5NTz046q;bikp|T=Bek}m)!Zl33&TZxGj9- zdO*UzX|Ucnd{tJPV%>it2Ji46sdq&~{ z)}_@*(|v6@C#F@ZNJnE!7dC<)QzWa;lQuPNjapEu20T9a$m5vLfuI`qT}>O3`(tH$ z%B&GFk;tHFSRljO!{d$e6~nb&`1^s1bZraX>#wU<0h~8fxBGmDaav6sIB4{R(l_H6 z@J?;N;K)vk+cHvUFBgk1&q$a0&hk~F@W`2*0YidLVk9qkRLo``?S0HdiOLob zIBmC&P~ej@s?p45jw>~$O9%~I+C~n^(W4I4ZL?Er{L=;1DUIipw<01RV+V*h6F-X? 
zI9cqsK`A^nULMO2ep@-*WP|%vluXWsU-14uO;I#lh|4vUklD?EajI&Dx6tAT%Od7OnXD#(&PKsMfa{ zf`Sxa84Zz~c0^B0KV9F(DI3_mvQLg4!kyR-gd;~r3kh$pt>bbPie6Q z2NRL<=)fPVZ!1U5hA~ODwlKW=9dO%;mZTU`pSo{eFnW)@+K;5S*jg%pPk14=nUq&&Wq5mg1 z8zD?D7K%BtV}}Q{?yrKh>?Epm@4R>?Wm+*1%Im~{uGxLslc7&ZmwB+qZ zfNM0N=bhg+oN&2OlKgpr>j~x{P>2XSl?w0OSC9I2$b(vAj`H9o8X)M*$VVZJBpN{) z|4m>nRUO;d^OoV2iM}e!ABpkZWLzUxf9ln1+qUsu*0uTuG)^rhPFYCRRhLir0*LYD z)_pFj(5X89;iKnLR8_blhN&hY(yB7);v&reGI4_MP>w@^M3kMO#<;k;yPwhrB;!)t zRBYd@f+9PwGfWS#I>_~MRPll z_2`M{o`Lds6R|$SlS$48!M6R%T8DXnm0?>FIm_VOW9>u^P1EJ9`W_PUV>6Vd4}#5{ z%N53cHbh|IE?rm!w)A3s~t5k`Jd}@i;qy6 z>M-goFK6Fa)Hg&jC)fq<(J0Q53=mF!+_iSA`;+{k zqm_Un?g4Am5G{ke?;Br?4&RHVIJ2&M;0aQoOhcdapN%=CMu{GQ6` z1kT4L*OU(2SOv*y&zLF_d=H^U{R8@jhtt=nD{?sG<>j5`>ok)imR$i`=#oH@CVJ5X zT8ZxtrJmIBua?B;p6QcWZfRi{dOKpaw*Y%lId^&IVYXdS)8>g@rR~Yxh`Y|rKuybT7BP;b>{=XU?uQAdC7Ja{t3%Hm5SJ~fc&wTIgkIvp~q z7&sFMqLv&j@0AL-Vrt71jmoYz8}9oxRJcN8-obu_t+(=E4q^B`Q(&>jh>^K6o639K z{0^SNNYA8?5FH(rbap8fmw(^5a$ij$MjV%(^Aj-$w>NfXDc8Hfu$BiQM8D`bgiXm?lD3&Q3m+u|) zb1dx_)uh_LLoZEwz3GGPDV5WR)%QI7`?NWwX^p1;8fH{S?Ra4R3~GK)>cadIXY36V z7Arm~0Hv#^$8wSIW_R!%o9qIX%Wak8d&B!>-rF&RKY9_k?!dNhsv9tP?Jm
-| Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | +| Llama 2 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster 43% less VRAM** | **2.2x faster 62% less VRAM** | **1.9x faster 27% less VRAM** | **5.5x faster 44% less VRAM** | -| [⭐Llama **free** Colab 2x faster](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | [⭐Mistral **free** Colab 2x faster](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [CodeLlama A100 Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [⭐Kaggle **free** Alpaca notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) +| [⭐Llama **free** Colab notebook](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | [⭐Mistral **free** Colab notebook](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [CodeLlama A100 Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [⭐Kaggle **free** Alpaca notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | [Llama A100 Colab notebook](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Mistral A100 Colab notebook](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 50+ more examples below! | [⭐Kaggle **free** Slim Orca notebook](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | * **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. ⭐**Free!** DPO Zephyr, Mistral example!
[More info](#DPO) on DPO @@ -218,6 +217,9 @@ dpo_trainer = DPOTrainer( dpo_trainer.train() ``` +# Support us! +We're currently 2 brothers trying to make LLMs for everyone! It'll be super cool if you can support our work!! + # Future Milestones and limitations 1. Support Mixtral. From c1ac4d2707574868767345e76ebe49c8353f9057 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 11 Jan 2024 04:08:03 +1100 Subject: [PATCH 0108/1088] Fix some bugs (#83) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs --- pyproject.toml | 2 +- unsloth/kernels/fast_lora.py | 2 +- unsloth/models/_utils.py | 10 ++++++++++ unsloth/models/llama.py | 10 ++++++++++ unsloth/models/mistral.py | 10 ++++++++++ 5 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 70b0322788..2bceca566f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ huggingface = [ "datasets", "sentencepiece", "accelerate", - "trl==0.7.7", + "trl", "peft", "packaging", "ninja", diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index f8f5967545..5f48f316ec 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -22,7 +22,7 @@ def get_lora_parameters(proj): base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) W = base_layer.weight - if proj.disable_adapters or proj.merged: + if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: return W, QUANT_STATE(W), None, None, None pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 36ad64526b..c88ade6dfa 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -116,6 +116,11 @@ def patch_tokenizer(model, tokenizer): pass +IGNORED_TOKENIZER_CHECKING = frozenset(( + 
"CodeLlamaTokenizerFast", + "CodeLlamaTokenizer", +)) + def check_tokenizer( model, tokenizer, @@ -131,6 +136,11 @@ def check_tokenizer( # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25 # Seems like the Fast tokenizer in Rust breaks things! + # We ignore some of them! + if tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING: + return tokenizer + pass + max_embedding_size = model.model.embed_tokens.weight.shape[0] added_tokens_fast = tokenizer.added_tokens_decoder added_tokens_fast = {index : str(value) for index, value in added_tokens_fast.items()} diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 73c78ba0bf..f370c5bd6a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -714,6 +714,16 @@ def from_pretrained( token = token, ) pass + + # Fix up config for transformers uploading PEFT + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass + # Log Unsloth version for future fastpaths for inference + model.config.update({"unsloth_version" : __version__}) + return model, tokenizer pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e48a982b8c..01b9dae1af 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -343,6 +343,16 @@ def from_pretrained( token = token, ) pass + + # Fix up config for transformers uploading PEFT + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass + # Log Unsloth version for future fastpaths for inference + model.config.update({"unsloth_version" : __version__}) + return model, tokenizer pass pass From 59d74753362ff59e664cb6d650b564511e6e20f3 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Jan 2024 03:35:17 +1100 Subject: 
[PATCH 0109/1088] Update pyproject.toml --- pyproject.toml | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2bceca566f..2b5f039a1a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,15 +32,25 @@ include-package-data = false exclude = ["images*"] [project.optional-dependencies] +huggingface_dev = [ + "transformers @ git+https://github.com/huggingface/transformers", + "datasets", + "sentencepiece", + "accelerate", + "trl>=0.7.9", + "peft", + "tqdm", + "psutil", +] huggingface = [ "transformers", "datasets", "sentencepiece", "accelerate", - "trl", + "trl>=0.7.9", "peft", - "packaging", - "ninja", + "tqdm", + "psutil", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -93,30 +103,53 @@ colab = [ ] colab_ampere = [ "unsloth[cu121]", + "packaging", + "ninja", + "flash-attn", +] +colab_dev = [ + "unsloth[huggingface_dev]", + "bitsandbytes", + "unsloth[cu121only]", +] +colab_ampere_dev = [ + "unsloth[huggingface_dev]", + "bitsandbytes", + "unsloth[cu121only]", + "packaging", + "ninja", "flash-attn", ] cu118_ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118only]", + "packaging", + "ninja", "flash-attn", ] cu121_ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121only]", + "packaging", + "ninja", "flash-attn", ] cu118_ampere_torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118only_torch211]", + "packaging", + "ninja", "flash-attn", ] cu121_ampere_torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121only_torch211]", + "packaging", + "ninja", "flash-attn", ] From 4112eb4a3df4c0911e36211b47381086c963b4e0 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Jan 2024 03:41:00 +1100 Subject: [PATCH 0110/1088] Update pyproject.toml --- pyproject.toml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 
deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2b5f039a1a..83efab1641 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ include-package-data = false exclude = ["images*"] [project.optional-dependencies] -huggingface_dev = [ +huggingfacedev = [ "transformers @ git+https://github.com/huggingface/transformers", "datasets", "sentencepiece", @@ -62,12 +62,12 @@ cu121only = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] -cu118only_torch211 = [ +cu118onlytorch211 = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] -cu121only_torch211 = [ +cu121onlytorch211 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", @@ -85,12 +85,12 @@ cu121 = [ cu118_torch211 = [ "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu118only_torch211]", + "unsloth[cu118onlytorch211]", ] cu121_torch211 = [ "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu121only_torch211]", + "unsloth[cu121onlytorch211]", ] kaggle = [ "unsloth[huggingface]", @@ -108,12 +108,12 @@ colab_ampere = [ "flash-attn", ] colab_dev = [ - "unsloth[huggingface_dev]", 
+ "unsloth[huggingfacedev]", "bitsandbytes", "unsloth[cu121only]", ] colab_ampere_dev = [ - "unsloth[huggingface_dev]", + "unsloth[huggingfacedev]", "bitsandbytes", "unsloth[cu121only]", "packaging", @@ -139,7 +139,7 @@ cu121_ampere = [ cu118_ampere_torch211 = [ "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu118only_torch211]", + "unsloth[cu118onlytorch211]", "packaging", "ninja", "flash-attn", @@ -147,7 +147,7 @@ cu118_ampere_torch211 = [ cu121_ampere_torch211 = [ "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu121only_torch211]", + "unsloth[cu121onlytorch211]", "packaging", "ninja", "flash-attn", From b8b1eafda35d124046e11766aeeb6343957e0daf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jan 2024 04:51:19 +1100 Subject: [PATCH 0111/1088] 2024 Release (#96) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml --- unsloth/__init__.py | 3 +- unsloth/kernels/rms_layernorm.py | 5 +- unsloth/kernels/swiglu.py | 12 +- unsloth/models/_utils.py | 2 - unsloth/models/llama.py | 231 +++++++-- unsloth/models/loader.py | 108 +++- unsloth/models/mapper.py | 56 ++ unsloth/models/mistral.py | 2 + unsloth/save.py | 865 +++++++++++++++++++++++++++---- 9 files changed, 1095 insertions(+), 189 deletions(-) create mode 100644 unsloth/models/mapper.py diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 19902a3055..c3baac21d4 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -65,8 +65,7 @@ libcuda_dirs() except: warnings.warn( - "CUDA is not linked properly.\n"\ - "We shall run `ldconfig /usr/lib64-nvidia` to try to fix it." + "Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ ) os.system("ldconfig /usr/lib64-nvidia") importlib.reload(bnb) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 2cf3acb928..ec34880a2c 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -41,12 +41,13 @@ def _rms_layernorm_forward( r += row_idx * r_row_stride X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) - W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0)#.to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols - inv_var = 1 / tl.sqrt(row_var + eps) + inv_var = 1.0 / tl.sqrt(row_var + eps) tl.store(r, inv_var) normed = X_row * inv_var + normed = normed.to(W_row.dtype) # Exact copy from HF output = normed * W_row tl.store(Y + col_offsets, output, mask = mask) pass diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index 037dcda84f..4e9b7ba2ae 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ 
-25,10 +25,11 @@ def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): mask = offsets < n_elements e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) - g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) # f = e * sigmoid(e) f_row = e_row / (1 + tl.exp(-e_row)) + f_row = f_row.to(g_row.dtype) # Exact copy from HF # h = f * g h_row = f_row * g_row @@ -53,12 +54,13 @@ def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements - DW_row = tl.load(DW + offsets, mask = mask, other = 0).to(tl.float32) - e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) - g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) + DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32) + e_row = tl.load(e + offsets, mask = mask, other = 0)#.to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) # f = e * sigmoid(e) - se_row = 1 / (1 + tl.exp(-e_row)) + se_row = 1 / (1 + tl.exp(-e_row.to(tl.float32))) + se_row = se_row.to(e_row.dtype) # Exact copy from HF # f = e * se f_row = e_row * se_row # h = f * g diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c88ade6dfa..35b6de15ef 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -14,9 +14,7 @@ import torch from typing import Union, Optional, List, Any, Callable -import numpy as np import warnings -import gc warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f370c5bd6a..309625d962 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -15,7 +15,6 @@ import torch from typing import Optional, Tuple, List, Union from 
torch.nn.functional import scaled_dot_product_attention -from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from transformers.models.llama.modeling_llama import ( logger, BaseModelOutputWithPast, @@ -46,16 +45,13 @@ LlamaFlashAttention2 = LlamaAttention pass -from peft import PeftModelForCausalLM -import gc -import peft -import bitsandbytes as bnb -import numpy as np -import types - from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig from transformers import set_seed as transformers_set_seed from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model +from peft import PeftModelForCausalLM +from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit +from peft.tuners.lora import Linear4bit as Peft_Linear4bit +from ..save import patch_saving_functions def original_apply_qkv(self, X): @@ -110,18 +106,15 @@ def LlamaAttention_fast_forward_inference( bsz, _, _ = hidden_states.size() K1, V1 = past_key_value - Wq = self.q_proj.weight - Wk = self.k_proj.weight - Wv = self.v_proj.weight - Wo = self.o_proj.weight - n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) - Qn, Kn, Vn = original_apply_qkv(self, Xn) + Qn = self.q_proj(Xn) + Kn = self.k_proj(Xn) + Vn = self.v_proj(Xn) Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) @@ -156,6 +149,28 @@ def LlamaAttention_fast_forward_inference( pass +torch_silu = torch.nn.functional.silu +def fast_mlp_inference(self, X): + gate = self.gate_proj(X) + up = self.up_proj(X) + gate = torch_silu(gate, inplace = True) + gate *= up + X = self.down_proj(gate) + return X +pass + + +def fast_rms_layernorm_inference(self, X): + X = X.to(torch.float32) + variance = X.square().mean(-1, keepdim = True) + variance += 
self.variance_epsilon + X *= variance.rsqrt_() + X = X.to(residual.dtype) + X *= self.weight + return X +pass + + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, @@ -287,28 +302,51 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - residual = hidden_states - - hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + bsz, q_len, hd = hidden_states.size() + + if (self.training): + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states = residual + hidden_states - # Self Attention - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, - ) - hidden_states = residual + hidden_states + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + else: + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + 
position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states += residual - # Fully Connected - residual = hidden_states - hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = residual + hidden_states + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_mlp_inference(self.mlp, hidden_states) + hidden_states += residual + pass outputs = (hidden_states,) @@ -378,6 +416,7 @@ def LlamaModel_fast_forward( if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length + pass # We already handle KV cache position_ids ourselves. if (past_key_values_length != 0): @@ -391,10 +430,12 @@ def LlamaModel_fast_forward( position_ids = position_ids.view(-1, seq_length).to(torch.int32)#.long() else: position_ids = None + pass if position_ids is not None: if position_ids.shape[0] != batch_size: position_ids = position_ids.repeat((batch_size, 1)) + pass # embed positions if inputs_embeds is None: @@ -403,19 +444,22 @@ def LlamaModel_fast_forward( # Ignore attention_mask if attention_mask is None: padding_mask = None + elif self.training: + attention_mask = None + padding_mask = None else: if 0 in attention_mask: padding_mask = attention_mask else: padding_mask = None + from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, - sliding_window = None if not hasattr(self.config, "sliding_window") else \ - self.config.sliding_window, + sliding_window = getattr(self.config, "sliding_window"), ) pass @@ -479,7 +523,11 @@ def 
custom_forward(*inputs): all_self_attns += (layer_outputs[1],) pass - hidden_states = fast_rms_layernorm(self.norm, hidden_states) + if (self.training): + hidden_states = fast_rms_layernorm(self.norm, hidden_states) + else: + hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) + pass # add hidden states from the last decoder layer if output_hidden_states: @@ -665,6 +713,7 @@ def from_pretrained( bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, ) + pass # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 # RoPE Scaling's max_position_embeddings must be updated @@ -714,6 +763,7 @@ def from_pretrained( token = token, ) pass + patch_saving_functions(tokenizer) # Fix up config for transformers uploading PEFT name = model.config._name_or_path @@ -721,6 +771,7 @@ def from_pretrained( name = name[:len(name) - len("-bnb-4bit")] model.config.update({"_name_or_path" : name}) pass + # Log Unsloth version for future fastpaths for inference model.config.update({"unsloth_version" : __version__}) @@ -751,7 +802,7 @@ def post_patch(model): correct_dtype = lm_head.weight.dtype for name, module in model.named_modules(): - if isinstance(module, (bnb.nn.Linear4bit, peft.tuners.lora.Linear4bit)): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): weight = module.weight quant_state = weight.quant_state @@ -766,8 +817,10 @@ def post_patch(model): pass # Clear deleted GPU items - gc.collect() - torch.cuda.empty_cache() + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() return model pass @@ -782,11 +835,26 @@ def get_peft_model( lora_dropout = 0, bias = "none", layers_to_transform = None, + layers_pattern = None, use_gradient_checkpointing = True, random_state = 3407, max_seq_length = 2048, # not used anymore + use_rslora = False, + init_lora_weights = True, + loftq_config = None, **kwargs, ): + if isinstance(model, PeftModelForCausalLM): + raise TypeError( + "Unsloth: Your model already has LoRA adapters. 
No need to run this again!" + ) + pass + + import inspect + signature = str(inspect.signature(LoraConfig)) + SUPPORTS_LOFTQ = "loftq_config" in signature + SUPPORTS_RSLORA = "use_rslora" in signature + assert(max_seq_length <= model.max_seq_length) if lora_dropout != 0: @@ -794,11 +862,61 @@ def get_peft_model( f"Unsloth: Dropout = 0 is supported for fast patching. You are using dropout = {lora_dropout}.\n"\ f"Unsloth will patch all other layers, except LoRA matrices, causing a performance hit." ) + pass + if bias != "none": logger.warning_once( f"Unsloth: bias = `none` is supported for fast patching. You are using bias = {bias}.\n"\ f"Unsloth will patch all other layers, except LoRA matrices, causing a performance hit." ) + pass + + if not (type(init_lora_weights) is bool or \ + init_lora_weights == "gaussian" or init_lora_weights == "loftq"): + raise ValueError( + 'Unsloth: `init_lora_weights` must be either [True, False, "gaussian", "loftq"].' + ) + pass + + if init_lora_weights == "loftq": + + if not SUPPORTS_LOFTQ: + import peft + raise RuntimeError( + f"Unsloth: Your PEFT version of {peft.__version__} does not support LoftQ init.\n"\ + "Please install PEFT 0.7.2 or higher.\n"\ + "You can also install from source: `pip install git+https://github.com/huggingface/peft.git" + ) + pass + + if loftq_config is None: + from peft import LoftQConfig + logger.warning_once( + f"Unsloth: init_lora_weights = `loftq` is set, but `loftq_config` is None.\n"\ + f"We shall use `loftq_config = LoftQConfig(loftq_bits = 4, loftq_iter = 1)`." + ) + loftq_config = LoftQConfig(loftq_bits = 4, loftq_iter = 1) + pass + + if hasattr(model.config, "quantization_config"): + raise ValueError( + "Unsloth: You are using `loftq` init, yet `load_in_4bit = True` was set.\n"\ + "Reload your model without any quantization by setting `load_in_4bit = False`." 
+ ) + pass + pass + + assert(type(use_rslora) is bool) + if use_rslora: + if not SUPPORTS_RSLORA: + import peft + raise RuntimeError( + f"Unsloth: Your PEFT version of {peft.__version__} does not support use_rslora.\n"\ + "Please install PEFT 0.7.2 or higher.\n"\ + "You can also install from source: `pip install git+https://github.com/huggingface/peft.git" + ) + pass + pass transformers_set_seed(random_state) @@ -810,16 +928,23 @@ def get_peft_model( pass # Get LoRA - lora_config = LoraConfig( - r = r, - lora_alpha = lora_alpha, - target_modules = target_modules, - lora_dropout = lora_dropout, - bias = bias, - task_type = TaskType.CAUSAL_LM, + arguments = dict( + r = r, + lora_alpha = lora_alpha, + target_modules = target_modules, + lora_dropout = lora_dropout, + bias = bias, + task_type = TaskType.CAUSAL_LM, layers_to_transform = layers_to_transform, + init_lora_weights = init_lora_weights, + loftq_config = loftq_config, + use_rslora = use_rslora, **kwargs, ) + if not SUPPORTS_LOFTQ: del arguments["loftq_config"] + if not SUPPORTS_RSLORA: del arguments["use_rslora"] + + lora_config = LoraConfig(**arguments) model = prepare_model_for_kbit_training( model, @@ -828,10 +953,21 @@ def get_peft_model( ) model = _get_peft_model(model, lora_config) + # Fix up config for transformers uploading PEFT + name = model.peft_config["default"].base_model_name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.peft_config["default"].base_model_name_or_path = name + pass + # Add revision to enable future fast inference paths + model.peft_config["default"].revision = f"unsloth" + # Do patching n_mlp = 0 n_qkv = 0 n_o = 0 + import types + if lora_dropout == 0 and bias == "none": for idx, layer in enumerate(model.model.model.layers): @@ -897,6 +1033,7 @@ def get_peft_model( f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP 
layers.", ) + patch_saving_functions(model) # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 48200c3878..b4ad3aad14 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -16,16 +16,9 @@ from .mistral import FastMistralModel from transformers import AutoConfig from transformers import __version__ as transformers_version +from peft import PeftConfig, PeftModel +from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER -FOURBIT_MAPPER = \ -{ - "unsloth/mistral-7b-bnb-4bit" : "unsloth/mistral-7b", - "unsloth/llama-2-7b-bnb-4bit" : "unsloth/llama-2-7b", - "unsloth/llama-2-13b-bnb-4bit" : "unsloth/llama-13-7b", - "unsloth/codellama-34b-bnb-4bit" : "codellama/CodeLlama-34b-hf", - "unsloth/zephyr-sft-bnb-4bit" : "unsloth/zephyr-sft", - "unsloth/tinyllama-bnb-4bit" : "unsloth/tinyllama", -} # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! major, minor = transformers_version.split(".")[:2] @@ -34,6 +27,39 @@ del major, minor +def _get_model_name(model_name, load_in_4bit = True): + + if not SUPPORTS_FOURBIT and model_name in INT_TO_FLOAT_MAPPER: + model_name = INT_TO_FLOAT_MAPPER[model_name] + logger.warning_once( + f"Unsloth: Your transformers version of {transformers_version} does not support native "\ + f"4bit loading.\nThe minimum required version is 4.37.\n"\ + f'Try `pip install "git+https://github.com/huggingface/transformers.git"`\n'\ + f"to obtain the latest transformers build, then restart this session.\n"\ + f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." + ) + + elif not load_in_4bit and model_name in INT_TO_FLOAT_MAPPER: + new_model_name = INT_TO_FLOAT_MAPPER[model_name] + logger.warning_once( + f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ + f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
+ ) + model_name = new_model_name + + elif load_in_4bit and SUPPORTS_FOURBIT and model_name in FLOAT_TO_INT_MAPPER: + new_model_name = FLOAT_TO_INT_MAPPER[model_name] + logger.warning_once( + f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ + f"We shall load `{new_model_name}` for 4x faster loading." + ) + model_name = new_model_name + pass + + return model_name +pass + + class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( @@ -47,25 +73,27 @@ def from_pretrained( fix_tokenizer = True, *args, **kwargs, ): - if not SUPPORTS_FOURBIT and model_name in FOURBIT_MAPPER: - model_name = FOURBIT_MAPPER[model_name] - logger.warning_once( - f"Unsloth: Your transformers version of {transformers_version} does not support native "\ - f"4bit loading.\nThe minimum required version is 4.37.\n"\ - f'Try `pip install "git+https://github.com/huggingface/transformers.git"`\n'\ - f"to obtain the latest transformers build, then restart this session.\n"\ - f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." - ) - elif not load_in_4bit and model_name in FOURBIT_MAPPER: - new_model_name = FOURBIT_MAPPER[model_name] - logger.warning_once( - f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ - f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
- ) - model_name = new_model_name + old_model_name = model_name + model_name = _get_model_name(model_name, load_in_4bit) + + # First check if it's a normal model via AutoConfig + is_peft = False + try: + model_config = AutoConfig.from_pretrained(model_name, token = token) + is_peft = False + except: + try: + # Most likely a PEFT model + peft_config = PeftConfig.from_pretrained(model_name, token = token) + except: + raise RuntimeError(f"Unsloth: `{model_name}` is not a full model or a PEFT model.") + + # Check base model again for PEFT + model_name = _get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_config = AutoConfig.from_pretrained(model_name, token = token) + is_peft = True pass - model_config = AutoConfig.from_pretrained(model_name) model_type = model_config.model_type if model_type == "llama": dispatch_model = FastLlamaModel @@ -75,8 +103,9 @@ def from_pretrained( f"Unsloth: {model_name} not supported yet!\n"\ "Make an issue to https://github.com/unslothai/unsloth!", ) + pass - return dispatch_model.from_pretrained( + model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, dtype = dtype, @@ -87,5 +116,30 @@ def from_pretrained( fix_tokenizer = fix_tokenizer, *args, **kwargs, ) + + if load_in_4bit: + # Fix up bitsandbytes config + quantization_config = \ + { + # Sometimes torch_dtype is not a string!! 
+ "bnb_4bit_compute_dtype" : model.config.to_dict()["torch_dtype"], + "bnb_4bit_quant_type" : "nf4", + "bnb_4bit_use_double_quant" : True, + "llm_int8_enable_fp32_cpu_offload" : False, + "llm_int8_has_fp16_weight" : False, + "llm_int8_skip_modules" : "null", + "llm_int8_threshold" : 6.0, + "load_in_4bit" : True, + "load_in_8bit" : False, + "quant_method" : "bitsandbytes", + } + model.config.update({"quantization_config" : quantization_config}) + pass + + if is_peft: + # Now add PEFT adapters + model = PeftModel.from_pretrained(model, old_model_name) + pass + return model, tokenizer pass pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py new file mode 100644 index 0000000000..124eaf7b27 --- /dev/null +++ b/unsloth/models/mapper.py @@ -0,0 +1,56 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + "INT_TO_FLOAT_MAPPER", + "FLOAT_TO_INT_MAPPER", +] + +__INT_TO_FLOAT_MAPPER = \ +{ + "unsloth/mistral-7b-bnb-4bit" : ( + "unsloth/mistral-7b", + "mistralai/Mistral-7B-v0.1", + ), + "unsloth/llama-2-7b-bnb-4bit" : ( + "unsloth/llama-2-7b", + "meta-llama/Llama-2-7b-hf", + ), + "unsloth/llama-2-13b-bnb-4bit" : ( + "unsloth/llama-13-7b", + "meta-llama/Llama-2-13b-hf", + ), + "unsloth/codellama-34b-bnb-4bit" : ( + "codellama/CodeLlama-34b-hf", + ), + "unsloth/zephyr-sft-bnb-4bit" : ( + "unsloth/zephyr-sft", + "alignment-handbook/zephyr-7b-sft-full", + ), + "unsloth/tinyllama-bnb-4bit" : ( + "unsloth/tinyllama", + "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", + ), +} + +INT_TO_FLOAT_MAPPER = {} +FLOAT_TO_INT_MAPPER = {} + +for key, values in __INT_TO_FLOAT_MAPPER.items(): + INT_TO_FLOAT_MAPPER[key] = values[0] + + for value in values: + FLOAT_TO_INT_MAPPER[value] = key + pass +pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 01b9dae1af..17da5cc75a 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -343,6 +343,7 @@ def from_pretrained( token = token, ) pass + patch_saving_functions(tokenizer) # Fix up config for transformers uploading PEFT name = model.config._name_or_path @@ -350,6 +351,7 @@ def from_pretrained( name = name[:len(name) - len("-bnb-4bit")] model.config.update({"_name_or_path" : name}) pass + # Log Unsloth version for future fastpaths for inference model.config.update({"unsloth_version" : __version__}) diff --git a/unsloth/save.py b/unsloth/save.py index f94a1b8566..29175a13a0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -12,24 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from peft import PeftModelForCausalLM -from collections import OrderedDict -import bitsandbytes as bnb -import peft -import gc -import os -from tqdm import tqdm as ProgressBar -import shutil -from typing import Optional, Callable, Union +from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit +from peft.tuners.lora import Linear4bit as Peft_Linear4bit +from typing import Optional, Callable, Union, List import torch +import os +import pickle +import gc from transformers.models.llama.modeling_llama import logger from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters +import subprocess +import psutil __all__ = [ + "print_quantization_methods", "unsloth_save_model", - #"colab_quantize_to_gguf", + "save_to_gguf", + "patch_saving_functions", ] + LLAMA_WEIGHTS = ( "self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj", "mlp.gate_proj", "mlp.up_proj", "mlp.down_proj", @@ -41,25 +43,36 @@ # From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html ALLOWED_QUANTS = \ { - "q2_k" : "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.", - "q3_k_l" : "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", - "q3_k_m" : "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", - "q3_k_s" : "Uses Q3_K for all tensors", - "q4_0" : "Original quant method, 4-bit.", - "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. 
However has quicker inference than q5 models.", - "q4_k_m" : "Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", - "q4_k_s" : "Uses Q4_K for all tensors", - "q5_0" : "Higher accuracy, higher resource usage and slower inference.", - "q5_1" : "Even higher accuracy, resource usage and slower inference.", - "q5_k_m" : "Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", - "q5_k_s" : "Uses Q5_K for all tensors", - "q6_k" : "Uses Q8_K for all tensors", - "q8_0" : "Almost indistinguishable from float16. High resource use and slow. Not recommended for most users.", + "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", + "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", + "quantized" : "Recommended. Slow conversion. Fast inference, small files.", + "f32" : "Not recommended. Retains 100% accuracy, but super slow and memory hungry.", + "f16" : "Fastest conversion + retains 100% accuracy. Slow and memory hungry.", + "q8_0" : "Fast conversion. High resource use, but generally acceptable.", + "q4_k_m" : "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", + "q5_k_m" : "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", + "q2_k" : "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.", + "q3_k_l" : "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_m" : "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_s" : "Uses Q3_K for all tensors", + "q4_0" : "Original quant method, 4-bit.", + "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. 
However has quicker inference than q5 models.", + "q4_k_s" : "Uses Q4_K for all tensors", + "q5_0" : "Higher accuracy, higher resource usage and slower inference.", + "q5_1" : "Even higher accuracy, resource usage and slower inference.", + "q5_k_s" : "Uses Q5_K for all tensors", + "q6_k" : "Uses Q8_K for all tensors", } +def print_quantization_methods(): + for key, value in ALLOWED_QUANTS.items(): + print(f'"{key}" ==> {value}') + pass +pass + def _merge_lora(layer, name): - if isinstance(layer, (bnb.nn.Linear4bit, peft.tuners.lora.Linear4bit)): + if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit)): # Is LoRA so we need to merge! W, quant_state, A, B, s = get_lora_parameters(layer) dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] @@ -75,100 +88,362 @@ def _merge_lora(layer, name): pass +def fast_save_pickle(shard, name): + # Use this if # CPUs is <= 2 + print(f"Unsloth: Saving {name}...") + torch.save( + shard, + name, + pickle_module = pickle, + pickle_protocol = pickle.HIGHEST_PROTOCOL, + ) + return +pass + + @torch.inference_mode def unsloth_save_model( model, tokenizer, - save_directory: Union[str, os.PathLike], - is_main_process: bool = True, - state_dict: Optional[dict] = None, - save_function: Callable = torch.save, - push_to_hub: bool = False, - max_shard_size: Union[int, str] = "7GB", - safe_serialization: bool = True, - variant: Optional[str] = None, - token: Optional[Union[str, bool]] = None, - save_peft_format: bool = True, - temporary_location = "_unsloth_temporary_saved_buffers", - **kwargs, + save_directory : Union[str, os.PathLike], + save_method : str = "lora", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + 
save_peft_format : bool = True, + + # Push to hub + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = None, + private : Optional[bool] = None, + create_pr : bool = False, + revision : str = None, + commit_description : str = None, + tags : List[str] = None, + + # Our functions + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.9, ): - logger.warning_once( - "Unsloth: `unsloth_save_model` is still in development mode.\n"\ - "If anything errors or breaks, please file a ticket on Github.\n"\ - "Also, if you used this successfully, please tell us on Discord!" - ) + save_pretrained_settings = dict(locals()) + for deletion in ("model", "tokenizer", "save_method", "temporary_location", "maximum_memory_usage"): + del save_pretrained_settings[deletion] + pass + import re + + assert(maximum_memory_usage > 0 and maximum_memory_usage <= 0.95) + + # Clean memory up first + for _ in range(3): + torch.cuda.empty_cache() + gc.collect() + pass + + save_method = save_method.lower().replace(" ", "_") + if save_method != "lora" and save_method != "merged_16bit" and save_method != "merged_4bit": + raise RuntimeError( + "Unsloth: You must select one of 3 options when saving models:\n"\ + '"lora" ==> This is the fastest and easiet. Just saves LoRA modules.\n'\ + '"merged_16bit" ==> This merges LoRA weights and saves to float16. Needed for llama.cpp / GGUF.\n'\ + '"merged_4bit" ==> This merges LoRA weights and saves to 4bit. Useful for DPO / inference.' 
+ ) + pass + + if save_method == "merged_4bit": + print("Unsloth: Merging 4bit and LoRA weights to 4bit...") + print("This might take 5 minutes...") + model = model.merge_and_unload() + print("Done.") + pass + + if tags is not None: + assert(isinstance(tags, (list, tuple))) + tags = list(tags) + ["unsloth",] + else: + tags = ["unsloth",] + pass + save_pretrained_settings["tags"] = tags + + if (save_method == "lora") and push_to_hub: + if token is None: + raise RuntimeError( + "Unsloth: Pushing to HF requires a token. Pass `token = 'hf_....'`\n"\ + "Go to https://huggingface.co/settings/tokens." + ) + pass + + model.push_to_hub( + repo_id = save_directory, + use_temp_dir = use_temp_dir, + commit_message = commit_message, + private = private, + token = token, + max_shard_size = max_shard_size, + create_pr = create_pr, + safe_serialization = safe_serialization, + revision = revision, + commit_description = commit_description, + tags = tags, + ) + if tokenizer is not None: + tokenizer.push_to_hub( + repo_id = save_directory, + use_temp_dir = use_temp_dir, + commit_message = commit_message, + private = private, + token = token, + max_shard_size = max_shard_size, + create_pr = create_pr, + safe_serialization = safe_serialization, + revision = revision, + commit_description = commit_description, + tags = tags, + ) + pass + return save_directory + pass + + # If push_to_hub, we must remove the .../ part of a repo + if push_to_hub and "/" in save_directory: + + new_save_directory = save_directory[save_directory.find("/"):] + + logger.warning_once( + f"Unsloth: You are pushing to hub, but you passed your HF username.\n"\ + f"We shall truncate {save_directory} to {new_save_directory}" + ) + + save_pretrained_settings["save_directory"] = new_save_directory + save_directory = new_save_directory + pass + + if (save_method == "merged_4bit") or (save_method == "lora") or ( + not hasattr(model, "model") or \ + not hasattr(model.model, "model") or \ + not hasattr(model.model.model, 
"layers") + ): + # Do general saving + + # Edit save_pretrained_settings + # [TODO] _create_repo has errors due to **kwargs getting accepted + for deletion in \ + ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",): + del save_pretrained_settings[deletion] + pass + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth",]) + + if tokenizer is not None: + print("Unsloth: Saving tokenizer...", end = "") + tokenizer.save_pretrained(**save_pretrained_settings) + print(" Done.") + else: + print() + print("Unsloth: Saving model...", end = "") + if save_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") + + model.save_pretrained(**save_pretrained_settings) + print(" Done.") + return save_directory + pass + + print("Unsloth: Merging 4bit and LoRA weights to 16bit...") + + # Determine max RAM usage minus sharding + max_ram = psutil.virtual_memory().available + sharded_ram_usage = 5 * 1024 * 1024 * 1024 + if type(max_shard_size) is str: + gb_found = re.match("([0-9]{1,})[\s]{0,}GB", max_shard_size, flags = re.IGNORECASE) + mb_found = re.match("([0-9]{1,})[\s]{0,}MB", max_shard_size, flags = re.IGNORECASE) + if gb_found: sharded_ram_usage = int(gb_found.group(1)) * 1024 * 1024 * 1024 + elif mb_found: sharded_ram_usage = int(mb_found.group(1)) * 1024 * 1024 + elif type(max_shard_size) is int: + sharded_ram_usage = sharded_ram_usage + pass + + # Switch to our fast saving modules if it's a slow PC! + n_cpus = psutil.cpu_count(logical = False) + + if safe_serialization is None: + safe_serialization = True + save_pretrained_settings["safe_serialization"] = safe_serialization + + elif safe_serialization and (n_cpus <= 2): + logger.warning_once( + f"Unsloth: You have {n_cpus} CPUs. 
Using `safe_serialization` is 10x slower.\n"\ + f"We shall switch to Pytorch saving, which will take 3 minutes and not 30 minutes.\n"\ + f"To force `safe_serialization`, set it to None instead.", + ) + safe_serialization = False + save_function = fast_save_pickle + save_pretrained_settings["safe_serialization"] = safe_serialization + save_pretrained_settings["save_function"] = save_function + pass + + # Only safe_serialization uses more RAM + if safe_serialization: + max_ram -= sharded_ram_usage + else: + max_ram -= sharded_ram_usage*0.25 # Uses much less + pass + + max_ram = int(max(0, max_ram) * maximum_memory_usage) + print(f"Unsloth: Will use up to "\ + f"{round(max_ram/1024/1024/1024, 2)} out of "\ + f"{round(psutil.virtual_memory().total/1024/1024/1024, 2)} RAM for saving.") + + # Max directory for disk saving if not os.path.exists(temporary_location): os.makedirs(temporary_location) pass - assert(hasattr(model, "model")) - assert(hasattr(model.model, "model")) - assert(hasattr(model.model.model, "layers")) - # HF also uses a OrderedDict + from collections import OrderedDict state_dict = OrderedDict() - state_dict["model.embed_tokens.weight"] = model.model.model.embed_tokens.weight + state_dict["model.embed_tokens.weight"] = model.model.model.embed_tokens.weight.data - print("Unsloth: Merging 4bit and LoRA weights to 16bit...") + max_vram = int(torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage) + + from tqdm import tqdm as ProgressBar for j, layer in enumerate(ProgressBar(model.model.model.layers)): for item in LLAMA_WEIGHTS: proj = eval(f"layer.{item}") name = f"model.layers.{j}.{item}.weight" W = _merge_lora(proj, name) - filename = os.path.join(temporary_location, f"{name}.pt") - torch.save(W, filename) - state_dict[name] = torch.load(filename, map_location = "cpu", mmap = True) + + if (torch.cuda.memory_allocated() + W.nbytes) < max_vram: + # Save to GPU memory + state_dict[name] = W + # elif (max_ram - W.nbytes) > 0: + # # Save to 
CPU memory + # logger.warning_once(f"We will save to RAM and not VRAM now.") + # state_dict[name] = W.to("cpu", non_blocking = True) + # max_ram = max(max_ram - W.nbytes, 0) + else: + # Save to Disk + logger.warning_once(f"We will save to Disk and not RAM now.") + filename = os.path.join(temporary_location, f"{name}.pt") + torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) + state_dict[name] = torch.load(filename, map_location = "cpu", mmap = True) pass for item in LLAMA_LAYERNORMS: - state_dict[f"model.layers.{j}.{item}.weight"] = eval(f"layer.{item}.weight") + state_dict[f"model.layers.{j}.{item}.weight"] = eval(f"layer.{item}.weight.data") pass pass - state_dict["model.norm.weight"] = model.model.model.norm.weight - state_dict["lm_head.weight"] = model.model.lm_head.weight - - print("Unsloth: Saving tokenizer...") - tokenizer.save_pretrained( - save_directory = save_directory, - is_main_process = is_main_process, - state_dict = state_dict, - save_function = save_function, - push_to_hub = push_to_hub, - max_shard_size = max_shard_size, - safe_serialization = safe_serialization, - variant = variant, - token = token, - save_peft_format = save_peft_format, - ) + state_dict["model.norm.weight"] = model.model.model.norm.weight.data + state_dict["lm_head.weight"] = model.model.lm_head.weight.data - print("Unsloth: Saving model. 
This will take 5 minutes for Llama-7b...") - model.model.save_pretrained( - save_directory = save_directory, - is_main_process = is_main_process, - state_dict = state_dict, - save_function = save_function, - push_to_hub = push_to_hub, - max_shard_size = max_shard_size, - safe_serialization = safe_serialization, - variant = variant, - token = token, - save_peft_format = save_peft_format, - ) + # All tensors MUST be type torch.Tensor and not torch.nn.parameter.Parameter + for key, value in state_dict.items(): + if hasattr(value, "data"): state_dict[key] = value = value.data + if type(value) is not torch.Tensor: + logger.warning_once(f"Unsloth: {key} is not a Tensor but a {type(value)}.") + pass + pass + + # Edit save_pretrained_settings + # [TODO] _create_repo has errors due to **kwargs getting accepted + save_pretrained_settings["state_dict"] = state_dict + for deletion in \ + ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",): + del save_pretrained_settings[deletion] + pass + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth",]) + + if tokenizer is not None: + print("Unsloth: Saving tokenizer...", end = "") + tokenizer.save_pretrained(**save_pretrained_settings) + print(" Done.") + else: + print() + + print("Unsloth: Saving model... 
This might take 5 minutes for Llama-7b...") + model.model.save_pretrained(**save_pretrained_settings) + print("Done.") + + save_pretrained_settings["state_dict"] = None + + # for j, (key, value) in enumerate(state_dict.items()): + # state_dict[key] = None + # if j % 10 == 0: + # torch.cuda.empty_cache() + # gc.collect() + # pass + # pass + # state_dict = None + # del state_dict + # torch.cuda.empty_cache() + # gc.collect() # Remove temporary location + import shutil shutil.rmtree(temporary_location) + + # for _ in range(3): + # torch.cuda.empty_cache() + # gc.collect() + return save_directory pass -""" -def _colab_quantize_to_gguf(save_directory, quantization_method = "q4_k_m"): +def install_llama_cpp_clone_non_blocking(): + full_command = ["git", "clone", "https://github.com/ggerganov/llama.cpp"] + run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) + return run_installer +pass - logger.warning_once( - "Unsloth: `colab_quantize_to_gguf` is still in development mode.\n"\ - "If anything errors or breaks, please file a ticket on Github.\n"\ - "Also, if you used this successfully, please tell us on Discord!" 
- ) + +def install_llama_cpp_make_non_blocking(): + env = { **os.environ, "LLAMA_CUBLAS": "1", } + n_jobs = max(int(psutil.cpu_count()*1.5), 1) + full_command = ["make", "-j", str(n_jobs), "-C", "llama.cpp"] + run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) + return run_installer +pass + + +def install_python_non_blocking(packages = []): + full_command = ["pip", "install"] + packages + run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) + return run_installer +pass + + +def install_llama_cpp_blocking(): + commands = [ + "git clone https://github.com/ggerganov/llama.cpp", + f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j {psutil.cpu_count()*2}", + "pip install gguf protobuf", + ] + if os.path.exists("llama.cpp"): return + for command in commands: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + for line in sp.stdout: + print(line.decode("utf-8"), flush = True, end = "") + pass + pass +pass + + +def save_to_gguf( + model_directory : str = "unsloth_finetuned_model", + quantization_method : str = "fast_quantized", + _run_installer = None, # Non blocking install of llama.cpp +): + from transformers.models.llama.modeling_llama import logger + + if quantization_method == "not_quantized": quantization_method = "f16" + elif quantization_method == "fast_quantized": quantization_method = "q8_0" + elif quantization_method == "quantized": quantization_method = "q4_k_m" + elif quantization_method is None: quantization_method = "q8_0" if quantization_method not in ALLOWED_QUANTS.keys(): error = f"Unsloth: Quant method = [{quantization_method}] not supported. 
Choose from below:\n" @@ -181,27 +456,409 @@ def _colab_quantize_to_gguf(save_directory, quantization_method = "q4_k_m"): f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to q4_k_m will take 20 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 20 minutes.\n"\ f' "-____-" In total, you will have to wait around 26 minutes.\n' print(print_info) - if not os.path.exists("llama.cpp"): - print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") - !git clone https://github.com/ggerganov/llama.cpp - !cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j - !pip install gguf protobuf + print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") + if _run_installer is not None: + _run_installer.wait() + else: + install_llama_cpp_blocking() + pass + + print("Unsloth: [1] Converting HF into GGUF format. This will take 3 minutes...") + first_conversion = "f16" + if quantization_method == "f32": first_conversion = "f32" + elif quantization_method == "f16": first_conversion = "f16" + elif quantization_method == "q8_0": first_conversion = "q8_0" + + n_cpus = psutil.cpu_count()*2 + # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model + + final_location = f"./{model_directory}-unsloth.{first_conversion.upper()}.gguf" + + command = f"python llama.cpp/convert.py {model_directory} "\ + f"--outfile {final_location} "\ + f"--outtype {first_conversion} --concurrency {n_cpus}" + + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + for line in sp.stdout: + print(line.decode("utf-8"), flush = True, end = "") + pass + + print(f"Unsloth: Conversion completed! 
Output location: {final_location}") + + if quantization_method != first_conversion: + old_location = final_location + print(f"Unsloth: [2] Converting GGUF 16bit into {quantization_method}. This will take 20 minutes...") + final_location = f"./{model_directory}-unsloth.{quantization_method.upper()}.gguf" + + command = f"./llama.cpp/quantize {old_location} "\ + f"{final_location} {quantization_method} {n_cpus}" + + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + for line in sp.stdout: + print(line.decode("utf-8"), flush = True, end = "") + pass + print(f"Unsloth: Conversion completed! Output location: {final_location}") + pass + + return final_location +pass + + +def unsloth_save_pretrained_merged( + self, + save_directory : Union[str, os.PathLike], + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + tags : List[str] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.85, +): + """ + Same as .save_pretrained(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. + + Choose for `save_method` to be either: + 1. `merged_16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `merged_4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. 
+ """ + arguments = dict(locals()) + arguments["model"] = self + arguments["tokenizer"] = None + del arguments["self"] + unsloth_save_model(**arguments) + for _ in range(3): + gc.collect() +pass + + +def unsloth_push_to_hub_merged( + self, + repo_id : str, + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = None, + private : Optional[bool] = None, + token : Union[bool, str, None] = None, + max_shard_size : Union[int, str, None] = "5GB", + create_pr : bool = False, + safe_serialization : bool = True, + revision : str = None, + commit_description : str = None, + tags : Optional[List[str]] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.85, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. + + Choose for `save_method` to be either: + 1. `merged_16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `merged_4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. 
+ """ + arguments = dict(locals()) + arguments["model"] = self + arguments["tokenizer"] = None + arguments["save_directory"] = repo_id + arguments["push_to_hub"] = True + del arguments["self"] + del arguments["repo_id"] + unsloth_save_model(**arguments) + for _ in range(3): + gc.collect() +pass + + +def unsloth_save_pretrained_gguf( + self, + save_directory : Union[str, os.PathLike], + tokenizer = None, + quantization_method : str = "fast_quantized", + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + tags : List[str] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.85, +): + """ + Same as .save_pretrained(...) except 4bit weights are auto + converted to float16 then converted to GGUF / llama.cpp format. + + Choose for `quantization_method` to be: + "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", + "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", + "quantized" : "Recommended. Slow conversion. Fast inference, small files.", + "f32" : "Not recommended. Retains 100% accuracy, but super slow and memory hungry.", + "f16" : "Fastest conversion + retains 100% accuracy. Slow and memory hungry.", + "q8_0" : "Fast conversion. High resource use, but generally acceptable.", + "q4_k_m" : "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", + "q5_k_m" : "Recommended. 
Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", + "q2_k" : "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.", + "q3_k_l" : "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_m" : "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_s" : "Uses Q3_K for all tensors", + "q4_0" : "Original quant method, 4-bit.", + "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.", + "q4_k_s" : "Uses Q4_K for all tensors", + "q5_0" : "Higher accuracy, higher resource usage and slower inference.", + "q5_1" : "Even higher accuracy, resource usage and slower inference.", + "q5_k_s" : "Uses Q5_K for all tensors", + "q6_k" : "Uses Q8_K for all tensors", + """ + if tokenizer is None: + raise ValueError("Unsloth: Saving to GGUF must have a tokenizer.") + + arguments = dict(locals()) + arguments["model"] = self + arguments["tokenizer"] = tokenizer + arguments["push_to_hub"] = False # We save ourselves + arguments["save_method"] = "merged_16bit" # Must be 16bit + del arguments["self"] + del arguments["quantization_method"] + + # Non blocking install GGUF first + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) + python_install.wait() + + for _ in range(3): + gc.collect() + + file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + + # And save to HF + if push_to_hub: + print("Unsloth: Uploading GGUF to Huggingface Hub...") + + from huggingface_hub import create_repo + create_repo( + repo_id = save_directory, + token = token, + repo_type = "model", + exist_ok = True, + ) + + from huggingface_hub import HfApi + hf_api = HfApi(token = token) + + if "/" in 
file_location: + uploaded_location = file_location[file_location.rfind("/")+1:] + else: + uploaded_location = file_location + pass + + hf_api.upload_file( + path_or_fileobj = file_location, + path_in_repo = uploaded_location, + repo_id = save_directory, + repo_type = "model", + ) pass +pass + + +def unsloth_push_to_hub_gguf( + self, + repo_id : str, + tokenizer = None, + quantization_method : str = "fast_quantized", + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = None, + private : Optional[bool] = None, + token : Union[bool, str, None] = None, + max_shard_size : Union[int, str, None] = "5GB", + create_pr : bool = False, + safe_serialization : bool = True, + revision : str = None, + commit_description : str = None, + tags : Optional[List[str]] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.85, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 then converted to GGUF / llama.cpp format. + + Choose for `quantization_method` to be: + "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", + "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", + "quantized" : "Recommended. Slow conversion. Fast inference, small files.", + "f32" : "Not recommended. Retains 100% accuracy, but super slow and memory hungry.", + "f16" : "Fastest conversion + retains 100% accuracy. Slow and memory hungry.", + "q8_0" : "Fast conversion. High resource use, but generally acceptable.", + "q4_k_m" : "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", + "q5_k_m" : "Recommended. 
Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", + "q2_k" : "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.", + "q3_k_l" : "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_m" : "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_s" : "Uses Q3_K for all tensors", + "q4_0" : "Original quant method, 4-bit.", + "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.", + "q4_k_s" : "Uses Q4_K for all tensors", + "q5_0" : "Higher accuracy, higher resource usage and slower inference.", + "q5_1" : "Even higher accuracy, resource usage and slower inference.", + "q5_k_s" : "Uses Q5_K for all tensors", + "q6_k" : "Uses Q8_K for all tensors", + """ + if tokenizer is None: + raise ValueError("Unsloth: Saving to GGUF must have a tokenizer.") - print("Unsloth: [1] Converting HF into GGUF 16bit. This will take 3 minutes...") - !python llama.cpp/convert.py {save_directory} \ - --outfile {save_directory}-unsloth.gguf \ - --outtype f16 + arguments = dict(locals()) + arguments["model"] = self + arguments["tokenizer"] = tokenizer + arguments["save_directory"] = repo_id + arguments["push_to_hub"] = False # We save ourselves + arguments["save_method"] = "merged_16bit" # Must be 16bit + del arguments["self"] + del arguments["repo_id"] + del arguments["quantization_method"] - print("Unsloth: [2] Converting GGUF 16bit into q4_k_m. 
This will take 20 minutes...") - final_location = f"./{save_directory}-{quantization_method}-unsloth.gguf" - !./llama.cpp/quantize ./{save_directory}-unsloth.gguf \ - {final_location} {quantization_method} + # Non blocking install GGUF first + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) - print(f"Unsloth: Output location: {final_location}") + for _ in range(3): + gc.collect() + + python_install.wait() + file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + + # Save to hub + print("Unsloth: Uploading GGUF to Huggingface Hub...") + + from huggingface_hub import create_repo + create_repo( + repo_id = save_directory, + private = private, + token = token, + repo_type = "model", + exist_ok = True, + ) + + from huggingface_hub import HfApi + hf_api = HfApi(token = token) + + if "/" in file_location: + uploaded_location = file_location[file_location.rfind("/")+1:] + else: + uploaded_location = file_location + pass + + hf_api.upload_file( + path_or_fileobj = file_location, + path_in_repo = uploaded_location, + repo_id = save_directory, + repo_type = "model", + ) +pass + + +def patch_saving_functions(model): + import inspect + import re + import types + from typing import Callable, Optional, Union, List + + if hasattr(model, "_original_push_to_hub"): return + + original_push_to_hub = model.push_to_hub + signature = str(inspect.signature(original_push_to_hub)).replace("NoneType", "None") + signature = signature[1:] + signature = re.sub("", "torch.save", signature) + docs = original_push_to_hub.__doc__.encode("utf-8").decode("utf-8") + model._original_push_to_hub = original_push_to_hub + + push_to_hub_text = f'''def unsloth_push_to_hub(self, {signature}: + """ + {docs} + """ + arguments = dict(locals()) + del arguments["self"] + if "tags" in arguments 
and arguments["tags"] is not None: + assert(isinstance(arguments["tags"], (list, tuple))) + arguments["tags"] = list(arguments["tags"]) + ["unsloth",] + elif "tags" in arguments: + arguments["tags"] = ["unsloth",] + elif hasattr(self, "add_model_tags"): + self.add_model_tags(["unsloth",]) + try: + return self._original_push_to_hub(**arguments) + except: + del arguments["tags"] + return self._original_push_to_hub(**arguments) + pass + ''' + exec(push_to_hub_text, globals()) + model.push_to_hub = types.MethodType(unsloth_push_to_hub, model) + + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth",]) + + if hasattr(model, "config"): + # Counteract tokenizers + model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) + model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) + model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) + else: + model.push_to_hub_merged = model.push_to_hub + model.save_pretrained_merged = model.save_pretrained + model.push_to_hub_gguf = model.push_to_hub + model.save_pretrained_gguf = model.save_pretrained + pass + + original_model = model + while hasattr(original_model, "model"): + original_model = original_model.model + if hasattr(original_model, "_original_push_to_hub"): continue + + original_model._original_push_to_hub = original_model.push_to_hub + original_model.push_to_hub = types.MethodType(unsloth_push_to_hub, original_model) + + if hasattr(original_model, "add_model_tags"): + original_model.add_model_tags(["unsloth",]) + + if hasattr(original_model, "config"): + # Counteract tokenizers + original_model.push_to_hub_merged = \ + types.MethodType(unsloth_push_to_hub_merged, original_model) + + original_model.save_pretrained_merged = \ + types.MethodType(unsloth_save_pretrained_merged, original_model) + + original_model.push_to_hub_gguf = \ + 
types.MethodType(unsloth_push_to_hub_gguf, original_model) + + original_model.save_pretrained_gguf = \ + types.MethodType(unsloth_save_pretrained_gguf, original_model) + pass + pass + return pass -""" From fc25ab0df032f8ee5ea750f27c68d63f49d2d9a9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jan 2024 22:52:30 +1100 Subject: [PATCH 0112/1088] Quick fixes (#101) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py --- unsloth/models/dpo.py | 38 +++++++++++++ unsloth/models/llama.py | 77 +++++++++++++------------ unsloth/models/loader.py | 28 ++++----- unsloth/models/mistral.py | 16 +++--- unsloth/save.py | 116 +++++++++++++++++++++----------------- 5 files changed, 167 insertions(+), 108 deletions(-) diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index e7724c2d0a..519914d904 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -65,8 +65,46 @@ def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwa pass +def NotebookTrainingTracker_write_line(self, values): + """ + Write the values in the inner table. + + Args: + values (`Dict[str, float]`): The values to display. 
+ """ + if self.inner_table is None: + self.inner_table = [list(values.keys()), list(values.values())] + else: + columns = self.inner_table[0] + print(columns) + for key in values.keys(): + if key not in columns: + columns.append(key) + self.inner_table[0] = columns + if len(self.inner_table) > 1: + last_values = self.inner_table[-1] + first_column = self.inner_table[0][0] + if last_values[0] != values[first_column]: + # write new line + self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) + else: + # update last line + new_values = values + for c in columns: + if c not in new_values.keys(): + new_values[c] = last_values[columns.index(c)] + self.inner_table[-1] = [new_values[c] for c in columns] + else: + # Edit for evaluation purposes + self.inner_table.append([values[c] if c in values else 0 for c in columns]) + pass + pass +pass + + def PatchDPOTrainer(): # Patch DPO notebook printing + # NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line from transformers.trainer import DEFAULT_PROGRESS_CALLBACK DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 309625d962..38b8f55d11 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -161,11 +161,12 @@ def fast_mlp_inference(self, X): def fast_rms_layernorm_inference(self, X): + old_dtype = X.dtype X = X.to(torch.float32) variance = X.square().mean(-1, keepdim = True) variance += self.variance_epsilon X *= variance.rsqrt_() - X = X.to(residual.dtype) + X = X.to(old_dtype) X *= self.weight return X pass @@ -660,14 +661,15 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "unsloth/llama-2-7b-bnb-4bit", + model_name = "unsloth/llama-2-7b-bnb-4bit", max_seq_length = 4096, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, 
- fix_tokenizer = True, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + fix_tokenizer = True, + **kwargs, ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) @@ -720,18 +722,19 @@ def from_pretrained( max_position_embeddings = max(max_seq_length, model_max_seq_length) model = AutoModelForCausalLM.from_pretrained( model_name, - device_map = device_map, - torch_dtype = dtype, - quantization_config = bnb_config, - token = token, - rope_scaling = rope_scaling, + device_map = device_map, + torch_dtype = dtype, + quantization_config = bnb_config, + token = token, + rope_scaling = rope_scaling, max_position_embeddings = max_position_embeddings, + **kwargs, ) tokenizer = AutoTokenizer.from_pretrained( model_name, model_max_length = max_seq_length, - padding_side = "right", - token = token, + padding_side = "right", + token = token, ) model, tokenizer = patch_tokenizer(model, tokenizer) @@ -755,12 +758,12 @@ def from_pretrained( # We check the tokenizer first for errors if fix_tokenizer: tokenizer = check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, + model = model, + tokenizer = tokenizer, + model_name = model_name, model_max_length = max_seq_length, - padding_side = "right", - token = token, + padding_side = "right", + token = token, ) pass patch_saving_functions(tokenizer) @@ -828,20 +831,20 @@ def post_patch(model): @staticmethod def get_peft_model( model, - r = 16, - target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", - "gate_proj", "up_proj", "down_proj"], - lora_alpha = 16, - lora_dropout = 0, - bias = "none", + r = 16, + target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", + "gate_proj", "up_proj", "down_proj"], + lora_alpha = 16, + lora_dropout = 0, + bias = "none", layers_to_transform = None, - layers_pattern = None, + layers_pattern = None, use_gradient_checkpointing = True, - random_state = 3407, - 
max_seq_length = 2048, # not used anymore - use_rslora = False, - init_lora_weights = True, - loftq_config = None, + random_state = 3407, + max_seq_length = 2048, # not used anymore + use_rslora = False, + init_lora_weights = True, + loftq_config = None, **kwargs, ): if isinstance(model, PeftModelForCausalLM): @@ -909,12 +912,14 @@ def get_peft_model( assert(type(use_rslora) is bool) if use_rslora: if not SUPPORTS_RSLORA: + # We do it ourselves! + new_alpha = lora_alpha / (r**0.5) import peft - raise RuntimeError( - f"Unsloth: Your PEFT version of {peft.__version__} does not support use_rslora.\n"\ - "Please install PEFT 0.7.2 or higher.\n"\ - "You can also install from source: `pip install git+https://github.com/huggingface/peft.git" + logger.warning_once( + f"Unsloth: Your PEFT version of {peft.__version__} (0.7.2 needed) does not support `use_rslora` natively.\n"\ + f"But, we do it ourselves by setting `alpha = {new_alpha}.`" ) + lora_alpha = new_alpha pass pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index b4ad3aad14..d812d3625e 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -63,14 +63,14 @@ def _get_model_name(model_name, load_in_4bit = True): class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( - model_name = "unsloth/mistral-7b-bnb-4bit", + model_name = "unsloth/mistral-7b-bnb-4bit", max_seq_length = 4096, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, - fix_tokenizer = True, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + fix_tokenizer = True, *args, **kwargs, ): old_model_name = model_name @@ -106,14 +106,14 @@ def from_pretrained( pass model, tokenizer = dispatch_model.from_pretrained( - model_name = model_name, + model_name = model_name, max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - token = token, - device_map = device_map, - 
rope_scaling = rope_scaling, - fix_tokenizer = fix_tokenizer, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + fix_tokenizer = fix_tokenizer, *args, **kwargs, ) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 17da5cc75a..eb9d10513f 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -256,14 +256,15 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "unsloth/mistral-7b-bnb-4bit", + model_name = "unsloth/mistral-7b-bnb-4bit", max_seq_length = 4096, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, # Mistral does not support RoPE scaling - fix_tokenizer = True, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, # Mistral does not support RoPE scaling + fix_tokenizer = True, + **kwargs, ): if rope_scaling is not None: logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") @@ -305,6 +306,7 @@ def from_pretrained( quantization_config = bnb_config, token = token, # rope_scaling = rope_scaling, + **kwargs, ) tokenizer = AutoTokenizer.from_pretrained( model_name, diff --git a/unsloth/save.py b/unsloth/save.py index 29175a13a0..a95fac1bcf 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -94,7 +94,7 @@ def fast_save_pickle(shard, name): torch.save( shard, name, - pickle_module = pickle, + pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL, ) return @@ -106,7 +106,7 @@ def unsloth_save_model( model, tokenizer, save_directory : Union[str, os.PathLike], - save_method : str = "lora", # ["lora", "merged_16bit", "merged_4bit"] + merge_method : str = "lora", # ["lora", "16bit", "4bit"] push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -131,7 +131,7 @@ def unsloth_save_model( maximum_memory_usage : float = 0.9, ): save_pretrained_settings 
= dict(locals()) - for deletion in ("model", "tokenizer", "save_method", "temporary_location", "maximum_memory_usage"): + for deletion in ("model", "tokenizer", "merge_method", "temporary_location", "maximum_memory_usage"): del save_pretrained_settings[deletion] pass import re @@ -144,8 +144,8 @@ def unsloth_save_model( gc.collect() pass - save_method = save_method.lower().replace(" ", "_") - if save_method != "lora" and save_method != "merged_16bit" and save_method != "merged_4bit": + merge_method = merge_method.lower().replace(" ", "_") + if merge_method != "lora" and merge_method != "16bit" and merge_method != "4bit": raise RuntimeError( "Unsloth: You must select one of 3 options when saving models:\n"\ '"lora" ==> This is the fastest and easiet. Just saves LoRA modules.\n'\ @@ -154,7 +154,7 @@ def unsloth_save_model( ) pass - if save_method == "merged_4bit": + if merge_method == "4bit": print("Unsloth: Merging 4bit and LoRA weights to 4bit...") print("This might take 5 minutes...") model = model.merge_and_unload() @@ -169,7 +169,7 @@ def unsloth_save_model( pass save_pretrained_settings["tags"] = tags - if (save_method == "lora") and push_to_hub: + if (merge_method == "lora") and push_to_hub: if token is None: raise RuntimeError( "Unsloth: Pushing to HF requires a token. 
Pass `token = 'hf_....'`\n"\ @@ -222,7 +222,7 @@ def unsloth_save_model( save_directory = new_save_directory pass - if (save_method == "merged_4bit") or (save_method == "lora") or ( + if (merge_method == "4bit") or (merge_method == "lora") or ( not hasattr(model, "model") or \ not hasattr(model.model, "model") or \ not hasattr(model.model.model, "layers") @@ -246,7 +246,7 @@ def unsloth_save_model( print() print("Unsloth: Saving model...", end = "") - if save_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") + if merge_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") model.save_pretrained(**save_pretrained_settings) print(" Done.") @@ -434,19 +434,19 @@ def install_llama_cpp_blocking(): def save_to_gguf( - model_directory : str = "unsloth_finetuned_model", - quantization_method : str = "fast_quantized", + model_directory : str = "unsloth_finetuned_model", + quantization : str = "fast_quantized", _run_installer = None, # Non blocking install of llama.cpp ): from transformers.models.llama.modeling_llama import logger - if quantization_method == "not_quantized": quantization_method = "f16" - elif quantization_method == "fast_quantized": quantization_method = "q8_0" - elif quantization_method == "quantized": quantization_method = "q4_k_m" - elif quantization_method is None: quantization_method = "q8_0" + if quantization == "not_quantized": quantization = "f16" + elif quantization == "fast_quantized": quantization = "q8_0" + elif quantization == "quantized": quantization = "q4_k_m" + elif quantization is None: quantization = "q8_0" - if quantization_method not in ALLOWED_QUANTS.keys(): - error = f"Unsloth: Quant method = [{quantization_method}] not supported. Choose from below:\n" + if quantization not in ALLOWED_QUANTS.keys(): + error = f"Unsloth: Quant method = [{quantization}] not supported. 
Choose from below:\n" for key, value in ALLOWED_QUANTS.items(): error += f"[{key}] => {value}\n" raise RuntimeError(error) @@ -456,7 +456,7 @@ def save_to_gguf( f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 20 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to {quantization} will take 20 minutes.\n"\ f' "-____-" In total, you will have to wait around 26 minutes.\n' print(print_info) @@ -469,9 +469,9 @@ def save_to_gguf( print("Unsloth: [1] Converting HF into GGUF format. This will take 3 minutes...") first_conversion = "f16" - if quantization_method == "f32": first_conversion = "f32" - elif quantization_method == "f16": first_conversion = "f16" - elif quantization_method == "q8_0": first_conversion = "q8_0" + if quantization == "f32": first_conversion = "f32" + elif quantization == "f16": first_conversion = "f16" + elif quantization == "q8_0": first_conversion = "q8_0" n_cpus = psutil.cpu_count()*2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model @@ -489,13 +489,13 @@ def save_to_gguf( print(f"Unsloth: Conversion completed! Output location: {final_location}") - if quantization_method != first_conversion: + if quantization != first_conversion: old_location = final_location - print(f"Unsloth: [2] Converting GGUF 16bit into {quantization_method}. This will take 20 minutes...") - final_location = f"./{model_directory}-unsloth.{quantization_method.upper()}.gguf" + print(f"Unsloth: [2] Converting GGUF 16bit into {quantization}. 
This will take 20 minutes...") + final_location = f"./{model_directory}-unsloth.{quantization.upper()}.gguf" command = f"./llama.cpp/quantize {old_location} "\ - f"{final_location} {quantization_method} {n_cpus}" + f"{final_location} {quantization} {n_cpus}" with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: @@ -511,7 +511,8 @@ def save_to_gguf( def unsloth_save_pretrained_merged( self, save_directory : Union[str, os.PathLike], - save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + tokenizer = None, + merge_method : str = "16bit", # ["lora", "16bit", "4bit"] push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -529,14 +530,20 @@ def unsloth_save_pretrained_merged( Same as .save_pretrained(...) except 4bit weights are auto converted to float16 with as few overhead as possible. - Choose for `save_method` to be either: - 1. `merged_16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. - 2. `merged_4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. - 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + Choose for `merge_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. 
""" + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.save_pretrained(...)`" + ) + pass + arguments = dict(locals()) - arguments["model"] = self - arguments["tokenizer"] = None + arguments["model"] = self del arguments["self"] unsloth_save_model(**arguments) for _ in range(3): @@ -547,7 +554,8 @@ def unsloth_save_pretrained_merged( def unsloth_push_to_hub_merged( self, repo_id : str, - save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + tokenizer = None, + merge_method : str = "16bit", # ["lora", "16bit", "4bit"] use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] = None, @@ -565,14 +573,20 @@ def unsloth_push_to_hub_merged( Same as .push_to_hub(...) except 4bit weights are auto converted to float16 with as few overhead as possible. - Choose for `save_method` to be either: - 1. `merged_16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. - 2. `merged_4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. - 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + Choose for `merge_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. 
""" + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.push_to_hub(...)`" + ) + pass + arguments = dict(locals()) arguments["model"] = self - arguments["tokenizer"] = None arguments["save_directory"] = repo_id arguments["push_to_hub"] = True del arguments["self"] @@ -587,7 +601,7 @@ def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], tokenizer = None, - quantization_method : str = "fast_quantized", + quantization : str = "fast_quantized", push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -605,7 +619,7 @@ def unsloth_save_pretrained_gguf( Same as .save_pretrained(...) except 4bit weights are auto converted to float16 then converted to GGUF / llama.cpp format. - Choose for `quantization_method` to be: + Choose for `quantization` to be: "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", "quantized" : "Recommended. Slow conversion. 
Fast inference, small files.", @@ -630,12 +644,12 @@ def unsloth_save_pretrained_gguf( raise ValueError("Unsloth: Saving to GGUF must have a tokenizer.") arguments = dict(locals()) - arguments["model"] = self - arguments["tokenizer"] = tokenizer - arguments["push_to_hub"] = False # We save ourselves - arguments["save_method"] = "merged_16bit" # Must be 16bit + arguments["model"] = self + arguments["tokenizer"] = tokenizer + arguments["push_to_hub"] = False # We save ourselves + arguments["merge_method"] = "16bit" # Must be 16bit del arguments["self"] - del arguments["quantization_method"] + del arguments["quantization"] # Non blocking install GGUF first git_clone = install_llama_cpp_clone_non_blocking() @@ -648,7 +662,7 @@ def unsloth_save_pretrained_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + file_location = save_to_gguf(new_save_directory, quantization, makefile) # And save to HF if push_to_hub: @@ -685,7 +699,7 @@ def unsloth_push_to_hub_gguf( self, repo_id : str, tokenizer = None, - quantization_method : str = "fast_quantized", + quantization : str = "fast_quantized", use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] = None, @@ -703,7 +717,7 @@ def unsloth_push_to_hub_gguf( Same as .push_to_hub(...) except 4bit weights are auto converted to float16 then converted to GGUF / llama.cpp format. - Choose for `quantization_method` to be: + Choose for `quantization` to be: "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", "quantized" : "Recommended. Slow conversion. 
Fast inference, small files.", @@ -732,10 +746,10 @@ def unsloth_push_to_hub_gguf( arguments["tokenizer"] = tokenizer arguments["save_directory"] = repo_id arguments["push_to_hub"] = False # We save ourselves - arguments["save_method"] = "merged_16bit" # Must be 16bit + arguments["merge_method"] = "16bit" # Must be 16bit del arguments["self"] del arguments["repo_id"] - del arguments["quantization_method"] + del arguments["quantization"] # Non blocking install GGUF first git_clone = install_llama_cpp_clone_non_blocking() @@ -748,7 +762,7 @@ def unsloth_push_to_hub_gguf( gc.collect() python_install.wait() - file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + file_location = save_to_gguf(new_save_directory, quantization, makefile) # Save to hub print("Unsloth: Uploading GGUF to Huggingface Hub...") From 920e3c2ea07a044addeb7c3fa8be6f0189cb7f84 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jan 2024 22:57:22 +1100 Subject: [PATCH 0113/1088] getattr issues (#103) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update 
save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? * Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr --- unsloth/models/llama.py | 2 +- unsloth/models/mistral.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 38b8f55d11..06b78d77c5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -460,7 +460,7 @@ def LlamaModel_fast_forward( (batch_size, seq_length), inputs_embeds, past_key_values_length, - sliding_window = getattr(self.config, "sliding_window"), + sliding_window = getattr(self.config, "sliding_window", None), ) pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index eb9d10513f..2c85129010 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -131,7 +131,7 @@ def MistralAttention_fast_forward( Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) - sw = getattr(self.config, "sliding_window") + sw = getattr(self.config, "sliding_window", None) sw = q_len if sw is None else sw window = (-1, -1) if (q_len <= sw) else (sw, sw) A = flash_attn_func(Q, K, V, causal = True, window_size = window) @@ -175,7 +175,7 @@ def MistralForCausalLM_fast_forward( if causal_mask is None: bsz, q_len = input_ids.shape - sliding_window = getattr(self.config, "sliding_window") + sliding_window = getattr(self.config, "sliding_window", None) if sliding_window is None or sliding_window <= 0: causal_mask = xformers.attn_bias.LowerTriangularMask() elif q_len <= sliding_window: From c1e7480ac2ad0e5efa05e84fe0997619ccdd86a4 Mon 
Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Jan 2024 23:15:20 +1100 Subject: [PATCH 0114/1088] Revert quantization methods --- unsloth/save.py | 52 ++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index a95fac1bcf..3f279df37a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -106,7 +106,7 @@ def unsloth_save_model( model, tokenizer, save_directory : Union[str, os.PathLike], - merge_method : str = "lora", # ["lora", "16bit", "4bit"] + save_method : str = "lora", # ["lora", "merged_16bit", "merged_4bit"] push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -131,7 +131,7 @@ def unsloth_save_model( maximum_memory_usage : float = 0.9, ): save_pretrained_settings = dict(locals()) - for deletion in ("model", "tokenizer", "merge_method", "temporary_location", "maximum_memory_usage"): + for deletion in ("model", "tokenizer", "save_method", "temporary_location", "maximum_memory_usage"): del save_pretrained_settings[deletion] pass import re @@ -144,8 +144,8 @@ def unsloth_save_model( gc.collect() pass - merge_method = merge_method.lower().replace(" ", "_") - if merge_method != "lora" and merge_method != "16bit" and merge_method != "4bit": + save_method = save_method.lower().replace(" ", "_") + if save_method != "lora" and save_method != "merged_16bit" and save_method != "merged_4bit": raise RuntimeError( "Unsloth: You must select one of 3 options when saving models:\n"\ '"lora" ==> This is the fastest and easiet. 
Just saves LoRA modules.\n'\ @@ -154,7 +154,7 @@ def unsloth_save_model( ) pass - if merge_method == "4bit": + if save_method == "merged_4bit": print("Unsloth: Merging 4bit and LoRA weights to 4bit...") print("This might take 5 minutes...") model = model.merge_and_unload() @@ -169,7 +169,7 @@ def unsloth_save_model( pass save_pretrained_settings["tags"] = tags - if (merge_method == "lora") and push_to_hub: + if (save_method == "lora") and push_to_hub: if token is None: raise RuntimeError( "Unsloth: Pushing to HF requires a token. Pass `token = 'hf_....'`\n"\ @@ -222,7 +222,7 @@ def unsloth_save_model( save_directory = new_save_directory pass - if (merge_method == "4bit") or (merge_method == "lora") or ( + if (save_method == "merged_4bit") or (save_method == "lora") or ( not hasattr(model, "model") or \ not hasattr(model.model, "model") or \ not hasattr(model.model.model, "layers") @@ -246,7 +246,7 @@ def unsloth_save_model( print() print("Unsloth: Saving model...", end = "") - if merge_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") + if save_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") model.save_pretrained(**save_pretrained_settings) print(" Done.") @@ -435,17 +435,17 @@ def install_llama_cpp_blocking(): def save_to_gguf( model_directory : str = "unsloth_finetuned_model", - quantization : str = "fast_quantized", + quantization_method : str = "fast_quantized", _run_installer = None, # Non blocking install of llama.cpp ): from transformers.models.llama.modeling_llama import logger - if quantization == "not_quantized": quantization = "f16" - elif quantization == "fast_quantized": quantization = "q8_0" - elif quantization == "quantized": quantization = "q4_k_m" - elif quantization is None: quantization = "q8_0" + if quantization_method == "not_quantized": quantization_method = "f16" + elif quantization_method == "fast_quantized": quantization_method = "q8_0" + elif quantization_method == 
"quantized": quantization_method = "q4_k_m" + elif quantization_method is None: quantization_method = "q8_0" - if quantization not in ALLOWED_QUANTS.keys(): + if quantization_method not in ALLOWED_QUANTS.keys(): error = f"Unsloth: Quant method = [{quantization}] not supported. Choose from below:\n" for key, value in ALLOWED_QUANTS.items(): error += f"[{key}] => {value}\n" @@ -469,9 +469,9 @@ def save_to_gguf( print("Unsloth: [1] Converting HF into GGUF format. This will take 3 minutes...") first_conversion = "f16" - if quantization == "f32": first_conversion = "f32" - elif quantization == "f16": first_conversion = "f16" - elif quantization == "q8_0": first_conversion = "q8_0" + if quantization_method == "f32": first_conversion = "f32" + elif quantization_method == "f16": first_conversion = "f16" + elif quantization_method == "q8_0": first_conversion = "q8_0" n_cpus = psutil.cpu_count()*2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model @@ -489,7 +489,7 @@ def save_to_gguf( print(f"Unsloth: Conversion completed! Output location: {final_location}") - if quantization != first_conversion: + if quantization_method != first_conversion: old_location = final_location print(f"Unsloth: [2] Converting GGUF 16bit into {quantization}. This will take 20 minutes...") final_location = f"./{model_directory}-unsloth.{quantization.upper()}.gguf" @@ -512,7 +512,7 @@ def unsloth_save_pretrained_merged( self, save_directory : Union[str, os.PathLike], tokenizer = None, - merge_method : str = "16bit", # ["lora", "16bit", "4bit"] + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -530,7 +530,7 @@ def unsloth_save_pretrained_merged( Same as .save_pretrained(...) except 4bit weights are auto converted to float16 with as few overhead as possible. 
- Choose for `merge_method` to be either: + Choose for `save_method` to be either: 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. @@ -555,7 +555,7 @@ def unsloth_push_to_hub_merged( self, repo_id : str, tokenizer = None, - merge_method : str = "16bit", # ["lora", "16bit", "4bit"] + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] = None, @@ -573,7 +573,7 @@ def unsloth_push_to_hub_merged( Same as .push_to_hub(...) except 4bit weights are auto converted to float16 with as few overhead as possible. - Choose for `merge_method` to be either: + Choose for `save_method` to be either: 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. 
@@ -601,7 +601,7 @@ def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], tokenizer = None, - quantization : str = "fast_quantized", + quantization_method : str = "fast_quantized", push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -647,7 +647,7 @@ def unsloth_save_pretrained_gguf( arguments["model"] = self arguments["tokenizer"] = tokenizer arguments["push_to_hub"] = False # We save ourselves - arguments["merge_method"] = "16bit" # Must be 16bit + arguments["save_method"] = "merged_16bit" # Must be 16bit del arguments["self"] del arguments["quantization"] @@ -699,7 +699,7 @@ def unsloth_push_to_hub_gguf( self, repo_id : str, tokenizer = None, - quantization : str = "fast_quantized", + quantization_method : str = "fast_quantized", use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] = None, @@ -746,7 +746,7 @@ def unsloth_push_to_hub_gguf( arguments["tokenizer"] = tokenizer arguments["save_directory"] = repo_id arguments["push_to_hub"] = False # We save ourselves - arguments["merge_method"] = "16bit" # Must be 16bit + arguments["save_method"] = "merged_16bit" # Must be 16bit del arguments["self"] del arguments["repo_id"] del arguments["quantization"] From 8846337e5c8c2f206a4ac8fe6d239f3d1221f7ac Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sat, 20 Jan 2024 02:30:31 +1100 Subject: [PATCH 0115/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 35b6de15ef..ee9647c7c6 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -21,6 +21,7 @@ from transformers import AutoTokenizer from platform import system as platform_system platform_system = platform_system() +import math __version__ = "2024.1" From 31e2d71720e64b854145d7779833b7d2d3d4177e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jan 2024 04:25:06 
+1100 Subject: [PATCH 0116/1088] Quick fixes (#106) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF --- unsloth/models/_utils.py | 10 ++++++++++ unsloth/models/dpo.py | 28 +++++++++++++--------------- unsloth/models/llama.py | 11 +++++------ unsloth/save.py | 10 +++++----- 4 files changed, 33 insertions(+), 26 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ee9647c7c6..a773b13890 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -242,7 +242,17 @@ def LoraLayer_update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": + # We manually check for PEFT + if not hasattr(self, "loftq_init"): + import peft + raise RuntimeError( + f"Unsloth: Your PEFT version of {peft.__version__} does not support LoftQ init.\n"\ + "Please install PEFT 0.7.2 or higher.\n"\ + "You can also install from source: `pip install git+https://github.com/huggingface/peft.git" + ) + pass self.loftq_init(adapter_name) + elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 519914d904..92fde81fd0 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -28,6 +28,7 @@ "logits/rejected", "logits/chosen", ] +set_DPOTrainer_metrics = frozenset(DPOTrainer_metrics) def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): @@ -47,16 +48,7 @@ def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwa if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: values = {"Training Loss": logs["loss"]} for 
metric in DPOTrainer_metrics: - if metric in logs: - values[metric.replace("/", " / ")] = logs[metric] - else: - # Maybe not a DPO Trainer anymore? Redo the tracker - column_names = [self.first_column] + ["Training Loss"] - if args.evaluation_strategy != IntervalStrategy.NO: - column_names.append("Validation Loss") - self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) - break - pass + values[metric.replace("/", " / ")] = logs[metric] pass # First column is necessarily Step since we're not in epoch eval strategy values["Step"] = state.global_step @@ -76,10 +68,16 @@ def NotebookTrainingTracker_write_line(self, values): self.inner_table = [list(values.keys()), list(values.values())] else: columns = self.inner_table[0] - print(columns) - for key in values.keys(): - if key not in columns: - columns.append(key) + new_values = {} + for key, value in values.items(): + lowered = key.lower() + if lowered in set_DPOTrainer_metrics: + new_values[lowered.replace("/", " / ")] = value + else: + new_values[key] = value + pass + values = new_values + self.inner_table[0] = columns if len(self.inner_table) > 1: last_values = self.inner_table[-1] @@ -104,7 +102,7 @@ def NotebookTrainingTracker_write_line(self, values): def PatchDPOTrainer(): # Patch DPO notebook printing - # NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line + NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line from transformers.trainer import DEFAULT_PROGRESS_CALLBACK DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 06b78d77c5..1df1a3e120 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -912,14 +912,13 @@ def get_peft_model( assert(type(use_rslora) is bool) if use_rslora: if not SUPPORTS_RSLORA: - # We do it ourselves! 
- new_alpha = lora_alpha / (r**0.5) + # We manually check for PEFT import peft - logger.warning_once( - f"Unsloth: Your PEFT version of {peft.__version__} (0.7.2 needed) does not support `use_rslora` natively.\n"\ - f"But, we do it ourselves by setting `alpha = {new_alpha}.`" + raise RuntimeError( + f"Unsloth: Your PEFT version of {peft.__version__} does not support `use_rslora`.\n"\ + "Please install PEFT 0.7.2 or higher.\n"\ + "You can also install from source: `pip install git+https://github.com/huggingface/peft.git" ) - lora_alpha = new_alpha pass pass diff --git a/unsloth/save.py b/unsloth/save.py index 3f279df37a..543ffd80d0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -555,7 +555,7 @@ def unsloth_push_to_hub_merged( self, repo_id : str, tokenizer = None, - save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] = None, @@ -601,7 +601,7 @@ def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], tokenizer = None, - quantization_method : str = "fast_quantized", + quantization_method : str = "fast_quantized", push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -649,7 +649,7 @@ def unsloth_save_pretrained_gguf( arguments["push_to_hub"] = False # We save ourselves arguments["save_method"] = "merged_16bit" # Must be 16bit del arguments["self"] - del arguments["quantization"] + del arguments["quantization_method"] # Non blocking install GGUF first git_clone = install_llama_cpp_clone_non_blocking() @@ -699,7 +699,7 @@ def unsloth_push_to_hub_gguf( self, repo_id : str, tokenizer = None, - quantization_method : str = "fast_quantized", + quantization_method : str = "fast_quantized", use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : Optional[bool] 
= None, @@ -749,7 +749,7 @@ def unsloth_push_to_hub_gguf( arguments["save_method"] = "merged_16bit" # Must be 16bit del arguments["self"] del arguments["repo_id"] - del arguments["quantization"] + del arguments["quantization_method"] # Non blocking install GGUF first git_clone = install_llama_cpp_clone_non_blocking() From abb462be71e8cf01ad989dca0efaa17441113651 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jan 2024 23:23:00 +1100 Subject: [PATCH 0117/1088] Hotfix for Jan 2024 Release (#110) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF * Fix quantization_method * Fix quantization_config * patch model * Update llama.py * Update llama.py * Update llama.py * Update save.py * Update save.py * tokenizer_save_settings * Update save.py * quantization and loftq * Update save.py * Update llama.py * Update save.py --- unsloth/models/_utils.py | 22 +++-- unsloth/models/llama.py | 47 ++++++++--- unsloth/models/loader.py | 3 + unsloth/save.py | 174 ++++++++++++++++++++++++--------------- 4 files changed, 158 insertions(+), 88 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a773b13890..8ddf4d60da 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -54,6 +54,11 @@ ] +IGNORED_TOKENIZER_CHECKING = frozenset(( + "CodeLlamaTokenizerFast", + "CodeLlamaTokenizer", +)) + def prepare_model_for_kbit_training( model : Any, use_gradient_checkpointing : bool = True, @@ -74,9 +79,13 @@ def prepare_model_for_kbit_training( future Pytorch versions. """ - # Freeze all parameters - for param in model.parameters(): - param.requires_grad_(False) + # Freeze all parameters except LoRA + for name, param in model.named_parameters(): + if ".lora_A." in name or ".lora_B." 
in name: + param.requires_grad_(True) + else: + param.requires_grad_(False) + pass if use_gradient_checkpointing: model.gradient_checkpointing_enable() @@ -115,11 +124,6 @@ def patch_tokenizer(model, tokenizer): pass -IGNORED_TOKENIZER_CHECKING = frozenset(( - "CodeLlamaTokenizerFast", - "CodeLlamaTokenizer", -)) - def check_tokenizer( model, tokenizer, @@ -252,7 +256,7 @@ def LoraLayer_update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init ) pass self.loftq_init(adapter_name) - + elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1df1a3e120..bc7aeb3e59 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -844,9 +844,11 @@ def get_peft_model( max_seq_length = 2048, # not used anymore use_rslora = False, init_lora_weights = True, - loftq_config = None, + loftq_config = {}, **kwargs, ): + transformers_set_seed(random_state) + if isinstance(model, PeftModelForCausalLM): raise TypeError( "Unsloth: Your model already has LoRA adapters. No need to run this again!" 
@@ -892,7 +894,7 @@ def get_peft_model( ) pass - if loftq_config is None: + if loftq_config == {}: from peft import LoftQConfig logger.warning_once( f"Unsloth: init_lora_weights = `loftq` is set, but `loftq_config` is None.\n"\ @@ -922,8 +924,6 @@ def get_peft_model( pass pass - transformers_set_seed(random_state) - accepted_modules = frozenset(("q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",),) model.config.update({"unsloth_version" : __version__}) @@ -949,22 +949,40 @@ def get_peft_model( if not SUPPORTS_RSLORA: del arguments["use_rslora"] lora_config = LoraConfig(**arguments) + model = _get_peft_model(model, lora_config) + + model = FastLlamaModel.patch_peft_model(model, use_gradient_checkpointing) + return model + pass + + + @staticmethod + def patch_peft_model( + model, + use_gradient_checkpointing = True, + ): + if not isinstance(model, PeftModelForCausalLM): + raise TypeError( + "Unsloth: Your model needs to call `.get_peft_model` first!" + ) + pass model = prepare_model_for_kbit_training( model, use_gradient_checkpointing = use_gradient_checkpointing, use_reentrant = True, ) - model = _get_peft_model(model, lora_config) # Fix up config for transformers uploading PEFT - name = model.peft_config["default"].base_model_name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.peft_config["default"].base_model_name_or_path = name + for active_adapter in model.peft_config.keys(): + name = model.peft_config[active_adapter].base_model_name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.peft_config[active_adapter].base_model_name_or_path = name + pass + # Add revision to enable future fast inference paths + model.peft_config[active_adapter].revision = f"unsloth" pass - # Add revision to enable future fast inference paths - model.peft_config["default"].revision = f"unsloth" # Do patching 
n_mlp = 0 @@ -972,6 +990,13 @@ def get_peft_model( n_o = 0 import types + active_adapter = model.active_adapters[0] if \ + hasattr(model, "active_adapters") else model.active_adapter + + # Get dropout and bias + lora_dropout = model.peft_config[active_adapter].lora_dropout + bias = model.peft_config[active_adapter].bias + if lora_dropout == 0 and bias == "none": for idx, layer in enumerate(model.model.model.layers): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d812d3625e..5a7f84404f 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -71,6 +71,7 @@ def from_pretrained( device_map = "sequential", rope_scaling = None, fix_tokenizer = True, + use_gradient_checkpointing = True, *args, **kwargs, ): old_model_name = model_name @@ -139,6 +140,8 @@ def from_pretrained( if is_peft: # Now add PEFT adapters model = PeftModel.from_pretrained(model, old_model_name) + # Patch it as well! + model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) pass return model, tokenizer pass diff --git a/unsloth/save.py b/unsloth/save.py index 543ffd80d0..5d3a9a85db 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -221,6 +221,17 @@ def unsloth_save_model( save_pretrained_settings["save_directory"] = new_save_directory save_directory = new_save_directory pass + + # Tokenizer has different saving arguments + tokenizer_save_settings = \ + { + "save_directory" : save_pretrained_settings["save_directory"], + "legacy_format" : None, + "filename_prefix" : None, + "push_to_hub" : save_pretrained_settings["push_to_hub"], + "private" : save_pretrained_settings["private"], + "token" : save_pretrained_settings["token"], + } if (save_method == "merged_4bit") or (save_method == "lora") or ( not hasattr(model, "model") or \ @@ -240,7 +251,7 @@ def unsloth_save_model( if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") - tokenizer.save_pretrained(**save_pretrained_settings) + 
tokenizer.save_pretrained(**tokenizer_save_settings) print(" Done.") else: print() @@ -360,13 +371,34 @@ def unsloth_save_model( if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") - tokenizer.save_pretrained(**save_pretrained_settings) + tokenizer.save_pretrained(**tokenizer_save_settings) print(" Done.") else: print() print("Unsloth: Saving model... This might take 5 minutes for Llama-7b...") + + # Since merged, edit quantization_config + old_config = model.config + new_config = model.config.to_dict() + if "quantization_config" in new_config: + del new_config["quantization_config"] + original_model = model + new_config = type(model.config).from_dict(new_config) + while hasattr(original_model, "model"): + original_model = original_model.model + original_model.config = new_config + model.config = new_config + + # Save! model.model.save_pretrained(**save_pretrained_settings) + + # Revert config back + original_model = model + while hasattr(original_model, "model"): + original_model = original_model.model + original_model.config = old_config + model.config = old_config print("Done.") save_pretrained_settings["state_dict"] = None @@ -446,7 +478,7 @@ def save_to_gguf( elif quantization_method is None: quantization_method = "q8_0" if quantization_method not in ALLOWED_QUANTS.keys(): - error = f"Unsloth: Quant method = [{quantization}] not supported. Choose from below:\n" + error = f"Unsloth: Quant method = [{quantization_method}] not supported. 
Choose from below:\n" for key, value in ALLOWED_QUANTS.items(): error += f"[{key}] => {value}\n" raise RuntimeError(error) @@ -456,7 +488,7 @@ def save_to_gguf( f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization} will take 20 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 20 minutes.\n"\ f' "-____-" In total, you will have to wait around 26 minutes.\n' print(print_info) @@ -491,11 +523,11 @@ def save_to_gguf( if quantization_method != first_conversion: old_location = final_location - print(f"Unsloth: [2] Converting GGUF 16bit into {quantization}. This will take 20 minutes...") - final_location = f"./{model_directory}-unsloth.{quantization.upper()}.gguf" + print(f"Unsloth: [2] Converting GGUF 16bit into {quantization_method}. This will take 20 minutes...") + final_location = f"./{model_directory}-unsloth.{quantization_method.upper()}.gguf" command = f"./llama.cpp/quantize {old_location} "\ - f"{final_location} {quantization} {n_cpus}" + f"{final_location} {quantization_method} {n_cpus}" with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: @@ -597,6 +629,65 @@ def unsloth_push_to_hub_merged( pass +def upload_gguf_to_huggingface(save_directory, file_location, token, model_type): + print("Unsloth: Uploading GGUF to Huggingface Hub...") + + # Check for username + if "/" not in save_directory: + from huggingface_hub import whoami + try: save_directory = f"{save_directory}/{whoami()['name']}" + except: pass + pass + + from huggingface_hub import create_repo + create_repo( + repo_id = save_directory, + token = token, + repo_type = "model", + exist_ok = True, + ) + + # Create model card + from huggingface_hub import ModelCard, ModelCardData + card_data = ModelCardData( + language 
= "en", + license = "apache-2.0", + library = "unsloth", + tags = ["gguf", "unsloth", "text-generation-inference", "transformers",], + ) + + content = f"\n"\ + f"---\n"\ + f"{ card_data.to_yaml() }\n"\ + f"---\n"\ + f"\n"\ + f"# My Model Card for {file_location}\n"\ + f"\n"\ + f"\nThis {model_type.title()} model was trained by [Unsloth](https://github.com/unslothai/unsloth) then saved to GGUF.\n"\ + f"\n" + + card = ModelCard(content) + card.push_to_hub(save_directory, token = token) + + # Now upload file + from huggingface_hub import HfApi + hf_api = HfApi(token = token) + + if "/" in file_location: + uploaded_location = file_location[file_location.rfind("/")+1:] + else: + uploaded_location = file_location + pass + + hf_api.upload_file( + path_or_fileobj = file_location, + path_in_repo = uploaded_location, + repo_id = save_directory, + repo_type = "model", + ) +pass + + def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], @@ -619,7 +710,7 @@ def unsloth_save_pretrained_gguf( Same as .save_pretrained(...) except 4bit weights are auto converted to float16 then converted to GGUF / llama.cpp format. - Choose for `quantization` to be: + Choose for `quantization_method` to be: "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", "quantized" : "Recommended. Slow conversion. 
Fast inference, small files.", @@ -662,36 +753,9 @@ def unsloth_save_pretrained_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization, makefile) - - # And save to HF - if push_to_hub: - print("Unsloth: Uploading GGUF to Huggingface Hub...") - - from huggingface_hub import create_repo - create_repo( - repo_id = save_directory, - token = token, - repo_type = "model", - exist_ok = True, - ) - - from huggingface_hub import HfApi - hf_api = HfApi(token = token) - - if "/" in file_location: - uploaded_location = file_location[file_location.rfind("/")+1:] - else: - uploaded_location = file_location - pass - - hf_api.upload_file( - path_or_fileobj = file_location, - path_in_repo = uploaded_location, - repo_id = save_directory, - repo_type = "model", - ) - pass + file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + model_type = self.config.model_type + if push_to_hub: upload_gguf_to_huggingface(new_save_directory, file_location, token, model_type) pass @@ -717,7 +781,7 @@ def unsloth_push_to_hub_gguf( Same as .push_to_hub(...) except 4bit weights are auto converted to float16 then converted to GGUF / llama.cpp format. - Choose for `quantization` to be: + Choose for `quantization_method` to be: "not_quantized" : "Recommended. Fast conversion. Slow inference, big files.", "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", "quantized" : "Recommended. Slow conversion. 
Fast inference, small files.", @@ -762,35 +826,9 @@ def unsloth_push_to_hub_gguf( gc.collect() python_install.wait() - file_location = save_to_gguf(new_save_directory, quantization, makefile) - - # Save to hub - print("Unsloth: Uploading GGUF to Huggingface Hub...") - - from huggingface_hub import create_repo - create_repo( - repo_id = save_directory, - private = private, - token = token, - repo_type = "model", - exist_ok = True, - ) - - from huggingface_hub import HfApi - hf_api = HfApi(token = token) - - if "/" in file_location: - uploaded_location = file_location[file_location.rfind("/")+1:] - else: - uploaded_location = file_location - pass - - hf_api.upload_file( - path_or_fileobj = file_location, - path_in_repo = uploaded_location, - repo_id = save_directory, - repo_type = "model", - ) + file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + model_type = self.config.model_type + upload_gguf_to_huggingface(new_save_directory, file_location, token, model_type) pass From be4b97e7d89074b6dd1d2e984fa429051d328192 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 21 Jan 2024 03:43:49 +1100 Subject: [PATCH 0118/1088] Fixed saving! 
(#113) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF * Fix quantization_method * Fix quantization_config * patch model * Update llama.py * Update llama.py * Update llama.py * Update save.py * Update save.py * tokenizer_save_settings * Update save.py * quantization and loftq * Update save.py * Update llama.py * Update save.py * upload_to_huggingface * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py --- unsloth/save.py | 156 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 103 insertions(+), 53 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 5d3a9a85db..c266fdfcee 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -208,6 +208,15 @@ def unsloth_save_model( return save_directory pass + # Update model tag + username = "" + if push_to_hub: + username = upload_to_huggingface( + model, save_directory, token, + "finetuned", "trl", file_location = None, + ) + pass + # If push_to_hub, we must remove the .../ part of a repo if push_to_hub and "/" in save_directory: @@ -331,6 +340,7 @@ def unsloth_save_model( if (torch.cuda.memory_allocated() + W.nbytes) < max_vram: # Save to GPU memory state_dict[name] = W + # [TODO] Saving to RAM seems to leak memory??? 
# elif (max_ram - W.nbytes) > 0: # # Save to CPU memory # logger.warning_once(f"We will save to RAM and not VRAM now.") @@ -401,27 +411,32 @@ def unsloth_save_model( model.config = old_config print("Done.") + # Print location + if push_to_hub: + print(f"Saved to https://huggingface.co/{username}/{save_directory.lstrip('/')}") + pass + save_pretrained_settings["state_dict"] = None - # for j, (key, value) in enumerate(state_dict.items()): - # state_dict[key] = None - # if j % 10 == 0: - # torch.cuda.empty_cache() - # gc.collect() - # pass - # pass - # state_dict = None - # del state_dict - # torch.cuda.empty_cache() - # gc.collect() + for j, (key, value) in enumerate(state_dict.items()): + state_dict[key] = None + if j % 10 == 0: + torch.cuda.empty_cache() + gc.collect() + pass + pass + state_dict = None + del state_dict + torch.cuda.empty_cache() + gc.collect() # Remove temporary location import shutil shutil.rmtree(temporary_location) - # for _ in range(3): - # torch.cuda.empty_cache() - # gc.collect() + for _ in range(3): + torch.cuda.empty_cache() + gc.collect() return save_directory pass @@ -629,14 +644,44 @@ def unsloth_push_to_hub_merged( pass -def upload_gguf_to_huggingface(save_directory, file_location, token, model_type): - print("Unsloth: Uploading GGUF to Huggingface Hub...") +MODEL_CARD = \ +"""--- +base_model: {base_model} +tags: +- text-generation-inference +- transformers +- unsloth +- {model_type} +- {extra} +license: apache-2.0 +language: +- en +--- + +# Uploaded {method} model + +- **Developed by:** {username} +- **License:** apache-2.0 +- **Finetuned from model :** {base_model} + +This {model_type} model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. 
+ +[](https://github.com/unslothai/unsloth) +""" + +def upload_to_huggingface(model, save_directory, token, method, extra = "", file_location = None): # Check for username + username = "" if "/" not in save_directory: from huggingface_hub import whoami - try: save_directory = f"{save_directory}/{whoami()['name']}" - except: pass + try: + username = whoami()['name'] + save_directory = f"{save_directory}/{username}" + except: + raise RuntimeError(f"Unsloth: {save_directory} is not a Huggingface directory.") + else: + username = save_directory.split("/")[0] pass from huggingface_hub import create_repo @@ -648,43 +693,36 @@ def upload_gguf_to_huggingface(save_directory, file_location, token, model_type) ) # Create model card - from huggingface_hub import ModelCard, ModelCardData - card_data = ModelCardData( - language = "en", - license = "apache-2.0", - library = "unsloth", - tags = ["gguf", "unsloth", "text-generation-inference", "transformers",], + from huggingface_hub import ModelCard + content = MODEL_CARD.format( + username = username, + base_model = model.config._name_or_path, + model_type = model.config.model_type, + method = "", + extra = extra, ) - - content = f"\n"\ - f"---\n"\ - f"{ card_data.to_yaml() }\n"\ - f"---\n"\ - f"\n"\ - f"# My Model Card for {file_location}\n"\ - f"\n"\ - f"\nThis {model_type.title()} model was trained by [Unsloth](https://github.com/unslothai/unsloth) then saved to GGUF.\n"\ - f"\n" - card = ModelCard(content) card.push_to_hub(save_directory, token = token) - # Now upload file - from huggingface_hub import HfApi - hf_api = HfApi(token = token) + if file_location is not None: + # Now upload file + from huggingface_hub import HfApi + hf_api = HfApi(token = token) - if "/" in file_location: - uploaded_location = file_location[file_location.rfind("/")+1:] - else: - uploaded_location = file_location - pass + if "/" in file_location: + uploaded_location = file_location[file_location.rfind("/")+1:] + else: + uploaded_location = 
file_location + pass - hf_api.upload_file( - path_or_fileobj = file_location, - path_in_repo = uploaded_location, - repo_id = save_directory, - repo_type = "model", - ) + hf_api.upload_file( + path_or_fileobj = file_location, + path_in_repo = uploaded_location, + repo_id = save_directory, + repo_type = "model", + ) + pass + return username pass @@ -754,8 +792,15 @@ def unsloth_save_pretrained_gguf( gc.collect() file_location = save_to_gguf(new_save_directory, quantization_method, makefile) - model_type = self.config.model_type - if push_to_hub: upload_gguf_to_huggingface(new_save_directory, file_location, token, model_type) + + if push_to_hub: + print("Unsloth: Uploading GGUF to Huggingface Hub...") + username = upload_to_huggingface( + self, model, new_save_directory, token, + "GGUF converted", "gguf", file_location, + ) + print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") + pass pass @@ -827,8 +872,13 @@ def unsloth_push_to_hub_gguf( python_install.wait() file_location = save_to_gguf(new_save_directory, quantization_method, makefile) - model_type = self.config.model_type - upload_gguf_to_huggingface(new_save_directory, file_location, token, model_type) + + print("Unsloth: Uploading GGUF to Huggingface Hub...") + username = upload_to_huggingface( + self, model, new_save_directory, token, + "GGUF converted", "gguf", file_location, + ) + print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") pass From a7bd8d119c16433de4f8b6a36903ef7131f225e5 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 21 Jan 2024 04:13:03 +1100 Subject: [PATCH 0119/1088] Update save.py --- unsloth/save.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index c266fdfcee..36a61aa5aa 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -796,7 +796,7 @@ def unsloth_save_pretrained_gguf( if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = 
upload_to_huggingface( - self, model, new_save_directory, token, + self, new_save_directory, token, "GGUF converted", "gguf", file_location, ) print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") @@ -875,7 +875,7 @@ def unsloth_push_to_hub_gguf( print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( - self, model, new_save_directory, token, + self, new_save_directory, token, "GGUF converted", "gguf", file_location, ) print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") From 5145a61e69ab9b3035465f649e1c1e5aae749f8f Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 21 Jan 2024 04:21:54 +1100 Subject: [PATCH 0120/1088] Update save.py --- unsloth/save.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 36a61aa5aa..7f2cf88128 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -544,8 +544,9 @@ def save_to_gguf( command = f"./llama.cpp/quantize {old_location} "\ f"{final_location} {quantization_method} {n_cpus}" - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: - for line in sp.stdout: + # quantize uses stderr + with subprocess.Popen(command, shell = True, stderr = subprocess.PIPE, bufsize = 1) as sp: + for line in sp.stderr: print(line.decode("utf-8"), flush = True, end = "") pass print(f"Unsloth: Conversion completed! 
Output location: {final_location}") From 57a5b5a49da588b1db8e9a988cc985dc20393d34 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 21 Jan 2024 05:00:37 +1100 Subject: [PATCH 0121/1088] Update save.py --- unsloth/save.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 7f2cf88128..a3d5fe9b0c 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -674,6 +674,7 @@ def unsloth_push_to_hub_merged( def upload_to_huggingface(model, save_directory, token, method, extra = "", file_location = None): # Check for username username = "" + save_directory = save_directory.lstrip("./") if "/" not in save_directory: from huggingface_hub import whoami try: @@ -797,10 +798,10 @@ def unsloth_save_pretrained_gguf( if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( - self, new_save_directory, token, + self, save_directory, token, "GGUF converted", "gguf", file_location, ) - print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") + print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/.')}") pass pass @@ -876,7 +877,7 @@ def unsloth_push_to_hub_gguf( print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( - self, new_save_directory, token, + self, repo_id, token, "GGUF converted", "gguf", file_location, ) print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") From b370c9c8aacc31a7845404566dd95dfa8c0e3bac Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 21 Jan 2024 22:20:22 +1100 Subject: [PATCH 0122/1088] Hotfix (#118) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --- unsloth/models/llama.py | 39 +++++++++++----------- unsloth/models/mistral.py | 
4 +-- unsloth/save.py | 69 ++++++++++++++++++++++++++++++--------- 3 files changed, 74 insertions(+), 38 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bc7aeb3e59..55527dbca9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -144,7 +144,7 @@ def LlamaAttention_fast_forward_inference( A = torch.matmul(A, Vnn) A = A.transpose(1, 2) A = A.reshape(bsz, 1, self.hidden_size) - A = original_apply_o(self, A) + A = self.o_proj(A) return A, (Kn, Vn) pass @@ -187,10 +187,9 @@ def LlamaAttention_fast_forward( ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() - Q, K, V = self.apply_qkv(self, hidden_states) # Check for inference - if use_cache and past_key_value is not None and q_len == 1: + if past_key_value is not None and q_len == 1: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -206,6 +205,7 @@ def LlamaAttention_fast_forward( head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) + Q, K, V = self.apply_qkv(self, hidden_states) Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) @@ -304,11 +304,10 @@ def LlamaDecoderLayer_fast_forward( past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ bsz, q_len, hd = hidden_states.size() - - if (self.training): + if (past_key_value is not None and q_len == 1): # Self Attention residual = hidden_states - hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, causal_mask=causal_mask, @@ -319,17 +318,16 @@ def LlamaDecoderLayer_fast_forward( use_cache=use_cache, padding_mask=padding_mask, ) - 
hidden_states = residual + hidden_states + hidden_states += residual # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = residual + hidden_states + hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_mlp_inference(self.mlp, hidden_states) + hidden_states += residual else: - # Self Attention residual = hidden_states - hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, causal_mask=causal_mask, @@ -340,13 +338,13 @@ def LlamaDecoderLayer_fast_forward( use_cache=use_cache, padding_mask=padding_mask, ) - hidden_states += residual + hidden_states = residual + hidden_states # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) - hidden_states = fast_mlp_inference(self.mlp, hidden_states) - hidden_states += residual + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states pass outputs = (hidden_states,) @@ -445,7 +443,7 @@ def LlamaModel_fast_forward( # Ignore attention_mask if attention_mask is None: padding_mask = None - elif self.training: + elif True:#self.training: attention_mask = None padding_mask = None else: @@ -524,10 +522,11 @@ def custom_forward(*inputs): all_self_attns += (layer_outputs[1],) pass - if (self.training): - hidden_states = fast_rms_layernorm(self.norm, hidden_states) - else: + bsz, q_len, hd = hidden_states.size() + if (past_key_value is not None and q_len == 1): hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) + else: + hidden_states = 
fast_rms_layernorm(self.norm, hidden_states) pass # add hidden states from the last decoder layer diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 2c85129010..68add4b239 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -47,10 +47,9 @@ def MistralAttention_fast_forward( ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() - Q, K, V = self.apply_qkv(self, hidden_states) # Check for inference - if use_cache and past_key_value is not None and q_len == 1: + if past_key_value is not None and q_len == 1: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -66,6 +65,7 @@ def MistralAttention_fast_forward( head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) + Q, K, V = self.apply_qkv(self, hidden_states) Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) diff --git a/unsloth/save.py b/unsloth/save.py index a3d5fe9b0c..64342d91de 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -94,8 +94,9 @@ def fast_save_pickle(shard, name): torch.save( shard, name, - pickle_module = pickle, - pickle_protocol = pickle.HIGHEST_PROTOCOL, + # HIGHEST_PROTOCOL seems to not work with Pytorch! 
+ # pickle_module = pickle, + # pickle_protocol = pickle.HIGHEST_PROTOCOL, ) return pass @@ -783,12 +784,27 @@ def unsloth_save_pretrained_gguf( del arguments["quantization_method"] # Non blocking install GGUF first - git_clone = install_llama_cpp_clone_non_blocking() - python_install = install_python_non_blocking(["gguf", "protobuf"]) - git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() - new_save_directory = unsloth_save_model(**arguments) - python_install.wait() + if not os.path.exists("llama.cpp"): + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) + python_install.wait() + else: + try: + new_save_directory = unsloth_save_model(**arguments) + makefile = None + except: + # Retry by recloning llama.cpp + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) + python_install.wait() + pass + pass for _ in range(3): gc.collect() @@ -801,7 +817,10 @@ def unsloth_save_pretrained_gguf( self, save_directory, token, "GGUF converted", "gguf", file_location, ) - print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/.')}") + link = f"{username}/{new_save_directory.lstrip('/.')}" \ + if username not in new_save_directory else \ + new_save_directory.lstrip('/.') + print(f"Saved to https://huggingface.co/{link}") pass pass @@ -863,16 +882,31 @@ def unsloth_push_to_hub_gguf( del arguments["quantization_method"] # Non blocking install GGUF first - git_clone = install_llama_cpp_clone_non_blocking() - python_install = install_python_non_blocking(["gguf", "protobuf"]) - git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() - new_save_directory = 
unsloth_save_model(**arguments) + if not os.path.exists("llama.cpp"): + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) + python_install.wait() + else: + try: + new_save_directory = unsloth_save_model(**arguments) + makefile = None + except: + # Retry by recloning llama.cpp + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory = unsloth_save_model(**arguments) + python_install.wait() + pass + pass for _ in range(3): gc.collect() - python_install.wait() file_location = save_to_gguf(new_save_directory, quantization_method, makefile) print("Unsloth: Uploading GGUF to Huggingface Hub...") @@ -880,7 +914,10 @@ def unsloth_push_to_hub_gguf( self, repo_id, token, "GGUF converted", "gguf", file_location, ) - print(f"Saved to https://huggingface.co/{username}/{new_save_directory.lstrip('/')}") + link = f"{username}/{new_save_directory.lstrip('/.')}" \ + if username not in new_save_directory else \ + new_save_directory.lstrip('/.') + print(f"Saved to https://huggingface.co/{link}") pass From a833f403462e9cfc1f96b3b84d9da15d7d8db5ee Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jan 2024 03:55:24 +1100 Subject: [PATCH 0123/1088] 2-4x faster native HF inference (#119) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update 
utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving --- unsloth/kernels/__init__.py | 2 +- unsloth/kernels/fast_lora.py | 20 +--- unsloth/kernels/rope_embedding.py | 10 +- unsloth/kernels/utils.py | 110 +++++++++++++++++++- unsloth/models/llama.py | 166 +++++++++++++++++++++++------- unsloth/models/mapper.py | 6 ++ unsloth/models/mistral.py | 55 ++++++---- unsloth/save.py | 13 ++- 8 files changed, 292 insertions(+), 90 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 5de19c86c5..9b861f4934 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -22,4 +22,4 @@ apply_lora_qkv, apply_lora_o, ) -from .utils import fast_dequantize, QUANT_STATE +from .utils import fast_dequantize, QUANT_STATE, fast_linear_forward diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 5f48f316ec..b70e6e4fee 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -13,28 +13,10 @@ # limitations under the License. 
import torch -from .utils import fast_dequantize, QUANT_STATE +from .utils import fast_dequantize, QUANT_STATE, get_lora_parameters from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel -def get_lora_parameters(proj): - # For DPO or disabled adapters - base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) - W = base_layer.weight - - if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: - return W, QUANT_STATE(W), None, None, None - pass - - active_adapter = proj.active_adapters[0] if \ - hasattr(proj, "active_adapters") else proj.active_adapter - A = proj.lora_A [active_adapter].weight - B = proj.lora_B [active_adapter].weight - s = proj.scaling[active_adapter] - return W, QUANT_STATE(W), A, B, s -pass - - def matmul_lora(X, W, W_quant, A, B, s, out = None): dtype = X.dtype W = fast_dequantize(W.t(), W_quant) diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 2bf7c1b272..a9527520ab 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -134,8 +134,9 @@ def forward(ctx, Q, cos, sin, position_ids): half = Q.shape[-1]//2 RH_Q = torch.cat((-Q[..., half:], Q[..., :half]), dim = -1) Q *= cos - RH_Q *= sin - Q += RH_Q + Q.addcmul_(RH_Q, sin) + # RH_Q *= sin + # Q += RH_Q ctx.save_for_backward(cos, sin) return Q pass @@ -147,8 +148,9 @@ def backward(ctx, dY): half = dY.shape[-1]//2 RH_dY = torch.cat((dY[..., half:], -dY[..., :half]), dim = -1) dY *= cos - RH_dY *= sin - dY += RH_dY + dY.addcmul_(RH_dY, sin) + # RH_dY *= sin + # dY += RH_dY return dY, None, None, None pass pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 8a7722fabd..7c693a6c4e 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -33,14 +33,36 @@ def calculate_settings(n): get_ptr = bnb.functional.get_ptr import ctypes import torch -cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 -cdequantize_blockwise_fp16_nf4 
= bnb.functional.lib.cdequantize_blockwise_fp16_nf4 -cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 +cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 +cdequantize_blockwise_fp16_nf4 = bnb.functional.lib.cdequantize_blockwise_fp16_nf4 +cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 +cgemm_4bit_inference_naive_fp16 = bnb.functional.lib.cgemm_4bit_inference_naive_fp16 +cgemm_4bit_inference_naive_bf16 = bnb.functional.lib.cgemm_4bit_inference_naive_bf16 + def QUANT_STATE(W): return getattr(W, "quant_state", None) pass + +def get_lora_parameters(proj): + # For DPO or disabled adapters + base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) + W = base_layer.weight + + if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: + return W, QUANT_STATE(W), None, None, None + pass + + active_adapter = proj.active_adapters[0] if \ + hasattr(proj, "active_adapters") else proj.active_adapter + A = proj.lora_A [active_adapter].weight + B = proj.lora_B [active_adapter].weight + s = proj.scaling[active_adapter] + return W, QUANT_STATE(W), A, B, s +pass + + def fast_dequantize(W, quant_state = None, out = None): if quant_state is None: return W if type(quant_state) is not list: @@ -90,3 +112,85 @@ def fast_dequantize(W, quant_state = None, out = None): is_transposed = (True if W.shape[0] == 1 else False) return out.t() if is_transposed else out pass + + +def fast_gemv(X, W, quant_state, out = None, out_W = None): + quant_state = W.quant_state + bsz = 1 + q_len = 1 + hd = X.shape[0] + + if type(quant_state) is not list: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + stats = quant_state.code + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 
= state2.blocksize + else: + absmax, shape, dtype, blocksize, compressed_stats, quant_type, stats = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass + bout = shape[0] + if out is None: out = torch.empty(bout, dtype = dtype, device = "cuda") + else: assert(out.shape[0] == bout) + + n = 1 + m = shape[0] + k = shape[1] + lda = shape[0] + ldc = shape[0] + ldb = (X.shape[-1]+1)//2 + m = ctypes.c_int32(m) + n = ctypes.c_int32(n) + k = ctypes.c_int32(k) + lda = ctypes.c_int32(lda) + ldb = ctypes.c_int32(ldb) + ldc = ctypes.c_int32(ldc) + + df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda") + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), + ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), + ) + df += offset + absmax = df + + fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ + cgemm_4bit_inference_naive_bf16 + + ptr_W = get_ptr(W) + ptr_absmax = get_ptr(absmax) + ptr_stats = get_ptr(stats) + blocksize = ctypes.c_int32(blocksize) + + fx(m, n, k, get_ptr(X), ptr_W, ptr_absmax, ptr_stats, get_ptr(out), + lda, ldb, ldc, blocksize) + + return out +pass + + +def fast_linear_forward(proj, X, temp_lora = None, out = None): + W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) + out = fast_gemv(X, W, W_quant, out = out) + if lora_A is not None: + + # Save LoRAs for inference to stop data movement costs + if not hasattr(lora_A, "_fast_lora"): + dtype = X.dtype + lora_A._fast_lora = lora_A.to(dtype).t() + lora_B._fast_lora = lora_B.to(dtype) + pass + + temp_lora = torch.matmul(X, lora_A._fast_lora, out = temp_lora) + out.addmv_(lora_B._fast_lora, temp_lora, alpha = lora_S) + pass + return out +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 55527dbca9..40074ad043 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -68,6 +68,7 @@ def original_apply_o(self, X): pass +from math import sqrt 
as math_sqrt def LlamaAttention_fast_forward_inference( self, hidden_states: torch.Tensor, @@ -102,60 +103,104 @@ def LlamaAttention_fast_forward_inference( This means we can pass in a row of Q, but we need to remember K and V, which are called the KV cache. """ - Xn = hidden_states - bsz, _, _ = hidden_states.size() - K1, V1 = past_key_value - n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads head_dim = self.head_dim - assert(n_kv_heads * n_groups == n_heads) - - Qn = self.q_proj(Xn) - Kn = self.k_proj(Xn) - Vn = self.v_proj(Xn) - Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) - Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) - Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + # assert(n_kv_heads * n_groups == n_heads) - kv_seq_len = K1.shape[-2] + 1 - cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) - Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + Xn = hidden_states.view(self.hidden_size) + K1, V1 = past_key_value + seq_len = K1.shape[-2] + K1 = K1.view(n_kv_heads, seq_len, head_dim) + V1 = V1.view(n_kv_heads, seq_len, head_dim) + + # LoRA or general matrix multiplication + dtype = Xn.dtype + # Qn = self.q_proj(Xn) + # Kn = self.k_proj(Xn) + # Vn = self.v_proj(Xn) + Qn = fast_linear_forward(self.q_proj, Xn) + Kn = fast_linear_forward(self.k_proj, Xn) + Vn = fast_linear_forward(self.v_proj, Xn) + + # Qn = Qn.view(1, 1, n_heads, head_dim).transpose(1, 2) + # Kn = Kn.view(1, 1, n_kv_heads, head_dim).transpose(1, 2) + # Vn = Vn.view(1, 1, n_kv_heads, head_dim).transpose(1, 2) + Qn = Qn.view(n_heads, 1, head_dim) + Kn = Kn.view(n_kv_heads, 1, head_dim) + Vn = Vn.view(n_kv_heads, 1, head_dim) + + # kv_seq_len = K1.shape[-2] + 1 + # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + cos = self.rotary_emb.cos_cached[seq_len] + sin = self.rotary_emb.sin_cached[seq_len] + h = head_dim // 2 + + RH_Q = 
torch.empty((n_heads, 1, head_dim), dtype = dtype, device = "cuda") + RH_Q[:, :, :h] = Qn[:, :, h:]; RH_Q[:, :, h:] = Qn[:, :, :h]; torch.neg(RH_Q[:, :, :h], out = RH_Q[:, :, :h]); + Qn *= cos; Qn.addcmul_(RH_Q, sin); + + RH_K = RH_Q[:n_kv_heads, :, :] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda") + RH_K[:, :, :h] = Kn[:, :, h:]; RH_K[:, :, h:] = Kn[:, :, :h]; torch.neg(RH_K[:, :, :h], out = RH_K[:, :, :h]); + Kn *= cos; Kn.addcmul_(RH_K, sin); # New KV cache - Kn = torch.cat([K1, Kn], dim = 2) - Vn = torch.cat([V1, Vn], dim = 2) + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + Kn = torch.cat([K1, Kn], dim = 1) + Vn = torch.cat([V1, Vn], dim = 1) # Grouped query attention if n_groups != 1: - _, _, cached_len, _ = Kn.shape - Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Vnn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) - Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) + # _, _, cached_len, _ = Kn.shape + # Knn = Kn[:, :, None, :, :].expand(1, n_kv_heads, n_groups, cached_len, head_dim) + # Vnn = Vn[:, :, None, :, :].expand(1, n_kv_heads, n_groups, cached_len, head_dim) + # Knn = Knn.reshape(1, n_heads, cached_len, head_dim) + # Vnn = Vnn.reshape(1, n_heads, cached_len, head_dim) + new_seq_len = seq_len + 1 + Knn = Kn[:, None, :, :].expand(n_kv_heads, n_groups, new_seq_len, head_dim) + Vnn = Vn[:, None, :, :].expand(n_kv_heads, n_groups, new_seq_len, head_dim) + Knn = Knn.reshape(n_heads, new_seq_len, head_dim) + Vnn = Vnn.reshape(n_heads, new_seq_len, head_dim) else: Knn, Vnn = Kn, Vn # Attention - A = torch.matmul(Qn, Knn.transpose(2, 3)) - A *= 1.0 / (self.head_dim**0.5) - A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) - A = torch.matmul(A, Vnn) - A = A.transpose(1, 2) - A = A.reshape(bsz, 1, self.hidden_size) - A = self.o_proj(A) - return A, 
(Kn, Vn) + # A = torch.matmul(Qn, Knn.transpose(2, 3)) + A = torch.matmul(Qn, Knn.transpose(1, 2)) + A *= 1.0 / math_sqrt(self.head_dim) + A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch.matmul(A, Vnn, out = Qn) + # A = A.transpose(1, 2) + A = A.view(self.hidden_size) + + # A = self.o_proj(A) + A = fast_linear_forward(self.o_proj, A) + A = A.reshape(1, 1, self.hidden_size) + + # return A, (Kn, Vn) + return A, (Kn.unsqueeze(0), Vn.unsqueeze(0)) pass torch_silu = torch.nn.functional.silu def fast_mlp_inference(self, X): - gate = self.gate_proj(X) - up = self.up_proj(X) + hidden_size = self.hidden_size + X = X.view(hidden_size) + + # gate = self.gate_proj(X) + # up = self.up_proj(X) + gate = fast_linear_forward(self.gate_proj, X) + up = fast_linear_forward(self. up_proj, X) gate = torch_silu(gate, inplace = True) gate *= up - X = self.down_proj(gate) + + # X = self.down_proj(gate) + down = fast_linear_forward(self.down_proj, gate, out = up[:hidden_size]) + X = down.view(1, 1, hidden_size) + return X pass @@ -676,10 +721,10 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth: Fast Llama patching release {__version__}\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ - f"O^O/ \_/ \\ CUDA capability = {gpu_stats.major}.{gpu_stats.minor}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ - f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Platform = {platform_system}\n' + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. 
FA = {HAS_FLASH_ATTENTION}.\n"\ + f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' logger.warning_once(statistics) FastLlamaModel.pre_patch() @@ -731,7 +776,7 @@ def from_pretrained( ) tokenizer = AutoTokenizer.from_pretrained( model_name, - model_max_length = max_seq_length, + model_max_length = max_position_embeddings, padding_side = "right", token = token, ) @@ -760,7 +805,7 @@ def from_pretrained( model = model, tokenizer = tokenizer, model_name = model_name, - model_max_length = max_seq_length, + model_max_length = max_position_embeddings, padding_side = "right", token = token, ) @@ -1076,4 +1121,47 @@ def patch_peft_model( internal_model.max_seq_length = max_seq_length return model pass + + + @staticmethod + def for_inference(model): + if not hasattr(model, "_original_forward"): + model._original_forward = model.forward + pass + model.forward = torch.inference_mode(model._original_forward) + + internal_model = model + internal_model.gradient_checkpointing = False + internal_model.training = False + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = False + internal_model.training = False + pass + pass + + + @staticmethod + def for_training(model, use_gradient_checkpointing = True): + if hasattr(model, "_original_forward"): + model.forward = model._original_forward + pass + + internal_model = model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + + # Delete all fast inference loras + for param in model.parameters(): + if hasattr(param, "_fast_lora"): + del param._fast_lora + pass + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + pass + pass pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 124eaf7b27..7da9e1042e 100644 --- a/unsloth/models/mapper.py 
+++ b/unsloth/models/mapper.py @@ -42,6 +42,12 @@ "unsloth/tinyllama", "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", ), + "unsloth/mistral-7b-instruct-v0.1-bnb-4bit" : ( + "mistralai/Mistral-7B-Instruct-v0.1", + ), + "unsloth/mistral-7b-instruct-v0.2-bnb-4bit" : ( + "mistralai/Mistral-7B-Instruct-v0.2", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 68add4b239..bde68a0a51 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -132,7 +132,7 @@ def MistralAttention_fast_forward( K = K.transpose(1, 2) V = V.transpose(1, 2) sw = getattr(self.config, "sliding_window", None) - sw = q_len if sw is None else sw + sw = q_len if (sw is None or sw == "null") else sw window = (-1, -1) if (q_len <= sw) else (sw, sw) A = flash_attn_func(Q, K, V, causal = True, window_size = window) else: @@ -176,7 +176,7 @@ def MistralForCausalLM_fast_forward( if causal_mask is None: bsz, q_len = input_ids.shape sliding_window = getattr(self.config, "sliding_window", None) - if sliding_window is None or sliding_window <= 0: + if sliding_window is None or sliding_window == "null" or sliding_window <= 0: causal_mask = xformers.attn_bias.LowerTriangularMask() elif q_len <= sliding_window: causal_mask = xformers.attn_bias.LowerTriangularMask() @@ -265,9 +265,11 @@ def from_pretrained( rope_scaling = None, # Mistral does not support RoPE scaling fix_tokenizer = True, **kwargs, - ): + ): + # Mistral does NOT support RoPE Scaling! if rope_scaling is not None: logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") + pass SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) @@ -275,10 +277,10 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth: Fast Mistral patching release {__version__}\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ - f"O^O/ \_/ \\ CUDA capability = {gpu_stats.major}.{gpu_stats.minor}. 
Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ - f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Platform = {platform_system}\n' + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ + f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' logger.warning_once(statistics) FastMistralModel.pre_patch() @@ -290,6 +292,18 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) + # Check max sequence length + model_config = AutoConfig.from_pretrained(model_name, token = token) + model_max_seq_length = model_config.max_position_embeddings + + # Mistral does NOT support RoPE Scaling sadly so we have to error out. 
+ if max_seq_length > model_max_seq_length: + raise RuntimeError( + "Unsloth: Unfortunately Mistral type models do not support RoPE scaling!\n"\ + f"The maximum sequence length supported is {model_max_seq_length}.", + ) + pass + bnb_config = None if load_in_4bit: bnb_config = BitsAndBytesConfig( @@ -299,20 +313,21 @@ def from_pretrained( bnb_4bit_compute_dtype = dtype, ) + max_position_embeddings = max(max_seq_length, model_max_seq_length) model = AutoModelForCausalLM.from_pretrained( model_name, - device_map = device_map, - torch_dtype = dtype, + device_map = device_map, + torch_dtype = dtype, quantization_config = bnb_config, - token = token, - # rope_scaling = rope_scaling, + token = token, + # rope_scaling = rope_scaling, **kwargs, ) tokenizer = AutoTokenizer.from_pretrained( model_name, - model_max_length = max_seq_length, - padding_side = "right", - token = token, + model_max_length = max_position_embeddings, + padding_side = "right", + token = token, ) model, tokenizer = patch_tokenizer(model, tokenizer) @@ -337,12 +352,12 @@ def from_pretrained( # We check the tokenizer first for errors if fix_tokenizer: tokenizer = check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = max_seq_length, - padding_side = "right", - token = token, + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = max_position_embeddings, + padding_side = "right", + token = token, ) pass patch_saving_functions(tokenizer) diff --git a/unsloth/save.py b/unsloth/save.py index 64342d91de..519d8e77d8 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -77,10 +77,14 @@ def _merge_lora(layer, name): W, quant_state, A, B, s = get_lora_parameters(layer) dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] W = fast_dequantize(W, quant_state).to(torch.float32).t() - sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) - W += sAB - if not torch.isfinite(W).all(): - raise 
ValueError(f"Unsloth: Merge failed.\n{name} has some elements = infinity.") + + if A is not None: + sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) + W += sAB + if not torch.isfinite(W).all(): + raise ValueError(f"Unsloth: Merge failed.\n{name} has some elements = infinity.") + pass + W = W.t().to(dtype) else: W = layer.weight @@ -156,6 +160,7 @@ def unsloth_save_model( pass if save_method == "merged_4bit": + print("Unsloth: Merging 4bit and LoRA weights to 4bit...") print("This might take 5 minutes...") model = model.merge_and_unload() From 3d67790901696e953171f64b4bf9d980780051a0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 26 Jan 2024 04:19:17 +1100 Subject: [PATCH 0124/1088] Fix bugs (#129) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py --- pyproject.toml | 24 +++++---------- unsloth/kernels/fast_lora.py | 2 +- unsloth/models/_utils.py | 14 ++++++++- 
unsloth/models/llama.py | 52 +++++++++++++++++--------------- unsloth/models/loader.py | 2 +- unsloth/models/mapper.py | 2 +- unsloth/models/mistral.py | 57 +++++++++++++++++++----------------- unsloth/save.py | 19 ++++++++++-- 8 files changed, 98 insertions(+), 74 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 83efab1641..506e9a7552 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,18 +32,8 @@ include-package-data = false exclude = ["images*"] [project.optional-dependencies] -huggingfacedev = [ - "transformers @ git+https://github.com/huggingface/transformers", - "datasets", - "sentencepiece", - "accelerate", - "trl>=0.7.9", - "peft", - "tqdm", - "psutil", -] huggingface = [ - "transformers", + "transformers>=4.37.0", "datasets", "sentencepiece", "accelerate", @@ -107,15 +97,15 @@ colab_ampere = [ "ninja", "flash-attn", ] -colab_dev = [ - "unsloth[huggingfacedev]", +colab_torch211 = [ + "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu121only]", + "unsloth[cu121onlytorch211]", ] -colab_ampere_dev = [ - "unsloth[huggingfacedev]", +colab_ampere_torch211 = [ + "unsloth[huggingface]", "bitsandbytes", - "unsloth[cu121only]", + "unsloth[cu121onlytorch211]", "packaging", "ninja", "flash-attn", diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index b70e6e4fee..b487ff95e2 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -141,7 +141,7 @@ def backward(ctx, dY : torch.Tensor): # Gate projection LoRA weights d_gateA = X.t() @ (DW_dfg @ gateB.t()) - d_gateB = (gateA.t() @ X.t() @ DW_dfg) + d_gateB = (gateA.t() @ X.t()) @ DW_dfg d_gateA *= gateS d_gateB *= gateS diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8ddf4d60da..5f35851443 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -30,7 +30,19 @@ if major_version >= 8: try: from flash_attn import flash_attn_func - HAS_FLASH_ATTENTION = True + # Check for CUDA linking errors "undefined symbol: 
_ZNK3c106SymIntltEl" + try: + from flash_attn.flash_attn_interface import flash_attn_cuda + HAS_FLASH_ATTENTION = True + except: + logger.warning_once( + "Unsloth: Your Flash Attention 2 installation seems to be broken?\n"\ + "A possible explanation is you have a new CUDA version which isn't\n"\ + "yet compatible with FA2? Please file a ticket to Unsloth or FA2.\n"\ + "We shall now use Xformers instead, which gets a 0.01% performance hit.\n"\ + "We found this negligible impact by benchmarking on 1x A100." + ) + HAS_FLASH_ATTENTION = False except: HAS_FLASH_ATTENTION = False else: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 40074ad043..da99ea4b68 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -234,7 +234,7 @@ def LlamaAttention_fast_forward( bsz, q_len, _ = hidden_states.size() # Check for inference - if past_key_value is not None and q_len == 1: + if past_key_value is not None and q_len == 1 and bsz == 1: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -271,6 +271,7 @@ def LlamaAttention_fast_forward( if past_key_value is not None: K = torch.cat([past_key_value[0], K], dim = 2) V = torch.cat([past_key_value[1], V], dim = 2) + pass past_key_value = (K, V) if use_cache else None # Attention module @@ -283,13 +284,13 @@ def LlamaAttention_fast_forward( # Group query attention if n_groups != 1: - K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) - V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) - K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) - V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + K = K .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) if hidden_states.requires_grad: - K = K.reshape(bsz, q_len, n_heads, head_dim) - V = V.reshape(bsz, q_len, n_heads, head_dim) + K = 
K.reshape(bsz, kv_seq_len, n_heads, head_dim) + V = V.reshape(bsz, kv_seq_len, n_heads, head_dim) else: Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) pass @@ -304,10 +305,10 @@ def LlamaAttention_fast_forward( else: # Grouped query attention if n_groups != 1: - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - K = K.reshape(bsz, n_heads, q_len, head_dim) - V = V.reshape(bsz, n_heads, q_len, head_dim) + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) pass # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! @@ -349,7 +350,7 @@ def LlamaDecoderLayer_fast_forward( past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ bsz, q_len, hd = hidden_states.size() - if (past_key_value is not None and q_len == 1): + if (past_key_value is not None and q_len == 1 and bsz == 1): # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) @@ -722,9 +723,9 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth: Fast Llama patching release {__version__}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. 
FA = {HAS_FLASH_ATTENTION}.\n"\ - f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' + f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' logger.warning_once(statistics) FastLlamaModel.pre_patch() @@ -813,10 +814,13 @@ def from_pretrained( patch_saving_functions(tokenizer) # Fix up config for transformers uploading PEFT - name = model.config._name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.config.update({"_name_or_path" : name}) + # Not necessary anymore since we require transformers>=4.37! + if False: + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass pass # Log Unsloth version for future fastpaths for inference @@ -1019,11 +1023,13 @@ def patch_peft_model( # Fix up config for transformers uploading PEFT for active_adapter in model.peft_config.keys(): - name = model.peft_config[active_adapter].base_model_name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.peft_config[active_adapter].base_model_name_or_path = name - pass + # Not necessary since we requires transformers >= 4.37 + if False: + name = model.peft_config[active_adapter].base_model_name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.peft_config[active_adapter].base_model_name_or_path = name + pass # Add revision to enable future fast inference paths model.peft_config[active_adapter].revision = f"unsloth" pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 5a7f84404f..07396313b3 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -34,7 +34,7 @@ def _get_model_name(model_name, load_in_4bit = True): logger.warning_once( f"Unsloth: Your 
transformers version of {transformers_version} does not support native "\ f"4bit loading.\nThe minimum required version is 4.37.\n"\ - f'Try `pip install "git+https://github.com/huggingface/transformers.git"`\n'\ + f'Try `pip install --upgrade "transformers>=4.37"`\n'\ f"to obtain the latest transformers build, then restart this session.\n"\ f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." ) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 7da9e1042e..56fc5436ab 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -36,7 +36,7 @@ ), "unsloth/zephyr-sft-bnb-4bit" : ( "unsloth/zephyr-sft", - "alignment-handbook/zephyr-7b-sft-full", + "HuggingFaceH4/mistral-7b-sft-beta", ), "unsloth/tinyllama-bnb-4bit" : ( "unsloth/tinyllama", diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index bde68a0a51..2410572174 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -49,7 +49,7 @@ def MistralAttention_fast_forward( bsz, q_len, _ = hidden_states.size() # Check for inference - if past_key_value is not None and q_len == 1: + if past_key_value is not None and q_len == 1 and bsz == 1: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -84,9 +84,9 @@ def MistralAttention_fast_forward( pass if past_key_value is not None: - # reuse k, v, self_attention K = torch.cat([past_key_value[0], K], dim = 2) V = torch.cat([past_key_value[1], V], dim = 2) + pass past_key_value = (K, V) if use_cache else None # Attention module @@ -95,32 +95,33 @@ def MistralAttention_fast_forward( Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) - M = bsz * q_len + K_M = V_M = bsz * kv_seq_len + Q_M = bsz * q_len has_swa = isinstance(causal_mask, xformers.attn_bias.BlockDiagonalCausalMask) # Group query attention - K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) - V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) - K = K.expand(bsz, q_len, 
n_kv_heads, n_groups, head_dim) - V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + K = K .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) if hidden_states.requires_grad: - K = K.reshape(bsz, q_len, n_heads, head_dim) - V = V.reshape(bsz, q_len, n_heads, head_dim) + K = K.reshape(bsz, kv_seq_len, n_heads, head_dim) + V = V.reshape(bsz, kv_seq_len, n_heads, head_dim) if has_swa: - Q = Q.view(1, M, n_heads, head_dim) - K = K.view(1, M, n_heads, head_dim) - V = V.view(1, M, n_heads, head_dim) + Q = Q.view(1, Q_M, n_heads, head_dim) + K = K.view(1, K_M, n_heads, head_dim) + V = V.view(1, V_M, n_heads, head_dim) pass else: # Xformers does support the forward pass though Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) if has_swa: - Q = Q.view(1, M, n_kv_heads, n_groups, head_dim) - K = K.view(1, M, n_kv_heads, n_groups, head_dim) - V = V.view(1, M, n_kv_heads, n_groups, head_dim) + Q = Q.view(1, Q_M, n_kv_heads, n_groups, head_dim) + K = K.view(1, K_M, n_kv_heads, n_groups, head_dim) + V = V.view(1, V_M, n_kv_heads, n_groups, head_dim) pass pass @@ -132,16 +133,16 @@ def MistralAttention_fast_forward( K = K.transpose(1, 2) V = V.transpose(1, 2) sw = getattr(self.config, "sliding_window", None) - sw = q_len if (sw is None or sw == "null") else sw - window = (-1, -1) if (q_len <= sw) else (sw, sw) + sw = kv_seq_len if (sw is None or sw == "null") else sw + window = (-1, -1) if (kv_seq_len <= sw) else (sw, sw) A = flash_attn_func(Q, K, V, causal = True, window_size = window) else: # Grouped query attention # if n_groups != 1: - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - K = K.reshape(bsz, n_heads, q_len, head_dim) - V = V.reshape(bsz, n_heads, q_len, head_dim) + K = 
K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) # pass # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! @@ -278,7 +279,7 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth: Fast Mistral patching release {__version__}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' logger.warning_once(statistics) @@ -363,11 +364,13 @@ def from_pretrained( patch_saving_functions(tokenizer) # Fix up config for transformers uploading PEFT - name = model.config._name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.config.update({"_name_or_path" : name}) - pass + # Not necessary anymore since we require transformers>=4.37 + if False: + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass # Log Unsloth version for future fastpaths for inference model.config.update({"unsloth_version" : __version__}) diff --git a/unsloth/save.py b/unsloth/save.py index 519d8e77d8..b4e17966fa 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -135,6 +135,17 @@ def unsloth_save_model( temporary_location : str = 
"_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): + if save_method == "merged_4bit": + raise RuntimeError( + "Unsloth: Merging into 4bit will cause your model to lose accuracy if you plan\n"\ + "to merge to GGUF or others later on. I suggest you to do this as a final step\n"\ + "if you're planning to do multiple saves.\n"\ + "If you are certain, change `save_method` to `merged_4bit_forced`." + ) + elif save_method == "merged_4bit_forced": + save_method = "merged_4bit" + pass + save_pretrained_settings = dict(locals()) for deletion in ("model", "tokenizer", "save_method", "temporary_location", "maximum_memory_usage"): del save_pretrained_settings[deletion] @@ -457,6 +468,8 @@ def install_llama_cpp_clone_non_blocking(): def install_llama_cpp_make_non_blocking(): env = { **os.environ, "LLAMA_CUBLAS": "1", } n_jobs = max(int(psutil.cpu_count()*1.5), 1) + # Force make clean + os.system("make clean -C llama.cpp") full_command = ["make", "-j", str(n_jobs), "-C", "llama.cpp"] run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) return run_installer @@ -487,8 +500,8 @@ def install_llama_cpp_blocking(): def save_to_gguf( - model_directory : str = "unsloth_finetuned_model", - quantization_method : str = "fast_quantized", + model_directory : str = "unsloth_finetuned_model", + quantization_method : str = "fast_quantized", _run_installer = None, # Non blocking install of llama.cpp ): from transformers.models.llama.modeling_llama import logger @@ -566,7 +579,7 @@ def unsloth_save_pretrained_merged( self, save_directory : Union[str, os.PathLike], tokenizer = None, - save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, From 87a7ef1049f6fca409a0673f51f4758e0aff248d Mon Sep 17 00:00:00 2001 
From: Daniel Han Date: Sat, 27 Jan 2024 04:47:54 +1100 Subject: [PATCH 0125/1088] More bug fixes (#133) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * 
Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py --- unsloth/kernels/utils.py | 5 ++++- unsloth/models/llama.py | 2 +- unsloth/save.py | 24 ++++++++++++++++-------- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 7c693a6c4e..e22e3a11b9 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -179,7 +179,10 @@ def fast_gemv(X, W, quant_state, out = None, out_W = None): def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) - out = fast_gemv(X, W, W_quant, out = out) + if W_quant is None: + out = torch.matmul(X, W.t()) + else: + out = fast_gemv(X, W, W_quant, out = out) if lora_A is not None: # Save LoRAs for inference to stop data movement costs diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index da99ea4b68..bc7dc69261 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -489,7 +489,7 @@ def LlamaModel_fast_forward( # Ignore attention_mask if attention_mask is None: padding_mask = None - elif True:#self.training: + elif self.training: attention_mask = None padding_mask = None else: diff --git a/unsloth/save.py b/unsloth/save.py index b4e17966fa..6c44d23cb0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -258,11 +258,19 @@ def unsloth_save_model( "private" : save_pretrained_settings["private"], "token" : save_pretrained_settings["token"], } - + + # Check if PEFT Model or not - if yes, 3 levels. If not 2 levels. 
+ from peft import PeftModelForCausalLM + if isinstance(model, PeftModelForCausalLM): + internal_model = model.model + else: + internal_model = model + pass + + # Cannot be converted properly! if (save_method == "merged_4bit") or (save_method == "lora") or ( not hasattr(model, "model") or \ - not hasattr(model.model, "model") or \ - not hasattr(model.model.model, "layers") + not hasattr(internal_model.model, "layers") ): # Do general saving @@ -343,12 +351,12 @@ def unsloth_save_model( # HF also uses a OrderedDict from collections import OrderedDict state_dict = OrderedDict() - state_dict["model.embed_tokens.weight"] = model.model.model.embed_tokens.weight.data + state_dict["model.embed_tokens.weight"] = internal_model.model.embed_tokens.weight.data max_vram = int(torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage) from tqdm import tqdm as ProgressBar - for j, layer in enumerate(ProgressBar(model.model.model.layers)): + for j, layer in enumerate(ProgressBar(internal_model.model.layers)): for item in LLAMA_WEIGHTS: proj = eval(f"layer.{item}") name = f"model.layers.{j}.{item}.weight" @@ -375,8 +383,8 @@ def unsloth_save_model( pass pass - state_dict["model.norm.weight"] = model.model.model.norm.weight.data - state_dict["lm_head.weight"] = model.model.lm_head.weight.data + state_dict["model.norm.weight"] = internal_model.model.norm.weight.data + state_dict["lm_head.weight"] = internal_model.lm_head.weight.data # All tensors MUST be type torch.Tensor and not torch.nn.parameter.Parameter for key, value in state_dict.items(): @@ -418,7 +426,7 @@ def unsloth_save_model( model.config = new_config # Save! 
- model.model.save_pretrained(**save_pretrained_settings) + internal_model.save_pretrained(**save_pretrained_settings) # Revert config back original_model = model From 89daa0efcc38c7690abbb8170b5d9f3d364796ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jan 2024 04:50:22 +1100 Subject: [PATCH 0126/1088] Inference bug fix (#134) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * 
Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bc7dc69261..d5dd7833d5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -234,7 +234,7 @@ def LlamaAttention_fast_forward( bsz, q_len, _ = hidden_states.size() # Check for inference - if past_key_value is not None and q_len == 1 and bsz == 1: + if False: #past_key_value is not None and q_len == 1 and bsz == 1: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -350,7 +350,7 @@ def LlamaDecoderLayer_fast_forward( past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ bsz, q_len, hd = hidden_states.size() - if (past_key_value is not None and q_len == 1 and bsz == 1): + if False: #(past_key_value is not None and q_len == 1 and bsz == 1): # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) From cd32ba76b71adf3317ede9de7d1cf6f30ad3bf0d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 28 Jan 2024 04:20:06 +1100 Subject: [PATCH 0127/1088] Fix bugs + more accurate Swiglu (#137) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * 
Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * 
Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? * Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask --- pyproject.toml | 4 ++-- unsloth/kernels/fast_lora.py | 43 ++++++++++++++++++++++-------------- unsloth/kernels/swiglu.py | 43 +++++++++++++++++++++++------------- unsloth/save.py | 12 ++++++---- 4 files changed, 65 insertions(+), 37 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 506e9a7552..c7368b276a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,9 +36,9 @@ huggingface = [ "transformers>=4.37.0", "datasets", "sentencepiece", - "accelerate", + "accelerate>=0.26.1", "trl>=0.7.9", - "peft", + "peft>=0.7.1", "tqdm", "psutil", ] diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index b487ff95e2..b3a1098355 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -90,6 +90,8 @@ def forward(ctx, X : torch.Tensor, e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) g = matmul_lora(X, upW, upW_quant, upA, upB, upS) + # f = torch.nn.functional.silu(e) + # h = f * g h = swiglu_fg_kernel(e, g) i = matmul_lora(h, downW, downW_quant, downA, downB, downS) @@ -103,6 +105,7 @@ def forward(ctx, X : torch.Tensor, return i pass + @staticmethod @torch.cuda.amp.custom_bwd def 
backward(ctx, dY : torch.Tensor): @@ -121,11 +124,16 @@ def backward(ctx, dY : torch.Tensor): g = g .view(-1, g .shape[-1]) dtype = X.dtype - # DW_f = (D @ W.T * f) - # DW_dfg = (D @ W.T * df * g) DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) + # e = e.float() + # se = 1.0 / (1.0 + torch.exp(-e)) + # f = (se * e).to(dtype) + # h = f * g + # df = DW * f + # dg = DW * g + # de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype) DW, e, g = swiglu_DWf_DW_dfg_kernel(DW, e, g) - h, DW_f, DW_dfg = DW, e, g + h, df, de = DW, e, g # Down projection LoRA weights d_downA = h.t() @ (dY @ downB.t()) @@ -134,31 +142,29 @@ def backward(ctx, dY : torch.Tensor): d_downB *= downS # Up projection LoRA weights - d_upA = X.t() @ (DW_f @ upB.t()) - d_upB = (upA.t() @ X.t()) @ DW_f + d_upA = X.t() @ (df @ upB.t()) + d_upB = (upA.t() @ X.t()) @ df d_upA *= upS d_upB *= upS # Gate projection LoRA weights - d_gateA = X.t() @ (DW_dfg @ gateB.t()) - d_gateB = (gateA.t() @ X.t()) @ DW_dfg + d_gateA = X.t() @ (de @ gateB.t()) + d_gateB = (gateA.t() @ X.t()) @ de d_gateA *= gateS d_gateB *= gateS - # Final derivatives to backpropagate backwards. - # See our blogpost for more details. 
- # (D @ W.T * f) @ U.T + # dX = matmul_lora(df, upW.t(), upW_quant, upB, upA, upS) + # dX += matmul_lora(de, gateW.t(), gateW_quant, gateB, gateA, gateS) + upW = fast_dequantize(upW.t(), upW_quant) - # (D @ W.T * f) @ (U.T + B.T @ A.T) - dX = torch.matmul(DW_f, upW.t(), out = X) + dX = torch.matmul(df, upW.t(), out = X) del upW - dX += DW_f @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) + dX += df @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) - # And add the derivative for the gate projection gateW = fast_dequantize(gateW.t(), gateW_quant) - dX += DW_dfg @ gateW.t() + dX += de @ gateW.t() del gateW - dX += DW_dfg @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) + dX += de @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) # gateW, gateW_quant, gateA, gateB, gateS, # upW, upW_quant, upA, upB, upS, @@ -172,6 +178,11 @@ def backward(ctx, dY : torch.Tensor): def apply_lora_mlp(self, X): + # gate = self.gate_proj(X) + # up = self. up_proj(X) + # h = torch.nn.functional.silu(gate) * up + # down = self.down_proj(h) + # return down gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) upW, upW_quant, upA, upB, upS = get_lora_parameters(self. 
up_proj) downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index 4e9b7ba2ae..ff6b162680 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ -28,7 +28,7 @@ def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) # f = e * sigmoid(e) - f_row = e_row / (1 + tl.exp(-e_row)) + f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row)) f_row = f_row.to(g_row.dtype) # Exact copy from HF # h = f * g h_row = f_row * g_row @@ -50,30 +50,43 @@ def swiglu_fg_kernel(e, g): @triton.jit def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): + """ + e = e.float() + se = 1.0 / (1.0 + torch.exp(-e)) + f = (se * e).to(dtype) + h = f * g + df = DW * f + dg = DW * g + de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype) + """ block_idx = tl.program_id(0) offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32) - e_row = tl.load(e + offsets, mask = mask, other = 0)#.to(tl.float32) + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) - # f = e * sigmoid(e) - se_row = 1 / (1 + tl.exp(-e_row.to(tl.float32))) - se_row = se_row.to(e_row.dtype) # Exact copy from HF - # f = e * se - f_row = e_row * se_row + # e = e.float() + # se = 1.0 / (1.0 + torch.exp(-e)) + se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row)) + # f = (se * e).to(dtype) + f_row = se_row * e_row + f_row = f_row.to(DW_row.dtype) # h = f * g - h_row = f_row * g_row - # DW_f = DW * f - DWf_row = DW_row * f_row - # DW_dfg = DW * (se*(g - h) + h) - DW_dfg_row = DW_row * (se_row*(g_row - h_row) + h_row) + h_row = f_row * g_row + # df = DW * f + df_row = DW_row * f_row + # dg = DW * g + dg_row = DW_row * g_row + # de = 
(dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype) + de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row)) + de_row = de_row.to(DW_row.dtype) # Store derivatives in buffers - tl.store(DW + offsets, h_row, mask = mask) - tl.store(e + offsets, DWf_row, mask = mask) - tl.store(g + offsets, DW_dfg_row, mask = mask) + tl.store(DW + offsets, h_row, mask = mask) # h = f * g + tl.store(e + offsets, df_row, mask = mask) # df = DW * f + tl.store(g + offsets, de_row, mask = mask) # de pass diff --git a/unsloth/save.py b/unsloth/save.py index 6c44d23cb0..471897c0ad 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -14,6 +14,7 @@ from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit from peft.tuners.lora import Linear4bit as Peft_Linear4bit +from peft.tuners.lora import Linear as Peft_Linear from typing import Optional, Callable, Union, List import torch import os @@ -72,11 +73,15 @@ def print_quantization_methods(): def _merge_lora(layer, name): - if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit)): + + if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)): # Is LoRA so we need to merge! 
W, quant_state, A, B, s = get_lora_parameters(layer) - dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] - W = fast_dequantize(W, quant_state).to(torch.float32).t() + if quant_state is not None: + dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] + W = fast_dequantize(W, quant_state) + pass + W = W.to(torch.float32).t() if A is not None: sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) @@ -84,7 +89,6 @@ def _merge_lora(layer, name): if not torch.isfinite(W).all(): raise ValueError(f"Unsloth: Merge failed.\n{name} has some elements = infinity.") pass - W = W.t().to(dtype) else: W = layer.weight From 1ecc0185a5759c7a0c95dfc96aceea5023cebdfc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 28 Jan 2024 04:30:29 +1100 Subject: [PATCH 0128/1088] 1 more bug (#138) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py --- unsloth/save.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 471897c0ad..744ec48327 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -80,7 +80,8 @@ def _merge_lora(layer, name): if quant_state is not None: dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] W = fast_dequantize(W, quant_state) - pass + else: + dtype = W.dtype W = W.to(torch.float32).t() if A is not None: From 8faf469f028a05852b2dc29ec8df1f36998fab33 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 29 Jan 2024 02:52:39 +1100 Subject: [PATCH 0129/1088] Fix saving issues (#139) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * 
Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print --- unsloth/models/dpo.py | 13 ++-- unsloth/models/llama.py | 15 ++++- unsloth/models/mistral.py | 3 +- unsloth/save.py | 130 +++++++++++++++++++++++++------------- 4 files changed, 111 insertions(+), 50 deletions(-) diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 92fde81fd0..3ae4d636f4 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -101,10 +101,13 @@ def NotebookTrainingTracker_write_line(self, values): def PatchDPOTrainer(): - # Patch DPO notebook printing - NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line - from transformers.trainer import DEFAULT_PROGRESS_CALLBACK - DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin - DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log + from transformers.trainer import is_in_notebook + if is_in_notebook(): + # Patch DPO notebook printing + NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line + from transformers.trainer import 
DEFAULT_PROGRESS_CALLBACK + DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin + DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log + pass pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d5dd7833d5..fcaa2a19ed 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -486,6 +486,15 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + # Fix up attention mask by setting elements to 0 + # Specifically for DPO + if self._has_no_labels and attention_mask is not None: + inputs_requires_grad = inputs_embeds.requires_grad + if inputs_requires_grad: inputs_embeds.requires_grad_(False) + inputs_embeds *= attention_mask.unsqueeze(0).transpose(0, 1).transpose(1, 2) + if inputs_requires_grad: inputs_embeds.requires_grad_(True) + pass + # Ignore attention_mask if attention_mask is None: padding_mask = None @@ -617,6 +626,7 @@ def LlamaForCausalLM_fast_forward( return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + self.model._has_no_labels = labels is None outputs = self.model( input_ids=input_ids, causal_mask=causal_mask, @@ -726,7 +736,7 @@ def from_pretrained( f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. 
FA = {HAS_FLASH_ATTENTION}.\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' - logger.warning_once(statistics) + print(statistics) FastLlamaModel.pre_patch() if dtype is None: @@ -826,6 +836,9 @@ def from_pretrained( # Log Unsloth version for future fastpaths for inference model.config.update({"unsloth_version" : __version__}) + # Add save modules + patch_saving_functions(model) + return model, tokenizer pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 2410572174..2941fb32fb 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -195,6 +195,7 @@ def MistralForCausalLM_fast_forward( return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + self.model._has_no_labels = labels is None outputs = self.model( input_ids=input_ids, causal_mask=causal_mask, @@ -282,7 +283,7 @@ def from_pretrained( f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. 
FA = {HAS_FLASH_ATTENTION}.\n"\ f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' - logger.warning_once(statistics) + print(statistics) FastMistralModel.pre_patch() if dtype is None: diff --git a/unsloth/save.py b/unsloth/save.py index 744ec48327..baa8f3f5ca 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -278,7 +278,7 @@ def unsloth_save_model( not hasattr(internal_model.model, "layers") ): # Do general saving - + print(type(model)) # Edit save_pretrained_settings # [TODO] _create_repo has errors due to **kwargs getting accepted for deletion in \ @@ -483,7 +483,7 @@ def install_llama_cpp_make_non_blocking(): n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean os.system("make clean -C llama.cpp") - full_command = ["make", "-j", str(n_jobs), "-C", "llama.cpp"] + full_command = ["make", "all", "-j", str(n_jobs), "-C", "llama.cpp"] run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) return run_installer pass @@ -499,7 +499,7 @@ def install_python_non_blocking(packages = []): def install_llama_cpp_blocking(): commands = [ "git clone https://github.com/ggerganov/llama.cpp", - f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j {psutil.cpu_count()*2}", + f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j {psutil.cpu_count()*2}", "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return @@ -515,6 +515,7 @@ def install_llama_cpp_blocking(): def save_to_gguf( model_directory : str = "unsloth_finetuned_model", quantization_method : str = "fast_quantized", + first_conversion : str = "f16", _run_installer = None, # Non blocking install of llama.cpp ): from transformers.models.llama.modeling_llama import logger @@ -539,6 +540,16 @@ def save_to_gguf( f' "-____-" In total, you will have to wait around 26 minutes.\n' print(print_info) + # Check first_conversion format + if first_conversion == "f16" : pass + elif first_conversion == "f32" : pass + elif 
first_conversion == "q8_0": pass + else: + raise RuntimeError( + f"Unsloth: `first_conversion` can only be one of ['f16', 'f32', 'q8_0'] and not `{first_conversion}`." + ) + pass + print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") if _run_installer is not None: _run_installer.wait() @@ -546,11 +557,19 @@ def save_to_gguf( install_llama_cpp_blocking() pass - print("Unsloth: [1] Converting HF into GGUF format. This will take 3 minutes...") - first_conversion = "f16" if quantization_method == "f32": first_conversion = "f32" elif quantization_method == "f16": first_conversion = "f16" elif quantization_method == "q8_0": first_conversion = "q8_0" + else: + # Quantized models must have f16 as the default argument + if first_conversion == "f32" : pass + elif first_conversion == "f16" : pass + elif first_conversion == "q8_0": + logger.warning_once("Unsloth: We must use f16 for quantization first.") + first_conversion = "f16" + pass + pass + print(f"Unsloth: [1] Converting HF into {first_conversion} GGUF format. This will take 3 minutes...") n_cpus = psutil.cpu_count()*2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model @@ -566,6 +585,17 @@ def save_to_gguf( print(line.decode("utf-8"), flush = True, end = "") pass + # Check if quantization succeeded! + if not os.path.isfile(final_location): + raise RuntimeError( + "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ + "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ + "You must run this in the same folder as you're saving your model.\n"\ + "git clone https://github.com/ggerganov/llama.cpp\n"\ + "cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j\n"\ + "Once that's done, redo the quantization." + ) + pass print(f"Unsloth: Conversion completed! 
Output location: {final_location}") if quantization_method != first_conversion: @@ -581,6 +611,19 @@ def save_to_gguf( for line in sp.stderr: print(line.decode("utf-8"), flush = True, end = "") pass + + # Check if quantization succeeded! + if not os.path.isfile(final_location): + raise RuntimeError( + "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ + "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ + "You must run this in the same folder as you're saving your model.\n"\ + "git clone https://github.com/ggerganov/llama.cpp\n"\ + "cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j\n"\ + "Once that's done, redo the quantization." + ) + pass + print(f"Unsloth: Conversion completed! Output location: {final_location}") pass @@ -765,6 +808,7 @@ def unsloth_save_pretrained_gguf( save_directory : Union[str, os.PathLike], tokenizer = None, quantization_method : str = "fast_quantized", + first_conversion : str = "f16", push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, is_main_process : bool = True, @@ -813,6 +857,7 @@ def unsloth_save_pretrained_gguf( arguments["save_method"] = "merged_16bit" # Must be 16bit del arguments["self"] del arguments["quantization_method"] + del arguments["first_conversion"] # Non blocking install GGUF first if not os.path.exists("llama.cpp"): @@ -840,7 +885,7 @@ def unsloth_save_pretrained_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + file_location = save_to_gguf(new_save_directory, quantization_method, first_conversion, makefile) if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") @@ -861,6 +906,7 @@ def unsloth_push_to_hub_gguf( repo_id : str, tokenizer = None, quantization_method : str = "fast_quantized", + first_conversion : str = "f16", use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = None, private : 
Optional[bool] = None, @@ -911,6 +957,7 @@ def unsloth_push_to_hub_gguf( del arguments["self"] del arguments["repo_id"] del arguments["quantization_method"] + del arguments["first_conversion"] # Non blocking install GGUF first if not os.path.exists("llama.cpp"): @@ -938,7 +985,7 @@ def unsloth_push_to_hub_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization_method, makefile) + file_location = save_to_gguf(new_save_directory, quantization_method, first_conversion, makefile) print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( @@ -960,6 +1007,23 @@ def patch_saving_functions(model): if hasattr(model, "_original_push_to_hub"): return + # First check if this has already been called, and revert it + original_model = model + while True: + if hasattr(original_model, "_original_push_to_hub"): + original_model.push_to_hub = original_model._original_push_to_hub + del original_model._original_push_to_hub + if hasattr(original_model, "push_to_hub_merged"): del original_model.push_to_hub_merged + if hasattr(original_model, "save_pretrained_merged"): del original_model.save_pretrained_merged + if hasattr(original_model, "push_to_hub_gguf"): del original_model.push_to_hub_gguf + if hasattr(original_model, "save_pretrained_gguf"): del original_model.save_pretrained_gguf + pass + + if hasattr(original_model, "model"): original_model = original_model.model + else: break + pass + + # And now re add our saving methods! 
original_push_to_hub = model.push_to_hub signature = str(inspect.signature(original_push_to_hub)).replace("NoneType", "None") signature = signature[1:] @@ -988,49 +1052,29 @@ def patch_saving_functions(model): pass ''' exec(push_to_hub_text, globals()) - model.push_to_hub = types.MethodType(unsloth_push_to_hub, model) - if hasattr(model, "add_model_tags"): - model.add_model_tags(["unsloth",]) + original_model = model + while True: + + if not hasattr(original_model, "_original_push_to_hub"): + original_model._original_push_to_hub = original_model.push_to_hub + original_model.push_to_hub = types.MethodType(unsloth_push_to_hub, original_model) + + if hasattr(original_model, "add_model_tags"): + original_model.add_model_tags(["unsloth",]) + pass + if hasattr(original_model, "model"): original_model = original_model.model + else: break + pass + + # Add saving methods to top level model if hasattr(model, "config"): # Counteract tokenizers model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) - else: - model.push_to_hub_merged = model.push_to_hub - model.save_pretrained_merged = model.save_pretrained - model.push_to_hub_gguf = model.push_to_hub - model.save_pretrained_gguf = model.save_pretrained - pass - - original_model = model - while hasattr(original_model, "model"): - original_model = original_model.model - if hasattr(original_model, "_original_push_to_hub"): continue - - original_model._original_push_to_hub = original_model.push_to_hub - original_model.push_to_hub = types.MethodType(unsloth_push_to_hub, original_model) - - if hasattr(original_model, "add_model_tags"): - original_model.add_model_tags(["unsloth",]) - - if hasattr(original_model, "config"): - # Counteract tokenizers - 
original_model.push_to_hub_merged = \ - types.MethodType(unsloth_push_to_hub_merged, original_model) - - original_model.save_pretrained_merged = \ - types.MethodType(unsloth_save_pretrained_merged, original_model) - - original_model.push_to_hub_gguf = \ - types.MethodType(unsloth_push_to_hub_gguf, original_model) - - original_model.save_pretrained_gguf = \ - types.MethodType(unsloth_save_pretrained_gguf, original_model) - pass pass - return + return model pass From 206a9b65f090bd71ccaad7dd88b67ba2bfde0b58 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 29 Jan 2024 03:45:07 +1100 Subject: [PATCH 0130/1088] Nightly (#140) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? * Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update 
mistral.py * Update save.py * saving --- unsloth/models/mistral.py | 3 +++ unsloth/save.py | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 2941fb32fb..91a2dd7e35 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -375,6 +375,9 @@ def from_pretrained( # Log Unsloth version for future fastpaths for inference model.config.update({"unsloth_version" : __version__}) + + # Add save modules + patch_saving_functions(model) return model, tokenizer pass diff --git a/unsloth/save.py b/unsloth/save.py index baa8f3f5ca..4cda8b9651 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -278,7 +278,6 @@ def unsloth_save_model( not hasattr(internal_model.model, "layers") ): # Do general saving - print(type(model)) # Edit save_pretrained_settings # [TODO] _create_repo has errors due to **kwargs getting accepted for deletion in \ From 05624642802c7f90dcc7aeea0e1c8d447cde006e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 29 Jan 2024 17:49:54 +1100 Subject: [PATCH 0131/1088] Fix inference attention mask (#142) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to 
transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py --- unsloth/models/llama.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index fcaa2a19ed..36bb58f3d9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -488,7 +488,10 @@ def LlamaModel_fast_forward( # Fix up attention mask by setting elements to 0 # Specifically for DPO - if self._has_no_labels and attention_mask is not None: + if self._has_no_labels and attention_mask is not None and \ + attention_mask.shape[1] == seq_length: + # Careful for inference the attention_mask is size (1, kv_seq_len) + # Whilst the input_embeds is size (1, 1, 4096) inputs_requires_grad = inputs_embeds.requires_grad if inputs_requires_grad: inputs_embeds.requires_grad_(False) inputs_embeds *= attention_mask.unsqueeze(0).transpose(0, 1).transpose(1, 2) From 051a73b0e63d3ae3acd7c4d962349280f69bbdb0 Mon Sep 17 00:00:00 2001 From: Daniel Han 
Date: Wed, 31 Jan 2024 04:03:37 +1100 Subject: [PATCH 0132/1088] Hotfix - fix inference (#146) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? * Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value --- unsloth/kernels/__init__.py | 2 +- 
unsloth/kernels/rope_embedding.py | 12 +-- unsloth/kernels/utils.py | 60 ++++++++++----- unsloth/models/llama.py | 121 +++++++++++++++++++++++++----- unsloth/models/mistral.py | 10 ++- 5 files changed, 157 insertions(+), 48 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 9b861f4934..f5db8fa890 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -22,4 +22,4 @@ apply_lora_qkv, apply_lora_o, ) -from .utils import fast_dequantize, QUANT_STATE, fast_linear_forward +from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index a9527520ab..d7fca30b0c 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -134,9 +134,9 @@ def forward(ctx, Q, cos, sin, position_ids): half = Q.shape[-1]//2 RH_Q = torch.cat((-Q[..., half:], Q[..., :half]), dim = -1) Q *= cos - Q.addcmul_(RH_Q, sin) - # RH_Q *= sin - # Q += RH_Q + # Q.addcmul_(RH_Q, sin) + RH_Q *= sin + Q += RH_Q ctx.save_for_backward(cos, sin) return Q pass @@ -148,9 +148,9 @@ def backward(ctx, dY): half = dY.shape[-1]//2 RH_dY = torch.cat((dY[..., half:], -dY[..., :half]), dim = -1) dY *= cos - dY.addcmul_(RH_dY, sin) - # RH_dY *= sin - # dY += RH_dY + # dY.addcmul_(RH_dY, sin) + RH_dY *= sin + dY += RH_dY return dY, None, None, None pass pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index e22e3a11b9..4f4ce41006 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -114,11 +114,12 @@ def fast_dequantize(W, quant_state = None, out = None): pass -def fast_gemv(X, W, quant_state, out = None, out_W = None): - quant_state = W.quant_state - bsz = 1 - q_len = 1 - hd = X.shape[0] +def fast_gemv(X, W, quant_state, out = None): + if quant_state is None: return torch.matmul(X, W, out = out) + # For fast X @ W where seq_len == 1 + # From 
https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 + bsz, q_len, hd = X.shape + assert(q_len == 1) if type(quant_state) is not list: # https://github.com/TimDettmers/bitsandbytes/pull/763/files @@ -137,9 +138,14 @@ def fast_gemv(X, W, quant_state, out = None, out_W = None): offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass + assert(dtype == X.dtype) bout = shape[0] - if out is None: out = torch.empty(bout, dtype = dtype, device = "cuda") - else: assert(out.shape[0] == bout) + + if out is None: + out = torch.empty((bsz, 1, bout,), dtype = dtype, device = "cuda") + else: + assert(out.shape == (bsz, 1, bout,)) + pass n = 1 m = shape[0] @@ -170,30 +176,46 @@ def fast_gemv(X, W, quant_state, out = None, out_W = None): ptr_stats = get_ptr(stats) blocksize = ctypes.c_int32(blocksize) - fx(m, n, k, get_ptr(X), ptr_W, ptr_absmax, ptr_stats, get_ptr(out), - lda, ldb, ldc, blocksize) + for row in range(bsz): + fx(m, n, k, get_ptr(X[row]), ptr_W, ptr_absmax, ptr_stats, get_ptr(out[row]), + lda, ldb, ldc, blocksize) + pass return out pass def fast_linear_forward(proj, X, temp_lora = None, out = None): + W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) + + bsz, _, in_dim = X.shape + if W_quant is None: out = torch.matmul(X, W.t()) - else: + elif bsz <= 4: + # Only batches of 4 are faster with Gemv out = fast_gemv(X, W, W_quant, out = out) - if lora_A is not None: + else: + W = fast_dequantize(W.t(), W_quant) + out = torch.matmul(X, W, out = out) + pass - # Save LoRAs for inference to stop data movement costs - if not hasattr(lora_A, "_fast_lora"): - dtype = X.dtype - lora_A._fast_lora = lora_A.to(dtype).t() - lora_B._fast_lora = lora_B.to(dtype) + # Add in LoRA weights + if lora_A is not None: + out_dim = out.shape[2] + dtype = X.dtype + if bsz == 1: + out = out.view(out_dim) + temp_lora = torch.mv(lora_A.to(dtype), X.ravel(), out = temp_lora) + out.addmv_(lora_B.to(dtype), temp_lora, alpha = 
lora_S) + else: + out = out.view(bsz, out_dim) + temp_lora = torch.mm(X.view(bsz, in_dim), lora_A.to(dtype).t(), out = temp_lora) + out.addmm_(temp_lora, lora_B.to(dtype).t(), alpha = lora_S) pass - - temp_lora = torch.matmul(X, lora_A._fast_lora, out = temp_lora) - out.addmv_(lora_B._fast_lora, temp_lora, alpha = lora_S) + out = out.view(bsz, 1, out_dim) pass + return out pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 36bb58f3d9..6029005265 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -69,7 +69,7 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt -def LlamaAttention_fast_forward_inference( +def _LlamaAttention_fast_forward_inference( self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]], @@ -185,11 +185,89 @@ def LlamaAttention_fast_forward_inference( pass +def LlamaAttention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]], + position_ids, +): + """ + https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 + Fast inference using KV cache. + QK^T can be computed in 4 chunks + + [Q, q] @ [K, k].T where q, k are the new tokens. + [QK^T, Qk^T] + [qK^T, qk^T] + + Since the attention mask wipes Qk^T, we just get + [QK^T, 0] + [qK^T, qk^T] + + Since softmax is row-wise, we get + softmax([QK^T, 0]) + softmax([qK^T, qk^T]) + + We then multiply by [V] + [v] + softmax([QK^T, 0]) [softmax(QK^T)V] * + softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]] + + But notice * [softmax(QK^T)V] is just the last attention. + We just need to compute the last final row. + + This means we can pass in a row of Q, but we need to + remember K and V, which are called the KV cache. 
+ """ + Xn = hidden_states + bsz, _, _ = hidden_states.size() + K1, V1 = past_key_value + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Qn = self.q_proj(Xn) + Kn = self.k_proj(Xn) + Vn = self.v_proj(Xn) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K1.shape[-2] + 1 + cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + + # New KV cache + Kn = torch.cat([K1, Kn], dim = 2) + Vn = torch.cat([V1, Vn], dim = 2) + + # Grouped query attention + if n_groups != 1: + _, _, cached_len, _ = Kn.shape + Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vnn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) + Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) + else: + Knn, Vnn = Kn, Vn + + # Attention + A = torch.matmul(Qn, Knn.transpose(2, 3)) + A *= 1.0 / (self.head_dim**0.5) + A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) + A = torch.matmul(A, Vnn) + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, self.hidden_size) + A = original_apply_o(self, A) + return A, (Kn, Vn) +pass + + torch_silu = torch.nn.functional.silu def fast_mlp_inference(self, X): - hidden_size = self.hidden_size - X = X.view(hidden_size) - # gate = self.gate_proj(X) # up = self.up_proj(X) gate = fast_linear_forward(self.gate_proj, X) @@ -198,20 +276,18 @@ def fast_mlp_inference(self, X): gate *= up # X = self.down_proj(gate) - down = fast_linear_forward(self.down_proj, gate, out = up[:hidden_size]) - X = down.view(1, 1, hidden_size) - - return X + down = fast_linear_forward(self.down_proj, gate) + return down 
pass def fast_rms_layernorm_inference(self, X): old_dtype = X.dtype - X = X.to(torch.float32) - variance = X.square().mean(-1, keepdim = True) + XX = X.to(torch.float32) + variance = XX.square().mean(-1, keepdim = True) variance += self.variance_epsilon - X *= variance.rsqrt_() - X = X.to(old_dtype) + XX *= variance.rsqrt_() + X = XX.to(old_dtype) # Must preserve due to residual X *= self.weight return X pass @@ -234,7 +310,7 @@ def LlamaAttention_fast_forward( bsz, q_len, _ = hidden_states.size() # Check for inference - if False: #past_key_value is not None and q_len == 1 and bsz == 1: + if past_key_value is not None: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -350,7 +426,7 @@ def LlamaDecoderLayer_fast_forward( past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ bsz, q_len, hd = hidden_states.size() - if False: #(past_key_value is not None and q_len == 1 and bsz == 1): + if past_key_value is not None: # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) @@ -488,8 +564,7 @@ def LlamaModel_fast_forward( # Fix up attention mask by setting elements to 0 # Specifically for DPO - if self._has_no_labels and attention_mask is not None and \ - attention_mask.shape[1] == seq_length: + if self._has_no_labels and (attention_mask is not None) and (past_key_values is None): # Careful for inference the attention_mask is size (1, kv_seq_len) # Whilst the input_embeds is size (1, 1, 4096) inputs_requires_grad = inputs_embeds.requires_grad @@ -501,7 +576,7 @@ def LlamaModel_fast_forward( # Ignore attention_mask if attention_mask is None: padding_mask = None - elif self.training: + elif False: attention_mask = None padding_mask = None else: @@ -522,7 +597,7 @@ def LlamaModel_fast_forward( hidden_states = inputs_embeds - if self.gradient_checkpointing and self.training: + if past_key_values is None and 
self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "Unsloth: `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`" @@ -581,7 +656,7 @@ def custom_forward(*inputs): pass bsz, q_len, hd = hidden_states.size() - if (past_key_value is not None and q_len == 1): + if past_key_values is not None: hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) else: hidden_states = fast_rms_layernorm(self.norm, hidden_states) @@ -644,7 +719,13 @@ def LlamaForCausalLM_fast_forward( ) hidden_states = outputs[0] - logits = self.lm_head(hidden_states) + bsz, q_len, hd = hidden_states.shape + if bsz == 1 and q_len == 1: + logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + logits = logits.unsqueeze(0).unsqueeze(0) + else: + logits = self.lm_head(hidden_states) + pass loss = None if labels is not None: diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 91a2dd7e35..42f26b921a 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -49,7 +49,7 @@ def MistralAttention_fast_forward( bsz, q_len, _ = hidden_states.size() # Check for inference - if past_key_value is not None and q_len == 1 and bsz == 1: + if past_key_value is not None: A, past_key_value = LlamaAttention_fast_forward_inference( self, hidden_states, @@ -210,7 +210,13 @@ def MistralForCausalLM_fast_forward( ) hidden_states = outputs[0] - logits = self.lm_head(hidden_states) + bsz, q_len, hd = hidden_states.shape + if bsz == 1 and q_len == 1: + logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + logits = logits.unsqueeze(0).unsqueeze(0) + else: + logits = self.lm_head(hidden_states) + pass loss = None if labels is not None: From 35f2ab4a8b4deecbbbe9fbd95f4efde8694233db Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 4 Feb 2024 17:35:56 +1100 Subject: [PATCH 0133/1088] 2x faster inference (#151) * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * 
Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py --- unsloth/kernels/rope_embedding.py | 12 +- unsloth/kernels/utils.py | 26 ++- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 365 ++++++++++++++---------------- unsloth/models/mapper.py | 32 ++- unsloth/models/mistral.py | 67 +++--- unsloth/save.py | 14 +- 7 files changed, 271 insertions(+), 247 deletions(-) diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index d7fca30b0c..a9527520ab 100644 --- a/unsloth/kernels/rope_embedding.py +++ 
b/unsloth/kernels/rope_embedding.py @@ -134,9 +134,9 @@ def forward(ctx, Q, cos, sin, position_ids): half = Q.shape[-1]//2 RH_Q = torch.cat((-Q[..., half:], Q[..., :half]), dim = -1) Q *= cos - # Q.addcmul_(RH_Q, sin) - RH_Q *= sin - Q += RH_Q + Q.addcmul_(RH_Q, sin) + # RH_Q *= sin + # Q += RH_Q ctx.save_for_backward(cos, sin) return Q pass @@ -148,9 +148,9 @@ def backward(ctx, dY): half = dY.shape[-1]//2 RH_dY = torch.cat((dY[..., half:], -dY[..., :half]), dim = -1) dY *= cos - # dY.addcmul_(RH_dY, sin) - RH_dY *= sin - dY += RH_dY + dY.addcmul_(RH_dY, sin) + # RH_dY *= sin + # dY += RH_dY return dY, None, None, None pass pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 4f4ce41006..2ed2a68558 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -119,7 +119,7 @@ def fast_gemv(X, W, quant_state, out = None): # For fast X @ W where seq_len == 1 # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 bsz, q_len, hd = X.shape - assert(q_len == 1) + # assert(q_len == 1) if type(quant_state) is not list: # https://github.com/TimDettmers/bitsandbytes/pull/763/files @@ -138,7 +138,7 @@ def fast_gemv(X, W, quant_state, out = None): offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass - assert(dtype == X.dtype) + # assert(dtype == X.dtype) bout = shape[0] if out is None: @@ -152,7 +152,7 @@ def fast_gemv(X, W, quant_state, out = None): k = shape[1] lda = shape[0] ldc = shape[0] - ldb = (X.shape[-1]+1)//2 + ldb = (hd+1)//2 m = ctypes.c_int32(m) n = ctypes.c_int32(n) k = ctypes.c_int32(k) @@ -192,9 +192,9 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): bsz, _, in_dim = X.shape if W_quant is None: - out = torch.matmul(X, W.t()) - elif bsz <= 4: - # Only batches of 4 are faster with Gemv + out = torch.matmul(X, W.t(), out = out) + elif bsz <= 2: + # Only batches of 2 are faster with Gemv out = fast_gemv(X, W, W_quant, out = out) else: W = 
fast_dequantize(W.t(), W_quant) @@ -205,14 +205,20 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): if lora_A is not None: out_dim = out.shape[2] dtype = X.dtype + + if not hasattr(lora_A, "_fast_lora"): + lora_A._fast_lora = lora_A.to(dtype) + lora_B._fast_lora = lora_B.to(dtype) + pass + if bsz == 1: out = out.view(out_dim) - temp_lora = torch.mv(lora_A.to(dtype), X.ravel(), out = temp_lora) - out.addmv_(lora_B.to(dtype), temp_lora, alpha = lora_S) + temp_lora = torch.mv(lora_A._fast_lora, X.ravel(), out = temp_lora) + out.addmv_(lora_B._fast_lora, temp_lora, alpha = lora_S) else: out = out.view(bsz, out_dim) - temp_lora = torch.mm(X.view(bsz, in_dim), lora_A.to(dtype).t(), out = temp_lora) - out.addmm_(temp_lora, lora_B.to(dtype).t(), alpha = lora_S) + temp_lora = torch.mm(X.view(bsz, in_dim), lora_A._fast_lora.t(), out = temp_lora) + out.addmm_(temp_lora, lora_B._fast_lora.t(), alpha = lora_S) pass out = out.view(bsz, 1, out_dim) pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5f35851443..617b8509d9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -23,7 +23,7 @@ platform_system = platform_system() import math -__version__ = "2024.1" +__version__ = "2024.2" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6029005265..40e5e56e19 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -20,6 +20,9 @@ BaseModelOutputWithPast, CausalLMOutputWithPast, ) +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) from ..kernels import * from ._utils import * from ._utils import __version__ @@ -69,11 +72,14 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt -def _LlamaAttention_fast_forward_inference( +KV_CACHE_INCREMENT = 128 # KV Cache update size + +def LlamaAttention_fast_forward_inference( 
self, hidden_states: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]], position_ids, + do_prefill = False, ): """ https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 @@ -103,146 +109,67 @@ def _LlamaAttention_fast_forward_inference( This means we can pass in a row of Q, but we need to remember K and V, which are called the KV cache. """ + Xn = hidden_states + bsz, _, hd = hidden_states.size() + K1, V1 = past_key_value + dtype = Xn.dtype + n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads head_dim = self.head_dim # assert(n_kv_heads * n_groups == n_heads) - - Xn = hidden_states.view(self.hidden_size) - K1, V1 = past_key_value seq_len = K1.shape[-2] - K1 = K1.view(n_kv_heads, seq_len, head_dim) - V1 = V1.view(n_kv_heads, seq_len, head_dim) + kv_seq_len = seq_len + 1 + + # Prefill phase + # if not hasattr(self, "paged_attention"): + if do_prefill: + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda") + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) + self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) + self.temp_QA = torch.empty((2, bsz, 1, hd), dtype = dtype, device = "cuda") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda") + self.scalar = 1.0 / math_sqrt(self.head_dim) + elif kv_seq_len >= self.paged_attention.shape[0]: + self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = 
self.paged_attention[:,1] + self.attention.resize_((bsz, n_heads, 1, self.attention.shape[-1]+KV_CACHE_INCREMENT)) + pass + + Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0]) + Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0]) + Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1]) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) - # LoRA or general matrix multiplication - dtype = Xn.dtype - # Qn = self.q_proj(Xn) - # Kn = self.k_proj(Xn) - # Vn = self.v_proj(Xn) - Qn = fast_linear_forward(self.q_proj, Xn) - Kn = fast_linear_forward(self.k_proj, Xn) - Vn = fast_linear_forward(self.v_proj, Xn) - - # Qn = Qn.view(1, 1, n_heads, head_dim).transpose(1, 2) - # Kn = Kn.view(1, 1, n_kv_heads, head_dim).transpose(1, 2) - # Vn = Vn.view(1, 1, n_kv_heads, head_dim).transpose(1, 2) - Qn = Qn.view(n_heads, 1, head_dim) - Kn = Kn.view(n_kv_heads, 1, head_dim) - Vn = Vn.view(n_kv_heads, 1, head_dim) - - # kv_seq_len = K1.shape[-2] + 1 # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) cos = self.rotary_emb.cos_cached[seq_len] sin = self.rotary_emb.sin_cached[seq_len] h = head_dim // 2 - RH_Q = torch.empty((n_heads, 1, head_dim), dtype = dtype, device = "cuda") - RH_Q[:, :, :h] = Qn[:, :, h:]; RH_Q[:, :, h:] = Qn[:, :, :h]; torch.neg(RH_Q[:, :, :h], out = RH_Q[:, :, :h]); + RH_Q = self.RH_Q + RH_Q[:,:,:,:h] = Qn[:,:,:,h:]; RH_Q[:,:,:,h:] = Qn[:,:,:,:h]; torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]); Qn *= cos; Qn.addcmul_(RH_Q, sin); - RH_K = RH_Q[:n_kv_heads, :, :] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda") - RH_K[:, :, :h] = Kn[:, :, h:]; RH_K[:, :, h:] = Kn[:, :, :h]; torch.neg(RH_K[:, :, :h], out = RH_K[:, :, :h]); + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, 
device = "cuda") + RH_K[:,:,:,:h] = Kn[:,:,:,h:]; RH_K[:,:,:,h:] = Kn[:,:,:,:h]; torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]); Kn *= cos; Kn.addcmul_(RH_K, sin); # New KV cache # Kn = torch.cat([K1, Kn], dim = 2) # Vn = torch.cat([V1, Vn], dim = 2) - Kn = torch.cat([K1, Kn], dim = 1) - Vn = torch.cat([V1, Vn], dim = 1) - - # Grouped query attention - if n_groups != 1: - # _, _, cached_len, _ = Kn.shape - # Knn = Kn[:, :, None, :, :].expand(1, n_kv_heads, n_groups, cached_len, head_dim) - # Vnn = Vn[:, :, None, :, :].expand(1, n_kv_heads, n_groups, cached_len, head_dim) - # Knn = Knn.reshape(1, n_heads, cached_len, head_dim) - # Vnn = Vnn.reshape(1, n_heads, cached_len, head_dim) - new_seq_len = seq_len + 1 - Knn = Kn[:, None, :, :].expand(n_kv_heads, n_groups, new_seq_len, head_dim) - Vnn = Vn[:, None, :, :].expand(n_kv_heads, n_groups, new_seq_len, head_dim) - Knn = Knn.reshape(n_heads, new_seq_len, head_dim) - Vnn = Vnn.reshape(n_heads, new_seq_len, head_dim) - else: - Knn, Vnn = Kn, Vn - - # Attention - # A = torch.matmul(Qn, Knn.transpose(2, 3)) - A = torch.matmul(Qn, Knn.transpose(1, 2)) - A *= 1.0 / math_sqrt(self.head_dim) - A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) - A = torch.matmul(A, Vnn, out = Qn) - # A = A.transpose(1, 2) - A = A.view(self.hidden_size) - - # A = self.o_proj(A) - A = fast_linear_forward(self.o_proj, A) - A = A.reshape(1, 1, self.hidden_size) - - # return A, (Kn, Vn) - return A, (Kn.unsqueeze(0), Vn.unsqueeze(0)) -pass - - -def LlamaAttention_fast_forward_inference( - self, - hidden_states: torch.Tensor, - past_key_value: Optional[Tuple[torch.Tensor]], - position_ids, -): - """ - https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 - Fast inference using KV cache. - QK^T can be computed in 4 chunks - - [Q, q] @ [K, k].T where q, k are the new tokens. 
- [QK^T, Qk^T] - [qK^T, qk^T] - - Since the attention mask wipes Qk^T, we just get - [QK^T, 0] - [qK^T, qk^T] - - Since softmax is row-wise, we get - softmax([QK^T, 0]) - softmax([qK^T, qk^T]) - - We then multiply by [V] - [v] - softmax([QK^T, 0]) [softmax(QK^T)V] * - softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]] - - But notice * [softmax(QK^T)V] is just the last attention. - We just need to compute the last final row. - - This means we can pass in a row of Q, but we need to - remember K and V, which are called the KV cache. - """ - Xn = hidden_states - bsz, _, _ = hidden_states.size() - K1, V1 = past_key_value - - n_heads = self.num_heads - n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads - head_dim = self.head_dim - assert(n_kv_heads * n_groups == n_heads) - - Qn = self.q_proj(Xn) - Kn = self.k_proj(Xn) - Vn = self.v_proj(Xn) - Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) - Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) - Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) - - kv_seq_len = K1.shape[-2] + 1 - cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) - Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) - - # New KV cache - Kn = torch.cat([K1, Kn], dim = 2) - Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) # Grouped query attention if n_groups != 1: @@ -255,28 +182,31 @@ def LlamaAttention_fast_forward_inference( Knn, Vnn = Kn, Vn # Attention - A = torch.matmul(Qn, Knn.transpose(2, 3)) - A *= 1.0 / (self.head_dim**0.5) - A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) - A = torch.matmul(A, Vnn) + A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:kv_seq_len]) + A *= self.scalar + A[:] = 
torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch.matmul(A, Vnn, out = Qn) A = A.transpose(1, 2) A = A.reshape(bsz, 1, self.hidden_size) - A = original_apply_o(self, A) + A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1]) return A, (Kn, Vn) pass -torch_silu = torch.nn.functional.silu def fast_mlp_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) - gate = fast_linear_forward(self.gate_proj, X) - up = fast_linear_forward(self. up_proj, X) - gate = torch_silu(gate, inplace = True) + bsz, _, hd = X.shape + mlp_size = self.config.intermediate_size + temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + + gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) + up = fast_linear_forward(self. up_proj, X, out = temp[1]) + gate = torch.nn.functional.silu(gate, inplace = True) gate *= up # X = self.down_proj(gate) - down = fast_linear_forward(self.down_proj, gate) + down = fast_linear_forward(self.down_proj, gate, out = up[:,:,:hd]) return down pass @@ -307,19 +237,19 @@ def LlamaAttention_fast_forward( *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - bsz, q_len, _ = hidden_states.size() - - # Check for inference - if past_key_value is not None: - A, past_key_value = LlamaAttention_fast_forward_inference( - self, - hidden_states, - past_key_value, - position_ids, - ) - return A, None, past_key_value + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention pass + bsz, q_len, _ = hidden_states.size() + n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads @@ -351,7 +281,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if (not HAS_FLASH_ATTENTION): + if (not 
HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -373,7 +303,7 @@ def LlamaAttention_fast_forward( A = xformers_attention(Q, K, V, attn_bias = causal_mask) A = A.view(bsz, q_len, n_heads, head_dim) - elif HAS_FLASH_ATTENTION: + elif HAS_FLASH_ATTENTION and attention_mask is None: Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) @@ -386,11 +316,14 @@ def LlamaAttention_fast_forward( K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) pass + # Must be contiguous or else results are False! + # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) # Go back to (batch_size, seq_len, n_heads, head_dim) - A = A.transpose(1, 2) + A = A.transpose(1, 2).contiguous() pass attn_output = A.reshape(bsz, q_len, self.hidden_size) attn_output = self.apply_o(self, attn_output) @@ -425,20 +358,18 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - bsz, q_len, hd = hidden_states.size() if past_key_value is not None: + do_prefill = not hasattr(self.self_attn, "paged_attention") + # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + self.self_attn, + hidden_states, + past_key_value, + position_ids, + do_prefill = do_prefill, ) hidden_states += residual @@ -540,7 +471,7 @@ def LlamaModel_fast_forward( pass # We already handle KV cache position_ids ourselves. - if (past_key_values_length != 0): + if False:#(past_key_values_length != 0): position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype = torch.int32, @@ -576,17 +507,16 @@ def LlamaModel_fast_forward( # Ignore attention_mask if attention_mask is None: padding_mask = None - elif False: + elif self.training: attention_mask = None padding_mask = None else: - if 0 in attention_mask: - padding_mask = attention_mask - else: - padding_mask = None + # if 0 in attention_mask: + # padding_mask = attention_mask + # else: + padding_mask = None - from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask - attention_mask = _prepare_4d_causal_attention_mask( + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, @@ -598,11 +528,12 @@ def LlamaModel_fast_forward( hidden_states = inputs_embeds if past_key_values is None and self.gradient_checkpointing and self.training: - if use_cache: 
- logger.warning_once( - "Unsloth: `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`" - ) - use_cache = False + use_cache = False + # if use_cache: + # logger.warning_once( + # "Unsloth: `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`" + # ) + # use_cache = False pass # decoder layers @@ -654,13 +585,8 @@ def custom_forward(*inputs): if output_attentions: all_self_attns += (layer_outputs[1],) pass - - bsz, q_len, hd = hidden_states.size() - if past_key_values is not None: - hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) - else: - hidden_states = fast_rms_layernorm(self.norm, hidden_states) - pass + + hidden_states = fast_rms_layernorm(self.norm, hidden_states) # add hidden states from the last decoder layer if output_hidden_states: @@ -678,6 +604,50 @@ def custom_forward(*inputs): pass +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +@torch.inference_mode +def LlamaModel_fast_forward_inference( + self, + input_ids, + past_key_values, +): + # Fix out of bounds tokenization + input_ids = input_ids[:,:self.max_seq_length] + + hidden_states = self.embed_tokens(input_ids) + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.layers): + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states, + past_key_values[idx], + None, + ) + hidden_states += residual + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) + hidden_states = fast_mlp_inference(decoder_layer.mlp, hidden_states) + hidden_states += residual + + next_decoder_cache.append(present_key_value) + pass + hidden_states = 
fast_rms_layernorm_inference(self.norm, hidden_states) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + + def LlamaForCausalLM_fast_forward( self, input_ids: torch.LongTensor = None, @@ -694,7 +664,7 @@ def LlamaForCausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if causal_mask is None: + if causal_mask is None and past_key_values is None: causal_mask = xformers.attn_bias.LowerTriangularMask() output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions @@ -705,18 +675,28 @@ def LlamaForCausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) + + if past_key_values is not None and \ + hasattr(self.model.layers[0].self_attn, "paged_attention"): + outputs = LlamaModel_fast_forward_inference( + self.model, + input_ids, + past_key_values, + ) + else: + outputs = self.model( + input_ids=input_ids, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pass hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape @@ -1228,11 +1208,6 @@ def patch_peft_model( @staticmethod def for_inference(model): - if not hasattr(model, "_original_forward"): - model._original_forward = model.forward - pass - model.forward = 
torch.inference_mode(model._original_forward) - internal_model = model internal_model.gradient_checkpointing = False internal_model.training = False @@ -1247,10 +1222,6 @@ def for_inference(model): @staticmethod def for_training(model, use_gradient_checkpointing = True): - if hasattr(model, "_original_forward"): - model.forward = model._original_forward - pass - internal_model = model internal_model.gradient_checkpointing = use_gradient_checkpointing internal_model.training = True diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 56fc5436ab..c8c73dce10 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -19,26 +19,26 @@ __INT_TO_FLOAT_MAPPER = \ { - "unsloth/mistral-7b-bnb-4bit" : ( + "unsloth/mistral-7b-bnb-4bit" : ( "unsloth/mistral-7b", "mistralai/Mistral-7B-v0.1", ), - "unsloth/llama-2-7b-bnb-4bit" : ( + "unsloth/llama-2-7b-bnb-4bit" : ( "unsloth/llama-2-7b", "meta-llama/Llama-2-7b-hf", ), - "unsloth/llama-2-13b-bnb-4bit" : ( + "unsloth/llama-2-13b-bnb-4bit" : ( "unsloth/llama-13-7b", "meta-llama/Llama-2-13b-hf", ), "unsloth/codellama-34b-bnb-4bit" : ( "codellama/CodeLlama-34b-hf", ), - "unsloth/zephyr-sft-bnb-4bit" : ( + "unsloth/zephyr-sft-bnb-4bit" : ( "unsloth/zephyr-sft", "HuggingFaceH4/mistral-7b-sft-beta", ), - "unsloth/tinyllama-bnb-4bit" : ( + "unsloth/tinyllama-bnb-4bit" : ( "unsloth/tinyllama", "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", ), @@ -48,6 +48,28 @@ "unsloth/mistral-7b-instruct-v0.2-bnb-4bit" : ( "mistralai/Mistral-7B-Instruct-v0.2", ), + "unsloth/llama-2-7b-chat-bnb-4bit" : ( + "unsloth/llama-2-7b-chat", + "meta-llama/Llama-2-7b-chat-hf", + ), + "unsloth/llama-2-7b-chat-bnb-4bit" : ( + "unsloth/llama-2-7b-chat", + "meta-llama/Llama-2-7b-chat-hf", + ), + "unsloth/codellama-7b-bnb-4bit" : ( + "unsloth/codellama-7b", + "codellama/CodeLlama-7b-hf", + ), + "unsloth/codellama-13b-bnb-4bit" : ( + "codellama/CodeLlama-13b-hf", + ), + "unsloth/yi-6b-bnb-4bit" : ( + "unsloth/yi-6b", + 
"01-ai/Yi-6B", + ), + "unsloth/solar-10.7b-bnb-4bit" : ( + "upstage/SOLAR-10.7B-v1.0", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 42f26b921a..bc00e7a982 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -46,19 +46,19 @@ def MistralAttention_fast_forward( *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - bsz, q_len, _ = hidden_states.size() - - # Check for inference - if past_key_value is not None: - A, past_key_value = LlamaAttention_fast_forward_inference( - self, - hidden_states, - past_key_value, - position_ids, - ) - return A, None, past_key_value + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention pass + bsz, q_len, _ = hidden_states.size() + n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads @@ -90,7 +90,7 @@ def MistralAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if (not HAS_FLASH_ATTENTION): + if (not HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention Q = Q.transpose(1, 2) K = K.transpose(1, 2) @@ -128,7 +128,7 @@ def MistralAttention_fast_forward( A = xformers_attention(Q, K, V, attn_bias = causal_mask) A = A.view(bsz, q_len, n_heads, head_dim) - elif HAS_FLASH_ATTENTION: + elif HAS_FLASH_ATTENTION and attention_mask is None: Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) @@ -144,11 +144,14 @@ def MistralAttention_fast_forward( K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) # pass + # Must be contiguous or else results are False! 
+ # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) # Go back to (batch_size, seq_len, n_heads, head_dim) - A = A.transpose(1, 2) + A = A.transpose(1, 2).contiguous() pass attn_output = A.reshape(bsz, q_len, self.hidden_size) @@ -174,7 +177,7 @@ def MistralForCausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if causal_mask is None: + if causal_mask is None and past_key_values is None: bsz, q_len = input_ids.shape sliding_window = getattr(self.config, "sliding_window", None) if sliding_window is None or sliding_window == "null" or sliding_window <= 0: @@ -196,18 +199,28 @@ def MistralForCausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) + + if past_key_values is not None and \ + hasattr(self.model.layers[0].self_attn, "paged_attention"): + outputs = LlamaModel_fast_forward_inference( + self.model, + input_ids, + past_key_values, + ) + else: + outputs = self.model( + input_ids=input_ids, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pass hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape diff --git 
a/unsloth/save.py b/unsloth/save.py index 4cda8b9651..ae0f97a87e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -744,7 +744,6 @@ def unsloth_push_to_hub_merged( [](https://github.com/unslothai/unsloth) """ - def upload_to_huggingface(model, save_directory, token, method, extra = "", file_location = None): # Check for username username = "" @@ -797,6 +796,19 @@ def upload_to_huggingface(model, save_directory, token, method, extra = "", file repo_id = save_directory, repo_type = "model", ) + + # We also upload a config.json file + import json + with open("_temporary_unsloth_config.json", "w") as file: + json.dump({"model_type" : model.config.model_type}, file, indent = 4) + pass + hf_api.upload_file( + path_or_fileobj = "_temporary_unsloth_config.json", + path_in_repo = "config.json", + repo_id = save_directory, + repo_type = "model", + ) + os.remove("_temporary_unsloth_config.json") pass return username pass From 1d393f7406fb3c6fe29bf07ff78d28f577b6577a Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 7 Feb 2024 02:00:12 +1100 Subject: [PATCH 0134/1088] ReadMe Revamp (#156) * HF Perf Button * Update README.md Adding new buttons cleanup * Update README.md * Delete images/Discord.png * Delete images/try live demo green.png * new transparent logos * Revamping page * Revamp mainpage * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * finetune button * Delete start free finetune button.png * free finetune button * Add files via upload * Update README.md * Update README.md * Add files via upload * Add files via upload * Update README.md * Add files via upload * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Squashed commit of the following: commit 35f2ab4a8b4deecbbbe9fbd95f4efde8694233db 
Author: Daniel Han Date: Sun Feb 4 17:35:56 2024 +1100 2x faster inference (#151) * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py commit 051a73b0e63d3ae3acd7c4d962349280f69bbdb0 Author: Daniel Han Date: Wed Jan 31 04:03:37 2024 +1100 Hotfix - fix inference (#146) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * 
fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value commit 05624642802c7f90dcc7aeea0e1c8d447cde006e Author: Daniel Han Date: Mon Jan 29 17:49:54 2024 +1100 Fix inference attention mask (#142) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * 
fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py commit 206a9b65f090bd71ccaad7dd88b67ba2bfde0b58 Author: Daniel Han Date: Mon Jan 29 03:45:07 2024 +1100 Nightly (#140) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len 
issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving commit 8faf469f028a05852b2dc29ec8df1f36998fab33 Author: Daniel Han Date: Mon Jan 29 02:52:39 2024 +1100 Fix saving issues (#139) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py 
* Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print commit 1ecc0185a5759c7a0c95dfc96aceea5023cebdfc Author: Daniel Han Date: Sun Jan 28 04:30:29 2024 +1100 1 more bug (#138) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 
4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py commit cd32ba76b71adf3317ede9de7d1cf6f30ad3bf0d Author: Daniel Han Date: Sun Jan 28 04:20:06 2024 +1100 Fix bugs + more accurate Swiglu (#137) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask commit 89daa0efcc38c7690abbb8170b5d9f3d364796ce Author: Daniel Han Date: Sat Jan 27 04:50:22 2024 +1100 Inference bug fix (#134) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. 
* Update llama.py commit 87a7ef1049f6fca409a0673f51f4758e0aff248d Author: Daniel Han Date: Sat Jan 27 04:47:54 2024 +1100 More bug fixes (#133) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py commit 3d67790901696e953171f64b4bf9d980780051a0 Author: Daniel Han Date: Fri Jan 26 04:19:17 2024 +1100 Fix bugs (#129) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving * Update llama.py * hidden_states * q_len == 1 * q_len issue * Update mistral.py * Update mistral.py * incorrect inference * Update to transformers 4.37 * Graceful FA2 error + torch 2.1.1 * Update mapper.py * Update pyproject.toml * Fix saving and bnb-4bit * Update fast_lora.py * Update fast_lora.py * remove patching * Update llama.py * Update llama.py * Update swiglu.py * Repatch * Update fast_lora.py commit a833f403462e9cfc1f96b3b84d9da15d7d8db5ee Author: Daniel Han Date: Tue Jan 23 03:55:24 2024 +1100 2-4x faster native HF inference (#119) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * fast inference * Update llama.py * Update save.py * Update llama.py * Mistral correct RoPE scaling * Max sequence lengths * Apache 2 * fast_linear_forward * Update utils.py * Update utils.py * No print * Update utils.py * Update utils.py * inference * Update llama.py * Fast inference RoPE * Update llama.py * Update llama.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * LoRA * Fast LoRA saving commit b370c9c8aacc31a7845404566dd95dfa8c0e3bac Author: Daniel Han Date: Sun Jan 21 22:20:22 2024 +1100 Hotfix (#118) * faster saving & inference * Update llama.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py commit 57a5b5a49da588b1db8e9a988cc985dc20393d34 Author: Daniel Han-Chen Date: Sun Jan 21 05:00:37 2024 +1100 Update save.py commit 5145a61e69ab9b3035465f649e1c1e5aae749f8f Author: Daniel Han-Chen Date: Sun Jan 21 04:21:54 2024 +1100 Update save.py commit a7bd8d119c16433de4f8b6a36903ef7131f225e5 Author: Daniel Han-Chen Date: Sun Jan 21 04:13:03 2024 +1100 Update save.py commit be4b97e7d89074b6dd1d2e984fa429051d328192 Author: Daniel Han Date: Sun Jan 21 03:43:49 2024 +1100 Fixed saving! 
(#113) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF * Fix quantization_method * Fix quantization_config * patch model * Update llama.py * Update llama.py * Update llama.py * Update save.py * Update save.py * tokenizer_save_settings * Update save.py * quantization and loftq * Update save.py * Update llama.py * Update save.py * upload_to_huggingface * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py commit abb462be71e8cf01ad989dca0efaa17441113651 Author: Daniel Han Date: Sat Jan 20 23:23:00 2024 +1100 Hotfix for Jan 2024 Release (#110) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * 
Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? * Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF * Fix quantization_method * Fix quantization_config * patch model * Update llama.py * Update llama.py * Update llama.py * Update save.py * Update save.py * tokenizer_save_settings * Update save.py * quantization and loftq * Update save.py * Update llama.py * Update save.py commit 31e2d71720e64b854145d7779833b7d2d3d4177e Author: Daniel Han Date: Sat Jan 20 04:25:06 2024 +1100 Quick fixes (#106) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * 
Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? * Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr * RSLoRA and LoftQ direct support * Update llama.py * Update llama.py * Update llama.py * Fix DPO + GGUF commit 8846337e5c8c2f206a4ac8fe6d239f3d1221f7ac Author: Daniel Han-Chen Date: Sat Jan 20 02:30:31 2024 +1100 Update _utils.py commit d378df87e5f3945474915a098c9aa58313465064 Merge: c1e7480 920e3c2 Author: Daniel Han-Chen Date: Fri Jan 19 23:15:38 2024 +1100 Merge branch 'main' of https://github.com/unslothai/unsloth commit c1e7480ac2ad0e5efa05e84fe0997619ccdd86a4 Author: Daniel Han-Chen Date: Fri Jan 19 23:15:20 2024 +1100 Revert quantization methods commit 920e3c2ea07a044addeb7c3fa8be6f0189cb7f84 Author: Daniel Han Date: Fri Jan 19 22:57:22 2024 +1100 getattr issues (#103) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update 
save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? * Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py * getattr commit fc25ab0df032f8ee5ea750f27c68d63f49d2d9a9 Author: Daniel Han Date: Fri Jan 19 22:52:30 2024 +1100 Quick fixes (#101) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM 
leakage? * Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Quick fixes * Update llama.py * Update llama.py * Update dpo.py * Update dpo.py * Update llama.py * Update save.py commit b8b1eafda35d124046e11766aeeb6343957e0daf Author: Daniel Han Date: Fri Jan 19 04:51:19 2024 +1100 2024 Release (#96) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs * Faster saving + other changes * Update llama.py * Saving modules * spelling * Update llama.py * Update save.py * Update save.py * Update loader.py * Update llama.py * patch saving * Update save.py * Update save.py * Update save.py * patch saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * original_model * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * saving to RAM leakage? 
* Update save.py * new_save_directory * Update save.py * Update save.py * Update save.py * Update save.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml commit 4112eb4a3df4c0911e36211b47381086c963b4e0 Author: Daniel Han-Chen Date: Fri Jan 19 03:41:00 2024 +1100 Update pyproject.toml commit 59d74753362ff59e664cb6d650b564511e6e20f3 Author: Daniel Han-Chen Date: Fri Jan 19 03:35:17 2024 +1100 Update pyproject.toml commit c1ac4d2707574868767345e76ebe49c8353f9057 Author: Daniel Han Date: Thu Jan 11 04:08:03 2024 +1100 Fix some bugs (#83) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue * Fix up bugs commit d3887c7fd93d9b910bf6ee3ab3c7fd485fc55e46 Author: Daniel Han Date: Wed Jan 10 23:10:48 2024 +1100 Update README.md (#81) commit b5d94d9a0ad9532494e1b3c7badbb94fa92c50eb Author: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed Jan 10 23:10:23 2024 +1100 Discord button redo (#80) commit 01d7f58e11373ab07b9282a42bc14f542dbdabf0 Author: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed Jan 10 23:02:20 2024 +1100 Update logos (#79) * HF Perf Button * Update README.md Adding new buttons cleanup * Update README.md * Delete images/Discord.png * Delete images/try live demo green.png * new transparent logos * Revamping page * Revamp mainpage * Update README.md * Update README.md commit 9faaf5b388e025f8ffc302450a12ffb84e7e1750 Author: Daniel Han Date: Wed Jan 10 20:03:01 2024 +1100 Create FUNDING.yml (#78) commit 82e6fece0b78011707090639823d2d7acf5a3864 Author: Daniel Han-Chen Date: Wed Jan 10 01:02:44 2024 +1100 fix_tokenizer commit b52278199b7ae2764f242622275bb8a85ba7b721 Author: Daniel Han-Chen Date: Tue Jan 9 23:40:43 2024 +1100 check_tokenizer --------- Co-authored-by: Daniel Han --- README.md | 394 
+++++++++++--------------- images/buy me a coffee button.png | Bin 0 -> 18966 bytes images/made with unsloth.png | Bin 0 -> 70443 bytes images/start free finetune button.png | Bin 0 -> 11432 bytes images/unsloth end.png | Bin 0 -> 892307 bytes 5 files changed, 170 insertions(+), 224 deletions(-) create mode 100644 images/buy me a coffee button.png create mode 100644 images/made with unsloth.png create mode 100644 images/start free finetune button.png create mode 100644 images/unsloth end.png diff --git a/README.md b/README.md index 2a3322d629..3e56ec1307 100644 --- a/README.md +++ b/README.md @@ -1,68 +1,105 @@ -

- - - - unsloth logo - -

-

- - - -

- -

- Finetune Mistral, Llama 2-5x faster with 50% less memory! -

-
- -| Llama 2 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | -|-----------------------------|-----------------------------|-------------------------|------------------------| -| **2.2x faster 43% less VRAM** | **2.2x faster 62% less VRAM** | **1.9x faster 27% less VRAM** | **5.5x faster 44% less VRAM** | -| [⭐Llama **free** Colab notebook](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | [⭐Mistral **free** Colab notebook](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [CodeLlama A100 Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [⭐Kaggle **free** Alpaca notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) -| [Llama A100 Colab notebook](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Mistral A100 Colab notebook](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 50+ more examples below! | [⭐Kaggle **free** Slim Orca notebook](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | - -* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. ⭐**Free!** DPO Zephyr, Mistral example! [More info](#DPO) on DPO -* **NEW!** [TinyLlama 1.1b](https://github.com/jzhang38/TinyLlama) on 3T tokens! ⭐**Free!** example -* **NEW!** We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! -* Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). -* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. -* **0% loss in accuracy** - no approximation methods - all exact. -* No change of hardware. 
Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. -* Works on **Linux** and **Windows** via WSL. -* **NEW!** Download 4 bit models 4x faster from 🤗 Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit` -* Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -* **NEW!** Want a UI for finetuning? Try [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) and use `--use_unsloth`! -* Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! - -| 1 A100 40GB | 🤗 Hugging Face | Flash Attention | 🦥 Unsloth Open Source | [🦥 Unsloth Pro](https://unsloth.ai/pricing) | +
+ + + + + unsloth logo + + + + + + +### Finetune Mistral, Llama 2-5x faster with 70% less memory! + +![](https://i.ibb.co/sJ7RhGG/image-41.png) + +
+ +## ✨ Finetune for Free + +All notebooks are **beginner friendly**! Colab provides a free GPU. Kaggle provides 30 hours for free per week. +| Unsloth supports | Free Notebooks | Performance | Memory use | +|-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| +| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | +| **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 43% less | +| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | +| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | +| **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | +| **Mistral 7b** 2xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster | 60% less | + +- This [conversational notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for ShareGPT ChatML datasets. +- Our [raw text notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for text completion. + +## 🦥 Unsloth.ai News +- 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO. +- 📣 [TinyLlama 1.1b](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) on 3T tokens now works. 
+- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face! We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth). +- 📣 Now supports **Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek** and their derived models (**Open Hermes** etc). Llama 7, 13, 70b; CodeLlama 7, 13, 34, 70b; Yi 6, 34b are all supported! +- 📣 **Download models 4x faster** from 🤗Hugging Face! Eg: `unsloth/mistral-7b-bnb-4bit` See our [HF collection](https://huggingface.co/collections/unsloth/load-4bit-models-4x-faster-659042e3a41c3cbad582e734) for more! + +## 🔗 Links and Resources +| Type | Links | +| ------------------------------- | --------------------------------------- | +| 📜 **Documentation** | [Read The Doc](https://github.com/unslothai/unsloth/tree/main#-documentation) | +| 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| +|   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| +| 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) +| 🌐 **Released Models** | [Unsloth Releases](https://huggingface.co/unsloth)| +| ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| + +## ⭐ Key Features +- All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. +- **0% loss in accuracy** - no approximation methods - all exact. +- No change of hardware. Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. +- Works on **Linux** and **Windows** via WSL. 
+- Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). +- Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! +- If you trained a model with 🦥Unsloth, you can use this cool sticker!   + + +## 🥇 Performance Benchmarking +- For the full list of **reproducible** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) + +| 1 A100 40GB | 🤗Hugging Face | Flash Attention | 🦥Unsloth Open Source | 🦥[Unsloth Pro](https://unsloth.ai/pricing) | |--------------|--------------|-----------------|---------------------|-----------------| | Alpaca | 1x | 1.04x | 1.98x | **15.64x** | | LAION Chip2 | 1x | 0.92x | 1.61x | **20.73x** | | OASST | 1x | 1.19x | 2.17x | **14.83x** | | Slim Orca | 1x | 1.18x | 2.22x | **14.82x** | -Join our [Discord](https://discord.gg/nsS4V5Z6ge)! +- Benchmarking table below was conducted by [🤗Hugging Face](https://huggingface.co/blog/unsloth-trl). - -If you trained a model with 🦥 Unsloth, we made a cool sticker if you want to use it! +| Free Colab T4 | Dataset | 🤗Hugging Face | Pytorch 2.1.1 | 🦥Unsloth | 🦥 VRAM reduction | +| --- | --- | --- | --- | --- | --- | +| Llama-2 7b | OASST | 1x | 1.19x | 1.95x | -43.3% | +| Mistral 7b | Alpaca | 1x | 1.07x | 1.56x | -13.7% | +| Tiny Llama 1.1b | Alpaca | 1x | 2.06x | 3.87x | -73.8% | +| DPO with Zephyr | Ultra Chat | 1x | 1.09x | 1.55x | -18.6% | -# Installation Instructions - Conda -Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. +![](https://i.ibb.co/sJ7RhGG/image-41.png) + +## 💾 Installation Instructions +### Conda Installation +Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs. 
```bash -conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ - -c pytorch -c nvidia -c xformers -c conda-forge -y +conda install pytorch torchvision torchaudio pytorch-cuda=<12.1/11.8> -c pytorch -c nvidia + +conda install xformers -c xformers -y + +pip install bitsandbytes + pip install "unsloth[conda] @ git+https://github.com/unslothai/unsloth.git" ``` -# Installation Instructions - Pip +### Pip Installation Do **NOT** use this if you have Anaconda. You must use the Conda install method, or else stuff will BREAK. 1. Find your CUDA version via ```python import torch; torch.version.cuda ``` -2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. For Pytorch 2.1.1: got to step 3. +2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. For Pytorch 2.1.1: go to step 3. For Pytorch 2.2.0: go to step 4. ```bash pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ --index-url https://download.pytorch.org/whl/cu121 @@ -84,16 +121,25 @@ pip install "unsloth[cu121_torch211] @ git+https://github.com/unslothai/unsloth. pip install "unsloth[cu118_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" ``` -4. We're working on Pytorch 2.1.2 support. +4. For Pytorch 2.2.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. 
+```bash +pip install --upgrade --force-reinstall --no-cache-dir torch==2.2.0 triton \ + --index-url https://download.pytorch.org/whl/cu121 +``` +```bash +pip install "unsloth[cu118_torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121_torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118_ampere_torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121_ampere_torch220] @ git+https://github.com/unslothai/unsloth.git" +``` 5. If you get errors, try the below first, then go back to step 1: ```bash pip install --upgrade pip ``` -# Documentation -We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! - -We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! +## 📜 Documentation +- We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! +- We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! ```python from unsloth import FastLanguageModel @@ -159,10 +205,10 @@ trainer.train() ``` -# DPO (Direct Preference Optimization) Support -DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). 
+## DPO Support +DPO (Direct Preference Optimization), PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). -We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! +We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! ```python from unsloth import FastLanguageModel, PatchDPOTrainer @@ -217,60 +263,21 @@ dpo_trainer = DPOTrainer( dpo_trainer.train() ``` -# Support us! -We're currently 2 brothers trying to make LLMs for everyone! It'll be super cool if you can support our work!! - - -# Future Milestones and limitations -1. Support Mixtral. -2. Supports all Mistral, Llama type models, but some are unoptimized (Qwen with biases) -3. Dropout, bias in LoRA matrices are supported, just not optimized. 
- -# Performance comparisons on 1 Tesla T4 GPU: -**Time taken for 1 epoch** - -One Tesla T4 on Google Colab -`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 1 T4 | 23h 15m | 56h 28m | 8h 38m | 391h 41m | -| Unsloth Open | 1 T4 | 13h 7m (1.8x) | 31h 47m (1.8x) | 4h 27m (1.9x) | 240h 4m (1.6x) | -| Unsloth Pro | 1 T4 | 3h 6m (7.5x) | 5h 17m (10.7x) | 1h 7m (7.7x) | 59h 53m (6.5x) | -| Unsloth Max | 1 T4 | 2h 39m (8.8x) | 4h 31m (12.5x) | 0h 58m (8.9x) | 51h 30m (7.6x) | - -**Peak Memory Usage** - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 1 T4 | 7.3GB | 5.9GB | 14.0GB | 13.3GB | -| Unsloth Open | 1 T4 | 6.8GB | 5.7GB | 7.8GB | 7.7GB | -| Unsloth Pro | 1 T4 | 6.4GB | 6.4GB | 6.4GB | 6.4GB | -| Unsloth Max | 1 T4 | 11.4GB | 12.4GB | 11.9GB | 14.4GB | - -# Performance comparisons on 2 Tesla T4 GPUs via DDP: -**Time taken for 1 epoch** - -Two Tesla T4s on Kaggle -`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m * | -| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) * | -| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) * | - -**Peak Memory Usage on a Multi GPU System (2 GPUs)** - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | -| --- | --- | --- | --- | --- | --- | -| Huggingface 
| 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB * | -| Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB * | -| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB * | - -* Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. +## 🥇 Detailed Benchmarking Tables +- Click "Code" for fully reproducible examples +- "Unsloth Equal" is a preview of our PRO version, with code stripped out. All settings and the loss curve remains identical. +- For the full list of benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) + +| 1 A100 40GB | 🤗Hugging Face | Flash Attention 2 | 🦥Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | +| code | [Code](https://colab.research.google.com/drive/1u4dBeM-0vGNVmmO6X7cScAut-Hyt4KDF?usp=sharing) | [Code](https://colab.research.google.com/drive/1fgTOxpMbVjloQBvZyz4lF4BacKSZOB2A?usp=sharing) | [Code](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Code](https://colab.research.google.com/drive/1ANW8EFL3LVyTD7Gq4TkheC1Z7Rxw-rHp?usp=sharing) | | | +| seconds| 1040 | 1001 | 525 | 419 | 196 | 67 | +| memory MB| 18235 | 15365 | 9631 | 8525 | | | +| % saved| | 15.74 | 47.18 | 53.25 | | | | -# Llama-Factory 3rd party benchmarking +### Llama-Factory 3rd party benchmarking +- [Link to performance table.](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-Comparison) TGS: tokens per GPU per second. Model: LLaMA2-7B. GPU: NVIDIA A100 * 1. Batch size: 4. Gradient accumulation: 2. LoRA rank: 8. Max length: 1024. 
| Method | Bits | TGS | GRAM | Speed | | --- | --- | --- | --- | --- | @@ -280,58 +287,10 @@ Two Tesla T4s on Kaggle | HF | 4 | 2415 | 9GB | 101% | | Unsloth+FA2 | 4 | 3726 | 7GB | **160%** | -[Link](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-Comparison) to performance table. TGS: tokens per GPU per second. Model: LLaMA2-7B. GPU: NVIDIA A100 * 1. Batch size: 4. Gradient accumulation: 2. LoRA rank: 8. Max length: 1024. - -# How did we make it faster? -Manual autograd, Triton kernels etc. See our [Benchmark Breakdown](https://unsloth.ai/blog/mistral-benchmark) for more info! - -# Troubleshooting -1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: -```bash -!ldconfig /usr/lib64-nvidia -``` -2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. - -3. If it doesn't install - maybe try updating `pip`. - - -# Full benchmarking tables -Click "Code" for a fully reproducible example. -"Unsloth Equal" is a preview of our PRO version, with code stripped out. All settings and the loss curve remains identical. 
-| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | -| code | [Code](https://colab.research.google.com/drive/1u4dBeM-0vGNVmmO6X7cScAut-Hyt4KDF?usp=sharing) | [Code](https://colab.research.google.com/drive/1fgTOxpMbVjloQBvZyz4lF4BacKSZOB2A?usp=sharing) | [Code](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Code](https://colab.research.google.com/drive/1ANW8EFL3LVyTD7Gq4TkheC1Z7Rxw-rHp?usp=sharing) | | | -| seconds| 1040 | 1001 | 525 | 419 | 196 | 67 | -| memory MB| 18235 | 15365 | 9631 | 8525 | | | -| % saved| | 15.74 | 47.18 | 53.25 | | | | - - -| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | -| code |[Code](https://colab.research.google.com/drive/1gjL1TaKwc_xv2TcxJC8QWEWBG1msh3g2?usp=sharing) | [Code](https://colab.research.google.com/drive/15vlPjMr8xDj5BFhGdqunGaOQSMqXPEXU?usp=sharing) | [Code](https://colab.research.google.com/drive/1zPwvf-BmHyHlPMBxDsY8zS0BnQ-KKbCc?usp=sharing) | [Code](https://colab.research.google.com/drive/1X2uHy-arRsZxqWHvKHwwW102JaMwChD2?usp=sharing) | | | -| seconds| 581 | 631 | 361 | 315 | 82 | 28 | -| memory MB| 7763 | 8047 | 7763 | 6441 | | | -| % saved| | -3.66 | 0.00 | 17.03 | | | | - - -| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| OASST | 1x | 1.19x | 2.17x | 2.66x | 5.04x | **14.83x** | -| code 
|[Code](https://colab.research.google.com/drive/10NzDreFbuWELGUuBv0MOoC7y3MBewaNx?usp=sharing) | [Code](https://colab.research.google.com/drive/1TwdkJ1sHsuEH-kgeCPqSFeCpOnCfz6Ou?usp=sharing) | [Code](https://colab.research.google.com/drive/1AkwjUkOF0XeRBMT_S8Uhh74kitEsZHla?usp=sharing) | [Code](https://colab.research.google.com/drive/1roMkp2UjbeK2t3DkNz50cRs1MT92RPFT?usp=sharing) | | | -| seconds| 1852 | 1558 | 852 | 696 | 367 | 125 | -| memory MB| 26431 | 16565 | 12267| 11223| | | -| % saved| | 37.33 | 53.59 | 57.54 | | | - -| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Slim Orca | 1x | 1.18x | 2.22x | 2.64x | 5.04x | **14.82x** | -| code |[Code](https://colab.research.google.com/drive/1UNo1xsMl8YH7xnWnIVjDFnCAPfc0RGgu?usp=sharing) | [Code](https://colab.research.google.com/drive/1zbphER-SKhbSWGjHTfnBLPFyTgIVvaeH?usp=sharing) | [Code](https://colab.research.google.com/drive/156si33585iv4Uh-VILFglUmIMrNCNuc2?usp=sharing) | [Code](https://colab.research.google.com/drive/1_mhZy7dfl9jEnJRuJBZJ5y3OwW06jgQA?usp=sharing) | | | -| seconds| 1824 | 1545 | 821 | 691 | 362 | 123 | -| memory MB| 24557 | 15681 | 10595| 9007 | | | -| % saved| | 36.14 | 56.86 | 63.32 | | | - +### Performance comparisons between popular models +
+ Click for specific model benchmarking tables (Mistral 7b, CodeLlama 34b etc.) + ### Mistral 7b | 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-------------|-----------------|--------------|---------------|-------------| @@ -345,7 +304,7 @@ Click "Code" for a fully reproducible example. | 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-------------|-----------------|--------------|---------------|-------------| | Code Llama 34B | OOM ❌ | 0.99x | 1.87x | 2.61x | 4.27x | 12.82x | -| code | [Code](https://colab.research.google.com/drive/1ykfz3BqrtC_AUFegCzUQjjfUNlxp6Otc?usp=sharing) | [Code](https://colab.research.google.com/drive/12ZypxQh7OC6kBXvWZI-5d05I4m-B_hoR?usp=sharing) | [Code](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Code](https://colab.research.google.com/drive/1fm7wqx9MJ0kRrwKOfmLkK1Rmw-pySahB?usp=sharing) | | +| code | [▶️ Code](https://colab.research.google.com/drive/1ykfz3BqrtC_AUFegCzUQjjfUNlxp6Otc?usp=sharing) | [Code](https://colab.research.google.com/drive/12ZypxQh7OC6kBXvWZI-5d05I4m-B_hoR?usp=sharing) | [Code](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Code](https://colab.research.google.com/drive/1fm7wqx9MJ0kRrwKOfmLkK1Rmw-pySahB?usp=sharing) | | | seconds | 1953 | 1982 | 1043 | 748 | 458 | 152 | | memory MB | 40000 | 33217 | 27413 | 22161 | | | | % saved| | 16.96| 31.47 | 44.60 | | | | @@ -355,87 +314,74 @@ Click "Code" for a fully reproducible example. 
| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| | Alpaca | 1x | 1.09x | 1.69x | 1.79x | 2.93x | **8.3x** | -| code | [Code](https://colab.research.google.com/drive/1XpLIV4s8Bj5uryB-X2gqM88oRGHEGdaB?usp=sharing) | [Code](https://colab.research.google.com/drive/1LyXu6CjuymQg6ddHX8g1dpUvrMa1nn4L?usp=sharing) | [Code](https://colab.research.google.com/drive/1gsv4LpY7C32otl1rgRo5wXTk4HIitXoM?usp=sharing) | [Code](https://colab.research.google.com/drive/1VtULwRQwhEnVdNryjm27zXfdSM1tNfFK?usp=sharing) | | | +| code | [▶️ Code](https://colab.research.google.com/drive/1XpLIV4s8Bj5uryB-X2gqM88oRGHEGdaB?usp=sharing) | [Code](https://colab.research.google.com/drive/1LyXu6CjuymQg6ddHX8g1dpUvrMa1nn4L?usp=sharing) | [Code](https://colab.research.google.com/drive/1gsv4LpY7C32otl1rgRo5wXTk4HIitXoM?usp=sharing) | [Code](https://colab.research.google.com/drive/1VtULwRQwhEnVdNryjm27zXfdSM1tNfFK?usp=sharing) | | | | seconds | 1599 | 1468 | 942 | 894 | 545 | 193 | | memory MB | 7199 | 7059 | 6459 | 5443 | | | | % saved | | 1.94 | 10.28 | 24.39 | | | -| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| -| LAION Chip2 | 1x | 0.99x | 1.80x | 1.75x | 4.15x | **11.75x** | -| code | [Code](https://colab.research.google.com/drive/1EtdStADehE4FVJnU2Cu6O8p9jDYdqG2L?usp=sharing) | [Code](https://colab.research.google.com/drive/1Ik4jO68odUiQIJ_szZ3xok5fk58WpA5Q?usp=sharing) | [Code](https://colab.research.google.com/drive/1E2nR4V3bXIWBQIUE7uR39lYPr3UikzqH?usp=sharing) | [Code](https://colab.research.google.com/drive/13jbj8D8FOt9KyXwZt9Yf2MsYkD8CyCVR?usp=sharing) | | | -| seconds | 952 | 955 | 529 | 543 | 229 | 81 | -| memory MB | 6037 | 6033 | 5797 | 4855 | 
| | -| % saved | | 0.07 | 3.98 | 19.58 | | | - -| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| -| OASST | 1x | 1.19x | 1.95x | 1.86x | 2.58x | **7.3x** | -| code | [Code](https://colab.research.google.com/drive/1aXzGgEM3yYB6SWy_XR81nQFWME40ksSy?usp=sharing) | [Code](https://colab.research.google.com/drive/1-5MdIOp0cM0scC-CdRZhh8OYhnGHqct4?usp=sharing) | [Code](https://colab.research.google.com/drive/1n-fgduZhRUsSjgpqNtVkXA3rSfE7iBdg?usp=sharing) | [Code](https://colab.research.google.com/drive/1z_GlHr2M_bB4lQrPhdWC7dseZv23cBIy?usp=sharing) | | | -| seconds | 2640 | 2222 | 1355 | 1421 | 1024 | 362 | -| memory MB | 14827 | 10391 | 8413 | 7031 | | | -| % saved | | 29.92 | 43.26 | 52.58 | | | - -| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| -| Slim Orca | 1x | 1.21x | 1.77x | 1.85x | 2.71x | **7.67x** | -| code | [Code](https://colab.research.google.com/drive/15yLlJx9IE84kzx7ikky45pRcarPyUtEs?usp=sharing) | [Code](https://colab.research.google.com/drive/16IShIBmjKULWy87I-xURpj4nztTkAF13?usp=sharing) | [Code](https://colab.research.google.com/drive/1CJG3XLg_OQpCz71eB7Uqx7wuK_n2b-a8?usp=sharing) | [Code](https://colab.research.google.com/drive/1UmwuWHtlrC6MAfl9mX7A_TRfo5iSHDa-?usp=sharing) | | | -| seconds | 2735 | 2262 | 1545 | 1478 | 1009 | 356 | -| memory MB | 13933 | 10489 | 7661 | 6563 | | | -| % saved | | 24.72 | 45.02 | 52.90 | | | - ### 2 Tesla T4s via DDP | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|----------|-------------|-----------------|--------------|---------------|-------------| | Alpaca | 1x | 0.99x | 4.95x | 4.44x | 7.28x | 
**20.61x** | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | | +| code | [▶️ Code](https://www.kaggle.com/danielhanchen/hf-original-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | | | seconds | 9882 | 9946 | 1996 | 2227 | 1357 | 480 | | memory MB| 9176 | 9128 | 6904 | 6782 | | | | % saved | | 0.52 | 24.76 | 26.09 | | | | +
- | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| LAION Chip2 | 1x | 1.12x | 5.28x | 4.21x | 10.01x | **28.32x** | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-laion-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-laion-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) | | | -| seconds | 5418 | 4854 | 1027 | 1286 | 541 | 191 | -| memory MB| 7316 | 7316 | 5732 | 5934 | | | -| % saved | | 0.00 | 21.65 | 18.89 | | | +### Performance comparisons on 1 Tesla T4 GPU: +
+ Click for Time taken for 1 epoch - | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| OASST (bsz=1) | 1x | 1.14x | 5.56x | 5.09x | 5.64x | **15.97x** | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-oasst-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-oasst-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-oasst-bsz1-t4-ddp) | | | | -| seconds | 4503 | 3955 | 811 | 885 | 798 | 282 | -| memory MB | 11896 | 11628 | 6616 | 7105 | | | -| % saved | | 2.25 | 44.38 | 40.27 | | | +One Tesla T4 on Google Colab +`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` - | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| Slim Orca (bsz=1) | 1x | 0.97x | 5.54x | 4.68x | 6.88x | **19.46x** | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-slimorca-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-slimorca-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-slimorca-bsz1-t4-ddp) | | | -| seconds | 4042 | 4158 | 729 | 863 | 588 | 208 | -| memory MB| 11010 | 11042 | 6492 | 7410 | | | -| % saved | | -0.29| 41.04 | 32.70 | | | | +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 1 T4 | 23h 15m | 56h 28m | 8h 38m | 391h 41m | +| Unsloth Open | 1 T4 | 13h 7m (1.8x) | 31h 47m (1.8x) | 4h 27m (1.9x) | 240h 4m (1.6x) | +| Unsloth Pro | 1 T4 | 3h 6m (7.5x) | 5h 17m (10.7x) | 1h 7m (7.7x) | 59h 53m (6.5x) | +| Unsloth Max | 1 T4 | 2h 39m (8.8x) | 4h 31m (12.5x) | 0h 
58m (8.9x) | 51h 30m (7.6x) | - | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| OASST (bsz=2) | OOM ❌ | OOM ❌ | ✓ | ✓ | ✓ | ✓ | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-oasst-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-oasst-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-oasst-t4-ddp) | | | | -| seconds | OOM | OOM | 2719 | 3391 | 2794 | 987 | -| memory MB| OOM | OOM | 8134 | 9600 | | | -| % saved | OOM | OOM | | | | | +**Peak Memory Usage** - | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| Slim Orca (bsz=2) | OOM ❌ | OOM ❌ | ✓ | ✓ | ✓ |✓ | -| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-slimorca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-slimorca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | | | -| seconds | OOM | OOM | 2990 | 3444 | 2351 | 831 | -| memory MB| OOM | OOM | 7594 | 8881 | | | -| % saved | OOM | OOM | | | | | +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 1 T4 | 7.3GB | 5.9GB | 14.0GB | 13.3GB | +| Unsloth Open | 1 T4 | 6.8GB | 5.7GB | 7.8GB | 7.7GB | +| Unsloth Pro | 1 T4 | 6.4GB | 6.4GB | 6.4GB | 6.4GB | +| Unsloth Max | 1 T4 | 11.4GB | 12.4GB | 11.9GB | 14.4GB | +
+ +
+ Click for Performance Comparisons on 2 Tesla T4 GPUs via DDP: +**Time taken for 1 epoch** + +Two Tesla T4s on Kaggle +`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m * | +| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) * | +| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) * | + +**Peak Memory Usage on a Multi GPU System (2 GPUs)** + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | +| --- | --- | --- | --- | --- | --- | +| Huggingface | 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB * | +| Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB * | +| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB * | + +* Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. +
+ +![](https://i.ibb.co/sJ7RhGG/image-41.png) +
-# Credits +### Credits 1. [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support 2. [152334H](https://github.com/152334H) for experimental DPO support 3. [atgctg](https://github.com/atgctg) for syntax highlighting - diff --git a/images/buy me a coffee button.png b/images/buy me a coffee button.png new file mode 100644 index 0000000000000000000000000000000000000000..5eccb8e94be8c5b89e06347e463ab51fc9865109 GIT binary patch literal 18966 zcmYH_1ymH@*Mx+mNVkBrv~(#*ry$)8(hbreAT1zW(h^H|hb)~-H`29q*Efj2|2OCC zfirLBy<6|Tn-iiaFM*CigaQKtgDxfcK?w#1J`V;4_8BrFaEHH_o)-A?+)h%%5e5bo z`|$@0lblKb10&TV_2K;|x77UwFVEPs=7%G-7#i#38k|tv58t!CDZg*(Vo#0jZ6cQF z{y18>uWq|xo6={nHNL=GQ%~yZ0v5=E&TsurTi9~qTmR+AL%Op!{o(x~qGISWIv$F( zMGBWi@)Efk{I-q*b$tw@+Of9~S#^wVeTjn2m$NUM9-BmWmlBfif5Fn6`$X(M@F5eR zh9ef+aPy!eG2ksZou6?;)J*ztR*++1JZ}d&O)8oHx)pXD+R0sWr z!o8->ZDVV5S~g$)c3;}*=NsW1@(s9*3&Y_fXgKNTOR?RM+!`Nt(e>dE`9_RP{9*)A z@~$j#Sc!Sacrd8K<;6;q9$%1bf?>^mVAN$PL!injLDEIg?J$ps6IfMJTWn%Op!$|R z`EX|*4y}2FiqbGbC>(Dh!59C!)wa5t5*9~x{}XyDLh8bUR}*OBak_6f`IvtBe!?(C zzJllR%BeWAj=OH*ufA}J7yW8j4}E-MEeiP%Km7YOQaH3V1|B7Eh8tTQSv!1GNA1Jo zm74k7t?a=vtvMHm3jwmngID(qfVR3G59{&joyS|QZG&$UPqC!uLCKpWgulfefk`#D zrT+_HjF_b#Zd>VS;wKrpL{sQ$TY_&G>m(uW&?_t;cgJ;*q+!Q2>^n9J7751HP7o)91-7teDerZNhztWCYm(dSb{KQz*31m_bc zk4XNdPqAWXG+T|Kab)>_~UilVCB#Vip3uE(s7w)g9f2*wbBlZ(VaWXfgK!o@p z>LSQjH@^5wWh_)u%qFzA)7ru7DD;W(uoo~F@NryIe*vY8LC01*a>@VGZ%aes$IkAYVI*`yJLO!-(x8b_r z|AR%Mq^(Cf8<9yKqP74dSY@*fV-0&g*q4-h>LF|^ zr9!8L5F!NCPcKXHug;=0q{bBIX&!-!Aokbl^O3KHC_U{fta9BqsrLa3fxr9n^}p~D zDL*|PV=c7I-9~F&V$|ySJP+d6?d$L#rRm|;+YZ<&68DcO63I{mLdwCB|JVbj3xDJC zYawu&RQ6;e<+`6h6#L94&Q)pMLe3ws?Vw*0(T)Ga#7^yH z=0*$~HP&X>y4gIVu)p*6D5@D4SEp7|0UWOz3|jpsdxo{JW*gT(>oM}M*(Ups)-LP* zaV-_;l9QD_0cp^Lm8pwB(L)g<>Pi1l7Vsipl8OiQhiD^08hv9O+ytJMl^T{>vt!TE zMu7DDGCDfF`6p5HaReI7yAD{g`LM1V%REoh8-f~rvn#y5TOlcvgN-F{HH7|Qug>Dj zMyRCPonp$L=uOTe)ZhI?fapD5GO9TfIj(?hi?%b3`PTo4^>P_|X4bvN-vXwN9HjO{ 
zb|qwiHk9yUvwAB8$N{{w#%%4sr*%m+cE(a7^uMr|(V^ZvZB8r?rj8!Z_zJ$nFMb}1 z__}|=juu&K@A1@Sbp7lMP%+WqNa#t zxA?mS!>*xXD>j`Vk%GUAO0v;)_V~t^F(i5uU51O@ynLGf_JT0{O(Gkkzr}Mr8j~kd z*@tHs8h3=NW68Fnkr=gmKh64EBNDMCW?P;;?e-Vq^!S6$)9p|NR94V~hbFrRiI)o7=2<5FDG&?yZ;B)Tww1Qv?RnlQGj}56eYG^;zdS{V*@kFHP4rLTijx_!}Y*|`(wH!y*e-E3h!8jKz(x>S+ zJUh&@7yftGN_V*kxI3Y*iLRZ#^Zp5l9CPi+Xi_;D&O~p*Uy?^cPlBK%z~Eb&;}HDG zzW}q2IoILevjjR}(G+r0)QP%q;R^L6|K{ad^^+O2NtI@8ra1ejuX7}IY+Xbv+nw-O z1)Kl7hb-6K6C;&`o~I|<`gEpZ(JQH?%=E~zf}Y`-sy^+AZ#bD@Z0sf?rvBTx<{<=6 zYe?xJ94hp3v17}Y_BH+QT`3Ne3Vl9Oo|)wTEs^#irq?MJ0g@wE@`MSc)a!xx5dVTO z9J~+zB#W>;mw?bwJ;dKa&@OTP>FD|Jk#E07juln`Q6p9aQTY5(=a!$&;z9Ia^m@d^9Ny{eGS8L@nM||g(q?$Fo0yn*xK6CpDNM$DnbFdp-W;5K4y&nlf*74qw-e`;>0~5-1FWCtz6T4 zGjLZ+HudCHi;5IX?XE}iLziXru4Cj>aO2J2_CXgKXKJj8cPBxsX|9Vam_C>AyHlkK zZmo+`y2eWpn;*}wY!zMnbb-?}T7=7|DuDyzSZUpv!+E?-W}V5pxoHY_<^ICM9OaDn z*p1TN5V}l^SeqyhyV$G2TcJ0sd;vMDBu1L&a@>&B^*U2YVPL>$*Z+RDJ9W~ybQjr8 z<`K@MRr7j&k$cm#ukg0KP2*MrQ$Hp~#>&j^fBszD z@Scq;KmO|1KGZb7Unag-;dS~PSya84epRh8cM%l- zg1?cnX=bt?GDtzhKY`m!kaL`_Sxwc`;e1n}F1NGeX_KCzCEAwaRv3uRL?Q1}z{xEp zHTnzFt>K!%IO!J1nA~h`USGyP*v_I)p(9CATw4cQxBU6G34BWN^0kIezkrsj#YS{AwP&lxKm#x9ZJ+xl zMP+bjW@3PRzq*m67aL!WPOjx6hK_?<{Q|Wlmc_5Lqsb&r;Gr~o0@B`7BJ!|E&>D!t zQr*+s`>tiC0ATRS^$sj(FwjW=E0|z}j)+`V0-n6znyDhg!#vDFF9WfD&SEH)w^*Yx zh{flszw^Gy_5#W2z6_SKxt?iYC2#7o&S%Kn%bx{C`z^J;>+>5X5Qp%j%xmm3Sj-|b zwMr{h=b=T%YH!S165&vQC?_TP{-jWWl9-0Vd5}u_?rI3n@6mV8d*nwJTc3jTran1j za$C6~u%p$^4KBDY4Fa2mcW>BZ=}S_{!}XWQA_lZa6xx%LKNLylS?4+h-EI{tn+Wd&#lqG)tH=q-in)ReSW8YztMb6 zGdp49`aGtMb4t6j`C^fOZ}`joA!(8-+-*Q&llj7Xj zT3DBzarc7*oQ1g6z^%SI}o3@sNlxZDws|dYYe3FVn=m*y5=#K)cKS0yOcR?k5LHkHr-f;^FiM-QN;!UBK2lB zw9MF1?KmR$)x@>n+wp5FosB=oRC}1+N_B>-s3#!C&884 ztpwsbtjDRb+syMQU)&72wse9ch}|z&f|2IQtmoto)r7Nd!)o)i8{c(Sp@3S|)|iE} z=4+-}g_b{B78uL51dSycqUN8_CLMG%eU|t>SLNSsDA-D&se(V2lk6U zVJyiQx%#qis%;kdCP7-=!`|l{HVxY}C5=!%D&a`>{~ph7Z<7 zU8ho^4w?%hcKH$@y?_nNgl`;{Us(OA!mLY5g)nezL;tJm?3bBwygWe?6Rr7!p775` zt&b`pw=cy7f5mXSUTM=Y#hVE_T^E%r_g;cj;df@_@1CsoY1{r$-#l~d?%76H+t~}R 
zhr0`nDs!x1ffKaW>xt4|za$0lUcW%L>n3%z>!nUWyM3RNke;u)+17LKePv2y`{ zT2lCfX=%9GfA4tuM^r{x=mPv!wrIIt$3PXzG~Y|0P|Ah>Qq5|o?Dgx7Yd1;^75`|l zDKO%Sg=Yaz@>I&zWmq@4Pip0`P)cNEWX>G3cHQ^u>uZYI;QL$FQT^i^{E9q9^4iv^ zPC$9q?R7Q3txnOC-%J&K%lMfsg?m&XjiolWb90WbP>BbRT(i%gzMvZH8k9X>Zci>D z2_F4Zyro(%A^F{b*)@6UTY=bd3o;CgcJ_WGKPPfN#zCA+W zxAUM(7Lh7z?&yofyi)h>vWu&Jz59-<&5YrU`_ufKo7U~Tl)7bqRK)qo+S;VUCAF2C z-)0~;PB#xN;i7YFzk^G(>Zar7#3Nq7c~K%`7=1h>{GHXV|7X4*IeiOESqMJqt0}H;8;lKBf@wFDd57R ztRJ68iy$^wRh>Sk*!vtTs(%?ZA&6YvUQ;)wvQl^S-x&;<0!i~{}p@~)|!8F@gQY9M=T1x*sC0N z=x3@uY(DT}S-A%!-!o4x1q}QU=gD+$Xb~Vha+yN$Ej3X$o8j^e)@{34Lz^{g9kQw0 z^HTSz2#{TxgqaqrV4S?UNQ|7*MQP`ruiJNA!RmLni$7@el7$QE0=B_5^DZVsfEqX+ z$FkEWg>+iK`5^uxLL4-wA%Qf1L3i7)utJO3R#<)5u4`AH`5Qg`{$NzdZBz7e!SQh6 zAxKQvKVMI>=^)`AW9k8>rIJJyx64t+o_<6F_iwRy)9|%AE|!0a&Uw zEQ^XLBSaO4vy7Ngv@!XJ&mi;w->AOn1#?jCa<~2Q3HC_$pS0nrLj*_og(@Aow`~R~ zMIEVZ_mF9&ta zYcf#QB*BTZf?(hF&|qHO;c2IWmSFTA2%8qru)#!`5cmOW!e2x>Lf0Gr`i#=!6*erY{_3 zQ@h4whX=;Pjz&3L$v1V`$+Ja--j_ak!STJV=x1gj|;mb{m`a z(hh(MFMB*ZT)^UbABG3&38GbV9?qN;yNz4Ww?gx)biDe$<4ra~8^U-UH^SXo-@R)4 zL9aYrV?F0Pce6Mb6Tu$Vv7>rYc`3cpu*K>#8ISS#*O^IGi7^-F_;un(osO!Hx(rRs z7|a)YwlmhB4|1vIyN61UZEMbsog91Oo%KL3J&I`~IsLV*NQZ{(et`B3_&PpvRR@w* zf9>7GWaOyUG2`j0-gOinoeSM~kx)y}s&>)55Er{%|FSthB>+okCv?vTq-VK55o4h7 zC&w`?_A{;wsV1CkRU=Ao%+do^vP-)`Fj?YP^SZLFQ2%}tt5pZKSE=Hw9c!0F=GMXM z7or;UBo5z?PRt*Kf);%C9*~gq!&l=HM4WkV58&l_)8-w+H7-R$|!QIcB?N~g(l@qTZbGj?Q+ar%=Tq)FayszN&t z?hE-aCb$S#j#66k$+I53_Gjs`G9=3(u@WDJnU7x54Ynf#r-9+NJxEo!VfY8 zmD6s-ozGjiiMWvp;`@W%d0_>6Pc!YeYc>0GwI@48r8G@TqY{`p*7UPA#Mh_Kb(^9h zq!&f1TYSNpLS}}~eH#j2ZbS`vqAw-_G_s&Wa2wm`mE-H&XmHl!owlf z^`FQO6O(g~6AoN9eeuG^}vIdNH5M378$jSyIc_ zOzFvW)m0oM>lU#bl4gc8(dzBC(;y670G0chAM85@h2+ zM5_0_wxF;qNDZqPHWbv8d?-koTu8N^;My7tZiBJq$+7oyP|Pkb_$Yq;tX#UwrjZ8x zn%OCYNx(`GY_aLtC-*x2*l`m9n8-cWqeEUF_~+gf4fX=o1ysP|)7U;&)mIXZ$bsVE z8X7&Wtzw+`I7#rpF>R5{GiyEC8o~K)8UEPdRo|&D=h#8#h_{^sNUs$RkjKb|7EhEn z9V^dI*#N_@r|&+0>hf~m!l**X{fCCkZg@OPk=eU+?$Hd_m2oR>-9}-83eB1&2n^s# 
zstY#G9%B}wgv1Up$rq|P6i%X}H5cM-T3T?!6{Prq(n{Kyl8+w9aCmgktoDr+=}1Xb zFKlJzhY_98gYJF0Ng)$UD8$b@J@_4t_QHEq_)fE`o0|n!yqa`bn5TZ0(5|HamYKi_ z;mepi?SOL6v0_Qq2%dT$Lz`er`(sKZ!Z4XQosTkDKx4JGY}~4SebwTFRdh>@OdcSO z^7u|Gg(jTC26e8Y*>h%Vncdqrd|VzY_KP2cFGymWS$~9H`*wfA{|$KXa{x|bfE+_M zFJ2P{tmOGa`Pfw@}%8As!>ojRp732!%yW$oOIkH)t#(4U4Pl^}}J_ zMriOL}$D4Bg0IQD*PNP(c zr!@BHYQ0)ctV))2I`uhpTnqTzq0VHFm;G=aBYrcYiM?H^-px@`7 z-)k7H^XUd>G+c`oGTI2ko1R2i*NUx15hJ*gzhDEW%v#v>(~5_gQ#Lm@f5;91 z_s6-}$KilO4gs_9uAes%OY6A3ScMHEqVVAX}0o2hP2S@zbQrsQ~_3}m5dNhKYs0DI)7Aq^JUz3|oJlpesax}ZR zkm6vv&Zauw2RUKNiMc0@!twqz`2oMA(}|!$@Pa+~*<}08w>hCczc#`-8X-3fjHZ&h zhbXzewIg3)`!MmEvwhNroVMw5BkV<560%vibq2=ZvD?en!pcXGE3Lsk6P@D;;2`Ek zqLRw!WTd^Z@NA|686^@1&k#m#EFi&{W~2ad8=}u%gdp3*VwuCBc*V&{g{J*2--57T zzyTjz<5J*<~%aSRYz_QtmnBE|y9k&M!G-vOgLTtDUyl{Hcuy z9KK^JIm(>CbxlE~#G<436Ac~;O=0x%5T>j!S@=FjE0_G^(A(nzO6rips2I^G-B~M9 zkxBi#wRI+mdqKK#-_Nxn#>G;=GK+kysfV2dg9D#qFt2mK5#DZyBG&?`8*qGb@LNpd zFQgn1%#I{$x2zry~rO=(|9VeYOhmpqaHaN1GXyOGhk5vuMzd1UfLq(t&_zb%sQWHpMS1BZ z?yBqBi_DaB1j@G;ULqH`L3ExlQIw(GBe&cM#kg(pc?i&wU%eM5svVYm28U}mZBQU6 zk9uZf^?H`m2`(Mb-LIRLHU?9I=Qj!o0*kU{3&&8z&4N(#`6uoUF6`E;LcjX(7{$KI zoh_VE+~t^<{XQt=#cr@s4{MZ_a?PY;p));XJl^AS0n-eCIzfz_aKUWwrkYNvlCt5IVQCyK;W<<=4_!mvy&_X2a`{9Poe# zA9&Alewr}T-VwjQ*4c4x6c*Bcu|XJD>}I+=W)<>f3UC3g(^LUuDuQJdz_M^EB7!1ny0)@GrBqP8A0nRU-?!VtgJ z?iw&Xa0ad1zPFxFs^jdF2YgcH{vF>dXU$d4aT@ZlA0TJy!K!gC zBq|G?@UG%{;tfVi%~%_?-HWnI#ofsP(#RzHCN@{A`EHd`*PPorVt%UBdnL;bRfb+N zj)Uz)C*4&bT!A{ZzZw~_VQNwP+2L)z{aAgMRCVaS^a25+jx<@HdAP-(I?uJFxh?VZ z4)3dPV4EnA1yRHe8L8Xcvf@rCX8lJannelu{Kr>lM-K;%!@P|J0^4#pbs?VQ=Ck>StyS<@o;J>Q~VFT`F92bBb1_E*G_a{ zA_}!^yZiShI$21Qc+OqbHegL@q-P^rZPwzN7f$cOEM+}kk^WxiJDiwBf#kJsXAE@2 ze%Ia#>_#rRT>L)A$iPr{+{cK$cu_s2YqScE5Vc`R`qE@_hT8yGs8DAAYVZJaRE%`d z&m^g+X+97(|{_(-8>9=?Jjp*9F z9noBToo~|nhdE3kU)~H~g^aj+A@M&)e~3&?=gk$qFQT)I-EA$L+PGOvH>TTlJfHVG z8I;02YJ`-Y zQgR{E%sfLk6$%+LA3oT2{4!Tc7>Q90flQnK?IdQ5_;R-lN z_ z%Wq$diQ>6kPC%QcmHMd&sHB02xc>g6eo9@ppFgtV_ip)}j7727TW|U#b5B%;JEo$$ 
zo7oIu2wK)?-Md|vP6_h%HNLuyhGJP9oPDe8o9ITnxa)08TZ-9+6!Wrxo_Mv8riY<# z7sSHe0KYFc>&3ry^SUov1Ld;p%Qj(Xx41()$&~RgGSb!*u5S~!Us~71agmymbysLi zXz+WNB)K=4cE9BR=rKKiFjG8A9f_W&To_a9ZL-Sgy>*wKvpHaxoICTRkKHpn(Q&#Z zWUrWG3Eg6OPh{(kQOnG@om>fblc5SJUhs|U&i{{3D|ur ziR>Kx@#?%K7Imwej2uh^1?(qoDYG7=#61JkHio3qzEU(({Da)iLat+k2`PYLXaWPK zFAx!kwMiImbye-%?B=Xv(U!#hA(ebCW11mWH#5@Q&kiE4{U^^%Fa&GA!JSst+<}~MV(Cj*G zV(6Kn?#sx^u&RkN0mQ3ib@X%CBS$QfMZEIhzGUIG_S5{JX_lNHdFf!BU|wlth+R#P zqEYjEz-`GXO;<-dEqXv;F^wi5MHw`ca@&Ns0S?@x;wU}hx86t(aB5nJztvVnYju_X z5)ODv$Kp}MinO}|%M9GD`p&Z@_mtXIFfLxtR-nM{6ozM^x)_TX{Fzcld|+q5S&}=x z>R01}?9w@9vCtec+=^=4nd1?$36ibI-G$A~AEDoU|so zkaqp?S&|ZB69+9Rk5s-%-WEwVfBBjNa$v^cW)eHYR(=A+Ms7a{%{)^YZ2xA$(HY8{ zBKr)L)^aaA@$-IZ5rdS46di5vhUKldpHCSDC5%%e?49$FG9k!_FUGPY@G8SEjr<+Y zJULf(z_F=z+dR1>(kE~ataG9rQ3TPAxotp+yt)IG=gxpES=WpFG#4TXQs+<77rcMI zQPlx4b@f(n!KFK>tK8B7kdVo%u6FL(H$iO;^_VxHpHK|9@hQ#UGSko=C6S(W5Ev6# zHl-DG_psdvGiy48UxkH#LTJ;M zC$(%%_GhavJ;9fL$Hxu~jEomP!k5bdfCY!fVLG4>ov;7#BWH4AZPv1Mec0#z6Fk<8(WItP#MV%;=Q`j5hs+{Q8F9Q_UgRTB;CM0<8O@g+If zCa^%FlGPZzWen<-PcGmg9zXse7||%2w^Z9-JHyYWkv6>lD`fQb1MgoKCQLTi--682 z);7Pilx2IbjNTEJ`PE*MNh~3>ha2ST|f4BI8P0|maMmjQy7>3t*yC;YC z)}#-|`~D;Yvarznz#E3}iSc$zLii7m?jkxj^P_e(F)>{csdjNaHhGURq#yI0!S-Ih z;f3SFji5F!h;l`}=P|R~%w~5LSShQ0$>((r0m~ch&l?{^1E<{>-^2anN|2`B*yLKi z>$6Jl_lt&E67K*rzDRh$>zy}<(_<9|JBh2%&g0$h`FmZ=DRN>tz82nPXZwY~Vjk zz`8QxxAHdgy^m9N=hta<{AGRos;ahiH50)91A^M#O#UjKE=dPAS(IX zO$Ugm`~(v1Kn$kdOGd3jbI&_saL>aT>nr-db1Fi|=4Tg_;n^1PHB$R={MgF88@A`M ztdSg0B&XOWo9S_|3>;4v!D9=Df&I&>+Wirzf|L&E%~l@f<#Ba!{W({nqzNU;#HDR=r9DFmW?d6_P2OB9W| zt3Y&d8!0Yr#eCFB&gIGW&^5P*4{VHS4yg0wk_%eC=yYv?H%mhwBqzkC%q9A!Q{or# ztDG&@&C7{2wtZGgBYL)zzHjT63|rk=nfV zc2l&G7JvdvZvqaTC?vBD=G4in`8R27$45Yc01yyLt{o~S>_<^VKEn_`cS_1KCi@mf1{ny+-X-O*Xt#w|>BpfqJ#=7VqqUrRV4m z-`r9uz&I0Aj(JGq{C^`+S5a-z6DtB^-~S~V;&VjDX{Lkg-IYpwA|sK;##K%EU=1?U z!^AOj9BmsTsl*|9svo*`?=eDahO}&(ySQ7g}1hln>G{!z$)-Ztwn9~-NFb-mR?h 
zpy|xDj-kj=%^3D^B}gn0^>xmKY%K_I)@+lv7I|aTYq>e_#gf=7OH3>HbZ0FNjymk5E>4>1K4=ls@JxR-S(#bl+D(30#7Q~bj^AJbwyX*0Y25k z2Kt$^ptd$)q0zO{0z#u&qTO5Da>m5ma%pHfnA}C3pxt2zD7%L4YKAI1z$ZXBD_^q0BX^d=Qp~}{xdww_OtQZ+|s7IM{RpZQPrw43>Odt)S{fo)PsB zdhkccTqK@>MH@hUgJf;BIAjAH2QHSbm|Syg?0757QCkXl^bW`BmgSktrW|F}|@zap+xH&-)@a0H_1!`rwqQ1DWmSImO-84Z;l% z-DwLk$8UrweCCO!LO;9W?d#2@eSss|eJ>#W$0GKLgvZsbfyn@wTD+=lRIXIV=~&*Y z6WS}ezy*LUNqmK`bEoQrmRD+48RVF75qblhG9CT}L|lHrH2TTEtE`(guhVevTwz@) zi{Kd4ai9+e1@hYl3o^0hndH65CuRgEE~qhtg8*!Gu)usy{Fe>@$9z-WDCD zza6gd;`?X?YSflKf4`@t-Iw?gV7qrj(}+yst*`o~$7D%ss45Ss7Q}FT!#I)Nk?gTr!qL>`kRF z=fbOk+XDj^wDel#1)byM*+B9$kHxpWZUiW>X?^IRoiRMBkN-R!S#ETjL#pJ}Y*b8; zog4xlLIx6^tU4Zh0Y8B-;v^KKyqQtNa;uHx8AqYaW7$?+xex>t1Z+^PNOFWKle;V{ z3+3+|;DltGBe?Si-9?Ank(oM7W`&j@lNPNy8*)ntcL>Hrwf8D-&jykiVDxBI%xJ6a z0i4;e+Gk&wz(Z^S1DRa%GX)#973ciBCV`6TqMz~I(sBR7t&nFg_=%eW{0o|;gT1BA zE!7s?^3xFG;_Pk|GTmv%Cy6MW_uktMN9UrQdF95$X-s(>33w7xiE!iz%1Ro z*Zr#GtJrg!p9dYS{vn$dauAEJh52$B5?NpUVs~jRTA4Dz(iq>4&_#{kcK z@c2pzp{-~fdyG1rob#JyU?&4rZY&_?bW^9GY=Nq3j(j7lj76-s3u4TAn$H|fu4-%` zr9f^+anbq2(yd-_6J(mULZ_zzNj|yg1>RPN^Z{wD%7(SEP#jwPiaEd|SD&3saoBC0 z;OiU>@16tDt1By?NR;E05L6qx)B3pe&s9*iZS%u}paT^+Fw{au?3p~w38)GEEyCKJ zg(+xidTDBzILiT;LIe(4K*@BJi;>OvocV!p%1MbBE57%Y&2g^Yv6sj%Ky3AUlU0)gW^O8PdM25ft*RajfjTGFM|h5* z9*kVhISUA!i&V8!4hTj#DI7qSP_6K)E3hhPEF% z@nA~(s^7c8%W2-vR;ig^NMb3i(*daWGX5r?fM9b5@KxXn^sVJvs9NlNT`US=#;bVOu))Kp{a zh@atB(=|r+2GB%Tzbly#PQGmVKWUnZ&)DL4)ZU71UzX{#`mhe;|1dDp5|Ld>h_=72 zA57t{w4SS>sP)J*&)W7J8DH!Ns&Z0hI|OPOQzsOd)u8B=4rgRQ*|f55g&_c*g4c+V z2cxm5^|1yrf9FRiOO{j=qKA`*-S5p`93+68uYn4$eggXvh*ZB>noJwzO9PU4#rI`% zrwu+Xy=&!MKw)5}Ye3C|w`qUq#>jnjTxp6nxa#9#IC!}$ex~QlBMW#N;P+$hMhsh; z*EL3XozTtbU~_%00heKH?WxS5ct$v9ob&o9%JAVqZdnyTEKBhHZ0XHwFaBEex;y;V zO9Z{k4*Wg(J_=~li9F!>H^A1`dzyqd*b0{Pef9{AX;K8vJ#SEg)H<0w3IE%k-;)v^ z#k{H9`0oG{RhBR9o=U5+>XV3HsL!xG-34oj=a$%Ae2wmGr&rVDZ2!*WVw3wp9+2B! 
zDb^%c(VhCfG#qL7L(Y1o>E>>r8mOp=g(oX)V$O>=#+XYnG*kO}(MVYpoNcumzNX_A z<&zfdfp%1SmfYNog8RR6B~R2#STwY}O&En)mFRcf7eWe`>niXzT6K05m;#5sa=5&m zY~3WC#I|Uu>a!lVv;~*YfODMJF?m2~5beg~>jh|&nU>QL4ba;XxU9WQ&%+AF(MSIo zMQpXkYY6^6=Y7$*RPJ@-LR@Gc1Xy>rl5_}%L+Q4noRmrR4i3kG-P~1QaioFnmYf%- zus#uYR6>bR*r1_TRJ^M0a95A89!tiDkF_PzZnZAbKnBl3sn)z8NepvIARzY5|8lv4-Y4hWvV=o7lZB`$o~J5V1RQyb4` zK#23$BNO`)5z_jqhT&?O9w_6=H}I)(J2GUhTZX$ouWengE4(cO9E8W(=$+l&nL7K` zvbkq?EJ#t@-EkqtrWR@S)VQe?Xnz|}p2Js|-4-{o9l!^#6_XHB&MtJd>CbQ3kzvZ z!kLQSfLbzvbU*w>pgdfkRZ>Et`~F&+8DB?7$H~P7`rxXeL1^22hBqvDq21xEbBA6F zlt3m|k;i`10U{Ri4KCY-r0?-c<`S|)9~+7)cHs3zZqYjtfSwyo9*F5^4c=_43-r6Q zoQd20@WNX|Zu4l*NyS|wpr9#djtS6(uDjTesO4~f1YL`ub^2i^AoTjO7HgpUTsz{M9F#DPYxYA9O->N zH^P0)mlX@SYt#ov89B$@3QE`8!!tUeKm#p2*+!B&rD-mWAkbeI-GSBBlM0VE6^F0S zWG7O?`10fn(0Tw}T9iMqWO;#(d+>~|B=oa%UYg{#uy(}7rNXREq@+0D3HbwtDz*)T zFp~WUv(e#G^rP@iKJ_!jU~%08?d9&dYrkTD)vYIdNYv5ZfwsqWmF1(o zR?=6qQ^7^!QD&Z`i^@YCf>2gQ^2~2cfYm<4KB{?ricuX8P*U=}2Jd_X6#|6T_rRnS zPbwiFN+96eP4KXV4w)LA6=t-F_qPZy#hE9gQm4U1aDC3a#A>u@crmQ{9nb#TiDk2u zH74x>bkQ0m57^U6ie%(aaCF0QgD=fHIpwDgHp4(yKfa(Oha%@_djkCfEk7- z#Nu0E)DR$=PznP5*YBp-VU_eI(FR7hrsTgaJ9ZW$<9;k^)I69Ab>;uH=y}9i>(eCO z6K(bUuN{w_w5>G8+L$I03h$j_&-( zOFV9b44s! zUIHZ(M+p;H6QRRmUg7CBiWUJCN8al+{n=-?0ueV)E#Hy`jhIh&Q4ZmM(hlDI)ezq# zSGHko5h0{)KKbr?PvoT+cnFBbRgT{&ZaM?9d zAm}2Rs@E(38zCn4#r7*x;jgOb!U^vvfuqDbc=~9;hdq4QmT!TXIK=>seUP9%9o|oP z{nqXG<5WEu-zjNggALzUXt3JTe!|P4{|>qC z#~aRze{55AHZ*Q14pi##W<{8OrkxV7`XpW7s!I?S`_A73jnS~GFc%wJ>i-t0&oDBs zFYZ@iMW{n`;|=cq+f|7becBWrz2ZQb&|ZafDWR|S|J~R#fzb;Mj?Qh`h|0eEZJQ(4 zkNX?AuIT)uXKFT&AA7rjPgSH?^OQWT{QBHEd>~%g<(aatPd9<&6VjAd1_*~n{1{eh ztO%FvhsFQjOkUacP`SJ8f(Ut01~aX1g#I*pmkqo)7wU|PZ(2LfPjnu9x8V)GC$ff) zdJrTP4-Qf8VdUuxU~L+lU_bTlD;dxFUah?sH4p+;p7I@r)A_GIU+Gx`EBiEG$}Y?? 
zZ6cfil6GjsUGfwALf;`%c?g=hQm|d&$ik9!s}FGg?^^%}0EYEswT<81uJC{{#1`L< zr=II?ag+oay9SM@gm1_JwPWk+Bfj!@r~pSliox;;fd=Ejk7=EEaIIjsV{ZgE>;FDt zQ1Hb!%s0*jpFPIYnk~W|`BLKN-2c85@C)@4&7DM=;vs;ZKw+8pUE_b>Q{a&#w!=FN zUkH4?`W4WSX3J>HqA9yiZR_^_X>HoeVvVS5o%HQE%ec#*^t1naZ>>C{?M*n^Ig%;NHTjHlhoc!y54TU=I;(T@v&>V{;rpRP4p-28t7JM~eHU{B8H z!k#t31ze^xn7%OGXY*s91v^ecMb=kh=Cj)plFJoDiwt^yavR9*kli{DlBJBlB&N)e zl$g0}fvB?q>kF2BTxZzKZp1)p3=bvghN>Q(%ZbIhmVr74O`hdTy#7-&n`zcT@WyBt zE+=!hFiSz@-6Fk7z?vqpr}6M{#`+D=vq1z6Hl6h8StGP<`;iacj@a0<xM#Dto=+^cT@Q5pU{7Ht3_K=W|dc?x8bk0 z^RQ6Zc%QAuv$u}*%Z=!+&nvX^tEw7A4-_8A1Rgpw8(eTNx$z|-rSY;)LA<{_1|{I=fZGg!S?rD^82$|6hw7NY01U^g`-Pe|Rfa z{ePWWeOgnFYuPtR<}=La*v>H>yB!TaM8YHRmq3No59SSAy^>p7n==1d$jo3q^X|Wu z)Y(}&pm0oDYWINm!P0;0#T(0W)_O3oUam1?Tfv@|KrY_+wr&(f0{of|z5?D)67`B(n>zTcm|)|WGeu3!xVY5wFae}T=VF-+mg vf7@?crEjpldGr4k-=6FByDjegXTOuNF=p%9lkC8Qo)|n`{an^LB{Ts5tg_<0 literal 0 HcmV?d00001 diff --git a/images/made with unsloth.png b/images/made with unsloth.png new file mode 100644 index 0000000000000000000000000000000000000000..6c5e90dbb06c81a68a8734966cf0a11fd27a2509 GIT binary patch literal 70443 zcmY(r1yq#L_CEX$-QA5MptN*%D3VGEl1ev7$Iu`WiimV5g3{73bc&=>(jnc#Ftp#{ zUcLX{x7N5^Yn++0&p!L?=h@HRuQA%1s)TrScn|~;-cwW3g&=ek1i}1qu)sSh4*tsE zFI-nOBM%6|C%^s!gVHi+AQ)NAJtg@^J{jAY-T@C+P(8A=v;M;hsZP1uw^({9Fk;b_ zNHMS%3^H);aEi5t{C+4$LgEUiPtGNf&;6qGaa$U%N)F$1&-2;LJhR>R^k1v1?DbzY zrVfLea=r>`eb+wo9TXpWB|v~4x{nqL6T%?D&ygQkR)a{PcB9~Sh|{6@3=PM!gp^2b zmB|A)lm!R8h~Y5pPQOw#ZNeNH_{UWT(k(4kI2vRGxx;=zb%Z(n(yL_Q%e`td{h1Ad!IWp}qa-%w>7d4*_$0BQ>DVVL< zp)gGDO}G~+hCB&)g$q)^>IfF3;2y$ee2`j{j31oEgebJOJ;-=LLJHq6rYgK~? 
z^M#}S`V%MQDf$UxD;;zWt$z@XN+csgA<1dsjXBgu323(5CF1{nPwcD4dc@UZ2pdWq zF0x6iD>L)N;AisFnSW$y{CA(Bu7tG1u@}rx#;ZxHb`9{Eb0st%hE|MqhavL6zoQc@ zhPB`iwLqMS;}q@5;4_U!uzfVR4s^j*6Yh>QjHC14Di?mZ;SliiK^z1)q@zpJ8@~h+*$wIR)T%{~28?Mqthhj~k8+ zYFB4*+mk+#%hr-*l%tPvV*R`KJZBHm)&NKrrmgOX&kB*DvJ&{Hn)Ny5@0rF(b0`~q5}oY$e?Tmd)Y0TC@+ zF5>F_!cBu#gHrHTjCKUGLf{2S-jQN?J2mTlvvAH!l8xmAnm^{@cP`lisQctfbG#^;&<4=SKB5E_!^1H(uA;T%oK98k}a8!ry5`6!L z?^eN0O#SV*YpxLW=@?}qj6Wd#840@|E!WUC>G5_LtV60hAOq5ISr{sh(ow zJyb)lB92o&Po$|LcmY@Vh}utD_#2*xj6E2FK<-urqbYiff5_V9JAeJ0l3RMQ4}wx&mU%{6AWIL-AmqOvUfs9ob1euFaI z--}$zd(2AZQpzLixbklMMT)BYXyt#O`MKWE8pf7N zyS&g#B97Qf*SdA~cQaix=b11^mc_YfcG2H*wIr{CK~h zt0K>M(!doxD%M#c|5GHv-Cm|@6I>6`8N zovDJGaTI48DNnfMW}rlV!e!4T{(AE7!AZR-fwC~PS*lsHaAN$4+@m!2(9EUbJ}LBi z#3KR(@znF(y9iGP?q0ciO)HAM?Q(x!9bz9X(L@f}kL~?q4`l*= z0+QmRwpkq>SF;D|M+7dGx?Tz4GXG%_7>gDk(0wpk@5Suwi>b||zmoJkqk-npa9k7g zkWt3T`6pcGcfEe)=eGJ(HW9dc2x$2t-v*)~kPn>RACSO#HZ`V781Onzuogom^?lfC z2-7el>HiEQ_Ws*QG#>N^z5KDgq7*I}N*H*K7oDHH6+7p`Z$x{mYSP#h-MEr(`H>ry zImzx_+F$(0ar85UA6yc{tpB^0Nw%NCuQB?wZNj!%)lrYQN-2}_pz_vgUTB7PoF+lY zK8%%+Y>~?#arobIxL9ZBBQj{HG8B@C36%k?BGCEX}_6 z>9GI0QSR~c+ZZj!pH6&{A6ZWrSg&6G^fNgWC&nx;{`Rhva&}_3=1rk|hv)llZOqtjr-`dZU$tKNU=mpV2G)?)McI%_{X|Z?aG*!fQ@IS&kFU~C=w?`@byQ>@ZIDz?<4rJ4; zv`GCKL4qHceirj;cl;5VcqMekUmhMzm*Dr7=*wd%t_M1qHnnl_dD%&J_X&Ext?}?T zl`bmtlHRg7JB~e@LbWgDwD-}u1+~2!uHs^fbx1JJ=ZgvWFIFqucSiStT5&%Sz6$lb zy-OQO3z-YTN$wH;*8S}H*bxy3YnhV@v((UwGceWPJKQ`~syEEvVHhY9osHc}Y!kwm zzi9d*2Ip_ax~$}!haNckVRwt}yo4f%A-tb|kDe`;qE!*{0%@o5llM}V#k*W62%Chn zT6!+>aY5I|ZS|8|heCK`$-?zV3coj-gfZqT4>t)*YKSo>omR^_baY%!{a{!+eGpMvTO>$%;hF^3M~;MB1wu!$jxf#6)6WQ#ibE05n& zh04x?>W%FG0=BCx9W(@;=-4aHC$_p-b}8np_m-|>cQ)C; zrMzwF72}*AIc&(aYD~zG9JzbPsb^)UR>}%iDxGDLBzx~x!H4aV} zJ#OH>xg!~cyEr^;2n+Y`0ktFfA$D{__G;GMoaX5BH7Eck$6L)h*Ixgffm=iNC4PQ= z@$~Li&~<`AXFai88B6zBRKDY6qdwZ<$XAN+E3Kf~Bi<4>EUQ*mR4~O;D-z!;{Bns2Ko9W^nwFop!>ALhBJxHRh zs55z+;A?iNdLV#pt%3@xV$IzhZGCJ;WU9tOW`UN zQ^5bOPA@-X3fr$Ajf}qcgdC-%VAhj33a5OOW*?9hL~~o*;HAZXg4WP?RU?0An-s~v 
zRrF-Y>mA~&)!`-WLrKCt%!0x-!|4m*)UuACmhjcF^ir%O>46?2MJ#jA4lf^BJE%Pn zy*Kt>6p&%H`arVxoT3d!Z@I}bwEAf?`WIzzHpgl4?kQ_N7hf5xq$+3co7J6)R~=2? z?Rwt30BFZK*hdduZiN{RRSjckkj_9HTDHM;2CMP zW-R4I!eZ?-HPku?Fzfy_ZFl;MXR5#dPg?1AaJ4WEbc#Lo{yNHZET)?H?dgfx@}N(_ zGue}ey#H2^_zcYuo2QIMim5*#K{3L%F-gEx%<7GMVHx0QM_l@b`u?M-!K1^jYy+1& zG$sDYMmK5IE51q(Rr?}MmbXb)@_vnZ6ziP(b>X*p=NF0=JaH9zTiZf>uWjU4*T0xj z7JvsqgKAFEfIGfNsy~nT{pBB!xA17wpzI{vZQ?b`o9AReE@ln1t9Mu zjAlfka%?$ckSB#emgl}N8QtPhIv$JgTc+Vq*dOSaPzDKpu$H~g$l>uzk3duDbMXhJ zT!za$&8*g{#HBW#9X0Qggh=FNg4j(1$o|&L;c6zZVS2ez;H%Ku7yf3@AGzQ!3gx}U z+-=+H{)r#&Bvw^rIvfJH9mvSv!HD*Ym8pGOW3|^MZC0My?e(o<8>JUrSmq?~hc|Vq zgK3R-3QKjl=ot0>nJY!a#$UcyxIQ#eN(bRv96(A?-omMHLC0+5p1-lk1@I?gky zoglrQW}H`I%GR>fQY;UGvu1p2=y&Mg#D8ge@0EcIpL;67k2F;0o0BZKiOJ z#WuP&b8Edu_(2S4vjI#|*hYzkqU2u|_Ft#}G$##p(b#VkE}L@vueWa}tCyh$L4EIYAIf6wdl@7c%iB-5S5V zyg!{9>t~f6=8G(Dog*06cCrCeMKo>p^~pt zXHAxuVGCRjLt$aM&nZV=QXR;Xa*h?k`1g};=UP|wmgwdxJ;da(I-8oT`7&>on>QCw zTuV}V)X{fLoMZ6x;rrH-GIywY|@0Tf% z)jX^7w|=1BNDmqxBy*LsY%od96qdwbXaDcOIgAJv^ghKcbo}VLw1V{2ftlq8$I1cE zFAF1TnMfL|GB<46uMmD77LFpXD=5-u4o%VxhJ**xIBVCmp_f-p69Q*HS?|jd*d@YO zVT-opWN}2S0@1%qnk4dciy|W}Kye}*?11^|8|T&^;$!?o z?m~OFjynW38WnQ26I=hLvrbC_xUeq01)&xd$_Zk?BDnz)X6xtvA1C66RXHR9T3?IG zoYtr}B8l+jot{Y?BElZ^j*#FyfUHW5A&ZvYT|(Z3|d2 zpstPSl3e8lOtR7AhrmWZ)f?N-4w4U~PM;4MyTswTW$w!t!L-MH_J5lGC2OIs@WWyJ zfaKB0cgvrauKc0pd=UkE8tB^(;^ngKZ_XfdPa*u&U8DgSo%l6Y*<-}S`;H0Eb->2B zxF}V&!43Q9g>9%m9b2m^d#%sJOx#-wS7kl;`5)tmyBgIIvrMOaa}iQM3(;5E2-!mZ z-N!Bzl=XHB$3seIMiyqCJgTA=7jLoedlo#I<*%o0CwUI|S$2J8RpzWOvRelGaMLKT zdTVT#D+T7>8#h$+oUZrTU5wiSq+8T4S3@kcw>%TS9_8*Sr`?yLn8pgsOS_@A7=_gw zf^&FK>C^MUI=GEc%Iiq<(UuUjnO$fjC{e?+TGNc(eE-%z&ZiDufhA=S;q1 zUHfB+!lwp0?{HCt^c-8x758--EPItGP-dL;Qs4+Q?#IJ0T2Q(bXRAgJ#19RcsE)Bz zx~kePM8a|(aX#RkLN9#8^xsn2LNo%BK<`bQB0Xx}m@Mr*%wc&;Zr+|!pFQbpX&Bqz1ns}#;x?`4$Q`rK z9kFFe`ns^ZY*mX>GHxfZv~SOFyJ6!7ep4(5U5#kudeXlOk?~X%-d6Z^GMNrYY2`k| zNRvtQ*1oNt#Ml0`?x_y{uPAwDC&{bhB8@Sh=51E|-3u?30WX!nTeVg$yVp8VG16Wo 
z78o`|J2LqTiODIoY-MI>7~R)*9|IGURp=3OvHRxe?GGP6-VhWdzn@PwHa4bxCwc$y zFigKFB0HP;ajkQl*Ul8{ojZN2OyH;as@CJ*bGW=lF|`gEt>wJ=D$qCvSnFIJIDCUWw-*UM8eCY?P;J4x0&8SRa*$e^}F z#dxAz0Z?b+hrVgX(LFIl+||nc{28z1@|gb1moJG~S@ENzkAD67wXiynuFXk-LqHI& z@?PHAnYZdO4>a!I$`5SDAtY2T(l6nE+Dq~vU4)R8^{%0=!t4=J2JN=%gTZdGp zcH`&|jgP|H`@^*Um1pHi_D~ayA!&+4%hdviwWPq$E`yp~X~s6JsPEU@CS`35z3E1J z&J$Pk&NIwhe;)?A)5BQfPn=v_VVVVsMn<$mj1u9`x5imTMXA>YGZl4oNTG*|L=Yw> zX7}l8Ae5tD^eQ_$;o>47L&}$|)_Jyl`)B1yxrJhpm(m$lj|Y&$77rN@wfTWaxs70Q4IgioFL?A<#_X7<`9!}fU?ni zbnf5$K!rjIP&D{rCB@A0WPfpe^iz`Fw2%ZN z#a(fsm#Q>(mJDp2dTfSs{ocQjBiffb%0KYB{|x?SNrt8s&nHM&AO-LV zf3F z0`KJXlvPraem>}&C!Rq()T$>QL}FhZR<=3yl+D)ZK)R^g>F;NzZe|}o6}S8u_5nir z!BQ7R@s1eUPKRaO4%E0L8JK;w%zp?);#S1QM zAwUufyG8*w#$oYdRzDS?knGsL(G5?D_dX9lJrWDL5F=ueiY_nbS5i`9k&-w>HbYFnwQrJ>7pJNnR2nBkr+a#^B{GTZtEY~T9^o^R zH#SRT>NR8$7dKX)kg`l*zv@*c)47w3 z_vaGDT;noN#LUdxyW;+9WyJ#A_1>W&(W&-Ztvi(a$KgL?Z*7v3lOL?6yDVk-@5%=t z${`xU_Zr+YZWup$?#`RQSGXqgx>bvBnE}!^Ozq6TFc+_ z)%DMfYn=zuSCxI(!LlA}gI!U~LAKws-n+-3POgq{3PKmn2XfQSY|uLlzdyFD_B@>*FQ# z0fB)L2w|#=^k7D_Cz3BNF=@5!X3qqBmQ&{t9Hx#_g!iwB`KbJOiE-4(dsX8dvqmo- zl{mT;l~^1B8c8p1{W4Rh>fq@br`I?aDP}>m-`?_6P6Y!9l6Y`q^X?vWBJ1tEzMFBb z1Xa!;3VK`{*yxMyYT=U|?-I9p2bwnNva7bNui8@5#K1;)K!c{guZVgu_|Ik3_O>Gh zHMP}pcbu5t5$ES3q-C;JVJz!rL_qaeH*zMe(BMw8+Wq_Owevx-SZ4KpIElVb*z(&LYGFO>knLbx?@F+tKeQMW7)tY=pF{Xz z!Lv<5h^GjYrWb6LPO;n(Xp4{Z^ennwP)^pmFgwpT$2m-t%9rR%Qdsg;%}>!7@2n;; zk|MD#E-v;DdYIqa{rIR|G`4@>cI6x@*-p1O-~6W|I3m?&d1jvNH_Vn6tITvRL?a|2 z&&XU1YO3SdlfK?VjN=^Q=juCvqubN9T3_}9LJJE{&<*j-3&9o&>X>0UrdLWM{1W{w zUzkCQaL#SeZ%YG1s$)d|hq=b6F@76$8YqmdV*Hp>83<_lU4r=z z>b!R7R{K-KK)B`>7x!BXA-E1VoshV`gUZUvm@4rMF@=Skp1U)&P5vmAKUbF{WoELR z$%B^GpY!}ZS>s>O`6-3ow$CqH$g|w>6yHh^3ho@sUy_@#k3rNC}5#2XfImwci{o4&n zT?>U2fFW0xC^H6ecZ^2wJrPs1&Ra`QQVsZi6_tn__LvNv4sUpZSMT6p_k8f5xSi?R zn?gbqarB}p0Rd7HUfa>w%mE}%8oiXwa{@<5??NNZAwI=`eB-#he0a;q(0hx)Ywby; zLuNxKtsbxSlS}Hu*I23HYxFGtK~5j?+}vE z{zL3MgA!SNNwzlDF={*nm;Y0^dQ!Jz9&f;tA$xU#F6p%`2T=Ucon$v=oA#ZZgF$tm z6%Z42Fs19reB{tIPaY5C2ez5M4` 
z;K%H%`EK2c5Jdz}mRnGK*CL50CvIZ88XlLkaf`Z^pfp6cm))KkBwK*HfKx67B0JOWc z!)hb?ej@z_j0tu&g>#BNuthTjilE`rXbRzFZW0r`PO$d&}~*k{lC`;xhQ)d!Q`yxC_WA)w)J zy-klB+L{hEV<#^5X>9+An-5xrTz9dA`INr#8@}nI+xzG70vS$Xp2Ouqt<2Be@FL*)o4 z+?k@B4>*kL*m~(WN<_cM5F4LcL6ooWBY7v2t}K67E?LFtL7aJT&2<-E;eKdnXt`w< z-b-4+$jachP)wiIRGS$9g(TJ(?Sv}}Pj>3}uomu7?=8M6ZhTGzV6}TK0!Jdyy=sAXnBwfzduTBjVrAPhj?X%^Z8q}8xFpk9EXpL7T@%MC@of2Rv4E-sgQ&k9v=Yg|5k^XPOH=hS|uf+N;X1mvyBN*iL`hYv?8ZM0s}2%r(moUq)u zdDBU&loiOKr#pLHvm*ySd*7W+`1ss2+n9Q(Yc+FRe8`Vp+M+@1r>N@&AFHsuyu6yG z=EoVI(0!9%!&}RZPpU(>x-KsvtUA}ggbxZu-Ah%9!)Z8@JqYA>Sjb~mXOU~M+Hb3C zpvVUD#EEN5#EPLADX*SwzbYzgZqHXwab0MIJ+-!`Ly_M2#YlP?ux ziEJw|t}VCdz`8gbl0DffYe~$=czJbsev_3|zFaMkrG`=>T&VSUMkpY_w`_nc;W;Kx z#{#mbu_FHmXZSg$T+tXE-#B`&|6e6w@+kyggsxn>mP*M`ck32K?fVF?Kd zIi~~Y9Rj6xHf&hROJkMBZbe^4yQuuJQbEg_#feWwqXGMFD#7CtD(EFO)$4ynvg1B0 zoCor>@L5SVD0i8cA-5Rf zpGld=J?#+I?+rMfQ#hM+>fX!vY2`wCyviUO#Gn{S(Dr-fn~vVC8T>si19Kon1$=kr z&kNWZ%1ovI&1+?FC2U*$7!-EUp7_k)sTH>O{%Q8wNRcQ9D&F550+45`Ukv^aA*acbAP{r&$yB%{tggU+?%-NGMvaU?P|qYTUfDFN*Z z*R+{uT?!N^DK8JaD-qe<&?_-yrGI8hp=aQV_fMWGZ*zp&F_Yy?NZ-oR$*Er3S==WT zf!K712BQ`PSC?tZy9Ga5(R6d^>F+?F5gus(7n({l0wGJ2~pM=+|7garc*rSR%cXzV|nBX2uh!*h*?HBQrc-g|RiKa-M@!leC=7y45L>jF;i0%l4G zT{FZ)R>j1`MELQ}^m{<%X2%*+Ki@W z+BSX^xNUsDl_Bnd1*-nZ=0Hi1RA7)*_e=Rlk0`JRXyAYmH?FXwK(ahMmOQ*%FrNSU z-06~@kcdb{K_N7!A|@sVf)^Q-xgXm+-A|bt?CmW-mHD#cM&*$0aXggDu92p8CeDGw zaSvl(7BY*eFMRMcBoA_>lA{-(MtbV%o%(ED5XJsBKL+CoUh1!$1yTgezFn}uDN9{^ ztdB-3CoF0!lmmC-W!y2X*^xc)UaI|voW(7q)(nzf1W6oP-GJv{;CgwbH!(d|ZJDxP z_A{kcZW1S?7wPV9=!mS}9$p1S*ka&36}KV$)58LZDzC?n({#kr z1+DvOF3z^fh)RldKansn^h;W=^d)znAr-Eq(d9r<$M{7RCy8C79n@kPQ`?u~Nv~gr z`W~)|UjqO0Ez-fqq0e29wYSSJIJAeto8DUNE1!{q3_&#(NrkWDMD}~|xVTbDq9bbn zFMSQK&4z&W#MZ`~oQv(vA3k6Jk)ey;1z|yPyvllf z{HsrrJk3R`SEg~`B(o@t^+ch!udkgWJ0SF$e{M~bMX9_e72*RjBXc!v_-$P2MK0lsy=p?umM~gtmE8{#diq$G=F@IFNyLDN z+@HV;5DeGv#gKDM-P(q+2n$n^b7(3#I&y;`=jcQI$e=d$390>4RL0<)u`|~+Z9l&` zR`e<JiTRI)7iKv-Z=Z1e&XF3 z=j5Y1l%T5S_x=6!LF!%1T9tWO^!H0lXstl)1sd!DIWdhG8dG6Oc(n@+2 
z7cPw^IiVlvPFl($7wFhTVpYd{Ef?+6 zExw1=QRC8{>m)9J-U(PK;fJK8fu)YNCWdwuWkf!G`a~n`Cj^$_%zK8hNHrH3U5~PS zaqc#}Ml?$QVl>&%|BwL+a%qZziyrQ$zqAsY%Gu+`)8O}$H?4%%7tD_054nk%ADtP$ z2>x+OkY)QFSGs)}qlFm+PRI2rfx=FFTW9U}ZkUr~!dj-*C_d@Q>dtB)G8w29i$FbS zeRKQaOFg3S^1N@ZU_#WyHr1wld)4jNqfal}vk)kyCyOAX*6lT)Zy)EmRrn$8`!gk} z;WFDI0sN8+ami7B%(~bn%f}HQ4OJ4Y0R;G#IyGR@0$kY~!BD*Kz zYxjH8w>zki+&acT-V6RDr!u|hq4(}Q`S>I!@#j^vtq|P>YCia#pbZhYJ~oR@o^SgeEDK5$v+=;RKSLo7J!i zs@W%wsa1}BjW3sP8L_u#VaP1*L|@{ILL=?9^@dG;)eC0p)VwC^WCxC|si;A4E4HbR zLF&DGn)K!kn{Af-f@E5y^B@qPTs&m`6%QkraJDBaI!d2AG1B#?2^S5TFs;4F7KHdc zRv(PWNMbBvJw~Tt?N+c%Zck_ZE@28oUzn!nFl6%g&Rub`+1gKP#f+R}s_ zF!152f;LN6=X+~BanC_T5Y;}*p^^6V_Y5W4%ykN>A5jRuyoTqK-*jT+WomRL; zzT&9kG81btRE9jRRCUJBnneW&>lKba?_PjA4TRD%U)=F)X_5EQ zufxFVG3%>$TjH!QBMIr3Du-k$Q+sS#${H9F7@tJ_xb+iU6$u%2hG+|cdw^AR|MqdO zpW!^lZBHuK%X={Z^56crv5OIpbPC{bKPzn{x$ih7f4P6}9%%-9t3jUzz`l6NT@voT zYwRbzsbbhvw+&E7%yT_)HZtuzM6VtEBhxhq-cnF7N&Wi^SNT=zVpWL4~ zF^qMv@7=ox^z_#`)pLQUCyl(}6lZh*8qFUfQj(L67|d~Ri%)9|}>Klm_Flg>=?&L36cKjq}RRZm1#xB87$@vzlZ zYPKqt#ex_D{jPSPXXxnYSk&5>-qCR>N+?e#F6Nd(jy|jlx|Fc)FK0xM@6bn=Nf37^ zMg=QtCZFWTONB(FstSnZVTS&4U-y(C2Qy?oTnhH^p3YA5jqmwtYHH=`57=+rvY2gn zF0wVo!NFm*HC__RJNLLHs7cT5biyncmuv`d+qR{ySf@xC)b1=jkoCHgv}86$7b6R% zvDPChcr1dEn|h7Tu3Bnpt<1p}A&VVRhjSC9ftnf`U4R2R1u^N} zNuEt+I^LV-TNj?h`)XEh9@1ec%+4N(T8P*aybBrwZBZie?cTj9fSZShW3U0M+QV^u z)-t^@lnE{YiR7(KXHoj4Cw6-`bD-V@*Zf_QgAb*o$B#rkJuIj?G*w`zXhQa`&j4-Y zt-66VSoetL%9!8<=u)|vmy{I!Yz$N*xGLLW0p)o0=^1V0^#$DDsh!V5ATk6eW>XuE zBBdRx>9Z=QORC-CSS|R7=;E-L1CK~SLBVbt@GVE}fjR}%@AbH+kCK;aXVlqKwo-$g zaof{LZfyU2kSQ^CLurN%MAc-&^CAt_XDmSX0LWP_?u^_V%Me{FdUU5hL;T0;1tvb# z0`PD(SHVx$x(ctI$88>nE)nz^-X-HkuM)-lpMNEuIe|v69njW$G3(%Q0F;2kV`g#p z&Q-eLOJvIIImld5p?4&JC-=sRQfB@AC9apQaiagInR#cuG7~rvwEXa1FjP@}k|Sa2 z<6lQscZhNA7DR(k)Gqm9IZ)#95iN^{h0saZ)Z^i+!QWphBiQZLr^g0AF_u}C+0+mj zj|v7XYfEbO>{`)uVn;5ooJhW+p|PuEufLz)4*1aiMnU!x;O3#>h(`74gBZVK9|b9Z z$oy8_1SXAM8p9_7agyfGQWp1*&K}RJS-gj{7Hyu5&0lZ=xc7knD>03!EoI* 
z=I~&VlaZC1x1pskmzp-&kAO~$OnR1^s##@-&U+Pcwmj*dB654xYlp>J;H$Uy3KRFX zfF#0bnmw@47Up{%@ut+cWv-&i&^QXs&dv^z9rob^TbnnU4>{lmnX|8GfrbO1Jbu8f z6p`_o#RSga7q~Nu7Egw^U}qzO61f^|dI1Vv8l|RBwR7JOO}G3-DUs}XjUSVC>}qjx zMUQj60tIzkIGuX3hK`rg8E8f)U(ry6#0)en#=B1E_uW@l7x3OSK^%Qo@9phfpqS3m zo{A^T?&auQ0Xc+5+?{POTh_(HF{d4ySlAILPf>MkcTq`tCGDHr`0Ptr&Vw9?S8G}R zgSox}0s`-Z9jNx+-AiH@|Fwd7o!}-JJkF1uVnvLX_;26F1zhcUFS`gRk;oXD%v%9c zI8LJgL`d4dCN<8Rp5ym%@Z2254Z|Y*GS%WR!8=D5ATBOWWX58*rELywGvX4IXWq7E zTGpTuN5pK1Bfb6J_z;(G*>3%LjiT7i-E1_H?aqL*Jn*PQgRzacvNX3+1Ck^FpHI@@ zopcoE8kkx(4GZLN<#p;I0;&;ya6K`*(9R2gD>BTctL&gNT(dwV<>#Uz2=~uE5-nlQeSoIv!Sdhbbam5tJTTvr5zsM%!Qi;VDf2h{eX}rT# zd`hz>KjE_BDoSB~*9Ez`AeRrIs?;2ycAo@!ePxshW6?SkCbhBt)Mm-j%+<1V;=sV` z4&G-j7!Bh0nW~D8%arbUh5+M}X=m}dlga*vL#92Q?}_8xz{FZF)cqkF#?Rk?x%+}b z|MxBkeZzY9eDe^el58N$NPi(yaVZ*@qG=E0CkO+A_jUT3tg=h0ctpiKB)k_{T|1a( z_+$kOe^xcC0j=oE&~knElbkX8IMG!u3cODmH*sngw)wVeXzo6ZeuAush;9cGv4qv@ z4xcYh@EvV8x!{`(l68USH4Bn-Cp022|1=bu1)X9JNgWc{%!6K=-SasBocuO}45mX@ zz&P;<07}amfj}Bh>=@yFPTOITEiFro(~j>$+Ul;#E^%k~9BqAJ!KCaxP%!suk&E4q z@`R%=JUw0w+$to*H~bI!PsZ|W#(C6rr^fgd9XfP_VKc%T0ceiR#{h;92i^3&{rwbd zzfPsrSItf_8nr`Qu3>-C^!lW~CUGnS^|HoSK(oq)B7r(3x9xpa}o&4?h(`je{2yci>z<8X=^-V}= z<4MTL!vw6l4Jj8RhXXtWoYdj6zk6>2RBO$V4=awZ^l~-lV*3+8y_u{vz`HHIeG`W- z2mQ#;Kf8&Xk?>oYfviQ%ii!NIhQ9kaGLjF#v!*Vjp=OdGk0C&izmgrxStS+yyZ_Ox%d?YLK;ME?KmiK`~UFLr6ct4{H;Q`f; zoRs{IofOyt;?l&@A2bjU9t5|E0~7*4X_hfUSEKNo@m>LV+_%Vg}`N*CT12ctz-SdQf3~FRs#93mSP1wM{xYMMv%09c)oK;hQg;p*;|Sx9m9-X zG#S9qwLmB4x&V*hcN5Z@n>&c?S#>h8QWNrJ0%W(dtmW#C?KZI8;^}*@#0deHW0d%s z_NS5g442*(j$EKAvr5-=-c6ZY<$5UUFwSjNl~z*iy*+91dq!WW6>!z)^o{`z@4dpg8SgEP0AgZx~&UZxd4|smvm6joH&duB^@=ZUL@@hQwX+JFd z{JC9~IWQcIZYY-MPc!_nHSrHWA1z_#sUKUJeCM6ow?rBq^`p+s>XSzK3y$g4fyvLC z=0OfuA?VmXiY@0y8&+RL%=FL>714}YPiuB@Sa9+*m#~|5Yw%S&1DkFK_dt`b=b<{I zm7&|>3i}RQV;v730dZeL`mlQ0Vd4|zpPHu2Zjyjwn)r?|wRs3J_E|RcjSQl!zPy}7 z{VjLfUdx~O&!6uUH2sbg2(YR@n>?Jn9M2-py65L70c5!-pwuW7js1?_Aq5k-mPy*R z0jO4DBb!`du2|JM|ImGI$B1tA7+|=wf+AxnQ5UXpqKDh|q`>kr;Il@IDS&C|6l7{8 
z7Jw2;w=%pVTq(q#|aAb4~sgc0br385m3xBvF6w zL5=CtPk4Ao=T1%E846-3K{u_)`oj2P$HhFtkDEQA$xjaD}pjSw?<>}~(u zwB$~Aa~=Ufw4-KT7E8254`9!Zs372wD?}I8B8EQSSSyMt8UqDB>bP`D@vm$STrPx_3acJ5@`DId+k9 z;7cz1G+VJ{|D#G7Xslv)@OxUx$k*W-=bsYX(8L4RyR{||w8-<`6eNY_;;gyyTQ$apaRXzFTT@~tQl+B!AxM(?29(IH84yFGCp9W|`cAMd4!pB?tO7t0s{|{4e~->GQmm9;TO}hI19K`7ejHcq_z%XaJ zh|>#jmX@Q1I))XNn3|aq5tLl|=G`x8L1*A@yrkPw_<)p~f*9xx)t&D)g0a3K$w94B zCHDvFLijiyNzY0{c9LwMq%S^f+qmsbY;0`rE&|pp$3E*hf9a;U7IOS^unFm&R?kZX z&rGk+Hj3_TorCiDx=b#*Opv`2+nWlQp9YHGYf-jrt{JmCoA&pV!2fiylrymyTCO@{ z{AWY2=lVg*)bv5tO`|zKtMt@@+4flH&8AxYslj2Q@hOx1op5G6C=OlWz_mj}gl_~L z4oc*kH*a=)gx8{o`_0f*=_hDxloCvwPb>5}Dc0v(WIsMg4ZUu<6li64gApwOi*}4_ zhRE}{il5fZ)HMED{c}Wb8vrG&+g=lDiecim5EXg4e*tQ4p+wY1y-lyMYAoYF&)C#4 z87tTU1&_Vjmm^XdkFJlD2U+t*En;`=hWLmoGq?Zs>4Gpb-eIHGAOr zMBjGnyKE?n$qREv-bXX%xlCg<^9G3_md|fNL^?y%q2Tf5pSj5KPFZ{9Ch7bg&w#DF zt-UHc7`{96zH5vO5}v)RWr*C_DFg7hR3@o-P*6~GLV_yL^WBD|QRbVYpVaQ%YulY| zta$c~s@S-e2z1p6srXD^GD-WyckyR!wVJ`4fB@a85OJU4W?jcfWprpzLS9=BA>G=oW(0lVQGSbIl@ zj*(Fe=qz225P~AB8}!^l%J~CNyke<%hMWAol~h#F;0H^0RZqax);2ZW9X6=|gPG)j zdxf>O%3)w&h#ldla=Zq^t;;WqvK9%_*%?B z(+2gmwGNpkCn(!!gzYJ=Wvak)uYjWwRp&;v|BtBefakhh|5wV$u4F~2NGY_Cosou; z2C^bVwq&a$tE>h|B$OhvY#A9*Nyy$Sl)Y#2f8RZw-~YVM>vhiaB)&eM`@XO1daqlA zm$#^Ysp=y%6*`%-bi#T1jZP*kXYcBx)6&en9?zA?OH=G`L!m%n@%GS}xXE}iy(PGy z#V@$MR~OfwNDudJ2=A}wl3w;u_-UTE1XuT}hNrKbXZWEor)%l-%PgwNOSyng25@8F zJvxp4`l-8aG6q}N*nGe#UHI9k$mTqb#t?y!6aHLcjF6>>VWZr+Qu)*X3K#an??!(gh>&y?>TsJn zMXKrM@Qs`MsQfnHxpYr^ak47znr&>%{cB^i^4sR+Uos@><#Xa#OcM-};^SLe;=|;q zDdn1LNg-lCcs|_Fn~{6)KJ_g?I0SR|=lg}{w$EkS)Sp{YJ^lI`ZSifdyc(W68jY-? 
zq-v5b=dfr`qM(2pucX+E?pfegJT+b(mi@n#Yqb^Z$@xP!}xxvGl`PU|B z-Z!^uSA}S#q=|1#-P97Kr5(Wgo1VwwiRUV{iE$0hs*Fa{gMXI|WQvY|h4RMxIrN9cV*WTJR^tUrd7{QmrGiyKXkoS#Nv^}>^7x0JI}BX8sKx)TXxNn zY#)5Jq0&Yf@{6NNw&7)HW?6B+{-6RZF4um@h8zn&tD_xUujGFQcyL<}(^{o1m73KM zg?#K;UOd#fHan)HK|GZg&2IvwIi{jgD%R=Nd+o^8lqZ?5|K#PH)6vd3uFV`(rBjl(R=jG+q-E8u_ZVB0SNk8KTAX=DbRI8t-AGNgR8 z7o+{$`0IMveYbU$J82sCV{|OKdK$3PdL?$cj|6~@u`3Vc7PZs?lyCbL%{>wD^mL1!&-nwJPJwSS9U_@8@SazXcs(qHnD)Z<0fHBmX&=Un7 z`)OU-`AzJY?*M2AIo+=F$3@rG^{rOuI+fp7W?MGU@UM;54BXfGeD7ps$qL6&;o&hF zYfqX^ijKzk8dIB|QrefXu@f#m43|b2io`w)a*b5^yl7_2pQjEr_X7QPgS8MUObv9bGxlovL*k46)v z93YyM(z#OI2bsb?*IA3DX7&qKsc=o!vx=W}X)SeCjb1=OSP!(yvQ;#`va6Ilr|4F@ z{;zK@<_1-Q5+`RKstzY1{lL=*3JFgCNPV_7S4~7pNp1s1oTXCMM zt2{Zg>hiZpX+OOFvu6VH6Q%}3PVhzET)b-0p36#PA{4)*LrJ@4?uIuyjuZa!SV#U7 z3H#j}*Znz3pVU1z7DUdj{(y{YIh(;mm8|@kll2*69p4CMM9+2PPHeK!EitYzUiSK3*8>0C)E0ZDyiPb`JVU8 zln1NkMp`whU9YHONjRaU=l?cbdz&J9F z^z8!YhgwRH4v1$=oPz>V*lS`uzkPd>vWma;aKQ?I8n>OM_y*wK3uhQ}@=3$~EBqU< z$td+MN!K&_en5BiN856Q+V7k?bLM{39acO5>a}ZWB{u%7eI3+p?sxpm8D{lF&F<>% zfcLeEG%-Bi>0NGEv1<1ebU2NuZtOhzAon|fiIVjVnesm`Zfj>hYONdG$g{4_U-VMr z37Ue1KL#>?vV}84DjOG;#tIFgw(?NQPZGI7=@#iK#IG9Zj|rYrRcc}l4qc0uk<+VJ zuU_MoH#gMCL_oP*$1z5^F|UGaed~7b-o3dcOF4)KXXG7SJBQJ>yV$BlRjOxxoeWY; zQva*9cAecbmTkbGy?vj^Lq~shUk(rJfNiH({|Lr2RPIcXK60!Culf$=(7XL!dcgFf zwEMie&=V@T0n4Ok!NEUlb{*LV zGZoblW{0BiUc-}Zjp5ppj(xG(-dzU>HQW2Iy+5ia9U4cyM=AfCkxcczgPWGOhffuh zO-^2OTN=rNrnjFVEo)~=$M2lIx3`5>OhBH}Q^F*x2x;$%n9?>p1t;s5LHF(zlW7(T zN~!s-JN!4G^I;tM)!Taq?8OUAtqxVXxz~3Q8PjLgrDxsXKSTvL!WGn8U#dsb6~DTB zPx7W2i?i!$16K61<>3#uVS>z|>Aj;c+N>YroPb=50ZS{fR=JyiCdtoBGpQ;=BeAA^ zTG*4U4QxTH1Y7IV_+MwGqyrC_K&%4$zqHBJlH~5Y!b7c7)w`XoWGMhg< zi+S1;9ml~v+kYvIzG#3ZMC_OyH{5aIK3#OO8}+@q(xhg}m~W=Go_etkifu*zEi1of z-&{zrndnhUxU3h~*x@eCJ5N#l^7sae)=UPVh5kmNeYDY+3U3Ms3ul?a<4WpYj@>{t zX7F^Y_Li5IqR&Kunva&2v>!B@QhXkS4q6|b$ozN-Q~ta2diM4im8P^D?ko(gQA%=8 zq&c;i1A{Zu4EOJp^s|oAO5jyfW#*AnQK5e(kAASI`KPMbeE4D0RsrBUeBC zHfFDGOn!%2OjF7oL5^y|NkG^GbT`Mpe0Up%;h|o}WTo>JAKsa;xL 
z4&@FvqtCmXN5F5shKn(QCo2K30#%VeCf$QpZR{sQg_JOZBtawaUt{-v&TXnvY-05N z*tV_ih2L-Q{Zdi!7NsI@aWDbYfQ{K+#|;qI;LBrfEY0TiIt;`bPS@DS4DUL6RkD2U zv4L}sG&^$*JZuAroTcxlgZ=9y;z?umy)VArmFclU5YxeY$K21WU^V%*Nw zDl>gF` z0K<{WO*0~`eyi@7gty=ROe@V2lOE{^#KOYu_e8nFT^BNiBtfNcj_ghz$8D9$#>RZ4 zT;YlGZrw_Orsws4;!lY z`K>?QH?|<{EZcQu?o<4=Hp#6Sabn||*Ui70-DKOi&P%OLFyzFG?4AE}u|1hMMDb0E zry%t=Z}QuZQ{rdt8*oxtua~#xc#5{RiYKX;K6>C&g3p58D~HXG#ejY>n$TtG*%6r{_>I}`;j|;^-)uFLN9r==xxBfVz!#{mLm3lgKuvt4Y87u=i zM&sm@tvd?{w4iru**@K3*D2b=%H((e=ujrfui5R_&7eIM*j*Bdl!ggL?=CL>im&`D z+7AsLI;wX6FTRE$aqznN?w1@CV`^(oBO*aj{X$rMp34i3NWP|=$-!rWmL?LNZQ51r z4I&pW$~qjozO9F{#Q9P$Ht~HR7U2rQ>Uz@Q_wO_R#*TIR@j!V^)r5mqro~JCZqqnd8@Dc>gmm19q(TAV+^>v*mAl+LWKU-FZg- zWNULrt4_mEj;?NC-Q?b?L@#RNh`){-+^kZ4=MR+fx!Bo)659LpM);IK})&DcFXC*zRFmcy%hg zA88;RpL&C2;m7LE%28HQ%8_~#jb}^r(4?8!wryK+tn1w(9Ij5n{K2bCx2(UD$y>ae zF8f9Lvzd^j&x=~KA#Dd;Mt?Q@FVu)4!lnIb_Sk^Dg4DedFR@d3eWD8EMT|KEk{d;6 zE}v9mN@Zkpk{@rbS=}GKKvCT!y9PF~i4#6tVpjUV;_=1J;+!5;uYgcG&D3j*M3x_Y zU3>V@rFEFJM<$DR?${B94=9u#_gF9ArF;5UEoqFkXYGQ&epN}lRCO!V#P5U6bvpA> z$+B9d{M&b8vyxtY{a@}N^>Pm9P{u$3D7W~Bd;FMPw0Y<0&88-EkauLT@Ho( zBc}#hrKWsWF_W@4ZZS@F&ileco6`cR-cU9@xKsJ(R7AIy#$)!C6Q*Hwum!0vBDso_=z&p8I2C zBO8loXh-Abxjqkw{t>^NK@A_B=H<3F{^p>|<-PxOI(d-eb=W;h| zkKRDFw<%j2#X@~wb%UFgbl)a7*TlFh*TuCSdfj9>by#|y;J<%gs=-k~iU5kXD`MB~ zF1>HN97t`GH=lVo?h2i4X4Am>KK^YHeMQtNYY)2?L|?ccmvZsb`MRNzhZROYH?jCY zPo0-ChJFdJEbGF=sOeQn8JFpww@o6QsklTfK2*uB(1)DxdiZdCgtTkJwjeG3M@OR1 z$i1i;Pri8Oj5{_}gzPOIIEvhLgp>)|1OI~*P6H^AQp&KX>b^ zQ(WnmtqrFhhsB)VZQH2pNTeb2z=$Ux4ea{8F!ivh_#>1@Wd2GwZ~XQq`@$!qY%zY8 zl}$QxC)BF?H7|-TdW*ds@1Xq8421_^Q$zQG%7HR8J4H+^>X|@+C2#4e(v;|DC9Zmc zUz({`_xC3HxunkSm(_F+J5|PN7Kt{?K<<2AWn&hkkeE2@jf8v6wz&*769GQTrT1Uc zQXcONeM!^CSsf@_FbeWQ+(Wc$qq##uc~BMe*a;S@@GAi5uV&d_~^5{Z`S_&7KrK$tvbGJ!a9;dEV3Djcr?ufe(l_`82qnXGxKJsYh zw>Gz6MgMfmqyMFdChGH!=3E;c^2$zWxN4hxkgZ{I&|W0P(K=G&l3-In?1+DF_SE32 z2D^^D#g_eUBF(C@Stg1brs;#y7C2Ge0^hycANr$S`vrt|a6E7Zlv(CXQZwPyBH z#cscy)w_@-ab7R~M%averi8|Wpc6@D^VZ+FgdgqrFT}I$o|lyv8)Ucb?S_8o5rUOR 
zLXu}gub!I#7Zezh*_*5Md}z~6n`E`Lth;e3(mQ^#)UdM@{LZLT1yT;VEBo{JT6M0g zZ~dO2_NM2!Tx&RL#(B`x9i}Y`Y*qF0>q89;aL~<&rwrrpBS>tJfPp}0g=GizeP;IC zJD*&E{{ZZc$b%@A>|Fl$*CPyDVFk>a;o9hVNQ$ zm$`$B;^ZegT5AM50?tMZ%fxO&rb!qr5LOAE_P_e z$kGA&J3!96AgV1VYIu8{h+DpIJMv$*dsH26_|lcu=gjY+_qx{ke3;Q)AkoD(&i>6KBG{Rj&I^VD!Gc z6xdyu$r^S3O<`nW(wmgkQ4F~)te!?Kp~-Ci#7K|*yg*pAB=|xMm5yy^!!wc>5$9J-MsL0U8#V_|8Fj6_`f+j zXcm0UOG~~Mzo%;3k7wK-Rht&PncdjS>-y`5#6i&&cWm>2{&utZ?&0R8sD|BCWp_MF zqiFO}q$x;12OnSTH(FO^=_fvn+{Ms5@D<+Bi9X}>ELDt*8xZJI;mo?eptnElt92%J&JNEYyj0b&;&yIi;lV=KTnbGg~N5bn!$Q)|BRuiBi?jo{Stv*j=H+~rkg9v zj-;^txF#ttjzU4ik7(eO*yiMmD}%Pk1_C~6dXsI5zwjmgf)d4tU-_?7 z(O>30!&G<~F3Hm=cC*75)2%OA-?Ze@p}!$?S3*v*&ezpJ^Jf0SK|S?8E~UJ_k(os8 z@uc9)-ZvW7>#eF-lC6`{xd9FzKQCALLVV@Sj_m=R1VnXjBPV?sQu1mwsF3 zdQN)M{?WfJYOE5{xu3_0E&kz%GCs2g87U^Gh#yMKs*QOd;Gr)RftLXiz=aZ5`4+_D z2cT0yfIPPD-~Z5Y_@f`2E%c)6Pnguoc%U;<=Ka4wX6mEdRY;HG6Db{mjKSk+%@OKUsj)r0*8FWK zw#YpG#FY6XO;c|CpF<107rMB2R-I z<8!7}J0XNy4mtk#^x$V>%JWl2^VnLAcZ1jNn!qyz9%Bm$s>H!dWskJ0_BDy?L8)(3`TBj)hUVV~Wo1e|JEvI!`5=-5)>OUSxu#P*awZRceeFz( z?q{`4vKx_|^w>N{V9g~JwW`s4y$dIJ{)Ms23nbAq(f7!#m&@MqDm#peRS9u|IoJA- z9_=+ypZ1&%;ChoGJIlb9d#_d*?rPz zl5l}NdlYSL_tlvxTi?$L)DbF23$Rv6Q+&tZ;W9QDLy7XxuH?pVmi}|D9}I)y8iUvZ z4>-^COZVqk}P+P-O}nOsPa3rrjlk!7Tq z)>}fbP+p3lv@|!^kUT@FFjUcPLPFoV7wAj2qV_a5H>;O>dU-)iayZZD^`npwGWT>t zPEC~4_{fB1L@)!dj0{ivdq-vbW<+`jIrlnrx|uFZd3)g$uF0`88LHL2Zi0 z=|@Ho8k(-n+e1HBf0d;#xW-o}(fy0wuxFaxudHg z2Qg7?M;*CyAC^nR1faXAIOXKj8z45`YF!weo!#`Zy<_Y}^#s%oknt%(ZJroNduCpa zm(sIT&V zZMM!iY)1w?$+5f*ty$L8T@q^6UHG~9JAK@>AHI{R0O1^`omLe2Z7eZR-i?TH8(w;Y}911g$c$Zo7G%NCyXOQdwpMk~X zmM8hMWRK^AI`-6?*IT?Ubult-=yBI1XIVUkc=Qa4F*6@EU8b z+Vr6M+T`T^Es=rcB_r`2E~3r#yMMBC|2U)(^Tc)d|KgIa99(NATxY&Jn%8PO3>Oz9 z_71I<%Ky>uR=BX%@u|+^*GxrX(rauo)42^Nv+QGT++j9(QE)?D{jsUM>Tc-stuw~( zsh%9Xx}LlOe}8%kmuL;ll&F=oV6v()D|-6)_!%S{<;^r`JAtOxom~EgObr+e?}E!G zVi=G9>EY%LjsqRf~(FyS!-0KcrI6n~iU*pew z;y_g7-KEx$*scT(VZ`E|R;!|6o1yCYt-u`fql&K(5hQr{o_T9|D#-eW6Y5Ag%h`N* 
zjnTo^{*opE7KiqKsI4)}(9E{sm021-5ITAe;a4PL0{RfAU$LP!KR&T^OaNOg@n%TN zN!Fbi$?5-M=NZ99x&1$RNDE}|ziSa1~Ss6R9GJdGXn@J5s0z6cnau=EZ z#F}RaV8Q4BTQe7VTKJC$*eAnQfbWVM(jj(wxAa|Hl|bGffXTLJUcxdj0NDgL7X@*I zLZ4Lg&iEv6W!#lk;v1|&WFF();`nQxqJ6ItiFv!+dSeN4^mF|$ZVvz^(;d|z94tC z#G^cy}|Xp3wjtPYLSq zn#%~5t@s5q)e{5(0avQsc4CZ?sHOnxcfTJiEdQzYVjS+>ZTt4Itn>Qw;W+;zTffsg zrDnYCD|ZPBmJkKVo3fM_c*Od#8GOVO9FV%!!{~lYA@fnPl>w? zbZu0tSML*PFmYO!mX(o4EQZDvS!fVHI6Z5OG+;+M4#x(5mHX4D*M_Nh*J1h8C~t|z zYXDa3b^I164xA^Z?0Z1Hv7vrI(&%_o0T*B_&eJt1w=lbD?_J=WZ2Q+(JFq0IVcwp> zIvNv|Tl~bDDOE{&DW^GUVfcX%`s(6_Qwx}rVCsEyu5e!C{Q0PunBrNv+t@XRXp~PU z!6|!ZaL^33$pFS=@f(W=IYljev$Ms?AoWbbzK228(K9bk>Nh0)@Kt4*>lNNyzK?h) z9BtcycK|_f_%u|-_paXT5CN;Ox~LDBd{bPS9U)Y}LDOmqjD-$kMOkcY1qB5dh&`Tf z$#T@Aijq(0ZY~!}i1^96zhiNo7w71)h}`M1h=2Ppgq?5j!9aoLxvM=&YzT&I!laY9 zm;X?u*OXd+)i*Pj&GB-Gp1O}dh{2UejJMz(TFedzs>5C9& z0iE_XL0xJK$n~9sRtxXNSUUl8SNCmd9(R20v12qnJv~up<877~?f)7sWpBysKXBl% z@ScOpzSVk2lE9(j4FeZF-k+SJ;+h}Va-wD`Mbl31&`?*8K@i#7w+*)}?3t`RZ%w5{ ze^>e8$2Y9IYmg!MYQicT*OB;*Bqav_EjqG1*~XTTkmxkT{WT%wmyKvj2PcNRBPVV~ zKM4uBuv4hkjuke2ComRk+ta@(9XYa!SRygAvbVy_5D;{TlYxxtL@|goCqfbIvuYDp zv+C>2b3QP^B;^|KxA$k({ePei(?^6g0P#6Df^($#$>sUXQh}j!1D=ozuR~0D2?gtka8}W@9DWr_;hin@hJTi z=}UU~NpBHFl=pyz-W^%+rW{8n^ITj#^B3mNCbi5r<#k9e3I6xAkJK^mqGlE%n?y6I z$}nlB;lP!cg4trJN8>km)R=sQ2a#C+C_2`?V)|vCY2~9|Eg6EOf}*6?cmG}12#RYP ztyp&p%5P$r?Y^;!Adz``OJLqqp34w?Hq*Qv^>_Q>!_D(;JkgexdFlyxYqu5boT=;r zLp~eDB_^itw)!v2lxf4vY|5oKN&Tk(034bG&qhJpOVRb3Z64Mq>$tpbobqv|DG*#X z3HWWPA+Vz=pAW5fDSoE&$yd!iFt5+|_z7>0yfr^=fS||k-3tR%&z0d&ss97bvjNhv z98%1)@5m*e5#KvFQZ$;MIE`D(a&*4gy^x7w9j@P$mHtdRrlVo8-v5u>-I4g6)+D%# zEKG&v^>ZN-0hQk!+|5t6(khjU_>RAQmZhty`SbG0ydZP}U7!;koNivYz)BLJUR#LU z_4}8waho0FB>1n{Eq(p1R1BOy;;s_0=Vi`=%%Yk&LQQd)ksjS(0jUtzMU_$o@!dDf zG#veTj@IZ*$u~dR|2l5p#najAXO6Xnn!Zty9#cQj#bMiR>?}`n;acd@Cr7B^MeF}Yg#*PQ6c*(-8#&DKXS_`^q(^sInjkvr zG28R7M`2;Vs7@vxzz$8f?&NWHb~bIx-UohTb&|RtPFFxYKpD0p(-RB&O#Jz5=P7>F zb5C@q8#iu5h>kpB4s&x`M%x^K6Ct2czq{CrWGrCXV%|LbyZj;u2SlO$vUeEk2p|EB 
z&}}(X3hv(Bb1Liaj}o&UP1Oayo0F5HzVvK;AfwJ#ld2|6@8{LkwYE@3-J7!8p?g(Q zPtAHU^liXx;{{0~!}<$-Ink)EPE~bw+GqCcTW|i1#c$RV;fAR5e9K-d=xd4hSLrkuy-DAvs+x``U|;9iXP8Bo<}LPCrr)pFoTTbm9< zmBGQmBzO|BPUx=X068kXeU7zBg|B|y0FIooG1nSk70z56gtdJ8gA<}<<2(ktVhA}N zKxzjbOD^_oQF(c}nWBfJaE09!i}UA$Kbll-zLc~2T3CeGT*o6M-9P_Kaeh!mxNiG@ zFN225bJr^I$cSNM&ApZ;--kHxbbR_MoJ-H3LcpzgMt84n^B%MnL}9(M)VZSb?&h7R zex0BwBi1Qpg#s$O3|kK(^qwkA-MMpTADn4E{h|;LK)jRI0{t~($O+>NS&Xt!c`CcO z`}EOviLw56+1blZgp-`G=Is1YfIr z5#bt-t#T(w1#!D?^9p;}SXp@m1U^}c+N$a527scjKg?u^Dg0S(urQ|HAgM{;k|_ws zqt9!pR*>++IMP`>|c*^=h9zyUotU#JAh5Z({a|q272j`!b3s4Se5kytib=R&ej8!#U(Xi6jG5S>ps$Y> zZ60YdWcfe@-TNX)rWmq06Z<@Ap6lOo;gB95Ut+L?Lu3^i_l2o}*3Dx>Lw;~s_ttA% z>SKs!lb4gDAc7lA|60==!y57Do#3CY!B7RuOZSh@k0Q*Sjj&D48oepqG0Q(PE@-r4^dBDco<>bxczkH>&1+uec$fv()~;s#Eq_>8m`UQ9 zN$^(;-rOiYp2Vjck|4wnyORv5QUn~tR)sN-#2Ny`+~pL2s9WSZ&=Ie4;VoHMVuR4r z;uNJv0t1Y56ckcmWSs`%UlTFX8fwW~0c%S{h_K@7b{{7p3>3iXve_m)(Dsm+?STQ4 zDT-YwPi1>PW=Fbjay|N_)1zu{+f2~c&edVfo!7qP{%?IM=vjM8cSTUwwr`9VR7>ie z`w|*Tw_)EXIZqHU{Gg}(m&8!K<(Bv4P%!Z^Yx?m8NA<7^5-@n>~pWo5$+(*!MF zziHZV)7snQ-4S|nv7441c{$~qMc#X$o`J9E#cqKT?x=%M0Z8-_ zbnQd}j!zFQJXyn}KKAtX)C)H4w9iweIq zIy@$5OD|YVi36crM?!p1)3b)lNvR{?O3h7};Ry9d;@pPy4v34h13Fj6ICQFo!`t>V zQ!Su>5@4b#J+l$58lP#PvUiuHU6a13#wTIPkAo+MDrikvTFkAlN}BV|?@v3*VKg3p zsWG}q3Dm&k)PgVNWt za&J!$fw+mr4nB~rnC^a#YemZ8n+5Vn-yX%ci%@ImBQ!D~JjP1ap)_Y4lCT(@ z4$h$8!N=#U(-L>PxLBSjC$Q1VL4w^!Ow+g)X>9K>$@oHA_2SmbA4omc*PJcZKC`+y zD$>`@dYyf}h=k{&W+AIiLVXY3s=9SWC;)SGKLvjl&Cvy9_o?5&I>+yDLJ0SRk*s!; zFCIW9A=LVHKRe4i;q>voYlo>AH~T`Xu$jz0Sh2c>cj)Z;KFU}yPj0zzROxM>)@m)! z*>zP3)B~;+zmh`hzqoNs?^4V-=~Pv<*O8oqBq-pQJ%nTCwr2)ti8z;--(9#}FFoHl zHk4uBn2H1i!kETf(8;kM+C%^rH;}v(?pQXxh^Z;hpX=~L=b|=ia|kioL;`~(xBxh( zKk~Vv5hInkDcknN+mYn=qs0)(?1Kl8(AWh99lgx9WKSSTgqm1lkbU#sCtbli1OJ5(!zL&u=EV3%>ruJFu1L=w%j zI$Y>beMqjZ%;L}Om{A^ov3-|q)4iJDn>cpJFKZW;DyICp)&O{+)6ZxhLU$0;4~zwN z6&;@0Buy0x`lC=@y+tlW=;2PZ5CzM#+DHa@0Wgy2MKP7@zGLfF3vNhgsuGE{SIvH1 z^FfT}&$^*itY#UU1jlc|Y;HG=1QQfktC`;zMhq|rdz5_ARYERb{eA~PH4a|+uU@{? 
z2f6NqSd9q+%tA!HzCxvl*mwXE9a_-KO-O&X)7OPkzW6AE!2&K4N(!)fmCpS-PAu{B z18ZSQ=9s7r4l$z$n)kq)-wHeoyrb<<2WiPcB9Q=6q)a;n>n9~AmjM$bd|A&&woAdk zS5)P;tRka2fq%9i_5Xmgq<#IWpYcS(gUus_Q{4xDL-YF_Y%2y>iA1m~n3Cs#tgI7P z=y)UWG`eF$^@^W4cHBjh;i&pn~4T-SKwSaJ+=%`cmjxrLhL*4^mni` z5J&^D#oN)x`XaZy>(7gblkR65}-M|0A^`d&n zX-Y)ehGunS#!+u(nbDsK8!nO%=+Zh##UcMg=Frl)V=`)Dz#F!@%nsiL5pu$;9QH=y z@X5pcEg&Fpzitx+Rz3Y_&H#@;SSZjxXTwWAlMEU(W^6={on{iG%VJO7M?xT1mOEF%bhfHEG18Y1F97K=_URtmP2%yO5I4kA#{CbE>NXYI ze_B-+K4vAL`sDrV==&3$%;-AG1zV^YH`4%Mgh8jIvvZk+E?msI3?lOO0^x9Ss6q%B zaYJ@_dIKJt55&U>QvmD8A%fHRksn=JTG~Fu6uwuju$SP*d`9vO={JirV}i)nor000 z0ZwS7>=i|~?Q-?X6)c{4)=rA8`Vt34W5r3$3;c>SMPeaigtb$YZq>Fn{edLHF@1LK zVI|oF8V@GSxWLFs5awy!;GLpG`cIrO0DexsD^ySTNMh14?do?H+<4feFD&UeT1q%_ zoI~I*6wK^&)|H6hkfD^Y9|Z65N7KyCspi*plEy=fgCs8gY`8)yAirX5>wGUPTc^72 zcH+W^hT2D1b%zvZ$Y#0xyJ`0(X96p(Q*4Fu)r@;3>y=4WBxyI4md>e48~4w!8e5I4tZM~CM!UvoNqPffzu*$!M6gmK8acxCsOy~7L$3bG4fHq z{uw7JP1MBkg6C_P#r6X}%@VwdeK&tQ9pQVPcWBM?LrvnMuu%<)cpKOx#V~Vv-xib~ z-#gF%6aP{+Ed5@RjjL|^+tX})w2EcjrHYO8JI6G((m0;O)I=yI%#n{c{$Ws9f?N`|}Mo@1l5+xOC)er||lD zPq8z2axjw^Aa3exc(I>W(ov+9gP81kynN!siSg-L)n4F?jukhPV~~$Yj8f1#N-sW? zcRdWp39+uZL1B9~yhns?)1D7tO30-g@u%XmV6i0Uw^dbARw$5~>9NjE&5+x18#W5A zs(beQi>UA9Oy1M723(irpFSynox$A5-d}~EL~@J39H~=ylN)y~0Au$!8-H+}J2vCz zYn^s>b`wY@?ZGSh2;tR^x2vkR>?dmqn9-&im`n)0L#BfHc_#uTv9Pcpmb81>SQs)> zbp0N)z^h835c!m|LeQ7~uN)mVcIKd44_Y)a<2+TyF0%z^NLqlMs*$DWn(l{&`oO#L z$M2SoCRJ5fj*cPnkD|_N6&jtTMkaDw$GVPj*V@88W#M1@c3JK`VE$vOPTO-RJVQw! 
zZ(vf@Lxfb$#pz!pfu`;iO+OTZMVJ=IF&R3d2FWfOa08rcLain3d$iJN&>vzu-)Y<; zMJgTsFaK4ijxR-bsB5r?2sr|wWN1r-ix*lrOo;jYoz4*KL*wUxKbVpzp-G9>x$6>a zw`FKRI&x|EGpg2W$w)z#+U6|OKtb1^lDWql1`zEKQZ4I5d6wfvswmj=y$|WEqRA%!027f#QV4rd{ zYl@QkpDHR!&>`9YWBzu(6h-ASR=X4WTjE(t7=oRU3Vj&sM}A5KT9Gk;81*3drbRva zKoZTq9;gE2PZb%a!F1C=7)>EGR~`Fe2hDz*uUD6|xuutHZ+*j(M#eSqqNa@bM|Li1 z7hHfrI7wh1F?CnOJ@Ev#2T2t>vk&em{5|RSL(KU7-{gwr>Dsg5-MCRle_^uE{&Z8C zGrF}2RE^`7NApQbj9iPS_y&p?Tq2!%XmeAFmUbm-^vIOSvKL&tnpYk;xim6WEfT1k z{o;&(xI$6;DS*f_f94H8OK(D9B9R~>rqx>kQcC7`klzKH@~;E?+7B8s&tgt800s*| zco{J`K@pxXg(1%m+iBa@4;k{w7%bWMU0y> zMG*8rCS$@}jKdQdgN=qiI>4~X(PoQANcGg*+(06I5r2Lj4Ik&|d0LL9n8=Y`J|7Lu zV>-gVJ0Nf408@d1QbXjvFYcVKr#iXTPkV}W?fRwvZ5;A0KYmYy)xRGr(B9d>W(Op6 zyzZRpaZB$KBBz8dBLdGP;B}XvgQTzZmbTC>ffvq0DE08M%Pr5+xNr)xcr zTY9uhxj7vYHP%%`IMirPU`cqV?uD{dYfbh%cvElYY|V)Olo9c6p_3KLrZDuS=8EIU( zYkXm*Yxiw(0klpPDfJ+rUpJyhWkUNa=%Ex@j*j4~c4Xb7=J$>%FWHt)d_);edOXiZ zLy1n})wtVO>@ixo#cWHl^|<&Eq~%FV@Af&_F~7GiZY;1P~>-WPlVR?EKZR$3{mv&lWH$mm}8ct7gXoxjK<12Zkf|2-4Xh~q!_v&6T&z_4~^Wl<8eqc=-o%9^l zEL+=$qGEb-Z;S0INkdJG^{Y2FSTj*ii+!Y~qH^)irk>_qRFzrSff39bQR2rf86jfR z$}sc7ztCb;#RNglm8n9~0^swRfmmXYhn^LiN&|Fz;@=(%X zQD6Q}n&KH0B6d*Rds?!r$EtrruX2x*iW@m+X!*ONeRz77oj)nz_9jG96AK2Zm>9iY zoV$DrF&GZ5U9{)VpZ`06LDm*4se5B-iqIq2S4%wgJGEvU>w1oo>;7eseY&0eP9<9e&4je7#fkN4w1G z0GIZG!ndktDmB+nD=zb4`QW)i4F>IV=+{7a-`4|h%2BWbezyB5H_6>aj>NC=l~4#D z${xmJ7kqrkcamILVbT0Vx~lb&8D?cMwt zxGx!5h)6gR0bR)bwr~UjMX<51?kOu0A6*QMv&e47O4^Tto2i zq33Hgr}dy4pUB=Ss4=$=txlgiD-${K;K2h^+>S6dZQiH!n627YCUfVS=bckktc3Rt zZ!MPb6Y;km+;I7Up{o4zxC#<<0XjyQ%zmh)+xhv+dPGui2@xQaV+eG`#u%>@>+C|m zDU@Z27N004Y5?E7ANT{rEvAXwS3;r*qoaF5rWoky7195aYugZjx&bv2$4w6%UdPA& zHk_J%W`6Ifga?iNX~GW z0XqT41qJE;Y12s*Y%brT5gH6~UhOI_dH*TiF>=W>ie~=Pk28`^OL?!=k}oKfi`>^< z_;@M)yh~lIOFtQeiRLoWxu1iC^1@;k+Onev!d%}uHW}-0CW6QdBvN=H>fH;y31R#$ zQjOhvGb?*o!>7v+H$3h2ytImgbE&?+t=8B@Q8yN74gs=>g62raPcSAB3PlruP1J&& z1grtiAr~eQ(DI~Z`#WR0$Q!d4-}jU2GqD$JU95p88dp5dIj=%ZP2G#hxg6;RK6nb3 
z(YMXxpAVOdLOzav;SjbrIR>{M_VIk$b?#M{7n@$HGrJ`Na9#A%QP>(zlb?5z!G*LC zLP=^XK*sd(29E9vBbRspX^PIkLr0$>A#PbqN+C>gfNQ0~2u=+hOg2cgJwn$-%WdLq z5V%2rEX>7g(qM{*ktx-tTb?{Xp)^bae;39mnx)W@Bfh6IH_Oqw?~mVW%O*u7KcrqWRNpG8 z2c0P(uDrsgHyqr_Z`SeNiNBxBTHSE6_48M+OhiqnG>RlvbkttoTcJJOe7P{*nkIOE z&c`K%NY|KT8{&mSwMDWtbEM1QuGht1sd3o{Dicl??ik;7<^*hZ2z`f$ji&;xs{h#Z zL}Sp>JzJ4s=qsM$5LpD;m<9!-nR|s8n#X=?#LzpjzQvrzL)1pV4 zpKY}!S<-Fxjk+<3=HIaVhb#_JtNtLb6Mc{$6w5?RChC=c==bt60T^JfH^6W&WbeNW z-I$PeIPv`$i z<+k14)DrPB!pQLqUM*&VZ*OTMY!I=Fb+?8mF$?)MUB} z4RQDlxf7~Z*TW3jn$HvzhnJ4uh*pSm)UmiiM%BB!QpoAB-Cf!izUK8owRyEF0wuC^dL&6+|-!njSqY>>TY z<_B76p^Kr+&tR`yunL_My}az&g5Pu=z7ylYl{0^cWqopDhDKv8*hwY_^(Z#xU6e5#Mo`>2jHs< zeWAmA%%NRs@#l#yCeEA?BZr@S~Eo9QS{+o$hdn`8E#pKp$7SLEB=}8i`CE!b?MLuzN5U4s=Bb z&AKKo-*tT}{Vi}k2gk1vu`91%zVy|)0ORjg5QNe$)6ABqTz}!7l|O%$tko(un;07# zz~3K}q)b7i6aZp=HH0q$TqXlD`iZ-_qrilfbc1X-_#>AN(lXdeGW_;n9vHj9R%{b} z|MQtQ@%`Mrl{x3ZGmtwS_aWCPD|k81+6`ra)cgkz47NL!>pd8K87C%%$F|d&UHgdr z+HWEK?ng$r85$JEPnG;??mgv(J2tHF`#z0hTMtv*%zh5$q-kZ(?^Qj^QD>bz#rA_> zBH`%9261lli;I8F=f1wo_U+Tt*WX((-@z2<_wMwKKBUL3x6j8v5&kD8`4LnP)o?z5;x*nh^1BMA#rjq5~MW7r2f_pQL>jiboj-l^g^Zdqa*r^onh1lzvHl|X_ zx^tgC9qYL7M)iU3g3P@`i|HGNVj@{PzFC=6?&Hv|72L6AOTS?yaA>lFzVu06KCpcq}D^&Q>rVN!PfI@#S zILVAmBiGjfOo%ic8fkh<2~pE}%lySJpqBQL#FUjWtJch)0vNIfGBJyavLx8xYls;hek2XE|Ux>*p%=!gyJ zDuc-feaFRT+DWxtD#tT#>Q#2~ci6s&%Yx15Y;c8{gct0>Pp#jrH{y?6ZaIAXW;g%X zD&06`r|lRsiQ!h)!8TIbfAit>h*#WTnLjm9MaMH_dRGv3&M}$w^NgvGyVf+22onfl zcel7)-UnQpm`c-UV2$N%(VBUHbxOzpp6)YTvN!YMYEDkOW@+4ChhJYZL8V` zeY1rd-*mrD{iEP%z)n=J_WX4~=;`s}$|0fgAt!aQ=WrB~0m#z8oAt-7I#6tUvmO1G zAa+aW(P!qau^=3$Q!an*)m^~-q{cl-jlySeRq)P1*rA#YFY?awPr=GNf~03D9g6Cf z!dY1ZAvM+F$Z;w%K|;74Ur7;FXy68%GA79pi3CN4K<~YcT!}gMz>yKBHo>=IlIBp#?q#aqAl;{Fb=2*;D-_k8QL>gT2WuMA!U~QbJH_Je z7Ny=sIrG)^79R2Zm-gp$0;0eAyv9Sd+2+WYz$NDJLTR)c1ky(`E}(vJfNPoga+0!L z3FJH)xGg}FsFCxPrq_QuCeJ1L>8NFE1~KhAH=fvAQ4(FOf+M*Az$w)3&NHvfFM4;S zY23f)Z+zD!aDvLsc>j+2;G!2guEpV3vDE7LpAaFq-(wzqRn42o{8DUwREZ6Gk7!}h zHpe{TRtDpmG?HB%Ga?jxYXaDcg7 
z2eZf2V5L&N!F9nEn)xpq)fFb;r|~pYR8}^`g)}jV_L#bdd|q5hw`g7up>)h}ML#5k zBqmrZ-SjqwK|*02vCkNp&RZnPct!0YG#6}^do?B?z4py)K!H`%_PhTW1&bsv;5*Kk z<-d=tY!w-FnzgPf#xA9CRPw#`Zqy2~p1)nD_KZCt%OBmL)w^Sy7{%{mK70W_?%pc?qE6fmd59$=ttN(897*g7g~@Z8^h(PO~(}jXuyIhz!s1DmjiLElS^@w z|A}H?Xox!P7mIysWNgffQ2}PyU4TLo8?+qMqa^n$|C|C9@WL`&G|xPNgZujzx+It4 z=AKp^qwt{Gu}WYFf(N*(M&hXlZl;<&zj>~t*Wu5DL#H0|xaO(^ZH-XaQ^gup*mgmH zaPJtgm0DXlpJU6uo8V7+d65vI%1%|TkpfT(L zpK9mA{kB80_yBQ23k)@#TNiwP&1u_9nEVl;^8qV4B;f3|NdG{C`??pXs8X3(EJ)Z2 zrg)GF*%b$~iuha*euADm2$aiqNVbAx76pg_0dqw5j`L?vkn47EXrfc8a_Ygs_5Vn^ z4sa^__g`t&NfV*eQ8Yk{{7eiV4`8&)klH#)LiGLm-@vhYSzw@`%u3GgfgC+3OOKfh zQ~(YJ8G0dQEAg+`^E;ie*m+IKU*9JNjJNv^zN*}w=h-{SjrR9!$f<670+R;QZtv+$ zut0#s0hYH-LuYx>kV%L5x)5{7eJI^mMt#p9wA@B>%Y%Ny6wulsG^k+J@_z`y@a2h^ zfHBGO@)m1pZ7J9`p=p>Hfd*aMd5!O64+EvVLHdrjm;W0Yiu8*E7Y_Ypene=S9eT0Q zSK71L_vr}N-8@50)7R|~lZ`oY`@rIV$)C127o;_Mk6#O@kOj;cbXz>lx-!m72Y-Py zl*4QJ4G&ceQ6SzlZ$3ZlF%usU9NYk8b0D;XeFHCrfGz~7oC`W0s3eeC>mCjqe(2&N zyi~LAozbVB)dSKz##+wb=gtx>IbIOPu8wIof9qR0pe#uigW2@~S~t5+F`pgzZ=Z4a z^B_&eU9A{j;pdkJ59{r#eTU`_mCl+V^!6#-bl5Tbi*1+?{TvupA(i(K{NxWoYlA2R z&z>E1#KYpyfK(iW-AAw?K(3H#9hlwz?DLmU$EzQFzqoYhW`KUKBi!VNEQO!F)^K)) z#V0KT3zNF${#DJ>+{nYPob@slKeT)Bb(40TqVl49-NWcK=9$&*STbt5sgzuVy9{H6 zz~55~7*?ENBi_g!G-^R04L@&*TO|Zd2*AWWB-9n?85x1d{|)m2bqx}_2s(2`y9A!e z$sHfh8MbS39-_;ne+ziF#tw1Lw;wMZ=jGJCf2FtIR5U+QE9mM)CFLI3a~<_jQCcYq z*&nWMp1t$ZFzB>+GIRbxP99#iT!-lBnca!#==hPLks<6*C05*<5Yl27jhQaz9Cq`z zvs36DfOrk10#!>S;uJKUO|E%`07kOkv$j@B_1WLkj^GX@C$xH6g&gagVjpEIxl}Gn z(btE*XJ%SoUf(m7zs_j8^H$IV zh4<7;A9YhUz)UyeZJIst2V^{Hl_cE<4vgVMT^w{bo{*komNLByPS(d*0(tVsu5_dU zih&lVJ+HFd77+V1!KEO4aVktnU<}%2OF%sqdwpi0TTHq_1PNZ|6{5@4w2N{&KTchd z*}kRn1tej%p(^R6mpUt6STye@w++eny)Yppeflxq@AnSe@`MNxi3%Q<8Ct*DtcL=#Ij%cirR4(`z0{upe9Z3;icd zOi#ZGh1nzc;19C@4V8F6*{ogTE!o=IS_OEK7rcegD5uLEF1?daQbcWJ#5tV={{~-1 zyz50+TQ+n#EChka>;}U1D<2Hi7?~ei$s&V)nx|WJSls=hB|SwT-s(w*4b;C= zan5b`AdgneE=Lsa?#t%dpY(A5_w1+eRWFhO9CFCKfex#12j3xI;dy?uCH z&e>TAY^=!81E7)KgQP|XFvLLnVwpv;jAIhILPYpOBrn$MoS 
zV1D$<_*=i&_NI5+pXzIo*}i-FApZRWT?b;ED}0o^`*#;6NU4HEyzRT>uX$~@Q=*tm z-${Q)i?p2;=dttXI;z_C0A+4wEsg!Y`G>#nwKOu1e0qWM`TC>?U-S1xs^JkXm&=^% zs-%8vtbeKj&1#K^hJ)bow6yf}^z50Mg@P7jW_jtbladZu{eM9Th;PMnv6u-&J$C>u z5{xka>Zx~RVt;7BaL1>v@?6@FJb|7g^hq{P03cc7KhCCPSZ+ike6T0JBwpaxgNLQW zO{abH?1Ym-Wa;WVckbxBkNF|7T5>&j$U{i@qAGS>WdWnyH3@Z-ZSg_fSf1N*RoISJ zeUg@<@sLP~-7%dr{>JU??Vlq`BC^CW=|vDb9q2BRAPUh6S5}uYqjn?bw`@sG*EaT? z0oJsD)jtC-DDC3{Tz|Hr9$L`NZjr@3@p3|lK=-s8`c3c;OA~tqH zAHM?{BQz%FVlFuA3zpKJAhq5m!N(_9V)bz@c2gIIm})QaGsx=QK3}N5bV|mq_KZvs z(}&?xGNnl;44gSWUtlkI)7DhD#0h-xzk?}r9E2Z_lgI;-giG=~oS57&#lRK`=HU_~&U=3y=re zIJ|d13AKF#QVH2E(;FHX8=3LDPkcTvCZw7vHssr?m8l~@ng*4s3Fe_k>8XZ=E9 z8(%B^Si@#HG^5n_FljES2G^_nx5Tsvs9F@(R6(45ilD|uBHchesHMiq*j&hso&QHd zGaajntqH+fyu04RAN#_;Yt>WnobrRSMeYXh?(9qn`pjvDF8l1QCBn}50WudOl;gz1 zz{?Vr{ghXIN%=>X(z)1k-4OA&isv_Zlh-w(uhy2<+dp8o&VnOG1-it-m%}d z@d)|feg+tCpspLpHIX0ho)MfpK#N4Ceo#`^Aox${Rh2$zTKsN0w2d#(Kg{E5nIlJZ zmfS2nA1pS{qu*x^Tj1T?p#p_CsO?GyD@{asS5FHT8M8wtTz@=y2NVEahsT;mOB z&q&_jb!R7y%P0;WJQkj^&hMhX?BO}iu1~X!&k#DHp}u#&ZTI|vKJ&c!G5+MW?U>mQ zqo-tWi0%wY#eC0>4p=N1${ilM9tHB92mo4ZltgEEpRW%Jh7whU{cleWKD@{B0W%E9M!> zdGCJw+y-)^n=nq!B>#V`@c%}9#iKMsGlO;itty;XZW_&|yD`rx6v zGjYyuAA8;HJoorV>E@>kLeQHkYv#ML9&2=Tx0~RV`eDG5I(b9%>+O)NUyka*r-J`$ zRBLqmZ>-mPCo}Ls=3npm{ILq=;Bz1UWeEJ2%DJ8S*h{y$CPm}=?t@7whPg8t6NlFC z-008j`!;G5I$(8EtKRUi(OEQYgt;TIyak}^u&>#5eH6?J`zAv@C_(Gh<=OtgM*Dmz z$1LrclhKd$y|&b6bL*Y2Rp7li(zN^Xg>@q-jR^TA>RsbMlf=V4Z!C@%2PGDMcSYe% zU5D&S-*QV_3@UJ68Gjo*VjgMG4|n^d7~{utiw3R(egNGjo)RcK@Y;Kqk`J4?^9ZVM zC52=6`%h=o>+fzc-A{-6TBNHbsn@3+OP*0-_C=`VWB6?}>ouacHZQ0F-oPWIB(#k@-TwNfw1 zFr^io<-3G;q{ztp(Bu$ANn`QS&t^gr@p(qEhLY!>rd3CL{!vWsy6oID5{SGz$?56- zg>7Pi&@x6L-#B)2Fl$)vNvZUWyefPe*qj zSAO}}J;9d8Du1eCqK5FGTy;5y5yg-?cWia3EK?V~U-r4*vX^k9F><$^U;oRQZ!gfr z_8QR<1&J%gIMI9et)vaq72om4!q237ckD30V>mFqZ4cnQd(%?35vc}Lih8hm-oi+{ z2(x$~0Kb~W!|Swr7EN9<2tyryF#Du{f)*(E$vh<5X+GdNYUQu#N~&o$2@c9x6@60< z`H&~+f0^CZc_vSA@SzSt`OZnSu?y`6rgGVrW9Jg-4>H4h8E#Sn(50n+sDbw)yL)Ex 
zvdD?P3wJh+6`<|qgSyor)Iu6&h}&0hTO@eRUxyL&_o}HQB;;Jnmf;w7W8EtlXR|e$ zWjj{)#is6y`_xOFz5))_4!1+rjq1$7Sp=fL;aUo1PtLc!HRNwb-ppZ*JeyVQ@fsiH z@2&*g;hQdvHKYICH{xopZ$9{-FkS!p_9YEMC`kM=hxN|B%Z^HK0;z2I{EsYZ2rTM0CHNjIjg#qQ1ZD}_duHCGYOPJ2TgMVAIc^>Kv+roR zv+|TC@en=8ncj4Q>)Kaj>G`V}+C5(aT<)*Y-fHCA-W$^=w?Mdb56iO z2qHTuW)5pO3|+B{RU?Wl%dJg6J1=?G$>3?8zR@N_|J>Upd15|Bhb-`)JDq!-0$ZN# zwOy0fT8*R{`@=I@Z|ZMIV+bAOLz(2}|8}Om$8^2IVVFVPt#5Mm)qnc#9r`e7!x{Qm zp?5C$dg<>7`789s5clFm9U>sg&eZDa1aJ|*2PWn{NH!9|m=}Ta$t600!Z;FjCOTYE zIT1ad8J67@EdiSWx6U=fakB?!D`<44r1~5b5)Zb#UXtX!YW_%nM}>ywW%Adk z^ap-)Nxwh7h&g&Gsy`vSe$4Ex*AguejzKnC!7F-W@RfO>_IacJ!j3*X7Ta z{r^pQ+io1#y!i7@o-L(nSkGEW)O%Ieu>9BvW{-uib+l%0BhQ%~8S93Wu-)bFPqQ=t zcG?I$fc~=Q@9_dy7i&X=y$EoLRnFeal4nDmhmUlvK8Lb;T2WG6nvy0p^s6i=DVD zEPkT5fkmfFA2*-gnjI)w&|z=Ebz+|HnEA!gi_AQ`cW$W|u)pA;>AGKTSmJxoG-a3h zLZ&LUqKGjEr`$#8l&eWjPQF46ACG+TfJYRHd!(2E9jL0*wwZm!+*M@YHIfm6JN-bDp`uz3Q0%cCR{oSh(<}act9$lSGC-wXO>TjT7Y`G3b!LytCdJJ2&;5<}p%=J+pJr;#vUd!mXopw&Eo8y5M*Bg;3=< zKsDo`9SyJ9U37?Z9kgcSwRIaoF*TVL)b$ERK;%cbS-kZAHm<+KAgox7eyC)-D{=2x zSn+1RX$WjpniZY6&`U1HPH1#>&0ONcGhu_{cfI=Ek3Om-|D)n!ZMMI88)P+(!2a(= z@+2s+unEB{fRt#ou=6DQT!3g4+g)C_?@~h<;1C);{q2wK4EPaUJls^_ z?w+ph{(!}vak8DJt%1$Y3Ee;IIQ>m(zcC@?i?U&;84;yG5x)DPk|y}s?UpZ|HPMD8 zd5%}3**5BM4j;k`I=Z0DP%4SI5Z>wou`j}taZkl#mW z`RY&01XDDtzwr}eT~;A4dDsevpmU1d{%I^p>d7%TN!+P9uJ!!6BXVN2I(UBpH{DP4 z#>;fIM4)wqhFlE6rRT zj^CXL z=$5G zzh58T+a4Mz$!Gu7WfSSNwaoUtB-tZ`2xbc~)R+R4!F`Ved`LN*QFkXlKSX8?z1|5! zgCB7fX=E?u6QZor9g4yNLd8>%0c4PP5QqT1Ik{RtDkyJ!kKQugaYvvPh=Vh+9irV! 
ziiJW+e6i@9Wt`)q+R&i3A6dBbfAwt+<8ByeNx9G7yt*Xpr|`wicsTeRqz6uBp;&H7 zJP2X7ctsGVq3QZ{JLi783JKMc^_A?0zr?qTZ)KuKVy6tmEUlhv|DRcAGTF zbEG6*5$k5aByU~oX22jzWNW<47}x=8>Y?c&m7pMiUFZY3QvR=GQuwf1qE^Ikm0o9X%b6Aj5MsMZguFX^G0lTq4J(i+~2td z&(nn}+Ivs}nltXKQfauugK=!nfBeO+Kcsu7>A^_|%$d@k!8of~$RyS-h+8QmUG-kE z#fcd{^e2@j&5B0115kA$b#YR1!R@?&ooUgUF@F|&o>>mR5EhYF8(~a3Kj#9g8c(xJ zM1fsrS~5NS`W6iEGF~d~TZMDa88m%|`14+nin>%0yhB#c`l#l71$y6cdd0i`-Y|{e z(8WEWbn{!roo2yObI-gfEk@2g#WiDFU(_s{bDw7BE-@&E^kuG#jYxgvHKO}~E?$mM ztqpnfJz(CH{|0?3j@ZYC3DlP5Qs?aW9vMu^vD48uq+q2Yjr2%qYcQnXPrd=GR$rTV zFz4LHfuC3xuOY`*wk(a$E2Lj9(g!wXI(#QxM%c zmKf5scEqT5Hb>rRHcn?j#G3Q#R|f0dQyMALni{4Ur1k-k`?Q7&0`FC~I7sanGr zq%E7GPO8pL3we|*%c+h{h}PwLI=>-Cr?!ClQ5oOex)Tsd@i~A5XVnWALrUN9Z4_QCQKrukgejAz?;|DX(ex(>;{&E@3h1ZCR zVrpAh)S}NUcC(@KJRQbN?QQPaKkDZsY=1slO4Lg**gxEbfT?B z;)QZil`kveOZ5sFiLE`NI|+q?Wl?jUt*Pnhk4~X^G}s?w38QW2wmv=%53n|oQ)WYj z%zpRmwuxi<@F+6?e$9r=$&loBcORj>g(UYm?+W-1X{+3#T}W(uCMz(Qe*GVDHgOb8BOAT5N*LG`3J;<+65czUds=r+v10~t0QxX zt80n-aR|Q5t+xi%o}HWKvrM$WFq)WSFizw^3*01s3JsNU)I@)sL-8of6-uRfPY{)lo1wH!f&A)i@ z$V0SuUxVfQd@&wGE;w+Y73Plg?oOil7w=;3x7|5P%l@0)6K6re3(fU8yv~gz*LM|U zKN+d|V679CYSDTOwU#7CwZ$C?THBd+x0tWyz?SUrlFZD73du%YOowA)TnelF{f}N`i4ABpsUOdOhW7gIkwC1dNXPfo zxRm`Gt=+vKZNc*LM^;z55_3{sqEX3<7>k>iFA2S@;qL2TY6%u!((c?~2T3u~5D8${ zJHv8^E-GF}riiqGen?d^ue}_oQJAsW%Alo>q=2_z;~63HpyUb>fnuP&3WLFSUjWOi zF(Vp)UX_MTEF1)9rDisaff^rU?JGXOIw?sdMvRk^5Oo>Q!6lyYuvp#z3zFM%M}py< zz%OZC@o%61?w*>Q``h<)#b-duzFyW}vnW9X-?Lq0eNL-tpT^CnIw0z){=1jXw;Qen zv22g5_H7fU9G=8}>YD5y-j^F{#GmK9ebRla}?gk;i**??VC~r7ToXY1%4hoxI&Q zPzyePjn_GDetuj}le{t(PPG#L!L@)dtLQ#n5fG_KlAGC`^}I5kHZJc==L&E!Zy3i$ z=f>_WS54WBaXtSFpXyHab={ETp?CNKD0Mf!gpJ;{Q>m5Qxnw8!HnIEqR+Ab1kfYVh zm+2+B!GnXkFcnFpb~>Q~>+=cd@qQ%}#ftY1GDecRp>ZJtV? 
zPHgSquCJwVl;cDQQhFACpH;7Ok);55xBwQQ`%QhVa&VwA)odNH1lT+y{wdEHBuE2k zumA|feWA`*hV0Fskd3Nv( zX>>x1^p^CJ!G;lIAIRJ;G&ZfB>8?a&m9M7E^pn3js-@g|o9KlfUKJ!GduojKUd6sFXD#Tq4ZA(oPV{K%+5M3u57)7KE_6(X z3PQyVuy#%>)q2q}3cBrRxb8jP-{d0K`G-s{o2pG_^x^+DS;jZgp1Vc|*lQo3}Pbwd^F$Z3YSZy~97CIWT6Dc)B;qw9!X ziA8o{C5-E`ok}&OsCf+M!{sjuv9cOI%2&Tou)!)4p@6x)8LViv#b-O9ic!MdqMwzo zpbW4RLG{i%G3Z9(4^n4_k%McraBzGHixO$x&^px3Xi&%xUn7^&ai4N3r;=QQllch6 znG=jJi|`r&O=BojBb9YN)<%GPUW36V0Vr_{d*vuRb(?Fn{g1P?~XY6-kB zO(yP`4r|85Um^SCM^S6Em20fHjX1=Du}AGy5+A1}(9zQ5exF{`<~RCtv5Z*gwpCTm zn8z#5eA?pB~I@>6%JyR5fbg{jtCDna$JruE~F%bkk`c@x+`)g~>)G2Yp(QqiH1iwX`7 zV)x4w%q+t3OxrI$zP`3G7Jw=Fj;%4wohkGtl|G@pH~BdXH0yDW4Hv*78HGF~K z7Pa{rpiOT?y5NEpQ<=VLK&{-Ke4JYQlDP8@1Z<97Dx&#)hMr%oEw970zU{E=!a7=U zz*IDx>*?fgdaO>qmG59|zSE~)2e$LAI*z?31)cu-NB~hBfW=7E47ZWz>SPnEV?7WV=}PQ zduHiT_tlBv{XZt`TPC*-V6%C8G{dVy6<;n!x!tO}WRNMueqs4N!>NFKw9`SJZbGHI68>F?#7 zCG6Rf0Sgv5*6zZAmi~70i`u*|{)m4q7@@X_M*Gd+aR(!BN+dXh>ZMIIsA+^m~M|Tt@i4CR4 zv>a#mePbtnH#eT%z43%tcctNvDq^6wl_qwvzxE$mazg648%N+DD|`F&TTuoCSw6

0bX1gjY(7IAR%u|61G8DVtt-GI|idaAn}L)i|IAm-BIsUJbFNogF*x!iDFW zjV!N*LOUr76;t{X=7%N+GjM~lTv~!2mq}<))K4GyF z5hAv>sAmK)ZRm}tT}EwqV?fSCYJ~#qf~Y+!Xn3?G+T;_nM+t)1+T=5l=S3r zL0E*R%ctn}_;*cyl?>SrQ~GLNv1UvXPsxam(iMf2D&86ks5z)-JCiv7_y~bb-1mlFXw%YtqT=Uk8i|%~ z^t1#<-{di_6|!wkm4$v;buBnknC*Co?6V?O?E2|iAD`*Ng-e+UOv;#+8@ZIs_$UCdw#~u3kr^_GSh+J*I#k<-UtVc7~-Sg)3ZiJ#+ zx*NCS7js@clhPhK&bcmjn^17hduy2a@CfO2UAqC%?pi=Sv2Bxg*2-bXyIZPfQdNF_ zC7If+)Y=#7DV>IQ)DQb`xV2RUfTNE7e%Ys{Tr2LM+$G*<2z-vwBe^x?ea|%+qO0qC zC#A1OQmQ1#-t>~zO06yqz*JvF)A!(7tux{qxI$bOP&1$u_zD-pYe0tp>74#K;yQE- z^po(iBRK)V6Yayp{~#U!VLd_mApnrzFT2t8lK}d1cE=*Abpl&eFB3WXHIe8e=q-+I^H(|xWTajyangMe=gX`-@ zaNRog8^6zo3g4;r2-8h?d!fbqy5^pC9+Lk|1`5o%tr>Zan@>CVv1@-Ct~ua!;uD{% zsHERhd6lst_T>B_mZZ;%&&uQ!3{nd$Fl@!so+ztf| zYhAA=HNk;Q(ya|FFnF0OWbh0et~^WH%g!{(Yd z)n~sw(H%S#?bcO4vhsBPIWIp!E#ECX7X&G3T#JSmt8@RT_4GAPIL5@pSlQbq=V!hF zfOtHgxyei;W8@IXwN9QF==gE6ftEv+5HtTNkjX;#Fg~RJy&v9(wfey4>svW;3qm&Y zYds^UV&JKSKzkF6WHUT%Bg2-y2A5lN`Magohmbr30RSJp2|&91gX@#IP8<*ztlnE= z3qbmTdbG^U6LejD2=3$olbe-FrXdNk8P>XDiJ?658iMZB?GaiXMRA`LmeGBgksBJde}59A* z@(M5v4Oe4gSghd%$}IojE)h&D<0)#dq?}29uHHby@qiG6=TDV#7QtB5O9|<6L{Mz$ zsQQ`Pd*tG?wM-h2{_foKTnn`&NPPQ*W@BRmk+Qw)yBIw23VGmg9e4%hAaVw|j-a%_ zBlyf^(Gf(&NCpkkO8QXsY7c&UyS-UjGs3@DO#zs>2_A7HEWHuyne42r34l&!i`E!^ z@9*IDU;RFD&844$V3qHiFi~Lc{nq1@fhbamB%QbIL1i3^V@K3&Bfoh9t1b0Ufy-?i zwuEJ!Y|nrAg-=!(0h8d?j{uxn4aTi88!ZHWrQB4@HvH}F+ju*>;3|nqD>i!x2Cm24 zK|91;r7jen0hj%Bc3~UuOG6T}*aL}LM?(PETlL5FyDe*TUPz4gu}?dXXb=z%vP5ei zy3A=X3g{!#r18dcv+35Zf32E>e--fASFJok*l;{-eohV*iu1%c=`G0j!?z5?%t+?rPeR|YnYOm&(%s0&1uZqiif?dx;d8FAz1Dy#} zt!TIKL?=azF~g$W-JsCjEhZ7KW?!RfoQJ9FwU?87jq{N;JKSJUw1gA~YaGb0oEk1z zzN$Md3Ifhkc-7x9dAC3-s+5w#m^ZRLARc!I&8tW5?AZVJ7pw6RPIi#SiGuWc<82x+ z_SL)bq=JcX>+HNhW1YQ!E@};h-1YFEU z?-Lpe_@Hs;4fT9rC4K6RTcF27y=m(3A4VRsF)fEkf^?q#Up-V-3-)XF`7dvc6f*@? 
zi9<{|IJ2eB?YMhrtz^w5buWnTt6FJbLF_t6S_&AL0)ZGsf_-S=vy>?L&_9tm3cQUsUg6TjoYerBznS zt|eM1KEZ~%Mt7iVbP^c;mOcO3t%s++jowHy_1#`KG^iom$8kKLDWeTuJgPx*OX|zq z%I!;%Iv?t0z&3E^;>@9L9BoulZz{FSKSUQtm|V6|oWdX*&@8&PcGeZqpe z7mU~2$7mJ^qAEpo*VngGrL-uR^nWv9)Sv&DG(1T@1i#*Ua3>KlV>w7#TCfbpr)I!t zE1*!izVsWy7w+xvthHc+l95IM35Es2hRoQ7!NO8ODjGHo%fuk?Ejo3`F-@^jhoW6v zrGYHC?8ZR#FZsJmctbew)z00av;hFe+bZ57-n8!px5Vu@D4OU&h8$A52I;AtT&7CH zT)g}ercGi^8IkdguiX8goVRE%;pTphLESbOCk+=r zn>1^ABKSgp(=~%U<~f^fjWbaZe-fx(LB6#6M1-za zJccVzit!vet+BFu)eEW(T%Nx*Flha)0UGa(S?oqn9`|O&Cuim4?mcyujv*&n^b` zJCnsSg!`bAx^VZ^iw=`B%Z1#6sUI-Q9)4Ww_rH8GQ<$I#C!HaHW*oX-edpFY4?gOr zj|TOlg=dUbAJXnNID+&#)uLOEkxmI0*tcp;|3Uki`}GOBu62tZJ=t9zmn+Xj**9jo zk2w=_Uf5&&2uGymb4aaOnEB<9XS)OL_GfdbE9fo z_P3{0^p((V=lO}1WoL9ShI6j9RBhga-1II1m8jf*W#LMRY1eQ!f&in6V>7d|(vqrG zPkHnCPu%5cc`>A3H-LJ-$Sm>GM>o=QUsFdXdQJ2Bb2*4+8eBl{du#6$j-33u?}Z}E zI3h@V%|3S4=TPN&q#LMISuALW8Zg2|+rU$65=xVCkcK@%SYt%)gp@pCA-+UK2#Jmb z+jKOjgj2e3lJ3i+wAQ2Ae4Jz%(J8tjHzql~O6%d?1~SU?cTK>)Rb}-0hx3B(xra`1 zf2N3_-$4j9lVePBs#LL)M)}TSAbD#}9>=scove*7`@j-`ZH|qtKHkP^dnW}Xh23Je z*!Slrvei9}mJIr)0GOZFs-ZA&$xdH8T&FCTGa|s|gj#?7Io(H9SFq*Y&n=&hVPA3U1jIdB+Q(Zx$ffes zkZs{6}XeTKE&@14{klcRU-lnjgY7l%@AevTJXKbiY8gGi*_t^KIeq@MSr zpb_7!$m;HGPoCADgz&eC4pZV%p0SUv)t)|grk-z1kgZP;6CEj%J2^6VhaZ=4ee>wI zOLKRjaf*oglKT5+K{IxLJI9&@WoYehqK<_B4c2NHCRpQ1hu zykvfOl=kr*O3N4T#CxSu`cD(1{f!Kf8efMXCC#BJCUbTSdt0^$xipfb8n)>Lc&$#N4wM)zRb z_aKiXqqR^v5G&PWv`^830E4GXIj^7`LDGy141R_G#kQZ;`Lwy1Y>i|6#4my-K_1u4 zr-bJEnA&(Z)sGHQmS&jt2xqsT^E+NQSbjZ(GN+Ij$yD*ekUqCgFrunHQM0VZh}Xh2AmYaCu9%k6^g0EA%?%GS~No=kXuIkEweI{$S7lseI#c zMxVyK+yFmYXy4pMW;peu)x0)aP~RLyESaPALgpEPvr!hGy-D)(s0u8TU&u9ueuZ(v zdCag{o|?P$hWB~t3+Og+d`LI(Z`H%wnO#fgPOuEQJ|0w9NL2x2+v$zd-95ePT6#(k zF^Kl%%J3D_(u)o23KJKk%%1S-T~7T@cp5x(V1*w?YE^`$P!H0rJcRhp z0C)gre;XB>Q3!Yen>$E{?m~JPR5Gq>iw-`qh+4=%X5qm;)(aSl$VJlB;NW2I8Txm* zaT=Uruj$!NzBIKp2k7v)M45K#u_@Ag|l8sPb#|y~&A$_7bye=!k z@FXa*TtqeoUokna+w(B$WC45(i9a6YhHQo@#M=B`tJ 
zc3%toAe^ao)s}vYg3c?ETFMxsW|9@uC_*oZyU)C85)7r8RBP2;IQ=5Y3Hll640rO!vPN(pShM*&;g_N~l&;vAM8ItNjx&btUgl7Rl9lGbE$BzRM-ABC_REae<$`-{K`P$XF* zUeVxt(^1f8weCR2JO#-0;_~*a!NHb-yHK`nLo`$6bKjOPYEseA8^4C^5A*=Nh!u^zxZM+RusVbYM>TLD4vu`9k)& z%1tp+3T30JYDFkPyYOA9h|h&=!Z>T~C(0)_cN30(Kgpb1JeYnWi_}a7x8Pdf3FrT6 z^Ku+UdrAo@CAmrFY+kxKWzq$095$9mQ|6T&+A#IPSe?ziyW$4&4lzgR^7g9buvoXa znsmYBlEoL5O)mtniy`TSyJBdEs(7u34nwX_1WA3pL&0uzb%{D060RG==$c$Z53c3& zcnMF2Pt{8G6ZIyJF|ij>@fgmASQ7?w23P+=8dw12)hsHN|C1ULhS^3sQp4Nwl!y?f9U`6eAEU(+f?k&#l^$ z_@YN11F87@_#%IIN_F2|Tz)=QXZ=B;#E`-luQXt_a-}DvxYshbH)vP1 z2C)PtT)ldHYTU8Ns5DyR-Dz*Fw|6S|$BdS0M6CE4&k*t#Z8{`1mIK99)KMFI^W1kG zdM7r7a^I>nG22W8WvdD2CtC}(u!wd<$u9))nK-4dVkPD!;|sHs|LT zQzv>xu31@Gr6wgMQ2fy^dHJsfU1;j9dw=xw(rw~I-F5QVvz=Fp-WC*0nUu5^KPK1F z&3F!H8_}mrkt_=LqNBFe0=fcKZ6QpbK}aZ9fQeMa$!^s!?A~V|gFqd0o(Eq;m_h_Z z8im8neuupE$nU{a7VfDBRd1Z0eYqdZi+u&IDHkQ{h2!9_zX!Kl2YmNP8~&cnReNC5 z31r$Uz&adGV;|Y|ENT&`{ND-#0NJru#}f7T9we>yT_&ECIksH6DbSApEBWixuh?Jf z^w~Z9D?ax?GnYvEDVS$x_-7`6+~OtUxhw(m)as1X(HZHIe*J)dea{B3efiktskU`z z>j9T!UtlkFZu_MEtZ6K3hj2I@?|fRR-&n6|2vaqtuVWur1`o^8;V`s2q(Sk6~?mDjULvSLLCZ8;IQH+gnyLeQ7XJV%Tjc28f{`cT} zHNN2A{rT2Qiv;CZInv?cHMA;NhLY}WsH$d`E(9A65g{&7$a zjeXTJ9D^ZJSEH2~iyy(@Swt>|Xrv+6-3+2#?S5p2G)~+N?xMWOLDCYQl|r~>dLeE| zC!9zeWQV4z?ih#HFPLS3Bn5hajYtQJMk(|rNO;^vbh{94h|FY%X1(YNvE-53?ma!A z?LmM=i+lsfBttl}!0}ORodbGbC$LMyCmH)me2q-keYj-RTYxV!Um;Kq*qRP4QwuN9QwaLQ&5M=Ifc|05lILs9A$-2Mr57H zijYGT&LNVFA|sq-RaOy+%#)pjjO=re6)Njk;h5PR>o_h;9WUM_pLOVq=sFRu{kzvN(B11EP`vin&bZv!*Af!xu%Kx4lq`%4Rpi{Z`ozz;WikBLv;PLLwfT1rHdQ_r`>pgDvdz-AXS6Uw|$!qtf~tQM~pr3 z*6gds)TA

PS4Vk7VZH!(T=D?qdeauXa~K=mE;=V!AocF&)}|#hexlF zm@-PP=hsW~lRp(M8bQy1if`O#{8IAb-98L#93CJQ4&{vt?{YEdvp6Dt`A%wuU016Y#!&DFJD(Tr)-fk_06An|OOWqNHTT{LA%Y!X(9Z zvGs`~@I+JUmi9a=2jaZVLa92>?>WwzC!U^gGCLP7lRcu>WhqcD-4{F>;{o5#Wjk|a z{5$1=w-4N@_~)JKlgPY$_vNs%`~LpTgsbU2O@y26tRwkC5w^d+n35V#>GpPLxytr^w>k}ghz)=vN00g7>0&?21Ied?9IUF|{T`NoM8jSUtAOn#b? zNaf;d&4$>*BJr^C8UwwRMhLO;GS6{(xK7@;_j&Aws7eY-CXZc@q_e>IhvY{sEOt!q zD&h6~agY^SW5yMUUw_D^x0RWRWyd)G5`YR@jR|CFP}W5C8^~)lgwV0Y=yG}e>;=j^ znI$zMn$85qq{0#9!ry-N7M}rEXNpOYFzIfL64t>=dmj5^D<{$au~4vM_QEEcS#(9@ zZJxsQp2cfEn>f5``%%U3X3FL9JFEJtt)^$&+$Bip*J`b|^6rrCWD$yreFb=*88+SA z%rAR?OlP6d6JxYcin{OyUm>=&>m)*<;yYjIWB<&2?GpRJxNrWU*+81iv#J3>u%FPD zlclWktsE>+FX%$hzklZNa%_KlDT;l}ntJu}0t$euGq{ z7j_EJo8s<#gj2PjHgl7vc4CmdtC=KN8Tb`ac=^gRmT6V~R1=>@@;!pjV>juI^3)qZ z9jK4~PXK}~C9%%1YJ6_bIu(wvtkO2pIWUltrfMYPDIB}~S-7{=@EG`?so&|WC7c4E zP2yY{ARX%U;q_yN*ayDCDQROVH&$O(fTrt6u2@S|;{{R1I+CzlCP~v*Z@a%P-R`U949pK;~m-gfZzK^%m$~jxZ51-LO zxH>zGV812FIL3?QF0An7R&9ls1|anx@IxiP6Iimx7mc^Y?(5G9r#0=et?!*i2Bdn> zW5>@0f(1Jjf`N7=>l5Y(J-wONZTOGyfo9{M$_9)vh}Or!tn|1+iU5(iNwLo+$U%>o z>-1)6m~{bO%78k+TfhI-A3W^v=N)UFe`OLVAYod7HPHitGTheLW#6OIdVzN4)^p+r@bZy5#3wq5)Z7QEdNC4vearPqp{F%Z^f zhgGYx2}DoZS9mF=j+@8cM*uOjA(zF!EGQl5P8%Ra$`BAGA6ezO_*s~TXuRExsB0id zOYwM&u3MThd28koV=|XZ_%R=fl^_@|{iWV)0vEz|3yK=fuL2=NnEWPJbbiraB4uu* zKYqf&)q#Jxdd`BOSD~zlqPP`r=}YVgXljy!Ebg3J+-6(dtJ*wUOcq|#%pw?%0^MTt zhsm>@!{x7KNQ6$8{uMHe4OMBHHl=bq$KLMNQB$ahcHLt7&v0HcMv_4nKG>RVs;Gk*>p~V58p&_YNm~)Y zv)_x^Atd_ZR@b!LW29RcZNxqiNj6&FnDNm*uQbhAtv`VciIDgE{++ zZu*Sk?w-;tBW!7_ow_^QRwbbD&CBX?tLxdFq2D2^Yb4Ud3VB~&^;htO+$YH%z>)Hz zD-zH8XO6de3U3ff)nzRzo-!J!)NQm?{@}~h8jy9;F{2WNq|o9blz2Ax(_~4@)?kwV zw0?t0O+xoZ{cHd17v|byq>_8Jh1<_P8zKimg9@)Cj1S(SHKbkKbDk1-sSgzJ`kiOo zdBe3Yv3XY@-Pvr{rOlLn$k(p<-aYFZTt+efj&VU z7pfVghKTvbk8yzC#|*@5zvDuS^l^au6v46m_d1J8**go#&-xM`4=+9?R!7|HIiA+> zF`wOK`5FiH-5uDR`b3UjUTfiih^#27eGy%BEFtI($A^q(fZ!_!P?#5DDGQPPGl_fi z#6C;P&lvH!V^w3Px0&Cs%yIcJeIbzx7v%yP4UDb#l&=r8DMJ<(tW`7KokSj_X(kPx 
z*~g@F_@45UGr2eb<4s$nP%g9GOX>xSa^EM$UWOYq8pN-!y`PWJ@13lymN&)=F#pzB)pq+rk;Eyd z;Ppq>8)S>|*IF(hh$eONI0)Pb%m{juz}F#!B?o^UN0`l z+nVvU?l`R0QWEW>A*TxW32v$Hr{4~GNPg5m@pypKH799|aq>|Xtn@-gy3Dq-A_;AY zlm2D1&S_PQAIxbVEW1^W3Hq_8=E!!HgaGnm z&#!G`4@F{XxHM|EFkFE~0QdlPRC5oE8R8rB3Hga=?2CQ_IaVy3a%wojML5FwwDURl zJ9m1PvB$S@wR#yG=LjjgVW2#A#Xk9C9@x#DxHtrfRM@kz^w5&E*7jPFxz)6W8c%1P z^7J-zRX{v!uBJT!$<2uj#c_*ar9K7&i50wiHV_a@D&c=X`R%ETe7F1Wh%bFCLt-4~ zbm{7rrZ=A4`9f!(B*rnGp!)6Xe6?E1R^~Y#NsAUQ5Y!^iXD%4XTGE1D3=td%O3OtA56sF{xsndM+X-|Sma>W zuU7%fdIi>w>dDU}JK<}pFTs=<$Byfed|qP5u9{4IFsEY2Zg$q)em7$7f*ZTg`Q+iQ zyG+KE8~vWyFOo6$B1duhYIl<%~~ z^7`3*XDYy1E-aJ|^f5u$om1DSt(8wzrWRv-Ihdl{1sl$P=zXRnZBNYY&($3Pnqsjm zEQIV*!pPkvwffc9geLdWu8!LloUZf=}R-6(wg`N4S@)Pu;x->*(S7?_A?;-Ye zOYx#=OXe-_LQ)-#f14s5wX)lX$0E2(5=2u&7yd)d`O37Qo!Ba8GgCN1Au$*zI4Znz z8H2#Jn}*0rY)fYgLNb@-n$yfT)Ec=^zoKzoHpdu471{0vFDxGYnC_@9vK6B*0^KwU z5J?Gl&T1mau>t)+@cE@obk4rkS(B=o3WNmIsJSe*5j!=g$%4L#0A3i`WwFs=+0C(6 z$-b*5!y97zHDy-|U^fF)q50lw>o>}f;D2&ji_f&)%?|B^sVwbXN5PKl)7ae1+3>wC zC!E01G1SGEY&FQi4{L=~@$*u$elO>cU*{N6jp(=C_du1zX z`FjD`sgJ^e7FWn8)Cj#wG9^M!c?#_6zVp?|GBi*x`(K?R5Pb7Uap=0v3av}v0i8p~ z=b%CZeE7+>?{m_SRAI!tT5{y3 zczCH(tJhmbC!pH%uF3pK{Kid@^{$#Ev&CzpT;_uFV#fcXiNC3WwsKu8l ztdM!1tkJFPu_wV{*ySvlnQeiMRH1gF-l7=?A|NXjNF@>oO*)S^pE0iMeDUM2rl!?) 
z^Ff_-K>R#5|qWSJO_?`#D7+dy44FSQ0!|T1YYl5XiFn7!_cdRjnONVqZoRu;R%)9Dv zMX8$`V0BqR$+}DL6tFFVX>7g+~(zxY{*%~w|YGr4|Eq=f=(mI+sR+{U5v%>(&fVDU7L>@ahL1OcnKB_Qz zKk6S}bJFqc@=g2g53~2KeBkx;st98kQ`tv8)n<#x>~hKxed(ZxVwbn!;U#-mpmAT;4<7FaPC21@W2kUSPj5F_|u_YQr`l$hOr zq323x@{0dF1fNYoff%vXaAOsOa3)Vu8 zW_wH58if1bo)mD`z%Hr`7Fv2?F0ph14NlQ@%PQJ$jAVQJ66x+NjC<+K8UjfM_xu0y zH(hdIYEWkfmIG5LMy45~TahM2r1I5WXE1muTZgz4@UnnAQH*dyL{@<^A&WA;;HrfBMy2U6Srlm5GfnAxc^X zP^p^;B?WgNRu&bKzc~UAxR>fsqbW{}n+~lRQ`Oe*>9{G~1JvVuO@xD1{6H^tt+h$R zT-mbPOQcS%yAaHB@gM(}khi=sjo<~c*cWe|DyB*%<QGTeurCx9H5(E=ajO*nGxQw!$gXS1qRz5FfWsJM zrpBbsJhmEM7lMoGS@d%CH&OiVMg6gb78%=0o|;c2J=q32Y)|||_8LJRRyxmjw zZiRQv`;DINB4?m?01qw0J`BJdpFQH;`wW1dZ=vvLa^10W1Ln}7K)slX3C0|p(|kwL zZSiWDQze=l%vwrNZBBe{jr>_-uHxb1ra*n!eszUD=(3sVo|}CU%`eHgb=^__JhH+3 z%C@r)Ic+@u0^2&j-oW&5;mgIQyvrv!^;Wq7vikcH3>K`B?0?}ft@C<4^%p@~ zrn-8BeuM#80i3x70K;vSr=G-GYuJ8Wwxw3+C{d$g0kF}1jt{kUuCj?{D zgHz$y0Uf}xnn0brHrTveAT0# zW?+2dMUM~ZIhgRH%V_gly!h6<(FPFg$R4MdBD|XjeP4*Xbf8@oO3{3`uhr(m)&}F2 z_mJ{KrCZ3`1V(q9h7&kY8r2~q^v7s<=&TbE!lOZMFSn4i1x5<;zRj*`2Eb4sC|HNqJP%IoTKvI}Sone3KTZ3Q1+7_-4?vZx zH!pDJ*-*_{)qns#KK-E)Xl`CI?^Os}W67UnyPCMTaQ!-hHIIGY+__UK`zqh5cGBqu z30Z+L_xH0U*0rG0<+DH+lzqL%8;Bo8rA$y;c7jjH&3hVDt#H}B^pnW!;26Jhfy^)0 z?f3x4CtwmLLO_q+iXVzA=2TrSl@R3JDkTW>==Bv^+|d!keaPbm;S1YCJ)6l(3`zmG zSCoEqSi#6RN+2~DQ&Fhl06hj40F%(!T>=|e)#v$}(4r9aD8tVV9`}(KZaojbo0BxR z<3pZkfdgj1_UWJ=#jW>3pJ7$wl1u}lVO@*Se$X{&oac2Bd-Lk+H_Vf>HRQGan$*mW znzXqz%x4_)xpH13?)sxK^UY!2hbBwkU#*6)D2@Um{$AurZqfpy zDKa~(IXTbs_1nrj*@T468|kud@J{X6zlWzR8Pol}wp6tpOS{LtZ2=tN)UN|ZV?NNy zC$V(}Xv{FqM*FoZf+%^Bu?`SG`xxE1Dj}C5j5#b6c`aLGp>W%3;W2mPvVpOITfv!G zJ~X|$ZG?}!dIv(rhlQKDR?FYsLyR=>bJq=Gs}KE0 zi}!hQdgj)sL3~Z-2)#L-ES-EiHfBW^cOMZo_31LY`A2X54p`S!|m|mg24!w#L#8wV_X*iC*(#Z`TVn4lIE1!sJ2PK zrtp?QRdsk;kNnb9`2CDWbon%ZnTS+4ZG@2KPj?3*K-q+U=-B2@Kgb&f=(Re%P@p+7 z6V)1Ri ztaS5oJT0P|O=Zd{Q?*0=L2&@bsXHq#y_HH7hdbhs!;k&ojh>GvE*!RgP|~o4rL7$c zI@00RZcXhaUo;*jL0>d1P|0E@Vf^rC60NQdE1z-p>>0xB#{qV20?okMGmW?;Vs+D! 
z@|&nAd}y{x0^X?~|8M&VP6Ad-3BXx>m|X+Jk?z@PbQJ+?95o#=87c~nzj4u>3R$^B zrgh~qx;oLld-*z+H;3Kn*qo^^gDCXPy<6RcCkeevYZReYIK#9@?RPsEL_jTPk$LXz8%8kx zwuffpiE6c@s_cxlnH(vNs4_Taq@@qn|Q0e8Nk(rZwJRHb3Fjq``Y&1*Hnh(QRv**cNrRbcNFggYj zDQGF!7^_z|N)jd$W&zFG$cYtshN(Kaj?(`6Q+4mX`5fZS%u%f z^JDhkIKb0K9Xj>I4s1B2hYJ{_!#S=5Ag76twI$U}ac6(Ipt?`3a>9Y|{(z;RN5V5_ zoT-Q+ZlrE;&pB(>=euQL#+PTkN#!{S5$~|D+$pJSS(z-CrI$kL;5@h8TiRi#A4x8Q zdbCwW6lD1US11Ya9LSb!pmOlD2F>gN-)jGAXs8 zX0|7;=1%5LkJh)`y?QGS7SDkc+@FoRGAWZ>>05@s*1`M_Bje&gbOUb?TmqUV@rXbS z%>bb3s)v6%>2!It>?>|C{K+ibmKYz=lCrtpgebqsN26rWu z3m!?qx!Pp5VAR@>j(4y(cyryJ682zuCl1QD8UKedyKY7s+W$rG8PeTsuCs47rpb_n zIQC+>qWrE^PtU%l%1)Bhs^-no@}O-k_r6(6`DA_}@@NbSO+v+j<%Z4jY%T0S^kw@I$oI`-FW#*X5-9Hy(+m`QDHc@yBkvz9 z_-_u!rJw4rd%oXy%NfuDl4f5Iae(DT!2?rTPcXn0c*(=x>`c2_ z7dD=da-*&D#iY|~)In9HYw0_*Vi7)|?F=d3*_)G)(J+~}vV*>+{;R|%BewBOC_In6 zt2pp9hMpq#K1T`dyZ23D&xS-w?51d`5PYBGoiun3jgqIqrgMec2i5>=AF&3E(v%}3 zw6_5UHW3|Q#(on+CX(MkL~KHR7Y07PyQ}D~RPZbEw)#73!Wncy9d8x549%RmEV)d{ zX|M>P$bSp~LESOMg>{%4>Bm_s9p^#e-;8&Lw83LufPZ0t?caw-?-5u!r|Gq1?GPD@ zC~Ei_Q%}8mtBTzFx=n0nNM(7pny7r-R_yfc!B5XCfJaw87I-{xgpuAYlFKDD^Fm0$ z5t0-BRgg1)gBxqvsz~s_`Re|28`-bm@0uMBs@%wbQal(m&xa{{(5-fQx@XA`O->75 zl^`kF33lbCP=+|a#+TUY|3sG;QizHl9UVVLH7r7p=;ohN^cISDo>VsOKZ(THc%~X| zMaJyeXA#B>{Y(-m-=5h1iR+wr8l1-zMVBVEp`ns04djo+L277CXc{}p)1|{!JMR=e zz=4X%;OfFN$W8b1hp)?)uP#7~y3-CWB@a?S)-@QZV2s30a|F;@_}Q1Ep!!hHjTjR`j*MM7%%l&@edW+VVPTXhV`O`ox*LC?{8W*;PGRQk`Wc?OVh zX3o)Yr&DyN`F}J%Z@!bO^5dt8`#jNKQAZ5&xPoe4nfg1PO->1A1nDnEfuc4;2fo^f zpDAcV0dD^PFWuq9Pwis@1$A|R0$>m};-o2|$gM8vz8EzU-4$!|eQS?WbsuQw-PtL+ zy^}N=erWw_CCqq@pK(hUSlc-3iJ{NN-yGMpdlLFk|AFS^vz?+V&-BqUa*k>vH)Z!y z{wk*4d^7Z|bgsW0VPRiUf^iVjaiw}+m3!B;SUvkXXKOG-l3Q8-rT>x7{0FQyEF!;e zXy*|nyEj>s&7T<|ID3vHjRJllW!!81Z`3fAPu_Do^X^YJ}}=FLGA7 z)BmQDZiH@AqAghKbRzim&bjTXG}o7I96L6hhJWEv63m-`c%f??@I+I^8JbP@`M1ZxLp<7N!MQ_G66QTRGyg$Ldhc!8J@(ZbiRA9g-U?sexRvEUmi)c8xRGWPZraT>h9Y;7wUX^Ez~z4e^1%1F81^8q zpq<~yt8kOB%OHL@5jC@>vf`-$O)_wFAx`8Gdw|D)GUhfDi1brHdg!pkcMs}{1wYa 
zjttX}(f**>jq{g1e5Ky(3*Bt%`@FY-SH`@oJ&kkf=7lB%Xk5(uhB(}Cy6d`zht#U& zyF%b^22X+Ec8+3J>(R85|3huNA-^AkG)XoAAchH`cY}`^$)zkXSn%(?rN6$zJi@le zHh7x=R33$YrN%ekE1L!}xj}*74-cT`M>;`mLn;E;XV;yu5%rv^Wse9UE}| zV)e(G!+o87-Pt8S52A(9q?D}a&dG(1u+;_b_ z+fgl?t|#Ov>wS{LPP}ZII63;{$Zov5;o)n`!>7AW^ZHj*1T_ajaneWCxnmIN@|OaO z>!a#L>@s|Zc}|(PppO2zTnjOH>;j_(jfOVsUTA%8Aag>O1h9ALGmQsrpsCT8=PS{) zyKfvV|681fZH~hmL!Vz8jd|z#D}i>gqe4-$3%FtQnMOUc-T8}6HQ@Rh68RCewv|VP zTF3c*ivVJDQSoa0?{Nw5X!YUby?CT6Vz|FY7JqRqP6U>u_9TDdvZh2ehp(9BCevdk|`Ffu7I&U@9XNiGY3a5Y2tnD?o%{+F>Yx)cNSya z0!$X$7v0*%I5!_#XW)u7pSU|76jF=OA7bzooVefNBd5h8!tPZO5QxVZ{53|yZoEpB;+#zE`H;cz^}%^VaY|A1qJtHO#eP2rrT7{e0(jsrcoq36_& zbb!r1r`US1Mf7!;EuL0?L{*)u|?Nq6{N5JMNOfM=GQ`hsN z;dcDWOLA;YpU0S^m`v;Eo+<^y3ZSc+Od$JIX`g$*rXX6rg#@Y7a{f|@ezMnDBUsk> zF=^mKC1o2 zW(DMilb=u4=Kt#jC1TQ@-MWs367#53bg*dA=`B~Wm!7W&3dY4%SZZE zp5P6~saTFRm^!|>g5VhS3IF)pt8ugIvU5l#3{IpX`zSMHmjk)9=^;T7>RN0 zbZH=WOJkfpWYXY%0}L_OY(e@^M)Eh-fD@S!-Dv>rYw@RG!j*Q7poiOcf*$Uq-_yq$ zI6irLM40Ds=_C^3^M)3B zy_gwThovpQq%7;RK^Q3}{Zi;pM1A7-pUg7C)};7CcgP=E$^*2dvcuoPF(bARdWMGJm3at`W{(17y zHcUwzNA(6$ATPgrN^Q%lv$m4(?ITfG=FFX1KTqrNU})5)~ zum@CI4g$?>X`ppw=hbN;!RK*~E7H0yy92h3P0po0SwYEi&d511-?Dl>ZsrW$IZf|9 zXbS78q?Trl%#1urNKOqO5olAK|AT;KEag-Zit6O!WYXjNYreS!+VdNCUuVA0HCAxS zrVK6lYV!sN$a3|#v*mzpWgm5Uln={%h7Q1P{dKuUT19WLXV^M0@;QU0v`J2xg33Cf z9Tndk4ZD&4aQ)HXby?P(Fc9T2ad|Pdd@;}9ViaECC>+{_FZiOTSuT2-ihOp}m{t>gMz1%6TlLoM^yb*Qm3 zl~chvDmV*kfV*X#z(E-_X;n|DMcuMUl!e#Rdcl$FP}p!Vnd0%h^}X$amnlGitLYWc zJpnDKwn+g#u0iOYcV;d2fEK#^L(QfC9vDB4Vz;2Mfqx`El?z`@gnFKD!2CH&;@hc0 z3j#x2P*6X`AKpw`szKpob+uRnTIu(4+@a~|jOgAjkJO1^4aB!((vEO~j<#EdvQdJ} zRPOKs*!c~o_*fv2viN|^Dx&kQ9-TYfkjRMcpBUsNf}o;*!FTm&t>GUr{6xR)L9-0z zpU1#`WtlsrUh+EYzFvh!XFW-gi;`d_(}5o|n3>2znId6kU>MW19>T|VuGmXLs9xKFfUuu z>4H?WzF1%l)X6%(g%&I3-BxbPhetC;D)8gJmM!LKUP#&xI$Xc}eqkEA&=q7KRlO$E z_O5?1aEH*_G6T89i_E}5=|Q72?^7P~1~-;PNey94>UOk&CKmQPHJA}csQf6Ymuwma zEhm?4Jn=1QizhjL*aLUbBYwirZc!wtjfYpfPH=)BQ=rehQSy+$m@WV9WV*X)QZMgp z9W{1B%b8`x&C@s}dhKaKg$1E^OPEFAM8mB*|f*1(_BOW4f%ekS)DrxiDIS<21) 
z5{a}jKN9e(sF`CA*btUN2pL0uP{&6YOdxwZ@KadfRm+}Y5SKtYA9Cr5$WS#T_=UG5 zpXBHxbF2ZIcmIt_6zKYUO}3{ceqiWQ?YH$oH(#~RUy~K(y$6}0ML^H@i-g&=iqjRli3ycrncbHc$l$Nl#ALgQ~0sI-<4f|j=M!b7kUdW2#jN5_BW4@pP3?; zZUzyAUrujigq!TUB7Aa9mdQNOr5=hv_E){|4l%Z6PEuo<0B&elT%qo>ajaXKXO%Fm`7FI{yO+7G_m!uiTcCf@7BD1tJI#uHw9HrL}(FplzD`OjZu05p~we4Hz zrxK>gF8iGgz8B7fe4@+mv}>1j1XY+s6I@y!yZq}An>M|-TWRg@{+ubKA+l__-@fSK zmv*&`&POuy&eV~g+V=Kh^XYXNRwSw8 z{-#pB<}}Av>a_DpFR5QcH2LIHsm){KC*#@Aqe4ITOwITq(#-Q}~7w_N^WQN6i> z;@i(Xs-*=4O(@9*=Vw+E@hm9|vir*np&w85HQ4R*ZuUOW>sd@7_~*|zLR|Q4;Jav1 z^ReziL_yyfF1xvTZ-KJryU#smCvL52Lm$P)Ez4MbEM1Q0QOzJFPG4O6o8O1=BOe$= z((|EDLbZZoA}6jZhOmU|nCq-9QjfXd@hfTe%DOMu!U{3hTn*uLxJGem+$&vDBvW^0 z69@zhMjr6SJ193ntG7>3;mUhgj-^+~bL(&-BXP=eSA$DXAtn%hLiFkYSLXw)k+;hA zoxT$^b)mTZfHdd?0b7`QCd^4uL=t-i-Z&s*kY`RkBcrAJ60I~GHf6ew%#WUV`wd~)=L z|32tYFJ_G05zSQ%G1+NlW;K|W6dqD1Xj&`KtJ$t%uvgl?$XE-l=qX-ZLzkuWr3hX0 z=Zn2(nzI)ld0is&uXFpJ_F&-0Wvb9jhI$no&cC`52zio!6{9u3Ja*yY?;E`b`*L*O z&;E&aHxkLLPCpr7ntryZwQQSrlkr$-8PPkDiO({yu;I0=$Wu_VdT`X=JJ+sXYhKrG zqWErx#cMO6j)0vy(`36&$8u&sX0mGUC#KE)&9e&c&nrm*=Xgo(YESYUil#=t{YeF4nAA{)7#*z1^y?CDyi`V7&cs#fi*wDf3{!pi{V~x6mIZ4X*bQXhNK$&$Q_2!NYNMC=NHf_ z0d*=F?G}sY+P#j@4^uaubC1nfLjV?@(yb#nCt{<&_ZCyQ`emOm62hTj=s-PSut@yp8AThw)bq7(Ng0dRRvpqkZia9T`xI1O=w7q+ zM6kb855^=3BSJvp2S2$>Yj7tLCt+n5FSGi$qx+Uf_{%@V)**|o(jfMC_t9;#*A`qXZO@9p+ zQia(!4|H{itO2Ba@SV=;P=B}k?1aO~;Q1>v@tdz79}qPZlW!^b9@cSFt0uPsjI%7d zLk}Qw@vT7`j|kD8u5i&u)~>-`{n4#`Fizf6^j}4m6ba^4F&Zq#T8CIU(q9;^yH?3ysLLlt?J+d$7@{2Y1;g(2&Ia3`e*kDf^09)%JgtUfXmaPyzxRoL zEY*7lLYc02m2>^Mq8-=+T-MT31+pcu?|siMu(*4m)=-ugd&i(aCzl?ugofPSu9Kq> zJ|D5HA72ph&00>t#gw;+Wm%trKd}LkdO=yphTE#095yXk@^%r#VL)3oT9s7bJ zajx{Ri3|){O-v%?hHEKnCB^CsH><4Sz;as%f7ccLR<}cj=WQP z{Lt^zzsz5pKUuPrO0d(ZcYINsoaVmPy(+3dk(jBi0O_J8QivrYu+H(aTzbsCz{^@z)w-;kCgD~@8lx#aNOq)Ya(c_(puiqdS>Xzx$ zp_1`lDi3H+-s^JZ`BY^XkD8z>?JN|qJJA~2b^uE;L?FW%*jZWFzYGBUne2RjhttGI z&C%~*3qqkkj^HjDKPl{QRgr9TP;Hj{r0eG2OJ<7YA@R(?)%yJJ?LR0Uf4K|vL%!3#Idr+X{9BOYrO 
zQ(q<6)|&x)JnbE$aa|Xs*rv0#ze?b*?Fh5SQ7>qIDpe3aTCSGK&dkIL_3wTp_a`zj zH4E@m$2309ghE=0*`-JVy`7X4dbh}E$^H))2Ylj-Nc;Ke#O;T^zdi$;5Egtx67qiC zFS6Ou`_7Qgmy`V$3m|(Wp)6GgcCJ$v?i}9Rm^zLPx*ln}k!E3)zhSZ8N5eLcYDaXP z3pparPrOZ=5LpOuCrABWY&}2b#j-zO#>FB@SxSob&Lu4SU5 z#y{4Ai;G5W`+{EaN|!-x1Mdt>jXWMSEy>z*_I)vt1YjrRSvT)b0@Kpcw3~KyCnkOc zOJnIYHAQNyl-!eFlBw{LpXO;(RRlQZrNy`J!{ZX$1M5EAT>Z4m--J;3RW*@+- zZ_5Nb!A4E^6Zx;qq|%JszWqkw=4#nN0lQ2jd&3o^bECbTkXpAn%Z&4uYwlgIlisSU zh(!S+Btsl>^t*TQ-E|oW5YoN&F_RXT1Ssz{m=>bBM{5%aXAaR*b`@*!rhko|0S2Bn z1CaSz>F-!>b8&Z!3i*ij0~_G|LTxCi4(Haq!5E{rdlBUe7Ia6r6x|u%8wgB$r&H=k zj1Uzqs#{d$NYZ%OT9^3OCQ~uLU{!c-Il9Ki-3i zqZ`noO=GgLo5_2J&Z?Gq1J2(-{$kk>b>-XZJ(e4G2f|HO-J(furw)Dj%Q<19U?t9> zQg&iv80tGdvnEEj9Af@~g5^nJd$I@aedQ{SWUi;9W74a$`;Cu}AH=WQJ9A_U z;aAJ-E$icKsg3L0r2{d7V`FX@q&fD`Wa4}HN5T()XMX)WG&SVq-Rp8NM}21!;pya8 zL#p%x|CfJQQmCjBfd8MqsJZw|vCq}KRE58<1g_s|8K^AO*!A#Wn1wbI*zm;a{te-Mq`wQ)C%h<$$Z#<$^oKjP_z4WW;L%|#&1Oj zhme)_r?{EY&um7?KzuBWRCPTfnKc$Js<&EX!veGoRjCYJ1Pd`RVtp4bi%=SQhwb2Z zPV+v5F@>i)k1M8pu8PRmphv5?lW`(Z3AxUvvo3?dgrdzFarf9lS3Aq-ywBr$p@FNI zZQB?9$~KYI5G?QhZm(=VV5h5iaOg_QUfo^ho%}0EZ zzc^&#XnFc?h@r%u$tl-x!CDzYt~ECHM;Su_(+THRLY=e&&>cjT?9`W)O6BEW;KW-e z+|Ex!Eegw}Oyr9t1O8P;l<;=wLOSZ*S33G@MGfMZ#3_kW2Y@xt^6ahHxK+{Idh=nG z(gMXiI(EsrXLr;1>rqzcBNf_(=5~H&MB61&dTNc23DUiC>h%IHI>}Yy&m~VhG!EgewtFV7o0nrePkz7lLZ6AO;8eMYtD&1SRNGNwv5e#*omfZJZZ#OI5 z|NZdgwJAxu-(evpeRbVciz*7~F_U0P%X23u^vc@%<;PnkYRHGJmO)I*mZui6A9&;i zHCn(qZtXlCc3aG{S3fqH`eMiU@m?CBGVSF(k}n#Whg<=3zU%XpqOgyB38C&;xnPt7 zsEem6sw92yVDWrm73|x5&X*U@&EBth-&*7cmjxjYMagoZ)nGr^r}}a4ez!L5=%_l+ z@#`swHGrLJ-A!!6_3c9N1gQd1DPwm8}-4wDf|^&68J7fbZp&d}LbIiL&n5 zc}^q>|2ntMjeUNK{x^BMrdupGKmFAqh&9p8qhK^fb&%joL-z~O z4)?)HyDN5ae`4mYr-5V0J2|$kwx|tWk1T_qW~2Xcn`FFP=t|+<$$loD(Q|N10L?7n zq?I>6x?v9yB+NwbbF+^U(eRoan=-jcKc>}12aA^OS+@-BU z6!Ew2>vVf8Jv|RZnV*6Cp#Dt&v7Ml$Uxah3>VvD41=iWr+#78*Qj3taNM>(>iJI`z3uNs>Wjm(0lV?4 zqKO7B>D)uv=QrF{ac~l{8&Zkg<*xtE8Hye6s=~P z3T;7d>>Q6YMmXXH2?!ImsV%%YX15M#|s_CiBSqaTh 
zx=`_2V<&4vVX`V2R9tj`%*c61$6Tx#<(d$c0k7r(opGvj__-u$eCz5Zx>)^lY~i7v3I zX}2)k3R3%j`-)4fywOQqdtK=g>I-%sU`Srh69O4BXMnT2f+YW9Y^Dv|G5tpDVmNs) zNc`IYuB8?CV|gM!8sgXvWe`_eXXQRArWOc)Vu$zRhwkm-IXr!I;fVfAktNmpvzw~p z(u8lm$d0C{%&XHrI760Zi7k;hJ&hfW4+Iq5ZQ{JT5sNTtbi3x^1tJx-P=OQN?zhSz zthmNR`~sX@lJpU3CNZ)zO4uD*2H zWSSQW*m*Z}-YX6+m>`zCyM1ZXk4u}Ho4#=DI~vCi@ob3%XS3G zVWG&h4a7l~Pr4x>%?nN@n*F|dU}_Xi8i(D1FaLT?;phq0-NSNR(ED> z)=^gN<;x&~gXt3%&rbC&;iMRXlL!lOZ0N&g)kB(z>l)GCNpS2Gd|8aT5b`I? zP%2#zoVg~5r8LAiU7dsG8?pp$vMiO@(9m9@D(6zL0P6`XbZ&L)@_u2%&kzGLJfNiy zY9)l#|KG6tz3UHZ*KY_hbV|e>nnnNc$WutJ)pJoWozXr9(Z(0Bb8Vs6pD!BQ30&Q7 zOdjf6-7-60pei#p2OtpfY1EH%TX{V_P_4`E=P&K4mZl`6r<&bIv|nBv zmlUSmT)v3~U4jAdYjbrrnk@gAD{goc7Us`cibq<=(Ry(K>1cDt51nCEBNA!$8pg_- zBb+KrNF=(wMp(-N;wC`0HaP2M1XpUhFX=ZNKoA^_N1oz7{>ge7KO<25?7x8Unp&{` z8#sXZ&ClnaiFe%sK9>`XM)=@my0?^y9u-6@eXsAmKGL+y0;~ilWr4cmo6zNU(Bte0 z&t1|_`!!{*o62RL=V0Y6e;iwbGgsffN6hs`+p+Ni+MI3Tm(c{e)&JI&?Z837k|cN`GsX#0HA9Ag0dZ)Tay ztbQ1Mqkn}mCnA&X;o$Ptat|qyUp0S;=Y}Ure`v=~JO@YN#1cDy4{y+< zfHj`4#Cg7P^k+zFm!yS3Q4gwgC=wjWqu=|YME3S;ZDNZ5s}}7{nJ z45?L0ENI)u>T5T-l*+p39aQ8NgTf+fIk4ouoHl0f6$&+G3in}dyR1vI+rB^8>p907 z*l8&_w7V=4eP7E=YY#dmlx@aj3@g3~>95plxhvpo6oAAx^M3GOLF~9k2pcEUW~*8h z>M>Rwc2%X?rAJRhCtj8je;x^8cUOpMz_IkH9saWdQ$V#us`AEfQsb+J55%yf9Md|W zp``qitFq9_S0lpL1hQR$HUBG`?ND#=)=^)h2)&N|p4#|7(=@M&ZTRQ_>i_CVD{0@j zD1}IiZ@0HX1>G2JW+80*RxVm0Qp{81|JCs}h#SlvB9|sw?2j&=Pbj|ij(D)}q=${P z3Hw2mC-f8}CNB9xI?DLbPd3uMqRCsem$m84nvNZ2H576QV@o4-^U_f)4`ibZ9(4%= z@e=(y|EoTr_G^Bn(2C0~({9B-n$~{~ z_rW<#`(FigR~>eP)hQU4Ij|WwlKP)N|)}s_ui-F5TAN|B9-|V$wqF3ip6a zp7hTh?vTM+xAy-l(TW9L=>8RhrpYJ0h-c>Qr=$6l)gIVq^Gbsb! 
zx#j=b&XcbS!LTh$^xM!8$ZADwZ>&R2O3rs2q|#CS(~-^y$ex!4mMtlo#ax|Il@VAr zA@QIoD++~m%25z6cLU6WduaWAH4d6Ru%~58vxIL&Y6!;<*Gu>hDA|GP_vAFrL**z@ zW(s#WmKSv`@0+XU#tpm?xu!u85DVn#g8!?O8zP&-Eq}P+WU8UxZ;f2W+K(ea>FT9$ z%Xt?Z=D;+pgSOgBKh1BN1ic^&6Tv2fC03=r_ojz*?fk_Xx~yBr2&(#?f$C(x=D8uW ztP5k|$~VK@K(U`WFcBF{@B*qAYaQw1H&Pxvjsdx62F9rJ9C{`bQ@&0QyTRUZAbGEp^0(xW*`v@FrmH0%!|5e=O9cOvr$51!0a7<+_;D1@^&A7)aU_0_ z>GyVpBO2%-0{cBYB>i0^;q7eFPz-;9Q$}Z&tM?~AR`8`~E@;?CG2_(0a$Gy4WS^G2 z3ysj-B;7qqQ)-A}1aCwz7~9`I1!`xx4hj5QnX{!Ce3}nDvWZE^2Ng5GI}_uehvx@_ zJb$&bX+tqk#C~)#P^iVZ8@Di#@BUL(lN-k&GQ07WchfYm^Xhd{T(f0MGoF8x$E`E= zVNO{I6rmrMm-v-h^Yi$8@5!&rXdFXkp!ZrbuHM&Bc1okg&_BU*8CF6N`?hA+w+uTk zqo3}#3EB{C1HG`(1tH|tLS&|V^&n{F7qyv~^o#^WIk3Y6n~51aaT=(WCB2{+ggV{; zvgAjw$Y2Mz&v~{qu7+R*p^i4mcZUU7!@&HrwNBWmP&X>bP+RZBbaj9F%XdO_p+1wu zew1FfFLeg;h=WjvQ?@i(;YE1N_!~OOxGxkg-wN4A{g&#->pQJDn zbK=u^n_j0o87Fj#lI|Uf_7j~pdtD-W=lm!Pv8YUNJZSB}2V)LYL-!#KS{AOxDGfg} zyCXewR2*lo_}z>^R$_!AJQ4MppYDbXn37{~0*3f&3qveOTYdpi!4rhnnu8^&Qb!HN zk%J`cp+dj>hEh%3U*a~Yp*g*ly_QYPiL0k(O5+|usI9%0F80xwk6&+w`uH(@AmYNk zo@MwJtPy21HViTI6NZ?T?vbMO!aEc0|WK`49s`gyBY$h(p!$Wkogmf{xPC&iCJhoGL8br@o(lzi8KDh8Ry*B9-gJrw@qFHm+8c)~Uqv=A*Si{X28R(xDBDPR>>lZlP1hi^UB&c!S5Gd9i(!~c zn&`SXdv<%=OeT;$<5Ni|}+knuW5S_M*+!|0im6gY z8_ZaO$7P?0!tr?4&ON;baR49^yUGWyw?u9Pn8H7>?BSr-DOa@^iDbvNg}(mYLG-x4 z6w)9G$L?MCANB|c&@LTKnz~5E)SjaPGi8}#;~{4-@B%&SFV?-AFe|Zr8%!IJ)f=0f cbLRmO#}BODndjUEPmeJa<$LsrD z*L(K=u64Bc!9IJ|bFX{-_7_ccWqcfJ8~_0DU#Tc)0{|uz06^zh82{cZb@6cm03+~9 z;iayR*+GYI^84SOEB4ai{fTd}Np0K!e+Yio3|i*47y{>)*~@YRMt5|lO6b(ThmZlf zn#aV^1gr$KXHoA-7{7oyI0dY-0-_#*Ry(#Ix&`K$hfd>6(# z{rwB?r9Pt*5L9KyC~dVxn_Yi+^m}+OZ6ZlE><0^4Mg}&W4}R}mzrjYo^b3?p+4rfY zxW2oYZtd(0+Un{sL!x-k-vn*4>MUFDq?;b^J!r;H_WzmmQ+qsKl^gRgeedS0H`w)x z>i)oY{$VpGhCqF2eZ5SzxSl^#ZBULt595AfLn~sdRc2;UiwaMsC3onc`Q8JYPay4(1d)?%za* zsBd?n*fk!%yR=!)3MTL`3kPrQTgV)IR8u^yZW1Ijcl}U(zWn4QD{|GXDr@JyWg)+{ z($r&Bz~H$hLlE}3CSFYHsABazokoguuEz7<*AGw@|3AY zjULq@@dK8oZO4Xw#|P<-KJ*9kMpN*f&*$N7c}q^&S8-My_?2~_x9tk?EcQ3w#7IZ` 
zP0TFtu8Ce*29Wo^kL21nSZtDh^jwXUi7?md)8%C~m~;Y6=);cs#ie1rFp*hqWq=N#4eSe#~RPDs6% zda*rtrBTeAyt?(G!Fk`SB=GRXo?rMQUr|8ZO<+;KN$gIbDn4#U{KQqv1J%Go;B?mo zWtg+wCDa$=G4>UsIOpTl=Law3qWy{awSe&W3!M=Wik+0L-QDJltgL3U{7Pq2hU_0n z2~OhQ4sZ8x2kGogoMR)zZA>DDgY7Th1!-I-Hsl~yG^;%h=HBcDd_7~9N>Ue|J@I-p zx~J6qps4<;j$}5b>dVE)h2IT#psm~Y_wg;d9^W2%s<+F=RO7E30u*Pd2MIpS|2*g6 zc^&Ncwh=?zt|TATo@1J|FO^ox0h#7Kh`6WZyb(0o|66Kcqca^MUdHs}W!@O22%eA{ z+f~JmLSK5{w%ldPZzugx<-(ZYEgI`bV}_KIhYwR`vxd8=G*i&RS)vW;aGJI+Jx)R_Yj#cROloA67+h z1^-j+;pjgFT`BhorDPnFII?dejXK1qqGogTuY&Tdqq5Zp57>-KZ+>6;ug-eh`eWe+ zq$NAhcT3Y$zkOMqYzkuoj`!Wy836BwsSDGVYnq;=SoB5+{V;f+(WWD31h~4et~km| zd#4#HVo^AzV%ON5k@#@7!GF_x#{X~~UFYdzHrQLBTFq+xlrO;3^SLPV(;gNy2o!d| zmSyo7z+!wooir>ZOj4Ix z;~ugK9pegegNNrYma>Bek0{1IOI~6FURgw3)zjgn@3En9kI<&Q$fBy_s%v6eN70}m zatHdZs}J}BFd)3A%$ItJ=Bh(J%UT{N#s)fp(F`irpXNk; z&}!}Ed}&T;=9V@3$DdJp78tIO<)4LIwXTB(LyW2*@lcJxmp1aIvng+I$epnVVsEV* zd7&sDlPdI(>ZaPZxKPabns*yK0ETeCx%!Et!2KcMgRZtTx<$4=J@ml9ZB8HQjB1vY ze7*yQ?Z`X~@%!H3wlW@MA?lH_*s{V&A>4SI*D6x)*;V>kq4Dx0+s))vxW|98Y0zf- zJdHAsZApL&%1!aI$)tdXgY))`Ow;7Q?`?c$NeXDNz=X=iU|aYqH%J*|ad`A@81$^k z+ouwF7c?~Xi5uj9ofyI-_{6-ch+0RWOPGH4>@8Jm=DZHsab0chr1_uza;xO%gQj!) 
zLlh6tr@t%{yyI~OhOVI1PI*?f_N@>?Yv5c!YAo5*sudG(hXeGp5&>K_$-V>f%ZG+~ z6Y>!)NQ~y0W{S00!n`4Gd|2k1}$qJsza zJzrNw@>ZP4b_EDo+208aeN_|0xN?Ga-Fdk1wf-2A@opNrs1B@`e7Kl>zsNo2?Nm~m zRK`sW91)aW-n9Dd#PYn#u1HT^Ke7;s$M!r#ZfSgQ`Bq(R&b~MnKX|PBx@PiAK_XboAQYy}l9ipAQvN&Xg`Bx&fymqw z(kB4@E0*GLKStfv9E!pcfNvasz&`vb4gz>a28>nC#TJT`o8Uu9o?>P36p_fQK`f*o zI{-Xl3?miv6ki?0_U36?;0N|6kKB6jpMfLeA*u$!wF7sj-xxz1TVyfjGWPL-X!a#E zwj1`4o|=eRj}T!9LbfG05dzQ{c^`lSs`t!^qH=vhIWw*1daq^Odl;r%L49B}8WQSt zO4c7&lw!POWluA-tUx~UU0gk{EnaeeLXX6vEmF@3cWyO-e7pVbiF9eJ)U4xr@E;yLQw}H< zrufC{hoAs=ef7A}pYw9*3_vP7v>~_KVX{_s<6`bugtP?Q7246(79hg4*e2n3ko>0+ zQ5{z+M_*jKy2}*Yo~1gZz%k1iYY)_1qJ5+x=k9nxk9RY{n)%MM&G*ix-hRpmA2{n& zF-S|aPv?e)hTH=J4lwVb9sPA3X;mW41$%~D5NLnC8x<~?5N+dcaV|Y{Srgzb%ou73<+qB5gX@sjy&lFNR7+@l&5nAZ z>n^uVucm!wF}U_->5xahH-&OYHId7v%5#57B#%yHcaW#K;e6=Wtgr=l$E}T(8UEIu z$lFgShkeI z#2af+rTjzb%ydx69P|j&Mr|5ZE9Vgn_)r|o~L>q2*Q#Q<=YOjiXRrgS9 z+Gt19E_|Y_Yy=BKoCGjZu{Tub8Pa^09hv=n^DH`_U;I0w zSUTlK0)H~tbSMSl03j;rB3~+23eABZa87ZEFn3A$R6Nb%k4poxfUc;iFb&WP)4yn* zWlX3^2;dZ6X@|*V=5O-7rVqp9xxA;-AC6b1U&6}3&s)rL{#iArO%@J4es4W8klnTK zE9-c)UP}ZlEQp+at)q);O;x_b3CNJP`pk}yyVEI$U>7>wmP9z}4ZW_G%T{28xx`IRET!V+OZQsps*cWSj*US# zYd_tAJ~5SwBgJtjqC|l=_~Oj{!d8asJM5vHUrbUn)p+?wq_KYXj$%pfLi~+Lzjv9% zVU$X1X%{hod7wstm7Sx(0bIE8#xL&4WDzvt67J7ZY zxy0n=k5f$m$Kd1dRH8P===P)ML_0Uk%~3XX$QL$0;CO$uve9qdUSH`J0nV0=3o-*+ zB;M^%tPC)HS|b>eE}Lt@g)S3F?vXj*brF$m{D$_vp3QUjuQz9@s85%zTXkPTUcd?_?Oo(#um zc5Y?9mT;PHsU+Y9EsZv`XXAy31>C&T7;LeP#)w?E229A{!|&#Hk^n>523)8Vfk(`< zg;69Y<>pJRqLcM$5IBzgvWvg^5lP|Vvxn(_WeQ)6x^+0C zfkh{Km(HbrO(#i}u_Rzb^IgKVR3C{IY%isunxjlWx={9?)$>02gIBj>kS=L?_SzPw zROrBGuP61kaW$gpKB%(4qlzCzCZj`N^dz%HodB;gRn^_x`5u3jBjPz^80cir4+H`CI46)DOW;#)!a5 zmw@qmH$T5h*U{);Q?gDY_-H`Kh)K3`zbD9*8|gVMX=-6IcF2J^GBa!=1d4u=p%Elg z`@q*+-s-Z$Hm_@jlxt%>f( z3N0^qQPd@pr00}z3~#R6*eKiS07C@!faCa^EaNFFGT=MK+`ByHJNptXN9i@FZiJBE z)jXdeqoHP~ZI6DF)~wrS<2MFYU9*UDVZh04p7^yT3o!mBSzXD-^N@P?vZNdkbZtcX^KT0!!U3vQdCM#*8Dre6plTJN!}5t>E_TbNe*vHG*X$xRxT{6C10yEx=;TLmw*ox#i 
z|Jo^s{X86{SQ7>b;BgB)^FXm#N%iH$H7CZ8eWYXDDbN>ips#EAv(76uQ88%emJUI0`Sz-6Y6>th{y1!LAW!%os!YFPI9Y z_ihw`5;ly$!z5)ffx*GBbu@SH3@a_`ep&t{I zi%TuGq&78R_vPmO#;%Bzr{N96dMt>orGlf5_t8vOKIfeLqTOg*)dj{RfYeFmPoAC+ zqMg+BJu`S-R%qfTvcr!*)v`x)%>av7^Dg`CncTQ}OOJTK=}!;fQ}Ck-WU>7X3$mQ1 zEQPUm@f5rn|9S5~4NsuU-{D1KefYWo5(|azlzf>l%fRndlFfJ=P^1e9{c8-nI6U1H z>2yZFP`zX%)Hq@oX6bCId`N!q%Xe|1omgqlMHdDk3P(KZ7qvZIU|0jjld6Ud*YhN1n#BquRrydoRF4(*a!m@wRa63sTRVNM&+kBa@9%42$`e?3Id zH(ejc5?#iU--v%RS(-@I z;H^}yoH_UQLtJtNY}T~Ghi>cdZRP!9O3-Zge4s9ti+TQ~@Ae`D;VH3V@;%C=E$s2x zBeSopKaPNekDo4v_T7bVAkQ>K85fACN2x1W+E=CFm;lNWaG?4cf&0>G%FScbG*ml3 zGk(ynotK>j;1YVul%>UX(EyEKARD(+-E9{a1qO+vxjQ;EAA(~C)_}2Ud4xzE0=zJOBv;)9< zJymKP<`2uw5d*ve3(-< zM)-Y|Ddel63Fd*R6~$h!qkG$^sO+A?Xss##fmf)t@`7i}d89halmLg@tVhG=Ahi{B zdCRz0li)X{O~j1(^kyngRPA=d-66aeQ*lzLp|~t`7Oi;#e%1OB2JBZU76(ay=xxw% z$S&boRT9Udsq&*X1E`5}t6ci#PcScq_9Kt&Jm~v^6=74+-`AYlCqy;zK$YK6hRE-}+md^QB%!O|IUX02U#CmL6J>~r=<~>DQZdmk zg&x2F+i`=hIU&%W5$52lw^LSD@jWxdjjoZ2(Ch)-k3~?x!>)ukbBLC#1}rm1PTd4p z1kRzurC!dhxwfLgqvOD$a#xNIe%OF=7rl;8@}4D7aXs*a#GRRlM&Y;^-O8N@HWTnA zD^c=S;0Wj0e7HAjOy_-U-R-hV%39piuT#@oP7jvOS>U+rJHzg@l11`XRouRl?2vym zQ>ATAMq8jGp`39(`=&0v!tClVllb84mCabt3t_3I0TIo(+c?EN^wp+4)~35d#7WJA zf0@J)!+;&~st#G+cX@qy{}l%@i~7CT`fj5IAAngQol_ji?fyeoTR))>+`%PDtuo(! zt*;;FbFe4|5A-UnCJ zU(4FCW;$k;qpiFl2(@VU+90yM7mwCb+yv8BOwAH0c}>BtMoKclS`lLeZ_Jr&u`dJE zMooM33i0IMFeX^A$~G6H`?;Tul2L#3<3Z0oEhFBZa}a;NiqYd72~}bdk-f*DeTjV_ z2?r<|hW7fg=4r(kaxH~OUsy~j5qw>%UJpO~J6DMWbhvA704;#OX>t6=B+}gx3G@tpXWp{Hac*+lUK** z(};jmR{zct0Nd%-W~O+o#$fER$BW{vE7)C%J4z(j^qCgni&PP;@rSo*#2?7ncZhzH zLhD{2UViihM_D)TRDpfau^mNGP>MhFiDg>Vq6F!XfcqHjEYa4#{wV)3wrPSgpr!CI zNtW+kghEgvW&J^Q;|$UJ&i7#*j&q0W+-Ybhk5)D*#mKi7d0BY1y!`#fd-mA7r1mq| z$2%Byz`?+`Rdd}V{VABAcq`fqbG~oX5;l0lYtHhqU#V$0*gG6*776Da@t<3qlnr}+ zQ52@zIn#-Gg9eQ9jhEdz<=rhrX~KiPmdT*Aeha4Um8BK5ATxsuZ^ zyVCO5L4UbG#^!#HPXH->@Qty{!GW!)$P@?U6~z%$RI@P^dKxi|Si1D#M! 
z^mG>25(6T&sm`ru4>v{1HVWOav8s>7ZSOt4t1}+7fobr_Lb_o>;76ON=yWX>X$`<+ zr)g36!%EGoQK@|xq4}1u$Wb)IR~P57QU*=a6oDx^*y4uwjImMMbEph_<%6EXf1G4H z@+&?rD8J!;NAM}H|KlE^Obm5Lwo&^Qta(+#dR4EKjct-ah3Y!kA!N_{+|_5tdrmFIoLI;a#E0@t5y97zw&g_rvN@3j}mw+h~V)E zGLx!m?AnT1cH?USRsA{bQ{ek7v1ehA>^{|PNFdhdC_0+CfukuPk zW|8$6<8P(XQ6&P_p`r_sEULkb;(Ps;C|;%#2;QwkXAxnh1l787TI)UKRPo639zON0 z%1=o@qG??ETi+zWrKDt177Fuh>({p+DCi!GLO;jCp*Dr+k?01qt=h+CF3nODyhHMDwrwwqEtF4 zn&ZX`1VaW-p5Azpb&B-F*`RNnxi+C%a1jPFxL46P-^a)5NW*YzpEqxasleYwI zH=Qst)X*iX(SB+q1>b5q+!t%eaE@ikOnQH>s49G+jHN*#yn7Ikn5);{sV`{|@hf;|KF8R9Kgi=bI~Q3$6?1CD*XXIFmmF z$PYlf!=ig<%#4Hq@1X93=22dsw4^xKm{~hJg`e8)`);>q~^g?qFLv*%EQvua!;>g1EHwb2#IoAJxkUE^B@ukV4C8{`11ADE|W zXI5&t7z@9HQ}MQ`Q@;^*kK)%|InfNY`GOC4XI{NDTgAR^%}sawt8b$W^A9=;bkb`` zg}5~T-rH|yo2pL3|H9ATF`go{dhN49%=zos5Nm)wZ1n@*5vgo^lLTa^;Vw&0+Y`UKzK3+pIty3lV^M6o7`bF!KyKr=@$1r^4E z*W@SJ((VrZH)CjnT7}dAXUtYR7=gBfA*!Fu%Kkzy-fN^XqtLA6%O5H9pJQ>~EotJ4 zFB7B1X_B6eU4e4-@!he9Vh%|FlA^YCGCHJL1l(*y+#QtVq)s=7kcCBu|l19V>{s z&ki;oeb^7?t`EMtt;qz)UMpLvO*=9*i&N~PX&OtZ+BrgNMIV3EeuR&9o_kBN$~eo9!1YJV5qb}y&Wlr2ELEeB zMUZS2D&hyY9-HLm#t$V)+qTDgdk;Nq+}H79wbwJV)R4lNOM^iPz;9=FOkATpO_r%D zvec=DYJ55}VDi~g!H4DnVWq$_X;G!vcj^lw5u+oICD3dJ`s7cOlkQBw?`_gZ{~{NyGKV{DjIA><;D_I_ z@8*Z(M+suNQ8}KcaSJ^vs$^AF2Vcd3%bu}jJi5$gQh!@S7u4!Z@w?x_GZFF_5#MkZDM%g zFcI5+gbW6V6Y`FBFBH`T=(I|y+DK#;ucu2|U_rv! 
zW@`+Yy=4hIiM(*$Rz&euY``Hs?B;Hz=z59A)kFV&Da!Q2`_{@|s9n+he#vJ*=l(+a z(@jC(?)qATv-Cf9-T%A)Mog2B!%)E`dTYUtFjVKGGK|a1&wQUIJ3SBt>pAR&>CKI< z{_&f-6y}b*s{C`S=1Y0Ukbo%Z@2v@hW~h5_x-?InNttTS7g*!%oMc^nrh8E z;q>ew5VU+IQj<*|i23b9mz}$GmMw$JVmng$ zW(S|>`bW!d?2SDWpGvcf?70aai*C{%Ru7THgizgxKM`4=bj}0CLPvU))fNu}SwT1% zQ-C}w{igsP1B;mVhSM9|>%~*WEwn5ot{^s;7wyw6_&df3(@^_b-@DVF$kTM)_qIq# z;oto)iM0kY*Kb>{T!P>7Jl+H+U;q8G@Qh~2hUA)SyJnqYj^N^32)|yzpLsPPX7bsy zYPi7t^TfK7hD$}X=N}IpQq6Bab}K@p0`UPaf;k*2=f0YJ59{JkQVHVGDF@daKHHod z6L-NxWQhg#q%^02fx*_*W=+7wyzLg^xH$tWR0Hd_avu*p02}ju_m5R+C>gntZ)m?Z zI$197|w9`pU!U5euT2^*-KmRz&V8=G?ZY2vPOr z(`H$vii5%wk86DZkx=C34nUyTAZ{!!e`;V19r|yy?=dEYkAf;3r>y#NlcC3Zu+TT^$j-*}SyO(UF*VPIOJ3S7H435siv@xt=10$Qw zozbG)O^t^24w0FLh-o;U%zlh{oBKH|pWF43c^NHz2y0rs$?nsT=Jh(A-0Iol2*(V* zqR(HGIDYHz{Q^lRa_JvOKlTZwi$4ty6G0u=$@eHn)r1fmCa`LP`)xw^@aef6y?jJh zKQDsP|2ia;-}*EWPZyakuAp2(Bg;>gdFrr#eh4c?{(_iy4ixz<>mBro!w#4JEDyWB zLH`Gv#5X=pLf)2{Hva!yqNnL!b8WMjBM&s@}>sKjC zu0)I|{SNuYnm~wI_9I;Xfk>?ORZd{^Ni8)cw$f5h?Rt>M_^+q7Bl-44O3k(*Q4eQ2 z&x179-2#}PZwzgd4VkKqD)17L$qhE5f`$x=OpAygrW{`7#F2ab#^l2J4x}z+S>Z zD1UU+jkl9+m?4FAsXralTtDBl5XFX_oAqqHDtoZG2vFm3!*dIFQxiBp)5V?r&PX>} zOzWG3FSf)3viO%FpCN=rh0xdpJ$_!Y00&A|k-*QPILsd{V=4PbAT3~vKeT6?!{kzT zt(Ezi;95!k3$x~a`mjoL3u|>tu1yoTgSQ^jWDsfHhfcD^+7dc|<6R+H1E^-t-9PN{ zSIT?L47}`#^~bw}#1;*-kZi08hpv^^?|(FRsv~igA{Co-8EivLoeeT=Pq&6NvY#KE z#SityKPab3`;()dnpD~~hg468$+qH=5yWcJEXTW|b!x2UaUK}UV)%{Zes~@qx$qTJ z$+H_oJD9wW`#IMxbMTQu-wv0cXjyY1uep%PWD=lcXgUn3S);k{@_B1u+`IN++S{qq z^=_%ej?76TF`h>8c>X%veJU%FB9d+E7*S$k@>Ryu4!GMr*{CB#OBVO3FzND-=mLMk zfQRvpO16{rZBu@^Tf{4Y`Z7g$$5)$ZR%eiS&ug5RE?W@!{?J+eK}U5KW%}w+MfP*% zJSk{%xYpjdl+Z4?LC72o#lDwJCmd8@ypq%O;%})VUU|UN-OLrUF8S9XTQr5#gjv~m zU`tb(>80CR?4`Z77FVh3BW%f=Hvz#<7di=wKVTN)^l%$zXJ(cWc8w|;cGQ7 zlzl>rJb7%3Oho`hTF!zAVWdrRX(ZT1KAfCkv|3O#5h4oW+rUTncxp7Ca7q1-=@Z$R z_{sI!tFJ@k3ndROdHL$DK;revZoWTAt>;bHV&y|&T;Q^K1hulf>8CNQ=aRN~fhf26 z+gO*B!|GsCE$-CW3G=qIUqh}ik8$3^Ar!W7rt2rE^$XuKlX$n=B!7`$)+_bD4_OXo 
zS*CXMl9RV@0;Rvam|9F|y-TnW-U_!3;O|>c?lbJ+J@XC|<{cSBJ!tN9Xxi_HN0{0d zd~-GW{8^A+v;B0%(E3pA^`q5RRgm0+OYHrUs7Jm1ki_I0?y6*;(n?Q)k6ULYcI`U~ z%FKULYvV?yl`qlYV#szg4JoMAD!Tmjq2B#^@&F$Rb}8N;^d#PQpds|qk`RteCWUE}+#fk3E``I{$zstxW)}5pHk@h3O{Yq# znN7CKABCTPUv1>M6stBa7SU5*Dqd2{-8|BDays5MF!gU4O^wzp-*8|bSNNKw!bL+F z?Z&QP8>Xm;@^?3DBv2omssGohi+{(tdfgKA+cdqb`)IK6GxS_A#N?XTTy$a>8pC!BByt?WhLX(b15Py?dP_V%v3#~1b?8?M%S5Mq zeYK=OoOomSaXzu_Y7RoD%x8>fg05gqbpne_Tw3t?D7q9L)IOgHizu~bxG3R4(%s1> zFt6(mg^;)VY%QqsdIePt%su2ReJSe?U4k2gg=6SqYOc*;L1|+_~fFb@Q#&L#>N#dSman!sLE>H8EP|HroB+^3iUQ;ZQ1V^8;Q%ygK%Zl5th>ir_h1}}D z9uS0P3xqo)o4|INhNdp9i}|vUWkG@QdFFJS7vU_6it8q>a8?7yg{sv!;&~6wxpu@II_1vjZ={FhcF554^X|4bbzgM8u{u^t-r17M6c^0d z*rN&fFrwEmlLLr@q$x4LbuPl)LG2%t1{VPMg35(2&@<8qunhDXvsF{o%ZizMCR{5gB z7X~YSSV<}~xCALnji-cpu(jXH?U;M4e2Lq6J7_MfF#M77FR@BKl!3p=K~y5NC!j!0 zJ-(ska6`M1fn>{Kh-M=BlElayIZaRKyE+yzCEeuSc}CN6+agln1CH}$dg?Owx4RML z|42XJ?=IvowOtiwip)AHSs2&51m#_xG%O0g;;C+Gv`0v9wGVlSI16_a(j@oFOu^!V zmPp~qe{+7re=j{ckIApMPObH@4=DzU$V zbGhWt(}4U(%}Q&Jn!n5q>E85vGV@i0f)9k1nPj3XkbCoHI*sBQG6XWKpcnZ3^5rm? 
zeGF8D(1ND@o(})Fj4JE(mlLH9s}p`bwgx34)GkkXH1tXTlNQa!O7DO2X2$@OK}03~$mEf288r(ZRJ z&Qxw6-WnKqh@ku<;fT9WCMwg+h zUejWd&Zxrt8%)#UBy&37Y#tK>{WG>|^BxwY!IhH+#>=QmVzcf#8OVJQtS6R`9n;{> z%aB8$fZStxz z^G`CIvYwtj!BAHyLNn+MRi&GXs9V72ldbj=4uN0maQ;0KvB=Ew#STJ?$reB>{H}9c zw0$a7CWZfM?=_>w#X@iaQEOPpiWa>^xPsDkEI7Vq`pIw4hI3~UkoBQ0h#>#Aj=6{X zu4y5N$cj}u=0PLS<)ictrB~l?`mwncs1&GX%^Cht?!}m3L{)AQwhN{A9iftdmNudk zV@p@|LYo8O(U2a?PC#)G^aRkq^_kKfwsMDj50)TXc#(nu*hB#i;*l-VRsyk)`>X<6 z9)bQ$y(do`_BTkTAy{7+o|lCjNPc|;Jt~KtCo{yox9ygNEdB1KbrtPKSbe5e6zN{* z%lO9zm14|zf`R;+Up`{n^+Op9b5=aU7L-QHJHHs5Pwo(JCmdGMfjJ^dbk|LQwLk{& zd4_(B12{5+m~x~T_C?J@q%D;YTDZBv`%6@Mx{_#PGUshvd16`*Vrin5Xf1zt;=+hp zY7SQowFBp7#pI`^+lwXeA_`9xOnD#af_7-Ekp`V_BYp_j(TvBbanoa)G7N7AvuhwR z0q?{gB6ziXO$H17JY+bhtERNtkW+tJe7NoEY1lNK$nWlOF8 zHCnivWJ5b|Y&R@h?lN2Qz!s}xe1Beeb8Hfxx#!lU_VhHHgynrQ&9|!gf|o0HeIiXv zdn#4Cm2Z0I(vtqn7Bo|!HHwQAp(w%h1V6>LAH+r1t%GzxHO19Qfp@pjErXp5+5X#b zWWXBNGo~75Ngwd=7oJ1UVuq@sRrTL*FY!%L(m)Lo?b`U~Vik+Uy(Y%8k<>#KD@RBBaYVJRUCAD$X?~i_}tqM4b4@ryuJj8xk@C$*5~!8B&E1| ziZQKSeT`mR%MX@Ds-fHTz%VvVvA5)PJ8we`f`UDh$K$(8rCY_axAw&)pm{7#TXvMiuW3Q z;39<>LI4BB%<6e9AiB;7q`IP+I6(&FGn#-cBHPuI>gC_{Z7gi*eWuQ-zDUb`r)RQI zsXsIY@-d9w#$t5l*sw&IX)4Z6vqGr=rND6FxkA@K0=JS)tS)?jyo+=IkC70#h=6>O z+JCE4Ibtb}M2`TzW9KZrF}oclc$H5~olye=g=>BIUDf6%NuM-t;6+MrgnKFbd$c}L5kRqH zX(baK0SP(ZH<@%e^Rq8O8I1&Md25IU(!iM@;or)_kA;r>X_Zjm%c3$MsgWpvAgA+HX&o_d_yl2H(1f#eKKJFa?bC=PD72@3o z#Hs%dk1iai+7YM_dzTZ7i2(;!^~gP#gAfT-A9jpE2u)zWouE0-3l*mg;u9;sb%1iM zFZL#NCqqWCWdcVsG|X$BJPou*WYm_?ow26d+EG1>T@VQWta-lvN1f_%p=t0)1eVuc zt!+;7^hb-Z>7g{tivIMn?-m5Feq3Z$I$to~QQBW>-@HU^^G1B!{H@d?VbHlUAJVt# z0YX@z7D#=D0{Z7>Av0r2SQAn)Qz?tuA6=|h+?>_Fl_%|UqEO*?Iywwi8G>j|q=r{n z1&+1PpL=KsT!Mq|85KH7_oHitu>;f;vBB}AHsYrx{msXR`;FVv|B4^ee+cAY*`$H& z%2fwK&E^5WXm*%}!U3rltC~yPB&dO82cwA%oS3bWT6+A+49;|$sS+ROjf_6%7cvs`s>|3s88Jo2Lv zyw(g+#Mi^JV9H=E8{MCb=J)f`R)UGL_k7xzV3vIT&4Tn}D6Hfoa|$PJe(sf^-SD4T zW%OGP!0EK>Dd!kjU(K*GrJcoheX$UsvVPSqa0uIAl5c5$;?h4)>Zx^)Ka%9gzgM%RIx%RCwbSc5 zltX+B8gDplft#@Cp+OVMpoO4Pu(&+F{)vANKYav?b2G-~9 
z_6z*&n&U?JmA!uTMI1VUVhZ7dLd0xTtR3aV{pbW9XRyB!%$w#U`gFW+&jUrmea zU8nfrlg-K5n~<0oz+M3ds!8YhdQ~75T4~`$A+*u7VMS?2PM`aVpkKm2XTSZiD<E<5odJtTbY?pRTPiykDHmN|rO7Uum45-Tbr81}n^7LKZC@$j+VOZ0l zkFIlDj)0?{I>2%CtLy;O+v~!Y%{?sWCjkwt+<*?^ zP3bn#l+(AJ_%V5oy7BL}oBO|`Nb zqvWae?Qtq>>7szw1p`{k`Fa}-9tsm6MiJi zf4+*UM!PIXQU?ZthPUIRsst}h>RyDLFe)}OV?P6mdfB1=VR;xg<e3?`=p!RAw3FFi@v`9AyJi`ay3>@1>>>_d{#U|5RE=iW%sTLD!hFrF5+`)t_AshKgTWeb-S!< z_EInWbkwENpw%*Nr|NQ>^kebm%8tmRBSG{c;o1G+mdvb}2=Vg%4WX-hgPwQBzbOZZ z_>kiCpFoHmxlb|q6@OTG(Cu`qx5Jb(Ke( zXPMXk3tcI&()c3`IA=;{l9~B&$=9LwldE3-F(97J-x)L z3&R$-EV&&-wf05?^drSv-u_M2E1m<#Vs{=m-F_AwdX2$L`B<{CoB9eqfG;P)q)_T~ zmE-*2+=S@r{HF;PpuvNpV$vcD@^bWsIZ63dL;^L5@cR?P=tnS9SP;Ii6*%5dRfx|L z)S55&0$TVWyEHm{!VkoSJ5}$jfT8$F?p;aSlx!@3_oMFd9{PbMbDHCnqAkSYJF98- zQfpWTxgi;v2wvaA8z82UWSmKp32;i{KGaFCY*n;R$Fa!yvvf~vC@h=q&(3J+nUw;# zhN@PZpILVsn#+5JJ*`b#1x;dA8-cDOpH3BMAb*E3 zr`@tit_G?9Odn-L``z>8m??*&C4{Z5hyPwhEC9x#mu*HeIv@C%kWIC4bP^V9#Phfq zKlD5JvE1G7ynDlvQI_$^+m^3~^D2BrH~(0}Tb*sWLKA^zA;4+eX{JP4aB3p>DL!C1 z`|o$NNB=X~%}kbHr&T6*&wy@4@_Pt8KTIc^W)2)pa<^{;f4wtqLyLXGQ_%h^1v=Io zb~X!Eho-p_4i43{08_`^K+BP|w|W?bSIIlwZQ6GS`;Z~~M^XZAcm zxO5S3SN{a@5Ig7y>xaeeV}w#N{8@b?6m%}d%d8#XyO#U<%rRu z^&rYdKj}B900OuJ#TYxa%sFVw%>F?N*37P~4a&BIj4F6N{niBt)${~HaYXT&~w1{_%Zn&VqvY15TK1?O_+XBZ0bG@#K$x|ZD8eGW?|Vf zao?4b<*OI?P8Lg4BXx=yP4Evk;-C}v#Xm3B?yi6JX+%i7|9f`8aYMiLW&biX`tm)pxBOsIrN*H04)p#D|lM{EU_R4lL2?4eG!Jpsk<#~3uRj@@H~MW^^7 zCKENB)NSq`m8nb1VQzJ2V6f z5+HbRcMon21WDt;9fAgTm%q=g|GwQ*UHhTCx@y0y8f(ol$NDChaU$$X#=XB;~cFd&8pc6KiwBBhyDr^!N)@j46aij(y2^08p zfNFBFYC>d4P!=QrSe^rE+b_r$v{$jV@NpY;jE++nQ>4GSya47!hDChfe})kekN=h7pjB)E^oE zCC3OQ3}-q0y*iAiu5;l+YlYlkdui`9M}i+{{7WJ_F40epp)AGjQ4OR942

alnaH z00Uv;^NZk*wOu;=S;C(={1RfqN_;ank}dn?ruc;2-r*OwYc#f6_Qu+*n5>MNryw{3 ze5S0xoVTfc7%5i+t_;txFRh_S$ReTHkh{_eNj{R~$@Be-+&iWBad@l#QqPNqfT$Ug zNuoc&&V}ak1(l7iiQnqix22yiie;?JH-#gwLK-$dG7?^-ZD+V{Q%_ZdL^WWDm^eaU z_pL`d6gbVDGUGcyou?jl2GC1ofEvGQB_jiK8_OOZ2v#m;J(jwii6sjA;XF=)*t*hL zEnT=1?mY_(TFo@NSrNvV*hCovR7AiJNR~O>kN=S~kE2Y-kcwMI_xQt#W` z1@2VOu;+~{U=mKjG{^_&0HfDb4d%?xd_Zy~s(2_A1=#2u*0uZS+;Zap^POdAOFwN} z!h$-M^M{;L1JJacOB&(QG30+_<;W=F+JajoWwgb5XJ(sUnA04ZwH)*zJuB~ydb%$GqB%iO)RyS}Z z>6dQbN;uytn(bKz*Js4Oz8|N0;-Qzh9r@p64-QuU$KAY(PJXsU8aZTWgyqDe{(kI{_WqIYK<-iH7UgAK zx#JzxJtxWl2|B=ayiAqO4?J)p7!~>dtqFf#2P4tMZa4MbIOkp6PdaZbTcy|+&4j>b zL2b*jSxQk6#sa`YTx3R@kKbg^ykV4sXT?CvptiASVr4l9AM^VgHJv$Qw7B;y`F($$ zew&j?r2N%B@w~|J>q^1t9hPQBW^j?1fsSS1aDSSRSf{2PddXOUx9*7KQo#njO%$sE z$@8K*g_=PjURY(?pf%g>Rol|`ZN1HG%*|nB=nl-hbmgwC&Ag_3smoWdf&@6Sel?fF zPKUiFNGLP@iW>AAtHul78dm1gQq*?PUq#;>U`06(f`J_H3G#^f{^@f1%yP$=hGF&n zz*vi>|2IZ8n&zqw!Fur~O+@Z8eT}42s!?XM zb`*wluOuku-VmBy?!&iWgLYui|Hh!0w*w6?qf8H$pY)e(MmS?~rvS#=+8*Z7XSjfi;z%a&(*TGkHA# zy%e?M@>fJx7OWQcqi7`@FWQ5^s?ldggIZcW`_0_ZXfX=Xtmg+uPU|PA|HZ+J9 z{d~0L?k3~plGQW41f;f)Gc|z!QwP_0vcnuW;8En%>tI4Xc?CT^keGqG%%3DGPwv>*Tlw#R13`$!~70MQ2zy5|2-Jf0CvQj2mtMx zv9CHp(x2-&3c)5l*2b7i_%3O9<1(RvD=#i6&_Bn7)wMwNUgpf`6yS^sQmqeDmCYj2 z@9YlGDjDuXE8X&|u1xo4c{1+hyUy>vkTjb_w+66=CZsK&jtSXjDPc?3DByUn^Y>dq zL=`2gir4AFbbkUbH|nnUgJHf48o%0)9$A+^Fdf{A1mco8f1Y$~nk`-3r%CwuIq=`y zr^{MU~SfBH3Av9;lGeQ^sU<8ZQ12>h>LsXHyj|8}ZM>HCj`P4e-7RI7KFT%)UZ;^4X=?A-BZ`}w9s6exY|ufJY9I`eAr z%5~|iOlF()8%y<;eay}#!{bKVrH)iz&(eK!jd5)vFBHDcx9m6iNkzCtn`p9IzD}&u z$EIX#o9EEDF%qy@Fdn<#weH}ousdB8cRtY?X^mdTbWnvv&30Fen%S)@5qLpt=2y@F zD@l!cfziq|-?}pT@_UI-L93A5Z10&J*F-NUG4Z`;;!9=P=B`(G!+AXL8V$Ivv#p1w zj2z?PKVZ!&##)i+l0C+s0@%JO3;q@XV?P7`mYIKVH6;Q96)(*sM!JYg$Dm*aYW8vU`7{XQPpUv-mATRwjj4_-it zarHXD@IPeNogIv&KvB$&hW909Y74e60dts}6FaVN_3z8B88WN6f0VS{$qj6s6$ct; zJ&hQrAnxGhePmERHK0O&HlaIz>~$gEbZRv$Nv9qN2z z4LHG&*WfkN!p004HR(IH;C_fa789AP6sNrh?LgfdnKMj)JGWOgR(W(v5KJ6`&1hi5 z8fZrPO-yPsb$~&V0 
z$@cEQ>)PrysSl5nYF!ev@dyJbfk6qyO0wB@mqoA&?3bc78L+2j z^SLLyymIsTl9A*N7l@`;8J}$l-k4v{saIw=@So{5ufcS>VaYs~P>lPwVBab->G6e4rokJO6d zTl*n`=Kz~v!D@ZKUrg~CfO4fWs9xKe#YKNM_NdZ&h_n=#byDu`v6}%t<{eGx(BSh! zeQ~bET+Ag`ST>BGE#_~p2Fj{O-7hfr@Qs?D(sz$W^o6xwVI5>;b3VX}mAdwikoH!t zmVA`c^UJPMo8Gt(Kssb3+$dkVruou_Kl2?MX0ol`6{ncrd8U{@xyBR|NQVtso(xlo zHiA@g*;xi1QQumz<7hhi6+G*(t~u_Bz)^>*g0{jE?=XK~ zOTif*I*n|fHUg}O9DB^Glmh-{osn}D++3cL{rgl^n@{#3yDzoI`qF7BymO&FP#0x2 zEhV$s7Ke7pPx|kkR6LjXlfdmL13+J=<4n@GAxa*I^Gp2Ygx5#w(aDRpK$wb0oCHNqm|=z62Lg!h(oz zbx%;i#+V0qPxIdD5J^gUK^X}y87_)|W>Brk$dFFTH`~2HvT0}QQEe6|wmp^u_@(EZa%Jb0N;26QWcW4UQL@bZ$6Lhun<)ffCUk$NR$-zI*oiU|Hflr=0gC|!gC{9+ zKj*zYy_Kx2keHii%7GIbGXPbK=Ihh;vm13q-9m)zE95ufnz@-sNWMHGSYM7wL+Vcf zLBIj>6shM~)OzwtS`u|G#;I$L5thHMnKa(|j*Vz<1Ts@~H{5EOvY++N`_e_ga(jEwckXh73x-KPhr76VuxHDw0Mi@U`>5#}#Hu^G7gNjXQ zcZ5U=S&%^N1rRrzdq;G@9}$CBh)*uugLTih2u;o#Pbwn<*mFWTM_{DDfj<%=Ehxc9 z^a~8&l4xrQiWt-walagVjJlhZZzjJJ-rQsjAOBaD0AexYA?(NnzbmA|&|F*=D2xZy zYrFWAo*+Zbq`H>|yV({o!D9vRbdcN&H}ZgAInn#-k}?+=A_qt}$*B50F7T>FTqo~^ zP>z#?_~~H4O=B~KoGY9OR;{F(eB=W#Er5Ntq9BxH_71 z(CWsoA zzGbwBn03aSGUq@PWC>9=Ilw9puv%ewu8|3~eD6s6I&QU53Z#f;IJLQ=1LY7g&8RdP zRT8~0O_f~8W_Ywf+eQAX%0}Zy0LX10rqMZt1Z5Ef&zh$E?!p8*ne~5+d6-}0?->Vq zu40r08}8)MdKlSO;g<9Tq@rvI;Zz9%CfQo&cWRbNK|S{djcN7hVZNaBPb#?tMOnhp z^R%wSs5Vum*Yi_veybssZ;<692`rc-RUf5Ll|rv2v?NT6m zhkaEopLebsPFLKYI}$`9gqus-Qq5U;UW4{N+MT+|rF(V4YB62t#M3WSpB6|}65$)M z*K#iJZJb(_iRoy0uo0q8vPUvGZsbPJdfy$$0(s5|Ft5k1F}@z)pi z7y>1F-rUJExi-T!65Sp1CQL<)7@p&|v;4(9PJZ7QhK}{W`v!C!yr9eA15&%!ONa(H zEnCuH-@WOH;|4Ol*&;0{dKw-D=$YS2_KiUVm@gV-zo8>Na>&xC*dPUD_kGS_s0XcS zw_NPp-MTD!JaJqU5m^B@N*pua9RHhxu1!3y_6xVc<}C2h;yC;H#L)1Zu4=Ly6W95+ zscX9xVOPJlLHGjy-n#+#aL1eL==NQL1 zEzCwL#Hdk$*(50~b*=GE8PB!~ukhvDgMD%M%cdB-@sc1rR1Vg3I{M3KzotZxn}ET= z&t+ng_963aKnlGFOKB2ka}>*fI&fMUo9DKgS{oZ{_j2DRc6Sk=b&g5;_?zkz#Z<_TDEA`J@X|&%3>WW3p$&9} z{Apg7ovKIdVSc87`qO?Ez}pDtKKiXQi;Jp zjzuTQ*fR^y7_A*{<{=c=!R8NOexgUM0o4o@ah3h`#0Dmf0D)8vqRe4Ibxh5T{EUn`^my9olsfQ6OxVvos|_qFV%OO7i~MDn&ksjsWiEKs 
zmkpP~x*!?YykTiqQ<7!kLkojA?$@Zp@=&GyZAf|AKVL%%Az9L0^*3{TTQ9q$=M^g< zo4*HyHQMJm3@t4!+YTfaCeO3ZLtd27a#aZgg)>merlJ-8BdCb6djf1n;>>H zzn5r!c~^=Nk!uFO;+e?#xlc&Yuu3S;y!{88DCH?(No8)mes`iA+3$IS)E!8>^$*=n z1LB=pKOzwUsOC}F*oHtf#v)#d6QYg%C5;e91SmW%%IN)jBYWc_!1uOm{`A7x|6%^|vN{P-wj<4m{lGpBUJ8FWZ1}g3%>|75dOMuGNjenUh?eWG7nmC6 z`mmTY@Rl#}h{8^V_E+8UkEMPtx8WcGUMlnl>M?j<={9nx)RkK(;9KHLRd(eJtf__t zTV$DG8P(E7^1MY{Fp@~uRY3@_aecS!j(t34dMe?QX`U?@N`&H*=gXA-R_u6B2g!s= z67RixSxAPh%8pU_)9mJM-cFtKD+kP5&zJLH)%dx=%*t-6(5J+hgCz-ZtCA6+sg z6>nskvd^Npy480q=TWDBw9!Af%{@@edQWzd9JQLL16{Q(i|GF?ThOGlCZmlM7-tKC zt*$sd(Im>8c{-ez{eM;3S?_CjTK4cmyY!iCT$KYou;M9D30xk_fYw^^Y~0g`9e1Rr z5w6$WrY>3FSvwZwy^oqoZr!WZT<>Jz&tQ{Ym`XI6kO!zn^9yJ@&%fexKlvsMv#*8J zdfJ3lgn$XGz)=gHkglE{-BnBV;82z{5j@{d)7;s`;h1@n;q&ygPtnQ zivaCxY~DAVQ40c1Z&UyiTA|z;RSQH9`3L?@P~>}5PFmm1hnJmPIjCgm>yDd{BFdC> zx||dV8{di^O;iA967k%Jf^Ql_xAc<*&+`x5jbpjXO%a4zm@s$WyRv|dfNpAZk{}bW zx8OA$DN7FjOI<(Mr6|MFB`y_1EUCJxZ_c`(&;$zT4KRPXx0WMzzez)McT2M6=fNrY zLh28=Yc5F;Yb{|aENA%1mB=n*b9aWQ-;xs7t*ZL`Tu+3R7j3}c2@ClrMTsNSRRK^O z+|d9;1Fg4_Jgm&`xrrk1a<#shmss7CYA3ONGe}7&opQ%$QfS^Ylr8`V_Wm5D_%OFq zOE?}cD3~}qN)q*0Nn3buf$b^LA-fE;dTeq({(0rnb5ejGsL5gXs&hp+qt1``#P^ky z8%u-@9v11hRJbW7QfCdW$}Fq-m$e%4BNO0YmRa~q5Goo|IHSkkc8+j52{AE_u`DVy z0COAz>*zyn+rB}EOsEa<6b~AXkXgx*)35fSTD#{r-OS7^yKcEY*=!+)we(DJ=_yMM z1}Rbv2Mx58hgE0s$1H@eZ^OdtT;~`b7|%eqMo2Gx=+XJFT|&II(bEn0q%d%M9a1oY zlv|icc5qN*;K?!E{tZOC3Z%RE+@!ScB+XD|Oz#8*bDO&uklh5U9ZRsy9@;*SU87ez zE%O7zKS?^MC^*n!u<%Is8o*wRC$fQ}b|Ug__VTo-Cpf}Ug;NLCX2AmlL0^4WFF#OsgIm<$`uNU4C};rq5| za4Ju>`_){EUi&kGGqhoRs{^xV5W*5JZMh&OAUf6VTIM??e9d|%F3=p8?Wr=5gt#@rbrIKd zCCz=K7F+r)o23p_i@uBNF`AbI5PM1ZQ{xc@#_AZ{I@cH_1{;>cF{{QKpAB)-x;c&q zIlcR6A`>BiV!#@5mn7w+=p3So3M5GC%xVi+J%<4}e8hzhhF1A-=^ZkFPw?MHyu(?R z+n`vA?pMfvQ}8SVqD9W&6WVd>&Hw23OTny;xHPKpf# zIywAdonjyVKRzd+h$f&oC;!Q2Q(*n|VmOT+gG6&f@c%GbcY2?5Ao(=yaGmR0)aP(O zFK`~@WVqG<7a;LBGd#ImI+Xx!rgye`I`_K4dx>J-;Vi<0H_NvziCtVDGmc4!_6;z1 z6_|HIhtY<}b4}F?ztfgc6bVXS_M4$&yA#v7mb(z9jzeG|oKJ;Q!erQIimI2~9Os|J 
zO+bawbc9f>H9GU<1`u~K)B9heq>HB6g%P11Ak2Iutcz%K5N)Ymqj~W1t(}avypiEq z7L$O8svm*P{PvBOIo7o2i@)9<{L!IETCzc)Nyo^n=Fzg=9{tG( zDp^+*apLh;{FC%|f6wwV#QgOn62LdRyDc*h5UzLRG`u!*m}4YU>WgwJOfV{w`Ex-2r?n7W73bA4CAw0BZSh7t?w!yCSVKCvGYND={IXqf|K905C^-OFE=$|RH zl2@xto(^D>1ElCwmbW*Xd(xb)|7!WZ8y{Fz*yY|qp7ac(K79n{OyaHv1GFf#`~1q{ zsjA6g(lo_;E0=mKGKcx|0Xt&f3>gAK?gUM0B|*a$b2b3Cjh-}%GIB*VCupOHG#C7B zJ!?vv#S z%VRryC~w~0)TH0&=?QNn?9Tm(lig#40vn_{Ec$b~UVJ6E&CoCZ;$16UO7yl99!A~&bCTY!}yu^B-)@uZFz`k0KV8NgSN}zj{-D`t0WIq0o#iJ z_ZDaWsPIIHJ~sn3Ryw$E?A`j6J1SH7c&PYJ1^#e6axQ+CGIru zkI?wIafaLa7nokg-3Af}`c!q&qobd!zY_f^`3Nm=g5Bpa>f0pluL?TZYm&th6AK%` z8Y7%2Y!2g?K^$>`*y%Gw&!?Sh zxw&_BJ%{E-d;S-0Rq}sC%J9F5)HwUeH;$aZglCoV7taF-r2F%4(awtw&g(aoM3fmu zfz_+&PjZ#Je%Do69h}VVhvdfuK=tf>iYCa91gOI7^SpeP0h!FuTe6R{(QIEvnT7gG zFzatVh~qtoYwNt_@Th|wwWk=ZV}(+UGfH=b{a{39XJ4>nHtjGhd%eL{-p?xVE{GH< z-c<__x=d9VUk7rG$`t_vy7xwbA5%4eq~5LXi@bzy`eU_F!d(W;K|wb-C05oZ5N!6D zP6%Lh%*_QLQOVp|d^}Vs214Tc@QFoW^6W73Y#&Gq*rYnT8sLta13F#-A4##%KG9{y z6@>UF1^-)!W!tJIDuJ5sI=%~Z2i4FH@w>b|W7?Z9(-kQ$2XGudrF$|BvBvSGM;1CF z#V*7i4H+yka&eyQFuujmX~L0Ra-pFv6Q_s0gXF#J;#tEo|J1C6Dn$MQHN(@(;>(PV z6BD}^`GWsO#k8*0NcA2r8ig1kavb^T*=`M`-<`Gh(?jW5IJE*5n+2X|vS(Gsy)d*Typs5ceHHL0WD zyCa1MU8e%@N9iw&K+y>rL`e{=q0*A6`zZRyv&3%z%xz{!K#C}prT(Gayjd~LLEX|n zP2bbDJGzd}hEg{2n7q!dN=kQd@}N?7@RXvwYjr3qDHGgjb2IoCB&ApjzL8ER8--YK z@uMmY4jkMDN1+^0V5lL*WmM0x&Zq!wD{f!P0rtqVUIr)&22EQ7gZ?$)(`}94+3!i0 zf`^V044S};60wDZ7hc-8KG#jiHbgjjHYV^kHLBmHwobC5lq=$EX33vJdElR*8?K=v zl0p)-$`m#F6P1(&YRIt=I?n=ufD0?_YCbXFo;2H-oxw{gGZ%|gnvx79KE}~u$focW z4p?x!hOqV`G;sh&&Yx4Z?kwpe)-2?~eK`3nCzeRSb0|P|*G~HHK)W?W&}5FTli{E) zDhk2mtYmm}0I;lL&-WOq8~KHBYYqg7etv5GQC^1vI~xFC34@?$urYzuumb*XP#emF zSufIhfHqx@YC3PRNVIEf=6bcF&}&s~9Fvi+mJ0yS*2>F8E@Wsm5$AIHOIAQD%fiOm zxnZn#`j0K?na?T{;cHx~V*9t$_R(G41ES{F+K?d0r^uw~Da+CJ18|xUGL~l*pF@{d*xvy)CO!_B>XiZZ&uUoqV0kX)vS-XbxDh@eA*RLJ@p!u0qzFY`Q; zKMQHnVPQ7bj_VWUofiIco*T4D)gi);({lwp0jG*T`V9hdkgs)7;lv3HDfVDz$349#f%BtQ*11NJa&tFz- 
zm0iVilJpyw{e6u`*+}=+l}MbdC)|U#ePWnbjJqci$*0bZfVy;gGIf2G?NAkANFlZgNBIiFcGYkGd#JIx?#AC|UuoOAFY zEfT}6;#}oX&$+^to`$&-u_&koeXLADjPGKMb=&>SWX#5O`KFCR!79GD4r#6x2&s?Ohi z%OIh$5Q6Nu!;yLljG* zrH!FXD-iC>e$ZH;9kEl5?H7JC_ibta&_m9GJh2XgM7}dtg|ENcHw<9GCJc_u5l?j; zl&m5>F=px-aSoF%CcGzLz;~3dBC>P)QH^Zac2E77E2~RgUm?}*))Fvr5t9?z0%5GR z05xt`e~*6jxbh)tC8$;Sk%vD3i?5!ATT9XCOYfq>oV>EC*qGVU0jP6m6f!IU88)j1 z)d#WP>?6Qv@#Oa*>>m17Z2n|8w#>gTfVZeXeQt0-EbSjV%6R%4eVQb>uB(x{>(8-N znp)ip8q_q_gH_^>hkMm+EEa~??H;>lhefOIwYBVifsc-pE!Egi|8)TNtsvKT;^LxC zQL>GBQu*W1LmGWV29MpYVA{L$-RKgCNU!N&14~aj4wh?6qMzO@_go5V#(m(DWeX4Y z*sh?Q!(ZpsEx(O3@hR7h-~suP8evC_{p;HwGQD5(M}4sEvYZ{i#eSm90Y<0LQDARn zO`gnf!1AKVuh+BiH^XqZ44JokGF($9gO+)U_O|mshEKJ+J-j?hg)XYVa8Z6ZVnMgA zOQ4VUk+zVBOAE1npmVIKYCs+dv5K#mcsi>yZH4p*jG6&cmh4 zv^X)Q0LPU$G$tkG!5n~9n{_!{3$I!~^?f>ytX*lJ*=vTI7;DZ{MztH^jO>5;k-Qa# zk<@qeHPK^h-kIzZ8@R8`0TfH*OUp3VigO>jlu%D%XPMLOz9r<|_;S_DS=ma;so((RsD{}u z&QR`@s2RS!8a=3G!_5ADOUXfSp2V%e*w-J^)%(u1k*#}4nvPRr$vbnMryZs>(t33! zSP>x33bta(HsTVn!I#d$JJ)}dM$7%%Vzok5mPl(FV1>S)agl9IK*hA(4vI&F9_ue~ zTdySQX8`~OgDu)3Hg29D*}byepEogF@Od}fM}>2UVsyO!ph)M(H;F?bd-wdB-Lo}{ z6k{J$)cg2MFd;YASQmvqUiM7DDF3MtH-(AUuyELk6K^-?-nKZT)|Bq$3;c92BjEURI4w+M~Sp_t{4AovbuOvE$R%)n{DV zxIgwHbe8OZB8Ji$`{Nd-ESd$aA+CGdbXWUFpkkl2S-1P^vz(AgzX+XbG5l6E(}1~p zI~1;Jr4rAsQP~#!O=iCD*5r58SIJ$;o#}=buU*_kP;2dJOed-6Ni{R}!-m3M_zFL{ zPMKi9!tee0HY0wwt~4|LB<|b@h*vb zIJ&Ra1QEHW5r2WKMTW!{1XBAW$C*n)TjyY58okanU27(zxJh&M5sc(BHc^J@9W*xe zu17jMKaWuaFe9+4x>84#Q_Oj$JZlRhiZvMO)u?NGT}~qNVfU{nAjHeDLsEu$P3U4S zYAozZ=s!dVnbUs1_m6V=70nqQ=`2cA<$ifxB2#DAql$z# znU*Yzv^ejJeSbVq$8a5UO<$>t7coyaT~DppPlvYGw!DcDRs|M-IqV{u2r<(P($U{@ z8vdz*C!iyRD^Y>h?@vX13Z5Qf91~*$w#u(~WNv?~HjNq#p;w6b{+y6?SVE4=4N8VM-YMPe9q%81ZPv&vdknr$cQN70%1-LD+%dm8$Wu(bPN~JhW#V|GeEQL1 zmu8GMK}jJL!3(9T@im&x5z15f^^kWPTeCgnv3WE4we!yM!63p{amoIZtCSu6d&#S? 
zux42vzmwl{Uik7}Pd}#$@tQ7ij!@bIzM)Dsd9PUZ*!D}BfUfMRquoU)Y@iK9bS1{Al`v9KQ+k13+oY$)v{LXA~ENY z$kE8c3%RZ;_@;f;W8+SHOr?#kR{HYkb?af}fO~A^N@cbYjdhPVbjG+G^RsvSF-UO(JM+ z`~FPJJ^7r->?g)C_Q;~E0Tmb>BD`Q`I!TwfkKxCua)maVuF$)A|Eih^^QSE4{pO8( zht6O}E$Q#Q%DqGPNrMNs1Q$@&?Cq1ezP9YxUi)-;dM73ApV5wBy`Kz3GDLvy7Ma(e zk1bfImqMcz1qQll=d@!rMM72NbNcZ5lWx*MP~_B}amN^5lBx=B~VeFQ5F|Q-tD`)w)yxKiFR*$tTbGdwW#+;5_A^W_E2QX@>+{+7E;~ zrlw)Mfin8QDuwLOi5) zARXIx`Z$dDtP4$-k~?7x8v}&J|JJGJUAO?h6$Cu1gp}Fnc|YrqTrm7lzK&)ZerMUt zTv)uFrO-X3-aRxqdG6xkW}$glD#0+t1M@-9!~Z(UG(SdMxB8eH5Y>K-qcLQXVS2^hsla=;Oy~EZatvb&RvvpVknx_VPMvNpFKI zNY;IsBz&a>C~}WQY%7Gx6m)xjL`VZ$6qI47VCZzsp-sNuy?noj_GhR^k+v`8_A^9SiET?WY{sjcNXyC z`SZH4lfU|pF8E*Xt?WE8u%hX;SbI0n@;e1c)tdGeI|AcB1nsxcQxCZF5p>2rUv1Yl z^kl|fyrSztkMgg;R&sUecQ_vM_Q;G)UHXO6Pfo%Ybd9Ew*;wJ~n)u>!B!+?*fqI?2 z&HJ&qAXZc8($rP2eHFM-Zm#cdZE|G$xgf}Ac~p!z#jq=(BCKcMFIzBV_u&H64)gqU zJ%7g5R4|$;D3I%Hl(EdR4eT7h42#((l6RXe)f$Vz*j-#^lL>htC!S-pB!^XXE-}`REx@9(qLWTb zy$_9bJLp_^W92@}I#6aD{yaT}peo(JlU-tjXwNZSC&2)$4nI z4x^`|^pv}+>6M>@H#RwS-a%&)nE2J0Z&#PJ90U6q^_ShZt`P~(H>1zln&naLxj5%% zt`!$s`0;GM3~Gq$UNsvI>OS zAKE^d{_IgUV&X2l+Pg!K`lEhUwEfR0^)ns6T8`E?i+?(rO%)5_ksJ#Oi^PCQP?RvB zb1VM))Edps2|rdx#e#>j@MY9+d0~_L^I=)FQPut7`=9#+O{@}>0%~P8CIWEoa7!Gg zP1TWST@Y3d&SkaP$r+`wg?`>kv$UM*AQPSGckvNF^f6Co!7ngbPEo=O~x2OzF>`{dFx0H;GWzCGLf(goNeP+Ot2f1VFRG$n&ms zqgzliChOQ}zbM908uQEe!+CIEnaV~9ki=}<+f>8QSzOMBb=cQeyww*KsIhAl8kl7S zZ=(O)tZAS@?6JOeF9QB_Dw#1uqCgPVrKw}QLpalKtDlS~3p1mC?;GIkpm}^$l~@Ed zg#9^cB)%h}oP7XPh;Az@(MH>6LvFrFCMJmBo>(oQWYO$9 zFH>YM(FmM-q zx{U*`y*h5h7eS0f>L=>`#`A7YpTU2s+egXDo|Zdu4YM>kR%7&h>>E6nup8)JKBOuQ z^0eQmJ&QgPU*Mwge|Q_`y(laJ5}A?Mn*#wtNng4hw4RJWCuiq4idfV6(hp{Gw_xrc zR=6aQZ@KAyan@+yk@LG52S9n}%3Po|cQN8GN=lH-A%x$Ald7VyrB+Thd=+PY%iOgp z@)MJKN>JU{zE{}Y;kf-=W|qb@6&@0}!UGsRJRfveCx4!IFZ3K)CO=M(7^FrPt%>Z@ zl#Em;`z!sSftUx$1?3M`0lvnF)DI9p;>)J7`^&y8WY}AQ_z9oYw)7U;!268*gO+eX z{G)iO?_SB=&HQ64ZP!ajeYZyva{l;0MI+DKA6MyNfwL`Ncrs6-9f%tvPu72W8heNO zsvQ$#rTzV$;H{mW$$57Uh7H|`S}x8Rh(MoW_I`{|A2r5&bq(-%87VYZ{%xPgEX}W% 
z;WCJhu2?&9<5oKvepYO2UV^upwI7_iL)swBDSp(t+N|`IqBwJ5<@nQ!tbilB_xfAy zQOnEGwSV3%Dy@68IbE`AZQY4Ypr-1#^F+1RFR|6=DyGNY-A78$orr2Z{j0U()iM^H zaP2ALy$(`|;G3xxX4MyHn8If4oIVMCad+?I_s&|kyof^Xfy`I{#v@?t#c68oCi_B7 z^sP(AFkSDmhvvk$$YdoU{91;dG^CRH=UNFHz7LxMV|7l!O9$%{-47`gpKLJDOt{^} zW|gqh5y+adwU)8~TV49sA6P@&OLr$1ilApHdA`BX?R`W5-*Ri9ke0Vwy{@}RaDRSN z*RJhkZ?J^H3}t7|PJ0C!+7-I1f7VB)BZ4DI^wIKw)<*wBipg5fe*=wN`}Y08GH4W>2@%0`Bi{YrA5e92u9>=K@5cgf zpaf#O#o7i=xFWOq&!jhtG)hnJM}JCs?b8^FoILe)4*kAqv{~JUFN4-fHL1bS<5{M^ z+Ky#BYRlazS|K6u&;aiE{4*@~wQYxgfnU$AHOclaTL_&tc)Sl2XEjFqRh&>L>(6=7 z;;82OjbrWvlQ`zZ*^p6a8)>vbC!_#naWoKd!;_T4N2vbKYu0vW+U^rcm|05APcO2= z$&vN|0^#pm0BNu&PSQSd0d&~U!juu`QqdKEcmomT*JI0i(5pqk$7 zzw<;D;K8>g_Aso#{HGKIcZBP*6jYN1l=88lq2IzSf3kRRKa8k!1P2H;WZKTHbEtzT zKhwryHlJ^s-RV;FG}BmE{_MTM0C29|sXwE(KOdtQJ7fNB%+cZ)x&)e~XSHsNQqGlc zGhLOyaRAAYZC$O%pPSFd!hzYzv(;T%A_v%Sc;}Tc)X<5M0HiloU|2GS5fO|>{=`ok z$k<>6IgQ-!TL?sdds6B`smSpXC8}6mR9~@g=!3({n*CNyZBf-CaCS(fbum+2vn(#o zU`8n54IUuZHUE=R2h+e_8a(i&ijUuW@W)U=EkiA0WT7OpX+S^x8*+-O48c=Mv1wRR z7bp-LT6v>{oQ7d)51gh`Ew)v1BDTWl=GK|zu3fD7=hSF7T2VLnu$-;I7CTGBRJD;!|kn%IytcYbps z(5uXM3-(tf&F7X(`Xl11Z>3T{}4C6*SW+tND>%L$cyFbOA4ymDvjt;VkiI z^QTov=`l%?oM(;?#TjT>kFS>km~)jYd!@^OHS||}g5hqs0X$S+;xkQrFDHi)Ude?@ zu8{w#7=t$3H8Yzh$klwufWTE#YTH8K>+hpKmOuL|qJ_S5puY8@t?3~M4Sss^(Y_x8^fL3}Blq z$R-?CZ^TyMs{Cm6?zZrLPk}E)+Cm};erG@tH+U>I;RuBhEQr(#z_hU+zn!P_R&D^g(T?3`Z3s7v`PKZm3zgHr|8r9XPJ=RWsk$am!NvD-%PHk`L1 z_1llN3RsSLK5ZS)cuOWz75x)TS(SUf;yQ{5KJP!S3UF$nSoOYHq#j48PoW6#(AZHV zTJ?WQ+J7{T+6Ls!^@N!7MI(M8c76LuM6%@n@zkNqTj-&|c59h)LFA5;$qW@*+}}E+ z)x_W9i3*e_df^}9VTH4dYWGBDt?w?aaPb*;GP{Cdt|fnPAc0%svLM;kmQKoeY4DNr z70JfSpJwpCtWN)%1>ho#$CU?cB^8&*Op;T;? z1GngNh)_Axn+cYkLdt=6PFl-@Un9@rm%3mkJ905&*j-#86WX*KtNl~NHc1!sU3~^m zha-qjTqZFFoh-xQ zi?+`E(m2|=(m7n{UGMCM^a>mdR#$bVrjpNX`EqxyJ1@TU?W6|IC#u>#MoFElvOoq? 
zUmX$Aux5rjqQL&8I+iKOKps2;wxzU#J1osDEmPjH+S=}$e`M8gX{?+0L@Oi|$NF7j zjk`qC-En%JaF}(xBq<99$=S#y%$6gcB?*p@DeX)yv@(oan2J_1i_K_}>7M8mP(TL0 z{XhnAWbLsrsW8N+w_X4rtq{nKeteU2H3G9$a`YVC&WGHAMj8hjMyQ{osVD{U;N|bQ z_{WA?(5u6VcozUusAFY`wc4oTk7ckQj4X!XZ0&pcb?{OJpjGO<^Dej%ULRGqaO-(V zeX}^rh`;Aw7WBE3al;SqpxL7%u8= zUO3D~`5KzWy__2%8|1}_-{~@f(J8lzoEp!jx1OqbRXM9~u3)}Oa!{PP4Y`(6Lo(a< zel+~P;I%bTM%Wz{Cc;GCw)q|#$Y51kWCA%+(YR9RI-3xEGkABtFIFY2qxwqXYsSr1Hu%D4%{avLQDiSV4dB-!4fdJNr5p@ypsr zlxf?yGK~l3?I%26=T#R2C(Y=uD9!$O{}j)hYG4AGaeF0Xx`e`Fd#3=CBz=F!oX&wu zi$ZFefOegkz{f9jIajX(-Ifl;mXj8n+l~ZR&;eyk;&h0PoH!>Jk_p?uq|v-WWibEi zZBNqs#NriOhl~khIb{5s|3}wbMYSD2d%M5jP_%gQ(&Ad&tw@UncPmATdvStOoC3vy zm*QUB0u*R*cM0z9{${;r?X~wl=lpMSo3(N=`OeH|o(C1}dX6%`&O&a5*`-0N@uo<_A)<-MIV6^{ z0k9i%;*4B?uqh69CR_vw#kezeCFx)cQc{@RATPhV5cMP zk9V#v!wZC+M_;!}NXH!d5Bp`>Il8;fj`RH)W%v9$m%ieG zc$M`cUS%m9Q<*>iWBFPhM>->V*vx@G?R@o-nD-yZZcXW?-pTcBc&B;8iuaFZar2<% zaqyb+%4p^gIGH^AI=5HPf7b(o|JW92n?@^SvD0#(+YmziPRYc(l~OryVl~q5WLtQF zM_jL_;xgGWN#&P&5=ZrbhACcW2*HGaA_($$c;lytjip={#ylo$iOhFRqTjs=S*ya9 ze>bvoDEQ`Z3mXIEE60v}(6gTA4cz&%46ghIS3*42N}Q?tXav7<0@hoyz%j!NJjy<=mpb zwugsF@_KD>$STS>(fi2Hoz-cy7st7IJy+U=AbIE3zML0YS|zowG`x7t$dG@4DY3ro z~W;7YquDEupy;1<7FNU_%Jwi)i%pN{4V5gVxOF{<>>)cu;FRV(a?n2}N_whxf^Y zXadQFNjFj5@K8F?WkM$z4(gH+x`x`ylR=72r>`SkiE1O+d!pvdUYG?44RypY zlj!-wor|8aQNlaVIkViEX`SS59Vb1Z3?T`Ao{g7{+$dTU$j_Dv`eoZ`(g%+A_V?cR z&CAi!Q^E#ZL#RsOqFSzB_h)Vx7JX6>I`~B6U{69cR<8#ljf+j zh5qm42-q^J@j{1?`f7H`S9AHU^YJ|*9CIq+A_+v`b^Rsf4dD7*6XZkrXOU7;kYjL* zrk@NDlyEiKZU#X{5KtXgX0PIZpw=SM;(_^}T!lEK=3UwM`iDXcFd95$x&Bp)*AkUZ zai><7l`tVa|9`HW5&pC{{_`4{TPXv#2SIlgb^~&BoJ^zE$#1u{zji@0d}URM&DMp3 z7OpWO4UNK@hb7OCqbr8omQ0{p9B@X9!->9+ik1EhkokwL{?A`$9((OqOfm(`%iHZ5 ztW(IW2MMMvt^9NoZK(IZ@uL_jj2U=-M_YS;DK?&7Hi8q7hO|px!nwL(Z#DOa#aX^G z$3A)tIo0Ri1OZ7N8u+JC3~$?D^{<)5wL#ll{}_3ek=n%=BthPOf*J{$37=?SSwP~G zzr!?9|JJLGc{F^>TBK`*dc`h(E!f`bE)tpfJ9-bnyb#v{LZfbhSH|+@%Y!H9+B;i< zmTcq7l0T8R<`&($c>f*7ITAEher&3n2{a78T^z`B zAmTvt<|r1`|I{B*C)1&`UKKfEJoV 
zYUon}B8w^Ed|@=)zA)nX{Ap#o+3`gv&00j`RMRb3etdy%ghGC)Py6x>9td{0ubokE z?_<#9M61$i*qHpQFW>v^ORkga#zD~qrta)Erf$-Vg0FekE+=_s)vpz|rDT~=J~vJY zDiChy#@O^(;RGpW^R{8qZd##|+-kkrwvi})^`^zwnWLXo82Pm86jQLeo7>bCkC^g= zRca>$zv2%+3XpHgcY}k<q)>FJfoF!pU9-X)9sDSxX?odv7KBV0_!o3;Qy z3!B6!5m;32%;vT-|J4@&HihZ9|GsU1*LK9sCx@u#7ZUmXearBDOVhb}iTY&6oBUY( zb#p(}W$SDE1LX+iWghLbbO&^r#9^1~2FPy>7yL@9AG{zgFZtcALL|0(_5CB(LGE&4 z&K?#7yGVse3$AGb<^Y@~nZnG9I^@(<@_uFPNKP(w3vCJ}_a{1aGhziSkkV$QM7kSq zlUe)5A9>|mWe=TDR! zbc`60&CR++-z>)od;K2TL`fMF0{>Cwwft-DlVb=AImptWLhwLj=|mVFYGNt#VZ?(d ztw_;o7&I);r`5EyTvMwWe2#-V|FFaRk2iM8paf6GP99}Agyw;p6Zyd|SPCTA{FItP zy&+);!MDl?>ZWB@VvVO!j)${*H5(J>Nswj>$Fi+v?kYEq0d{#5K>xGjVBIupQRrkUO&9Xh}H(%v*bq zk4nf1aioyQSEu#={1oP%h)2^tXv3i~o-XtJ^?s=~4hH~xbxK?~#o)QGuXoD>Y z&%59Wi0=T|0|&c!g8r|aymd*e`YbGX%>hvYfEqC|IHbuP=aynhqqlRo!pUIJX6dTU ze(~gD+$Di5s9Nw>d{k60jr;?sl?5hh1z<1YE}#u!Z+@C@YLmRVU~f$2;)-G@sCM}x zVjr}H3TfLW-aLN#{DB1?*Z-Ypsc5!scRStS|321U0W-r1q+h(F`wfT-aOICK`KEQ@ zN4MOnL(bkLn@8;7EYE3M7~DDKZpy3K4U7tf_yLL-co*l?S6^!|?r@#{HhpJADQGHnByVpR&r#iyT-= zGKfCONZGMeMK*z8+$JI@9uyoiLfkVSDwEq)b8!c4fwftt31HHYaHpG_I_=GCJZI$* z0+2K>2l@UY&7|RR(#TbQoOyaP9UK3-piOWK4+ZDzW(@o3-x(7m8$a+Pi@p>To_gdi zy=cqjz{1e>hf@de@y^D-%%FGKJ9R9KQ)FxWf;%7N?4`^YkT2$N3-6!xNy%RJutRi< z)4RaQ{hrp6*Z-QK{m-Eq+=%VuJPd|+0dDo1@`zP^CcXXl)KE{ja~R-v`S+gmo`Z4S z-C_?oX=#|`#04~_PoM}@;L7ZmkG%4m>x4TyVk>OOl9tIC0c3YTE4m*2p=bSx87 z^BYj!GZ#CldzSU2;zj4baw4LKw~i0~T931WEu}J*!e|o-d#wo`i}XsxH7NjWTaq&T zU0I}Yk~OF)zASpPpcRP7{j6-Y<3%XU3+;kRoUO|?QIxA*-dUa-Q)wMt>*P8Kr~jMr zURW1qG}oNbeFOcw@!k!?+K%QHa!6MhYraDnYa)I2YkYgri(3uZih?Zuy?-}9a3pk) zFOItN!tyl9@LdIRM6Q78%bX_ocr=U-{{{&k3f#0JnNr@P!7#z4`C=*wsQ;)_uHL17 zkd*fCRE*n`>{x@lA2;n?3_Uw0!B5D1&SJR2gpyN&^t6FEE>@#fp9^B_ud{={~)x;H1Gt%JR4fF$KV_15g#+;kly&SBC$j9Z2+$~5+y|gD!vQ0+`a1)cszG&aej7sg=b2u zNgLHre)hwO=I==gX-c$Zdu%r(AT~241q0nmQsq(hP;lCHjdCtm_jC83>a!QwSfhD> zxYp5zxfDE+(C^oNq!@()`9aYN{tq8nFCR6#5k;KVH$dxa*^@}3E_%?XIBwQYf!xgu z2&FXiDGTGO!~eF#_xvJs-T+ItZf5PQQh_{QkZema|pDxa3hpa^mqS`XgSg^23a(Nrd+AbTJ`CVQv3>k 
zOiCfPLKmd)eXm|pZ1|unqHZyuR|PV;=A0R3)BGrSYAnN2z^Nm!VB1+79@%f!Z<;Up zzPIjd{>o4yuId1QFTd~qy5M_YIM29Ia5(Y;sQhvWcKCt}->D@7Ien3--%3sGUxpqd z^U7H)DQkrV9tez@@D8Vs@~uP}kiW}RaM ztKOJi<9TN;*ZNkg(H>g$cp;7$RW2Dhak9A9$yfi}=9;s3W)=N?=aKOKIHqc?uCVRP z(86qfH|>2^cdZO$ZV{V=;&q;~Et6f!LhxHq)pAP|SEv{DXp~{LQTwaj`E!H8a(5Xe z+?dqomq6cUR|PeV;mApv=sz``$sQi{iisDD4ibCuGiQr~;%NMb_)T=%M)K5f>W{~5 zCd%#wnJy&-v(?|it4m!I0%W$EXy0Gt-Rup%emho=y{g7jyD=^_eXQ^ITDfZ*SrOBG za5B(ysLkMpzeVTA9d^1I$hNgu80>W*e>m_W_rFxn&HhTfrsjlCHW!4dA!Yn9olY=m zdc*Gga*vQzeS%q(-br)!eM@;|iFN+LSOZ0&u%k2EUW?vOPLIE5Isq6TV%CWnd4X55 zSdfpgZjvKSSz==ML0o_-bP^3_NFN%E_#AyEHCCKqp=uJCySsbJTAon+W>=Y}>p;;F zhFk=_Y;pUOn9XO(wVB@znt)UzxO$VQ>MxgH$^H~l5!_q=X4~yG%W;vFg9p?&D6alu zvRTke&cX*)>6~c?PNRc$Ak~aqWVtyO611)_0Fi~fy9BCEAwF5_LAM-N4!C3&3}SRj zbt1@4Q?KgCxH9*=QZuDhP~~JrcKzeS@t$WRCRVr&`wK^wKr9C~>xdx3^>wF?R_|LS z;6`>%Y+fVwV+sF zzWQuS+~=y*_xyk&G$)e@`>cwM1^XhRY6ONHjFPNo{-TEze-_CtJeVW`1=nDe4ft zU;}&^=6x7Wd=SUyRUJC97l^;q_a9ezkbAllW$Mz-2J^7cx=Jad1{5v4?Ele0b*8nc z7`#=0l+n=x$*MJN^D{&9@5P6@hYuH}j{UA6Z^YP{z4E>Pr=c&3{oG0wt6oXH{peyu z@UIGX5O(+^7uzto82k3tm8Vy5BC2MTIXy%O51x3Q)z4r)L}(QPD^SIHdRafm4h|1+ zcruGU*&;%I6pbr9*HtYOj|Tl=I>E$1_Tx`Goxn1amyiVc8vd{x>EDC$ixmZA=kENF z7I8MuAa*Mr$HXeXr;AbGoZm$h-C4^zdIIU}T|oN}zf0)Vc)w28-V zk=a(qC6+)Yt4)QWOaruXkJ74kv=45T-l5k$`?k7UyuxIgN2xhQ6ZP)2<8KCKfQl2{p4m5 z2ft!`Ub_wvOy?e^Y(DC6@Yz`4GGn2aQ(wL1H;%QM0Vc}sTv6cZ59B{pm52nq;M%3i z#tAAA9*>7ge1)eBO`{Jj1yZj8Q!j{5Gj2|{AJ5rJ*&9??(#7Sf3XWiWe z2AjRpUQcs+$xxOgN}nz!^?yVqoV4)bsS#4KP|W28ju8}yK)rEFFi)V4l=SvDl{&qm zyZl=HOUvX13jljKwG9nGpR#N5S{FquVRm)xKgMeM*<-L-2jqm7ys^Y9!Q#i-oF%1AI;qusO!JTu);$1E25(Zu;br!ls@r{qOJkl zgh)D&#ep2-$sZHB+Z4CiF-O>-fp6fG~M zm{ZZQg+cz9D@q&~bIQ1I`as>#TVjki*cXx~Ee=X_h0gIf6`B9IS(8k!Lpe8*R5HSz z(2`{d%Fcxm&Udro?vY8{LL@6IAN7~VG%qk{eo%#Cugvswnea#3()F+OOCUt@9!%pxw3?mCAek9|8m|5| z&x&c1{)FQ>KqccHY4vR%!K*xp8<8JG-i;|gJPFNp{S3B+iQ|PQChVfB29WAxVtNY1 z0o?>^RER37m8wwXHsA2pA*nLWS)^=H04o*d$WW_lgcH&CKWx8_t=mPGIqyY8urS4$ z={r|e$Mw^&NpR{vkiwg$VX0n1lYh7aRIM`q%%y}mLB@GSEkFB%@R0&8jj}JJhd|yE 
z&w*WD&(rc2mx+vjE0DvqGj=%X(%Q_{NPq*UOC5%0#nB~%m~xeEwDLFo|Bai#h0IaiW0aDYiz+k=++_rnTHRAu5i z=k~x3NwrqjiMFZ>LEjVF7Bjyf5SE6Ac|OuBGOvQ}*`a%pOO~H*vNNEs$^k7BW3{@W zP-Fh2tQlaDSxKA5Bb@cyjteZ2Ho*`^oN1M|FKdt(kUI2k84|xsXV+sxF}#1(;k9`L zocuPZx7ywhB;al9u@uXx8VPXklZ*R%3d7JW7W0E)4>AUlD*+-=giVd#|_?g z+i7+?>%(T(^E}|RFsHHa@!`XSXDqed)$t!pe>H#%J+GTVUn+?CLWe6Se+tpHbz*k zXeUFlHC7QOA0CDotx|+mwoVCe9baw^rn%2AaAQ$K^e)&Ft3B5g8x@i02Z=~F0?uI_ z4%=>SR!i0NkDZcH;^8;LDhJ>l@yc+gUS{Ar5P#oZ5C(K2jVBMZ7r|UJ=wYW73My5yey%(PeZq49~+nf23Puz+HsIc z@HkUQb`Qfu7Jwq2Cu@KbCN$X$5}|yT&MEq}h|vilR1+_hwG#*@(Vj!(wg!Q)3VE7J zE`6@qM_hwEkM;1Wid=vCSAZ*JZshLW1>b7|6hH-j@aaUDU;QM*x(o$2Th_%9%rNu1 zqF{(|E{zb2RXu2Ok~DmeR2?9EeYd0Q^mKxdAbcG-x>A{2noDd{_X4SG0P6j(?0bB%-sG#eiQI|Mb&Vu-lV4u#jd$1v1@T?ow5q5aeI1+0Q0DX(t1FsOaAdkDq*Hss% zs%YYRXKMDV?5P)ha|};hZ9%TX;%QTNM2mdBZ3w;43XftLd+=9AnXj=9Q@=Ob%@o7; z^QCo{_G0K<1{Q1O!GNijBM{i_mpU#FffZeha+o)nBvTb?8{JnLKjagweCU9h}$~*WKS_vSe?|G ze=l7+PLg>Wul5uqzay1#L5YZE+svDF;75xAV8&0P`tv+p`|gLzUw!if2s2u37AQ6L&~0BWGsMhFA+P;yV6_Jc`P$cPhI zEcEISvOB$<>2nSR)nCO85dJw zLoS7rWph1MwuLXbMHvs?sT#enJZzN%4c->8ye&J#+>>L*r&o6l5}$MVB@Nf4P9!Bm z=eLod`4-&5XRBQYO%Avjv7d39yVvzQdI&zXzUxY!Nlr1&j4zLGULPx>I&T~e2yGrT z63PwcQPR#i@E^KHk;_(SYtrH8Cw{t}s;$X)sXVFo-h8j?EA0s-EQuVNaF`}E_-EC*bEZMvqaQL-tC@#OHH9;b@>q2*G{@?;3)YNC0e=(} zW5ahpvnO10ZX>}vVOykpB7~? z_Jyq|tf#mqT5)rD{oXGdZ@XMB#-?*_c`XnYaIViy#)8|TUk>#(Yo|(O5ZhhP0-q&! 
zF6p;0!+#apHKC!h54n1|1{1;hl|On0Y~7?q)xMM9;b0B^Mq33W&Yvm~i%+0|Bn?Aq zQGXhR4k~$p%FLVTC4R$Or|ZS$&3y)hprG$eYv>m~pxd<7@4XGulBLf*{0#k)4p(H` zKQtIZT-nPliK2gDE$6se@bVm<(qQSf{qR)BGHsidgOJCVyB$TX7_i1YFBMuyWe8C2 zLcJseOf2x)C#7fFn*3`O?Q4r9u3SX8XIK}{o2Kv2Y&tILQ6SeB#sVm_1N99?#e|Py zdm-~Fb<`LG^H~w#V|t0kRnGmg?XN=;Ayk%GRd>9Ji_)RWYE>|yNzk`+-=5wewv-lx^jmN?iUA_<%f!<>t@j}ZC-Gnr;UYH$ByzM zM5Akz^H{&{L6@`QioT~Z^B=;(!${XPkXH1CGyaDoJRgaNxi%H2zBq>~6;Zg2fdQ%Q zW78?kfZ4_Uwb)PxKd@Lak`WnmufN-J-{Lrt;AXHRDk267(ssyYO6uB^q*L#RKf6A+ zvS3OkH_Pr*m%sdpo6bl3O0w>HNYstWTsMP zRrbaTDAN4kk5z$T1N~6o_H+ANtPsJQ?;=KY@oIo#9h2-Ez*F!glN4$heiaW>U_qA5 zSR~JFO&nssOBhx!okP())&PPW{^3(e$gC2xF-kHe1`O=q6!Mg0m*gpHy#@BWN`{^I z^BY|a?%lhzXe(KkezO7*zJVcRs4p?^Vo~K)pm=0;?nN13rE7};J;@Bs0=^4gI8IgY zI9Z3gvL`x0wu!Y+*$6)!r16GO1RMAqZ;vBITDsM~tvGFb90y+>9@Vra{1T&_>+l zac@`jJ2xW93ndhEJ(XJs|IU8qFSi64lEg=jKX~CD8!P zZ*JiCJsar*4g@q?lyUjqp1*SR-^S=AZ=z{7|7)Vw?&S2mB70~<`e=2tC6Z~yA#;?= zjjr_k86!V$V@!~@VV?I8W$KELL;0f;|E1-NE!jJwRVXHiz3wXQz^AN!VMT&TBtyi) zU~k_j&@`!wRDC(I&`avpSBEIfN6#xthls%&lZzR1Gr|A*Vwb11PW9)siJ>9EiP3d0 zHgKZgp{q}K(cn*yAv!5aN|zyHZ@67nY8%omGUL2gp(AB8DFk<%%@?oPU#RFr4~%m2 zBifq=Q#{DQKT}Lrje`3rf{`={!e-Y|O$%0BVl_HV^6YegMMA*%AUmaQwF$)4BdFEB z%u`H?{2DEjx9AEZdIZ`n2qJ$=Y1>L!inFuMNvv{zd0vQgazBh07X!kt`QCW+uJkI69{a*E zK=X%$|ADjCwwH=JJri$l$Cj+UL1jvL`1UMHh?Ty`NYcR9yweS*k)U=9nUWQDF+9F& zThe52n-G#|=z3M3arh^tS58(KmMr9uZ1*rPNpr4MP69LlhP++K$mSHAy+|ap>=f?X zM~TTCEEra@(o`=dUbr)GO_HUtBSCO#`~tiOeITgV|2gzJlDS(UF_YXz1Olw@(#cRas54}A!>%1aaI z@`Tl_ykFbz-E?N_I<=wR=-T7{XahP+A~OY9uKa)pKoV6VrmK+)*dtGPrmZr z(hD&@!|ALLNj#ad>!|0$hFyi`A!m}G^w6Lp4921n(e--W<`!0 zno0mAzOG)>rsrF?%RtG$xsVK&~b*O0XgUg&K|AIsq0Z~q^8E}pr7gRN#92mSj zEszNyLehH9cJ7&y_mQrWxsJ3N_WxAE-eoiM<^1si1|Io6$yFW|;>na;ENFrQp?h5n zc=%2^O{;(pZuA9RPB&iWrE$%8#~_4|%LO=>$x{vF98*9e5<6?uzuL6zAIelDntQyO z;b?j{9)W&H$~-lN$v<6=5Eu*!RX28IQ;}quy3eJrL_aSuXkrDlR@==N)=T*gt7eER z2^sLe5#71yB(JDm-;{YJNJPc6ZLtr zm|OIIOUXu@yXPmj(g0-vPRf>%zs6+00+dh%nJBM{3YIxg+^&$*2e+d}SGJ2@vvVXz 
zs*Re-QBY{l{G@zyZqH9bv#!sK{@n-=>-b%^VBbhiDm(l!%{e?3-=kDJHmHN1F%zB^+pbZuDlDZ zk{H6|>Z(Q_H$M=0&x|F@?^;x`Am3I~{q@tyzir4{S^{@6%$q9^Og(LA2(Q0uO<^^O z??|oq#mDH&hQhOax!=-hwy9#4x|*yFglFlpC^@-ZMQn1O=08IT5nG;FWCT63c=6t zrB`C^MC!%S)$mhNJkVdVZ>JIq9Q$S{n&_Tki8>Nd8M}>uO#u~0f zPkp-|i#=$q^Lukr5x+=LIWX2);lhkJuzo3TVdk!E{<-rc>hT88tp18DbN6~4A6&m<lY$-b`UF|%2FhTQyzI$q*5d4 zTb%$u9r+qXV+*ORcT5re@}g8`Bm0HU-TUo*9QUyVEUwC@@=*kz`~EzE1Ez*L>tf+! z96<9&U_}-%>`Mgs)+_pS_sE#qmLhkY*64k85ZMVkrdI!@0`kQR{wlHiU#DgNpUI)) z5YidL!yICdd?J5*k7U0x>LYaBIkwS(wfN8wo^@bC0kjWLH%Je)_&-oRJS`x@a%r&{ z?M3G+5C2YDg(!+AyxjX_u4k#QBxjn|6$uYD*h)=*PQlQ2&$4#HwV&JZ zKq_T{nCFF{SnL_U|IySxk(TS`~fOyxt}Yg65_Nt`>Rxv%nk&@y=F9 zK7wGlt^y?i&x|72fDe>8mg6TdEc=C3d!Cb9;joa=p8YZu%n0ww^~Q>dLmW_Y!`e z-Cx0>m$v$KhqB4a41y(>`{XV}`i~+w29$pkRSvT5CQkjmnl8&k10H`s1*$;sL1B@-vwEhV+lTFr ze^fUu<&3C~6!0usk;kx>B*mu#syfl!`DPWEEIskpv$;v=z~*dyvvK~%qj|#eN;s0W z)_4Iz_3IoSV)W9qm3>LFh8?T9;$Klr8bCo zWUo*Sm&i6~SvPTrI7}l!M{tWF4&2=l;faqWU>({CzT)4i1^e{M;mNRN2c5pvEo$Gq z9RU(_ir-jfIj7FPSLEfU;6P#M^Uktkr8&Ivm|K~9M}(&G^1EN2P+APuIS?S#zuryA z9GK1e^TJ=xQ2&AfvicqHW69l~q#;2GoJqp-G+xaupEh4taB%qM{m1hYbwmv31m>Kp z$GzC$4?vI?;#DRfp+L1iqVAUgr0${!zW3F2L5;)t_0G`u30geVFB{C@rz&nH)|Xy5 z?1pqEkj<*f0kKgV7%8E>4wB?D-w$~h?Vm81Aq;OWICg5ESNUDgi%Y=}n>=m$RkdW_ zXim5Fg+;Tlz7W`dYa($yf+W{HkJ0l(mVqUDY4}K;3f8!B5hXQbL+R~I`5pV%Te+WP zMHFvSqvkweck5hz?i|s=?`V*t|L!lR9Nujh>qtN~jO6G4ay@ufmp%J*Nh*HB2ERwk z^eZh7YePiZHY02Z$|>j~G1n5WN&-cpFC; zY(ZTeyQMf{&+WBgH5Xb~ZX_{tSu#20j^Xn@;3}xACm;XPFKd3^qs;_!KpQpY2 z8Ia30mMJp;4Yq|7H=NRbZtK?eK~@UwC`P+lBLrq`tW#L~XWBa=M?U#;1NES8t6c2>Ih%-rv18YPvf$v4^@FoEze&FN zQvK?w<=eBEfagWuoeT;8QAQzG58y^h`@nzkKQ`i-s7H>0%h?K;58>C(k9_`N*IqSk zZM7>YoHucs^SVAeUe9$Pcztc5TDVrBSqVP%r`l57i44jjok{`=%%VGS8U&yrL)v|f z9@_j@^Y!_6r_|5|Zpizg&9GW7$`m#4QjEHE!NQo6~}v`*nn6R(=sSGB1hstVkxIj70 zFif*bKYT5|aic}!#)?@BPi-6oxqQRBr<iu3)vl3A7T%3vmx67K^EP5n^`?gh)bC=F{8d*4virT3a=bg6#h>FHy(vC z?Kat8anSXKoGhw#@4+w3N_W2|q`c#W9o@FN&)}dOxcq+QbT?6T+eFA8=HgC*VOXhF zDI+-WuX?h>U~{flVDoaA)t)2%WinAE>^qqnN5|5suvAV|{68T|UP-P)Q)?S0rPWVm 
z{6_gf5x*FfOi*+&k~cOeZ$t`!>S|sCqThviU0CUn&|R=NK>1U|8@6#pwRb)o zC*=5?raXuIlDQ0UmtVJVu$Rr(WIT6sbA+_a`>I?R6^d&?ADg)a#I|OM%3xM{zdgXf zFqD^(p2UJwH>8oRvw_%7BHD8&s#Zfhkbz^?+vBHyc0!^I@~3J-g0ngeap2h`(i^Fp z?ZB+un*Dzyl5o_`t|>B;#7K2CvJa0PSJvfhWf&#ZtH?dvky?ZB7*>#2`dG!xHVntn ztyo6j&YtIqSpqyugMv~#VT2+U3*XS*LG}|PJ`uQk!WMZay7HOEeNbEM!{6aX4I?}c z4;0S^L`&atz^rtU<${)N>z_sH^j_-WVSn4=eA9NwBL8geTAY2r7VEYZf6rG^!YeRk zW@g}Qug31DS1iYxG^ncPxcTA$*UUerX?CU2VWO$2gxI{oVXhN71A+YGz?T%zT$;oy zDC(?Sj(z;vtRAyFaR-ecN<9DD^n%GF1ljTZ%Lj2WHW{iq+Sv}#%J!mG#G(c~_eMNY zxubfS3Z6EA$}g^A^7{Dfhcq;sDG!9T9Qsz;suHs(jtB{XWi77gLuw6a)+s^`=DlMA zyp`UB)S3r+g1oybSRFa&S_5mDc1S;4K}2kBg<^WfT8X-}`zLS{To*D>cpX4N1Y^>p zl=x+g(RM$P@`b3&ar*l^9;-y}4yrZCq7$TR)#wTbS$WnA=7J`?&DE4p>m?|DLRQTA zJ?yD5o*Yt^_r(HD=6>*@NG{GIAY{WPs!DFj&5`o{x(zA);&Qj?=LP4$1TNB^Uk@G4 zm(0v;gBs&mhPV9o=kH>EY? zxFRkJw60r#d zd^1V|MEY8l(;QvO7Pa$QOl{m}C&BiYufCQ>8mKgA2T!huz4<+Vc=?6d@}++`p+<)d zbg4oM5Z-e%Yfeis^q;rV))Hb*@P+1Yq+<#wCGLe9*D5gqGUNkzJ4{1gkxrAeuz z9{QJ)NEw0l5?1H5=`(#A?o%2d3D-=q&!SJx1657jX3*9;A2mDdy794NZ)Z&9O9~1Z zkNgA}K3^7RZD93$2q^PSXAH6+EUq4QmfUZ3bs42Viu;L%1`O!MeJn0X{fppU)g9}m zynb-Uxtpg}t0Q*;%O%_T$odU+m(50>X;&GVwMtatHww2-pzn2;qOZ`tJ)@l$vQ-Gy zF&Wkj415vjko5U?e_7zmNxYL%Zv`r5%%U8c>|&K7H|HWoEo1;oePo zziq@jH4jP}Dl88LRMKS3x|9lJVC&1>^Jx@+7>C@C*9BNF!UgQWI+^{k3GAEjQ!}r+ zb9LZGZ%#PjmBw=d#RP5c?gStHdufP|esRuJ%Zc$#tYa z(8T_hgrqMa%n7{VKRRw*6xE36bIEamR$nZ3vr2jRmx3wbKuv#f~>fV$ojrQOjnvI+$OV z-_-Fx7Z_NNlNfKrH`S~j261NDemW{%AhB6F3B%OD8&ML^i(RoUU1a*+ape<0*?%lN zBU?M2C3e_(g!5!l?>cPDz|qu+e7oEPUrK;Ij4k`m`Lt=(o3w&w*UD=O7duU!&bK#j zQ&a~O>`xv2PYgEmRmC-N4VXJYYw0uKy*MHg-w#3|+XXL=Y>l1%+E?rfy`9{*k!58t z@L_!yBo+O!_7f>jpTn?LUEegcnEwYz?;LrJQ1=7Qn|JG$>|--o{caxHX;WT(Hs~)vnt}XjL|L-10hkZS*TDD{>`2U6Xb)VAmSA;sm zul+IG`?>q>zIn&z{`#pU%>T&(6?Xllu{3sce>f0%V+q71UUb4@Xo68sE-UExiM*6?ZS+hP~S7@>~HzdZOfOIOy%G14~I`Fy(j%a;BQfBzQ<}*J^`n=zB;al;5!v?;0sX=w2ad{@BJ3IqMCrhbO|J zrj1@`Z=d3Jw0;qNGC0<;$D?UZpic%Y$~=~i4vN)nmURoeszE9%Lidz_HN%sPs=-{e z>95VkL-cbxLPT$MSFME{h11I~7*Ke>GYHjMsD}R{mLw;{eMMYVT6Vq*t^Ix)ui>Q~ 
z**e#W2nurizS;`UjV6XDFtU+)JjZvk$t2gJB76WvE>Dz-MAphpoLEU zD4f(jX6EOW1v7pWD1E0jYU8eHr1g+a^P4ZZ?g!?AsMACso}2bHlyuRM$dLR4f*<$c zdFW(twMafQ17Fbf0L=}9?^QReWhj&o#RYg}Rn&d4NP4jn|N6Uc@x~=3sOI9n-Bei% zaHX2GC(tH`WPF@oB>Q9yzp^-Z`TEDcyg8U8ycg|cR;6gv5*3ai`-1$_?~>!aujYQ@ z^zp`rOtEME`|FU*jk`Cg$52WwiwLRH(Z^=^j(u0=Ng zU5gVyz@8?h)YU%25-jxVxzc*gnC3l($Uf84=G22*#Xh`9WMk+Q>h7r2Mu&CW(}i)~ zy^FMvHuc@(rVQH>2h#Kz4sedcadPH%)z!;}@W zQN5z1_LAlDk@AxuDojcfr!QBGGnYGTEIkVP3cdT5Ii|p*|?|GlMQQYXu z)iTjdQEp{r)ptCRaf6?aF_o~~x2^II3DOPQg%op{#F-QR2* zm*YM{bL6SroZFW;Pg7;r5l#AJk^cFYQe(I@rSnJV*)eMY=DeV8;%sxe-rnKM3{a>)T@xSG9gK ziA68mS_@V|4uJVzxxMOMXB6)J?GoCAOgACpN7Vij5<7AH;K%pg=__i++b84kLCru_ za_f%~FoxVw%iI#-VGM0BgP>vo;|n+0a6XsYPH^zaPL zZDpM&2CK+Qdu@z{LfI6ZkqRx}IJ<}bu5#+yUCH!1iDUpQwjAPn$m_De@(T4=> zGQY-~QVhXC;RZZkJAweIQ&Y2|3VHJIGp;*ZTZ2-bGGXKlg19TLuN7<^mMy^MejfdS zO_t^YEjG8TnBL+nZT4E=qIIFtt>b<$)f;423G{#=2j!pk7(h<~=8YRa-wH`WGRU-* z(K#+iaMi_ocTKU7;g8th>7fX|_sG9uKvVRqWe@0ku8#HFC7HOetfG53+82UWtjViCLB6%mIL~A zbbOrpK2&UdO;m}`lJS0yK2SE~L|+5l)sB4Ii4y7Pvo0`yWP7p zlZF>Tb>G4YYP3#byBPU4pRG%H9HmjWWv`B=v$s5-SnuKt4c?G0mFNc8tD1Oq7eB;^ zuOtbj3s@1(C-EhcDc~uN_Eu=_lmuMk?XW#POpa=b-q*8~+4*m2_85xXA8;~$hH;_E zPM3QpAbC1@X`i?%Zzgq4ytrG()EiIC^42)PfbdrtE`tS^G&rO~aFzkmO)Ze);+x%H zq}ok~udKMLc+LK?0V1>IrLq`1&-L8a)=2yNFtN26lim%;FEIV{#qSZy$OjzUCbi3EMMcAgF1Q<$)xD|EH$>JJ9ykDc{{q%;97^-Nbu zNLOLRD33{*?`XsL@B2HmwnY_BxJi?;4bHV%I$QjW+@ArGejC;}$5dQ92RX&hfSSw?OpB%+|+DS=QN!x<{MXzuq~_ zXX4a_%uU37=3|Y3sjmi1*#I`s@~?E9b3P$L;>FJhH!dd4N%9k3ki}Zo&arNV#=1wS zihzXZ-~cKiv_9~TjGitC_4!x;@Rd(cq-4VCpT@zEE__b5=U_X56v~wPGP&ybj=kpNGOPh_-38SaHU~~qx3v=@EORNq*l%br=A~!D>*WWn8Iy5L-J6;X*v*8CAsD)p z%E@o;hjaNi`I&6+ z4>&V><%g&oJ79eK12T!y_F|a+3xm>Q!xL2+ahwSAz_JmWLQX!Yq!y~c+l4aJs8@oI zS}MyB^Ab}h)^Z0cANjT@jzdP4=p{R-gy%KGq*V8cRGB%qx{#wB0bN_T32}dPH#z(> z!xSfTY6#L!T9dVMJ~2+c;~1Q+7DHw#$vOd!7UxRbrADKe9oOu9BT<;Orx&Zzn3beH zKyEPOnl@ll*eQg8+10G`ZhYCbqbx$Ys9p^h%>1un;H_ef(5d%GWRyh0X|M~Cm?o&U zPYfww7)OY%vTCk5@p|4LcJdfLGP@JS{v8z9mUB=H&!Wgdw`XuIt+pv*h|^|l8*=)r 
zH5yngX|Ld1KR?yp9d=Ib99dRba#&GK(YW)dC|Hctq7p%l`ZZ)#PKKya!jI<;--y8S z#eS*liHjU@KY(7+*v$=8Bi8Yi{G?CzF6ZHA&iCp|Ldz`D1dRLGkXNeeKS>Xd0RzILl>6benjc1&kE^qifnAqc+&pD? z;=8QV-}|!K0T->6RiaGiK|vQD%%e(~WO~3Tmp?|=7hIEGr2_ykE`(z9sI*~3jvNvG zl;jnkFi9hjq4>G7(Srk2Cr$f-*cY5Ns~}-yPKN?gZO`f5uydij$F2^|?uU}xVa6OrqX4KD$9qzD zP~1&eSpG3@R4?EL6Lw%eJ;l8~I#U?|J{rBb|1fYUS)%l*@dv}-ka^YdE;P*4$BVs%o%4Xl z0^g#9)%#_mPu_uZDRcFWjpd!^;QamIb9=lyb=C=cL!R7H!SeUg84EDe?OtET(Vp|q z+V^vKLc0@gvAh0I$ucHA7ZZ5}t43Pmbo!v4E(e~`nBRhW3;`EfO5T-D0piJ6HEoGl zSxxL3^;%$wCh9uOT1f_Tq;CT`ubUc}CI-?tg%y9ucraKxWBa(g?6K$;2UXs^vih(4qnHE z+>YwW{Rl%BJsfvi^=U0Ex=RyU^-|>MVv7KWkBH23w@{a_v*`Qco3P4OhcCR0BJs;m z%=A;ga4OH9X~b($;Uj{6yt3R4ZH+oUcJ{wUXr#TVG;t)a+3K!dW}o)T{=!owtu%5lS5eC*gCyn)SA4Dda*=xV-izyix%R&ZMdzPv z)L@7M^OzlZ+)??*l4mwSV(t1rUlyVpA&=RQ1`^_;HyiiIWOr?3hp9M#BHV8{mfLN6 z^=pCy0l*vgWB#%R3k%ULcrTw{;(!8u4>1t5-@yRBt5U#Lq|I*HW_xl2^vwC)&me}iRH>{`FG&3X z`QaPy%bP#mU@k{8G-z$y6h!a?Jwns7wJ<_=MG6C!Y^|O&kOGKlL0{i#_k>}Xl^ry||sxl#r zBSc2SUS4grl66vXa%arB`}^2!V zLqg~o{6j(lbxiIVj}K$(kpmQSSf2R&VX>){XZ%_F`9I2C^!v7S*nJL5^4=3d1vIYo zUIIDF9nL6sY~-)db7SPI4vMPu@g&5OsCi%e9{N0cWsm6$IZq-v_huny1QoXhc6H>K z-p)rkR!G2~TuKqGp6lC}-(G#xo#sZcILk_PtgIGOQYD@Sg7~)h2R^)Z42sG=EtP*& z(*D@(ybeBZ#xr2fI!;utDCNtQ8Sbdg>UOEHDbJE<&Rmx~yDWH#qt>mK(_Q4)I1w59 zTaD(AaQsP5UA4dfDNr9Kyo#K~1aed>M1f?A=dLmI@a7HO?M8IfF>h=qz<$GyB8@r2kw_PY8X$jYBRJoE#4L~}9MR4B_V)0NAB31>!LrG||8Xh-(> zQVhjiP8_9Tqj=FjM!>iDc1Mi{Hksn1_j!n@?vKQ6fOJq<&^!}7ov0QQN5RtR(n#!a zdN@%sgqrFo1yShN`=ouHq9}q;Vt+`K;FLBjn`X$`;^sA;?WUL}{u+#$o47D%g=U*e z7L~JjqLNYal5ZV6_ktv>1dA}z!6*d$mz)XJ-Ci=gJgMX z&Zkb@90kIbeC@%)-55n?1t1$}UoU=~`?xWrU8g;>l?;;aN({28s{?Pc`Tz%zD>0*{ z>9S;~#Ta>|sF+c*oo44Qon5yxx@EY3b(H3Acx$SoqQk%o z>K2lLv>Ha^ae4S$V^bT$m)zWbjVwkWC(JGmgAu0xY_ytma}ldl7H!p#m)+$6CY5Nv~sq%SrsHX)+FKPyO?8 z)#P7-YWudFYY!wXbFggGcHe^aL?LLM!`Cl_;wl5>( zX-;ihw7cx$CU3;azB~2L*!*DS`t9<$;b-|kZ-KpjCF(rl%?$QVMME5XevC)C*4L-3 zL=Ov6c!cLAbg+QedbaXFi>A7Ia-4iGv554-W;A7jV2mf8o+J z#`a8t*KB;Oe`9pvD@QC 
zN7`AE73SPH>Lp0aMz>r)1d2eKHBb&FLi!{~jU=@6N;}`-vC6$(cP_3Mf^oI*R|sur zXpbBwZ+U>9P=?=>BrGYhTgv%>{7B~5{}cCfb-HzxV~o_0Y)3rvm`r$Ek4JYm5I>hr ziu+0Pmm=KQM%%V^-?rL*oJFiE35#F@6rFK?+J>kHb(E_NfU}U3{_vnkAp%&Jv+2a)(P_iw_ z*Mol)#Spc^3xDeF=kV$HK<97st^nQ}7U(K#kv38JhV{|1#PtmlD_jIktkmU&2|@C2 zOqZx{Crbn8_4({m(%u1dH!s>UIlh~j*{4C=SVK1~BI`w)CBjxe51^Eb9UmdT8oY4)_$G4q zOgblYQn0)A>ClclQv~K=2gFn<5p;C9P#LL8p9TF}5UQMtS!iOp$-Y5%+&r5KYVY7!%+v3U?Ccjvu35V2p=hOAtl4GeU1h2Ky{#DX#hsPyl%d; zg&$3&14EWNTgt-P@QEBmE#b=Xsa445uJhqs6w&yg_JYS9*$5_ezH?egZ)WGEZ=8<; zfemHn)W)~XAQXwGZpbaM0Z61!-5K0w=hPvfSO{97;luef&&{$-FJWk6l%hh;z~%3b zJF=Ns_2K+$L)MqjgNLE^S<Q{QJ0upR4D*Sq_hX|HL1rub>X8IPwzq_c9V*DJ8efnHJ~d=Kd>?1CV# zVeUY?Z@F|v;1kJExW&P4+I^?zVvR%3U%Q0nwVw`xU!H@aGv#Xz|JgNX*p5ONe}wmR zzkZAu_JcbeKhICz3zx0()-A(_76GiIg2C&i0i=Ly9y5DrK{Ypyn{VpY_EFFp39wQ6 z3uP=c{1o<7d(7OiJ4a#$ab2+Z=SNlLP%Q(Er%8pY@0OBO9fezc z;!l_jy`_OF8tA}>FAGDEQ7JHLC6DK)8rTQJ%8zDegQZ-Fuou^fGEX?GRolQvtL!BV zZ!w%;5!g7Rn+@k{R|aFbTFyZ0a?oEMp1dQh^Mk=Y75PdF@7x7u9NGl@P@3#_M}*Zq zXHr+#Mx#?FM~{XL_`hxA?a(~E!fmx$Wzarag(%(Y+PP7Ba-%uAGhEYLR}BnP`&9=u z1H>2S`2H&*LB@U(y87z~C}Yx8AnmeTUAV>tg6l!9^FHs18FC`#Zc8>dbna zIzIj$>3h*-(b?X$C=<2sLum{#iJE_(Erv4bH{9~z{aQ)+v5(5xatRUc+zaaC#nQ0B&9ubM0bjcw35);j>Y^>P!$S^LOfm6zn{u=D{I*3B zWPP8@=joC)2)tqjvlJU6%!%yg&%B>{TMO}|s@DPN>04L6lz99m78MvcuypuOAw3%X z&g~4zPVVEM!JWaUlNskq$1!H+5pl%Lv;Dkw>Ky!A!{AHTx@x&qfy#fg05=bdj{SP9 z2u?4^Fd9qI;os};qe%fzUG+El&U(26#krPGS|pTm2A9AQIM>1*cU0-bU z)PgXo4Kgt;GS_5!{_7JZ%uVqa!?ULXKx4;G)aW%%`t>ez_8NJsvmj9d`@M8&J+X8rZww{CUM>URR0XowfdhO(SAww7p(rsYKmurwj)(dDg z8&OtgS_)Nc?^4Wvlzdj$mIkJe_>zBoOUP4gbiSQcryYUOAT0T7F-l%_hJ&nvbeu$4 zk>W_e4&P58F8=teL_~3RSpBrPt?Zkp_fi%mTM8{tN$soGJoBG6T>an~2e&zRdm$Xu zvl5=z_5PkzJL?2m)yl%mwIaWT_o8^-R$q=7F&>e$9iMe+c3D{QwlVWGDRUXe-CGy5 zI-cJ*f7;l&BqHb>RXi5DqM^xixbqmvCh6LOJg54Vu-to&|KrUxHyq{S_!2ENsQbF- z`Z|v8NnN4-$-G7S^6j3RTYaNA%q`3^i>vkhvV%XB#yyImNx4kx{!UxB5s^S+pzVrz zOCaoCBgLhA(!EV*9d4@>UpK?|=O^Kn2AU?{g&0|w^qmhAXU-uL*Y@*hhyqSl8*sWd 
zlo#a~rIjUz9r$gu$R-5ux3iMsCZbj6skNLeJz6O}ROxm35D6wRFje00B{%cea80Cy&SmL{aTJ9m3&4&Su2G#kQ#40lVyg_Z zJ#MZaHDQFJOlG(cN8q@l>#~$q*3Nln z0t{iy?I6ZnuYDG*(BR!dhU>_zMJf{9*6WhLtIO~Drt|L#k*Fyln>NW-iTr*8C4~JO zm*=EP>O^}zxEn`sx$%DIc5SMm27ml{pUWrAqE4B^oByHlse6xxaH|SzQH^!d^fg|d zj*hX}xHJHToB8#XB2u@UW(ojh{qj?Ll{Dv^Y~b7ZSG#=k!wG)AO{ojf_=^_ZW*v8H zT^57ECa~5`BY(@u?~%Tc`OkiOyEpk_@OAN}n}^Y}{tbkQ*J%SATIA2JLTR+{9U06Ae+NCP8dk=KhBJ~$X2sgge^-y?|)BI@C;E3M?h$gkjT7ya-&u;$hWJRM3(3Cb~;-4eY*PcF#w2<2%vRG-q zh&k}}$DkCY-+?TOmRcItcux_9WtAW6yAwl1|3)(P)0Scr>UgrE=&fATVq1Md)|}Y& zt6fuPO~Ljs*3Zbe<7*lKmieS;I&*vgtfBsFDf~#Wa;6C;r52O+8$DIHAwTQqBo9h} zhnrKGGpL^F-TuaBFb}T@OUksoHO$>GmcvNBV*iD;@%;Wjo5Zl*o{?8%p;dk2@IA*t zCQkmJIDx~8HBZl(Sg}d%-VG6aEu7!;0IOobM(vjOJ@6q)o3JF?`zrQ{-^`?NIR~1 zfgv6AO8P8>mT}#%@e!`&COJHhmaUermTuqR9pGpimQ*VgCy%dRqfeh~&NJjYa2VNZ zB&fr~E3$(q3z#*~{YmF!84_9S&(T;IcbTsLPH_B!Vtn#81?1QjAJx6>^JyN0wEHj? zS_Sac^ml%vk51_M?NCZ4k_fPr`-72$Y`9MP;XEzr$ZaHtzt^u z4(~=Z2e36(FMilw>FR{Hx8xMZ~2^VIvhX$ zm5H9}11JEde3n5>EdHC*RI|f9_=k=FfmgqL5}4Q z%L+??O=1Nvp%uoBB|az<1J_V6h4W(t5l`7e?*ksb_O$xj~CGm{5S!z?qxcnlbO0`sHb7S+|c%T*f z_TWd6i*wdKn=Ff9u3N0OI2E%L%3M|eJ`l68#57XtjN zla2E3R_?@dEDSW&o?;~i@`&?1^nQmr1M?GHL!yv#0a}1sLCwdBxf%q=+Cxlw2)5(7 zYH$8eTE;7(w;IgsV6}#z0*#=*izS3l=YKd zqtO3hiT;^H6=lSVPljpQ{Z(qp;vwp(!wOLEeB;xkaMMWw9;i^*$vmIuc@WaRPN`O{w=~>mz9Ydb2*+C*HSiH%W@`PzG-&tbO zxkY&+Pbh_uk8=0jN)NLkVn%T!UjjV&B~YLQ_Ux((@iPxbKL>jSqKznzS}=5)m?(hn z8V6l{B*&tS;kArg-*D(TMp4QXIjn5o{uVl>Xkx)36EFR2B7+K`&LRaWDohlCNia1w z7FRv>-3@w_aJSOWmguQV4XhhgVpf&FDT4AzgoxcQx6s#Y2#t#)-gQs*CI&+TsF*1F1v>Fy5XxStKVy(1mXut zKWENn0-kRIJx|w$$rgs6Bj2N3ZC$#B2K!eEt=C@$5!TgrsgeN{7a_%3){9;Fzbuf09O;!ah-@=3ocP30vSaL`s%9cE z!inbjtZgpEL>VB3B4-*epx~k!>EhDDV~~GmJ789s78V!wqm8%|19I6VxnaUGGMXW# z^#lhTLleeqI9$`%o~yJ1?4w~Xa}a|073Z`zYu$Ol%u&4`1&{X04*UbxNNpNJd3I{+ zqLrrN;{L$v#HD~VZ1Rt(Mh2GxQQ7)|8oHkbiU%B>F98CtxA(ZzJ36kf0ABjh?(gIu_XoKuXxJmAv`Cur zao(NO5A?SuLz(NUr*&SuE<*WXqin+sN31rY*7?ug2R2&V!@q>@IxYAGJf#c6k}_Gn z7ho;Pdq=t5K4mt!H}|-}hS~bj{eNbraJ4tP^Yb(fiQBY}_id^_Y%QlooH 
zbA>I3WA^t9RE;hVg0tuw2Fk?~<<>LJc0JKo9n7mM`k0&hG1W%w`ZF|N>X$Lh)OYog zRmy!&<2PEplPVj3;PClUs_dk(-65p;&(GrIsFNRy z>1Uh1#IIWc0BcBLJ3^FTm7eee)T4B&beksY-F9hCSdR)S`4OzG%DQ0b;Ri2B00gyi zU|#*2gd$?N6kK;9Kz^a5LQTm{wm67(=o*45Gh~Zcib@bdo z=GeV!yRuxe`tiMPA-8@Sm+VUArMkdRqPomKyGxS3Wwsj3V*i^`A@6#F zdFra z^kxVTk@qe(UosYyrS3myEafoc-5<=0zY^JakI)S2!4w{^EFoN8hl;1!?y?;3<-_XN zTW#hZC*DxwS4ri(7J#6k=|_l5FLCi}86Vei9%icJj5<9V`z>fy2r8&h-uqb^z;XTL zZSak2nk1v^_DUR&MkXCp9BtKQz$Hf9#j@3E}GkGrolxAHF@1fi`I zxs1LoXd~fuowb;VRWJ=w0~X#15DzSV=_3Mlbhn`2Cq69#qzW~DZYEWOkWdiy`HmZY3`ED^yJ@`L+PxUeTU#%(n6s$<2@AH zk&Mu@3Cq|+`0+QM&`M?mwEo~1DqhVWFX~^ng%!n*#3D zk3NJ+h{M$I?b^18gk$MF=c4<%p>jtQ|@v=E72JN?7e zjssq=`;6!?T{#xhULM)7tPh9;Zi;b0n)jcf8Vet^RRkZb}B>WaXp+CxpqrinjP%QeB~+wEO;qWeg9# zbwvU6WDvpNR*BzFxm~Q*rwNORp$RVsi$$G;vdLTR(P4V_z8s>&7Zj_> zn`Ra}hAfJ^pm`SJ<41^v*aGUMG|VNWALw%FIXz&v6Ky>iZQIGcG-OGVhnC==AWga!=kU@qJVa5mG3gx~qh zOm+-Gd@t&YhVHpJ^DO6TJ->P4?<9sLB;Q}5oOqx?Vq5qW6<|*A-1LbPr85gm$N7w% zI22c=MT_%uwsu3r18i-Vmo3}COWUTmU9B{RNCF}=(@}|L!%LIa4RQ@y+urlgj`iak?XpRB<{3C5>sma+(QKM{t)3Syu4rX z0ULE6w<%}9q8>O*KO!pL&-LQCs31+kk82uXkD^c48}3gJilRvwUQPF>gU^7m@Iz@w zQSzKiMhEhqXOFqR<(kfHMi;xt2kiBm;kBdnrw6N+(`ht=4P}b>^rZ405w`y05#Ab< zK#vZBUPFsGb|nIAWSRPpCR^b6^(0|)GJsV~y2oohSHSa^wh$kh$xtuN!8GUr%x{pb_O{4K$DmV98iNHFvFpNKiYtID9}A7YB;FdI3L` zuRUja?jx0JMFVCuHiev2ym$($`Qcran>FhF{_L(_LRW$?Iww@mc@7KX$w_z6P371D z;y}?)AU%N3N4e=m_ZPa5pe~3(aVZgW)`1s5#nyc9%_fDBJbadHbIRS+U-D?~XBWc@ zv>x_rQ=ED2O6WFZ*p)xmQB>vjaBCD_;rZIzSm5Enk;>E!D+(7@4;ma0929hi&t1p& zklj5W#@tPHiJsBj==x2WYZ{LW-lo8Xp|=VeiO2+P>b@<81S{RiO{IRtYm$0I&-|}E z$s=#Zh69E}Pkl@Gc@#&X6@zHQ%I=mY%msD2WM8&cD7bn<8v*b5FuLB+x*CS_sQ zGOC6C_X`hGEU)8xbD!?pavv`{8sXWGT~BwHOKZ0&!yEq$XP@Gduz?$WImBZwPDGkO zF8}Y!h8=I3MK6i=i&kU#2(Ua|u6$AFOg>$fejMWmaSPz~j-o@F%vb>C>hTB!A&E@ncG<8{e z{y=3`IuiDM@v}qlmmS9|8|7>6Ruc>NU&8LlXVO`f&dB>&SVd7B|K3z_AmZ;9Fpb2V zvmkFb-Kew$`>bj!j+RB-dG3~tk+YD13>aV}-XlQoNW?A_E^@xTIR z4&HvUgY6lcXSehk&0&!6sgEc;{V=D2f*V~z>?~?4MnV=YpxSlmhy?aq9m($IN2gr2Sy^la{Bu|A 
z$oPmgWoCP0co2b)bU9Atc~Ud!3Wr)oA7P1T-U~A(S#`nQy|dqlfjR?#G{6qWsKO8@ z>2GUuMZ3otofvd#O$mdDeIqu=t-Qw`F5yPfj1PJJi4%(Q_D`K$mmbK-n7F8A{Wzz~ zdtTV@QEs7$Ewp~tb#}0^K+XEozMc-wJeeeWAo&rGPVa1miB}CHWG_m^g*(A`P$b^B zh5xQheUHC@gp@j5ma&6vZX-H7+g^7nd!csm%<|KvvV(_*{hCUwvba#}t9@3svYJ;+ zVxD=TQD?L8s^vlCSHR9^N$b~!z)GB#t+uQY_o$A~fV)8clT|M|iJ^^ZutgrV$@eC7 zh9Pg4a@V?!6%`j*n#faXzq8u=>8G15doRfxze{oYlvJ5I)e$DV_HMk+-LDyMn#63e z*dwPu(5yX>eUVq!I{CFZl6y$+0IDy&@+w4WzEJ6Ty!17{a#2nD=A!t?{TL0xe^VLi zCsJ%GKid5LBu1bf(P)gGfJIw4b0fBk2A?l`eX<^%x|U=ZFfXjEzqvMEW&e4~O`aQ) zibB@}vEtWK7O)J$M23`6Dr`o8aUkza-&mI0;M(;2$xGGbqrk^>y}9%5bZUG)qh!2- zo937wopmAiWPvM@u{?;jPcHU&*DQMOyjt$>ygW1yJ6tTfbdz4*JjZKYsS&-3Y9x0C zyzNC!-C(X6U)nOzCia$-zZiv5;k?geMf!U5*;H~(%O80nsBb=_xyvoTLUkruAeT;1 z%7nMU7D8;{{0Y(Xw1_}#ApAU7h@KRw5T*B8mk{2g^}`9%Ej2}jfENk%t{;weQ@V@| ztnzrN>=|$_O4h1GJ^y(*BX6&X?i1%6GaQ~**ejgF>QF*R%*B2%1~TVP$8ktg^aXdFOjT^%qIx? z9_z!39^e>v{F=a?q;qmtqo1+8E1+3k)W%I!07ezOWxhI2Ir<3oMk$XS#OzZk0dCXT zT#>Zd_3*Nr4#0o`0RDn=P!Do@-c4l?()_f~Lz{<1tccn)fxaljp4rm5C@=;G# zShzYx{_nB+Kjy5c8rHvD#Bn9KdqCasQKbbp{z*b1qGzAdn)Ag8N#ORgPXWt-HTPe5 z_4EJW_Hg_L$Z4AEU_fH|TN`XWP{_OWA~p}@LFSbVqw)`Y2}H*ge6pjTq^4uId;j7y zB0Bin&jifx8PB6hI>^|9ERU}I8INnuY~qizF_!~5vOS#@wc&E$`fnC(21QX}#<9NZ zG7*-`fp|bc$vy7a($A>767KM)ii+)zM0^6O9j7Q#>Ywj^{pe_7Q=3p{hPYMEfHO5H zkswuk1J=&CaN6dioEKPvb8jE@MJ1WDRybahhGsx_8OuD}>`TJw{OnXy^GCRW@8^1W z%#g#OfoX$@9cSBR)2m!mzq5ef=3ud_lx0`NeT98^weB$g%T)s3X(XjD`{pIV#k*zM zSI%Mk60LIgW>zH4PE|5x9Vjp2im=JCz+074OJ+pu>chKo!_aoSh;S{2y?ASmIA zV3NR$vmvt{|0|=cR0whDMCl&?h8^{U+x_gxtd}A^rhpN)Crkh7xaS+3I#4YVQ1H?1 zBrra{p%pt>e4RN0sIH#T+qV<^#V@?|Yl|z73*eLOEk?D4SK7q3%Lhh$xqJ9F@pI96 z*}5!d_s3&4dglT4k^7{9$lO-uk=SNApdMA_$#Ja^HXf6CX>@lgD_11To3j zfHP;!O{9|?BwZ4VCcXV{E$$ts%{1r2e!^IH5^i^qq+;V4s_v5ymE6r@Vp0mdCb@at z$MBzghY0V%fZTAXS={4_CgN+Y%HWmG&Q){6ZoIo|E`RLMn|a$w?_Ic&!U71TG|s@m zEz?k#1Kq#c!aB^sa+QU(%0^zxDPiba4oM>q3*$3E7ld|ctxF7Eu;Pwka{OuV@b14^ z02%Ki(x!efPhaGXJQ*AzWMaCkAHPq0SXI7gp2syaMwsgEVB#XndbFq!cuP0gapN8Z 
zcuHI49JjMxpp{>CFwC(vtq;GUSR_Fo^8U+*ok>%3xk+;VtMai1DW)2LQW-bQj)nh< z!;s|=YFy*^fM21OH->dZzCTX&lCNRP@3LQWZR6>1!();ei(=-KmU-Y|LtW{1{rKFK5-aMvsnbo5xvS%w-#9sGz?d}HeC4#s z#@!LvQ(uUb3SQABXt*}dZW6W)-ZT0@y4&6v_D-fBu`4XHHAp-gX2b2II&R&C*2%9` z5eIuTtNiY#%9%+xx!j@UbCI^-Cw4>)47G_4=~Z9W+WkUsSQ|!#TIZba)ob6oQ%9%< zItc^jcvzOMPg>dIH$w-X57!$PwP}0}e;?6+sk#hry0G(_-F^Ia15(U3KW?Q$OKpE{DO zXo*;}0aJLxvZyX$^80~BOCA<19=FPC^}AG%zLAH(1@lSCt~Y%9_d6dEzPvs&lEuLp z#YM(@-`Phu$+?Mx<2LFGbiiD=%uRFSx_ta@(of;b1}?kdw28jYartgU4he5#OBIt+ zzm>`jmCwtC=^$@wHtU5W4q-}B?#2n>=gU5Xv&P&)O^yTD9F-b4S^n?E_J2&^|5|+? zsg)DSt$1Fgz3zl>f z4#_ObX-WLjuTL7a%d||I`LjZ2+nC8RMW}K_wJ6QI+Q+XR%W;0l6-_X)&$&Ie4(wXS zNODC@^9gVe@)Xp>p~$xs9jq=dzpNGBpHKPiA#bH6ZBuCf*-AngUa;!6Z@$8aD+oU- zH?-swb2pf<@aYIdQi-C1i!)h@(J1WnrvOHAXxsdSi*Fs%b=ozyr)s{}?@a@ab2MCP zf3`{4F|D$yEWob!NhpCcGe=@fyIWg77g5z;hM@+jV6|SpV%J#^_nrofmIsR7-9Qmv z2Ym)e_vHP0`|A#%mrrGWq$&CQB=4oOcf7=?d6f(m_FiQM;-p@Ryk7KoW5iGfQ7hth zR3XNX_M%JHqQ^<9Bcr=QjSBmk!CDLNhx~DYj&Y~F8QTTo;&*x$mq{?qx3c3NOi+(;6VJ$K)PI?H#`t_x*oj2O=CzBGV|SR;D* zav4%CtEqovB5nwJYpDa4-*0>a6ImCSNpzxn^-zy(5Clnk<~6c;5?X#FjIfo9`ucNy znJs-#yV3T(OkjGT=F)Qcs0C>gHPj+69Zfb@rZIBtgTsEp?YpSOCCbjQ>Wztmfcto9 zDftGTgOw6afwRp)KMi=K-8aUqT;hZT?UdITJtf%qf{NXHIf%1`HO@mUG)y`mpT#DI zMeyX~>ol@$1KJ;}+u}K9C1vj{LmnAEacw2LwT`X)B?F@tHomiS!rDJw<)RpSa4Yxq zyZz_*+s-QBYv)GNbNu%QLK;5Q`6kBRN+_w?8*x-V)6N>0CAZot-vTmPGH_8oZwP8w}ql<=ja!-3k$N@mB}zG?yD7yuX@Q-zbVpG+X$88Jo~26cSKTVtm5WAy$Iy0N zn|R-bwh-OwVBz@NMI&#r54=nmESH`F%6<`jgbGz{;#{lR5Nuc+v^p8MEpJ}x2;#VH zVvn8k(EjGgL9?53YBDgjAcR7Hqe7Sw4SyK91D}6TR=pPt3ZBu@*a; z&TpCampUmZrJ?R*S&Js2dOKGdF|5_p?zRPBXDI(KzRvn9%JA*>&oFc=-57L>bc@p6 z%_tz<-2*5Vp>%h5H_RXsN(u}eqeu>2Lk%3x4=3KU&i7sG`4jGSJ@>t@&)&Ooa|J@W zWnP{`5`Jk81ZV+s5oSgJ$EEUjwvt|jo*g=>I+XLQc=d1hMcTPwi>*J9oCWL45ZP~c z4ffO*uGXybj};=QD0P4~xsuID8Ur%YXYZSqxOo%j!9>TwfmwTtRd@?S($=(%D?Cn& zA{Spk4Ijj(dC(z7T8(cxhg^ze)@rcw_8p31n^GNJ8$NgLuU|onot%imXaNsc5rWgf 
z+p)vOkj(e)`Bmo$3-c}ge^n6v%bZB)m7m28h_Vpl{%y)iT?>9)H9a)ll6;)-rS%E9eXg^?UyXI?i$`a*BFJLLCv>-YYQ)(`u!rpl2v>%L|ZF#l~KB_I~noARb=uvI@j+Ed@H*;nYbuls9Bf zO{w%gmGt*){UaD9qE3-Yxn<|DaZdAr^KZ;HTt*x_0(x{{%r*}%h*6UYbp4Eho3P2Vd*-#?Mec8M2fVCb2fT7JvQ!7{>A z3x%{I;lR_zBMLMD6{Z3k@c(5>c!` z3S*X?UUY`F>U%dD+245}HRiK?dq0Q#)rabowS%sl$ZGY?GoKFySKxJ*itMz^bjw|_ z$nNd0XjMDJ|D^YKp~|Yov*LWNil?CcJUBSC8`6{>%r)rne6gv&@h-T9ry|!rj*6AI zuEMumD-kMJy)NBJM0?_$k(p#-GI>%I%-eiMbI&{E&F=iFKpc#4xr}V8nvdU#DhCCQ zQoX|RZ56R*3BAedh@* z3u}vqXI9oIv(9#hh*z9~GCm8TRb$`i$Dk0PuFpIKvmz2I_dtGxKD`%vXt zXf6C3ROo8Cm&ok6iMf%zuOoZ(-I;0pD?J6_Qeb>HigaJzE))4MCYAD6RnD}_xW%7_ z#GDM81cmReHdYg&fiJv5?7Z(-fnWNZ z*MnzNFPjkB8j`)@-ER@!jk*!zhItq4G)Mfo?Vl3=7DN?cmTT5rH#>jTAG;!tT(awD zZin-(I_L63&$)vwd}U^{QS5#_yR?3Y=h`ebC0R8`XKO=+R^>VCKC*#V1?IPxo6B2` z9C{>sl6xobq(3q{>z4iD(c~UryiYD%xHz@k&)!doSzp)Vf+P59Q8P_zV79S(_ z)9JATGsI|2Jj!G`3M)pu`^;Z`F}MP@3B-FnRHmAle(8DDz9OJ?RA_m8|7ZAQLwBKW zsRqFX<|>|JNC73&wWJ9{22va|ut)79;{wYD_fn*7$A6yU83^kPlU zm`<@ts`+rmp!|fK@UW6TgQ#q?Xyv|W4O8O+P$}?zuGVuA*=%ZjuJcbvFi^kl7E!4Q z>T<1&S6L5swo$Buw3}Np({1q)j=ekFV%0m2-Uh}2pQ6R-KgIGHQX6nki#-;eO++hZ znW^tZ)_h1T^O2;^uzgFDA(eqc=re5HjV6fo7>opq6^53c^eIezM!RU_J z-I&!Ll2nh6e|>J}5NU4~sC=g#xgByJ=S6LD4fRgjLDN)Gb8W$wXKVY2@$h+9*|4S9 zav`4njgILJ^_uP~IA{mpA-7v0+i7?1R_Z2fF2I%YE|a&&FVASD)q;Pm&l5rx0aXH6 z@FrqQmhxD1bIHDnFBuYZs*i&k@))OXQaV`186zotnnB{dY}&V2=hBiED%(06w4IF! zsTQSilc%4~+N+4{JD%DtCp z+n<~&F|P%1;KHafw8zzlvtvmh>w!nkYXp^Tr)KsN9iSnD#DykOcVPYH?iR0`IqC0} zq_5V?(V9Jhdfu+p|IOce)vI8$^j1>`tC545OHj?Zv13{%6SM4GJ9RSCSb$k`_gUdIW!lZ3(pdc$UTET_w{u)0!&Ql9I4ILZ~eM5939p~lo^&(XOqQox=u+gkSu2J4qI zs4*X3^bOw}pUt|c2p_ze+F1?T9Hk=_|BqsF6wa*(Z(jKPstHa=vUR;XY9(SCR>~0` zSbsFz+xG?OGs7iDq#w=+1hj;vf@VO#YRE;HGhm_B2yimSbLwDM{>!5$GSO;5cV3R? 
z`V?A+AgZMUSF*XgH)u+Rt>6E}woN2J9r}Qazd$!(;nGwdOs^IylC2 z@rz^+NZIk|^)s|9)bA^0|4O+QhIx=d7a5N%h);d(%oME_<|hkgu4R>w^-*Ec{e+nL zAMgjzLLeK(d;coEayA5L*IV=@!ABx21kpoHq(y(14C)4r)C+Tz;0k`W+yQ|D%tALZxPAP%+6a9v%__Uyc$^g}!{>5=2hmKx zko=3aYY+e0#W2jmU^pury|wlsgX0%I^sn*L?4UF&hN_39mLqGnWDe<|jK~QQNg@E< z{G;CXzKwX{Os=(2^Ym0vA8z()ce z>nZZ;Qgp&>elymV=rhTz<}w`|TwJ?&^M@fS46D>hJQEcoiQArzQYavgP{zwCloi^i z0hbn`#pm%w2A1aYAOAAaj$Fx8V~tu)!@c!zDE`^ZxEBB2U8>?aT6+0&EWtLtH9vof z3iSjsGE2`eSP^)Q?9JX+mJ2IGrrO2+eoMTevd3rFI#Vp6*m$%U>+4x70_=D0a|0G1 zY86}8EO^(1WWtK6v}E5lr;Om6uFdo0$OQhDG4>cJpXvL??@_PE*eT-_jVh-&daw@mrUP1H|gbom&tn}JcLCtK~M_$*G&4+idT*BKoPFNQb ziy{f?UK4%5cNw+FIY*x!%m?0Xh&w2V@Rjw0R3jU!QkheYu%|%2dkZhy?LYv-8z+oC zv03w?@Oj696Q9@ADGSS@kTq_PJR}LJtkyZ!&Y1QGqaR$Sp(IZVqy?%DP*TE;aJ*R> zi@Mca|9IoV#|C<}Dd*B%i*#`poR$JqRig>scb7{9@JEhK24po~(WO z#vk!H*|w+G9IFzK_L`Klj3|)k$s+R`WF13j)h-Rozm+#yxkn%EISrpr0npVf7BCPt z#oovBpRc;^eOSi^H>Gkp*NJyQ%Lofpyyo8Ok=)SF!w)oMeoX&5%^pbk^50E>JUaK? z{kvy<`xjU5LiK^g-^P7~EO}*NXA87Ag3V5_z-7K$y+gE^-W?%!wpzrC<%J?2UOT0T z1oq5rP#tMT(^o^eI_Z6TFFmt#bAXn7L$W?ViiGwrQ~w{bX*N4g%AT^Y2>giexbU)% zRY07uL@JmG;i;0z$=rkc5_Xcc~1WzWN7x|)EKj(zSz$vEeSR>(U zUYkARmPkNQUEmqujq`jwDeS}me(s>nrH<3$hhs#BP_mOt@ltlO1R=j=oliSiCeY78 zNaQ)}IYo@K2Y$B@`aFGg+>?|jVAZ|f6L{OT_z;kNc%f9>{s>VEs*~TVZuUj=Thw>h znZo@%lbfm}>-g^|l?C`sC+!j^JFW^-d(GNIc7y7R{>T(rRcX{cz36=0)>SlEz&aku zT&5?vlwUhfUoiy;^ICUw?# z24KXbuRfD$z_ zrXFJRpFvy^y1{szdVt(s4lJw&LdfL|f@8q|q&^sVQCJF?;CXkd;cWZl53oew8rA%{ zaRcE8RzI&ww}BVXC*nSqG;46uTT5K{jD=IN*IAK-G;$H{CIkewvFP#HyZxZ>u7Pz= z^gK;`XDux3ap$-%OV~vo16TL{v+_IJSS z`+xirf3WZ!TZ$5&I6(B3n!1cfiPjx}2TQB}5nj>q7U~JMzYKdN`)_L>YQ}N%B6i@) zA#X8|FMZKb4>6O~@?lnFnmbi{JwS76P;B8qB+ubS3S&V6753Mmrzh8~P4T2R%~l=$ zIp)@S{m&6?u(q*&lDhn7tOw;&<3K{M8y@8O51Jzp`v?%^4r66ekRet(|Kp>3rp_Kc z8>BaKyz6s^)oXO;)GaB!a^UB0jk*|K*1km+?ia?pNiey6Ve=wf!Y$ef7?rKxIhEaZ z9k|*DdFU;dFzN2Liq_{8!tY`4;>P6hAlCxc0#`k^@FQRUw(zoMW$!w@*Wjd|q`eMl z__t$R7mli^?TVTBYqQLQO6r^R^DJ=r*5IxjC#xySsV0ClVCgJsYHe7?nS6QIbS-@r 
zbCeEq{>YK-O^U$d#9|KJvOV>HBw=;j?3%%_!w=1NJQL?5LZO520AMCsFY1|?8vf2@ zVl%AyiEC^|ma=6k6}(zzO4c0+IG5z_G;X1*n$)T`hX{P7epLxhq|My*#SxFF9-%O3 zn`cS*Jdfb^Qio#+88)SaF5AwKC|H+D&q@reNi4^ItFR8O1f)}6mI?I}N z*9CS~?Z6O>C6FfC#(qT%_(8uOqyqC6`jl{5!uM60CCzcpcG4SRPjb;@<{i0dACj@> z7DHk;BM*3~R&I$%Pl}hE4-4W9fw_}BxNxw@dCZ*eG9w9olh7s zA(9^5H}xO)XvEYPObyUmJJ==5=Iv<|DkEe?(eGzzE1LZ-bBXe9uK^q?#VYCRaytP! z^qrk|@fJDkso`0KP4fRwdcjh)qG>#%t~nXr}7O3|%yRtr6xfyNnD zGkwl`ef$gYWeV1U$P!^#UVyiZDASJ)iL8hKt%z_x;sPHYAg0kI4^9ZFyv8Q8;1CPH zZY6#2Fk~0 zYK5G}!a_X8tW0JH>2YL*DoC#|{oi!}@efMepbD%DI}|R>j(u-FE%# zT*1cJ>Ok)oBW{~Y08A8pnhOT~ORX&i!fp!|4P<7!Wk6=8_$qZJ&Pt#V^+R_X8$+lm zt4{nk%A_yT2Zj~f67qH3J8D!_@A@Yfld;k$w(MpFQ^00-i%ft`TivTwX!5dPcN!J)$xE*Uy2)8zuP2V%4zp!jEeg72W%l@Y2W)jk6|#yllF zn40lN%FTiFkhVA>CjTR0G1eu*C?(bRCsM!{qe!*w11{pYJ{D*;?|8Y%CpXQ$p8zfH z9{pTUOmV@&()>$1H|?I?XH7}%Ab2}& zPwMkZO+LQsKW@g0DI%mwJLvmJ>`P*5yv|roDTXtAW87RD3;)XCOuj1i?8L3sGT)(E8jaZTCY1DtHA+cazcagqgrXQhD zG{@rTsup_jzgd9G;J*t$%*|_y|H*mKFKUO~6uSACi-Zg1wa$%$dXdu{>7#bzv0#^&DOjNA_btsTbkxxT9* zmun%D8to2p8CECfzncm-QHxGzyt?&!H^d~fq-HiWr8qOH0g zYDmX?&yx6*k3Z+I=1L6+$6a2#0&{)-euA4$(waBN&pF>u_c6Eg8>qumumH*lU!@t) zvq0LHGg&yGV5U-7BKytsVe4`^Ij7zUyw_oSQ_?$IuBH0rKC?njkS|L@iq@iBFdfZ7 z@}em?9K2Tzh(1T$%o-7t`SxS?4VtYrv$tz?pg(l@Al5aN$Xr0vSuK6Ab@n0hxr#T- zH#zxB?|~JMOXAttNE~kbQNdfk50=)-zX=?U>n*wohQ^ozORH8E1klBW@@qn*B#`!eqNC+fi)-p$Lu440 z)7g$<$#$%rqVLBPD?U_f-<9xeq?~A6LoFTj9HOrS@7#M8?ykC`h#bCSjwgmRzG%P5t;C2d@nz zXanaSE|VxE)C!ut5rD+zPD}R$IrY)LyYpa@L^St`M^VP}!_6nob)e3{!Ii;$i2WA5 z7YF%G2z%=3f%j}&e#O`;I+9z4APd&R&v~i>Gk-sF`uJE-!ptl7F7!m8t)M2U;K=XF z9q0O7xin|Xb`eN6u88CW1R1N~FH4Y|Ox1Jhi9)SEH(khxGvB$y5ByZ|T%SwVD9WK| zpO9ul?&pwun`rC|4(_bMs{ZynWi?F2iLU(&bwi^lxyWzuXkdI|z)sTW@LXB$M?X=U zUfT(y1wG2%>3rPJhrUuFx`JzVn7Muk2~@R@3FsoeP?E0pp3~hk8$IoNjn;!7Zx7oqZ>$*U$)YetVOk`}4mF7^dp&tH z5Y>`wHnN8UWLi+#{xe3gVkCN~Ez;|^bSqlgQ#=vx-ED$(zd}qgU4kzp_#EW-tPoSL z>bu`b8m%fkf;DvQ*gTg4u3;B{EB716dmfgVqh|J(3|+SucWnM}^nG9}g<5%#sFi5u zd2Gm$gK8zl&NC|A%6E5qeo7Y%gFj(wPUFsel;Ij}rDF7~T3Gf!i0jan3!7fCd=3b!S{hbj9S0e#COd 
z1c8t{8D+P0=F+8wWAW}R1=WulE4{eVDz#6l(*yOU)--|LGW^hoon0)zzNwki;7Fsu zhd`ur^?k`sAZJ4f#V(Xjg)48og8Hyzk}5ok-lY5snI9Q(hzj~kWEN{A8&=5lYN^T- zRTMw+dz;4RTWoOCE-CJ%E%(Hc?%_AdyF(My(jT7kDX|W@izw~0E`B&8Qt8SMYN;P3 z+8j8Oc5fkzHt1miVmvi}IMI#Be>zUQ9M+-PZNW;@)+IY{S~LjSGZwINJ`g`Ndnp5% z;o@0|=qUJRZ&QuA_fDX$L?pxKrIV}ihD3ZRz*(@2p|e1B@V2bvpU8YV}}rY zpOnoqY3KZ=DyND2S#~SGIwD36D!SaqI3-|o6Ev@7c1vI^Ae$eVfZkqTmE}L!YjdkJ9DN9=73lkFLCF@}4y* zXUjPF-lz&<`kLFUZsv{iVmqsqV|lXBv-Ma9E%GbrZ_+=jQ*RP5pd~hbdv7Js%p|V; ze+K0L_{44LW_e(v3d>}AYNZz%?X^Qcm#~t0I#oU{`g;xgE3n%S(pXjr zJl{M6;@D%K%1Qvl6GR+LW8M<5a#J2agRecX^f`5FgRt)A%k3@7mZF5Xf)Q@{#L5o- zC|v%h;ta}f`aQ5B}DOd_!H^aSLmCIgbiM?|bq0GK9=P!M9^_2_xDH~Z zeb+hWB5~qLS;%Qcabrnk!o2N24R>b1Y76q4*?BcjZ1)Xh1<}%H%ZJE*^9r=~u-A57 zrD(#P;H%J9!Wu5}wKOv{*QHH-nBfUsn;yG5UR*?2tV~-)*pqpU^~HDhhVbramf9&1 z@Lqf&42b-09&XN5in6IHx~(xv;iahe&l%fiA*g;ob4OP9}Yaw9?46&-N%v6RP6ERc zQW_54_*&^hKNG;5p1Oe{3?Dk17qUE5S{0_Rd%+t)lJkTVsso9DbHVTcqBch_6OhG@ zIvn%aAOkMB!-b`k9l<@C2E1n`?5XqTTZ*1C(7Ym}A`&5ei&MxxnIZ(k%CE?#J!DEFJEBKbl;VZvp z&^5htR*C?6tyAm;$;EbJy^%m*<@Ut+o^11;5C1wPK^;EttH8S);39UW>+<_9-z#{W zY8)aaC;Y7|Ab3Wx^U5whpqlsLs&wego}<8ga!~Rvan%~>i{JGAlBFZWdf8xU0!|MW z;81;jbX9A(X0~iLw92zV;41-Ol>Qol!PH%!PF*iw;R-e_#9VR-=s3FHhc_(K*sU!6 z+q?$D)9<2fs-|#|X?PrjpUqBh)H7bkn|CYmAvdvhYOO2Ao&_@i$p^tbLIX_;4w~#I z{RuUgzUJorh4yp&y_o`Jl-6ZT62!i11xFvmCWqi9JO1>3qxOt0sj7 z%y340*>QZdMwhBXA=E(Q9SG`c0URDZD$kn-gT@FlY~tUp@H*9|n%v(AwG?)X=iK`4H%}ousFiqlhlk{ApOwe-PxjUi# zud-d7G01q=vBVk6K25U;m#A9H6k(7xYN}7QwZiQ0mt8*WnhUGC68QT-k9m5pqgCT6 z_-6)w$yCPEd%u+_(dwUOXDw*}>0h^g`1?Oty6zw7*-da?ZX}N-j?}%Z$=;mEdbln( zD=`-~3monmTJ88`o;|jFlTb-DejpbXwn_Mkdfb{m#a(%UnasZH*-FaPcCpkBq(EPTtOvGntAu; z>swuN8*RAX{ihBD1iP0?iHj4uCsR=S(wvcdM)UuCK*v_6_kyT~Uo|@tOeZ27`hwme zZ}(fAM9LKk>9E?IrEP1R)Ct#@PSZtl@ioEW4e~65OKr`s@kfMoj}r8}US=By$F7~m zmC~ry{ul^iEQ>xu?P(TAtq5hUcWU9qcBD-srok-~(uE_NZlAk0EZh$%f`15GuoLrQ zzoCaSxcq!YM4R6uY!eN|k*`}(fKjMeWE5e+U0-ccWz#rZYYna zUhx`TssjD)!5P@3v-#l|UQv*t)mrU;?nhudcO-uIJ6>+bZPii?IRSkPE6={F3070{ 
zhd!F|B6PSCZ^yd=K0iinf&^vUQ!-{+T4oB2_q6jABt@8pH`}zJ z#te@79y;*SQ03KAa;Z%S*+s_pwJtgp+E>nc=g0oBsSP!c%+!A`;C`2q&NZLpzqE=< zYIF~bE7PQy%$%j7nERT9l6;(}fplu9%=<=D+hIK3BhNc-idaW$gSC6^|BxkDzXW){`>_G7wG_&KA>-4jO z{;AYu@AAs{GhADpI{JxP9vCUMH zL}2ubqDWkThvfMgbnx?ZrcOdc5WFJWrC6w7{}l+iPBF?x39lNKtljuHYn!4vI0g|L zGj?nr4ze&i-HR z8g@I|E_b=QI`g;n%D@^X+bIQmeyg$nuG5ciq>Ma6nDh+cD`#9DQhV(dd}kRibba|H z4vtFdPn94Q(8sZ!Ijs0R=nJ&7 z7A4md!6m~cF21$A>EB%bMWZdLt11ygW_V{@p%eJhbELC`Gfwc@5iGF~AhOG={6-zL z9Nmn%9n-a@wr7y-5=UyON>rnvFg(fed`83vFOLEx+RA&`O*X-b9 zQKfi%2hV|qv05u5v3V-iJ5^+?xngI}Ou)}j3k5%P6Q^FEhp_f<@*?!2+oLOb-$)?$W?MMs*J_IEo52j1)TGXp#KVpjTJEjy&i}rgi82D9sq0+q zz0H3FT+}cCu(-5HeG@w0y%2_A(y*n_6H2@d{G1FXaJJ|nfXb-26uzU(d-!E>p5fD- z;=;H^)T2+E*6N{_izEjiE1w(!Zj}YqiKuB9QOzMmg zp7M`DQ^fXzaUw!4eJZbu^%P)X&4}u6X6LvKY*D z;G4nlKeT5uI*~#xm8|$Xw^-j;WI9%5LQZuYE8Brl*NsgqE*m=j4kf3-oet@{w|a7T zz=y{YY)!M3P`Sn8(b-5;Di$Q#c8vf5mch0u2M#&qAB%IL=wNyOyqO&5k0=!lSR<}~ zDX>rZHX22Ezs!%=_##I7Iu57X@1j${MDZYC{Q!8+Ad(yKCPT@L;d{mUkF`co= z?1o!V)#e1E(DBJTIyr9N`1vKZhC^2NuXoHVM7hxf2+7jiVORlGWWqBbHeXEsm6L?F z6}&L%1(Ah^behA8J*$7V7YX%=06M1O>B_30WhJxBmm=OZtP+js25Fz1O!8Sb4vZPzgA@6&C_E?&|rt9rpz`oAL%T6?!7uO3(pGcN!qV+CRq9OdWGQL zV`Au3;HE~1ttEwNxQ30ud{9+iR}K@VhpO0{!|JGl)SvRJJ@CPPu7T~SKP8GHgS}pT zwMPe^OZfr+LD37;h zfJZ3IPOJX$LNC$$XjgRp)ft+HHnKhMeF_HAOOM+^(DEkt$;GIj*LgHyN}kM8;bm4t z7w+h%xg+i$?Iw6{2OZ=+&I^*nhg=CZMSXAqn?kDCHK~U6s42Iz8hOb9igyCS0djw^ z8|l7?`*o7_9Iukgu;p4Bx?Nn4z#a%(D@h&L^)}C9I!kcS7B}cY7s#(gvp@vw5*G;R z>tT`={f``!TmaEf@S(C?_Vu zkO;0hpa)h@C2O3}*B6E*Ur!@T&r3);{&|blue&nu^Q&w)P^{UBF*2jMdDXl)t4xfb| z7{*dPi|mOW^7I44pk?-v2AmeVmorCM)NOe1kEH523~1}x9v*rc<94R+t`(_;6s=xI+TmgoYmoXXd=d3leQm3ZNNsHMtS(d+@ z1azCzAkB@r?J0AL2$jx0a}UPZw4bPALIOAg_#my_jflNHhpMmirmTUQT*|XJOh(v@ z#s>a=uj+bl%?lYLvDNnUp;Ls%8TFiN zh~ZN@w?j;{DDlLxjYF?J2+n9+oBalTOA!+~{qF^>WDQ8LE@|Nl-7_OErt(#*0~fzl z6;Kx!4yt}v+j>qx>&m{BGt@!ZXKBN8B{+tXPw=Lj5x|rEA!K#+OYNJ4;h$i)*_$VI zHX4w3R!`=7Md}Db@-Wg<0v-5(U^p0>8bM_Qt`yM4E*zLf)5Uduu*ZU1`@wDy?pdGD 
z_m{~!uhA?H%nwxZ7RL8arpu#4>4Wm(a0h6^Z~=xe(}FA{#T75%?LFa%x}UXc`?nwb zJL1DnhXfA|bfO5bZP^X$47(YLOfyk;>_8u6mhk(uZA3xC&C3>I<5*)M-{JEoB$ZsuiCrn}SSH!2IU?M!M>Am@Z zkdXwo0&bv0K3A9q-E4oN#U+|Sv*S-IK^E)BXtqon1A)%-!1^x6pI}w8-pao0Ww(Q~ zl6z6!uQu8DjXz4KSFpleP15KrLRiG0+rL49QC}>(X@$NLzIsi@;*)#x?{#ZfyeGU@ zccg)a>ScC8Wy4B;Wo1kEI=wq7bgGdCq#mW;_+i|B!Rp)j^9$!Qo3r{-?5{Cr^UyE7 zCTDiOHTApboAPHQoYgIdNpV9xhVv3lS`uZiya{j)J9Y!iy&Wv0=Yy`gq^6m3yWLfT z<5+37k@*SHZ*WGPvo+L(o#^bR9;8=hbjvmC88~mhk`jcqKbt(|iP1>yyIT(MvMBO~ zc&uQ>dF&+K#X^1=Z=dPm1Bos3XDG+0p5CC(SE&#NHx8BId;AotdH&+qWJrzJF3wU% z;v}>JOZK-X$3m;_9W!GVHzXcX=7*7-E%18nQ2oyW7jEl6;a!O*t&Dj+$<=A|vkh+h zuNl{&lnC#aO?+R+Ev5XSl0_Zj5ofv`=_IlvFt=HBJHc`|qd%W4;m|c>&}YNgi1%O< zyZgaU2(W8R@zR!|IK>ML?*aS%#0!N60|W2w&w>pMi`zRq(QLrgtvP9Ngy!HxWVP!K zfQsa(5I4S*-1~O*Z^g6K%eomlD?I36#lz9-FCKR72#QbdGv=)3Ey)VCu;n{M1 z>M5kKq9O^a_WsATAQ(&%GRmFK6UHA^vx*1w{A9an0ePr5JAI7)t`aBCxI4JowUeTA zcYFLrt6zidvEHw@iR4)^z2qdKbE@{r4#>n>Rrdk|5{Ghw+A}(^85pVNO8|#UxU|s$ zA)Jsv&IJFbWRK*pG@M{Kv$t&Bby)+EpNOzDrj@XJbnLzAli+@aMxX!cPckV=MDzyi z>C`O{RIU-QJ{56|c;ZBCl%eJXn(`}+6EXOx=r?n;45?lIdrsa;nLKJ3XH!4Tr)6m( zKNhtLHoVil&U>!+H!GWPC`Yxu63^+k1SDUd6y+{)oIPf6{`2ye)#>}i7)8AU6UR>l z>8KwMs-926ZIMSN7#`)_Z^(m}@e~qe{kR6RraPvk(KS8^MaXD|5!WG6J@D|xllhP) zZo{129`4RW?l#2lLMs+$YOd1q*^W!I?Mh$bN$)~ZOMRzp!(wg5?O@KBh#_(BOHf&f z=%&GFhFNF5ZdS`c_L&$YLg0n}q(B5{PV!U#I)(pd2cG5|PCK2@_%NOyZwBk*mpl4k z{hvayk~YPqJ0D>`oERnn#{GH9Ht-*N5Nh{SAaTxSo#+2#R?{rc+}6M=C%>)zFAu9v3p5CE#~cda zkFq`%4Yt*ca4$d{4u4OzRJ4kq_bgC7vY6PXf!$PHA6E|28+$L~`5*YUipuKn85PG+ z!(Uf@?U?dV1Zd}84*^}m?@GxEf1GEy_Iyv1PPj6Tz{cxb-iy`E3P>RNUn~Inh=Z)N z6YZw{n_sz0ejqC%Z-cjDk+PB$s0n1$`Id(dwqoFXy4i^Zk2rnrSxrB9J6+)lbPNcr z{ZizXU*J($%vor>_UW+CCVE=#Q%F~^J$}QUiD;ODcxa~I>POcayT4DDU;RVo>Qp|( zui*MCg58{T-kqYO~=~f{)R^RyVEb%{@7MhP!k9@1c3uJwDFjTJGvybUIGF zN9{G9}Cp9tiTA~Wq?t8j9?D8d@+nOx}Nr!!s8wiV$$3pqOL|2rV<6gSi zTAEPGVowZ;x+5ttt7#0!*wHhVY*28d2;wK5vJ1zH`QDhVl^A*$gu{B(+3{84+$)mKGVg2Zw536K{=@l!xM`S^Ls$1i~ zJ~?nOoms9=W4}Pz{KfM6w8Lr$PMaQ|ep5KcO9)|2d0mZ!CRf5I^zL$05k8+2c1^~M 
z^p!xLU=(v~%5C}_Qpr_4+P`#)*v3qUFQFbGZH(`nJcWkoEyUFduZX_d-6j=OT2^L% zv*3ZGtl?=Djb|z|Br`zh#moG-MOrM-x4~nMh2Jvr{=Hy|b(-$5Uc7g9eff*C<-Gbl z=BJaEfu$3S5uP3Qp<*b`Y*Qr8$>H($V5O^nPl}gh<*TvDQzT7lSqRTr_G61Ftx^)_ z3F5$b*g&G6nV`-DmTucUN!8^Fy#7ZygsoC;GzQQ zs9FbArF(0LToKHe5B6;%7WYEyj(In{e3e-f--FN zZqq*#u?sgqX6a%aLm#eTKM9C=`AC$Bh4>p0SKi`Wuj;`A15xL~x{cSADptRPP24_? z@&tj0DA*7#SN2d3y?h3Gokd!FIW<6q|o1wf0Y@G6-6*x!$M=6lAw zu0OKBO#WuvRDxeGblET2i=X;bPF_dv*&X>jwyQjz%uRVhedvKXqgVVrwfU8=n0w9n zv)2y!>Pe!Iafy*-ca_#`%SjMY1l_&dje=SQi4>a668#uJ@_FaF?I`z!`@+I<^XAUFX zZ6QNL#Eb*)!=QY%&TPhdS7R$}9xedUgI*!jcULWjixiPY9*xRq~i%MCedThv94 z9zRx~QA@8rDimbFyUpVIW&aZh$j5`@VeJOjiiVPIa66vTsq+`=ur{+fYTqG3^4*7f z_K60TiEsV}l!eDPiymeL&&x|TIFo;Pzk}8gfodmxxcKLzT1^SaJ{fJr>c9?H{&iaa z*bXh}yFZ(d!Q2nC;i-#1kV`YB0X*1keLb(cXAo+_xkpA3j>4fi0_=Gun28Ojwq;c7 zxtYIxv<`{}{b5sB&;-q^X8s0{#s$>(c(0Fxj zR$-Np5I~SCmtaRyPc*tObZ;%FM6?buyWrfG?YR?*x9U(!pO0H|E*RXNSRr2WT~`yj zskb019OPQ*f?NLvTJ36$$?0yLVa)h2D$QK+q$*4+GxSdPj~ZXvBedo`mT-;p?N>IX zc;01Qp3)yLb|xuQ)~|~eC>BE_O6fopMpm04jI&NhelBF+%=$v(XZ-^QS`HfVRT5+8 zYB@hQlzE5d!D~O*FaP3ecvJNBMR@KrnIG+|H8-sjWRw6ynRVI}Nm{ULCS4qjKrjGz zr^D|X{%`pHF)Vj=aOm%oEuvKk+B^I5k=q8-*XJw~jj(WQuDsQ^m`eCRUzzh-5VY29 zwYX$IGxW^A_R!73`w#33AYM)YN-p|XtW^dq-0F&7g&zR?&%wHo?{t#+nSpT4=B=;mB%jS3?+U_S3veJpyPbUe!>JO;KxliJ1o}YM2+@!@taodCC#XOyBV{ ztEfdch6D$ZX7RQ?y@W|YNMYkiDsGTsv`)b(6%L>hvzjjUeelb5*Q3?c%Nf8~FA>w$ znSnrNAd+ey>E{2CPH+;ag3CHq|K{|@DMO#zo6z6Y!4E3VrW~a>yyY7EGK058ZHFC6 zd9)^f|0U)5M-v-IR@}X#e6>k0!29;?pY3WvuBVwq8Eg`=ov8d?CPR*XEs_dJJXM9N ze7t-prlGU&wjB2oL*?{rk!LOCHe4wne7RX$PYbxr_N%!n^yQS zn@S?4wja6QsNVdu#$2k5O3-})%Weh@_Z{s$nik6tCvn^IVm{aM@+cD=aOd;-W@Lc( z!G|UnKdEIcF&0(0sR`AH%lGGkGc!b|u$5gW^2xNKn^prb8K(T*t;EG<-VM1`IaFS) zjMKf;cWNAGOvm=iwRl=zIp{SyL^jpn{DBZ9(aDE1sj|hga2y7B4{lik)p&+>pHJ|t z6nhraj2oNG8;_T%K%aYd34Hza-#oT%dl{`iH$>@STW&ldDEOmW_EaA^a%HhI-ou}{NSd!BH1CS&|;D%&}-hfQ}6`8(Pl6ZjXs*QR`OdaAkDNjtA~@~ zv#1^#soVZ|KlVEr};U0V1&40pPsq2mN(yAmSOESzf;;3=!=wU*}`?_g9RVTd4 zuXYP8`bxX0Y8z{9@6A~L8B{5u2^8KbKbYUy*M*6b%$JQJe82yUO1%-PUOvtE-OtJ; 
zb|786{qZ!4uwQ?@W5ep!XjRrVbZiCeD)ld~1-@Jm>Xf0PK65(1$^U5iSa~tYXO(bR zU(+k%7Tswuf-%+whA*wm83;IzP9YF@z$E4V5S_2`6C#?jeHkn=$tRt;H+6Ny$REfo z_aOq!f2KykFNR~r4-ID+#5!34|J3=5!iPc%;IP05+V=lSnvUv;@54IpSDMA{P9iOv z5mMC+UZ<&)SV00ef6ocdmN5XwxTOk_nc$7GbnKa6)Q?j0We#(YjB%&1$jaJiN?GsL zsUVwRX9se#10Q=C5vVh_EhyB);w_cR6VT0cocmL2aBZd)`Ah7+l;Uo82Kj49$94UC zh`{JoJ}`VBNvN^GLHSCvhhRc%!D`#Rv7oW|WG)WxDa$`0r;UtsXTE^u`**w&=(J5x zAEu4qcJ153q+*F8@&0rKOl=1otR}kkGznFlXjY){B9!ughV(56R*}%a{60d0s9RKf z;;&!;ON>rsJfDHy0>BZmL9P=x0X4u@VM#x!{w_VJZ#!OS$-YILM=QxW;zMHO$7}nd z2qS4?20PxzCo0i#96K8$=xpgp18A>*`s+?x^?qj*kG+Cc{d-x)Wcxr${oRu0O1lgBK^#Gyi^hDJQPQ@;__%tVC2mK|P8p}WBi z6pB{EdULzj8-f%-icdpF(Vq71n`PK-ICuKJyu49op8oW8`z4wJl1AMRt)u*vvXO$2 zL4fCL1tu7o0YeVKmk)}$@UgSY&NICtj;gU!n_0ge>(mlN{_JZ6R7MY+ueL%$TGjEg z;kfPELhU!~8qVk}ch~-#@{e>S*1bAT%6ZmX9*<>7FK2=Mj_d8z1h|P;mGNG&(@YD& zzR0HIa7535j+A zlvajLwfxE`wkaBl5hmxiBiJU`VV7`zTH19A*E=<=17`V-a!vDmjktFYmlGuuY?71j zXfPY9G0%{H2a-1-Djma|2N~j7pI8g{wP}G9Y>qI&31>xM7RCnR#nbUstp8N^o&%oV zXk5~uwunu{$2G<2vpu|MD$V7UZ)Z zZZo1fPkS@hk#$v>f_{%{b=?y@aZfsIpVB@spzCP|T$*oGFZdiy1gin2Y~M{uFqsBO zwT*pytvzOZgiH0@emYK%VJEog%p3D?DCNcW&ije<{*c#UzC;8wWGkR*(Eyw%^&#!; z<#^?jv^MPdF%sU30Y3OVr@qDjsH>5f^(Q@;R+zwzZcGXzUTc|Hcyisv!x!KXo0h};ZyT9K%1EzcO(rsFS8Y41^zpks^Ky1&ZF92{caaS=SVjQoa@!9m zAUDntt=cbAUFBNUlLwljmdOdDj`?(i@r#F80Do67y0-o)Jy1PJuv?bU{cO&#U+|m9 zAa4SM1KP)R7On=c#JnHsFfq`zJo;erPc$ENw)IN+z>25dl&LC?I(E{}+03B#eG5Hb zkQ7LW$pDenR%Q8J4M%r0g=ziwQ0$`^YcEH zikKosZ?_E&4&DFylXPA=0EdYc-a8>_PDW==aW5gKLZ=7+FNar`fp{iNVNyKn;^`)r zXX3ry!0~;~UH4V)p!xa1#peS4nrW>E>(2c)en?7k!%B-;XT{zOXzb#s?6w1p5-46% zjGs}aQw>&!I(9yWupU;}wa_jjm78_Z;JeZ+8=2|xa3Q#ZTi}9AL?o9c3s{>{vUq$j zcnJjykkzgeS706Ec$xj87?hppfWSf!bjBY)X3@TtCM3C4{*XbcAe>UKxD1B_p+$&cy#&egaIFm!g}G+jrG}wg8Drpqd#(hF%Iap_6Kd_d}u62aY6} z%9=caq-XClj;Fsrzkn(bjJLw$;>H#iF zgOwf?&0$|z&!o526_)U2_n}qHhm>Kn* z-z#5yys@~j-Dt4Y9pZL6;~!M~p1NX9fN0EHK8iVeYk1qg?;CRlI>rVVHsAf|R>;{H2GG|f`ZXo5}K+P&A>0Nou(|-gMn*Y-Tt+@x2Eyh52{o=*OpJ&V!iF=AjUdY4)kz 
zw}2&E#`Q^5&+NQ|7p600xQ_gDzWr2-TX{KJxNkZWsi~ut#u#>(s%q%Y-1diAz3iH3 zr?Q`T*#eH!YNZ7Vh%L|NduX)|ZHne>`B#OIt`i0NIUU=bKWQ49X}dq2nd>snnD$~S z*;Zby(23n?rd_qt2p?QV$}A;Nopo1lB!#_mP_`;>dhw2U*P580?DIv)+P4OD@3L_# ztl>X)!y4=alL<<8cfY%}-Zf)nFGAg`MJI5Mxmp>#7 zHTATXRW)i^RaNsC_oGC>8xJ-b*+Q7Vm>U%KOm_5{o?HIyRF(3Yis@*_L=6j;nU&kj z0jsUQZi&jBcd!gN@O|w2Aa=dJ?V3;C06Jlask>CDm^QA$1h@+JzWs zsC;S9L&Y6vQ)_QZQSG!M%L0>{_#s}Y=KN!UZHS=5jMNQg!#W85D_+p}+^rXCvzZJD zE}<7y@BPAC(yoKGSSn6&?pP#XHR1--yXNH0W{a0; z7zNdZ)!tNM!tIODvj;-B0dL7{ViR~rey}B;CT!-Hye2$tbi1h-W)#_svN`uur@N7S zo?s3P-vL{#4I3{2(ZwtQywhiM66LV9G$4Imr=bvSLXNIF+b@FQd|3-ty|l3;wc(=u zTqTC_7hXmksipC=)%7c6$cx%QOwE%(b?v5>8Ku1)@-}J#_;TTM$6&l>$PqPFp%5CZ z*{aK24amZT&vIEjfAs6tVZs+L09Z}Dkc}n&)ZM#l@&#uKUc7qKVb}8Cxs!hv0Mph1b|_q0AV}lJ*lOX&{K2-%Q_SK&?+y-}mNt%&N1i9MVHlTV>unSi zz>r?6AAcO%qC`iWW4H*(M$N^>Nqrj_ZBH8 zymh{o@CJ`-P%j+`L|gc9f;t&poT=F~D75frd*ad~SQwr2r^yGPW%W(Zg#>v|-{F6> z7~m;@gJSFcl;lI&aL*STdebzzOiEG~#|i*Pfi7xoDf8>vE59uTjj&5Dm*3wB4Raqw zjd0HGZQpT?x-HGld@;SS==TYo&REE2X!s5dHtx`__8XTwzgnS^Hj)H4R~tPh5lG4V z5G~lUN5Q$_q?ul69;Bc%Q={TzTK4uY&(-;QM~(C{_tr@`I9S-6tC9TGhnVqc9zB(cwtXc@IuXS~aWm=4%!NnP(2c7jY#A@mII=RDDKdkhoPs`N<=UI#_x zmxouN!EvU;8QHJOjevDg7$;NKBLE-JKqIwiT&Hw1Q=5^;w*F)YVLzmrCxnOc@)mB$ zY#-$PCpf#+*P(e~XGc-`$5&=6+lk^IP$1mIV&pc99pjh1`BO-EHZi&nnX#~g?3TGe zmzI4dbY1{_Qs%u-VLY`3aK9A77=pj#0^Qg4ONt_Q1I``(k2hIQX3 zhkeHiHWqGSbfbUlm1A`j%k-rxDyjnoNFoJ#fka_TNa-KOl7iPvGN>gK2|$oiE1ArbP0aUuH=Bj>pSZn z@_rc^zrdLJMBZ-xDU_ryd`bR9;Qdnmi`d^eDTxU=@NM8;s!%;ewX9h*@^hpDDZ^} zUl0zgSdn4$3*3qV80Eaa6kC@pOz*qW1Q{}$UgwFkqT0d?Mx^XjzER<%l5C4x?#sw) zQ38ft#tkXxr*8-`XGT2qv<`S__iTWaG1fAd+Q!$(kI}bIvGkp3O#<(@T^w!@twTUw z6&nP8H=a(G=Wy4&b<#{R&fn=i*U7^6=(6=}c|=^*6NZi9{r7_13Ry*74LxvNOg?bg zg)jBZ8f)GycgEGNj8%rf_Ue`~&`mI3K2&&2d#G*e7*$anjQ9whNKitP+XE{XIv%3M=8`;?xJJ<_!1qVKQxkpnA5 zm1~$pH46S1Q7iT`WL-HKo^`kT0aWUvb~09OE$b1-fhdwpzNQAsT;;#pzM1)XtR8Hg zBP}$^K2!VT7x*Y8olsaGTNntEabeNAYNs>meETXw0b4L zJ>jFX7OKUBbd+eb4BYzpg9I>6G;{HgnqlMc|J4l$@SS-yB8s{1A_Lf4h+IsKkN;i$ 
z{T%xfFy|>wmz>%A_(^RIxK@S)W(L+dy&AJwU98ru*2S07R7+GC>sSPE8 z=zvMy(vbUS~D;zg>}#1&FKu7?Fxj8x{FWg4<*!~!P*?#|*J-4yHw=6$^m?gOBM zGk8JCn*6>JcTW9gth6wlZP$`QuYxq=#EFA=6-}wjq}~WoC*GtTOpO^$TmJQevDkm2 zs%#}$po|*L7b)cg(yq2|ISawCI!9RsZTSUjHlJ2t*ZuIbvz9y>n!XBGgF@(_=-r*^gwZ4alV|fl|u{&2j(b%hL6k-DtT_aTWtx z0@ucYKa47@rL{A+4)GMz@BUsd@bTPvcJM1*2yx%Cvwb=960!~8E~>n^)jVs7PlZk8 zo}-H=O~(&2oukWBX(mAkXkqHn61+S}={z)-1(oxca4Us^OtWLivQ+{$r&-JSi6E9u z)=0>fSCZGvCzA1i_Q`;|#E`}W3~w!0^>-%-4@@9Fg7Pm96Y_x3;qv==kqBr-#KMO5 z20}&-H1Hsp7cgLCCe+PS>|svC)`;oERoU}4oY2V&yuZ3)!TyY}$@Q;^L*!)%Y2z&} zyY25zOiWKh8)z)5-eB0_W23Z6wDoX^?CDLL^Y22=*{VJNOP`b-R0YQqDSk6%4$^FS=bT9)pZK6&U-yC8E zF`=@Ho#rbaroh1#T`>*?VC7@zN7=%bTN3D%Y&z=zGLjK)uXdP>nJQC`e!j(huAGX7 zABsw!6mRzwL%=H@ll%1i(wRl0Eov)E%qB(GS&}FWYoChHzPav-Hq86n4hzNJbG%`p zFAS4|0rT>*0@A+0rnJ?cewKsT7@f{UDV%z}E56+In~E@GkKY6=Cp62YM)myQ;EDv@>v|DnadsCmCh_2Xp?xiA2xo10mWI3V1s_eUZ&Q7V) zZpQ3_F7UIcMFsy`d+#GR;qnnBezYu$7BCur^;k(})Rj4-!AR1#yb#!dr`Yxc$h;f2 zr|1~*2{J&QIsnx>4cl)xc#SN>4bDauc7Ja;e%v_devL;;Dv$Cji#OEN$)K?FP%80x z{YcZK9&qzMzm#q|uX}9LdGz}JX!EbAjdcAz@IvlodKP1aX`aqw!u|8{&Vq}HsQHy} z!IwzaUN%>uj^M{|TIGRU0IxWKyy0yZq`3I=*6FwaU{uGCFIToIDdS4Vl(n7_rq*Cb z5R(tQC|0XbXGxgo61A)X&VBwZX2!=STnr{V6w&iJQ&wmdZgSwy%V^2n|kjMG?rKI@$g{`J_9Y9d54hUPj(kgt*!lLyn@&CkXH$;km&XC)b~ zeUiCd$S)1eA1&?qLBThU zp9y4=Q319xXisc;SQ)1-gkixKmsx$a56;fgFKNpX!IlC~O_VbnIz=^3npLbQaLxfz zGxFU(mN90Y%FSxaZz?jZ-M)5BPv~fJw+pPd*b_y^jCv{FE=Y zin3BRd_5Q$xIEx^?T=UmV9W?J>eXe4N8W4Z_y({f7>0(kou;wKvlC-{(V}J-?x6l+ zOr8tEQHiIFsn=U9j>)ue;nmpET~V=b?WbXOFRlh`TS2fnoQmPtu)6QaI4XoP+gdfq z;MzIuSJHiDV1=lgm6oc?(okl?`~X>Zlt@(^dD99wJZvT8yy$c)ZK7hiz0AG9OZs8D zEsWQKsgH|2Rgd~KELj#NT-J#eXfhfa96g19}oN87`Qd4}5G2Naz@UM1cP1+P;wIpHzJ zha|z$k$aCMxgumvt@qoqtcq_9$CNF;nt*pfD+3@rG2ec%n+WD>tav<;UyrmuGNs7u z8no*-y<*M<&BUDU1k+jgjW5u^CWFHl1_qwD%>*osvE~XlR{`bC7ipd-b>bg3+36vycU_RUf$Xcc>}`>82bnb}Umx zfM#uqm%L@c)(e38iK|4P=s0ab!%xnQ;9zc{xujqAk zj&K8Vi~Ei8vr{dyXofBmY6Ok3I)!0a$Rqa+9*jW0LNE98inuE^*}#f8S&DGr&P4v2 
z_hVw^!hzjE%L1BpBBIkv_4{0ytXMoxhKTMbg2}N36WJ-aw1O%7A9<`!=N4p;w`f!2_o}Ii34iHMXfGQ0 z)S~0k{!J1$ETtQH?W`=%ON4&cg(5N2jSf{*NBW|Mj}@AMV#}CM4}dme{~*-HvUvDmI7od)WsZ-6Z|DT_Wa;)V&o)i zXmWk5xQWOqE;xgovq`t|ja8%6G;C-tb=UuU8>E{SR9WO&|r``x#s9!zKZh>nUIAN!l>uI zRvX0&bJ_tjXrL+1LsjKF;u4|^uz`)}8ptGPI*;;pNMK10H1OaXE^MUFln{wq^dj%B zg7=rHfXmwp90!TS!d0epv!dIt9aY*UV3=A_&g{Af6fxyQXhf*&oDLr%C!ue61S4ve zWY&=@6pjn5reOBnsEgp>#(1T7c)NO91G|DTQ9*F2lHXhu01g;VW&`nVJ~z~{ltr06 zsl}$Awrex47IREzcbbMKCLV{QvF)PocxmdJp%w;mkwhBHOxX+M=2 zY&vUR+2mS0OTXr#TzTl3w!BbevN35};-P#=e45cpBmdXapihqz5EV)!G&-MK^8JG} zHhGN7A&~8UGNv%!)Oks{u}V`A>a#@Tf}u^m5)(`emR%aY=aVL*MuqkXK+(CqGzcpz zIZ6%eFUI-OPg%FGV23L%aaW$!(I+H?45s!=Tfl5xK-R2Zl+?b0osjTa7AtHE{t-tb zcn3z4HBxFh7ms9wr7EQNJ@}X@`(^Nzf1MYMRNYy)-K$ZV6xMa6VjU?V-cMI>!$h_6v%nL-bn2XkL9#lh5g3WjLUVr4r$p;`W<-f^PB3M z5bF7Fs#E-YD(5Gw`1v~>FPXkFKmT0vO+1FvLQynKCg{NGyw&gZb7(H?-I2iHkjP?e zR@ePje*1Z9gbC4T3t~YfG+&okVcgU&@iw2BF=ox z=peY0GH|TG=r_tX0WLsa#$6fRIcTaU333l5Niz}G}f<1!p#nqPM zUBca0#q9mUf;3632#~1Y#k@=-cuYmUtfLfIN-Fh~GCRHf^TEgec9x@|DDaohrueM@F{5T zr>?ye>PhMoI@W*$)lJtR_|Ky@;z5n;+KnMfe9H+@tE}h^Rq8Y9{$Oq>@o)EU&fI;Q zbdY)whm>V%n#lYcdGyeaLTLHA^h_7x8h&3V&T_q{!J=BK2`!;Zb@ZP z;XOasa%>bYlCZ%ea3agjaf;JxO#Z^i0^LVOf|!d=eDU>QeA(g6Rx^+9zk`E60BSkQ z;fDzS3+uPzy4f}2X2HGSXb3qD&;S0Rh0~vPIP;KmX7$hzcpgfCx^%y7I_ABVMuP?2 zDu2q`_Mn;eTpim9)Xre4!u}mZ4j5MPi(IvM7kL>21WjK{~20x6)w(v*v|qs6@ut28u?Pl$Es40IWYxL11b7RAlIaA z6LBI}aRdhlCUv$Et|7i)u;csX@6U@KOzmDMQ_;Di@l4ImHgJ2E88u!s<}b#7RI&NKF>jg1#Mb?Rigwe2+UZgII{vu-?Z@=2 zj4IcBsXt%d$g|+wqPAu0vZ|7Bt^a5n1qjAmBu%hWl!kiD5WfT_W-?rx>!y=WQpsa~ z_-f6o!qso*nbgAN@591fX!HYBNT)+*&>#QxJ35 z#H{#tV#0z6?M@?$!^yFiH_%LD$?VNYtIMe%r@19KK9F)ksA64r^}m3oUJ+>1G83xTGmX+DS{e$r$8+rPV6yYCKHe>XOi z1g!QJZiAgn;{O>eaK2Bt_u&#{r3vzG^}pVY$kJ%lQ)9y#p1;piz5 zf(4}H-!@(H&v%NQs>-xa3e^joWGnXEcf#(Cohzy_>D<^d39c4*aN(&*E5gMRrEYQa zCre%ggMaU9xVfJyp4bEByU*wEcV7aRvu63>zm`fuf+s`%P?LaQd1G=Orn zgT827loq^*gauaAhP(e|iRuCZJ>(LnC+fnu#vkqv5GelL`gM%-ePsak3>fZdYiHSF zXl1La`TH832u{NY@=`Q~u#3xnwF#7Sg`!BIx)2Vh@Ff7V=F!r`$=FLl9@Z+-~0>Edm 
z1*BSYOV0H?4I_7dW(YCX>Y*^rML6%5S7UsF2^Ul+_w4&e7)3}%SddEvGTr(AM+^Ty zacoO~=m%XK7zCXsdrZFhGO}`Vq;m}Nzvcr_$t_7H_FeLBDbLW+V>w;u@%0M(hoGc;mqWp0qE`UEzcksfPNT1|e@BhOC19pjO_elDVa zhnbCU541L961(h@~wvP_B(lgv`8oK>wV-rw1n@a~nK^bG6>!aLqmZVPNf)L~58AQq3G zwCl386mPhhT1&3V*yuLcrOgTzQIauH+ly(Ix#O`DrYF%6fnJ41f% z(FRr^iUes?+vspPtpjFD%Kmk-ko&MM+Fo_J2!DNkzuE?@_K(d@N8BiaqW=+$I|246 ziU?E#F`#IIcbR)%N|nQbjv1N-0w zl@Ppbs9+Zj5e!M2Sl5CWzVrd?W+;@b+HSnZNk1(sNSF|H(odaJ*@~tcrwbK>5{1I0 zUt<2oaaV?k=a1q!Z%X`iOWD#-&k;Y!(1(7}5HH$%(QH%NckAH}pwhVsR?oEwxDSq4 z_dXpj9x}ubiwh;d#j(%3DMdjq2KW%>)UuyNW|5P(bXZX%)veF3x;(QQ)#$s#6wY|^Vq9%;UtF2U9@pufAY(}`Iy980o zaOBdD`7h$ZvVBU}OOA88Bm9$(VIL zW7S!^H=24pQ&24z!&>GVy38;+_C37bFvNl z^RpQ3b6y2wuXICPNC4f9!4wk&+O%Jh^-Lf)vV8`Tk}U}tyXvdoJ7cJFj#G!OkmqK# z>JUmj0|a^jj@p5WA4J{w z<+_a*!gD8{HC4VdHzNm%79{!Hgx1~GYgs)wD3Z1FyN?QfK@88I57frqOb=gaj5vG! zXs(!DQYoB`RVMp7>R^c3<}yZaT`A^=4s%eH-=Uhq3^X}|X$|I;F^Yb6`Q0v4x#M$T z3Py8v}gfPOo*z}Inqt3Um@=%Zu)|l_Jl45Z%=}B?_XHmK( z_(#gP&&qBDDQ5vwag)|O%__tt`Zbi;U69Fz%b?^77j_^0Opg!b)+pcC(9jIw@UA3S z5Tjs8acPWcD|_#YqD*~8=lRlW;kbATi{uY5h3@!Wgnn0^519terMv!U zY+q@XV7ycd{bwGS4yviC;vDJj`?tgP**wSrg=HUwSYT8_dM#$z^cMjfGSZj2%}OFj z2!Q{lrYpCL6io9b_H=4wRiG;7uEg$v*`Yx*fWG7k#vw{`hyPv{rxZ#UxRQ|M8`A>Q zF`W5KbXNSJnm)9X$@ETSd8Tqlg48U|{fDw}YW0A!&nQCqe2N^dQb_`0uO;Zq%+yuU z{v`P#!mIJ)Fc6;ZnM8}zLe)uZ_zVjysrxqt^i26l^s#b~7sRX#JeG%*C5(NKo)w3k zs365UlZ34+LBh4I+lU@Rg$1U`GW@CHP;oI7Hj!a0uXNMp*EHzYw5)cm0WzHXmW_q! 
z?Q9H~j^H@*OBEFp?h}nd`Ha{#vPK~3q~E8PG4>jbye)o)CH}&w?AaC%OL=ow3jHVV zZppmq7c+mcIbPyXg&@%HXG=w^N$n~RT!~z(tJiDz)Q4r7cDgs5hk(W4V&Ah%RQQQE zwoa-{uKO$v8>#gX$;Wld74Kc8Adrn{PuZGAaOyu27Ld2jIE9uMcorco=m+ku4HgEu zoa^e(=AS!r{rs9kwY9$t|3jmFPxha2LL?bIn-_6BoBxuoTS^Hs#L{5fXJ%`Zl}v9j zA|M1kqU2PI(+NSlWQDP*ds%f3`bEqJp=eq7^oqDrFf6Q8uu*#%d$mnoNYL}jfU+am zEHnI%%HX#K4hNp#^tcGJOD~2?KSSyzzg1WK{Fj)TYOXMHcz$ca`E_Fg%HaBT^-myF zlnBsf-+E@6bN4(UG@^#ti0W0Bp{Yl?O{y_3#+DvFV$hT+wnw!TM{m3RTCY+sVz8Ly zG5L`~F7Ja`W*&4Qn0bC=qa(XM{G@r`x^ooqmoLIEJ!Qc<5B3dOFMoPq+>ufcFMNLceLNC7%}@JxD54j2WD zFR+MDT9AO02v9uIyc$a0bC~MAJ5WZkjTX1V@ z&H1)>W$KR$0Q!*`C=|muj5UR<6ZJtj!)iC&9G5ZSddl>mmV*iRw;}74Xjtg$9sM;N zb4yDm=t68jb!@CeP1oZb*&geTc*gg);roNCG0wgEg}^?=zol`|hc<0MQR-MhCR}>_ zN3C0O>GrjIszAhd@xsM9?MTMY1~`nh9JG}AzXtRF zI;o>RztF8fqwHW6Q69Al%3-|jT)SHUU^*WrCUs6Sgx#l(_zZ=+>HDw|IqG2<>9zps7Y&jp3DuoeXE-^DH`mEFX?nt9hn zy5-_C3&o*?Po;=M>#0Eo>cW-sVeRCX_b>WuKe0uA;Ps^L7b>Hu1>(MFS14-JZc zq9aU@bNlIg2LV54mr!3?t?HFJyej$_30sEaSH$&{UWAmvTb7M1qa+ZIC&%g^g0uv+ zlItbPJ0A5BUr|jD$85dwD%?yhLZVBQ0X%hY(K;>rRAF3|E3R_;`fPe_ykIF)xQvmu z`#T6(F5r&Ts+4hfa`0}gyKFS}I;Tu|Pe%<1SGK?Mp7y?f38~H~K5s3;Br&iM!@lsp zy~yV#HKiPVtRecgiXCD2J?xlLtNN`wvo{^!9~h`L`g@>=TXUJhb_Z`R%QvAo&!rqp z{RIEp=k!-c*oA8XPCf@)Y|-fd z^a3DCkV0znj+#drV9bQxALkY0-^i=7VQi`iL_HW!T!$$Dg=biRHnzRGSl)flQ&Ed_(oZgZVIkg=b(5|+EyWx1(SAAC9q54J)}r6Gj%ii3LGjB9 zX3H2(@rafZYNk1mgNBF|cz#p}6-eQL4nNA*>w4iXxWEV3_=x>!Srxe9?ygk@-PAG{ zHz}$A=}Y+K@!dG-W!VLHrAgm=DOqdzNJRI7JZ8I_QUuDbfPrz7(}7kF`yhW!R9^Ta zb^i+6zNI$bW%F{!BWqu9QM`;mWi_);_-(~vUQjkXI^hsT+be={Du(WE9_H^J&?L~G z5o0!A7S+g-@B7iXqzj(YDM(a6D7UqMbStzbt5vuKE*PW+Ixt#h6%sP=b!?SUe1|Vr z~?3j?{LuEjNX?=m5Kc$h6__#aSz4)jRg$*QU#M5H+d*)Yq(}MztH1V@%PVYK8uQT_T%XOvgn*9FqjxMB;sZP$t%B zOmGc7Q%}h&am(lU3=4#(=%ITGAYs+Y{j-tEeX4JuI*hG>hD}m)`QkWowJPCkj_hWS z5@>~v3TRsE?>s8{WN zp;2B%L(T`M#bv#_-N3OD%Pfl{$f&K~%*eXo@%5EMIm`8QlN36`tElsT;uLH*kyqt) z$aj02sw72&0$~kRV`kBNTxK#+(8?c{Q@kM9ReP&4d=t1h%HI<%lszl-GULLc% zI*)hTZrx>faJ-3Et;$TTagwUarbVInWU*D;bF!B)c(8)KSHwk=&o%5+@!hxa3*#pm 
zW%12UoXZ!ps5(B3%m#r?7w~oC@g;1tjLF%CIIHSAD|n$GA^9i8R!&AXlv-m=MW>Ap zOt8G^(n|UrI>C`w)e9vKYlb}Izo-Ri5BZxBAr}Q_nbh%PIH*fbz?pJ7bFTU`)ffnd zEDn3k=WCBiDH9iJqLo$t_pdhd8gwf=F~(KV=odF|7-dR2H9R#F0ty_9+oC^rT>#7+I9EbR;Jt}2J%AtK+#hAd(e!M zup)%NSwo4PmJl9GgD|~x>5jQ=Y&vFy?l#zKm7|Cywy@jPv$DRc)FCoMHBb}}_9VOb zU)|4QRm~nNTpr5B4?3zA`FQw6oi`;~-ki^Kb?pN8_q0JSYg8asG}~9eIp;@3J+($K zECWL@%YR0VSs&oohUXEBC}0%(flj^y=};w<&RF(DMKG^^tGckoJ^xbd)9A;`Vhv4x`NB z`@&!kyua{mglwh27!PhB%cEAfTDd?n0otmlJK1~ z-d-=Ig{&gAI?!i=!Jfka{b#5LVW`4f5epof{b>@(UVNyu2`Nv9=> zZE?XlQH=Sv8R25t@G~TRkuOT?DBXq`y~->v1PIJpa~NbJ6kWfQ^w-qPtGS&%l_bYt$MY5 z;!(_-FMoGsAaL{_2wu*8t!g*)hVhN^bWH^hNN`hA(_n+$!dmah4n|k~iDEEOe(^T1azB1I0h&K6DE=i-dt$P83TI>Z1#0n{cD;SA}w@oq70%PbCNpVIjm)BpM# zvmIuh3z=XhsCxgPemV78mT{(vXhBBMI^+q~aL$ZHgp+;lq^Af99S*an>0agxmxiJ( z7kb|h%@1Xg&;h+A={DSebE8o`*>V~Gwm+T5yU?eue()9?Tu5+QN>Gx#gV+UKxS5ZHzXS2vTeLud& zcHY7PssrCMZ?bJ~cI-QOvdJH`57oMqnM@C?C^?7i)w`N}zfcf6Bho)0)Oj$fyLNhM z6{jmQ*7L5>|0FlaKxE}jWGp#wE4;(A%=^E0PXASGvuoh&3*aF^cx2E)mZ4(Dcb$Ke zMeMr{p=F)#JPsq%iP&*qK}=L*G1H2ltoRc}%>TYR>sG$q{)&&8V~0Mp!WZXeCt0z) zjAYGtxLTOFxm8XbtWml)7Iu!*_dAa+#Cajpv=sjFH5O?3$kwV%VtnE#BXDLrXo`O- zGA!@K6g88q3!tHwNHjo{8=00B)(KEZ&Z7(bIKlnQ9uc-d$U!NHmx|D7d zkq#+o2T5s$ZV(YELAn`0M5MdBySoMvP+Gbfq`PaV%lC)7*1c=p&;1k5I_tg9`|SPf z=XDj134CQwKi&i(NcrDCz)AjQf!NSK3&^2B{^N$bwY#{^Yh;~+Wr{^=RNF#aUIHZT6aY^Oss#`Kz!K7ZZKYzt)`-K?tcY`UdPkf zK~?ql@NQqZPw?%$X7}OPEZ^OaT8a~NhGPp+q>BhX#hU6UNb4+uO%q+o>MgfEW2zb> z1$OW+8Qz#l()oSIK_*d`<%fHGEgjNF!r2NhwCFX&`vkaFllm?K~%qNQ;8CuR3C!)CulNaGt|#DQomT_&uXikMHf zW+!T{7vS5BmsuWrA&*)~K&~n|VcG0FMn_MFM2M0(xT36}Jl3X~wWO|NWFm(V8HR*F zQr+kir@(AJgCM(Zf3!A7+bKNEkhP>5s@Y2$VFsEX01%->Eg%GV4XU&Cm45|QHNL2j z>$GI6+%96o`VkWEA+*8N>kq2BktuO&{o2q^1#@XE}(jx*mJd5CR`49e@=dC^#OJk z_)0nQLlamk{-1<-Qo@gLXWi-X1`~~lF$cEZdv!|H%?YxCsnUtg#blZW$E43LMvaw) zWhF+!Uk%vEMB^DLRvadR@Sq*Bu&)Bnrd z0!A!ew7?T5vZ29BigpZ%R^sIs!yneoYWJ*3o4!i!zmSbqNZ*!MR4TFbc^kWRxV`hBo>sY!+&>Jp)Dx0=L! 
zof)EX<0eEp=Oo1Ii+6XYu78TYL&oz67V*^Tu@v44oCY&FvCK~h?l`gh+EUG9#lMo9 zb1KFp{eZUSvaFez`t*Bu1EmrghaBamRoAxlYRC>v)Fx^MZi3knm6OU}k^DBHA!jj7 z*nfZGB_L==I3oj6B|U4&nM7z#L zneRnwu)dx|aG*hRsC1=DjLs?9MY!`Z4&tZ-&JH`=S`Fu0KL$x`2MNEy?2Wq6p>-%h zQdtA^Vl&e>*}HMRgg|9wqWB~e1?wm-Al5<=zaP$t>GX?=UJ?!Waa@>1ER+QG*L9lu z2u^%XDqdh}MxgfEwgj=ID`MNLhefg+CQoAX(8ip*F2cH;VJ zT{H(FO`aHDvnGiOuCy6n z*Q)xM_Qf;HJvWRBK)JC&PKZcrDl7}W69vxb%Gy#V3O#NZMEp6AmZip0(%+M}r)Mv} zDU;-^;-_ZrzyfH5tJ=*mv9`p@CeEoON6CStgu)dpZ3f!Lh!tL<;j=?Hdm^nv6x8`U z4*WVEb)9GWHWE%q{GW4}>K5-`u)z(+cFFdwFlPI}*7M5f*JljeFRjE=O9%Ou#?|L73;g{i<+hMC^Ml&Kt$u zAfX7fm;dTyv7cD~mM0yU=52N$Ch9ti+t?1rf`Hi)k zvq;T8b5F>RQcC9RNtI(0_fKD@s%7q*|KU~&;?s+(s4w^k*{I|G>DpsHjDN4rRjatO zpPeDUZejosxftYy{sOUL=8vcUt03qhnx@_)=~tu~)$)A`B<(`L6%s$6G&}s5{ix+c zGT+1Mqy-KZ5`gqsP6Wwdv#(=O+$lu7Nvpq8F7|7<@l}3Sn?6fy#4XXSHneu{Go1ag zu7oJq9MyEJ6r8_I2iQ);<51nmrI)-)YrxfNf7AEw2aXUNW1p1h`3i;|L*XUKc`Wp3 zhSgurf`#Z+N0f-wn6{m?HPgR!Oj(6@<3>6L>;morM8h5}>3gJqxNQuAa3;kpkFS5= zP;{2>*#Q^`a=28!Y-N%$!FpK1VJurcXA<}ME69`d1g#wllv_aoMgs^-6#Sg@V^}ab z&XF+3=kB7PZ;^;M6B(8_)c$w>P8zZD!^%#V^k-kQIHE@tVKxzO(C>W&uBGb!pT&7DMw9^ zyVcO8)$TxEvDg?55Vn9(Z8{37^}!Nq7=Yny1ZP(q6Vs6v)eTB8Pkxi^PVFL7a?-M<7d-ESdkU-CZc%Zf*i9ku6qo<)7uQsz!7!Y>m9f zA;BQ12$3gBBKm=pU%`JDQg=G@lp^=eCX;w9Zs&DstLXZiXrtmy%9vy*O1d{th`jc7 zv{1mOYm*65N-Ebn%a-@i>iq%ue~Mc}8f+`7eK1GUMw$ zLC&;hs9`#tysK@XYz3(T;=vqhz+tgOm!^-O32^n+Dz|a!wZQ#^>ie^+ zl$?>^nrT|Zs9=qARzz4CxLG-(76*=fVcI5|1rqSw4}`yt+u_=eIwigNbJk5K@XFiq zbLk>SpF>iwL!^K(%Dir#M)WA%_R;e5C45((o~*^n%hLL$!IX1^guCzflqN~)J%>}~ z^Hv*tKJiO&8L>Xg!_ zX-s`;S$=+)u{P{enaNAMj8FIlFRR|0@g6BS(MFN{+joiBejV3jHw!-)&DXApP0)lm z2!1CRpwS zy(gK*y+Y!#){wn`%jWQvS7=U46%X^;&!&qH;Cwsv8Zl z;*Ot`+)tF|;Q9kZiQc&um-F&qZ@7C*3$=M~-^o?Gsv(Kp#eN@K*w4Ux>xMy@9r8$m zkS}&4F)f{dCI_hvLL0{QO>KkUdK*rE1ry;WajX+1xhnZIQHctC!hNe-XCna=Fb+M) zkEb%sPgy}Ahl!0}?;)fnD<7;t?q+MAU@%B)3^LIgOi^7 zQwW0f>NqW8>o$duwy9_MxoLTh_ZMZTcxxApbJSX$i|j57V_{sW2owq3VcX<_!^ zymADnW0C*+o3#7SW!1Rkt_EuJTlLjucNLI*dVKaS5P-wVMsf<6>vz;Wyy 
zu7aiNjlFW_M{xvpvSRef-RV`1LumG6laYsMEtRmu(jnAcMVt|h|vu2jX+g)|GHuX$~Z}L342XLj!Cu{ z7!1RTcQ%8Ebh{GZD_St9MkwkU&;?eLagb_FITVI@@ZNuQoTvB>(bf1$0Kf+W5a=pV zt_D@7(%YG>EgJ?U;;pR}aO~OoKHW##Qnr}Ut$LPn=HosAe)^|}Lo`#QW}cn}!B0Bu zsi(bf(YZ#3^JYXjnH5%Uu8!1vR3&MI4%JhbX9}IX23PYM^Q|~0KF6!c7))GOHw0@8 zZhC73s*63JcmQ>vi{J$_c9F40LOkVQ&rG%sv*G`u7_f?SW_!<#_(TJ3np(@H78Mb= zJIfu`=M|xXc35KsXDt$2xJbLLb}$prfpK9?ou9o=MY8+4hH*QKTp+Ff4as;Ov=f$x z^L9;O3=Q}G!1u_E)j%|xY}m|r^ZE@xFt{$v$#Dl=r6v;4P%uV{7;Sd7*PirjE*Bjf z`oiVxMp^>J(_jfFU<||n!*}MxSjUbsAFK7rHcuvt&v(|zu=bKpQli|m>oQUFyoHX( zSB5xD*llzr%^Qn$H1shzWswBV1_Cje*Y6wM1nASHtEOs!DwcmrlCK{tG`4MKJpQB+ z{io0SSVvUAY2Gc^NjC_gt@M~sH0^~KD8wP1Zh(fw`j|EH*B(2=Y^an-u>Tp)H0XV@ zj=853%0kLi)!LV9YG5z2`WG{6K>`OtXP=DqoBx&mc6rsbZ6wZA{Q{#}*(w4n95QrF z3;I&{T6VEz?aKHWo{ca$3_Z4ZRl(hUYO*7Aip}EGBhO>+>Sq_VP09m1urwi#NXST? zAM-ufYQe%7K!(b+d)B}mDsz2UF{Ev>kd;$y)$z^fWDdhE5?R?WH8+$~_+i2~rtK`! zt86xG&P6Ziy*g-m;`b{Rr@Nssa!DFJmfr#Xlt5+6@>umdH03FLnErUA{D`0Mw09OB zoi;fxuc)guF!PQAUX$#ZTc9(QrZh&k+=&yy_TO9BFgQGu`D#SZMn9yWb` zL?R=>05y+jx~uobjnr&XmQc%QozDH8fT`{A-JG4qxSu!O+OrEQOBTgrmsGO=(sJIVFni0N#5#~vqy6yrvv`3Im zkIlPVjffSN1YpZ?4DU7lzzG9?LKi9lW)#GP>kCe>jPJC!Ga>N#&YKD)y3AIBf^p6P zE33W~Et-%QyEZ?kEWDv59(y7cVq6oBlCRG2s&n7*mgv!8II_Ub^2lh3+&x)0U)1}5 ziTP*9Oj`;#=v)(uvZ%`|KDq!cQf+F!oh0e7FU(Y@B__IqS4-~JIu(VLRx43{+rRKw zEu!jqw9@J{STNMA-+`n(yNZ#A&aT-$MP${QowUN{H;z{qc__WuSXX5MVA>PK3HgUt zpppIa0N)EuReQ6sOG`vy)fS9`fWkJ}qEOqw5e)R3GB)K^n2z{Kv0q&*f0@3rT>3{_ zJGb)jud)o_5(7XAbDdAcL^z|e7aXYlK#(sV<7D(0K=w~6KbS6J?N_03PKM47IW#1K zBuYIp#yMlJHH9sZ@VhQge1uau9~pu@#!AjkYZzDPb)qhX->;fz;*N#;oajp{tItmXp4Hz9CcTtZU!Aj( z!BkDZt3?7Ni;A8H+0lxq9Ay;ib`K9e`?i-SOooo}AzYR`K*gxo&n@(uiXM5EbUH;W zCR&PQ$D`WJv`BGY_Def}IC?^JWqqh*&<@l_;EYNUeJb&lY1!mst@ZEhK6NUFaQlcG z@#6Dv7iUuJwaT0{7IVnbQ9MQ_SWFCJVi&j7cceuhi0vyYTf;D&7B^cqi1R1k_C; z*A#CaTvUu%*<*`sAC|jxu?)or1m?kIIR`HUw1h`-Nk8Tt1mPDq6pt4w^sq#=m1~ZQ zOUk~ZVE4l{=(XV5Jnbp8|C2hc35bUC`9K9Len`tMU2hk8U$3X1EH2(uoT`YRwsy;~ z(RpNot3-Gh)vgAaFiK+FEI7ClNU3>;EQq 
zI+wPcE-BZaLs^H+7Ym;s{rRf~TxFn89iS@WLoiPA{KN%yPVh8lDi{&#$N=2xo=&!0 zNJ2p_LpCB-mdja@?stC$Ji}P&d2rM**LeW<9Y#wS7Sd*2WXD2MJpree-K98psr_Ei zJrqfdspwt-0@p6p4kYb9k9t0>x-3@38LP-5Cgp*zn|o0vLEN@BlL&JdHzsD?v-GqW zdI@EQRoT37Qhdg+Ncm~_nHHZztdlxXq9ngvzc^v}KasgS-j_mI)PSx4d_)x;e%XhC zFsHSwcnX?JpThe#6c0MOs8B%Pv%V*(qO~oG1DzVY6p27BerQ$`ahF_99^=ulOqpwA zjF(3GBs+FWI-~(kUS;}i0P^!Jk?zuhrY;GaX}^?OuFy1M%e;C+M&bDP(5Co-;Yt9Z;!++PFw`3EJS`*yadKxgQpoCnJ%w7UagUlLif z(ordJxDsUl_#K}1T_dhjv1_k4)`IIpINwE6xb3S@Ee4Q7+9}frUYOI~w~cK8F;^(T zka`9c+;-{^jUDsUA<`|*!rY;;v)~KyJjq@dN$tq=KCy*CAJ$&l6U_X_5>zkvfsJDn z)#|s5s_m?!UORh#(dfu^+f!R{^Eil76~*n?EV=F2Jh8MNBY#v0x6Bnn-!;LPe&TvY z%X}H0(@Xr3jn1oetxp@^>ih>Jv-pP<4$co7d(-eRO`d;~TG>DO_(w<<*F^HYs{fsF zhkQ9XLtlz!68eEy+PKz^4gK6xVEk`4eKLXJ(rR0IK|KXi`q#OFpUad4>cVvX2y@X> zc_#M0)Oh(4@$V=Nhl*Uw#z>81&%77qcouefQOYcdbKN_0p$SG{(^URv##PhIAfC<7 zX~`-RCHIIx;nxc^+sqo%eY{Kx#Uwql&zt z-t6?vk0USC0?ryr&ycJ|M`FRo$fk93mx$t4=agAX+tWO-J1JnZs>Eq^tc3B6V~%|D z+Hj}8q~Ss!=AWqb`3gD&s4Px5a2%_MZqdm4i~+>P>>Jkm2ZO1kSOeuIAllZ0(HZ8# zeT=Qb+j>&iR01m9Duz;h7mD`oTH7-iJ30tZI{RqS#YeoXwluZnM3Q0~#=IaHeBhlAK*PYA zoWe}}7@>vfN%tx*C(eyjTF>}=`9Ql4p4HC5a}RkAn%JD_;a64;it|~W zzP3AK5Hwe4Nr+j1{)q|s!tkNyHLJA-nQm;bwt#DpMzzIYRj6&L$c3&z)<~X^L~|1h zCV2E_hxkS$GB-EZ@g;>18w?Ow?U~I)=`p1-1rs+vEu6P0Fp%0!+Q% zCDX^s?Y+;wY%extUu$1n5UD~}%dphIUdCGK3@wS&E{pYFk>pGqHbql_iw5)0nqIg)QuveMz=}J51`rmdh z0^TpZpvNSEEXbGF=+~9_g8hbF0>OYOLMS2_rM*z1DxItXx{);cErYUw zg}|8&RP^}m4j*p{OZ_oLPaR@U^YkQtirf-F-jaNMi(_U<_;+S2J7&}P?cuy9{i(@d z7ugUMAeZ$=b32pS90sk74~;E#bbe$7BZsy6?%(#cjV9p%WxBYt?&Y>7#A?fAxAXh) z<#yq$5YqDEE>Oc>Z1dNf^lK%j2$Dv)1&lW{lTBVWpQwOwVv&PBrPjOhOTLeWj7u=}G;RBv>+>(LP*@!}&d zg&JkHZEh1zG4BgAC*G5s}bMG^9iQ{gL^Kzh5}6x5w7CID=vCj>&3KXJuyF?{~+T*znTv{*Jk$ zHsPS6JKup0bD4}ZRpM!axc8e&X4?;|miguUiF}j2G_hn_#{7k4Qg_|HHf*rgevLd# z2L0`r?5}u2J&cCG+XyFByv+?MhUG<1OQ@J0$&A8jva)dE*tzSJSv3}AAt$M_h z3Z-+ge(R1i>*&Ft$(c(h_H&dDQoH=~qc60>{=t^ch7@R}UsqpasJN+nFTC{*2+cD( zP8P4h{Y&lGLbg+>IoE3e;qO^7c!uk8JSd!w`E;{At@&gvV$GlK9Vy-|twR1I8B#rF 
z9>L3brkVAvZj^H@z9k}a$=n7}L(>W$daOElJ$h)(c!%eSU1&Z^HwhEgY&^5D@C{e{ zoD17O9^66s6On*vM)s(RmcByy&ubg1eb+RWyRB$wKjUPAxDHX!xco^b5`S^=95+|4 zr-P>&KU@<2?r*~1-(krPM6G}9{_PdB12e67)@gUFD@O|SB}6@+E$^$Ef}Yu~gzcvb+W0So}bsh*R~0qF_Mp^y41>nfC3vD&HkgbV`PuEDys%~eX&#Fu5`CCg%u?t zpbNO^>s!xYIAcuj{vY~5OyUb_6Pq=&__o}d8;{U#5nYEk-H+dXgqWyA zBpRp6KMCuP_H5nb3#gID{jAwTlGqv9MSrTrjphqJ68*r=kd7vK#j>&Z+xCeMvBkb- z&?o=*^uAYS*XNbKS`|j~4LeV+u+t=9%dIy4#Nt~;5k?@4xwUxU& z$Gpv~c6C%q^^%rIXu_wQ={dXt2LmsHuvZ0IBlEx9kyI{9B)dB!6Ve!fD2sHbO{}*5 zY3**_?o69VDj3Iki+WxcM1yl%1ljvDy6hpy|A{&JoHy-{+BirEgyuix%c~-9QMOCh z+|kBRHT6WgnD0^Ubtm>a%DjoJ8m-@%yX)@j0XtU8)+e#QOS5@wlGzR8{%YGh7xJgN zig*7?(&$wCt9#F9XRD`*iAyza0)FoX{6^b%G779HMO2+H`p(YM{yxCz*orlK%i0TZ zO5TB2vXoTjab>8ss$hzXbWYq?#635bR3nGFY;Z~1O#jlVN|ry}5G>Ae>42`_@A#m7 zob1jcdzr;}qE~ytY35DA2R2@y_s>=PoEf+QD(|Ah6j{ImI7C&j2gi=LV*!Jy7u_bP zII00?riak@+s2~gG#mZ1Wy6i8H>SRjJjYBO$r67a>&1oPQ(HqzxvI14CmGNmcZY^V zx7|PLi>4MyM(H98;sHdC$?BYEwkTqp5+x5g}JJN!G@l-S=;t+yf(z)X(pJbl>owB!LmZoiwb(tCcO3Tcva$sE*#zVR7JiF7 ztv9dE6bo_<7+7RXMVl9gcZWO`8ju zL5|wQVgl}wNVUUBRRmrO0dw$qb@vncgLU_BubxAnd7qxAG6BZ?Ffda3d8lBc`9eb& z6i)B?@BwE_Q*Z^Xv4(Q%`{Gl8`c@mDDb^=fEj{q~SguBpOhjd2WT$Hm*ZlCRtcjR|K+TR^;|6&MQd~JIa7*csQk9=m|A+-zH*N5*yDv zxP*!Gw~k{^^34tBj!GN&N|?POoIi#Z%&zK#x1H;8iV)WUv;$ z*tZ<1zLLTg?)bypJGu4yz~J@!dr1#xXBgdXn{10KLtG>dwA!e;J`0+zL)>uOcoPx- zexhW`@b_qxay*f?1BQTk!LyI9BzfCbOS7{}EQgCNtbliWo7=vthYs6c123oiAKJ?a zoQ;A@tf{;7aZ|`|c|WN0YqO;;<=#Il;Vu>28G0mq8A|xa=IMQO8z8X)MErt~;UrN? 
z9rU~pea=Sis`gJP{u!rdefaC$^N#jmT<`bn2|7J7dZ=tN-P z_{sIANqnGOFdemcalXH~6Xe*v&FeF6P%ByqV21H)r;tMz{>~**agrNT$dm4aTq z7f;HZvE5wmSJ0{Eg*p5DcAv0iue-vzAAvwc;9Nr@j^V5(weQI>^Lng_Pgxm6pE@6m z0b~{v`nqjK#2u1X*oO^o6{_cvJiH9=}t`x#y7n^O&38w-8SrO zUNhwV^s9bEe1z!5-|5YSG=1em{on9qZ^r}e8<8Yw=;V1}aj_DW$Hph#w&0&Ly3__l z&R{23HMz3LSBY8)A^HN-T4e{W-*3L@YCMmUVy~WeC~>7Ami~px7DmEsdNmK3aXJ9q z8zQ(Rf?pNXl>vT%QYc;0Qd5|}qRf>WBG-dT5Q2&k6U$3glBLAH*SGmRV9)szJ#OzG1;sRltZEotp@_UVpspTgq=<6}|Cf<~E@|@8nCn zGb<>o;ORo)gC#zB-}D@nfd9LE@zwac{)rbVE=_1Ic!yH2l-;rd-L21?^xF@ApNs2s zz-wptBFJY6o)-lvW7$Gk7q#~-P&f{$-G4WLuW87dwW$g$m6$9uD zIrq(5uDH_j#RG2ZN-^Zvj&L_wh}_YEf~iBD%l?fww~DMK52CO+m}S=s3(hz4ef6|u zldB(is6J=OH&RFHfDpFy#RfVE$$~Sq7dc00DD_W3Cs&zzj_{ist7Wb zj1d4KD@^TmTztL2owV(2UIoKU*0{zi{LG9@H>y2xyUQE-vi_jYo3v9(E+46m#87k{|uU^t7*p?0KIZmVJO5qanZg3nob znLQ)8G!3+>0^|PHMUxs7@q%`785oaDk>sO9YFKrYxu|v$lBn(s=L+vd17^~#eA?Yq z_&Z7NLhK$iJLVSZy8DrW+D?(ao*?AU?%;rk;65-qU@8)mHiO}1_+h5J!Q?E^A&xrZ zc=NIO-MM~r>=!Ed>kF;v$MsW71X2eLVdyq>H>95q@0Z6ezkf{K0)01Fh^EV6IPrwl z7C9YV)k}4NO}oguj)$-EZ2vTOwnILUJ2)yFYF$z9bSFSDUF7bTRuxoy;9?rVeL_)z z7hMZGLnMwJG zp|2MrKcv7ltqGBQM?VDyLd^zj=0_6>4AG;EeIPIJ}a z;Ohx|3iF2H+O+3=MBZ6byaMvrbCN-B#n|5%=yr5+8>F9DS8>P>v^8>luV;kZ+)OWX zcHSdBueaJW#-wn?ij`ka+bgi%hQx(54x}Kx>=mIZ{g)SWO=m8T>L}AnM335h#d3%G zMIO(74j6;|%je`Dq94rq1ubMp~T;TDj86|&q(r*zH%y{(gRt8-wLaEuL z$x0&T>MSQY>w7V&aB)7t{K{>JsCk#(_ePu#e8KVTZt9G~p{R0^s(Ofg7^07#KWsDq zd!Si&aD~iTcsR&Ftc9#-C7yr7!}e7D7Fe!ao;pVc+#(5q<5>~i1ts(GqN?zcggIiQM*Q><7xU1h%xH0e31@ou+ud%6f9|f~psTub^?850S-Ib8m|Dy447+l6=Q(lugYAq?`HJEs4!zZo z#s#{ho#qtuOgWud&BMroxgnmw(_C?SEVVyl0=<_4J)=h)%_eR9`@HOc@Q6Tf^D@{~ zo5hxb8X0>w!Y5vgzsDdpCPR$XMbv-uWW$W1*-m$b+aLNlbDgFNJt2G8;z4B+?-$eW zQR}r&;{tKRDt5|mLZ}7>JL$BSyr2&DId=}eVI~2Ah8oF8EX`Z&$GjFG)e9ReF|!Hx zdJQtYumT1l9^UJDBlw0(0}hehW0Gz!&RxQO4gNy7Ux+^s4LQ}tz28}Q2PvOe^4Yi4 zxGQWe7r4(g+xE*6)|Ird5orVv+`iuSr9wez#J*RTp)NKS13i|d>aVyZf|wgV0Y~gq zWwUMnpPRcLw0ZdvrrUD9%A6~|@bgM%_oEHo8DP_R1WZmiPih2xuEjSJ*fTQd(E#o) 
z8TT-29Alb~EYY$oCn~}Dce9NatMm`~hKubw-@xg4gl%Jj7ti!p$XHp)3lj;X?`8Eg z-}qSoX;q=i)PFP!?ZKvLz1~v&({Kcff4;)sb@{>E)5LB)KWf=O>WU(AZO%f;Xcb7) zOS+%$_Ua;|jUCkKMX+PRqCIr^i?vN85PdRkLXFlry6qq^I)~@IQ>+Ix3 z;QM~rD}2x(?xVb6Ff6X3m@K^4-3p-b{o^cuBKjaP$4|lvQtpOi=)R<8s^DCCl{)8@ zareyq?HRsbnu4)kjDo3G{`l&rBmWZSNhjC2Z~J5}HhezsX)t<}XO?_^7g8xqqmI11 zM{TWG)x5`h{RdUmah_CV^)^zuvuKf5f;-`+(^v`S-Yjxr)=bmvYbd-U;AfF$?nh2m zMG*d(53sLvq$A!xyql4>b*DV$L?%2LU*e>}gFYKQjC+*q-sLy#`KJ_zn!uw(e~yrz zhJ|UXTH_=5liC;>T8Se|%w|m^jcTVUK;r^`hla!0y>b0Z0b%l%m{+SngR#`7x9BjA zuKA5P`_mphi}1G*nmI*BYIqIuced+hfa#_)=38N-GE+Q&f|_>@ck<#;bb`glfGa4-Nh;yWrTwS|boVZ+7I3Z&nAtDt&Rz}mK)0gjvP z{vN(|6u2*s?1Rwu>)|lyqQ+2gUi{1QH(o26ZkPt^vGbzqkrH!khen0Q4uyN47otv3 zfi=xut$3Qu?pc~O>7a%d5`e6%Nq@RpWF7BUVYS0*;N?I>hD`l8n|@n_$McN6k-=KL z50T#U9>T5OhcUi>^kRg7lV-|Oq6Yb8JdPhN|H`LYCez&x7Pvd__Hc*T!BPlcl9-km zwLg5!*r*S*dZ(NtA$#U!&F=&X^P6Z(I`{+%)q{-FGW2D(`KuFzz;YpSpzkh`cZZXPPL4^hExc>+#Y|UM(u;zW?n~~(>9JDcYX12 zzw{Lr68nm^D}<9Uc4bxiKtxhq>{-Xs8!2G9c!ToYZgi%zfps)sQUwN#ib$}T^k&6h z8!NsWq6R1!D_!vmQW**+vHJoKzBylt)AZaFPs_@Q`mGeyB#3vUsjcjZ3 zm45C%uY?{kI@u?(rjO-}3_n^cB&-VJeLiHJE-t~8k#_!%$3seVik1HW?li)VPN!xm zmSuZ34^%c#omPszX4C)FGPi_yy|?8h%Wf@&ub6WDdm=_bTM7jrhM)dyCwQf&x?uL} z49PsuYR0R8EV;**5qI%bC;5)2()Pbu0AlzAX&D4vPJg>e?|aS5+xS^>HFh+(3l#OD z$}K1L7yE4Fm-aU9@5hlp(^^J~D#hcy#JptmhV}BEn$WyLX)jdB5+(8(MUB!@Tl+@v z&($>9D)A(i1YZ4FlrstU$7ym=RJe__R45)1%Bw*Ad%53yy0UWzOEEoPkKY*U5g+)M zaS-o&5dK(acLZ&G|Ni$3)RTkZaKuCbBzDW*>~+84!qo~wRGQb*KfHc4Rl5JcfvIe7 zujpAloW8a=>XhKtc?3qESxI~ujaDSIG%L}}L`Q&Rt`3z!JX!EN=Q5tRMp#AN^Rjb# zNCFkNE&`IyIf7s&z?~&>xq0k)M4Tfj)+&FjP{|kf{=Y2%f<OM z%Do|5fRqbKM2{j(0htS+=kPYDnOLks(e4?7Q}{w&!#&>z+%*^pgWTqUuZBPl zFOgvcMjDA!3@HrP=4mr4S!Y4B>N|1u>Ujcm7l;amA&6MS2s^_?0mcLA4m!~AK3-L$ ze4wi=dNm6heob7TN}CVFES$3ef;%|HsUxCi4Hs&v~R2lz^(0 z$|Vy#hzT5#cc%%GvOS~5G`vWwtwJz+novCxvQv5ck<~3T~KT9C6n& z8fc~uCK^MQd^Ry5!bIv`1m3pBb}!u(pZ~Svu4${Cw0{1J0apzse)O$g00=x@U?cC( zg0^fP9xQUZ(GZ{$dDt_XjYdRpK{#Lz@lRtHI669$1`(QLG6G>~5U7&@z=bH$EV#4t 
zZ=;2z?-eP##!u-S-YvkgqT%KN?@cqNKaO;qVehF7>6or4Iq7tI-yZj^Pj}u+-|WQK z_H~~?eNI*aF)d0aR?z~UsjcixiUU%4^F_ut+#irHhYVZ{czS89elz@uGh2GL$ZAo{AO=%TIu*+E!LC+mf

}Cwq+)pt zd{Z3<&T_@2UKV+t-MOd58`SUG|%K*xDg>SKxC%8_8P5fRK@Fk_(R)&~rR z?jJF~!S=C` zHM^_bslT7jGUsdSA5KJ=rvT)1^<3mbstGTEA*S2)OpXd+dva8Dc7MG)b~B{24%@bo zQwd6RP>m2Qov=P=KWqQy!~vh#1)I7Cy&WC4O*0oVTL1cMyYb~FyY-vt-8$k~Va6}w za-Pz{gncJ_#_|D9*>Rzapf`m`=`VFTDUL1r`=P->&4!dJq8^*t<1e|f5;P=g>)Ugq zD`H!|JOVa$t4Zxqj8`xf9iZxILH}Qz&=g#tl`7AQKH~)bTL>lv8tgDp1r7Qd((|5( z$Oao1ah8H#BNexFe}bqJ)&0m_v9@r&W3gaFh20N!?3*bAG=0@03hYQzM&3xv&^f|H*!~ijki->b69E8X-gn$ddNuX~cGdQLMvLg2R zQ0;jFi{0SJJe1ble)ZT2*gJQhZRUI2h>IBYKgy>;IK!Mx0AhBtwtP10Ld$-{BZnuK zu*f6~-II=vHeq@3y21PWJpCjc{D}Y(7&)RcaS1m>Ct*o1V@SQ>?(2VbS|*SO?xiv` zHOPt6=ro@0XwXb_mhy3>g=C|OCv8#&dK%7b& zn5XVi4DPB5Na}UeMtx0dtxko}GP^-*D(o4!)Gt8lX+NB<6T zYo>l+%8rfQ@=U()BTN^9=zyUiGgpo|AV>Y)Xy3z0PU+%bWngQnx5l(zmR5F*pHi2K zynfw~*MM%#)#p2u^CauLfU}{BDnb64R?8vga>9*UC$aPrw-oD7|4>fQ&kE9Ym4$uZ z-`w-YszAU_DGYFnXLDy@y6x>P7y}w%fe!|3)$gMXKni?%)Bhofi3X##6?K zn+hv9cMf+IChwRZE>!z54;M9Y?%4C;9R_p@?WH0!6Ckp+%cgiw%~e||z+Gxbn*`zJ z>m`R!xGSk;6>6(E&S$`l9LPt}A#K+paZGT&@S3YFyT1em#Wu4r*k>7|23RvE?L}$h z4)Zjo>OGZW$H)P>xyz(j+OSw42_PyYtpJi%AN?R;k!i*efmb7CAIHKqCs}mNjp}$) zuRZcMtBiX4#vM1loUw<+}#UPOHLdx>$E5zHOAfL?`i347Kal)j0;kjq0B z|1Z0EItp7{u)R%+MhS9h!(zKaBUd-8qdtG0G=G)OCTy2VwglVTS3wv(s}?3%N`H=0 zaH_nC!oSOi8=F_)Zm$?45nv1<9iEA)TPHv&%zx=YcW=fvT$y6*p-do;=KhNnU6X7Y zi}lkdvgiyNTJs=DM)!J;!|yiYRVtQUUTgj8N_}g`*|s~NwB7Vc~0=atZbt4 z^7R>ZFWUgk^eU75B|6k>8TRV=cRC=ebmWs?#cYI8K9p=gTQ;b)CYv_OBKtgp_qVs0 z4l+D$U1vus;;Hw&x!KPaz5BJE&f9xmYRFM&*`oLIf8KAyAxnec6Q#S(^4Qbk<==mf zbSi60r$GpAue6ttv&ZYqWz*u1!p-E0PW9Nhd-coyl(UlXLm`?0n6fS zfqtkQ=lk_iUUAqyEKOHPU)>E1FJ0weN-GsuoH>{T@m%2caGtO2{i-?rUv!;iRFvV@ zub&yZMH(a&=@yVKk?!u0knWCw0R==Q6o&2|8tEQXKpJ7_mhK!n4)0p$J?lN^tpC^f z_N+C}bKiUK-@Yz`*wOM$r(WLW*Spr*n-tT_f$5m4S_Y(cON#wPUT~?Xf*hL>ePV65 z_{0RQDuEDU1RgPC2GbJ|4h7r=yWW$#`BcPDEyWEW9pNyXK=a2Y1%!0~9 zdW~5BR)P=bGdPb~bTT-it(nMf=vx|!~yi1>GpjmjqMl=I5jr;X3DhtjKD=_xgI#XV}3$CMx zX7NI%DffHuG^o3-DjHyb>w!EN;T3z>`n@@>EE>x^qkIhR$ii-p{kyWv7GV@zM#)hZ z+M%6Ch;Q-(vv~Z?zpd`RfqL{K%yV!8K_r!qaCVq$>BF_63~6z(o$qH8)ht1%6Mi>+ 
zIJgOHd9C*q?mbNN5_y3GUC-?miHl)&{DN0lmdSx}<^zg#Qf^f%)yv@_;_#umflHPP zLM0Fx)S|kb2Jn}X0l~(E7mo;k(_Q|t!SjN0tMbu}Gf!<6w7jo=_{GroGrjiWiIFPG z-%jzOqAf|XlQ#)Bo+kEd_mswEvb7&R5_a#dhci#Ek}Y6Wu=4;5xNk4m3ff)?H89xe zDlsdSEL+mOq%3WNj+Qa_lihL;E8SIaO#7M%!k1co+S|OWtM*(t?x^$v`EsNzsi_m3y@L{6S(e}m%yIBd#wp0~Ww@saQR>y=${OK^aHg?2OV zs>Z25F))0ci}YA0#6)AFF!tP z!2P+{1YXCfPk{SjWw=xY*f zLIZ@rWs9TW;bQ!c)3jHl*n@<=^H`Z_Mw`F;-^)})Vkn#Lq>)IYakgupedq1OLuozPtABk zG1&>=)FiKO*E0D2xD)fSdESV|ijW0Qp`bb`P``HzB~bOfk~X4BJEjr++=>oodiR6~ zei9=bhhMEUI^e&utw#=Hg$$FrQUyL47wgy2O|99eZKDC4B<$5%Y(0wxpKIF^MQOjs zn;y2?Oq&vXPXNZjmlaWNSvBOTPIk&JlhHcz!-op_7>18q?dK#p8;p7~D+j?_PV_%H zNxZnUD&P~InqB660qz=iC|1>Vy7sK}UNt}iX<+=oj=Y#4xB40A*ygt7#&*Jp@ncUx z)P0P`$yoyL9pYppbhNC1zo-Sfed0R4Pl6ONt@~LY!K9rwwU}wU# zoDspXxk$|_n|<+&ssROgs{D92+db)|Ld>+1xGP6A5QZ&*V>fxrMtdq4i#` zA%2vn5A&@)PBqL<%g0qnmVcr~iP}ED1=5|6`Ok{#u)88`l8^7L&GP(Sy&?$R4h18e zTGRn|jk|!%V-MMjIC_nN9+#NYO6`gp$*n6L>r4X*lL?bnioudE^};Wxvx0-&ig~0Y zcphNKC!KJU`}eC4K`45`&i_mee&kEB;?><(`ULac&(#O)@|bV7S)fCg-lonz-klx3 zt-CG$KL65QE^Hg33zVaM?0^2uPS_P5BAVoNa^iQCa0*^oWB~8{^2DfD=R{Z@Ga^4yCd*L*i;KoPShT7 zXALj=MS;SmAQ8MAhPSfXi3`TqvC0#KGGZ_h8lSRdj0qn?hrkFekpv!1x8-L1CTE4B zjA-$V0@dFloW-6Bofv?hP+`lq4o?E|W64vGw^&k`U#FkDZ2G9hQA48-HP@Dv>Tt@j z&0ja^{9=Cc8CmIJ+%~PhvOvgGE%l3UTZa&fE!NrYrs4DC9Af zIjgjFcqLAjdCM))Um5<`fi)E0l})bLp?r5O?%+iAwD~Yj?%a!xQpLnAmjMYF`NN-I2 z(Glny@daJ=yYxk;3%ty5XT#i&aaEFvLqi`P0Xvn~*^=ml|AasY9I5%kyFd4<9mLrI zUya^TT9?1R2QyDb`ZgBu{3ES`eCnWC?#2z~=zb+w<{a*_x2LYTdfeAg89^moRP-rs zbyA%S01{R#dsEfL@E?~UzZ+G;&rf1vw9nD!Z!~d>{ib>(lMtUjVQyz<-Y-~Z;{yc+ z7^H)+gR6pP>$$qwjKpbzLqZ>X3Uv?4Hdsa@XRD|a0Ih2< zuF`PXd%cqF-!+9dX(J-TaTjPan>~}h0T;8bS7+^yQDyoCZlQ~YesZtWSxCVhy%U|3 zxYmPMC%Z!FyVg)XjySrwft8t(d9i1>+6T#fS6a=J*@+hylLJvge*L|Tm9V*~#==)* z*pprg@SH}!Dj14@VKtHqso#IojCA$V-f?qF-lY%LoLma(ELVDd*wCH!t}-%3PFC&yrMr9X29)(=gwBGg@kXO={T(iKAP^P>@!? 
z+j}FUz*4-q-Z2&udfZL1p6=G+Yw=9U77y6Lb0tI?OrhIO)rELZ+?QHaooD;sZ9BN^ zij+hLmZEca8$$yndNQ>6@GcKbTaA3HmcDD>MCx_Xv`z`)H{>@oHxxDqLie;~l+iOr zSqF{MwH+Sjq^|N$?t>0-NV5{fI&X|ek5pIH;XbH2It|o#N8*|ZuC8#vWz(%X$kO(W zizNoYigA{vd-nC0h~qW71s>o$$_(sLx29QOg!Qq1Ya}GZUNNS`%Txs|knMqU-t=s; zLX|*67f`DWCe)@P6)j?<#@t+^_9$!$sTnN_YuJ4C3S{G-+{^2Wyxv4)7-D4x$hv%G zuVVs&;J)$%G1?c()%xAPe6V4@?t!k3O_AlWatysIm!tAKGd`BQ-$`$>M85ds=lkRz z2L32$=_PEjsD49k%MLHrv-4MOf5-OGr>U*@P+yOGQ;?{K1_zZ81nrvmtUslW@FbPouTQxy&G zJqM88uB?*=q+RGQ*XGrvE|9+-M80$Na4ZE~eW6BhmpuxQ_;p^LHrp8-?3PEM3N$x= zlw8rgMoC2MD%b^X<9>ka%#Tfv*8@-?T`R_@?9hP^OX$EPb(K$+GXpw%gS{h1<1VM4+UuKHLr$d=A5*(I-M`mp)_jcn#W<4}Ngo8AjZWdu3 zDh6MDrY~4T)j9;ZT0_9bQNS zwsYV{C)1*(XGlV7I|w1w^>q4K-hZ4IhP=na2C@ki9`<)U_oXBt%p)@H7kq>w0=BZ}*>qUfu%!n5mi-AP9yz^!ODX2eGN!cxNj|f>_#uVk-()$n` z77qXykkuHUp+V>qiUB0CE9hPzD4Ouhxc(QkJj`ziWS2qA^+O_XsMfs*jB{9oRIwDn z%u(?5DTA?ooYMI2q_#e?9(%^o zbVSEgMM^U75u7`JXPRmE>7g9D_5&o*0zF}gzm}X(8Vgk;(GRs3`q`WJCte(uV3{J( zsYUZCdY79h^;p{8#&Ak3cdBCkJA+oR|Bko3^Cs?yT3*D61zRY2?tkuYlBA@ zYnQ*bt!H{t^L^S6o;ElLgaTJhyhuZRn_TLDVt!%M{n_L7_NU5MS1#vevmqlGi2B?G zIcq6A6q`cLXB-yauSAP}f|#Jvk_>luoics~UWh=09Km`^Po%_OgZYtDf8U7WQarmL zbVpzdgENua?3p&Mr8Fr(d7HNjw$ELjIJWd6{QM-ILaSCH5-g;Eg;iP`m_yoH?1(1# zo%RmF?7&I;t{#qEwfu^#DigZp)NTBl0#1ld$BB(L<0=Cmq}V?DTb>Lia zzt4`ed)djM*Gt*SXc!c{*r~}{Bx6{aFN`5y8r~UPz)yoOLYm8v{Wr=|>ePWyR!!VL zv-%GQ%H7lBjHmiJdrw_f>g^yL0 zkd$AQj{ugc^6;Z}QaNkMvKrob&8#9dk$zB#0s~YtB|OTHiq6vPLtK z6UBsDpIsC)+Y}!-KVfm^A5@r|?C3sz0M8kYdR8RxY?l9R zZM-ctG2{N8)bT))=FoYHuVZ%qmO_k`A27vGszN zn)gNSR5ZhpA+NtESt5B#*4BiNFN`@*jV&*1?lks#sHdRrwLM26jzMc3{Gwv+yO+;6 z31~TWWBfHl6?ZLG+2qW2E^PU5@*v0gK*^^?M#3!dx2T22cuhzG+WTSDHQfg@69`!!(WGt<5)&?*l&(RG#zF3hXv(%brw%WG}$IOEFzPU*ol@|6`N%B zF=2iE4FA%8UT2lR2$Whe4=;Zq%02hZtZ2y~J}yW6B1D3=k-6g2jpv{(CvWt)DjEOqZD~D zAUJmBtEx7v5T~M2A3F7Lk-V;r;_zVpS7l{FstqClAy!yJf7;okpr`@=zna4CS^%1X z_Z#U-GQb(Z&V>KDtb~jbx4X33hgOr7m@QP#>ri$3i3Uc>u#%GtkNd$0?#q2FVe4KY zSN?S^Z)%&25r#d@-4g?0j>6#dxU(6#(xx^S+Fbvby5^E{`S5?6+7IAiErmsCcw|ZP7B2F>Wx9dkFSY0E 
zVSb21z*T4**Ij-$r`XTugy zMptX?GFSN*zIsIdt@DZ3eg>AuEx_vVq7NG(mj0)6^xc-Ush&I@>lydm$r#VmsZMT* z!Z2So-yRI**$KSdY_6Vu9gEwyq1*}z6^vO_I2olNz1Q)vuds;g1oShP+&Tg@J2c6^ zdcolpmjw&RrLecGV6ER{#c)B0g~MVThHezDs1^4Palz;g%_sFj!mL=01(O!AiwiuY z3O#gFJIq|ELup~gt51jrnGG)-OnK1(ieJm2OE!bHg4J-CH6a@nyQLGswWgg{mAbP+ zq#x_b?%4_}d$5=YQZhCDpT@uXHb|<++J9gHIZN3LMrz_7|M+~!{xV=1O2S~Ud*AakYxy*Umrp=xd{9}<<&E6=~D%-7W7?~2?{?ZrIG(_UJ)rF zNAcV-yBfU!%E6YuLdbtN87zmI%S%-Mk|ZqnsXi_4bm{qE$(zmg(>m7nz6ucDlp}3y z!K|9o&J>GB*pRoBHjLiscUFazI=u^6etgLuHSXap=!Oi@72 zIZZmsMPJ97jexdvVR6SHsG1$&tK9P-*y{bmUi*cg%@nLKfLL01TD3rE&}K|Av9zUh zc5o-vyvz=Db}R7U;b&j~Q?`8>^Gq)NWrEFEf!`4}8vo7Q=HjO)#g|Z<&)O=d(h&f< zxwzM&AkT>3g=Ez%rg|xQh#nx`!8~G@UK9Fn)UVvh~ObdupEg&fe`DmgnpU=E)FIy8E zjkd8&tYvb@fVDz3GcSBY?a-k$X|!Nw7Zxg9nX=Q8i=8#kl9TGt*FaMJyWbqtmy43o z5Vrcn@J#(TPnjpa8Eb*11GxIRzX$v8vCf)fB3RWfMSyBC?N~!rC_zibCI0$LMd&FSp#f;)e{42x!oyQz{v;-;Q%iUGWK^^;T~Svf`t<1;FLLJ_ z^X|Gu#6Vx!pURQ*B&vBJG~w)j9ckoZafF%J_QJwftDC?7YvIvv!6X)_^UV^X&l0+e zE^qPvp6V4#`&%Tf|AdNUZJI3Fxhcs562X*HGWC+ zd%8ZSMZ988r-xC*jr(Mk4?hw5l#~tjsYkfR1!23XUCf5>4AKgU3 zgI?p_S+qon*`yU?rIsGE7F37DgZGzbdVDO>ehm742UgW5dF~a4ypX!&g`D;E>fgI= z-KN#LOpKUSLj(okD*N6XDOV$S{LJMu3wKuH6~nz>y&feAQyUu_V{&Yvq271(XPpld zapM*pcY&f3EMGjkeq#a($~|n?DNtS6$vTr#k>=TCj5B#DiTKuEnXr&+M;}p0NL6;n z)rMAJu*DO9RIX&}ckYZ?Sf2muc8?WR(N2H%%%{5IK^t#GA@4Xb+OD%F`cus{6DmXpQg26-o#sl1`2_>&JD-#P z2-m$^EK=qux0fEfeg3nt{#+O(l%?Fx7PKh-fL{PM4>;$nsE{Nv{26ZO^%e0KKW2Qi zW_lpsG;yr4Hm9Xh+df;O>c)$WOA#*N#8hqhd_fa0BW1~6U2KQG6`b*Vd91m18|7pm zPQCy$3kdaM=Ou9o0e;CeQfjDTKECDk_O@ zM3y({mbH=U!g}x+OjMM$+zrZ4Ee=Cu&}>#v?Ch}(9kRLXwr5!hYtkqXSIUXy`FNzH z{Bmbqp$h9}72|BGp5UprAV&p+2z2VJtwJCTxpFGU-IhPwk$7J$;1PNzX!MTl6ggC6 z!aLh$U2=(Ct>K7PLs_CVz>=ZdPQayhGQJFFgfLJ(^}AVJ5+3W~SW^CrNq&t`6}+Xo z6ib7Pc6f@fAi0lVR-U2@&!4QoTzEr%+KMNErG#2ZvGp$CVzSB3y zu#~gnk<=6Je%C9pr%v0l)F0U!Hk~gSinM#ga`QO)@MtT?A!A@E`V)U9^zL}({g~R% ze^g{;41$~*`VTP?`U>CuNP)>DWbFLihKytTjYC>9%7g0l@cqxH!DO zE{&gk3KQ`CXE6nNfw$$c%HQ_9*_B4CjISoJfs+?0`0|_5-nPV)y!R`j<%HTmW@k`5 
ze5#3wL!A^cEpw6B&TsC0Kqt0Xg0o5O8s1++&(uf&l_ltzh!mCYUwYZ}iV2wiZl$%{ z$X~X~(bt&&HUzC6#)T2X10iITtJZ55@!DDQ=^Z)by6H8g+P{kQk+#+v3XlEZ9!i;7_duSNgUH;}!#*>u3 zP`8Zx*pPq%$6T=|hWk971?Sph+=setA_CvyLBHL|bK+p5e4*=K!pkjH{1oIA@s|*@_+vdoX{k%`Z_t|p(-jz2?VrLNMK)A;2 zX5=zuy&ipO-%aIG# zmbzgKvere$z9kCM%kyL9z{6FIF}Fns55#BF$!Hj_&Ve)4?1{iQLs%X@ps@PR)Mn5l z;jh2(UfZfbFQWsM!a#V@^fSbB^kQMoLEo{MFBD^)4?V@6$%=3=-48#;Rq zp{}R64RPz-lNARZ+}Zl^oec&E%AW($Jf_-6UwpRTj`BVn&OEz3Hk0WMa>zxgPL9Q# zopips>k+QLj43GN5{pb8WlJ2^It#7 zr|Ku#H_ZWFA>{pmlz)M*c%t@o208i=-tw`Zh(pdEX-@voUVnIVXT2jCuTL`l!X0~s zL;&le2(Ndh0c25*t@XJE&92k-%;z40W-R>1#=02fN7fOLIkl`ig|y6^z2${?`i3nU z+(XZ7m@~S|!4UMnI$8MbiZ(?X1931!-Zq7}*9`t^(apr`5 z6t)^vdDnKV_z2Tp9?GRi|Mm8`|9JFZq#EQFjC&jPQKzLj=wa{PVS zJ+GFD+{OhklOA2M^iu)+#U{TZg9{L1frh^mQuhUX+W zimc68;w5F5ov{|Azi~}3+HXl}sBBkI08NE*W@<9aY+UxyYzrjmxf~1`vPcN{`YQhc2D0Qp1Kv*Nw6S1| zzx83i@ztwSOtmM^4nWD9~l zudc{D6)&F#jEp|X(9dH~^V0)RF{rZ<%;_EA$u*)baXxF+$iuB{w+{wyb(}OfGt0x()ykoxk*FKk|ru|P?MQCHByQ;AL3H7i)`=z zqWw%?$}6!r|JQ*W-E7ZN$K0Pl_v(;l$X5-wv*y)%T7%c+TsaopNYv#2JPQ7=v*Dt?kh>BQ~d*H2NrY`N<{DpAn5@ixOdzBkX_9SSlSr^#V+B|0OmFNp8``Tr!*PQG02~4T$d?&Q8znHi~UJbFpJ#NyG2ABVv(JM06wynq3>aC%X{|WxkJg!A@o)b%6j0Cg zr{y7MCsFTwTq2(OE6=qSu`w7aiR!8W%MJ{@ebPWa%<9qBo11X*K2UzAqv4SdxkIr2 zTZC&J@RImTfy4gr-y z9-bZ2eAlKQ;c3T88OVXHuFWaWPJKa>@7pU+#7Vbc(!uDwNM{vPDglqy!*>V{tQ+m) z)sq&;FccnXmoIdkp(X?D{9}THLk6&$nw2UFmbc$UjGb+N=c*YW57;rD{y9f9v~6QM z_DZZRl%S%2vULVg%zGiWtj3xyhhJd!WQ!j{Lssibnzn zKfR40)pX*sG}ajXKKtjaD*%p6LR}?=MPKs^dd>apx=!c)$`z$mFY&E3P)%9?`-y&_ zEpJ;CL5rJ$xwn;}`9J+=BM0k6fj^q87q1lA4|^*Fr&-3#1uP$2rM0JXs@$B5<2r9e z?@ksjFz}?lu?KXI22x*+%8`Al=XEh-Ol`0O#RNv_265_J3cmeadaW1Hd3Lq)FR#QY z`uMEFk!hr;(dOZH*ko#~o4;6FNB`%Gui6j&a3G>IR)ovGQDNxxJe>1GA>saT;lw*J z$dxlQ>y4#o{+LlW#lNabTe~f=eM~R*mk2}!b%y)BCPR)-=WLbrf0zK`3x zF~Tq?xM+9l2YwKAx^q`ZhLnmTMx+-p5w$yBXT>66wwyU`OSi8o21HH$%##|tj1!-e z@qP~82}2_T^-RC!kDJ8|7hTv{+L_~~eclSuO$F?)t;PjIwNB7zYY7An=mOp7)4e~~vgQTQE&9>5n=FgQK`LOb_Lg&lherU(yW6F(j{;rZCHPU`S0Jr 
zG=ne?g8j=K<+z$_G&4(*-cLTJjQ~Dq8-*^FYwuW*CZJ#x2@V~o<&Kc%7f$WAnPNYGqjQ2dUue6N(ui67xAj$Tti4*GI4b4^C0zBslRK-@-D z-}CCPH9SHBbo1AGJ)6`(r}EnF$mD;%_y?e$X&fv`0ltzO)dgKR(rnSgd8%9}g@OD2 z_T|HgHXlI;Gl}zeIak8i1DkQiB9aafzFQ5$my+3r50FW^kW}A3xGwq8P5#91d#xK> zQq)36EG8m1O*-tIAV#Si-1b81;q}n{$@|%d>pA3{q0l2-Knk5Te)RS7nHe?H1AQL>Ipwd-S6^51<%CRinLLVL6?8Qlc7EeTzulv`WydQ+X9h48zx==v#8ZB5vP-r0 zUP(jI=!F9Z!?aEVp)J4iG8f45mn%~Vyv&{{8&m|X>a@nH_*nO}ts58F=fv?bQd!dK zu3R*h*2u5)z)c4g8r_XJdEwXqif6jO=9~ieUm}e7C0u<`E&!NOdhHIt*q6WhZFKvT}QdYYHp~* zr5oR-$Z6xN2k$#V)imK*oweWj_gOSD2R*m-$nSzkz?tTg9Ar>9e;1sk=-8L459n|!Va9*-MX1uS;<=BKFU&Z|f%1`x0b^8+qq8f{ za(qFf=uE#2wCc=)Q&boqsMRoWrxr>c@cRX4hbl}xAr7Jgyv`#@{wDRGCw$Q*ECUU` z!IQL$faurf`(rFVGeR@Gn7deT4{UoI_A<2Sbq9ZC0Na}nFNgtE&tM00c11J4^j%C$ zrS|%=j`8v7y2AU`Qh!N3&M3#ZGZ$TbNz9uf?#GH1?OJ`)usaWDn6VdvY|Nu?scPwz zr;joG_<0FW%2RB`Z%%quT3XfHhCC@4auJ5?%r3I%czAbxJ#6MS{_P}L5-@D|VSYzY z@AtZ30*t-?>kIu^cBO%{-8|iDFWSY<^5K;+QfhiL&k^53GKhV_e_uOLr^+c=k4PWl z(F9NXLERekhK5j zfBU;-b*)>hd=0vl$3&?7DRck8XgSt%@uOvua1~3koi;PGC*!@d{R|7&d#>02&ZiE> z8(vHjQvSL&Ruu?!d@cb~zDP*V-o6QJ@xH3BYjRh%j&{y=L8}r`o_wMPO^o3{j|Kd-#yQHklL``3Fj#(|GFpgk1XlAG?w zI{j9^b*tgVxl-X1-Xw)n1hP=*#%X!p_4b5^Q-p6dRtvak>(7#1C z(+9MiOa5YSi&CF~U%`5uJC8a?h~Y#aXTYZ&4L}>FwBCv<5gfQA$>5gqd*ScX`A%}B zM_%H9HdESyea2X9g>dBYHh*(A=wJ7s;WI`;;|8FUSXt{gNJyAy*hJ`w{x^67f7(&ANy%vzb6 zg5PG1q_BH0&CB!SpVvaKCGc9l1J%w7q;?bSwlz_Ua}!?D7WJW8z###~Ckz%)f=TH^ zdo!~kJF8?wj8M`X_|64zrzmuMWUI`?_5^iD1U7?|6iDiCVtchW+(xp+Sw2EuTc>2J zSW#cJDH6CNr+6nNHq17(kaYo>2I5n{!QAO#msF(FrA}Z>Yf#Mkk4ii&YUgwlQ3G8> z2HqE9cvp;s0I#3%$V;__#}eF3L}_p<@G^6|q)&V!fOit}hs2Jv#NQsuX9OHslE~uo zEraQ(bdM;Q$flzZiSxgJv(ca6hy}q(t@MlPF!PCL^*>(@G^dH zy|RZnaoyZ>07qdrx=igDv+*>1pH3cZt9fGwv}!ITLGD_%_`2ENC2LNhI=$W&2!3HC+8OgDurfi#%t}P5(R!BV5bNL(ExF3Gyy% z2@DNGXlXprgpTR;SuzoraIqDN2B|-3M3+g|?)q-@Y{T04*lGD~D^a{IPA`IF2M248#2Z(8&z(%CrODI_z&ksoVc z>K2@|mkD=g2)`f6{pEYI)K8xi`2B^wghb#atBYG@l#g?gRI`Lbw#(E#IYsQh}sgO2#77pe|Ym6A2`1p|XPGmCS)x)zv-ni4SOua26?!leLW=sR`f-`FLP 
zAaZ9?51}xfjpqg?_T0^fzrwpWas-H^_qC@tpSN2iKqny6*P<;hWDd$zl#2Uh~iP*9;Q z`IdSjXt}N0JLKr{-SyE%#n~K@TT!}2|CpbbPHBtl4)@@UVAmI-V4`2dT^IV$&y{Y) z-pDTP+vnP6qnfEKUT9}ZfwzC|dQkdQPCt}sxhE!An5Q+ z1H&WRQ3NtLVG1+xOYMF<3ot9xc9P>Kbu;z$RHvQ$O>K*MS%b!g@q8AGqT;wEjhptl z?Y5pqp<(V~4wcO@6o)|pC5-lrYK8z0zY&9?7z6E_eBN-8r=!<9KC|G{inc6u9HLO@E5$k2n#+xnM?H7w`hxg|aW$a5KH&NP0@WtB1TK;fJ{_r$pK_qOrL>!T& zxOUyY>V5MiZoHU2G+sYzUPml`jFc&FIN}~^Jc;rvM3?m0sncDU8c!Xku)S&JD%+?U zBgq`7DDAGtm)bP|H5$I|%;4nbE?D zcZ^}Aae;IVGByRd?_dLd0HUSWL_=tND!bH7%1kGS3~(JOTkHDX<2@1Ze($NX^)brBCkHyw3*K+U8t;K3%ucf!{_*jWYr(4g zajpL1;?3IYNXR#*+!)RGcoEud1$+%5`g*1<+$b@%H3aw?m+DLrW2(=Z7+t_#(f!~M z0+N&|G0vpPyVG|)rL-0fUD$N|yy>yRdpVQdabn%havOR1x8h+FVViO$sIY~XbY)laA^I1AN)fRsLq)bzVf3fz{=PP21Zz(~)98*IXfru+AGI!RnsKEEz`Tr(; zC~=)`m^WwiRI!P@TC)r^+0iyDt_!cW-1PZr9Ce)1?4wk0?;;Xn17N51YW-WOAEjFC zl_EQ*gM0H0fMv?i=1Bkh<=_c3m);m^N}#P!6yfA_SJ%?;?R8__(L3qrr}Iz2z_`Ng z-cI+od9i&(pedJTLjT!&R0h&$(xgpwKxShI7AY7@8H47EqIxDcd~*OM0T z{-t)WCR3%LlNk|$<+b1UvZtXRQp$nz`i(EtM#{;6y;w1h(us&ka+o9cJQl#rha2n( zHj3WT#0NpW#!^%n=4hm;ZVcPQtO}}ZOf>o>c4E&_kdkvZTlVu9ZD&wejk2@oHqpn# zb_OLs84bW)LCI>p7dFbMm9hUEHnQBZ===P#h4OJz#aVBKLqT2 zI+Ukh!*sLA@sl#tE|BrB-5hJcVo6nuf*Quejc`xG+Dq* z8Co93`R6PIZ)~pI3Kj_%z<_=9SsZpaFMn*{rL259(~R&im8NT3PCsvnli;s{XmQO1^Hs*GavnGdYzAln@t|p z)PaVh|F1_P2n1BdvejJL;X~0R0gFdPai!f7^17h2x$4`W*8t(J&pB~@Ts@D?_Uxj| zLwx+JBZ` zB3I$Yp@wja%)f1C-N#{p)_?t*yZn_BCftpb2_oz^Hd?d%^K^y4Ka1?vS%^zI(r3Q~ zS)Uzz?{Ga7L|@=1)dHj2F(>&@$>3{JsyO=;PXqe8J|~JE;d$XTih%(w&c}kNV57ShXRq?i38k zIC@FN&Y*`6XSEigDUzWjX?a6a=q;6h2+tGeX=k@*W^26<57cH-LJ~L9HO42#E6!%O z*UzrI2exBWD8_7O-aK}~aCIlgf4@^!ezR`nX?PEE!N_sXVv|Jq zWY&d~-4tZPqt%G+P@{L_2KxX4hjg8%|F3w%@qFQNd|-Z^-tbYnoBmi7eQ=WgWCOle zs)p%!(ksAmX+%G(;Ni+IG+pxk=$H5QzwWb1lfysY>mP}&6m=;pi3iQd#+~^g2ROc8 z<9L5<)b{S2g`!?CzeLMJ4EcuhYF_zk#PZi}=QhocW739$-mb@6 zv-Jv|G5#19N5izJ642Lf0X5~#^30)3QP{9Wb4JogDS0*Gu5Ra2`$G#?J&VUoS7gS1 z`;h}nrUX-cjicCMNj6G1zb)VCach+?)*7^soVmZib5HKa^a{PxR z2>Ix`6(oH8BYK;%{c}@#w9zbg*80zz8Vj#9OQ$l`?k2K+&if%J2dlm^eU6C4o4$PG 
z$1~*6g|^2OIFwEDR_kOwp>_!lW(G8BEdE*9$mE^Q03VO zox)$7hk+@bth!dR_7WQ3J2MV3qJMVcDazg8RHsWDFbOXPCLEy66xEjXBQC3=64il; z?589wYq_7JXf6ahdI$K%uRLL-RBQ<%$`(A50*H+?_|n%x!BSg^Vw{q^`-t`v!HUgF zs(Crh!!ao&R;uh=_=HQ>1o_A_Y6>m>M)WhS0DhkCI~WlXfwUC zs!SW8oA?ZV^Hct9ME?7M)$Q(1Z%y?@7=xs%5;{m~sA7brKoAI~+e z;ejukSW?l(XQTS)12za42zKa4HPJXa!Fsp;-s+M6(0~8M)1&umwa62arGCCxx=f;w zKhMkB^fFrsZ!R1OphR(yJJ{CJ*`n8s3+K66Uz;sP)YkR_`X~f8cFs*Mn#bIj!XAeV8sky{KT4`w2&yNZ0 z=|o~xKSq?ud}lV-ZWQ*dPi`}(;WmEqxNEoOFMlL*>b`KydAYXS7pP1hoMzm4C%g~G zWM~N2mrQ#h-@~#@Y7xfu>uF0)lA?$4v3{Sap0!@P#mn$Ju?Gb!RVe_RsVT5WFim{^ z>D?}Vd<|_4$i1d8rW2%4UAoSnT8Lp^{|57@V2$_5w6bM;q{N);VfVP9x9W>ldKx=|x_j@Vl@ssva#gl?Yj)@r$DUG25 zd3#|EP%CYjwQIK7C%5X~eJ>yobk^dgN~3Ms^vHak!l4=SghO6=zOjZcV`8xzs#r9# zW!J2Jec>Qu)kdk5p$_6{1H$k=7jztoQCXZ+tflflAz}P-+aNhS6O;H`L`dvMX_26) z%$M+a0{gSpbXWcJ$Rc*x`Euiz9Bof?VK(R z^&i*UzVY5b2^ehwaf~ZVsW7Kup@7bJ+Bsyoov*goMfkk$i}wD1AHvgEZpPPy-QC;p zZPS{S;zyosdRTB}^Q?X$Lr|S}JOP4MZPlbEgZVwP1#~vA%CC z5SOcWf{{)h{C<4=v_8WIB3AQlABJ}vgbXLxSyg?TPS_bYy0)CuM#E^9j|l@Wb z9njeuTV?5TWp3+|D4}{jP|V-2iJ?cYVLTS}70=^Z_lPVB0L(xk6sRu8tb2neQ?-2!0oUYm04KlzoDFFCS2ul)@B*R zxX^qGBj>tRKic#a<(E=47~w2x9I;SxvuH{*Z*AfvI|_E}FHIK)ej4%Sm)3n&;>c|E zV9X5J9V+;lM>k-PGHs9g{L_$-6Oq>R7*bAmo@2R#c;3@+nR;nhKLK5$pB+CLm=4#g zs4znl&C9qoMTJV;Y9>t^D~>x3w+@}@x#v{IG_h+}WgT@+AZUvs5^(c`UqyRtW z52+^I!w!Ix+nI`!826uLd@MPJrP)jG&7sZnb(#mI_bc0zb<&CxNm)y=sV>t>1F*?6 z+3-FtoewoWe*_$PKos9MjR0>zDLnO8Va=skuO!@d@Of0Hw7!#HlrX`&X12qz#UdoeP5N+siGLYH zSpW?ou!pu3n5D{p8E?{#g4zdb5dGb%sXCe*_Wf}qr4yajqo>(rdzc$9<|1aZJz2|_ zw0T``#KGak_*$$(>3EkY&I9+)_>aDIY0twPp~>vuZsv7&FGryd`yK&CWV5FnF-QUO z&a=5bSe0Pk%@k?f=|HnZKq?slDI-C{-(@Kg7?$i)^ll^%x(-hq@ydR++b6j0s{Vb- z&B#=v{ebwN#JR>dv+0F1d|%k>kG?!DoV{x2^I)6H!@nLFqjfE}xTM1zHVJjY!VF0c zbH5iv+t?Jq;h5^1vbf=gmbd?sMXU48eD)dYWIQgExAiV;gx@O@m)sOIgCAZh4g6T_m#_ZmpC^XB_m^b=}#MsQQ=w$K7Zb>+Xr9k&U^FZbAJ% z7tc*9dNH&$^9T)&FNaqBIN(2)z2M@ z`IhZKSifO@WR)FkRHUEr)x^qLLYt3AsvGk08lOY zobP{Qiwj!<>Ce07UV=xJ9!ww=;f7!Mf6z}W>*rv<4>}FF^E2E3SbH=m^&V^_R!j|O 
z$=D?HsfE@Yc)8AnYa(Iys-)Q{ErV*uWzK8z0fw9K(wOEN+rR{QVc+Bv6W$doEO8R~U>z~Ra30-?^ z9~Lt0@)yd}q?y~Tu75?tZ<5k0MszPnHFkIp`C3rViCGQsTEHb@-zf^o*zStTD12zdt+%!!n-Gh;}`QF`jz{#g7{{(d4V zITB-Sz?a!@#ASUd!0FWu>A>x2%Q#wu1nt5&z-w1}?E@GBsGv|6DbTiH4(#j9-zD{> z=1gCCTg$V|6KTIdPUGYFNGWieJCf@t90rVzngTdt;^vl2yP(%MN@TXBIPy=y2+Xxr zD#_>B@=-|1E*un{OO3)b1|v)Xy&r@BakQ&7!|!s?u)rPVDqiwE|70pAFg!fG@OFX4 z;qcW#dSk%vFz>;KQpcqC4hWHlTG?h?|Le>U@oGeFvqc-7)g8sI7L{u|q$JWE{lwxk zyzA}+e-Z@%dGNjY5rLAU%uC+8u3(U_Q!jTV316R2o-ZIps)In<=Zioac#;8wFGdZM_mGL zD}bj^#SdhFTy>W4?VbJ2Mx3rqWG!0ag_Gfdvx9@t+|*G;LDt>5!=(4o!*A~LZPyML zmEKI{j*TA4=@nQu+`>S7r-=cpHI-V#iwSZ`CzhLOQ=x(+?#b(Ydoa14y|%``k9gPj z5hi`{t>&QXHRQOVqP%+oBf7& zT^>?y&1Zq8Dzl5P01^JM?p_&qH{*o1) zngXp$=#QWK)*uzU-7B3vuG^^=se63DLhKt+Qg>1fr=DbRd}4oCT(RMxAmJOPz>2`m zIkP&W!A+|6xxjhEAK6UKXkoMCeUgZoA89@Q__O0XUy5?LUv(G+)E#o6nAS zGbWk^8mgQu$b-07AU*}u=3=vVLrTaGE(r)mL=zvhVJj9m4tyx)!*RzSg=lXMs=qCA z7Pgo_(IVX5bP01}7@3UVf278Ho9e;)>Z@40Kh6ktQ*s zB^3NJ>D0}zfaY{8MNxoQwWnY!zy=+z?L%UE?I+O?Dm2&|NE_c3+$E~Eln5G4wV`7( zz)2!t-#B}wB?vz?$X1i@;Vzn<5oj#8;LJSC$Z?zs&pd4>arrijgI3u0t8`s&Cg;j8 z0&pC<=%n&fSflRIJ7PVq6BTsm3lx$SvicjKhG4s@-hAHxZ#Xtm#UZE;!02?jOVmSe z=l!_iIb#(3Z5zLVtzs%o1DKy*SxnDNsI)6MRJZG&=GmZ+w!XD`nK}AM1UM31ZfWq4 zyDQd4H^5hREq#TnINWA|<_~ob2LC^^@&Ddj{70Vg5&|@s#9a=6&)f-P9MHFvKCpps zn%$ru`XK)I77fvtach>FcPE^3&O5-}@lQ*d!90?`OA`%2-vs9bjC^UBuDRQhyz(;l z3to>J^UAS*6x!$!&b>n)e<6d+=9~atOKukv$9Pg#;d7(fsqezuPeB3Kp*V{J-wpS( zz!PS`CwUbESz6PO?+x@uO#4h?V>fSKXlzXgNVbUQYGHq{iDo&O(aiV>dl4dgj!dZk zptWblB1^P8#B9yh%N7(JyQYEz0&xA5`a!TZ9=i)RBS$q6q&ZY{pk{1dlSTXOotyF*N z{9XoP`D}4E8akZd%l$fFVK}vx6PsQ4>R@?w zGbi%`5w~u3-BH|Ks#JgD!ncM5!v_OFJU8P%!Nh5}p3JXV_x3bCE?~^diZ{GOlquly z1`&>}&fYU2DcaDwh0@RwJr;hS@3nUZkJL#80}*tgIo^ixqbbu$Ve{^RcKK`k7vWLi}UG& zg`Ko2b-BwFw(poOffiw=Okn-U?DqDZezI*oPyU1N z_U`ogRsz?~VoJ+9f7u565-{7K4+cU%V^C>j^!LN{K7~VJNr5C%=EZLe1iv*aTd~@6h<4HtjrgIB<`4`+)Xwb&W8@UI!`l4@QdhEzDe%Lj-Ivuiy6{|rlC|6f{EeI`h1dcN`MNrrY;k+5i@$gOVnr|(XR~42x!`fgm 
zEn?c$*MT^g>tzd&0%B(}Om`b-BVtQv%Wlj2NS@Qg&#}#N?EGh_L5g@q`ww|4<^;f_s^`gMMYrKt-1s3p$4>0(vVbb^Yn@RCP z3;8^Q+^4A{c$et1+Z%3_4Xw@%euzw;S_6$ zS26AV6mb9CwPo#3f(|!akar22B6#=(tWI=C-tY_=KT0uYc~3jFVIUF|3?8-q^Dn;f znIoPNAyEheN7QZKMHygJB*5~d42`?Y(dBgeG-VxbNi;VDT>Wd*U2{pExG7fDI1;;9 zjySaC&Qcf|{0@yZ1DT@%I1em?1$r*ze!kG*;s=MlbtpQycVz|+K+Qld7dxqR-~psx z&+~HXBstd1yw+FnY<@?@=jjNt*nWh(P(Aq1d^pKoJztoLz%aGRJ9?B9Rj#^T<` zcMfI~|C2v3q^&;4PUzV)7Q-@|_P$k82MNR@>Qo*^WXPs^oulVn$N%Zhl{Se(nOZ-q3S1ik%CmJUB#*#Aot68xf1X*CU*1!mSI(3sK{vxF9 z@|0e6L3zT>|Kr-T%x41{Y%IzTMuuR^$jR9dbz@Q zMJx`tl26gL4xacauTSklPs>C97)X@9XP8H#2O!gAn+F3v+sk0flspko(d1Uoi{kf% zgBO_5Ouxx1pz2E~-!9<7GwLDT*c_Ul$bS9i13%9&ElyhJiiv!B>|k6IH$I-Ye&a%z zm__GG4JGXQPy%0ThtZ&J^AEEf&6?J33&UuZWF((5!`j~xKkj55TVAkEC1W;d# z4W5hr6jKAX*1_F^bkyXi|MlMasK~8ntM_BhRCQgg5DCHbwJFu`{q|SVFYWPk_*TNB z$|6WfY?hGC){u}z;M%iZ+nyK{lBulsh{T$`Eglz*}!<*MyqR+ zkv7r=-Q1b9EK?~OxSj4%OkyBN*A$U_>BY^?-a$ujmnycsUPMzkov>n!*LVN9v>~pt zL}5RwQeQ@--R-@;U=uCS{tgX+hZo#7PXl;dH`wkBr8(r2P3PX50!DpoPz(Sb@Pf(PbqbSwk?anFay$pXL{00GswB!(;<>DAUp|GCVW+3m>RyVVxy9U zly>OISoS=tm{QeWgF5&oW|kf`SExKD+IC~>IocUt>+yTb*w`^Qk+?!r^_k&(Z(Qh~ zCq{lHY^kcTmf78O87wkWF7y4!Fz<#p*9fo5uGz|{=u_Dd@qRIS`rlrFF;@lm$#S*= zOEpc8C=at^yP(R-;X~j&tKF!-0YT=X46;>_d=ifLZj4;>8FW`nxyntVLgNN)d+WiX zS`XDpmh4TiVAOL6d1=u7g0AI=*_6*U7%~s)QfQ4Fi2snwElj zLfkbw8s(JkK(!IusIUR1MIWKbFT7!}mUc91Dc~D+De0^DMpgad48J@RPw9fPrie5A zURgGmIRT2Zuf-Igk|_z|`g7C5zUP$L<7f)Hng1bniy!0d7dzk^$|V}0i!t>KDD?Ld zL@c>B}gy>bgb1tI52nw2I^Lnep1yRdOa)7_}UGnK-v0esA_&`%+fThb+$Ie`YWJTy_cb~uaaY+RIV#$!s4{b9p4(cqp}!0t{+MsMO2}Mo zgSU!nQtvvm5;=clW>SCSy$==D*>L6BKk?SSw;(d|gaA^oAeErhd=0=-N$^YS+lefN zPbcob>&Di3hEvzO8$Ap0g4q+mZV%LB~?_iHm}<=77%Z zyZCwc=7JCW&=T3;MGDw#v}88X`+(^5bc~x;DHs<1eH{@e#={?*7mG$5l-m|Y>o7Et zpupFaVD3Vq6#igJ;y#X-jDK5ZCz-zbR5jBw2$?su&@mi8<`ziO zk?!XpXHCq_M&}tm5t6pvxSICjyREKh+~-a9rRO7)>q-8WxM3L7#j|ul2h4kI4A5@r(kpM)rkL$+@V-!u>n>tuU z1~&#E2Vp{R$}LH*;M=^Kc)!-ZE=){BhHP?Hw^uv4ptR~gig-VU4=n)PRE5ehjt8|x zO<}t8{^{)1tW8`iUxb(s3;5$gFKcY*MRfIDtoIu2q?N&~mz90V|1|ZA*r)MQO~Q|> 
zSA{iuL*h_*-$G%Lt?soLrFzKVaU&BYnIbxWO-SFHi^-}^S&{1^e}yhTqneaJa?+vSx>DgxuzLe!$z-!frOcW}Rr59Gl~05ZIncsONahdnfW3OcGRX z@52Jz*{AOMrdg=}x{Q61d(`g6d4JU#^!ehHS*&LgEud~b=>?4CxTDB+WxPf~9?eJ; zzfbMCa5g~Avu@)uT(Q|NBHZMA5i#DL)K|Z|QVaTe6m^mAMm{!*&7`gXW2kDXfpxk# z$Y1z76x||eTDpN>_%_P%`R?}qF@7h|$M{1(UtOC?`JR*->kCd%(jf6z0-M|HC3`|dXvou^uOj9l=v1qwX$TJ5h|2i@U?0T{CxB)5iboJ`sdOfanrm)g; zJ7ZE@z~V2#wk*Xa7X&bsNQW&qK;t(vjn2?mpT1DOz>9RqTz#RsU%(yJ*Uo;N=9Sv! z9h%k2Gc7Gp+Ez*4rCRK-99xRfmQ%PkF-LMY?aHk-@*~t|>n%u1SHyd0_{!5Av#)8@&-2$?tnBck~&(%<>QSYK0Ew#4<--J(Q+lFXO4<8+BT!~ z=)s|7If3ee@kyj;9jLnV4EMJW6`l!BzDbk0^ygLaCrb`=cdj2>$J5QzK^GfW;aZS$p)15VpGjNw4orNQqwZm|` z?Wn7~GW&~qWL?6fRMLAvsKNU_eAid1@wlERe#R71eVN3btsd}aGg{9l>i=G|WQyaWcaGoKC z5{>)v%7sGbZ|tkQ%8Taw@VIEQt)!z(@~05QNH7HW6-cu80c=!GCUVQq?f=mB?tYVs z!8XrCR}^Yf8j~Ce0rM&D{XPSiA*nSN_&i5_dw?8(jk39QX z6tY)52_N1&psK7vPq_&{y<{Fb(lREWvkJNS12lI>>{X+2_MuNRZHLn0W*NWem!&kJ z;+;bNAm;Wfup_*pDxNutj18k7eZFgCN1Xt-wG6W@YTO?|h%CcF$Vs2#=H6OP!LFhnTknib|jfID!RssJH zoIcLEZ2|uV>X97DCxRN^AzI+T99kR?QM;d^6P`|nZMxQXhol=Hs+z=Nh-o4oqHp!; z_u+lPZ7LGjbAo>79|*2$2p20aNw&+p5$f3w2dGJ1WO=DRy<9dTGv!C1pD;F{d8_6+ zME>h*dh}@aTFZfpilEKD0!rq>;FVZ;Su-N6r~b85p~ki`dH^Tpvo)6g^%?iOAC|sa1StwkxUojk%*~-?hjCaAOKS1=<)fnwb~T zf&B|${8Qc%-&3G;=q>5l1r9tUxvuhS2tg0h1aiuvXwU(&xDBK3{Uhc2bOKP1r@Lv(ZTqS#+rFHi_An&0n zDV_A;RNyQdf!CxU()3Ph>ETBEX7$Tx!Kz`5mqvqQjo*w%S6OR|p|3O7{p89cY~JJY z-SEDBQDPmnJJJr1O5C27le1_Dl;H_?to)>V1j6rDwj>{>X`o0_B?IxfOlQk( z+vQPlR%y??o*s*rlnV6MK{)CpE^U7vtJTUG+|^uP+~s9iKld3~7c!+jdkrLC>$u>Q`Y6^@ zLRlZEUb&_IfErcgxGCaI_for!syvj=?0>4O(&^Rlm?Hbvdi}=R?$6#~9l01jBO75@ zvQwnWN86WRjmgD6p)J>nLago8WX)d?$@`(O^2J}KPT>3W!p)~?0jAXcQWe#LY@5M{ z`s4mCRO`FVYFPGvW8+4H{ASkIC7t)Bs9;O3w%0<1qN8D)Gm_( zWAurWWI&6CvpD9}dQy!|q&!;mMKy}|6m6dITW)+~Tm-apKr-h%^--$4H6|}p{517R zKGEoN^U8+&)cI}$eDfYQSip9ywGh|PN7p4H^o%S`Z`JE>7;j$f7HcJ)`4~TY+vX^+ zuKn(Ed8q9r8L(d0x)TQpHOa%EJFr)e|~aEvU8is>_@8kF_#eF|`lz6OjM@cgKM zD_FuWP_*_NuVTvICTa6icp#%j5D@kR^UZeV6M(09v(l|G+ct}DQZB>x*Sn^0G%b7u 
zGNmYDhHF#PIm7u;nJyaSQ7tM^t@_ZR3~P$R`L5{o#=K1jbXWVVtf}h%PuxHa25}TBsYQR;xbn3XrRpugdj}|PhY|fIxR`nq|0dbV6rYu@o&G+WX$8rJ+t(Pp? zLJXv9cD(ejS>ZDDA*CFQq`u!lFdreN&ma3)4r2`md6Zkl$xa-&W(I%vGQ@=K@3_8A>;+!guuay>EaP@Zem0%dgoMUFy#*WWU ztXj{>Aj6?B3(5_(re(}(Z$u=cn}Nj5mRWIG6OxIT zHdvGi8>9-z51PtUt&lWoeIcn8NoQv-QouoYFc5Fan!QBQ|&OnF0Da zzi+g(i67$RE0@ranOea=>q?v{$JQy7*2z!hZLk84cT3-WgsnYZ^;ftxvm>4W2A-`Z zr%xPTZQY_dami+a{TJraPcw-S49bQhP$=zcB1SIKsmxUM+S6IAC*Z)jV|w>}2+R;t zM8|7$VGKeT|FQ$@XvxQF0OQzG;7t?WLc_!hE&~QOg?MTIyeim$+xft5DbYll&;{p| zKB{D%0QTuNcNntI4$ZdFcxWpT;B67XI*C-25BHsj2QNy6`U>tE z@(Y(%Sn8(jSa#jOZcr%%)tlYrjQVD#FntgY&t&!@24~*7WBBe-f^~X(Cs7DQ>ZYz( zipUnqpFJ$KKvfY0$o`oYL!yflVSY{y)qk(lEz*Wj0|$pupue^Q4-KDqFlMPd2zO*_ zb-vcR{8i8UnAqFu`6toO6moocc`!AvFH_{&wbJgxe?PsDG?C_zu2_`M0{e~afik$V z0-yA?BfxSh0CdGeh}-O${0c!VmtAPxwDm-lK@P^TlhLb{_x@$}6uv@-HgvP5ZYu>2 z>pOH`bxE33MR56_Wdsqd1lm>%Em7`j z&1^+;I%>c8^7(rIy8dRv8s*>VBu+oC!@&q6c$Zn6HH4Dz7cY1wTTp`)9}9CV1`!ciuF=iBD|`6P@q(7<g&1+6=RT^ zyTVpVCn4WB_)SV=swX<;%dFbed7N z%9GYUR2f6y&_xnn@)N+cl8>Ia9_ovzl@zB^7XrrGK%ix5sz*( zj0RKomXBvFJa2ua!bkvwWi`{|{VX87zw$|)$tTvSaj0S%Qq*_9P%#Q&_YkEML;XEn zc1G~X=&iA`{3T5Sf|r@;;f>QOSGNgaZ9099N~EUzNIMoAO~d0a;Mgi_J=|DSiLe$h zLKMY1cMKoIFF&%m`kfft2*&#!U;#;Hx)|yj!{MKc8tXsn;iWbd}FMbLpxx z#z4fxv)_j87Z8c=ugI37q0aPYSU_Aj24Rjk-h#=cY1pUB91@#1P5y3WB;Vb#nHn=h z0UPkqLGA+)tqfS7V(h6pmFqSduz)chpQhuHb^AZK7+Tx;?q9Qt+XNHFov-n1-L7lI7+EryOAP!07 zz93FPzmebZ9N7)M%s6r0k;0^P{G7shRes6+=z|Uj zY9*XLv~RdtRR4;jhUdPadC0-HwoAKIH7bt~{3^-#J&&9kIh1+HfsB z&Q!y<9ApyvPIj-W4n)hX-VgzL&Dgt!2yMWq#KQFfnZNsR>wm3%vu$|SWi5$xHwgVS z%U1B+V!?3b>@=aMGKTxjaCf#MbfLg<6&_KsTy#|b&!zGp%!Uw0t$_w<9)EEoc%fWJ zT=OZSa$u*XXv!r2WTWSGH&&V_aj8cQ*w=1BwzFi1LGpzAPibdK>W?F=RyJ(CMR^L8 zCE2hmA=TSX=zV|rh8x`8`8?Hpzg!tho4~X~r zIPHaD?wGQFou7-AtOd%Y%xB4cogWv@A4VU!eJ@UKYkQaFaR9l9JTUVY9(7gCc1FB& zPcrU*ZElhoN6BW3_DcmjE863h$M~AyiKzV?>pJIWMt3t0#eds*8G~97`#0{Q>187gDXf&Vezh?bS8~%=VJJBKd&|GW?@oei6%wAT0 zQDIws+f(fQ*q4LM)e<(30w6O%{;G2;C&ZoFHejN&_rB6Cr|0SZpl~}1>hcov0WBcq 
z%w+tzX|krqMnJez^fum&jG%Y?$;|znOP76cA3m{~aYqC;uY?gk%c4a5FF}XWdH~1j z!$je`DsK1L{Oo<4SG)5ClrVR0OPsq#NI(gZ0K2($OOM-bHq>gQF))d6%(LeEQ9OG_ z3ul2yg6+2;CGjkKH}j!yusjA~9_nqUg42IQB7V|lz%;pd3fIzhIJPD8den0oJiU!pEI z^DWL-!HugIXt;fA^~!w`GO(ViS%c>8pV24s4vqCuL7u}AE)nkpph?l%Li|`8JWPxr zV5UQyZ@XiBduYL1G5 z;Fu+>z~W3UUayi!U$qv;D3V&>x39?GX5Q9s5`MhQ?HPB-f69--LOWf*cr#GaxU2ha z*YtnpX~>~xf)VEC7xV;z`+<*hfQK{D6@9ZHt_o!$uZQy6zw|(Ib2$5|LAudHC;2C$ z;4U`Q?;v0ECC2WMjM3Sv=zDRXJX$SIS{=aUphSabRTP!Di^yac`oCnD%;l^`L za5W&P#bm?9D671m^9}q?>#YTaRGD8B#B{a^;^STGM;zbt9yQYMXBm&*3e3^4fI^39 zY5Qy%zs*Lv2Zbg{QpIF%c)5YvWTP+5&6g-HyOSFnd})sLt8%7>!3^V=XgIzBz-;w- z#>h+q9F2`cW*=OWz=k2)fX+RS4F^@n#?{GUxI=LGYgz0R-SYur+{>Bbo#HH6Lj41D zVxS}Y^>O&F1`D4^(^w+Wxn8O&Y_&4Bi%Y+@HwW>QK|y0)g?dac;3ief5=U@VU_FbE zb=D!sP-C7?nZi`880I0OCT{VzxS+#>!5N_}&@w0NlKhdqPL3G!L)u0LZko)hSJw&S zQ^=xLQtL&^l&5Z~3(gq5vd|0KSG6v6D(A*MvaeFo%DW;+#q7^=+@K|Q)h!!gXM(tk z3a=gdR^JiMPIvO${DHsk6g1fL4yZIwLxU@$x!8m-!x=%2Q!P&^9CRc2m8m{|<&TnR zxRw_y#Kfm5D6(b6_<3$zcJ+tQ!zel3{JirB0m)W6Us9^YFnaTTyfgR+3+|g{hz<|% zU}A)fHYV&m?k+E#i};;Ro)4}RKU7s(YV68{T`$?A;SlsS8!`opk{2IJx|Bd8Vnpy5&dZ6QG;GW(T#|E$d7DKetMU5ou;T|?^%nj&qd&b?Ns?Y z@^xHqK?2semhB6fxr;|!@prObrhS8*i-fvPi}#-1El88;^lYYdhI`TlMtOYAyI&UN4;w6g8O#EzSPSirj)Q ze`@g9OdAze=(?TFD(tvm-0A(Jx>LRze~8yKxwYlx)!rJLX>XUG@W4@_tN@?G>7T&- zncC#z*-P1kKi_ol3;b_ROsosZ{l+w4F(oO(MbmA;26Le-trXIpmS|x)iGo+FR(@t`K7!;a0*~dADhmfSs|GM`!T}~2Z+yMy?iuBHTyDn6-QAzf&$3}; zH#+I#Q=X%tsc*2ZUI&!*Av3XYpOT&EpLu2_2-O}!f#j2bnqx*O5 z>8OJdYd69xSjiz&KPgD^bF~nwm+1-m1q=_a%`tA4X+gPV@xk*u>2nh?QQY2kXYZ%_ zNUNbMLog7v#QsVno#tR5HOmwtUo+B)tjT{Xs|G=Fsk z^|m~jL$#U7G)Sh_`>1 zth2QHon+JQm`T~`_^n~R>@6>#Cyab6I~9k1Rjj@a!A^p`Zn_r+j*YWDPx<{;IFmo6 z`)EQOiO%arcRhnuidR#eVz;Vqy;M+DPzgi`V6~G_mYBxkLLnX##6K>P@JD++GIQhN;w!DrF?ci8Gq@i8_RJ47{a7@jmu4Z!Le6# zDuFg3e9A4ik^a;@UTtZ*jIn395jftOZ)D?xWF=TNsOJ*asZI^$xuvsztBd^u5Bbnl zDe)aW>1bzre=l9A4Xfglhf?C>9R(f1C3)ENJ)L${**C$;&VPzbL1?GJ zcrMBh6SOin^D5l4W1nFL!rLMiGp2xqchpApGy>xsSe+Y@F-DL-S-fj`Tj`9e)sbNA 
z^P1cYVG>CbOS^JL^+&t1s^BzouQv+A87YR~q5-&(H<2C2AFbrU&d<%EYy;bz5e@{WRGcJ-4+)Ls(X zRGO(UHx~g|@#A%SQT`ilpfhIV&7*lLCUCTgiU33&YLx~5`)u6? z$50%L#_VhR%W&_5giki;CNfB+k=ICquR=~Y;)(+GUU#- zq4{!XJ%g4b*xzy&^D!I^1iLe;A@z1~u0wTlay#cgbC*rW<7i7q%Q6hMow%ce_&NP?wk5Y!6hT(-NLhXX)wmD*Xm@XHYsr_ zG)g13%V-be0N7i3Sk+SF!qSxViJ_^45iIs7b6>kd3dk$B~oG{mrkbhb2qv>KbMWx!5ZD z9<7FE+#{@rzGb39g_(-j;#(cs2AD@)=dW!&+fq zi^glSs7@5I9AfYT6pF@6o5`L{gj;46;3^wP0kB@r#bcXGDa9u3E#@e^10uU!r`T{& zDJ`uPbn^=A@J|k_E4lSCc-ieq1;w$u?sYUzW?Vr=%;h%=Dw`~STC31sz8@{VEH~h~ zQvVr!2A$10niIfT*%kT2g^hjC|Fq7s%`!lL)xn<&cK+Ucvp5NG^tY6gnacsOA{FM1 z7+%%zvRxZCfI+PR2KL?>Vb7{R{^QB`M}nUCyQyrSe+=c~S!IK2ON@+QVT=y#I^4Z+ z4EEBho-BBYcaaNq9mrmjp<&oZ@4X<>9iF;LG&D~i;|IX z`Z~jJYN7GstndckhChprI%X!HE_k}v5RhN|9Xg2PH=?=69f1qB3qrxIW8jaOr1BC5lfwx3Co z4*Tt{g9+ELC9gE}ML%(EI?d{oYe%0E1K`M}K&n5B12|srmvI(9*qJ;1F{G|928Rxx z;J!!jFYj7Fm+saE&qW^o(oA}1`K--1g@uI$H&^E10dHobUse$zAiRz+)5O)Rq6_V> zES1|#)pTEgmMU7jJm~M_tV4cFS(5f9f!j14Q2u~V>+~pyT8=M1Iqfg%_rjkbjIYwz zH*j#zJ||8;YCLdtV$au(txxK%|^jcO~ObuljD%-Y*l4@9#Qz$Kyo*zDj4ybojV5*qRz)Sw`+MBZOkla84@U zO6#F0=n9nG zgT`-%?C9*%t+A3a-Bo%@=&kYwH3GkzsjvT7wm|lguk<>f?%SJ?AZzwF&KX+4^LBC( zX#A+qP6-{rc2)MOJV%3jad>8Dk)jIYd&?5?cXd;Eo@bi~HZT&wEaf;lUUNs^`I|D@ z@w?iWWmh$ptMu%*ejQdIq!ipbp5@_{rBkv+7uhQD*rrY~>*iKBrR+|1b6VmDo zvL03z4h)gsf(}1=RsQwt$2Q6mF@=TGt!s-QU_(bW{XbyPj|atP36_7y!QvPQ2KzYT zvGZY0ATlo@$ZS6>$joo~7%XF;SLoMo8SLdI!7VhUl}$M9>@v`WgH8d|L7Bg$h>W$$ zRab7meu1cc*VUO|0YWgr6|vw^x5&9Ga@IJd1~Ml|-(A|-9Oo7Kt$-a3H;{>?cW?Wj zAqm=2tBR<+kvn7Lm!Y_U;9qJ#M*gZ1wo2l8lL8N_4RGP7fZ~j`AN6VEkNz+aM+~I{ z$*v?Ks5}^A;Ux<@@VRAj^}9BYHz)<>DH1ex1{Wo!LXF=eNG;4N&SJ!>r zW711CVIgqy5iVSYI_hs#MNR|MpYcy&(!_TVE?@BJS2%Wj-Q1*+6DS2Y(5`LkI@` z>fY2YB;#arS+j6&ZCrq+b3t5ftZVcBa-7C0%Z4O4XYPf{!v5?i6@$-0E!KhT zi9p$`kINhOY>Z6pPdLM#9kZNcR&rDye6?760K~&c2f;sL^vsy)^+gpfcJ{&D^G}Qo z=4JD=*JA>Y$&{akwwMK+O$o8mI=;HUK za59rLj=|3`qesr#KXiz2{gRg$2h(}+hgth?ryWq$6oF~Q0TsCFx$QY-ZcXe3szorX zx{z3T#tk8IbaT=TKPA9XH0Ja|4;4xFct+bB*na*;21|#ktqTf-zhVg_c>zRsu@fDr 
zYMZTeFsA)IU~j-|z%S`*9?5+VMIF~i>IL@e%S?Z}LlAMCCrg7TP|-i8aU>ic<(Y_x zFYGkYFGTLnQ7Xx!>CI}LBIIYQwHQE3p#xO|09(Tzv{aOXvPZkE@#3S?(_rNam5wND zFV$6?e;7b-&CD9C83NNZ4Gl&|`a)L-?jRU-RzXP8Nm4wMjBk+%whT!@_`#ZNqGB}s zXlDjG*_Z50&He_+)r-%U`%x2jce%8~`VH$y$XzEffGW{wX?H#Gx!H5CEU>N0h=H}S zWkcX4>NvbGEM7Nr>GsbD7gH@hGJcgm$x!;;b#*6{cnnqb_vXxMNIrqso``nc`dsya zbm8rci`-kFH?;vUVum+0g_Km-(%IW8QWuV{+)CcuE%__lDjpVImkgitI2bBCZi+lC zoZnp7?gY^8Ga8D>c0HW$osaV!!(FaaXSa;v ziettWci)<_P)$_#Hg!^qzekS&%SU!*eHJBzPhZ!}nMp)~1?Cgo-q8*I)7N$$&E!iO zF>FmvR4lWo1E4r$S{nrGlz7s4)t3->DoMSe0Y5`ej!u5y#1!994Z^Z24rA}XKn1GI z;LNjQ|TvLR;nr6-FN;IoIOaz+_Js3Iczah&mzWKw(G){?kb@7UN%S^XF4|v@u z*%KC+@{lWFYC#${;j^q!+j~c48+@9KyHDk!@#P?yVMI7)?ZP^UDv;6VCWE~kx)cHy1PUKLArYsknZjQ z6_jp}MrP=4sTo8-a+GdHkZ$RI=Xuw9<5}yuf4%;I?{&^Tdw+H<$Z<6z^lff`a?TZ3 zBb!$ouabb4IW#r?SN(vS={?Vqa`Q%)aZ5vN=3+0gIbZ38ceiLOL0_$-zYUj9i8sGa;R3w(#J~5u z|69h>{3(FGivGxy`myoW)Ix+I-P}Gh_s!efF9Sz`n=KaNKd0ZaC)z#GJ`)m>g?ird z-Y?LfSo~Sm2=u<}*6z~bR@QGx}Ae^;n z!c$Y^U2_v81#!ACO}6T)+3#`OHC{8HKFoF!ZL2rvrwU|P*X*V-(v1FnG$S}Hcw%HtH^9IDW|wGMVW#hoh5-&OD%?RJ={A=*yCweIO8;tOX3T4S zFSyZgf`Q5{v$t+iwGR-~2h36ixtD7K9`?90nM!w3vUP^3#=M?@uunLK{6+_1#|>ls zpXto^4hZ$yPFvb1`eFQ@tLNzy5j@VFm)ap=o=;T;2inKF zle39m-#Yc&0Ss)+eH~x}pT9K7g~%CR>6-l@Z)dS><_JGg-r2_I2>(qmx0S{q5}{R) z3s1%$Cv?1SPk27Jx2M+A7HV*I9$+GSr8w~?n+AKV!W?y0K#{3nBN5oimuB4icWZss z#Zy(esaqEf;{o6$$$j%(8jLvK&lV2I!B7uz6}j5dGzf5)KIyW#tmHbFoCLZbEd-$Urdp}>P_t`eroN-Ynw_s1z?RMnD;j~aGOgGI z(w*fUZ}3eQdMpXEf4!)Bd6>F-oOici3Evj0D!$U3993@KFJ1}?;9F>KPRu>U4b{rh z=&v&J#P3!HFw$$6${HLfhSSRu-!Edexbkxg8uVMPrdxb%^r3UyMW6}+<y&}F?bHEl$re9Fvc0S z6Cq!*ZfgQtSDlkTKks?heDLjehEoU~VDym&tmhdQbJW725i4z2o6jEez{N9B_zCN8KMswdlhmA(q=Bhzi1hWoL&^2fAZuXMz_ zdEh)=$v8|zt)Ga6Kke5Ry9r4xm&k#O<_Y%3T-PX~x9hkC-uB1kt?nvihYlS*DV*+n zYYGI=RkNKoE(qKdux8n!Tx}K3G`&MF=vMb%Gn_MdM_DB@iBW|4#60@#996E&y}9B@ z0z2}ine%=Ad`{xvS~^ahMENl0VTX{5OAM8Sh5zbH0D2>lo2FNr5rI>4aB`}%cs&!g`;*x0jynkydsvsh1r5od^Iq#vz$YJDZFo% z%pU8S*OTsdvXTxYRZZViTjHgCE{6K*8zIM^U3EI%{O3u%|K)4O@&LNiadyDJA#DkkT-QDiJYB+HFm#bVJ%lO9P 
z0G3C9=>y)=FJm^5V5k+nv)!(L;;BZpi*+yg}k96wi~Z)a@7Bd-H2;4aG4+# zU(LZr6+dm#N47s^_K83}om2vKvPND>O=UtZUs18y%_a$!ut91-{M3m9)EZogG2eTn zAg9)VG+PFi;Gg2H!F->OvV5Q%9CD?aaXJZ~F;Dg|EGz3wDtTL}>Q)XdXoQUu@M_1x z8&aP@9YDWTes9%>BaI};eIfRUh}oCBZEOcEjht?6Y%pSMJvSxit4~xns5zD z-|To*Z{dnL;Y0`gqMhhhU-fhrYU2?(G^pmg+6XbgA-0gxXq7EBc!#T}#p_cvtAj&K zDg18it9*YrC8}7ecvQzx0ouY!tY|Eys`QF+un1bCj{+V^`E36bB9iyS%BnFyjUEcz zWj+CZ8v5EU_KljyHK`+fZCs&47Xvpx52kkKFgxIKqzhl`r-Fsojnx&M(Qoo{1k%0&aZ@QMK8Owo z#hEkN(VnD{^X(Cp#gqu_DvMdIJwR@l(%)(8< zdiU`VGQ-&McCF$|V{7_Ai7HzuN8(os5PbqauKYdT;PWB>Jm0(r6kk-H5G0k|*B2|$ zdiTPtExv6Y9dfp)G1z)0#%F#X2$3=W<|WF*$5S*p|8*>b%xhP@R4rX>gaMdB=L$SL zHu6G7Nj&^XapYEVFYmH{Fxj_1-?onQbZ8~mp|3xxS9V&$$vJR+x7QQ*WE>8f5aTBjjR6d2@CSsbaq!p(9saoNl~Bp)4pfE_iQ9h0>>wTroKy||Q}Kah5>C9) z<@a{iX0e!>)@SKg*^L&=sJI7U=+s{lgBAHIT{)uWo!oN2}sLN{W5!cQ#6=ViyTUir!)vqYKq z>~6-StE_}10(U0@rlHvc=Q>a26J^6IFSBUESS2vfip~@1A3<95f>XLYRtXJg2!ONt z#p+dhHA^uagjM)OVU`G@&-v5_N5P@eo&t_=EOO!Yd9leQB@Xeo$dgeag&mzAc=LzW z%DkabqIzvPI56?hi<%|rGxwQdvng2}e^3r?Mo&ztE3X{u-@dRzp&+kAarCj#Cx6-U z`ufK+22skDei4VAL;8mb*2bSOPw*co;Vyhu9GcRO``9rvtSf{Dq;2kx^Rkbsz}5Bs3eNFV#WT$_k3s;@`wriE})?jG6Lgsd2@_H4PA2@pFQo&&?&HTVL~2 zU{*z-PIFp^adSO=@e<-pxZ>|nOaG+huL8MOHo5PzC9|_kfJ! 
znWkjesTN<|TQ^Hm^OcIzys0DOVDE&?^zlBl4KO4`#Qtt4a5qcZ_u++W(WtschDEoB z?rGL7bZdY8MobR^%X`zZwK4!sg*+qHqb}jrGS07)w+Z`y9}Y;lv3TgzC7)*}>~{jL z_yaE(PsiPfXeIg!ByZzy+=8;sdwHbW5q|O`L<=(mwz+Y-s+GLy_P{&56r@P6flQRb2HQlBOJXe5`L-e5@qA4&rz`p}Qp~p-7I1p;Nh-o6+f6FqhbZgp?wbgF6yC z#WK=$hGvl*Mznq2>T)Ssppa6WS=NtVlVypY5+Ce&r#iG;y&$T|Y2Z0;8>6~T4fx`G zI5#ph5XNQ8wa)-^lrSvyou`qmB4nCkTd@Bu{s)&mwfGUF(_&a^9K4t;>7=H5V6O0~ zs!I1c%L&zJ#5OtZ@5_p++wr72Esobw+?*f;fkl?Nnk{Y^jXtrTwtWE&sz6TVPwnj*F4}3$pS9eQ%w;Dv4?NVMp^#QZ= za=6Hm+42gIOG3-|o5GmTYd&qZm4`u0gqDJtp4n!-Q$7C-z}WTox9f+KWq+1#{!&cm zEchg~n&qhh9P@v?05kGfuwLJ*s=YVMPjDum@YTyCl-r6L5yDA|Gqzm8R`alsAu6fqkggsVOl@qcqGnjo|54TOe|x>j|;cBx}Xb)sNS!yO)K{5|P87KB*vh3cEtIo*~q`r8v> z+gts2gpXVoiYDbwb~D$|tr%DH&7X7ONY6@{6T?wYu8*y2`Z+o`qQ0(Yha}cH+AO0A zZ8R}*@!z_e&TQckxiB;QSc&JHy~9SIx3)))Qr7c02=5gxEr?%Q5JL0Sg@jM>u6Nbx z7`{FD+b`Iv{BW?XkLSEEZw%>aXe)y*j!8`M{ggyeYxpN$c``|wx^HE8z5iw@YR+%= z;CdZK0Tx^SHrV$@P=H`knw2WRG7Jn@VqB5MXjl%+Qg*ESdxCeIf4nS8@)2szfy?eS znvG{xecyQ9^k}R82eRk>&fx|N_a*Ue%KkG$;ZOdoQ^4u`_oG^0>xbK|LL9tdPw??j z4^Z25CEOAI*A&18{@dA~c(_A&*(XBA(*LaEhP)3+l5m!#r<9lpwNQj^6ltP!0%7C1K`MgwK#yI?0*_JBDzWWt-7T- zhJEZXQu$W=1R0i#JnF|s#1@_ANC(Pz9&OBBm{gA-gD)c{n*ZbPM#~t(hXM;_UCCUC z)vwRrMivfKkFi$g9a&z7KE{zDFVZMC)_mW=7vRs{HIkh2lb!L*e=2CUF`JrE?sVOT z-CSbCU1`6*YExL`PRt$92+~_-WKX1 z1DCwRwE1!WM%}Hie+1kjgjaKsgjjv_G$O)tl1yX71>aOW$+3!p)&dI(Z%2wd97ccD zg0EHNY(HQVh~(K!F>hAqo6j*AHeR4`*hmNKWS)D`h{xJ`y%~?QqLuo)0dCDpE>PpkYPVv0Scdh7}v;>A~Z)ZF8<_K4b)$+>c{vYOj>G*9Eb+w>%-%IM9bbeAL$i3VO~m!w%#}4i-AhCo~q+WyX){j})J}k| z4T{GI^kzgkzc1_&P?hwq%f>%|07uKgRVzD^)8QH4im+l3HdYa&jrqX%kDt?L@>Pjz zM@JhMaIN2ZcTxyT4A=4QVt>zEf!<@f1f^qpw25SIxr;t6|H!+Xt9bj*zjpiZ!h;@& zpGqQu)kcFp#V!5v{)Weoc^&FIz9iRjYJ3@bx6U_uWhf`=ZF0}fQHb0=FAR8|7ZOfy zvSduL!}1x1B%Ogd+j=kY48NJD0@1BVNQwP9c3?X4F@UW32yyf=T}YjA&X9@lv*ui) zm;_2byrLoHSfh1en9)yu9*Bi<>4i$?;~hOWG14-(3^R)prgTy(93H3LRk^u%QmIyO zRzHRPpWJT7k3mXRX@sjA)=f(JTAGyaj3kpqEkal;&DxV3bTTD1MGi)TxnJ6cHM>Mh z`>uZ=++(o94SEcaO?OFCc)#w}(%5zJq2Fh9i%>6~2zDn`0#ptNu3p?&qH*1wD?AL! 
znnzF2`-u3eg;#;96G-@}GwJErt|L;T^0xAL>FyM9JULi0*}us*zF3N=-uD@+Uiy}L zdwa%QOpXjn5DoLIL#y;DU7O15ecKaY9nV|$H01NyQpxfR-=8)&GhD2puEOM z1MQT}>F}mb79b`nodu|3hAoVXh^?ik==gSV4rn_ypZG3_d}d9?vt>OeuzMX9Dk#KF zOpAoCh(JDWOInyVWsGH8JZPT{Y{=@I?AYD+%1LXIk>Z*zmmSG`WT&4uVWhqAC&B?K zrjk{aPlf}<_eiK!oLc+FJ?Q~f58|1AW}@r-+}v;Yfe3`=VMFC(nQ`e^tr{Iij`Q+{ zInUwX6QoSo3&=apST|>j6_ss9N4}_l_zOX~f7ht*N$cc!u3J|G^jJ|TqBm%bC5Eb+mxvp2&tHR!0Juz-Rx@%m(N>g*lra~ZO!_|8H7fzSF zz2eIN#{ApWX4JZt=MCs!(UX(-!zs;QHh-cUaF1W|5|LKnlaHDVh5~fF7r~w3Z%0+o zJBRnR#ENh`&hGDOKE?m%;{`V(dlQpBB?ZUeAO)0rB9NQVzp{GkGhP>Rg-^I#VsPm69ple)0(%rm~&BN8ZV=$ntOezOs>& zA+GP>Fcu_yWk;o$o3F36xL%7SG?k zYLX5KT}!swH}Pp)8xPXZEw(ztJq&0oV?OUHF1^^BNZFISXZ_@(rFinK8Q?Z$@(fW09B7?{Xais( z+s34#lJhHVZsTmEZV&nA^6b@*S03 zY_qr6g8#Yrg2BMzm1!9lw<4)G+H4B;-N%pbWQK&Z!29gEjs0H!mo!{b1MIq?e$P++ zAbp9e3@h$`ma$uNB?D>$`odPAJid6ub}!wbw6G$mnljM+8ae=EHkn4)hazMqRM+kaas4b>1Qs9#S^9b-1|Fign;AYNAhpwz97fdJ{eCn~F z9tJJ`FrxaBtA8Q46Lcn0#ca21QeO8vjdIi%d1+#w&&X|`@ZDoT zT>Oxh44*p>S3mXQA16S9!?9WSP0Xp!ALI^3iiGi4N8E2kY>TYHkVDLoVeTc3x0 z1BZrna=@DEH+}x7tPBpwIGoH7uvwLQCpeP)~0i| zkMPu4yW5FPA2W4rfA*2%q$A8_c-(Ln6Q=iTScnhNs8)^mSicVOq-^|i9swAa5EbUK z{~~VJ2V{<&z@lH1sr{&Zz2WH0%@5COul@9g7?dh62>#OcJx>r!mHlLb_8#%%aa}V{ z7lt}}K6_8%3Z(9Ot*qU6oy_hYTdNUa()Kiu)(RQ+xu9zrmwc{OMTQomX6#o9S3YW< zSB+S1YTjbUm&KbGVWdQtn`jYT%`hIB*kC6{l-obg4@>;Wbn=E*aD(*VM#Y?f(xCnP zWP*T0yNUL?5@+6AbG3Dd6XPdEJVKDEL$#=WUvq7X(>-am;CB;*E?_4{4k};UQh&+ZC%Mc>#W3tS7(-;Y<>EWmjdj+hIWP+0qjA2xTBuTFBEW#& zFur&Kn27-fNDqV0T32z8_>VH*3wr?i1{#;Ia3Ez=13WUJ!ni}9L`S_#_V#y%;=-eS zgeT#E%QI3h(*zR{D@QDt+-i*^7l#P&fkw0HX5`r!Vmz@3NIT!D{-gJ@(J4c&>6eSy=M-ilZ8tr*XxZ6-47!$4W>>t|?)N9kpE;Ec2^B zA5quPgnF(Xua90}E1$T!f704qOuJhi;`UEtXnTh=`G|C}4b-7&N6PqL?>6{N?*5cV z;+h|dg`3Lh)e?B}ya=GkNSk^Xu4ShBUI+ZA1_I80j^-O`ezBA+BPRo7fqPtWADURCzu(KKW951#_ZVn* zQ^2l$b2;x-z77l4pl?`qD1#e@1p1~fU4K<94L+p;ZY_q47~%h2lls4JeJbQOoaZ11 zILI62EiSdl!`Cf7<4N4cIzB>FdBj##Zgi*r3Y&g#ecEPKl;bD<&dnvZ!<5}}#=a(q z9ta(CgB3CgKJnbr0?BboTxOM3pWU*TUM%))%5O8G|&!hPrCT6Plp%<^V;5qP^E+zdeSrLl&@>i3E% 
z{3?(c`;CwWRA=GqPr;U`D1RT8Bp)OEqN@GdT*?BE%);wkf?F~@oVa}zI@MNNDHLhI z>fh#}Cwn}j*dE~6<4O(V%Kf%7wfV&R4%NtXQ7O~S{*ea?vpfnMoEqeC@l`jE@9dlb z%BqzdW@Y=5WrXveh2ikP$(yQ5ZUh$%AAFEzq=>jYTBoRNq%b=M{t)u^O>{2&SzCBU ze3Q=@>B-0P=FZn*dkdQJxDdROYWIxfxqDnX>v|fN?agpf=YdiQF8o_nD{Ug^1P(PI zJ7y4N)IkkWA2^x5??i@*9MCyU$*Jv%k>C7Yr<0hQ>fskG+nBDE`a3s#{YmGf#4D)O z`%zMSF*3vMc{@WV8bJKg?_yEIU4Eb*a5FIc2o^_1d5UqlkQmjHYe3(>9bqQ&9GTFS z`QhBP6sO~U*2btnWzWy{#*u)as^sNr(m0YM_v;hvhiTS;+`?IEA9ipV2_+2z%jTXQy#`)iuC^Dh6C%Nhg13Zwe}ab|9m>dIY}fJ? z5wn%0BaXU67z0xxZLI9E=+*NA0cLV4BirHV2y*ASIf=JalQW>-m|%lM+vO_BOtGF- z$*+9Bh_*jj9xVN)dSf~IxNrI(VF;jos|&d0M`17DzHbiwte|=!kAJc@y*VNT$WZ#2 z1PWaz2qs+WQ1oAI?==48p^T#mA=zQ(YdpNT!4?uA=xlH0_;+9P*cAC`N~?u^Ct^C( z<0>b=fle9CfDF*GMk=QMd0 z1W4cL&bC1rmLiwmI>Z&6Vli8V*Oh|Ux$2icGT#Xfr$u+Qv5Ty@ZtoTIIVEUe=WaN5R5}Bmpz@g77?U)e$l)+>a(DA#p=#M?FGOpIGk6g0tytK-KH&cB^;%z+UuKNtzM0Cm^dL`*vw;%6sf^g&=Gy0xVDW$6{I4JX zmLK9|nv7`jC)NKaFGjvX-X~~QKI@lZgM!7uJ`PvhmrBRy3a6w z3e}4rGHE~f9akl%2so{K0MH#=z-QnUz#q~BhPlVt+_j~CZFigB>Qt4PRsoTcMEoKW zRCQO~T944<0x+4BPf$=;`c1cS(|vE8E>4eY)m?WMZ&%;yywg)$hm+@^dF|Qt0!+2X z&xr2dRZ7wYtz;6XB@9un0@=1Lpxr6YRx`?Alx0Zrt-q$cZ+Tlz?vAWRmd@SYE5doh z*`YAhrdCvDVv3QXxu4lf!qo3qx^^-{4SA%XCGVNFI$Q z=_qj797pM)JONxzI`682c7!+OsL$2x%@=){%h<78M^0IHplVJe^c}e~>W|hX9<5@J z<4H}T9&x?EeBIixzlqsh0aHs=tp zkKiXpau4djPy%kCSXm)Hu-tDwwP?FSo0^ONhI}Gvw{g63nB21yz13kw_dF@Z{1$c5iKF>uH2ucI`ahB0fEdysDx>ueQcH zFu}qqi$a&p8{7d5{n6uR>Ql1ui934zxc4`l)dyDCN?rL>E8BP>k~dBoTf3R#Q>^{m z+P9+m#m%y6H(LZP_V(GJ29+K&usIRH#Nqex_S+K#KZrEbBem56F!H=;A3LvZAv?HT zxoJ4EHgq&KJTjXCk*k~c?X>G5QyyY28`FewhjV7lK@fw0fXc~peid)8v~_iP$H}jF z*;9AzWhIORrxFR_twkaQsjWKF``fvJV3bUcxP7pJKVG_|8{^xQUdbf#FNK;LcW4~c z%v#lj;}N#p{U8QG5^c^bewnYow&!~I@E$@8p4{?5gDiRKF9|eKL~!vQ5^KNm{nG%kj9>J@m+yg8$4pwD%yO3?0WzKKQ0% zV~a}B`(JlLUguq@#N$i<=}itGE--^h*h$;de0N$D;J0dnP1E}BtYc`{fAmFBw9t@~ zJ8*^{{+rc2U|1K=cd4a3aZx^*%e7^kWi^>BIuTNU$J*w)TSA{nf??nAVhO#;R6WVz z?+mYX|Et?M72LX{bfd;%KK=PLhVmD^?or`*XHEJVA6H=%_}GE~U@%w@xpro_Fx@i* 
zVO;H79Gd^%G55c{yL!~;mPw1L$3xxpMe#YvkmB7>`NLd~Ir*6Kgn}Z5RrtYOoZ*!7PV0 ztSlU`V&7mf7B~1kgDpLs@W*`sphiKxnTL1=2GA<*@NUsnyCzotsW>66rbz9nao3`P z(a=2({ePz;AzR*f9!a{anu{{;EyumtmL9>tN@WY5ee}S~BslI*%G6b8N`CZ3Ak8JZ zd;=w`CXiWW!&l*jA{#3#0kYmqTVvSV-aW_{_R%geXmhc#8&WnGJ3QvFF4Ufc1HE8 zRo=<4Iazr0S!-351*^-a`z1OZ2}A+zFGYeKkH9Fk&G~tU*$hkdXjRC93vX#>9gC92 zg7E{lyu?an@DvXs(n6PLLj;8RC=y1zB|R{M{z#hRL7(+ecW$wC~%lW;pW; znM4P+NtWw)a>A(*d5F$8 z4wuZKXYrdk&eJUkmc4Agg(eTyj8OAYBqll$z_zcv|x{nX%%&^)+6*`+#FH zQxTI;>Bbz9#zt{D^U`Zeg1r&WPL-mZh%I?Uf8ax?YIVD6lYt}JnETv_#-fo{;l0i)9XaqLfkf@pW;XFG*6etM|-^USeF2+fL;^dlTb&p}UjPm#_QC=^DlQyEuuSb$npm@P*GubHW`Kl{k?l#PgEL+%dMH5(D?^%Khsa`641n zV`9PkqmrA_JXtRmg(0ENwje%v8~ji1jN_EW!Ifl1o#HnvT(F5!|6JDTy2AA`Y*-js zc%KnFexUGBnZDk9&L_K**|~CP#O3ny|KkO~2XZZ+=*zjuQ7+El9-8TpPIjr*^Jfi)$0 zb`I2s_ahA&+I{^KO}LLkm0B1p6K^*B;I z{qT==bFw5#Nqcp$>kpdw9J0&s#Mufai($DOO$70K2d$o`iPdmg5Cbk7uHUx*I;K6Q zRc_K@Ia)MHA+Qac{? z!r26HBNy3+^2uzLt-1P&$!HHR4UaTA+^VWR#y^)IxeO!gsNP=O@*Rz9+9i^Y+4-H~ zVSM1{T$LE&bK!C6>e}(;c5J&^jj;XeRg#tNz#F|5JQU2diy?ceB;H+5b~1QgZvfpa zcOiVUw}Kv}0izh9Ouh=irg}&yQR6ED;~lDWp~kdX50jB?zawzhcH_)3)&oBCQ3RoA zQ2D!ct7xQ>0IW+vrAUxT_);xtB?TSLRd2srp%Y(4^rVE>O%d_Ro0X0P7x*f(fDpoP z?icd9`W=&o*jb8vn3D@EjZGT*RKYxu&k?7rj?tpllqz_2#a-;O_aXx4TN=wv6(7)E z>{ZhqL9d206CwOdJSqL1_Z=8j&yz1derIzHq8`i^A?1u83SZwI6DlrIVED3W<$gHI{;dhLV}aBJ#r#zFNolfQ;BD_w3=bB^k> zHuFL|icfFMIDdy8k|SRuuhT=dXZM2_A}`DG=eVQZos_p`-*AMQYW&V-JR^JdN!Ery z&!bk7=r6~f_$a<=n?|rW9;$(^yI1S61>NTsKB!u>^Gw`kLkw~3O&oB5axOVW>BPO% z`lSMTl(}DGTh-9!N!0FlGCFfSCFd{GQIwykgkZI?=5%_Y^@1pb^JO}pna<_9n*BR} z^|5S+U-w~&j*h)-I4UsKLto62^xE52+IXZ#bU_O@3nw!K7O|?;`|jQYQyUv5PX&t= z+9S5H9={M;{O1BbbkT3p6(N1%-YuQQ4^c6K4|X^g4kzMW;hM6YY?zJ|QOb6YvwF?Z z>d=cj_;%i{Mw#7CY3rWOC0Eybr$?z4R{bYX*76VGat-f>5>9EpiXngyqD0#Jaao#I zHg?$9Y+utH59O**Yct(RmxzsPOAvO5n*pQg<%pxpODhdW&E_o3M)?}4wo?Ia`=M=< zU$l9+uZ-pBxfg!rIYBaf6bAkX*-lQxd&?rKm48dJ>mPkZc_`Z7kn176YUJ*<>o{Kp zL+Exn2S22=9`gv&k5jVgC*RM_k4pnTrk7-QtaUtA@TGo93EPA`3DjZYH1BFe$kPvcUyVWF&(MSkfvGQ6(W$Pkw*E 
z%(8fj_7W+VLb`Iua^Px9=j~s6pSkEx*+i#38 z{p@6V*rB=d`+;{5JJ4cX{E6#J`#-DMM0DIZ9q;=+hU>UgcC`My&U`{>JMTAUO&uyt zUUae^+0M`RvOWWN1MFv4EyaR-KNv&@(dxN_|59Ho#inpG!8hW}&fR5!Tlp|63rnNI zl3}&95NAagPAJy`>2KJJnnM(S2*rWOTMHIb%W2>ZG}9p+Q}wqHlnQ#G?V zcEiDUX~RUgeLZ)VlArDBxkdzpGL-9I{f}nK|C+y1!EmLIB5(y*pX(pAge->N2d zT=PI^4;4YgY zREO8~;7RU;FQ1zFm1XF4;W3g3KzeT`Q9A25ZaxfFFmw>32L>t1hBBZ2WnFzXpIZ3u zFhp)a%i*9|jtmIG0Z3YKmp2w|Rji1wQ@=ad;d`SlTJx1n2M|~R;B3~ z2qKIci#ez&W&>(i!g#)3!AZC=jG?#lI@q(-#|A8Z(ij%1(WH?2Qq4xh)!l=>jK1P9 z7{+*~E;Y6bYBpGXaaT-Q5rWvg=tOYz3*6uN>WbKpG&Qr48v-#JousH4unWoI5Zj(j; zy~2Zu;RZ|OhiB=7s zD-x~S88Rw!d{{93S6n+?bd7#a=8P>leEr*YajViX<{Y&vvMF%5smELwThu8QkQ)0& z*F3f{3~F8|!1eQKOELF7+_w!cYI4ArHCdAerm8fdY#k@b%m_7GbZeCWh%Ykp?q!*T zRI|HiB{GO@I8Q3?iRl>aXW^b9P(jMy0N7&=SmBZ+qsROUJYn}Uf4kl#d5xmb;-NSe zaJ=%?2Tvp4?F^oq70^J)d<*1`O750^+hCmlcQ z>Q;f1>KcCWTe;ih$aj;JafW3zI@6UA9+`ARZmapM5H`>>T*pjT{ZNg;7XB%<&M zdGw2LTSL)>nMibc3bje_C`hlWo(#qY@eC`>q_f7?M6|vvE}!b)qX!EAk+DfZD@XyX zoD-4qTSSSTv=!VNU_tAUSJhkNm|Ok>*nK{0yG3%8l`^It%P=Zr3VK~b9%fQq zt;XYj=&=2ZIm6jM_eIIll^8MihZSEbxVpn4PY&(~mEz?h5RILZqRPdB*um3h8sQp$RX?x?&gCm&~pBI>PVF2?$0A-yqE+uuS&)rxB(EH!h{ z9@OC0Y|Wdn0gagjw_Kmg;v{greJVkDI|~K8EdVP^mgOGQ)o^3xTHCR zM3S^&`GpG3u#dGThAw%1WX9MgZw3IE;r@_r5kH?73(_Z84EjAwNqMjL^WD8ND-e^j zHdhl6X5XcIi{L*x8V}N)!@jTwnqP=@e{(+j>7L{_aC7(V8SG9^-RJ(nb&;qaMH>_H z)-@n5dGAVi!o_|+IWBbegc=Ci5}uk}G<6__O$|J5Msu#TZs-J+%{=#rbiE-BhG^Y+Yxy|26b1i0mC}bUKUEay z$;=;m;YeKUl#F#&rE?;9UZvmwgP`JqY3dh^U?Yv-VzY+zRI34P^-OC8+40T}8OHt3 z+0!07iCfglc{iUPPJhBH7DNh(H%tkRvL6M%`s`3*2Yucl-nE*TVo-K;)pGPiW;!?} z_qYjhu#k9ZcmV22&W<=&{%i1(%$dHL#+PcDR~ zLt$8FhZ6$TbTEwreJvRzEQ_zmRiNwS`g8+rORfq%;kd#2wSB9WRGOPpPKCHqwsZ&z z{pd@1BIKb?tO>Zk9)LvwN4u;h3s1?@q$a6Cy>GTOXo3+PbFTsMLQQ66|$E)f7irx)p6E z*uSF%!bkD@I(8(uY+SnQ^zFUJQ+r1K{40*|G_bfb-!bwL3lIUQgX9@2C`U+RjeDZS zwZP}{m+OiMv0P`ho4{4XvFzKx*_T@iZ6qIg%AQbhZ9jKe;ieP0KFomk zRyHvnzsj#>O&&4a?XF^Ngyh#3h`jJpuc5%AJ5};Ubb1P(8@k2VEz^kY7t4hl`_p^D 
zmuY~7z+>^7Kf9r-`_~rs`S^3QCRbadFI%S``6ztcnj0W;Q1AjSTS3Jv@q4(8)mekQ1l6GB&t)(^fnwpBv7-bspkcQWp+0FdcDJ2Y^B0QbfwArz*u_kwF7ge0e^% ziFrdL&Vt#%^HiMftJ+i?6o~h%)TEeeaoJ=m8Xv7A(3$xDdZ3zLRDkF8 z&ac_k3X+_x-RHs9$FdBqTGm{Szqz`PwGH?kg)#Fx0;L*x?aY_$4ngsm@|cv<-+Q@Y zldUAw{X? zUgK2rxX$Jgwo%R8TN>ocaj`MK-Ss;^maq+C2kMtt>6g2M-aQUYC<7|ngU{=|;mzKX zLQB_z41s>;*~;|Y55O*>25kX*5@g> zyV{5u9X4-I;T|3)>>&)Rg>qRR6{$iu$3KY3c70qRuU()lyH>%hY!6nBif5Z)C!g)< z`#Zq_dKH92Bl0J7fW*k{=wUzKr*E3-A!NLSN}|R#>rY(>_y@KY^%7s4Bj2Ca{#jHi zEk_DUb9Cn?yar8$-fH!_1#5k(_mYLNA<#GoV^#O6dZkXzNUkYcMO%=)h|y<4YU2pP zuk~KjKMaK4*7y^@15@8|+qtX?$Tiskj+Cx4&*+;WPMas+cFhqrly7OJOU_j>OBD&n z`62r%ls-(X5*%dT>ziXYLiG@S-yrUc(j2yHLv9G~TjfVpFo`%+l zSvXib%>qr9QEcMly@bmm9~_u}%c^rZo;l$Mf9+3{Oal{R^__o`90;uwNpisERB{Bi z0~)MOIn&oq$i&?m()StLYw_s?L{Mbc1Z$f=0SgXmfxcfpw1-q=PQZ(hkM)Ol{JV8v zU&nX?8L6x{Ev?xR>-A+ZbjuAldrWlO0FmCXDyX#zOZV6w%U*2!daKNqb9bbF7w4=x zF|TKCmZajVcadAZN2Sz4WT3g!OMB|Zq*4a`BVB#1r61g6Zm!;5xi#FsgexUe!A2G) ziS={5V{j>y{w6O#Hp*8VE(X3Kw5AIq;B;rG0iyo2*YD?h(0$%2&u-6J^gVSqqPoZ$ z!w6Bl@ATcu{inY-BZN#eZ}Zh^3a5G;3$)m!D;d&CE>uNaa{H(NRnRd1gXY%#q-pW1 z`p($JUexU6Qk__VUQWi-0Sg04D+uYT9;c{+O; zc0N1CT~$@VwAM1eKl?`mT7HHcG@ra7p&xW8x$E-?2k|C+YSJ18ZD%uBl>=|Mc}fyvz%*USzus~@#i7NDVw?8me++QLNDM-hLON>pntrb8Z)FkS*u$~S zU1bhU*=!MvmU2VSuYKTTc2XR(I6L7@nVK zM{=5{RKP2`D3GiA>Ohb8^jMp`z52YZO{2_``}v6QvuF{Q!}p&H1?88W3_hnf zhhU_-wpw#E9z2f;r#^mePC$DkL3aAH5LA`zOf&1Bv>v=eIFgr5JL<4I%nJFqa#iFN zM-tj@Un4gmCAv$NnD2B_TKkBcK2@x&YVxjVb^m0H2IzeZy_uN03PPwD4{Hgq*W}JV zxO#PPZ`a>B8WUlEC$+;FWTKfuG18xv6)_G@BD@%>`|t!RhzZa$q&9L8FAnlccre6{ zY#D8CS$kJeEYYhET==!sUA6yLOCr|Ma%q@@^_q_`4?_wH8Mgg+PiuoZe*48R`iLbS z+eHVrV3`}M@zJMcJ44Q=)sWb8*834~DJFxhEgucLU^>R&45N^n$FP0rgc`!ant82| z$3O;8PqIXX_NVyKEH?^cY}KY>$vWcA-@_9=KPwi==wSQrAbjG`z4@*uBtVij6%ZAp z+mX0DEG3=uGnD?5q$sS#J;i4o=itHx8J(2&)|$1O(vMYOT2azT-dzug+l5~#&BoGY zNu!UhnO>r1lkOyBVDmpu*iN18Fn5uTBlH#Do5<&ciYd`dQWo#w^J_+a=dh8yRZEbY z&vW<#LYx{I+ZGD*WNL&Q_i$_t64{K&v%&R|MyZ*)+oltR&{^6Fw@3V)GML=FfVl^| 
zZ5UgDASSDefd7nFi(?%Ba>ySM=@0ZP%qRW04?<`G9J)oZ#dud&u~h;1tSR)Qy28BFhYqKe zIR`mQx+-44AukspmfJ$u*C$pBoiazfpgA71Tgp}k4R1gVb_nyP^E+j>X4L(*gC7>Z zQN^epNB8-pUmKWQui*hvAHEPyTTt8E5sd?ub%wvU8bjN2>sLi!qmLknP~KY!1o0FZ zp=lU5(7qZ^>+7oaNn<4CEBK-`KpXH~>=%xSCr%BOKn|_=o`7)#0PZbQA|MO5n+3GI z)pXiOdw~xO9lt-lxE1ed?d8)ISXJcsb7`Z z<5DUUYb+8(YyN;d8Al^xQq>ea49e~EEfA~J@5qC*@DKzhzv%x2TsLP5J$kYv(J`$M zSb29Yp|BhVKbfQ~`>Q7ST-Hr(FfZUd5w` zuibaw#HcNhtxiwZCyAm}jCiX$^Kl1Yp@BB6?BxyDZc=cKG8W)>bY!zI;q+#lQuAiy zHa~(og!`9{7e1Mp+Zt~(9(NLY`XuM)r>PXi$5oKN7iR#jzX`r2Juk5i2(V=80gb83 z34QhP*4?@6$@ILQl!Tru$rl|3V&X~1TtYB0TYsi}+SFrHz4D1Zj1T{LATTa4HE*Ji zQZz{)bx0pye{K=Ix+-xX;u_vuU2AGc)+&DjF(+ zx~*U|BU0JqF4t;USE75@kSiJ-G2U|`fIa%wgTa8cWS7!!c#}57_hBRJ^s5mC(-9v% z%_c5NSJeCaP?lKooRRcyEY1*n#?;_<6@{($S&=g`cJ$iI0A^ki()3 zGvvNx2X|yOhoR(+fN-Hck zO>oFKcr$?Ouh$hMovl%K@cCgC9Ox+OF4^PwKNlfjlc|xmp*G-Ubf@%PpA!mp&YxD zhdId(12ST9$7gK}=N-Puba9k`NNGkHuwQ5uicqC<*eZmBI{c1In!ov@c1jpUc?ttf zhc26STj127_nx8*Z0SA;KQ8{9nO&K50*rxI^3C3>^>EG7it_OwgF+X12CuJ8Jn$U$ zXPnatTQAb#!$LTnh-TO_j~xZ_QzlJO1a@3n-j>4MFf}@JW^ZI@pnME}^c6<b~<2sJw2-&1j@mWGIH5`}mwRj7k#$=TL;!c)3z;Lvt%D3%{M( zX+Oyt<(C9x`>v11u2<_!TX#o^lE zm^{70zo{f+6dxi7!3fK;%x6g7&HwQN=s`qK-_C>d2yMSRgAoV8&wxP1)kunwf(VQG{uZtLys8iegt#7CHbJl(ic%RT-&28 znMC}RKGb_DH5TjOqg%)P1JjrX6LwQu6XJx8n?;LfGO48a{jcf4eMZemlQ$V)B^*vF zN`uEp&gHD=C^5=9j!=zAm8O2FM@2CRy5V+v7r2QqPH!aprr-Hu{7EW&=|E~xOY9)r z^ZX3GMcQ$l?1|p+swAh5@ZZDr@>7@jr7t&#SN0nJTP^9$0|6bH@%FI!=ll_sMXq!) z+I29^m|emFm)RJL&#x=vhg2{D-a)$bb&E(1YO*2*QtrscR8+aYbd=B*NFN`5u~FWL&&6-{^3D4^Wn z`9f)N^8vs~^e-w7d5)&o<*l?4ldOqK7)o8PmXh^n-N0TC)kN5rrMwW>?-uCIzyl@| zO5B!=NC1fdEy6(LoDViACf2b2$L4mvnG%pf+US|<{-4Ti{$Fh-B;@~5W|muIX3?Qj z2kyI6jqeL{Fvk@;wU*iSE^bpZ^6wZ`I{=i3+x5SfXZa`ei>>Li9UW=g*-OenY<^co zd~QSie3WdSEFXRtFv}InIQ*@lhm-|Aq`&M1^_&EV1IN#2#6=^hhw)JEoZ<|QB7v}M z&CbRM#*%ODl7`n|F?>r6JIC9Wql;p4Ozal(AB$8I&6@P&+TLG5SeYy7>}@mV;hDJ! 
zv?@nWlGtJq{o|svurWr*C}?c462pXiwm$ z0B2iTAfObW8YZN5Ngai$33k!KaED!+i~^7b!9?BLWmjRPCvLu#`d0LqQ_ z&Vrf#8t2(tAP~J+3l^CI9&#X>NM+nZ=b+YjCTDe4@68oBw8=7z#|WGv0iv==&n zn6C35SOhU14#^y@j+eisY1e2!O=mqUvBeo#i3GQwZ53`!b=)yww)d6@oR`#yR1yQN zov;OS<-uGyb_n2I_2ZN!6YDGvfM7_5H}!G!jn}sST0x8hj31Tq;kx0nP>LsL5YU|R zDym-6MS)B9{bWCXMLh;CMlUC@hV^2-3kP&UyUDiKAdDE%9Og_rC=V*tOEVWwPO4~4 zqXqwsc!VJTGAjzK^8;CB1?VASU^+A!8KL{h&-89(`;+|&x4-jKsHZv8xr~SK&bx_`qHA68 zpm&^P9|kG%12X=YG&D`9O-Z+8?k{QSMJ=e$w%+Mpt>yWQ4Qfb_O2JzmFUSd%Uq6oh z;XvzDYN@tWO8)BF~zw<^9sM_FxSeTMlF#@ehw8l`J zs1yeWkFW-nJ5DP%IlCMJ9H8;^bRje>rYQV#bJ{H06dAU^^WJ~jgU#d9^4oAD6fF~7{p8Y>lgxe;5l){HMm(>%qYhJBG-c8jzd=_LPGDV+Zmt*@n*{I~GJd$yrK>QD^ zv7I=et9d34n;Nw?t(h(PzezOWhB??R4jggE9edV-m^^Sd=;{LT5>11);_`TEfBZeg<6PlnZnFiUXnJs}2=U$|0oIX^)rY`{ z(8ult9GGKo8D+j{zFBJZOWL%>@e+|1rQ>8jEx>vhr znV#ss@hAAFJpDYK3y-$6iv!uvt#8}DG6%AX_uG8V$~&8Q>*I8Jj%r5lPtttDra6g{%yapb{e(`;4Z z;$N$=QJ>Y0n3G?B8?mZ)xOB_c35Bmenqp#Xlr(ZIM{QI}) zB=|@96hmU%?q9Ixgoe|WrS;QEKoLR@ixyOpaVBjC6f-T^V#|Omz`|QH3*5}sD@@kU zGz(Cp?%9jor3m9#&=8~r*NtBihvE?w zT?v*R{YO_FW5*hNiW&iUGB+W6Me&2f5_Ft6>BEB3DHnFi zla7L=j%#)Qn?tR%WI``JJKy9BHB+nY*ET{`Kcn=zAz_y^w(Y-uoH%~ZGuZek#1f8R zEvI9v$%CZI$#>-bwn=%3c5NH~R{LO_@z)~8#OM37X)W!?=M%`8lgac-WOm1?yI4&{ zaOqdXf+#llv7qy16)6C}ZBL@zu}OTa-^ zaqS}rW$4Qr1{?`r&NSb7`Mci*&jx+ZY%Wm_&hbmRq|SNK>(%@kBY;o-_9^)bOLY({ z{B=VDyPtZF=WF?`4P;8DcK;=0P+tyCT?1H83&#EP#A;ZwF9(Q(kS!0D7<{kKERn(O ziS@cfjfH+k`&8Lj*JYpv;aX&~!g><4=WAW(DtQUjn7EVWVTnv?t}=Op@@nk1l;fJ$ z43o(2eazQ`AJ+SK1FtI>w(XmmBlV1QP0#%%E}RH~3a1P;n;~;-UrxG$O6ig|F76}{ zggl$P@QEeKd;$(9o(@OKPkt-|6xy&c^_BurLs6rTCC3V03Hm$5cTB!*StrzQI%=i!u5HI#NV)*n$iqvSom(o_`8}pb@i4OGb zn$DspzM@E9J#J@yR)qd2qwmn4I)0yjWpc5f^ImS^p)55RN^D=9`?vqQ@#AeUv=yse?Fuss-d9R{+YMrA9?3} zzOIeen_dg6-)^FW(W&eZgRE8hgl^0X^*v8`LdUBDxD_KfXx&)GfSd+|1`0Sx$GC z{4htU5fz%6=Pj}RBtvn0fPch174Xng2+x7g4Q8S5FhXDSd~%~dx;lM3`MTIk9waRG zF!R~x2B%DIP!&temL5j<>UwkZ^aV>fT&5$ospQLlS9b91L$OXnnXVuzvTKPH|5>X* zb9B%jtVWooaV>Q-z=8!t-CJ|rmReNDV_Z_I3AuQR6hGH3)b_Hr!lAE^bdUY(OIP@H 
z0*#G|*9Vl#j1ad+i&PP_5dJ(8_SOlp1P1Chu5!*kW#J;Mh;MF-ECCdR%`g*eOjz*M zVoT{1ulR4@j^_4vh*|!RpQ&p7ic|Ja7ML3Ev(!qAv05C-#~q|N31TWYDyXpOhw%C9j1C)=N7Ec2IzJA93JW=W^Ia7S#`CECr(p4dOPA$IV@!wkZ z9&kv?_y}OmfepIOJeR>OcJS~aMK^&Cm4HSb;Kr*tNHLU|R^JJUz}*1MO#-Ml?Jwxi z{L~So(b!a~K5qAssn&J03)iqPUkDH{u={#K3NE@{k_;4O@M6CIZjaGp+b%BXw0 zY)ZT$=Nc?!dNNdM*c-&IP^qzd&2Rjnfkxw<^6!{Ru0mM1cSX3p%+snhsS=oALFg1{ zk_d=cD?NaBB;9*<>L3?%JkalW^~XVfbm?~Yi|6G^^DKM{nFD2FD9+p=+%_-~OV=B2 z%b=x8ro4V3W-;zE5M=0=8eoH89rNNtA;7zg^KhFk>ZB0#{q&K?cZ&l{B+=dWArlAn*hS52l&sNVN$ zeIoMHpqIP$hm7SU!suMJWReDnEL7jyh&Q`hjm$2@Yhc7@-YZzZGeog1Xi%`@1PaI7P8*{`)T;A*B1|&2e zkWqB15~&(J$#*RbE~Y@e6&4w%owDa*&8(;Bg(GU(8_y~xVeprv6M8alN2Rp0eH9SF zIf!hM;$}*EIYN8;mjPzE%)DeNqL632K#Je^cE7BBeKVY~6t^zt;DzZ72Tr)L5ozj+ zCqcFRYKJC|`+lXAX>-dse2r}EC!voGX5t9U1kVB@qY?pw7Fqp5o-D`YnYZBRUH@3oG_7kEARUy~b=OQvDou3h%54)R!VqM?8T`G)*rK-T!pZ{NY= z&zChI-i`?##Lov4=%C|q)2t^FnE^NNz?6WAHq6LO9dFNg!|=&)fUN4OIW8)Gh*zjG zql$zf)3?7QZ7SZf241Gg64V>)25Tp&8CwM!1ACawijJ|W%QTsY^cOKqTX=)7vu4Pf z8?ryF7wOI0TG!hFM#+kBV5%96QfUax+2z6!^NsdFJgf_Q0t}qJU3`mAx4j`amJ)0C zvEWJRCxKG3!29*ca%FRM8WKsi=8&%pE1Fw5=Sf0}1VBSV^h4fVoe?n`eN@Ak_L3Ht zg(NLoqYTS4-MZV}ARJ(k9N0&0aB!7B0i(ituv*aQOo{M+^=e`@emtB1eG(jz>>X5&zVG8qJbCH$va;C4W{u$WVj)`) zMoC32_N54aW|(UUH|@DaPW)f|hoyY^hc+O!H!TR(3_mdj&!*ucA*;wd23M=4SWHg~ zvGupxlD4Ae=c6zUdtmja@W8K6#Y#c;`I~aD=A%JvxHGSX?;q~=|M~LCL#AI9WWG)N zFo$q*DZgHV;%UzxCm3w%o%_?m;Ef9rrp)`(BODpNz5!iOF}HUS2hzJ(_@`=m6%pCv zZRNAIwFCSlXRoaM$d;JVy#4rE4l8%_PdV+&2hr_w6u_eX!|ftDDxsI3Q5m!mxz<1I zK8Euno(p+XtO zzP6Pc=4Z(5XAtLa>a|EU_yj)5Q`rD6HWL!&(t)#?9sLO#wa4g+exwl?!Td6r@698UvD> z0MHR_vxQgy^7TA61}WA$_71)GLRr!DPk1fMDbFbn9#X1!+hP(LlthJ2yfy3>m3c|R ziI3{#*mlFsUkd@@+-UcJrh0?kWs8G}++i#A692a)3Fd!-k3`tDx%4;Q!rg zAZ)HtX*#26>fA{PhSXv-dv}Sz&#kA%u0%mPZE^Oy{=Or8Wz?c|Uf^cgwx` zrk{>bL1gFTX2lrpJr=2COjlx=4O8k){jHfp&sw7 zupkawsySGJeLBAY5qsADUAJhzauP_?RC5~48<2Cl;+%TPSl9tVq)I>8e}t#}R=JFY zXCp`RE^QAk+)(z>FPZ@_nwMSG$$V5?`^oGf6)zs~hCtK@w=MSgLi(O`&)-)f_GZaI zJiiNXCEoJs+ZEBYosXC4Ody1iaA!(B>6rcryeFFGw3KE6Z%5qnU5DHPBawFM3Ip|9 
zf--PbN{7vaRLy|)t9(CRfBFboiOK2|%kOMG$CmWMSqE7dIq4PJtU5Kbj4Uyk{d2!W z>p|}kx&6GO*8BCRRct_GM(>Wf&fWTxsU_$n2NXp575pzw;o%qmDMFkC6TK{g_jn;FzszAvyQ(t3!r~ha3CD zN;jj5tBW?#^1Ta#%V`mha@Rp%`t%f>FHhL5Nw%U7)1@K#i**Sk8@ynI+$wdW1*frh z?`CmuW-AGl(G^$W%bF6OeK??n!`fv>JM`lpVa-e0-lG_XRK<|p#=++jqf}+Dz%Mk7 zTvN_)7iz_)Rp&6}Cl**){ZJ3CYbZ{79*=T$5qA1jy(stBYKV?2R!gpj2xLfT@lq7B zwo0I^0#qOOaZ@yXiz;-%S6 zlVoMVCJr8leODSRIUe}c?N?o&OEE$RM%n0O8`#A`1!h(LD)T$hFXsoC890_yhFbuaK zW8YE<8??$-gazQP6i8c33$$+#KM3=9AVTJR`Yj9=B1T1e1G8`jb_1A$d_^Xjn3svk zz7qg>8$4U`9%Q(mECAj;C6v4z6En6G6o=%QRQ+FCj}0I}uNOEh=Q%K75Mf!?(N;02 zYwYDfgD=MMjR2*1J!+8u2m;A`>gAwf zQpUnLMpISuz!SIG;G3T%aQoYRf{k5*+JhtE)mI?iAlURnS+RB!G2FX&Sg{K?ZwD*% z>^^%STYYu+?2G``EQ*n6J3a33I_#7p)OC(Xm^OQYwXtuYH^X7jGKtnH<&c}M!mY(* z^Ae{kyX#+FC!Ghu>H7;!b~jU#k~u2O+BmD>m=rqd1Ke`IwTOwRH$fyl+?E5yTHj8FR-z zBo-g-M)$fi__qm+>;~yPAtpS-JgTo-AFY?~tL&`E%rK^)0aQ&1WL~G|11(^%lXy*o zzP0%AKVE}*pP`ZRU)6u%uzhMFWS-TZ`$DnF7aK>fsiC$~;0TTRa`kuK^WB7? zaaXNKOY#FF2CH|PC0_6?F)}L!2OS(lRUYNu0MT0GWOasn*7B1{tp`7e5qvN5os#W5 zT&aVi-5%J5XzB2qZ$dBR4Y6G6V#PeEGATST5zt~?h7NU=8PRh751R!R9Cd_X;lup^XD$_h_u59d_ zSm&5`viva(g`O{{a1xcwf=#63zxdy1Mp(LLfFerJEL9|=tgb(|_6b8>vX1S3crloS__B#8PN z#FnVk(~F!ki=l|J7$L=DNswqD%3oyM_l-n+u=txVgg|tZ0N3}VO&>!#{GQLh$9)f7 z7|7O}N8N>!X6+OV@f8RicXnzjqNm)F$7+hMvhEY)%gHWB?1!(=xUNP3hn?$~abMj| zSB~T2idP(8Wtk?=;^8vM1R@(7w$WQGe+t8R%!@GXojt4a+jNcrj`8f!lh1Qyvw8n@ zz!2>Fpa?i5tsFGl{M11N1m}{gIR#Pxk>dEg&tpAk6AkpED{dF?>^3r`3nnM%wp;_I z2>HdiVTdP%R*3H_n(P+O7LDh_&}||aJu)2hTP(6EtElsz=eVNUt^*cq+jlE}h~?*$ zfksx4TbTyHODI&zw!TKU*&aFvEUW8}XxjyxpRc`5?xOoxrOWKyhj&9zK;c?*^AgtU z+Vh^F061iWJ>SBsW<&r}te@BqyP~%-akqZjz(utVO5F7LlT-BvMfR@iB*&+Fp5e?<0Oh{`CHD5bfaS2%TevYiP0a; zIWuY7OKi}S`LLYme&R81NT%bO6j_DZb&Q8G8IuJWwCZ;;Lzf}#Pyl^PJvr0(^;f+a zy=s1X>VCJbKa9_7jV9;tNvLu01ao*upt#=9KoAPG6pE54&aqkdl&)z_Nz(az0?d5d zwwygB&FJH$L|2EnOZyyv_}9EWc=9`}4Sr*mGb(a#YEAf0_8v5>3+CwsCR<*)BEK}U z=+{RFjGe}NXVjC>z2m=!bQzz8Ia@667|qFWHA4>SzR22P$yIz4MaADak?)^ zdIM&Then$RGZqlNM_N9w^1IBsezCAQ$wR0%`xN!%@3Rn(4*2X~o*<=ChHj?ezNJfL 
zyt7K3|LGXd=tPXPYIdN%Y3!jx2+4AW_6XE2Hx#e-l{VVG@79Z=>bCb(Ax%q$cPtk4 zYdl7gPduKT+G6c?0QYqljeV_s6*e(^;dN6mijMQ2V%N2L&NP9sttze^E4zusQlo}O$?#3X+A(nnP#I|ur6nQC(_2TefDba;VN z7lMDbab|RlC75(tal|v7@A6I5u}#y8t88Mg^{6&(OOE5LQx25)B*mhz2_^I-C4SzS zF4tl*cWa!P-yV7^YSeAYn(8fM=+R=mBg_^_`s==*>ESFofQWjRu|n(6j`f0Q(}}h2 zcsuO-i?Rw;nUYHo!0G$jUlouuw#%U_p>Y9rQN(a=VM*`Qyo?@`-m>^TP-4A|H(V9d<&;dqi9=2 z-J85$K(oQ*PfxhG=~cZUW=)*aN8fX-OyCQM^qf!}-w!3d`PEw3k?77)SJ@JdH~@v- z%cl~)nUG}f!>JQ1&LwKt7(X)kF)G8pN&J(Kn`zoBG z?sdsy7I41$wWyf+mxiYf9|YiQEX*5LlbfxfYV!CU_xVqvuhr1?Eo`(rgE$Bawl zIGdY7cf(HDfl7_GYAOf5=0BsCcaKFrR1at5W2o=UG7<@!+jB0>PA5n=(u*r?(%Wb2 zS&Yvo4scaC7FqgoN~OwJJCbM_fO$us(N6gZP%tuSLm!Gjx)vz7d**yx_@Sij6t)*e zp(uUVaO0;i780+D-SUO2;dN^Nz0GHY>+ltDa{wX<+3YE+kmhtX0Y$0e<41sfnIet! zRVq6N1MD_@z^Eyg-)@r(kY&v;{T~y@gw^T2c;hvDx|LnatWjZOY$0atQUv?$_`pNP zxjN3T{nN1XS3ne=5I}I345~V-64M!~0Ww8ul6d|SxKQ@No(FETwEIf$h2!ks z#(GCEbegmMJ)yJV)*-I4*|k9j(~q$|5|CLVf{07}MoC$`qfbb|3nc@x1>>Ft<<8;u zKI>Ir&3p3xejuDYCV-SuSHy#5WHf@YkfZ*9!U|3kD>j`$pPGzy-awYIM0IRKc1Cgj zL$e}P42NyatV+#qH~>MgH4x|Ym_-Od0jgfNh&rH%4~XXAB(W;g@|MP5>VVL>FY$5; z6nzX}Ri7+kdqwwtlx+I7;D=`TkTJxf4bdSf%Z~c>DZzgyEUac61GyzEF{IpKipXHE z#gbaBd+hhBm#$d>Zo4j+pH{1T#%{_PAHzq!rmG`*+sJ?-wDOWuB4@Ka_OK4_!u2zs zI)%M|2z)Olc>}Eq)nE;~Y=14}qqnOLTGj;c46uXi#WV-VI}joOj3OmQ_z3M=WAB1{ z#1n4cIONM@@Xsj$>YRII$dlZUk454sddzT9@}_&$>3iFLpXOE>7n-M~<+@g$^01G7 zixNPs(f*sa=1b##yzRX$K7$l09siLZZf5lAb17VaD9OqB0C`O+JO(QctqgLPG`(9p zK0(?)l=eM&nWMI>dOn?$7K3?!#)+QD8sJoOL|2E~Kg5EO;1S3B4?0ym*bOK~LCN6$ z6czbx8$`lvSw;Qdz>1cVd(mc-I;#RyULgRoFi@E#Lu@jX8EY6w4hy|)@%PgL<*;+!4 z8WLGpgsqCqQTN`-zVw}#Y0v>=Vs3)Ia5*E(!a;xgDpPV|Y42U!I2rl$H%l9&L-=h-kGWp&TNHHoxRf3*RV1&IYgeEM_%?(wZW+#CZ%ZYNpxj{!;6k-uZ zTm%g)0zicf=Qq@=Wkd81DFw7Yt|Z56aDkNT<#W*KNMdsYc4%Ni`iHY%?rkn zP4`VJQG-al!96%i-{}LY!Jt-prLbdRt7n|(AB2WwA{SwyKo73N)6o3XKl8%_sU8Q{ zZKW+GVjXC|j>?j&&4Kl({I3eLlG=qWrKR3`nBe5L^Yegg^h8}8iwRO>?HF*%JAM;+ znKPH`2z4fLI@O--`$+Sh)rbEb!0S};ltztEp!N}&ui$4V|CoGQ6DoDv2FNpu9Pk66 z=VSIy$Oc)DdwR^CCXu+QC;&{slO1xIKYBFf`$QX(WQF{mD!s(va~t++OuEWZms{0H 
zT|`D+Z|Z=SBZ&=8|FMI0aZ2!Y?iD@Ol+XDjLmB!%KU^p&U;}83zpSr^7PqjuRo=q) zv&4uPD8kT!vGvHiBt~ke-YV7_`bFb{IQbZVDept3ut}+?~?2T3x^WV z*~Xr2wdUt~qi^FY|E4!)avt&Eqq`d-v00Y2lQ2VrscmhYyK@UyU@dX*Sg*MufCozp zf@f21K^{nw;v&c%M#2x93bGT*;J46sU!cDM9ita@RswTg4Eo``@h~1Zo?w)GK&i~} zk1_|YIeUH=?YTjj-a}=g@0<~e6DzGG4|igsv5eSlo7|MI)lQ4J3&p=5qw}z?Cj+!e zGuXtav)wZbIzeL*#wk1Tb5Asf=X2J=djQ1%RM|Uir<6Y@H=yO|K{E(7rg8Em)1qF3 zBt!Q|p@_CY;`;@lDxDbmDZW*??V~5l<9zHEr`Y)UJjXbr>5p4OM39WyE4D=(F`QK5 zmlGlT5ESxYV}Gsw`{&YK-35(*qXF30LA)MgF&=@Ef_}{4G@SrXS&I`6J~(ZTJ0%-R z^a7MYHCG2S{9tWWo$}7n2x|v@d+)*vqYkl{IDFelEAps2cPE-q%>IfuQfol&mpCp8 z)hli=)Zuk(J^O6y+d^hI%*52^}Q#)nMzX@K@$_S z5yb#(oYMJ>)(J1Ysx)5>eRh&LcRo6F_B;rT{~LW`jsBoNXaAjqh6KE;UdBtoJaok= zOZ25~OVnMWcU^4HP}zpkE#j|J&P%)tJ9&1Q71xkt(F2@`f3KPY?bw1=iiv<_#j#a= zZpflP>a}Ac=p@%YDXlznW7t&Xm?IhfIjyN!9DS*eHCCn%-{bQWq4}Z7F(<1C4{c7} z*o|WG(qR?;T6Y%cp8;y8prDkB`H%blPq-i{(6DRZ znc<^SwHP}aalm$$RtYzs!*V`o{-Y?UK(N3`j@znnnSIaiLmo=vL6v@X>zl*EmzhNR z4>e-UF>j4b+F$YrGmCq)6WwN&_=ojG zpnqJLF+VbXF_$W5<3cIZm!nq~6dL}d!Y2aUxm%jh{v=f4%OjTHd*X>Q_4m^~;x}$q z{^g#1?|SJtW3KnV*gDIoD8qN%zcVl(jVMTiA|nYK<_#{#{&2m4{rRF5Db{ zkBiUW@i*yo8@gH7BBzB(Q`kJ%BVO|VFrB!;{E9{D(OT)CrmI$&u{Ytht58`wJJV%S z^?8%mUel$Jg`5;uI`SG@{FwK*Rc;?MgbT>dwjYQ3icVz7KE>s)j=?k6yK7Ki!cOR)&M1SL65*g0DJoUA_HsoTe*na z^WKXDHX2tX(Q}D`Q_Ck_%wmE64>2$H^q(-~$=agf!eqQ&BAl^8o z8u@EHqT@=p<@$rK|I$9#Ei*9t?h3Im>-P zEm9N#UJiP6;W@D;`MuUZ3IPiPl(x}Ah+|wtNDJo)Ma|A`lJWX_*VC5dAQF@q2vPfi z#4;*bk^STptaPDkc zhjiv;lOfMi$!eN@dR3{KtVn@$UEoPawOph*h%SI7dg{fid$o?m@P;<}G7f#r+mK-o zv<1=$K*Ub1CxZ~juV=QDm9TGO4SsX(U?vr&dKjKhZmyr#?GkI%x~kwFB`7ag{~MkywOg`L4|q6CBPkV&q&H)s}Vlb$KC3@gPbC|m~@ z+Rp;`>wwfXk;8X}F zw;-6f;ZAm9OHOr7AyT=0K}DQ~ZRCz#%8yWVqUvhV)*`*N5BeJuPpY;w+VaqvfpEs$ zj!*D?9Ik=y4ej{bn%!2;<%BGhPt!K@y*!KSXES1H0;eNZ8WA| z6Q)`-^}?>SoLqgkIGULSD*24O7x$qvazM6y$w00`z)r<%XeE_V9U-`*vDK|INU&8} zoUNSgeP-)>c1V83ejp1E;UX@;nG`Ff1o*5HGF-NlZSx;AlGOG^OLd}6Ug0Ktx2U)E zERfd5SnecjbY907kabQ6)yZ|lNr+73nW`dhdl(8OubYDsJO!I)?vp0F%e(?*zkeaE 
zjP_^qD3r(y^cr}d+eC$TqTgACTc)2-4)z0=am_;A=xM;T)*ANbsmIN!+dM{H34ynQ zcl{@+47X+sNP5PJobI7>k$oSD3I9JmX)UQ!dk#fBfp-L3^k4Rf|IWknS8GOMTH(Id^@gw8qn zI%W|gktU#d*<@uk#uOu-IFcPC)(AhUFY)w{SYDb3C1Euu+CL+Wj;e)QPTZfqrG#m` zgHu>To4g?j;~SM3aB0+UdfvPYf5f;VA}wNjtxidQBmu#i#^DBLi25K&$TAYc)xU8#!kC-P zWD7OAF(gQ=Iy}C^+@Ri}p&N^;kdgjj`BNo)U5Smhel016d_HlVdk#{{GW&3R=GXk7 z)S-F|hyRN;%>1LlUwAKt?KR85wg~Jn;qoVtayP#stdCD({d5Y9Xu{Srlr{hIlqbMG zM=4fqF2#&Hm$*^~QFe*y%26sTaXJ3Yl)c!^EzP4Qzgq1?0gTXD`Kt-EOk|Yjt{l6o zQ6K*{!Y%qI>A}scO)B`%SS|MS(OepJ8y0YY2MF!D3@J2-B1?_=aUX;c{4(5C$s3gN z`03T8)HnODHnbgJq|_s4ho38|2_f!LvZ5O@N-@bur#BhbOGhWGa z(-y9e!@s$`{+d_==T?1L_>EpL7OAyY_l`)wrtSO&^U~g@%0okRu)E5cYI<8;;*VEw zsu<^QIwt#fKW%?BruP$>UOeaTVjbFuDV<;YeQPeU@wWSHh|9UvX_#kpp@_mu5Q=R`kQ#9mIOw_>`?zKY0-=%%WAkm7Beqe_2EkSoJ zV$u4gR>;S@bw_F~x7tHJod3_ZUW1gy^`*`=b8IfTI3MVkasxGIv!=ch)qxa%{p0xQIeG(-5yf_cA{*Op z2?i{iGflv$oY*;tqjIm(g{do^xb(vBtEE-8`6o`ZA`9T4mLd^?H;em2-185`r~y+a z$;=b-g4`rjB@Bz8uPnl*N7YeMjBWQ%8YZ)sb ze)EBtVa`=PQse0n{*W(23rMe|GI>8rN%XQxg_hrBl>=UFlKwQUX|v)A%ID%V9N;=e zGNIsFAtq5%P0>vHbJY6!_A($W3yg53kCJmL4ICDS6l1?HfQ~b9d^%n1wm!`Z|5{k_ z2&g!GPOvHJH9bDx?3fS5wrYe^X|-4a6)uxjxniFR(D^T$@<+^R6V85of%Pz+S_dQm zjqA8rF#P}uMQYw2XVv}R;X)vU5B&?+Buucc-J&Xh_Y<+9&-up=9*a#I=i=2*&A{#Y zMzWTZ8mxU=a}-y8FNf6BbXi%@aam1JQ_?i(de=Rk(%#;2MZ$S!`hoom;U8tTsN#uf zOrIrQOBCNZ#QE%e0aW0fc*Fhx98d^guWUL1(R zB{ZFG_S`gtUmxpfCHrH&-6&9DfHgRs69BG z;UI-x4;R~@_WKgrl<-pv$0<0A=lJk~#E}@>$%eYOTo*#4?WYWO*

v8}1JzgDAEZrxQ z8_1E8<3X zy8v_P?lYT@qqMKHniOYdaBlYghSfB7eiLY#9$z29nK}~kjxBL{3pQauyhVR}q&DHm z9ggrOqWLfvaX5-~rVJ5N2ai9)lRT!vKL3zUHD@k={LrFczQ)24d5z2GyRRL@~g7U2HX`>+gG{Vp}>gAINv?s8xyRHT&4hM>%M4YcwBlS5X2Q~)Nxbgpq&j)1lPY2Hw!k5tiJ|O;^=#N z3p^XSG$+N?stHE+`DNeWBMCqZhyJeMqQdLy>go*H{L|pM;LQ&$N}$87F1bk?3-iIJ zAypv8YmC0ER04qBXQHr5m)X+6dBZ?g_Z}Z*zyL4hM(8YxC4(Hh1R(mLs_l?Yq*7`B4nBb$ZP=kkmySDE~%;#GHV9$1q@&ThP z&;09;hSl=8ypK5PsyA>z_$A2}j!%=9N zMmT?8ow?oo&JC}uYlr&|C^k|&v&jKo*!@JO=4mk*C*SlxiOdZ)X6};X4{GQf>@3hi zLyfJ@t~OnE!kWTKlsFk=3Qp>U9Rfl$NAzY+RyL?T8;M3>zZeNd$F;o6@HdlJrPA1}Gu3i4#_*8gG;}H(@ zvD?0dg#)q-9<%Jnfw%;y*bJCNdQ~JPSE>XC4n8es-qmPl;>O4xkydos>>) z6dU+!v3{$Nm7*kh`#OkK{CKB-i^uRe8SQu=ln*N)eb}}O zafgxxKxdCP51ouE=G zwUr`>GSFLG!LM}4czPG~>&xR=Ow<4_GEzLVjSsBmsxF)n!3_%pJ<^!^2$fi8B)6^G z3BA=KT1VVs<1}1%Kb#9sf;`EhFkm%OSrAN5J;^|;M`sGoxQBO*{g0fK*33&B43yv1 z7W`37e63MwYE4-ZWXnWL!$n0sJNhvFFOA5nVz)}(=dCcd$$f6?RjjzjA6~O7lK7bH zzBH#rzjS_P)Pe^cW=K`ww&pFU_*mJ;??FVhDu#W9O=St<=GZJkf92sFc>@0eM+xka z5hYyzfXu<7H~f_0q(G8t1NzzE(~l{u)OklOTA=9yY#adrV+AO}=r@t@rNZq`zm$Hv zyH>)Dn~g(Zy{S8f3%eIKL)oR?2_%?SK%~zN-c?_u{crASr*k`a=b1DsFPYq!L{RZr z^#o>SXBXZ%#J$%Q)otI#5AS{l^SX9sU1YLZ6^Lo<+->NRaa~-pn%j===oMp2_iy}S zJAJLP@yNJ)hCzC_Pl8cG*IX{$MT7%b$eX~jBwSaL)lW@VkDPGNhJd|#i2L`(<=9l_X$snvs zVRd^ERHK$OKJET+Td93Ne!KSAKm3r%u#EcWc_oEgA z+lX!wL){7#`}CC1fyJb1i+Fu+`S{n7#P=<4c#x_{Nl#t&*ZOa@s}lb-s~oaBT_udyjr<{n2j8Drs+3j#x0&-l+@0m%hZjj; zloPf@*SvYp?R-E=Xz2M|aro>Mt_>2G>$5*o|06+}Wd#1OcG{oUwN`$k*6NNu-{q$U zXeZw$?9#T+CGQ?TF;$G(FjbL}O44bB13W?XY|J@~1RpAmk|BXyBUE{B*)15b;e(aS zd?&Xeem(v}=gl6Pk%{fH_lpROg9-a)=UnIQD3<}U4ST3x`6NMpl!Ku;IwIWmNXeym zKO73)16yJTha*bcb`*=#MhP76Bq{3at496Ao8Tk;2M0jQ0th!@{Eoi%dy8A$cJILO zYGs2<&9|nD01{}v+yICa8o8iVe4=@FYB~PNzjO=h7V;1Q zh)QzDF=Sz+7ERr+Me>QGIZ@p*#PTdNMc0#H63pM4`~{mZX~u7SMIL-_F7Y|}m}^kY z6N-u!SE(CsR*18eqXbU(62U`|nT#gAM_eIc{ELxOVSRsIKwk4XAeJY-u78tw>!w2R z8-UU5pj)uFDh`NHn}@3wsRnQ!)WXZCl0kVb-QhwC51CV5f)B$e0km*htnL1)#vu4h zPy{tXK%? 
zj!%vyZPCm}7+*%Md4EA}vGcKK9cKy0;m-jLiox`fP960bWxXZ+$9>=2$SjPVm5o}t zCYkB1;d3Jgh<9>L!NUN1QnPXf1iRpXgupEzBomu3%B}rhLCgv->MG60tW3(cTR?_= zJom_wGKMVqHticTpDm2t_ZC0OIHX1)yS2IN+tifcIC_d((y3S{p`aW-;9!fbqQh+s z7Jj_xZE%8-aho{hH6{6i#i>>5emj(^=4}`X3rmW|QuIMya}8yn>=@hD#_a2FQJ z!Jp2%W@xl$f^`xBfLpB?@x_xQrOox}FsHHXOQHR?RNX79O;VURu`3&O1S zU^ijM&TFB#I15?}*BWX%;&ey8y*-*)-Ky=+Qj8=R+}f9WM|LTYErDMl^3WfrnsB3mK4=JZyej* zZXQd{y7o7K{PX(Ui?{L3uDT1KGB6^WMDu>|&R7ZiZQzihX0?IR6SM-g+ukH*@qFMr zU}zktfNgaRea@qKe(V->zID=?eX)JA@7Q)W#p2f4H$ZzW5>9^bOv5Q!q!aH#F#rx%g26N_am3gl>piAR6_{Sfyf^8ws~{DA)V z?x$P(JTk|}mo;;t$FbX_n+wIOH3Y%W3(RQ5f4=aSqB+-0A=$LXD5D?^ee@rh(k&a( zX=(S4MA+j$Jg;LxHC>Nsr7Gb0D06~D9|B+j$8I?Gt{Nt2rZc%&}O67%ofQW43o)=bC|%Yi22MFaB@-4m={$60Z4 zRyI}9&p#QnIR7c=Y?0h07Lz)SPB&h5A6-*@tNB}PQmkG`v^kF+7r589Cl-!2UWq)K zK0~_mypjhHUvlTN0>6Glm<6T(OrJcNLRX%g%^4&5J?Dhxlthj6=v-9Snx0*%O}AUE zV~07WAeZYlr=aExn<{mxPGhER(DQT3N8$;G;u()dhPeukm94OF&OgLZtxxDrwy;Xe z{KjJ)!U`Y@RkIlk108VkQa%`(5y^|8jdlLx_5G=vXups`kTOSj=^5YDW=zgNbuPA^ z%+Fo}$48LaDkXR1#UK%cbX8f*O1)KGUuPuC|bmQ)+>1b6pa(c*e9I_(6qOMds zX!vXT#j0J^OkG}$U+sn^sQ%A?2;F^>O~48 zt0`ImI{zW8KpcTkAOi@hI?@yO+k4MCVOz~)KuqP@5iv!Ieil#mZO!$8N&kuT^&Bn; zu^RC50bvRAhi9?5W1hM_CVrxm*?toIChgI3O!&}JTkY4Hf?k|qlSFXUXba6!w)Bo( z_{&uS1yLRl;QFGDDEADhGAMVId@rEyea(oE@}J0i_Fa&6^`UZNrUm-m{JH}I zm|y`;N5|6Uxm2XDWG7dz0y!zOaz-S~DrC79d6XS zzY{0>M^(+9ivARa%z+XF$sqI3F;31@W9}nNR2R-&PK9wx3P+k;qF!Zf#HDfiV z=LEx(e}JhUJOZE|>iWrjRdw}0^IpUV;G-x`gD^`rTH{K!XXc$!n$uP9R$GDKc_RZ_5w?wO>phys+33rNXQ>bmC?pE*)&_F@8>wbhG~a!GHdW|e+1B8 zw7|kO?r|2Pnf--9Q0B_%f60&oLee=avxQSdh;XJ6D3TXgZ^Z?{^y>)#Yef$7eJa4- z(nC3Y9^+jLmgI|IxmRHELxKnZ-9D*!$T7|+pDjx`rf=!I>)=ygmCh(mV|=UPL*5Bz zuP7)wy{{872gqfS!A|Nq(c(uZR5-)b4`#ItzQ}Ouq-!mJ4T%;Y-#u=YSf$$+Ov0|v zPneZhn*Iuy(>PRkh@Ln}p9nP;moc@`(U$bPkd>yHDcF3|F)@rC_1oBJB zK?G^@A=XwvD}hZP+wZW|L+R(2Ir#pwY=|EC}wZc#D}YLu`_)R zNP_fc1jczqI5T;j+rYhYh9Nj7FN`i*x<%4=hid05&ImLn<@!bcUpRw`nic)dKa470tEbq-oua1#&U} z8=X?_Xv1!}Ux>MfT@A%Z;v9$;nWFG(7f~I(9b+a>n;JC1^ncXQG69cE>u4z^zP@XU 
zW2*QOg*-N*=+9qvklUed%5d`p5GOr86OkJjFZ5*kiVv`|SB`iw9VOpMa10T6MPV6l zRD8C@2Y7a#2+?^5o*0$;=%CW?0>3b~Vm2L^iPL20BScUh059tj70j}|*GNfk^1|y` z1gd%?M{)0nO=EfVdHnVbTKJ>&T<_^RL}PNfU~{cSsI=%|&cTTk|K=<+o?}G3tH5`Y zOkp|n*a7df>?4@yE9in1crBz;!4-d&=_MAqclM`ji=AxsnW}Y7X4X?-Rsw7ScC=Tp z9DBfO0+kYE?W9b&36hYQXS$9o5Dg{bbJqOw{`9H-=a~0OLmt5s-?>IU9;=AQ<$nlz zd@x)n7ZXfewkwkKy^%fuJZK&A`@ zr7fdx)#vVKU&a&f{|L@6wueg2=y9&}CkI9k+wD#6qd6~WH^YXn%>Ygv;oFZ22>m4= zDf{NtmotSie@60U`r=2HPY&ZP*y~*0&97IOyQxUggAIBKYECR599E2fK(V$L*GSqwtB~*?74j zu8r|Djzu+F8E~q_6!@L z;5dd8BO}uUWIFtBT-v{Fm-Tz<-*LMt95zY9{=x6h_gN3uoy0sr)9fS3UF_vu{~>Yb z@Apgtgt}h2t2sC|OWV!IfiEL#7fG^N0imVJP_tPPyG0B(o-}>v&$Nkp6i-(Qx_SR; zQcl{=EdUk3@V=p5U+Xl<9~{)(oe)`q41BEjaS{6pw|*3Wy1>Z+02O@#J0%3H4mJ9H zJ`a}xzy3Tt4f67Q4Ky)Y6T+i>`T(flsAf!q|6(-|SWS%}Q6r{Y{^GZ__dq8LRAYq4433pgY6P1e_!zT{Wz5r0VS$;$oqC(ki8Kuindm#f!b zbShh!cwOE0EejT-fr%$2vKt0w(kwTMMG2S(&(N8_s5aW4ekPhDqZvvYWk5-dnyt}jCRRK^;6EGZvnA)bwMFus1tfl0E z9F#uJuvfMts*R;D6WEQRa5E;E@FB|0XmYSInei~B+^dIY@22tZ5i->?*Xi^^-0@Qm z&dvL16zH<`ce2S@iMC2CY5BwGXgWs;)vsBvwegT8y}wDosZG+D+1JAb2vz8(HyD}V>$rb8*}$#*^w6eLs~ z{3wOWyrNpdff4p{Ft43r2XbKKmCz;75K~VvdHga>=z%%Gv(S%lhktH|5Jcogl`9%qn!M zAGa2f=IpPd876r@seu5r^0_8CS_nti3!m2ZdWYotRb<-Rq*blxGq|YuL}@3t?#P@1 zJm%^^N<$f?=N5*FdcXM7<2iv1V{e{){1ZJmM(0L^?_?_Eol_07p;l=_pCKOsC-HSn z9=OpXN!R8z-J9bU8LNRMvgG_$U$JIcvt5n&CHyuTo5vXL&h`r=FQ&U0`1K;JMc5r<9MjMlE1HAmDat#^3U^mI-M6SQD*g5xbN|Pp5&i?%l(S9Zm9Tbbvn*Z+fT0V zPT@LFpz)3qI5A!265m)&hNtWW=6)kUtX-OPg@qK*}uS`>iEh^SwRC zu_T2dzlm##o5C}D+(thV%7wpPlRtGUQE6z%`G-fa!_UG}N{_JEqqZ!RrAp5!+r`fc ze>9Qn(OH`~pL+z4F!qW+&(1b~rQP99lv}T)Z+mV^j~+bz`MUa~m5+7tlYyA_Y8va! 
ztMbCZH$fdr=*_S?#pv06@iVN`{Vif~zZg$lzxWAF)+MZ~*^wpcX)lcFi4*;Zrg?05 z*KbQ;VA**68{NnTjnLDbQnlu}7`MhN#rUpq=?L*b+R}msVyR9pKY#HL#kxdm{3tz- ze43fJ&6e}3W709THL|Wh@f~W#BuO5ae8+j-LYMzFUPK_-l?^hkH7CJK;Sl}r_(XKK(u25jpTXStNyN6_o3lvg~`ZrOg=kB zHis>s8`I&w)VDNC%%5huM$8x1{ql?@*_NdE=5$w^s@Xauh~iFddArZUMQZUP&3Bh_ zMJ-N$;H#+#{hD{ta{Rtdag*rXSD|mcmrQ#rD_Oc5oK=w?fwcDf#p+sWf5#KHE`{*o z^|;-34gcdEE0*aX8t|SI*v>rLGAtAen4uKgT)#7GKCBG9rTUe!uS|cEakeUY>0LDM zZ_gy^5iW4)m%|8p6O8ln;bfFJf|_0Pz8Qi6>1p_BMaCQC=!#1&%G!KO>#a4o*of{{ zCK`6-ur5i_I|08>gk=3*G!O3HFS_fLTw3*dQgf4q4`U~XFyoKn0~I_!oVx6C&r(9W zWF}x?+$Bw(!CcpD!i_CrMDWutrD1-HSSJv$F2k7|*}K;8}uDcyAH zu*_D^G-Iom{@hrnRw-1YJ zxWuoQUIi*U48Air#YE@ESrIOPzqYX+7OD$vNIWJMxzkydv@hqR|J4(kCrER&{*ny! zw!g4jp_>j1#_(|tN9A?DnnT4LCQ|`2P$5E*nhuBv^Q^y((rb>T5uLurPzXN{yS&@o zs`EA<#Sl^TK|^l3b<|*mMRO3%HozZh$^Qz!tARSb+$(JGZwd-R5!~ z4LXQjEw6WW^v-YXch-P~AV<##^<}ookx`dS2)FQ^K;{P8*GY}bWnLU8Zdh3tg$4Y! zSlkNJM4KPkSy`F7<(9d`IaS7qZ{sp_9TpPfQur@cWwjaSK+e zVi@T9{l<$?)a5F8dk`PIE$wfIU^#s$UhI{fmm&_5AX21u5hdWQC+XLEMD~NCDSwe_ zqJBmUc|uVeUpU26rrxvs?nBZXoBhaZO0RCNAr_vp+G;KQzLT^>=JTFq0P&9CtB=Xq zb;q#((kLEY`-BDh^zLx~&g80d`O-L!J(@_LD(4N&%|44gn)W$en6_b{aupD;4I(wwE(x2_~;`X2Vo$S?+br%E7 zJ(Bv*v@mh!xtQA?+0?Y&*peBY4wJ3+a4FhS%JtlIb()*SSP=ITMTSav#)l3%$*LQt>YC&AV8=FQz+==~7iTKm|52XnbrbI{k4U z4&piW=U#N%4**)@le}wvAqs(0VFcQm-j0d$n-yWz-^{ad#@F<2*<^yQaUuP5-GgZc zx+8$p$7PFXstdmXWZ5Oh^-@^eH*LEGlgAKx6&qS^+`|kh$>rT8^_>)w=#+Z3Jwt^gbzXhl1f=u7M=Vntd?(NTUl}vU^{$Cu zPfV+Tqy>%G4~rz>+I-(mXb1znePRAbx)ru!Jo|B_tW+i!3rDI^MKYa_FGutC%;+LN z)Jfapasd^w4}j@H0js4l5M1H-;3w>H-X3t<0Z$-6Gbb!{qQV?k89o% zW!(ReFHX|!X4d+7imchYk7S-}-BqefXnK_W2dj(009zssxDcg^?J^wfoDb}NM0hvq z`yEU2Lx*gVQ#gyDrtf7t2<4jo7=SY3ArKGhT;CGNrUeD-c^I64;&-#g?Sd$@cf<~hG=!fG zX8+*RL|%PQ>yMEcv}v$v@301Mj=uQ6>bu3t;*<};(?>{eQy(Wy2o3Fr z^svsu^9vZoODw0}VIO;Mvf~P^Kdg9tiocoQg}uv=$9yqSBZVB5Zdm8;lqj6gq_z~O|lMyz5Grpg<-$Gx00hvmDzE362NL` z4pc27_Y18;M}ybTx7X}`7NzX7Q}nUUBsA-b#`dYx0%HQ*4EB3t^sESRZS%6<1V49k zB&NJ(sSQ7lT+@U^5eLA9`oEpG#NdMo_JI6(E$!{6I_2nVa71Ah9aR=CRL^gqNq4?6 
zL=85Qqcw^pv`fxCTgg!`&A(KrkYFK22guoru@E^Tb!-Ql15G2?L8_;M7*m_EF-3tKT>?c8{;6>&%PFEOE4Is z|69pHT9pL8DcavKoY~0#al!QZ^j_k2`hK(fP2!?dCcR7KRksM$#poM3CXv#o!!xx+ zwK=RGkJpJRin4! z$Au}&JOBi}KMZ9llUHnDn*aRlci?CRi}k(zv~C;>!S`3xtNLhgi}5@Y`DcXyk`|Az zWCA&jllQtc;c}izu@P-cYfocoen0*rXt_FsQ-EJc|LP81W%pJ-98?p~ud_Xk$1Jx< ze_Xwgq@!n$G)9Et&EAXuwe=K_aweW3{yF2RQ??y8d*>Qso?B|%Hyp(BqR2Gq4=h`b zz2eLtoee6_M%A*HU5;j+0J^C-iT1y&hcJ~7v<`h&Umt~OKYqphmBRzwxnfeCP|| zJcW#HeQwm>8F-Q^y8dIS#Z1RnDUBd8kgEUCEyILqWVr+^l+#|^)S#0<&Q%PfV$JAbj=l6 zw_oEaowk3$LKsfuc6%NZzqlrndWkS;%GAeyxUgjFK$YdZhX_56LD$ zfO*C9OyUUVU6Haoxt+0WGGOmzbF9B62zH*0$>kx=U9sP9-z1ldB9=QF)K-*R?}-bY zjNC4TL-@Lu$C2ZBKKrDI6p!O>AN(w_cS{LoXm1*qY^aGK&JUN`Z%Fod;d@R(?&{Be z8r>HDj^3mT;_a$VgXew_mSiS2r25_Z>)#U+rG)YUS$?W$EYoMH=k-Qz{$~qs+MENo z{965V`c3|KME&5YEQ-kn6cA+8mzt@C_t^PdO_2ILxcA!B^FHrbu$)MFapk)CK4$|7 zrK;^N)IAX09c)&PpiAm}Cupiqo@-sS>VJQS-p{?lyVj9`(FZvCUpXzRZ4n@g*CVUz zDG8yEoXs?C6Q2Bnz-bJmvK?8^WDbg*O{Q6_y8YvQO#RgBQC?a=@-2^o08w3kIOdR5 znbtsFzw_?L_eLo-t^vRPhMev5w^*>sTMit}`*Tu1mDxOuVsJp~dMpS+fW%M82@&xXZiS70>*Rl*`}yI7O{KoSC7aFXYPU@eJZvl z0~=0c@f!6mKzjQ2RMi=TaNLWitY(M7gW;F9l_kL+I`b-Xu=c{f>r9R1rextDK%WU< zBZnvZ53^x%X6#fsJzi^YS*{W+ppM$};pxz%`%CdT2vD!s_9<$cbC+#Y>PgKH zqlZAB>tl{VmNO)opT1M!RiR@H7|s;k3pNPC7u|*mJ>VE?**e#>uC}OW+|9Ch;zCNB z*jB@-m@N0X#Jl9&wx=8JK{1VlO6}96Jn%UIH0l+-ZNuEK67@4M@rYCmfDTe~?Nrq+ zx_95oRWXtj!aZFvxu>GZ^}r6qvd{Hq2+;XMArQ>fpU2w!egfkEwV@j1jEDyx?)uw8 z4%~@gQZ*AU^wz+~TF42e#RYak2}G8Ai>^UExMRW4v(}jfu;yNsLs7H7_wwv)^JF#W zt46jx-;6mDVae_Kqvy|@%jTNXf+qDma_DB+j)fKK-r^vp3%ZK#Rtk?d#(Gtrj34zz zmhzK4mbNKOYxSU*)9*#zCOl9QA54i~l?F(g${|6)W^}ROm^^5W!l;6?4++QL&eO_< z_S%R=o>f=7`Nr|deH{)c;M&9#`2huTnOZ14eC&|FCJg`DmoA+&_ot}Q%*BD6w_kBt z^VX0(q#qCL%i6vnCr;OO-I$}uZTq6PUOP$beZ3=%eUuq{LU1|P;}7*56K@QSj-cF1aecs;(yAG=*k#o%Zq+-){%qZfvRCAGS_l<&~(@*=AWWT$co&l7CSKsa`#* z6=xw9(QQoqItd}pWc+IpTc(W`mm)wac5;k*y_sK<&%OTnjzOjSFugnVu0H#U)^m@4 z-l?B9mcd^>^XH>Uh8P>u{^zIN>sYqDSdNL!Hw+_nZ1kGK9Pa2FM42Z|YY`lbd}&TWO;xX`CtqsLbT z-ufqfMp}BB2`?I~`a?FgC8;u8O(?bWn)?=HXu{BXx|ciL@5epfdGI;bD*{KYngO^P 
zWzs~@L7_>*yKI=kp`&!tO?OR|NL=> z#Kv~}_N{Bhfn|AM@I1f1#0GA>3R{r)hE==u?GPpHs~T;!DT}yU2lT}j>&E~LcJJNB z6Ue31Z*1@$n^U6DlpdH0e_3W^vRQqv&=20{f!H~_<97aLhY!Dz2ojwTmT6j3?)zn} zj)Yu&V3;f2h*HB!{NNV+FO)gw?_~oac9&4lQ}HJM2;b0mq{<_-ZIMn`CvsNcJ-q&D0b)7zZ=}stSx(;7uVp<8?{`|W#=+>hc-S-1@cy8!p z5HzCUx;KG;mzwr{Ry+mQOJ9#pyLo(ljcmKC6+1>Ts#&dG*jzOQ*%n0$vj&l0pr`xW zSA3be-xG!$3-MHQ9P1icsx}E#E}XKsBVRSHz0MIt2CUt^mS{$-g9}&>ge6|@nXO(F z-K{^D-4j<7%3AZk5x%?o@th!FWwyV&-_OY7;{St6ESEj-p9WAigJ`dhE&C9a?nD3@ z)N8v0{E4&s6P{cX>&EZruw~?)WD(G}!_3I!w z)Cg4lC>R@)ICwKb9>o)&JEONwBZ!4+nI-)7Fw$TC!Mkm-{Ri;lJagfua{_o{EJc}A zFuR_}mtGiGRqAyX1OX-%;hvYkdK|5BK7%iU0VB9hJ}a=*!kLNUVOsQmT~1xx%l)VJ z7q3gl{x<`$Q~u6T9Y@96k^1T}0}k|ax_Hhk2;3{oPZy}(b1F~;4)%Ua=*Dxtw|&Yt zm{9n_)!2A94u8f#X729e{iuP*oYkDle^k<-FFwL%hjw%UFRV%%1iQEoP zt%U>Eue;PUxgPfyG;a+>!=j4JKOoX&94Div-wcL$JQw>!$%XLVOLVcnir1;NJT#*Q zHg0(c2z~xYsf>p8?QLqQPGu<8sDpzmq;;NX5v;m#PI>8clW7!&+ug0RoPY1W{)RC_ z1kkZuhczM>RI}t*rCWDSpl3IZ01iTCJuBbc$wG9o!-KUF z{ObM7`983YKwWM{!dlUKe#xngPK~q|r~9mbG z+!o`J{!KqAc%xf&12pj%wZaC$&|2~izMrfTs`hg8U#qzXMyL=@G;$4O{<7Jl-w&=o;os#hlXn5Il zxpYl>dM4UB-DS-Y**D$yFK*8y@0T@=$Es$EUYseEC^(o6chQw+Rju1Bl?$r?Gw3NX zJ-Mj&=2cxkxB09G%dQ{uEIiUTI{p)|o==2sy(`A#*SGZUx2lWwuP5HkNUB&?IP}L^ z(59jMp-X80l9}H{hV^Od$zeN4op;{Xyv9M>!zTkOrYe5(^Fn|kk%_hAjGCR@^yaox z+>X?5NLRGxXIs;12@n3;VTL>RtBG^*^bxFm3jTe~Wsm>E)>(K(760pc&yW%#At)&V z0@B@~h!WD>4bt5;paRkjQbTulGl+Cb_aI2e&^5%J-&*Iad)B%4FPK?t_MXo--uHPt zt=LvN3S7Ad@g;XZrWJLN{j{~5>^9+v<9UZF#VCwj&Hnp zp(P^C<8?&%`Qq-$gecfW4z!HnV4#h1h8e!g96sT$vKCi+g$|)8#r zdTg@)LccV~^&-qQC_{MtAb&y$Q^8<%B<)f5cd-exzC_X_aWB&*;h4$A{qNqg-Q8iW zDUCfnBhzTTf4A8fv6O#hocsuq_CxGOVywyHmC%Cs7h9ZkmGveG-XN_m>o1RSM?#cVsHrdD%!6Z~wQ;{Xi;rW^EgZJ&ywTnz3E8W|# zKdxiFR;wZieq5e^6th~Y>2Z~3TG>A@*@BpVL%Ut3_V@p~v0w?kvgRR*2q_-MIqPJz|!GzczAw#k-jU;PM{Z=DP=1C z4htGX4!aAU$vYb6zhSqdQhllVULQwRsEFW}ZN(!cAW9cIl`V;q{k~Yu}?f z&M7CZ3h;%Ne>ZfE|L}{@?cx#$-4p236gsqZ@Six(uv(yu9fbLFUS|f^duKg)q1U-? 
zSbk@ieRRG!_+h1Nu9w9u%Lgc@f~RC^93;>6Thqadwy2_~K7!gYj*{=w4jzBA`_ztG zU4B}kW3~!egau~qXG8PdMhL{+H}e*pOFJt6AIeg66wLEEz%FnNF9ELKMLa`bK;ZI! zD6eP`HQ-Zxx2{^tb&3i+WPQsO7*+%6TYMg&rYcS)m6(6~(*Gh!{gLP*5K-KSPpE3_^ZeeL@R70jdZl%xI1&OqTW^_J@lJ0H( z8-YQ$)MbfLJ&>aQy;8wOBNmV?0hME{YX5sW{c^|(L`)TENAps8OA}C8 zBAz!##ZxjN-lr1vcv0b@qcpxIa@kdGs3>rJZtvML$hMSqn@;=PG$uFV`vw|m>Q%{2 z(+XVrzReodI%`jP`jPb#_x3c6Gci&amZLnQb!Xn;dC+`ii)U7#OwG~WR||0*I*anH z@#dGA6}dm1zZ~$WOR$&q$BM2Y*MVB;Np*2t_Nxp-bd(Jmq^14yf9t&yI@7*)X$1q~ zRkvNb2M5z?G2i#Rr@LII9}Z2>Rarb^I9Bbv?fbe*-u3-} zbw!$qKYvb|0P!vb7oqUP^A z3=Y&|@J_dCXpR6Y^G8;V(!}yrvLO2I7CcD*W#QhEYL;$yD$-t@`Y>hBM>@*NL}UiQ z{9@fs{(!=Onnm|;M09O1#NK_|f3mN=i-n>B<6{P@_PmwBtf*Oe!$tPRp;^&f()JzO z+;%;_8>YP^r_`qsw~8S$bJJncZ0lNrprFBUv zQwYA^ktG8wLSs&2L)^-ATTl9eq#Ch*Qod&{uk5s1gIJp;!DPROO&Ajp4{ z9qR$Hiwuh^Cj%EB@k3_F@B%g(__`TkPUu;o5hYhDDlVdB^O3nYtA=_?wiNr*nJX9{ zxQYO_-A?S|*C7rfqz@O*MoM)IPey1(mu`h3YL01xvniUE6c%7O?|v zO<(!pU9G@-Xd(0#D-bGVC4P8RmRhtGgtsqRLh4kMx*#FUD9psKx>%}I3YUN47u$Kc zJ~v5$f?3_QB6R;$%8Tt4jiT_IW@da;vGh)oY;)9mBV9K?m5OMPHRFY!sC^SVdoR5M zY23FjXMJ?7?7jqs(A}Qx5{(NKTMyNs2~$MrFgjD^c_e~76p#goVugYOwrH$LYdP7< zlO3T+zOd&dGUkNa5~fGOK>bx1aFbCgbgrO07dFie5L&HLd;C^CqV;o|R| z0ifvGy7i(LBVvzut0+Otl)%`IIw6dP=(jeV`TPyX!u-RKYTHr1uoJ(o{V zlb`4RHW?@DVnpM`KL*3hDthWgm*uDq3PPF2Jp~qM;*~wyvZOAJdYxW8o?^M%ON$Dg- zAvzutJo-vi#tmKXGq)JZXSRLkB@;wd$j}Q78|r_RV)^CX#I`to+cRhg+PxO!uWzAV zZ%&qx^s-45R*F;q$Q}6{#*S+=cFGX2>2oSV1K7sUxN%&lBF;9AOA-Isw>=9=;lG-q zSv$fP*ZdNwl!rw*q&$oSW=2q)hBEan8Qc@b{x6z#CfLlXn%J@>~sKe zx}OzpN)_*Im>B2a$fxnTQ=Q@XPvYf&bk_d!t^CwpAG2Y=5RIX5?0P?rq@y-_WXCE} z;l~~t|EXa=GD>pa@k*RbFR#wiEhof{O6B3zmsgtuI}sd(D4d$&MTNnf@-(<)4q;%U z?)j-UJPIcmlSjz4tN3;dg(W5pgDmTZGic{jE~c7hK_3?R@=o7H0KN=8{^xarOKJOmdaFeHSoECNTky+C z-D-U0V!G#dGjbn=d;dENVA=$E{{@>-f+7#+?uqt~)tckQSJYxQ9a;P3`78|)yWH@U zl0U*CN>m^A?#4zNCf6tNj>(+32?k7^Tf2YNuT>e>Rb=8P1v_A!;|sEI>K~+|`2WV@ zxMk~OFa2JqqkS!KAgF0PcL~m|{T3D0`zyS$3jW@m)z*OS&28#_4^D1#GXI`bcF65G zH4JgR19MWGt*{3H*+GyX(@ zp~9);N?%SX>(Zv%YAx!7IdnfPX)b?+xp%`|&!I~GcMInuYD$Z8iR_nl10) 
z36EgE<>5Xx-QR{M=NW`lJdJr*H(qX$s@oqW&fxY>MwPU`5Xg*qpF|kRbzMk2YWmza z2xRi+3K!JI&goAS5o9{CyYg&%xUrd|svDx&Z)(N$4yR%#qrGLudKC8DJ*qo2PrIpQ zB_&RYp;GrqctMQ+o|>eaa;XhJZlBjfq|X2s9DDN28|>XO{KbyxWsbglGh&KtwgxM3 zV@6<-kK{X6@ox>d24CCEObEmnv?@OKbmiwqpWBlDsk(7Pg<*Qm7vIjHB!^4sOv9el zL?7pUx%KHytV3F)XM08%$!S!Tal`V$XFa2Y8W3S-@rYVhRKAG+C?4-mW{(`S3tIXQ zEqoPi%yY@kmu7%3TY~ZrM+^WvJI`SO)+``1B_su+`B_O9ZGDnKFwNUNT83&+bvOkJ zbFgzf$O#{#+B8Hn$-B%R5KSy4b+^0sBEzuNd<3gmJT8~g{OSCig+;UX7xfupiReUg z)H*^{`5YkQwOYS)rIHg7*wF2HSR-ghg@Kllx`gF7^0is_SMf`+@+Y}c!Y<^Wg|%Xz zg#_2f#9w_Vg-bCDtJr5b1pv*|QT&sJu5>GMWxFgC0+VFbSDoK~A^(wBymbkL%Ky8q zVoO6LiQK+G2t-~Nt(jxYMfCj*uz+hkNxvO$A>pV>FcFgNzM6viRZ&c@h6T# zMxYg{Zao{J(`$%VawQq zKd|p$Gv-W<2?#)n8mP6ND%`fNmicRqErXW&*!_StxUR@&M}w{u!+^tz^O-qRkD$D= z5T6uzwM&5sMthrJ&66ii=rU3ExOcrk$(DsSH?HPYtgQZ0> zS2WuqI%#;9w7pAoxDMlf`WSlClmCb2R*ePYSSqyKr^nM8-SEQ+a@(_Uk|lk_qLuo5 zStzK(qxVmyTmZWgdZ+mDa%NS_Dzm6XC41mi;w6MEMX0bnz^^5B`!=;~qa!V@UaNEY z9JdHoW5jc_BC)$@;}=S_#QbE?$__f5T=;lI+>1V&d@pQECYD9JApOA{3FvZ3iM6<4^+=VZo|E?idPcQq)0TssCtz+?HbY(tWvw3 zRLeNK@5WQ!+7JE~{tj4^nv)QHAICuPW#QfaW$T#f#XMN|*>A*{#Lo7Kfs664lM{2? 
z$vOkCy=%WN5&!m@O-iP(I3;2j!WcnFR_&weXB{hWc?u@IMrAPK+_P9I6h|BXw^TSU zi*v}_=a zge3l#fqmOjwO-RNXx~ZYZ>>AUQrO*+r#Ab(yOl`U_k7pMeh#GejT2g=62EFj z9ja0ww}rD6E-OK4XyN4L5ifVK6{5fZ0iFYQ_0XQtLe{Xj4op+_8KWv;?$dSw?N!y&OEJEGBotvNoHmpxZ)P&6-;(-^sb_hyZO*8{V{?6%PYq z_z#wX2l?OUvyh?U@|o|?gpu37pWkXs-$Y5X2YRotL^I^NCvymjhr_)tSj)@q^+QrY zv%C&H4hwtoWpFbyO`3hd`_i4ZhY!O(74b%-Es|7WiPUH~u#}knnDp#2>~|WWM^!2ENpg zm1SFq(bCs53NQ4%^N@tCUZmCbnM}?Qyu97hb0XL_A5?mO-_MZh(AP*%bBl%dW1;+ z340wsWj{S$Q0Yv?7|{rDUIdC*&+WlUIqRPMJbM8nYFSaCvMLFub5=Mr3mvy{iAKqk zN!#V#3S+F}gJ3>P-NlwmRutviJhtb~J89&K#P%Uws*e55$u=w7zQ5)qeh+nZJ<%@O zVsWaBo;)YP78Q=IC4c00>;IVFel=dPSS ziO{~VlkU@3ndXU3EWm@Cy1(LI%+u`x(uB;VwoX zdXiC3Z zN$CA%^bhjLC^&O3IJ6v@ln9}Jg7*Gtsh!wq~ z{FJyfZ;Y!{zDbZq0EgiZLnz~dSN+ZvuH=fVc_OUGIOC=7W|HMfr(+0(CcaU7~D=B}RDN?p*^7T6f%W`Nr$9W~&=2H(+j#z2F zMz(%B+q2{aN+K6<*+b<`G^PXb+WUQ}~D z7?}G%ngfPB*B^ZE9vqCgM9;d^%q|Y9Gr>(a7gLbt<+E*;{z1}Vf9!G5fukELw(NdH z#J4%_yrjdFvoM5N;k(0WwXn*g3;#F5YfUNwCIVh_A-v5j{(L`?dnWeq4nd}?L;~wX{$(Li z(3RA@H{uY^?OX6YvlPAu1J^VA#kATPGvKNqNnov&bi=*uA4N6*lkU|cf#pbyKzZEZ zLPu3PPP)LmnmFpmv1G>Z*g*yBVg)ptWQr*-g9E7*KydHoB|Wa9#q;VJ=z@!9Ik1(9 zdiLMNfT~&1EI{}umw+oCMCTh73#4CQCVT@u{7cn(pPs;9Z-NS-Gw{Mzn5p$=MyF8B z#t$~!+v@5urOK0GCI465?EDiVs`B6ffntpIj~(4odty&@cpD?~K~796bSb(<1*7YY z{Pes!L3@`&&~(S>ipebH!zb8j;&cP8$?#u-_`bT7`l!W!%o1tb|uAQkFVdQ)4y zDed{W+E7z9-5%UY8>YEGy&)RF3u7L9ND2T`0MU{7?hUKfVWQ*e8P#ED)jBECspE>0 z8|fa^_}5U^%;)HKFscDB$^oNh0)Q;cvN;)a>}4X}7Z;C<4rverA!^Lkprgo&FaL_3 z4au1drO$r~GAK#tSrw(!=6F-?uRjr7*>Ce9D)wen&0-;sT?Q_XqffnPb$jb@8b4^IrM&q|a9ZPV|{O z{^IK6(q`#j+~3c1ZvFiRnqFs2v)$oPMJK3=cZFzgBGeoL(I&Oa5bj)26LM3TZ#DWEE$=%khWfQRqGd(VUqd>X0K4nbhjO3kmq1xaJy3m^J%d#T zPW3*oPc#Os7m~V>b0syadByYR2o!&4D_`a^0^+=E$#!ivJPjlFpAX6!Oi+eI>W zCd}1N>IWL*d4$yH&yh3OUYqZ#zRYY5gc~NP98oQE^n4usOe6JYfQ`D{{ zYIx6Q{%Ep-NMyNWCV`y_t?>k{;vaNzVPFc{O6VmP8@z8o+U<$mF4SLN7pa^F|%XJcGk^YvM2_r(scF`zlXfp%ugN(~M7 zZ`zg|>o)BeV7z4xIN4KUf9|5Jp%G5;P2IBQWVb%`mPXm<>O{U~MaY`kL@EWeKl-@4 z5F}(`osXT|#lusoI7C}l#?KnX>`S`x)}I4MXpp8Kb-*#^_7z*=acJ{=tolTnR22TT 
z0B_i3+WWo`v}9PfP#X|*VE29HGx`%Z;L=`SWfHti065*7joj-~xX)Ksm5Iisl#R<$ zB*q4`Ehnw!h=^9lEI*o~Ztt{GOUeAnJ{o_G#|;ddIU_@Gm7F{u47)|us4evkLavc7 zkUqK*w#A;Gy3}~SHA#qFWMy`b|AOxdwJ!fvTot)kJh|7(AbxXoMBKIwX_`X(fEPB2 zJS=Pvxx}QvdE8^j`Z-%R*oXdN+1u7iSj8NEW?OR^`zf5aQiVeeJ#4-#WgaB+%!O`c z+~R7n7oKg))|dYO3g@C9(BA@g&$@37eRr|2U^=p?pz3mVBrFtdFSB4fOG#mxpi^(^ zwPT16>eW1c%lQX)-0+$MRsS>w$)6hh`AN|Weq!c7(o{Hg>_vXtNeobbZNDRQ&Z8Jg z$d$vvaKeNw;P}{q4m-%&f20opFo@)g;x}LsOzvfcQJsk5^FsM*#HeC+GFx_L)N4GAm$r8O**@Si@)iz?HCD2bJ%D0B5Qmj!XdOzU zf{@VZ#!{| z^jiGGC~x9@&FgP_T0?Nsi_2EJazctTYm94O(1d5@kJG61?OqI4Xvu}h*TsS+$rTZ5 z1HcLQdg;rsi_ihoqN{WsJAd%@%LP+rLwN@kvBr&u3d6myVp_Z|x^REOA!&vW=Hee0y0D5)lA;ScuJ=?$l8TXO{ ztY&umXhp2UQfq^#X3Z!L`QEsu4JP1GAx9Isrm5{03f3O(RbgqgpYgTc$n_&NitVTq zzBgx^eOH72+W2bm)D{K`EpK(=iInU{oQZ~)aS-65&&G=Q%* zMx2&8H?*T>4UteRxUI27+UEA0H%ZmwLptqsjpMcBhc^g z>@Xk^(ypw@gS(PkjqZX=?zyE7cO$c-M=^m#cy%!k&x!I9ZsVOn$S|U&sSpNgkn1yyz z^SxDQl%p`OA?mz$JdpN&7ka1TbMk##MY(LP2x2lFN=hgE*OL(+Rm`<)D9U2&>yPx_ z=H`ly*)`jy@fN*5AAhjB{RzFnvW_N)ea1TC2`{q!jHDzkwbVjh*07ko;xKRBy%5F{ z@8cSOvRWF2c`mJ^X}dZP8@id%sw!9#s1|}R)ZwVxs011F^#MCoM7SXX1#JH86Z4p6 z;7g?vJyuto3k5bo9MR!h9cZ51aftu4zv0IdYQHut9<_qLigO48rxw2HA-YB&5a+d#M^ zA|#qSjUc^G@XorTWY!6r-`8(n>`E|fexRAMJk(6{C)vFG?yzX}=i&CS!OioGv%;ju zPZEC}^I3JWrHUi&!`q-AXRVNqvW~swoK+H8@f={*7rQ`QnUf;SFO_u48u7FK_;*Yp z?zkVO#UmZI+mba1c_3dzv1_ya${>gISYy@miSc}#!Y}rQfjN)K)i=p!^gpMACV95U z+X76^M9<#xRi;l*D$*xl0|ZOOav&JK9Q(A)FFC52oaeyopxABar;<FDd3YZP28uAk@5ay|Rw|(JV-W*Q*MhwdF_12ofCVW&|NU7XpNl98(aNIz zrlSId@`FLJ4_{T=d7%rSsmb-2;!OZ14Cqv zq;!#GE}`JN1=?Bx^^jj8%zn=c+WOH0WmbEdR6|`1XBo805T7OT(1bp%~%_`u`?U*2PveX;&YlcG}Mj`6s*~?Nk;{Wf=jftWIbGOS?;+~ z53S;KMn@98j0@ZQ#$!bByFsnxBEa(?ef!%9t4NkhQP#_G%B;}go;n}uVTZ+#XfANL}x)}{L%=O~R?{5E02s~0#3d~2E zHk|!GAygWN|BSr9eMq_sYxpyOl*ZUZUb9=3kMdA;{OG%u>P8Eh+6KG(l)hCA#zA}7 z+wbqh(2UIpRxWgMyhwqVxhwxiKAoGFj~UO^Fbi}ON00x#0|bwTPr=!NV?({o{^uzb zDi~i3l+MdB>PaK=Fsm*U&%5?c!2s6Z8uga&z5J5jiCmF9^^@;TqL7)t(J8qMuTAS%` zx68iz(jy*`z`AbpL)EFY`1sV`Wp?e>YHCLO9Xgm{jSg9EqC$yyQ5|b 
z8j3QYga-+Fstcd(vq1VS%dCKon`WhG>oUeXwI*@irdqJplQw&2^#s&1qJSf4&z3hu zYy``p>BUCq`)+n0Mb<}!HUDI2fAaLbDaOk_rD{uFu-@O;pw)!e`eNSIh6iFFOBkyJ zKrf8h4rqo+7d=nG5~@I1AWq}=w6eiczX<8`rwv?sl{0UXs2c4ki`u@D;XyD<%SwDQ zfi>cp=6?$q(UWAGJqYtb|2=fA+5$?O)w6D-N)lsczqURwp z>PwTzd_zl7xT3@ZF$#`+yQ87^isnRorY6cY^pZJQd*$3u3qDE641)O{I>+^g zZ&5`F23);Z(|#X?`wt^pF0-wpBgMh6E8QWlab*#foALn9j4RbU6GQ#~J>5RbTi>Q9 zd8ahDQmFPPfKe6gtcBdD_Lta4A@)}*CJzT(bLGnW7q8{E7^brfh+gx~?A-0H}~3IiFJ&GszwP-3DL@`ff&OCvugnjf*k9275DAY z?yuAL|8^L$|3qSLegk{J1q&Ey&)C)lAZlbls(J8i7|iHHMn6C3k2efR)_;V7_R(s` zSJ>ois~-)1D>@o>CK(27xU{;VH0OFBT{5{EIo_~$`>}w-#Ky^+aKBA&ddX>@A3pSh!7#T0!>o;&2{fC7s8t)}QDk0&HUooOYKYylq6 z5y50EV>Sv?BVNHi{VelHedCqD!k-_lKuNN(M1WnoNqvBeVokvH2{~}Il=o242L;%G&M7yP%j`Meldslst4)sy+cW83T% z%=XMF29S-7xoO9Aqj9oTk&kd6lal3-YMVg&NowD12x@ zO4yY3vKuAsgBHOJ7&f@mq{VDbjJVT$5Q%)}6Nm0;5x#9hxW96WQ`MQ~yO~wCpMg)F z^!Nu4l*TPGgf(`w$F+_>zUD)%iSUq>?_h&`?!82lQmNVdG?+Zcs57z6!V;eqhq86A z9E~}&fV{phm}b|%G999Euqgp2jYX}{Nl!Fw)e31452hD-JrfX^i|Hgo*zb)8u%tRX z`GPu%4FEyfM3J6z0BJ%V!N`mHfs;i@m#^oy&Se)f!^)MlKU`9jT{mqkKzS=9XP2^^ zKKSwVoCoMxld*gyf)))Q*ULGfHHy=K-vq<)wOR`!)#uLF3l{KLOUYlS3%Sl^Eta4? zvHPql#!zC@&(bs(l{rN17gtJmhY#vB#iOq}#KE9DgM3yDa5{*x`I#M6YN@aN5Pq~8 znIrco-k9>R(;EeXp0J!T#JmiPyYN?Qb%3*bM-gjRswVK$u3D?sX&18eK1D}%ed)=L z3Q!*!!#s@wKa-=~Ef-M#4Qbd6=qcua*9JtE`b7_D&Nd z0Rn98N;>zBUqN^;ZbOX!)mNYF44_uHfZfM1^rq*47cuEEJJ2r+Xme+ipIMgeLnZZs z@=S?dYDaaVynP!j$3t{Xv=rF&gsE7#vV0KVY}=>PST5#}Wr;qrU<~V*3UvV*XnKyY z<2pPyh`zL<;^8y?#q#M#4Uw<@X&~dLY)%CQ7QmSBUhISt%+MSwa1{N5=q% zFv1A25NjztJ`xPk653?{WU7BO>v4?b2{>+}av4nVzd<`Vf|?4-N_ISK?_cYF98$$> zk+6f?*=`_CIWV+If02Z^BWI|aXf4eME@R=`B03Ka*}Lt&#GLuCj*8!;F5t zPILc>>rpO1Fj@sL_Lq|E2(ock8{{7dkhT}+OxJ?abA7!L|E4O58+*Amy0jZbW~9W? 
zemr$PIou`VitFkpm(3wdRqhDB2uZC%QvfA^RYHyX#tKo$v-~Lns1kwL?U&Z;=_#2p zC2lPfecQt|*Qfla6E9r!#P~D5uBlG=%+(?kzlG-%)X=`Ee!!sBCwm4diRZq1S*MMq zV|F=pn2u^KZZ<7%p+SslwsDvGf6z)R&YG+pmz}zr&a?&8t?3`svb8Q;33id zgR?n9MTwTnJh#0PkK6voL#-(*@kdV8_Us8@6D>5e9*6D%YurdK@Rm$MhU4Lmd!PFW z)>pOE>Rq2RmC%fbvk&7uBWK@e7>4JuIOE+aI7I~r`ikEnoyY^vwWhu+J$#-1>7UVH z^*X8!Gpdf}#gc(sW&_3ULQv8<3G8q;M`gaSp61OJ$hWNlBk_GAYu@kBm+|0OOqzW5 zbI2ZujwTQQ+EG&r1PX}@$qIfcIwY7Qei271zzCzX0&FptYES(wlw!B-9BESgO+jw* z6DuK?ytSUC4g%2#j&|ekChs3wu4y{PN?vM#0C_O^k0XoOZ{P6%$DV6@2Ttlaz~7rX zv{$S#Gy0?Y*PrfofvMb}PrzldqA_&e6*t+x(+&UFQ^ogfJW0Jz1Owt55kupI?$$0f z-(mr(ckOij1sRACnAQv^vc)nvSLFU=8M?6V_GU^ja&53rhCCEs^qu|TAlIchz0T(z zFtr^R=sD_NyOauz&qx-If}u-pcRzK9sC;^vYD_IQBI-v9Tputo0Nj8+Gm=m<9NI2_ zSizIjB%WbZ7UdmdRsIr&_+xz0AIQvB-7S1qBs2av6ZITH*Dkk^(&y@k7@SF(tJ@#F z60=B#-<%@VIFaK+8Rd%O4-UK@g&J#3*Q=d4fq@*_7;^rR*BsG*TrV(K$Ep@8uI{~` zXS4M);VsujUq$a$U370>`)xWxB_3Cn>Mr4PO>p+ByUWWyouQv^%al4hO%Ki1s*ECi z5Bjiim=Ag$7M{fvQPZAGOcGxx{m<$5U*k|jHyaZ*1B3q83|z$pNF!t-OW5BPR(wyY zzq%(=J+tB{dc!wPK251cGFh&O0>Ms@@yNN4;{mQ3(Y_d8ROf989|*Ebmf3qpvvb{B zcX6IgzB?Id7I^|d6CwP(#w~g?AW~qt>yW!m5s!J$9;dpoy!?S3q5`p_ZK9z4JcmrQ zE8L@THNb@QVl#M^!5mEhM2Rx(TqRv1&An#F%+u?Z$A=}*8YMlO=kI8e%TFJ^3vAF? 
zyyy#Zc)v|Z?(VcWtr#3M(4)%eBE@R_g1v0IMQ=rihr$c(jg!`7JnxXp#0~U8JYGNd z!q7q@_+fKw6{yZjX;Hy(x!kD_IJA$1Cjd?lg?)0k$q5I}l8FILPg0}Bw`qg?xWd!} zwLm_y5=wZ=XjMX)iVaq5AU2PYe}eHpsOGz;@2P0#nYR#}#>W#(*{?6KY2UxR)`&8d z*Eb{MX>DPxsqd)|CrJ^Ho3@?yTE(1)OOCnX#+td>k`=;}H$w!9?OUWJ144n%DWUtJ zE@GiF5#dxUTiHN*GBvtx=##Bz!E4+*=lWr?pBD`t>z`wJkhhV{Wy{Dqa|D@MZQorb8%tDbridHIf_AlkKHbJB6>LD4}O&XU$gjkbTE?$S8kQro1j zU$gg+L!T6SV8+%WTs?2^MMFhNLHHkL>pz!Z<`b!ccbxnk+UPTR{H%cSZYB@nf3`f@ zRF(BaX)K@$>SMjt;m)@_L2P6@h;Rn-&MB_~0vSD**Qf{m9SzN8&mjLT8xK>=4KUgQ z9!Ug@sEr)z`iX)1)IP4s`^RfJN{Ov6vR*rAuwT7H*D?Pv{KXj`!oD&cbJNJXtl2|W zIx@eyrR!!o8gyhB25fHcT1gGR5z)pP&P#oJYcD*Hq?NN>K5n zH$ayzCvPD_?w4lsJq6F9W+BbB)-(UbJ~GvH4A%yK#94uh~eF6TI|> z>TF6XG_O7mvC zCd>Lztv6vj*j$|ZRG}B={))5SRdE%A?P~LS$^4QeNcxsN>zsm~Ll?bZPDw*m zp5WDx(J0%RX4T!Qo_7%td6DYDn#_6H=B$*Rt)Y;t~SQ>_B2ri4Sjax zqR5u>30EVfIx>k&OQpOr$Kzwhlt4L%(3KuA>n#Wvx+!TZIvD7|>b*Nvy zND_P3+oYUYM9#v$M($s{`O8<f@d^&erS+AAb7f8)jRi=t5r9=!cd5#n8hA1~hJ%w`Z27x?{KywEI^ zo%K|zuW5GSnL9xa#*Ng;&Q(K|NlYI95cR637n3a4dvU73D&Y(B!>C`knKm)8|3Zca zC$tk=fdeWuAZrH(km9Cp|D9#Bq~2P5?Y$n>(nYsPbmN+&35b4CYii+><t2K0ms&|GA&7OG)hz@kE_RHF$T7w>+{SRWkVi;;`D+RiEbFsM) z*ksSiOCCH&!;4;^C}8@fb*o7yy8lFJsVn!PUc{qkq3yoB(WGDPnsr#KwT`l2^0`tze;KuecEWk6@U;p&CZX^rpU zi8wEmrK_rIL$O7hZV~ZFQzyU4leE!3x?)|??J3clh8%ARy*sUo*)p54TN{ck4XpUz zTSXX{<|W|Au@A@f^D9CGsuKw!ibk($`|mp+|28lO?>;bAe}bj%(Vn`bY~u$Thd%E1 zms|{`mkdUIK{M`Ek*It1d;mxJJs~F+?D;cZmVB_^7ntQM`r!L}y*|SbCwAeRF`(KZ zi0-SV1O|}oS19@Y*Vrk(OR^Flr#gMM23;NSr4qWQ^ii@I{jl!5?tK*)yA3@F1MpNr zU5s?3h_q79M*_v{{NVHjoJ|Hetlz#sG5sa2vlfNX9)`ScMw+1E7wpRbf@@Tg<*O7p zf7;NlIqGh0@#xJ3L|_spXzY8`_xjcbxma@kRLDmVOqBBO@=Ak7JDKE~Wp}(88x6?_ zti?tgJucfvl3IG)P0xqgS#aQFxYv-*Dz`PbC#lj(62Tz3M-HIN?uc(muSw68F4tmGxp-cQ7u1UGL0 zvQ!Rx7~q(g*KLb0L3jV^^#8GcMn&V-M)B#b;~)-Zk2Q+@bnor7W>U_LnT*3zq12y1 z+8;g3;AjVtTt_>#W`BLcmUncsVP?vmh@gebU+#RD0g!)}S|I#Wrf(8*_!TeI52}jI)t&wc+<) zA7q?t(Sbd`^$v4|cuO0YN3(Vr-|u8Ro0Qk&+DTKvN7mv2zMubae{}~#UfO0&U|^5H ziCI_Ty%#6<|?Z3{?{V9*2nJB2K6s^4KOFJ1E 
zErcnw1ZUdD5&soQq#+gKxFX}kKOq|G6-Hc9O_)<0PjrnAhV3vyI7P3tWivY+Q#q31 z>n1AAq^Azo7_nVdHoAJ3A@gCU;scnc<;9$}hukNi{K0urj)8X@p0xL@G5mJ!oNpu? zQx=Tmyt|T(Y;n7}ICt#2z}l_qdb~fMo*K`8To-s`*;^C0Cf%Ia@gkRwMtSFko|(nw zgX8EmgtR&ve%ktr5W+y(_>oS~y{+=sS)kROz`=;6r-o!^N2LxG93lIjzaV{d(B~9d zu4*3EZCO>osMH<$gRe8!==f;QNAxet^Sr)Ger8oqLRRkD%v&`mG~L7WIL$r*)r>_z za_m_qNio*-5_p1InHzJi*lLdS&B=!+3dw%Ss)C{L=hwK;+O2kxh5K|IuR0!P_fuk3 zSCzJb!ooDj*ZOS$TH>>GL@u9-_E7q?nm)T%vi{r^6SC_Xq=f-AvYpLWy96~7mP&sn zn)-Y6R`y*P%rM|;;B77JlmZd26P~dj* zvnoH7UG(GNw;Z?MK6PDq5qBs_)W}f!uB+PNIb{QsvE=`db=F@|g>TnCGXq11AT1>= z-O?Z`9n#Iv-Q9zzh;(--C0)`rNDBzk4N6H#*8syi-*-LFde`&3Yt0{Uewh1QXJ7aJ z>@BtYVQWPzcBw!0TVwrRUyX>y4pn3wjQ45u^9Q_A|C1@8O^E*)A5>o4)hht?gD@Lw4sGb_&Rc*E>}Fo^&?#0W}=WuJHcP$GO`0AEFN=W9e;W|GhT- zFX7ltME(`DPE?zIL{@|ZG%l)%E((^WyIcRhUy0~vV2yp7+tHzx;J&|Dm4cem(AtmX z#ITO4{{-T~?g!m+MR4g+H^15*+bn2uQz6UYgD|Api6AVUK?-=-|3@>5sC@vkdfzUshc51%%@e+P{}PU{ja&`&G7>O*Nd!WXJPE9$@c zWp&NbDEv_)W`zqD%t{{z1TawJft6mc8Z^FyP$poUGpxIj^7#{sjQ{;h89w)02zOpU z7*rqy;-rKr$=%8$ZS1@tP`(&xW_c_sFRQ|7-9B>5E}V1zp-LcL_A!!uTh^SebGq^X zf_UU0IyBG=)K*iq?cD41neXs;bT^T!dze z0;pLSeoX$TZ>atb&y!^czs)M;H|rbX{>>XPgX9_1a$fOb>6*uzec8D=`gC-VJBn5A z7q*_kU0(o%FN(sa8Uo@pEEn0fuJZ!srIn%L=a})ye?-9C|1f*->oZx(clDE?mWV#d zBU&lR%5Zjqa(nIVyAN))=!$ADRL&xeRRw?$e|<^TL4IbUfTqbpb#e_G(OKU)_9S<& zXYyi`z{rm2ebLpsu`ZE})j#5HcbCf_(iiwk!s`SxsOdJKbFA;r8LAySO{( z_xtmH-caHD!ZlQRSUg15Rkzb_7z+FinqCqH zDrZ$rGhH`qknHevDav={v0chHk^63@uhMQ9a^)=jV|uzH^Ce#h_k?iEF5!?-0VODV z^nQV402dey%JTYvH)cr=R7!6IGg!Zg%xXd)We%!KW>;|=wM4bBpnEU~oDi5du>g>a zvj0FF3hM_fw=sJ>@O;9T66ZkATp{-?FA)eqKKWCLCat?^B~>V?cYHMrLqcrZ<|g-q z#n4%YCWY8{$b(2Wr`d03@@}9}sJkn}%(x}7D{Qy_Jb9nD#MMqBEJW`Qugq1$kn~;o)oTD;^_o2?AM8)A`4xlvl9FeP*y=k& zcY%C^Lh5p1K$Nq{g@x$k%SGp0HKtOTM7jVZ9=ft{aEfnVi=sH{aBFE7_S;ds)0ZXW zX4x7!ImO(s`m^NC_Rs|3JszZGB+E~hUbN03TUZ5f--rR$PcVGdzmtc6(Z#z6cECT! 
zi66)*{<$l9QBHM|7Bs#~ol(n&_|zhEu9a-^4ev#RRM#A2Y^Y+Fr6CBR--tM_6#M!9B9V=BA(0gI zfyvC1Oz$qW@6da(pVC9Q^nhf>_uu_p7V}a84)64}_0q-w^$|Fw0)O?Ln)<&ZSD!z~CC#tc>M#PQ0(huMAllJ4iku;Bt-`^Jx zjqz{A#oZy#G09rBT|)V-jtx1f`c4WS%%HY1r*<>(?C!{_cy(F6`FZ6a1^?IRTLeEm zG{_PFVek-SLC@^1!(N2z)C$@w`K7M>p_C-ha@obaGm$#%KG?C#BQ-D(u7kVx4EXt~ z3|i|{Dc)jk5dJCLPO(SsxFz5jTo(k_Fk6!VA>#V=B6PrsKH6z-c$K=KPW-Md!O70# zd4R+W?c)L_hXr5;AE4k+T(jmRp$BT67M4l|*S*i8+#<fT$r|VC?~Y{H z_@vkNgW<{`PS!~5kC>13bkPqL{!0hWxVAN@ODFdRBrbi zSHh9Hb;rZeQ@u%I=;_l)?0p+gZ-H3K<3p|X`qTlo*92(#kd#>NPyH|GbFvDtW{Kpt z^@b5;W%*`L--2h(8>_vr02@-hs~^<8F)u3q+Y3-|wC8B_4zsZqcf-|&yXw31zAfM% zFB`@;X7d4Noc)^(7JkA!tX&s&5oFxm8=Ne(%YNFYO$uK~2KW`7a1UJIS_g;DAfQ)v zF|CLLpW*GdBwJtNg~<(pwE1vLFuCThpupGt!M)-4sm^@3AEk+?mVB5yGp zkdCaa>qSaD zq;qxiKRF7uIGAZiMLsNfUwAl4!SchnWu^sOA&K{CZJF1={+kG42s!+dQS^&?nK&we zk|z!9Seqv_+&|XqjJ`C{F-+k)3QJC+ExFmDcT?R;4|7KkG;DssH=NyxU8fam@2GOSY0jG-(+z_R@^bb?k%Bh7ouO8H~$p|F{bzPJMEh;CU} zA-Q{PcyvcikCl`)mY*81k#{TZOF4JHRIGl(dT=QdexFeYeug<#!F24@ZnPKxBWK=0uSw`z{bwLV)qL zmiJn{jC=S@4lf(u1{{9$`9+{317i3Ds#M^FAhhtsz<}P1Xlr!oQJU$NjDz;2p`|Mp z3<`_~Ype(Z>$*Vw#k9khG+Ek}aHhSI4N?s2B|p9gET9(;;F9&1)f#V|Jp$Gnolt>| zX6j)w(4vd%VE3w%yP}{*f2lb;`d{4MhHqcF3v|cw4iqU>WS@Ax5iYtMdcjidkM$ zqSFJ4^OpmFB~=mv@Z@O`fD#KI=oQ8SRw)4Ig=82Il=|`eUkm9hdl}z+8DF-5c%8?1 zPZK!_4U*da#2?FD%3nHt1AjPtgpMphyApw8)KewWXl5-g?vkz4{oUD9=x!7egbSe8x^y?|tYkalQ z+8i8p_t;8B={=MaAJ|Z#%!k4kuCV{(GL@8nieLtg(LYLoeJEUk)ldQxsPOF^jm~*) z^#WIFG544i_s_NX9?<-=u3D;JC7*$c;Kt7=Z%23cowMYXZj%)Ou(pF;)vaf|9?^if zs^%$?>1?w$H~*n>j#!&l8wfE+t*F3P4kRQNGPZ9X?sv_bTyMZgn8PKmhsK)_(T5Ox zhS@CB8&!{m`|-^U+XoY)Njo5YQf;tj(f#&hF?Tb2BnbHu=&B;`Lcq=t^6nYx4;oaC#y5b&t7LNMAW7o%h2E0kAp_)Rt!)62n&8Jf(zDm!(iR4F*m; zHY){{D(1h4#7QhTt#%a{aq|U?A4O9xuk`xLHS4rWhp=ER3Ut7Wq|?uj{Th}SHcOkg zue>L=Rsbb`rWpzAEZpz;Lk-~TY03ckCW1nOS4ffxU_I$ZoZ6R}WNuJ*)iRo;yCD=WjL+|9P!6nejMNa8-3$e=-#tk}O^+Hwa>|KONQu)(_Pjfitew*+&pW7Ad0@&xFc0I-OYR%b{g3jvx+$ zC}jyqAdNTyBtiG4P$vvSr8%~WIn2ZpL-{J;s~7_<|K(}O%%l)7n#~9?1!@5?%xqi( 
zS1>pm^Psh)j)&%@jD0B1u`ubpEOkK*+`cXhL^Xq>Iu_5n$W@%CTg#S zR5Hg%!OM6^YB(#hcQ2P6jm#kH6W6kB-?)!o!pU%Gz%%)Qf9kwZ2ZnH_@R(L|fgKL~ z^#liUn8HIVokLR$*m>a}wT!I^rv=ma4gkL~Cha8^hm>c9zO@U^kOp#0-@cKK%|DI^ z@N6aeboN!1D`5CQ!t>vInH^S7v*As;GL)5V_8tNMM3I^1W)IU4nU`VgCuttO*sO$P z$-BUZ#hId2|Kp+PCV$ndu8el2g4B&2eJd^;S?3{-Bs9^$-gcrkBwVvt#ELY3w7Y<-Z^4z-Y_Q1VSx&9~G z_{6n!<8Y^0_2@7Awu7n&mKOo#(%Yu$RKaVwlZ6w0luxxr5W1?v#PM!7WZakolYLhS zhlv`mTp`-S2woX+O{b)|-+t-^uIOW(mAk4fy^CP}(`akNEqB^@N;S&a5M~;we^0Qa z$-sN$1Hx@CO5*Fz_+Wqe*q6#W?-2ZiuAZkY@mZN{w@5!QD;Q-NsEE2zyhs-03w{}9 z1YX?N0}#|dl-oYN1Kly~Yg;Mc!_Zxo!1}i_Nphz05kDbTYlZ3&fNw*4KdMa~UUSdq zqWJ*|v%bTll)kgg^P+z6wNE^F^Nsh#AE_Gc*0oIo;qFTq=&1hEEg8GHRxH1|#!M+a zZB@Bu)Le;or6X%w(`Wt*PoK2s%-E$ea>hL9+5JzMJ7wgiReW)HE)=n$msJ{fFj8Zx zE)#&7@xEUa2oVmiip+q*%TKwrUPsI>>xgMmywzO50f9Llm`Sdh3z1D!@A8N`k_r&fgGX z4aI?q_^^QQq@hQAs?#`mMaLXgax_OEH%F+D`Jl(r>Y%&DJ@s>m+^*q2H!=V?Ui zkkcBZBOmiZdQmH^_Y*atIXSw+$+Ks7FK zX2w@8a9wd+1TRJ>XYy!Oy$4kmv@jR6;w#C~08rxPesZ(`L687k1z!Rca^0p^2JlxY zcZLuDfe?2t#QFuQUD@NUE2;YxZ~+g*-N$SzT@#(ZQ;nPF&IJs}z6&N0OB3wDw~TY? 
z{e2Top(7d2-&k4$?jR=zD#O1%sMX}0l07*v7!O#Lmlpr22ZG0HzdPh63#p`e#t2VrXkr&aDT9@NrOUQNcbg@z_ z&dbVFEiE0G?E6c6bYf`z(hp{H2nO&xsL0zDS!Y{FZ%rPX#nJ<6pdxSwKJKn?9Tp}7 z6~~Y9&h-dNC9u}pR2@t>7C{CE;;~He+NcG7f=185ckr|@&KHvB$%ch`@;;qwz}G&2 zoU~;#0@UjMg4KLc>*g+7Z{UVcJ!IAxOlftce0`Z93>DSccO(?9edPV#*9ZX z)$&*v)<>foS+P%vJ^1bhmSPBO`d$VYwDG*TJFnMZPakuZtCvnztw68`;K2`T2+3u5 zwDYT@{z;3owgu7t2&0TC4@1%NMb>ZngTgh;kM``!%J=B@a}}AJleOa1{^Ijvd7{$N zyx^=>k56}`9>4p@u@K{jzvhXbSKh$r^FqEJ&|YW?9&e0%yvglM>rox+m!BD4ruC*> zrfKFab_?5iP!BuF?@s42X-VihpQ$O;ki24H-r|LId%%CZxs@eWkfFXNQ<|+%XOCk#j%H#9VwM-Bf%qWVW+JPHrX-`^|{6Hk~6{0!`tn(&(6QRj_uGS@5f{G zijtdYbY}$7ZXX%%PM&Hl%CY*xU*>m)6qOUDQ#mp1;^l|rh$G_b{t*-cqZO*xQcH|sY|KJtMRDV5X_6_y|o zWn~HQJSvUIyO4R?ZoxkPccA5rUFQ04lNO}_B7kdhb|@;!gCcfZiRy}~p?Wjm{EB?R zPpsA)SpTug^z;&JmKk1uEkEGYpUsPU0JSfCK?r3c*mnk zF%n>m)s;Rsm;<_SPitJ6&w zg7hJ2B%T4f_&jmR`PR`V75E+X3ui^?k?VLC>5IJF_i|858doQt)*|!cufvQf zYRUPqt2cnU&dqgPy}76^SAEzpDfFT2{kt+YfZpKEZB|e$xLBs-)A>jvUN=@ z3FHAEdh^j0o6;(xKaCfC*jnxA&#Gm;T!W-#WpowI%iebD!qK(LH^)sj7q_9G7q^K# z@g0mKA3{HE{(i6tWR*fqIOqHfy1O;elo`6587%+bHj`^jZg&mfTHZx%%lZhb4~S6Hc zuA|xl8Tb7~f#(^rhTuinCq0sc-_ofNmiPb2p?t7fW>ufh+=8p8@!`2Bmw>MKyz~A1 z&|2_VoMHNN2f|b2EpC=ASSidI6Y-h8|N4s7UOMnOA- zxC7^J%P-814`dxpSjl)Cf61&r(jMsSWHvHyJ5-B(Md{?2uKe+DXVwrq0=UwZ)Aa_w zx|MLs4J6h56Y(Ov9s-q+7|<>&gr3?z-tDE@TN`7zsw`j=-iIy3khuTD@5P7yCA&?Z zx2G;b9sR{PFk02#U6FrvXYqhbtkwBaiFaEI4!wYD1Y7ZlR4K~Z9uKTE>>rd5G;X4I z0=&T8Dh7}z2y~>S`MYQfKlG)@o9P14vZt(wZ-Dd2ueMN|T8@6tWePkso521({)kj& zoa!2Rz|YtAS?>Ec!h*k)n1BMXXxlW^GUmi%C;OL^4Y)_x3BRA}xM_!@(qtb)5PP$u z0;B6$<1U(tUJ8dej){OtvvKTaQ>Zk0Zy1V;?g2K)zv3DOQFF4PH>zltM=dlocJO4P z4w8WlJdfz$mc@p6@(XZ>I1QM>E5So484R_$@S}u%)a(1TrebeQY%IW86Pis8%wBU_ zsh&{-*RlZLfj9^e$tS^28{?9(q$tO$wU7L`fv=QhcHj^^dr-D;KGE9XK#lWHWG4lq z>3{)-pWLR#*w3lw3%ESWm`g@f0`CYq>=<}VbV2A8FS#zD5UfMOPKA`&J*me3FviQ}5u@R(_zvDqXHB{LDs}>vI7MP#JsBf5s4re;4(iiLVnkt8# zcFL&8Ab%5a`$!85=c=paD(8p()7~Z3kj!Nj-$>)V{HXWzJpe;KOJbb1usC{dJpEFb zGHZ!Mip|Wr5(K(q98{M&kO7%c8;sk!DwibG@Im5C}TupD7Qf()K$2vuj40RW^v`; 
z!5?~{?kyGC=U+D{9Nkq}8ps_iYZ9chEn>#8#xRlOIyGyDU3P3_M8Lw@Yj9)s1hj9@kFk~;bu6; zft3zOzPZQ%{6Fi)%6AqPl-YlRkZEu6w{c2bx5U0CRT|M}EXO^N92*9CTt!`ZJYfDloe!?8W2yW!`& zYa0dvy{!B9Q_d*DqD{pzt-6z+VPPjOv&8Y;o=Q-bM{cJzBq(k4R<|DiA?w^GyD>zz zalUQ?#DYx-!q#?*;g5MKIOyyiT-H$gEht}Lh|cyym;Z;p3|`4A=fLf|8*gffSZBmt zF})$ayxc3F86_jfoNP6MS#G5R`tP6IH2nf#Stv;sfKsFq4e?xiiA0r@8t(}++^I6R zknwJ&|2x*sS##Ddc!jzN z0m(>Sf5Fbstyjz)HAO7e1xoQMOWa8fL+aX64$(pq!<^eObH=e&H{cs(sb%fOAl$o| zyf<1`b*|!>@8_W3`0JvJ6|CypWvVR4BXtWG0X`*n3Z)McQ*=bo<9U35&j%h5bJ`4o zDaop&<$J*1(+w=gk+7z8JXaL}0S^rL>*tgqD2_||^O@Q`w;*aXr~t&$R1RkM(At;EBK&jMm;@O3VH;)gW-pryN5IEP>5<8TDZoO`vrQfU9*7GOS`GAPmrB zrZd|(Jy-2<$NUkqo`V5D8!HI^{`87{MtrIC7@zU8kq*P6}4adL_9HzmjFGip!$kS>a-VKi^gm?A! zLg0cJr3#?v#4P9dld3oan{t{zaDZ^E?zIzXkxd6c7``8Ow+kvj>n+emuP_Qt zt01;2#HDG$<t3r$;M;U!Ltifi`7rY;t^3>lltrW~* zD|puKOmUqjCl`5%H_r*zD0qH(#eY-1JB&aoqpUZ7RzC(gjMZ3~ zybVAk&AOlm+j6`wZl?H)@4*^c+cD-dtZdfm;MYtpl}85Z7t|pNj&ju0bo?}^gs$gO zLC?lcGoH{yPOuu9fPP!jR?hHQ=t*GC2d4 z$dpGDFowDg3d=$rpz+eNwKjr`@EqFw)bj5)XGEnMVS7`%CkpwBf>WiicE-WVmjD|C zSVSZwFPBk}t4YNE!cV!}b#j#*PqPV*?vYTHI%(!Xw+G(wb|;5kZ*=!jxE6bC|EUSv zpf4T|M$xa^{al}d0eIhfuY?lXXel)(Bdedl)fuu#l|9Gp4|ZOHW8L_LQ=O-{Lgi#D z(~|(-UW#Xx*&rCx`7gbO;L|5md>Oo&99`Ptrd@*%_;-6PpvQREc!=kDO#Jep>cYOe zGOXW%-!RL*T1v%l$Pa(bGOEadz!5-J=E4D(G}8c|FB?(+dd=R0GLCgTMhui|p7G8! 
z8`-OHtt|D;T5;0w6DFkm6;cE+@NvNeTWQSAmc-Ic8@bIi^BLqi7aDb4ODDO-wPka- zFo8O`0e>k7A_!>Dda&ffLtp~imXUqlspC7QbJlb7!(DGkra-cm=5Ctat1zJ5`@Pxp z{EsP>&g$+l+iJe?E&MAG+)wktfGu(F68W@#+^uNG?e1yk^DvGv&S6}4pL|cK8VMG$ zHxyX?Z3I(H_GZyU$tnpY4cHdDfJ#JeXjKAZlori)bXV+dq1$g#ERPjW#F>I<1!{y) zSepCxXZE#6zbGFqb%JgU5J!&+>hrQKnViB{i~W&bV`Hw^=kBgE8+JLzrPHOa{=S+o z=`E92^YM3_ggQz`-H}3?F}J`Q+#I*Db1e?e-gAbdg)SB7m=&KI1O?w zIt-ZB+{OTUzjvWt2i{EL?TY)r(=T^GVGBzH;6ISck)!9C!_o$WaTjV)99i0{L;=5} z_t*4qV(@{Aw|*%wG!`QrbETeWZ-r+F&e{}7A}>)4O~yFyRg}$lh%a_9_)WS|I*D-+ z#-}a#ZGr`@^U^wTuK5P_;}sNDfhL zFo;jtY4L` z3}t?G+lsU#^=)sx&yTgen~C{nm*$6pp=bN6=G#{1!2u66EIUO&JzLLO`LY+$w^Z}7 z!5HJ44Ox$cse!zPwXWp)pPgi1JUfD0TDC3wR&NJuX2Q1i_d~wbr%T&bU`cB|4^Nu3 zWt)hU@pr|g$enjhd5pd*Aua*FiUml6Gtggk8|Jn|IDjjaxA0sOTe`lh?Z`!> z^HG8A{z4#0?EhIs2WR324 z5FGPH;P+(9BSQ^6H1g_<$;0%P3OxX7m*CYFz~DS<&R1 zr*oE=0JgZ&S8k)|F2bf?RbJ3(@4^a1FaiMaotVyj)?V9!{B7mS!&9_RNP^eby3B+w zhZ{DkanFoFszJn_(QOD$8Yc&zpp}Dl7p2#|^@|kkdL)h0Jvu!)n1rTwTJ@iBzwRt~ zV!0Avyt4mZ@Munx(hVBvZ(f5#`B*#PaoG=kN}j1&qY`Xib;3t~pBxks_8c=dJSRo) za{aP+duuf&B>uWeNp_vQ;24nenp2p*esueqq@%vfs;b@@(E8AeX-$RL41+o|S!yyf z5dp$Yf5*vyJ~3-Gw;Ig{KMR(zDs*xpXuWPkxSdsC%?bQnnq9p^K&@QpCa&IHVwTH#ayJ+ zAtk069XHd%{QKTuGHBkK!Z@mt5;pS)K$;n0CbW76iB&PmJ4IdP9Pq#IC^JALlP23H z$#*o}HNn@@iYji}xn+`jQWItfHGiB9KHkn4(UX|C2r5v}iX6)$#j|X>{c^v6(qMM= z7g?%%PkZ0KwSAk<1*^GbQ)n&++>f5@aprU1We;|VE7?8k>Y%uP1`?O9VNAT&@Mwo# za!b&l#wWjsePtDC+ey}V(oOFkybtM{INf?Y;2wcUVJWx}L#og_l$G#s=R%xsE}PK~ zhQJe+%{-+8d!p7nBy|0A#wB{z^t0tD#!KGWiwg!(ugl{w0#so9!&})*=AN9vY7*Gn zVMAGqMGPYjl$)Mix{%aGQn2A8v z$>_X@nN?k#TP$%i{3o{EO6byRud>FpK!jc5<4SM7z+z}i4|RyX!b-jCJ5q3R8rs*| zpWQFrt5-O*P?8cI1)%W9oNM^76dE*WdAQNA$U@m-y?l}dINex3VOX#`r`f`p+*Wqf zY6#UNnTVx@*PB8Rbo*d8mS1B&js4eSKJ)TznR9~2bn6bx#{DKg&v;9|tPwDXn>A0j zl4sV-B>bA;t;ezQjTIk?-dEY(Lsp%i(eRd1xOQY!SMwBcJ6_W9RbTb9r#*kajiovX zBB{2ViQKCEwSGcY5HfA4CxCl7v3v&q=o?^Kb-;xG?uQ=u(V^FgMSa9T)Jo^2$YMGO zC6IQ(g?8BfQVLYoSu8ywOiYnX4c&;co1~x*ku?1a5L=$fSMO_ndJKH34vI^0KRYO! 
z&d629DttnUiO*I)H{CAd)}7)v7KzB#wpy8>GFDL@%ky8xIHy0Gxt0FA3tcSmz57#@y?Yuv zH>u(#onOt}1p+!UnxKA@Jf}yemXRezgHbCTtE>FMHcGGwVaJswOZ?FBvq7MajdbIK z*&N-AF`a)D-iIul7@ zZuYb4_Ua(_kHq3L8*?-D8N0`WVqc^ZGfYsk3+K)M!Zv$-XjjQ>mi8oGaiGQSQEtVY zH^nWjah-bQEBZx6Eh}O5uO}#?!NJLg>(PxaQLef#@ycRH^6e<(%m2?Txod?7=-+M^ zy{t4Oqwmcp!t<7L$o5>dq*(jADNz{YvZqmRH4?k1bcXcms*A~A9b2INSqQHKCtKnk z9y>SfZ_$+97v`7ts?1GF7eqS(*%n!D$j9srpK2+&2wgB$ekIkS!y5?(vNAF~ay_sd zx6Ah{(qPvfz_q%_i5d(FzrxoXi2WQ22IAHO8frrbxTdar_Ey3;T{DH>%{3i!HGiioXjAO3e;{EJi3Bnf54F^FjA6t>#a74 zjBRU@C+qkpzRnjyh-d2t?3!AK>S`D?hRsNa`{1%iroIfQLh}iPKKg1wn9BeNn#0Se(_`)w> z6+8+V>C?09$_srH{f*hm-sHWBGe#ts*mK}*4=rtP=QAKO0Qe zLrr$q=jTOY5aIwLGzOJxK*&h%{gy=)OtV(`svo~d=EZ=>xr1)B7wP)d>?E`iuH-xq z9s=N0Pj-t=hRe|Ydey8=!zj#%QK|55`Is$6xS%IaJ%)E}3++2WMD7fpcP=>kAp1KT zSwNt%X1WpO+q|TOL24bsFk7GqqyNc-!G=GTm*u_CM zm;;momt%`EN}@gY+WL`te$;&yx%-30jrrN(A3XTU3;nFN%_Rr9#-X~a$D@5`c0YU5 zWYT}Cjh0YtPDu=C;goAERRaRN$UXKPcc9!>+Wi=O$dhknNq5@qv@6}I?6N{#>4&@J@4yc56j$7z- zmWB=Syp+0?tO*)5uTB;D1;rH6xp%+QUsU&}4wr zO5=go^ixq_RB|bTC9t*?vXr(D#+G`?`apc~j`avPD~T7A{4 zr(@xGF_;#AX)*#>Fq_!$t&kc4fpOcm?lZ$Ql9e`=rNjJ6mJ{No%qg@6<0{iPovq#y zon0Pc_vgrw-HYsWr2;Jv2vciM(v@Wb=+^;O$~OjRp58`Ux7-)>kssA7Yk$12;l9(Hu8X^=yiX;2 zu9Nm+T=|`xiqV!*hma-ZMSfeT=J3;;2yQ*<{-e=Jwuam?Z#vx+!@YZT2z30I7+9vF z>*bBDxAh=k?j53!Qgem!?FeI6G84{@{r>%1NTB~N5aZ!m>{25hV2FRJ!n5$Vp8m8l zD#VV;>1YSFV}>}9SSelJr(+XG{*2io0NCk+Ua+ZnevM_1JKw~5aeuG0JG z?5^m8-ukt?JHR}=KOns2O{ua$`F8w**m2Tx0j>|XnBQU{AKVV<(G_B#z>U?o_(sR%&b+v$NH21j**}?c$v$dHmD7U9)=io@ zS+c)h?952T=DB%>Ni;WD%;XaWwN3Jv+ZgHv>g{)HR<}Rg#M?XXh6Xt^heY*NK4g-J z3Qld91;riF%Y12?DgFPTa{mJI>tX;-{}odj5z(Cw4Bd+q;$E&20l}i0=hi`iu$LFH z?Z6l)#|Fq4KR}!>bbF)gV~+d1{kO!qD8t`^T^_URdaJIxVMuVNs=$@`YI{fr2mYAf zczo4V^}B-Ckt*&dG^S*&birPdO(>-=TUi=u88A?ab|wvfTY2nF!N~3>Y^SX%j*w!xsYaC;>J<`+> zN9BppM=+KqOy@y>azDoXRenUq7UG-b5cs_i^#^8t9EMVc?K1Ml!`G~^Z$Lf671wUB z1^jJXXu1$DY=8mb7L-8~@B^Rl5sVIe=Tat8%AP|7cofJoWNkhpyH2gMezO1$+^sR_{^74T*Z3S zcra4UXV{}>VWr%F3jVVZQ5ALZw}WKBd{_lP_2Uo@O%R(DIvQ;>rk63i$5M+l_)$jn 
z9P>@($u0)*m`Ys-CY`@}nbhbZK0q^;wDU+JyRN+iGmtpB7r;R)R z;nhp@rl-qRwWAM6G1^K_6Pe54bObd8&|xq5^gZ^KfkIeWORyATr0mG~Sp|WdbTUXf zXx08esorSaDvy)StpjatxQ~scc|p*tblP>7f>ioJY?6O(AER)~vpQcGc87>RY?!45 zeux-gEp@OZT)KylzTVPv6nmuXR5{;i38+BDDG>zQ0rCK!wArXN2w{}o2Z(6vhYY^| z3hfM5g+y6G5MSe-^eC#)sMLyr;8$P(iUem*+0QKNe!^z~<*+j;-2lVFtHO~?N(!L@ zcd579uLaJYFU@|dIFM-Yl$}2ONmORHE|l`E_+>8lUK8(A(XfzR+dzjc0$^Q#a^@zR zS^KkUffZ=4H9B>t*qDc*9RNFKC0f1ux9aFE-Y+W+-D1e}8 zo|~Lh10iadK=&Kx+lT38OfvCzg1GEM-A=Tw92RexMQV-~H~t3adG+x5y@(KP=~?#G zvRbt|;2hQnipK|>#y4D<2;E6KwvWjAgbN?)bajzET5G&*Q zE7bZ+rPYkS^p7`>@l33OnWY4!GErnUu0L~g%jDMMTU7R@=Fr)l{{<`j=Y~-lA8(ly zK(Wh?B~ye7^xkevln@nqt&WM|0>PzR{#fg4AdKT!qb`Tmxt(QlOkxd;fPd#95pfuN z{gwxw8bw)tSYUYAou~LaoQ}xHo8eU7ddh{?t(sJ|FCB~DY7t5F{IZLgs1Uu9lvU2I@YeXir0t#1Li&(g#rqVoTc z_10fe{bAStnHf4oxfhd77FzCYHB1}=2^^>=w(u>ODgjn`AdkN%ZMhNg?&i}zKi@K3TVt7Ia{ZNQw z(|=nSEadcLq8vyO5o!{gDkp)jh^9X?1?}0-hAj4;5lZ8rTQX9ncNq5f0m+BOf%O?7 zSy-Z!JL89c0T#=8M_xHIXU-ap*)Nu?tF2w8B&Tc~&hoQPu13iN*JrO>s-6e|s>ow1 zFV7CJ14i0>Y%mF*lO0%37TCC`g<9YFN8TfM<`vn01)+V>sXtJ{R^Bm#n6(eCrIdux zpUElBsV}mF&#}H?c;T5cIGA}~bX(%MQ4WOHR)@$B@qJ4nM;c{d`(tF*OhwyUa>pxD zbS4Y8gzQ@I^7gJz*bBeEo=aPa?6`^u8}Ec5==a@!Fp97Mi-#ss{`V$A3JkscJIdh_rH9*N>d(e%ZtYAr!C75Q*|IZB!7<2W;p||}G%=njPYC@Y zGw1B|YmFYsssSwQ7^&VH{jJoCO?Z#9##Yw1VF2(Ye;DN+nuO#Ep z>mS}*Jz{AzEK6rE7wxo2tc*xC4VCIfs*{QWAJ_?n+e>(fu6o?#;v9O__Y|8B?xXDY z-mX9PyWKUxXUHlCwmEodg-KMcfJS#K22P9DU;7bYu0>{in?EyG0AT-S^3`+snWxf@cCuN(E6DYl<&F{G~qs_M#c4AUrYH zUpSU}Bp>%j$LR7pEg$-D%n$>{r&!Y1s3C)?f0Pb9r~rS{j?j^3DSz%zy$kVQpnCIN zgOE}vu9Vr82}v@ggLEb6`+Y2;McK!4pfYvB9}%K>NJ8Pq=Yz)Ke5(JBgc<;N*VXCG z+|OM66oBrAbI6nAZ(RXMYh63`)9-aBCDcP791<)+e z@9c{^VoCSVVbbUiiAquyQ2Ei1vnvR7fbMr3zo}kOcStocl+z)tDKQ!E$u{o1@@cPn z54mHlx;i)L&Xr3Ke;s*R9*`!puQ#)*0(J@fTNwZQxy}Q%kmUJB$mfWE@<8^`@shjr z4+WWi!oXir`!s9CcNT$NLy(GM62X9ak@Rkl>$uD9Fd9|WQV!75S@_j6wC&6Lz|nxC zIwTK+kFXkqgF0{OW#K1G2BV&o&R^)wjh8VLS@MO|lU#+S>z9dk-etW7!ZM4B!to&&W<{ON%qFwTWU)=Uu`j zg?lEchcg67I9zzW6corAw1ECP)yp{XGMt02Y6kyW#(OGtYDc_aL9wEf2q2S`eVLZT 
zG1rnDN-C3erfA(9r%e{N>g(aalvT_zv={V~Ur{eI?F(5iVM!(341pX7Pzu7CEG*2D zfGh$$7c&6Qku841R6583=EOt{z6|GXx6SIrU`h2D@t|_?@|B(F=!Q% zPdAfbodwUClb)|*N{?y*KM=kM=ZO!T(%A{x`1c-s-=_``{pUJM9XC`(@P+#5k+2>A zkJsA9dgu2<2QG({Z0rB-W;WUTutNjI(MS+Ya^|x%Z%RbGx6gfch6dFu`8Nf%ZYtwU zXCbL~!LVh=>jyR?2Iw`f-nE2Iz3<3Ja?pAsI4N$2@TmSzJ zL7d)bN=PjTl|85Dip?x0X z3l3%k8Ii%T1hG$Nlv8?W0X16SQ^#Q8%drGlxm{kL808+_tgM>$OshFp(;yov6(9RR z@1adc5Dn_Twi?3HMMl?7Y!{a6I;jn=$j|c(BrF_1j~RgYJ~?e@J>x<+nO=De3xG7$ z<)#q4nis}{N$tiuRM@R4+%NS&z8>z8OVwiooe(+>Tk{|^G-I>R#}R*C{hiLU!Nrs^ zggJ(E*9L@ve9+7S%aW@YMQlT$NzIGh_2km(j}qT-KVrVa_XIEp?~B$>=&sNVfIi%L zx}6Qg;*=RUsFOklE$zGkE!=^Tjx8kAN_T(jJ|%?nhl*~trfX0M7u~QRbVyWch$`rT zvV{}`O__P=E&~VX1?z^nc8Q;tn)tLywJb8fFq~=#;>B{O_7DKk7VP1Y$ZlRfk%Y-? z(TvH7aWGm8p~DGT<03kA`F*qprAIz6DJT^Z`lma1Fj53gS7>9@t4x-}_#9e#^}_;RD9a5*ymA?+>s5zp@t}df=?W0RUydgsh zS>B(kW-%sXVWvZ`dXMIts@zR0w< zmj<880bXzlKYD+$c&!1F2(8J!q|p0+y#Q}`DsND*(BDxL^{8zX2{$J1PQvGsaot#mDDY{+D3|qq7iz1#(6>3 z!`kzD$u(%$u4I9)>WeH8i~xcw#$5NR#+-}bCV@#l`7JXCGz)V zFJ`r$-W?R}Xd0h57@;ePD?MoLud%ZZed5a65n$Xg=T_h_N$4GuOV)T5?7*30wz`W1 z(Em5^YuPXVR$O;BZt>4wEQG?(K*h6HitlEHRFL9&iU2wazkr@=`a?jWejNaVR7#d( za%7Yu0iPx2&v+Oc?vEym`Qu;UfD;II1#IAavXX3`xlEvlRs37Wo=zx zuO`d}WYC~4C3YsrGaZrgt&`zN4p6 z_}FWG$Sc_NR8?JFxDs(u8&FeZGTxPJoYnKb%WJ>&Vo9d%R}%(SCnu)5H`mC6O6{~* ze8fmo!76)NFGT^}wIHubH2M*PUo}ejsnB(c!hJmMQO9S^!|J~nR7tw|n@SOBoOfTB z0%E3`a+f4Q{f`wT+=bj(fnwq!u2^Cz-Fe;jH1--H0XAPEuUmnQy~Nm5F*1-GjfMZK zjo)ODZ{N)Hfert7^Jp~;^_62BVD`7nJpCl3>$YJ($9u)f)6zZoDZ_j#Ikz*^v$Z7C zOM`sDavhFN#XoFAT8e<-IU4U$z|p}#_PP=~_DJnoXQHw1!S~DkdD5Aqssr1OuB7WS zWZQYG_qQYDS-wGtIo@_{@1edc?a=#o>B^Xl#<>0s|L_FxDL+DE#GIDke03Qe6&UB@ zQI2!_>T{?iDq|bDM1oHSD*?WhFqh2UvYb!xEQf-Qih^Gt?Z5Vsicd&GNOZga4#3C$ zk0EKoCTm+i$1)3na`?x#J8+e;5Hz^aJ>v90J$}CSAEQj zSp;))I~O~52|6d2d7j*L3+`B~;q5#=tfnvbxW=rPx12K{??kD&}WT%pG||@h-!n!(GeFpq&3CQd)Djn;BQkJ_FX-)?C`VR>=TH1`gLqU zziqIu~gl$j^wfetprFh9xrVlZZ#ac~q9(*^>IubF_!;SWrt ze7-Z!+Cl*;W!gU4qCpW?52F*-zrUdIBtn+VL2PnWr|I?;U*i2*%4bD*I2NWbkqNQ% 
zkWTXlv=Elda$HODaTkmj$!lN}rPTw(bS;(afdViDwkgy7&V2Q%z4tK^sE5^IOz#gF zw{&13==a_(0z}`lK&NlnfYt)hQZV`e#wT$taIpEOx3LrS^&M!97vHDj_IacUm7q6yt5$5E{_8rKluDaPRX{Thf8 z6dh?F$P<)SIOGw-;R#n>W>|c8g@ziAWkBrF=ZcrZc2A+SJ}!*IBRuqN$p9_1jj6z# zf{xV~6%b*1`pPNt+J!G<4b>BpCK* z|CP7`AA?1tco_vCD(@KVdI&3BEwEbYR=}x*-4+HQv#*kmr~S6c2{^CPxJ-H~{$et} z_%EvR8|g#ROArdMm2kz8cFz0Ro>54WZHMyWw=tPi%xIG>X09UL>B}6W1V6T*K?o?> ze6+Wdl7@2F1mC9F~M@LCwUE*r0Lu5mo*5P2u0fME8X+{FScjN)?(yW~F#7?K(Eq&=Yvg6aKO#4!cM$ItBixwbE~Idn0icxz5U3V1n+ zP*=-$`*KFXjHrXsmXf(Iev=Y0ed@V?=ZmKNHbPo}$LEnc9bL2|cWnSB?Wn3yUWa|K{6CdxIO8qc2Avj_~{r6UPkpyGL8Hj?X; zPHBBLd9DY+X!C#G>#8;tx9Ag^fxO1pMM=LUqJrW9jaL`lRsy>2f3+3pbYbRI_965y zbmR9BhP!mD_wO4Lx7+7nlE^7L>_hH^Ph)O_&YaDZ@sOe0=sP$ciG!}7xAHevZM z;103WyDjnwb^7cBbgGIAD)43+y&}As8x81ehb|BQPP!Zt*cUdr(`#=&|FkSu0l*@i z`O?YX0x%UGL+PGM*Z=|Oq=f$Kn9?qB|7OG|WM6mcgwL#8k* z_ciC^@g09<^Q_ElHArkWO@(m8Rds`1R!xIEznZU0*7vn9ZH$r1J_YPN;yefpO)?D^ z^E}``pH|ItJNwP@lanVr4&^3#HGrP-%`^Gd=~Lh_DuB4%KN%ID%sMt&v&TkCl3n_h zPY+>^K|0ATGd;eJlQ;5f*c1<(0g@;EC%g#rW|CcaPWPkE=3}<+cnfOr^G*nz-SX454QfN_UM1FPOV5(kNKQ>mShl{q`jqXJ7wQdsb<}$*v?S3_3CPr&CSZ5CZV>xdz?|7-@=!v1nkXPE= z#E6OELVzPPA0FXB3mAuMmj~L#E!X!HP-JXSjEEdNX!fVT?znoKFAzIuGsRHYz*o*K zP97woDJ?!ES($OhQLvgUr8o9duE_BcgB;h&k306E8c-Ha;g2W=IwS$ouwRdq!>`{_ z2q>1&$rbM>^vSU&SV>R5cy<1x{7R9-TMfqja!O`I9DmO{wXVMbHaVjMc1ML`KtHhv z1h8?KLST26{>?mJsQF21^GrWnwp1(9Y5aT{>vzg!I!q#UBC4^gSP}1LgbEl7VsyS{ zwV7OTKiUCaUAZtLz!$gR7kne{ayw9O>Ag?XgPRlNT+5Qz3t=XyV$}1ylxS4wuL)Ay zK2*(IH4#G_x&Rt&1~s)L)^+pE3XetyNCbtgwow3gYc(`}Dklxe)<|@Cq%st`&^t7$ zb4LYkc)EpErqlYo&fI&fc5?3^qn002C<7E;V|AjQ3aFrQ0gwp z_vSF3!`I?5r(zg%V&R3QpYsG3a2IWUWBe0z)mp7QP^Y!S`zs2IC%H}~#O6p{Lf3og zpYWxpwlkTeY;T;O{)L3}i$^UA#&E7ZJrGPrp*a`nq=F9uG=kW|>|}r6f0yfIy4r&N#{F36$%v2lvtz4$MZC<J__jm}iU2doZN;}#2=6CG+Ni0S)O0f1i{F$4>>5{2 zZFU=fP=>jy2pkOmdR6#&Bs4pJ=~RwxknZpla3W_%w+ehCVJ6!Q@G_c-VGy?U%gEzr zP`>^TK;x?6GKeyR@VJ3V&U6B|DEBDWAGl}J8c4}q!3+F4=D|xe-s6}3CEn-czlosi zKMnuH?AQEeW=0MeWd#QfkI4OQEWjg6I}5Fr#DD~*=I^wgm>@o0JGab!Qo7u%avAp{ 
z)B#hjxk7;1j*XV}r)+Nf!Lx@MFxyfofP2VH=Oq<*aQ|1Zk(LL*iH%;R0FN?0E2(J+ zK#ej!L92FJ_Jw^pXHc!K+v5L2;dee?%CUh}YO=_UO)F zj}!Wwv+l##g$HytckJ1Vo@N)_s{MS1{Ibl1gIsBNhv#aB=^HF8=%;oUtGAmbWau@7 z@Y+FiA z+OqjzsMzf5tc>lsBWs-GP-7A49hIH{)G8BW>#9Fu{LN~@-?4YqLGCM!3U+R10+tP- zAhDMBeef%{s|7}2Bd3J@qNcd13-y04VX45zP_%9g@n7lcfxFy_QH}du9K35S{{?o6 z7{@dI;1APo%!i4eq->rr=EMOyQx6{-Fp&D57uevd4U_>>0IB{V`_EW_lj0=hr*ktj zD85Zb{Mj`QpxFD}Vs==eoB|DS%I-@M|F}7h+?h`Gy&B394H_SNXF1^gFC}QD=f}a;IkVQN^v?R7RQrJ2R54Jy(x%;T% z$DIx%*h`C*Ahi{kBoG59}qfL?(}I}QZO z0Qh8HT|OzD_WC1I2!MbRn8N%URNxqO-VIVh+2L2B+a=8dWHRP@wqo!14T<_)X;3`< zim*MocT|+Y?jq#bK8ZMbl}tFyAM#)AC$rHc(?j`B_1nytx5*di8yVZS#5`h{z-~v~ zS1L#s8Z8PiIKljo*Lt?C{<+z_5|v?=g9Fuz_*DBXBcHu!++>whecUd_p-8P=_owKH zS1laLjvv52VY#h~)|?Oqg3_4WD)S@yyAu6CN8Uh?KXYk@kOVK*Df_8neK*R%M+`%D zK`!~vka|$@M;B3tSG6*|Be8?YV$3l{sKf?Vc1I4q4+4N;VY#;>A9TYpyT$gRyYv+} zPMcCqNe3b zb!h2c{=*uQhlu-0>Wsh+15a~T2dF)K-DWKwsimO7=URdK>AxZpvCNkToODPkKQ$dG zED3}JH;eX2Pu7~mz2ss^L1u(rbO6Y;-|hhv!Cu>uf{NTNlza#<$yYgHI!~=W&?%%7 zAIa}W?Hee?SMw(eb(VEUeEc~3dI~K-wk${tCj$M2Cr0dhz$HPtA>^T9UtwQw7_ST- zJ4NjBnMb>R*?eqA=@$HEU=H`^tQ3to4oPke%C8Vm~$E` z^iUo_j{#hZ+YpT4KrVu7T!;QHGuQr9j6RNDci~9)?`^p-LrRa4tWS zI_qZm3>6*mWu}H#jdSVAyj6%sJuY6fUYgAyZ!bydY+6VmZ3zjFOu_{n0$?-ARjMuEhlRVqeBn;Ff!*@(iAb^&X)eX;suHorDLYS|(*`$oMTkJ;7f~Ou5r3Et zT*C-ROP>nEqfYcv2U3^Q!2TKFS#||PXR}P!#SyZYQ>>?x;~&Fz-Mx*bsiRRzYk6Bs zUpIg20x=B2Ut#-%$DVWx+Xx@3B8s((y&uxopa0GH9nlW{Ao$)Qdd)S^=NVlhmB#m6 z+}7!#oRnYwsyKd!ElBL$4+P_0KQvQ02PF!IOtSXGOeAC@@BI;6r0H z8E|-}qqRfxf_}pzM^-9gR`Tr_L_iT$q9ut*_kkEVylg^)Dcn>xmt5Bn!_w`I#86k+ z(-XvL_zR^LJoBC=Y;)BCPj!4U)LIaao(yuLw~{sVTP&76u$q~op872F0mcfxieOkq zMet|$2~AWDvoSG~SC0~^G{i7c4@gEV#Jn&0FF+m0v=(8C)6$qu;6n}~;4iAd*IoaN zUb?F&e$G{;XlO9kk_6TVZu{oE*`Ynv_jNVn*$RrH68khKJMG(sJi-h=c0%TVd9Mu# zU|ox6#LOJEm88?`7ITKHt7qPwk+SA8oaRaSO}$--l8P=2_!#B5QQef)2=iqLzIsZ& z7B5Y2xVbu3Jkg#wkkvxJ{WMVnx=JG+TPB`Rv+&nw3og75mLGiuoK}xMjcVq!hs~Fa z&efH`uM?XrWJ8j0=&5Y>v}-773hvrR1eqL+#9oOp!psbJQPgZu=l(XpeaVHreEzn5 
z_9a^JSsm^D=Dk}-T+Y{|`2j>Hz5@O;W*0J#1Kr(6gYnIh&fKNJQJ^`hHCX?qYI0Pg zvT>ufT6f+NpM(Vnv%ka3+)!(M!rP6s#QzDd~?PJ_ON*o#Tl3&7IT(H#b zH(?X+Jg``7ps|+5o$PQ}Q?87H5R2!*J4=7;E9bC_Tj~W0^8!1c0 zG`HM(!TAJo%J#R@h=^sPC2A0_Y|fq;j>L3E4>aA*t2y0G4@%ZtmyNAj8y>H1pZf+r z>b%9;IB=|+ymr>X?&RsIZk(Dm?yu^cg4=zm^0d6nD6wt!JhGS4g(YX%ub-Qr8Z=*8 zrb$i*_gG2{MaJNco><0ko@Rt7bSWS9_x@TwYX=vo-O#A%Q69_OH?94q&-JZ1_LXRT z&URCz4PtI5Ap~|3<}Qz>mP*#D16s3B?wSZqD{Pr)`5&u-)`?XTez{{~BRJjWA^pbR|YKb3z&3FPX~K zMd0Pfkkh3PF>X8=YxM1~V0w`JGcsHj{pKa``vtJ(`;;_?xQwtx^hHvs`23n?f@Oee zGu2MJoB^2L5mEH9T+kr(YdmXs#DJb%n3pgpOaD5*$3Ug~y!}sQmM?+7!*1P^<4-;3 ze>N>&{U#B(V#I75Pan6ftp$3*Ld5KfH`5%8E?wI{hNykx4JMjfOX{0u%|*qR>*%Zy z?E4jAy&7Pd38Tt!iVQ01(zwIJyD5?`Ql_FHh}6LP0NIdW{fa8yQ6U>mqr6l{{M;&V;n&L? zF)5w2Lj)FSZuOCaA7!xYnzqtmPw_-+*x2m0(mZw_1!#7Yj@@PGuytQ9rO8hy?QfQP zPPEcHTIHyGc9ELa$+c0V^TN1_b{axtzBF*RK^6Q`{Z^nk+FlXy9#x6clnAa8Sj{Y; z90a4{i#r!~XNMBgg&BVi5Iiulmi;j2i5Fp-q<8!y+`HP(SZZj90c6v7_01@0H}OL#JdI)_iXf} z_DY@IA;=ZYsB^_t16WuK^^ep#$>OW3T2SrvMCy;ZW{QM4#(Ba0zQ$tSo=X0Dc8Z$= zDK-(;(4ykI_Ba}0>4|?!W91(m044ofVWALXC3MNh@nbPxWPp1q z%^LBY0LMC~%IFj~aA>7n9(uASVolwpl{}EFvxbz_r_;5X&727Eql5+5b_ANEuEVt& zJr>9}Lp`TzwN8#~CM!@~PotPWkVsplg44g$%7nJxT~Of*bcoPFxh+s~o65elbCCV; zZ!E{J9l0H}!WvOAN>w6dnku9R6@;C1q!eE64O(Zbk{V~YQP|*Bb4|CR$t7v%hO*)A zJJx!5iYil}Ia-|y*& zN*@{;33^x20~{M2m>WsBCc1`muJTyPM!m)&ECOf?ZqbSq_V%JN*jMvfa4!)z#L-@K zM>MOhC9GGlLcz9+DYc7LxLy9<3pH|%f0x!Dj+pV4JQd$7O&k*_yRDrEoo+AA5bZWZlO=-j)6Th)-+iEtfTm3y-0N?#PjbLmW=k#h z!7h-h9Wd?fRklo#Ql6kfA8}#@iX$4A?M&!+Y;)vsSGO;x*c@S=77W6#VR<{Q_)A!7 z0QN&W9#3FGc552AJ-3gnrdbj&4cZn5J#-}z&pQ~sjLN|WIF3AOz+o73r-uzQDfTZk z#4N&4y46VdghXf#LuOlL5oC|%JveJ`fNMis;85U~m7yf21L5r;&LMBDtwnWUn3L~~C9TNC3lxLfJ2Nmq)J<4iX z8?a8zBdg-JZifV~BSBDsP+Vx!lreRS-PkqDBVSqWP2vYLRcVJ6uCBZ*o=;1<=wdW| zUfz$>x)B^4K_B!8U-e*x(L62$a8l@zKz|b$?iiWPk=2w&>6P5_KMv}D5ANbm7#6Ad zUiG!ghIM7OeP`3obJtEYMqgcah)aOdjX&E1PiDm+KVOkMi*;>3!ZDS%D1u2-w_2A$)Kx;@c82n!j-Ei1c1vBLRTUr` z%$$*qVe=h?D##5qg)DHfBBO@{do%Mhg-6z1NM{Bi{0-n7B?GuPR{3w*|6ebF(ymX~ 
z!lSI4@+&}0j!vmg^4Iy&DzQ`M8a&+N*`G^Mu6mY`tej6--Yq;RE zw4Gwx9LjZlgN6|X?dG@Kfhb>+)T)U)4NcA!?hu=tD|t?p#B~|1=QfTjcpi_&uFt41 zb`J&>CHjx|^;ZPl98nIp?_aL!T&A34w6v-xzUzpYFi9kmETC<8Zb$#VyXR5Y$IN|bugtj} z9Zq@99jO{wXri@o_BiZhf3N#Qf4}O~{V2K`=sS?xCU-E6Yxst ze5pcUqzJg3k$X@C@u<|OX<-0k9q%=YLubm4{U5!`AG?6lxy&RTkK;|hF(4Hjj|yl) zT3Mh5b{WDmUoXZcmbU?yFVX1XKPUbD@xlz|{tU2&yG9c=iK>Luf}z`DlPxPA2thOD z!gxW-UQi}+=xQmsvZF{$X7o$hHjs+&{$c09&t&@(CoC=joIqrn3mp7c&kEwtmpjSE zSyASefDD+GCQdPIg@BE;%Z3X*63vU@`z9bAX;q>+K-I9{gW%Ss?~FDe*acOHdTH># zqsAnV!yU5tRvx#D@pL72Ii=fP4z%>*MRkocIZ(5=w^ee?`J)X-&E>~I_}}*_1&QsR zESflkL8iiU-aB){!WCb*5Rnb+bJEmj3c}XvHv6h}r3%z2Ht5yb+gc|{v#I($O%h#9 z7?n7t!5M|ib7|$AvH9@<2~W8n)H*g!aZcp#qHk%~fhw#Y?WrJvzPvv@A$B_+IOq*Y zO?LuNdOMHI_D~pw;zL(ix3T7WHJ9R_648{xA6ZG~_Igb|LX0shM7JBvJc-MlQX6ci zIx5Q`&^rC97e4E@OAbu72awZd)}6@ANq)!NG)tA ziD_isZ4tCeW7%{kH1lW)9NLPD?Y_YoA_$3J)Ewbp02BRa?@SAxbEGt70bCxGeRZ|V zr%wA_CD@_3ADSG6*i6Bsek#1{rCDUHjI7Z6-4$d}Q9PCMl^>Z-XxH5`p zw=2b(IW6IDz(kTppzf8100+YntCUH}@NEo5{ui1veLR=Etc%M-yPdg7mB9Y?h%CRn z+c6hm#1nWSrM+rUkQi(3>!jxJvWH-(_iTXZgzcEzfZzB>E|`lFdCAE&6!Y-?$)Qa* z;lQ7|-rw$$p@Z!ECNg~<+SD>K7v$4Z*=p0(Kd&9jO_EUpnM;aiewcCx31leXN4kR} zt2u1CV#_^yG=54vZTRnX#fTRZO-EhcsRjr>u9D{baGP4>zS?VVUU_l5q754~yMiN* z16jIGdc#m<69rAm`i%H=kx}{nJi%-Eu&fyo-boUzh?|Et*QX~2FpMFXWu8h{I?GH8 z`BhU^B4pXGgX(lQ9-sOB!k$tAR5dq=rKXp>nhy8!wX~#H{_X8~-|YMx?RL_2V?f0$5MiepYZHrW`z5+!F98&!(RiBsrCx&Pby zHvsv}O8wyRIfkEK>L*n>w?IN(3HQ>AjC58vw*o*h zNi7{uoMe5_T%P-~C9Hx^d4RXAvHcnoP(cOQ2R&Mrd)kY9rok|QpSFMMD|HIHj`ItH zt~I6G6khyjARG`q_Na@RSQxqHE(!OD*p_z$sCp?X5g_PQ;RATzq+4yyTei88e*3mB zpK5YVNJV(QwHl?#=e_xe<0oBuOn!S)T^~NV6L! 
zfoa2;@7S?`C9%7%GD&pCt)e06bRfW<7Ogk4A_>;mGFMQ3(%JJSo`&Xa{94m+a4J;e!jbA~HRrbU);>hX;9ir*t@0>?A*|CCVveyY`Te(!} z^O}UHE{*J41gK481Q(K!KtU`($u8hNii6=yet|byCon%(Q2q+_JLJFGXKBv-vk{y> z@tBvLEf)Xs?PB1q#h^wHM=+GQv%XM7qJl13#PUAXNR4{Kb#K0*rf}BUL~eK2MEPLF zb^m73I+`0(>PV^|=j_qHPLO2f@IYbZ4yPfFyG_~gdqccPn1_ul_(-(64(loqzmS0e zeO7`MdQ)+I5&+sTTVO9zttor93vNj~+h`Ae+Fr(JMFED85g|JOhes!N9*V?wgumha zooC_o9!@V?2qgbVJ;V;0Y;Zi&YXn@3vqgbh$`a$88DQz`Itrg6uTon!OW z;G^aBOnWpp9K9-Px#n!l{k?9E--L0%ln&aN&$zwGt8LQFn}w$f=Vn{W0}`ref8pH( zhgGID||-cDF!*yhG~es^FI= zU5Ie_C-n0B?7WMAjZ*)=38`n03xW20@HzfScO2z+ZCCQV18i(J-ZZ6XZnJY2Iu=bd z+@tJP_p*Ba*Z%869m?yv0_~F^0`)lkGXkOVp?BmIDMZtuZlPKq^gQc)N-|A0BD|)A z%1X!I6tXF(w!sbof!aCrET-mv8i)UFMf|uYT_m*IXjQg8N#-*~fr1pOJ#7IFWav>} zCfD4*i>sDxw^gby`&Isz(l6D*Ug|v{K6PQd7z~VP3{wo?1{;7Erj`lg)-TIOTi#v1 zx_Vt3?eXTJDNHy=^ynqlJ7o0KT*Q?21Qq3D)~2X?aL?P7uU|*i*VQPrAtsNn-30_9 z)j4aNrq&#{xwx?le|HYOt!tQBNq5cp^yMD6ql11*yzQCv4l+xq+SerQ@f5qG4Q0a3}hx! zv3{jS=Ge@p>d^djN&#TJ)s>%X!NNn-_yA86v;fZqg>^b?NqdUqn6iJgv&T3n-mL5o z0yO}kb=s4|8Zt=4Gmb?}RMrLOpG-*+N?b5# zs+@n(wOGV3sTC`AhOk#J}{>C1x$CyY-JiK7I}nrOcsR#Vt7bs^O|FzaU_6~ zXZ=9tq(_b4WCnWkh2OcL8HC}e-ZLfS6V}7naYwj0CTRc~$5ILD4@rZ3Fj~+Uz8TpYrILhuyh@sP}axaBNjx zuIh=hg&-!Aj#`)QeS8~DqpT`+FV}t(U9dOO4N*AjOzys(9R)fy#V^e6#`(rJC!7S2|?%b*FgAiJ! 
z=SyG7cs#1k4=StFmgu_j2^wjc65fWx&-(gW<+pYIgnQI65@|GEy9KtNH)DqupfRjq zEpYRDU4?89Z(Qu#`1R|bHpd-^(eZ^ZySV@O*3LQ;`{3Hf0v^6|CU)*6iRdtATNP&m z*3{uNqMWxr6Q(IX6O~_1jjZ`#LnBrOAIjPaa0n!9uI-U>JN{6iBp=|8cb*bdL02zmyu2KLJ8LrT-@^g zV!~*_l@<_hTek0%CcP2$<<7+X4?cP z-kYW^OED3LM^&A^`U975XT?b|-f7>tpQxv_Z3qZNn|g zi$>$jAw8wYNKx2si)yBnK%h=j_P9i=!=+FDyQQI9&DNMPTD}BYEIEUjp;Y-p`cT zyFJ2DD;H^twquEy%%9sGzeSq083D$OEx$+21t54qe+%mBzVpqzhPN=WaGFshp7C81{0;C(*!nNju&4i`efKo zNC2ltqNt#Z(0=?qH&_Q1=FDj5moarkdkF7J@^|Y3TA_3^_W49r74wuA zincfQ+(r8-If@2+t{!jVmw8RAsSLip)59%w3<~p@GeQGqgHSh;m%Apmwk$r~hBDB$ z5|(N69gYY&6@ALoL~0ULt?X3Ghr{dHSrHFbCEO}v)KCx^-Y=Kq{0*e|5rm^MW;m8C z$m2J#^f6t0o^%!I0tG&0_2Eiya7qV@7acd*!2xt0PV_da@_q(?1!>XT`<`GV=#aiB zD`~dTK!M3GdubXezKy5{ouu^1GrIhE_;N%jG#m0QI<<(Khk(X^QqcGn- z4^nL( zTY()8+)nQ_*L9Cp*h_f#s_iv`^zI_D$mDX@JAUqB0-XC~8{CuHTlhqsz>Rmo#7`fl?=>hDWADIRH-e3@y-7w<;(qK@2yqP2UU zEQ_9ujZHqSPJeGKrD%X1gX}`szVFl$^{^lZpi?W;qGFdsMTtCnW>Qfp(W(+!P~?}P z!bx3|kyL%{djmU=t5T!Uc${1uo>+Ch%3xZ~EZT5eO2YM_9k?CZbH;?Mhi>TO1Y?;a z)E;K6SYEFFyl0gg`Mrs{wh%)COtR)~d&Ri^=l|9Q_7<<4z?BE!k@~sMm1V8%_~y0t zLoD69o}Kr|=GesPMs+mU_*g#RvCi8<{JAbkyW5X@Z@|{1M0pn>S5x-1JmZ;5A?w|S ziIR-vW&5G*J1Ro%`!WO1x+Ku3BdWBKD1oC}Zfh8yPU+6mTw(=e_QtIU1k;V29>hV=Wf)i$H zBkB&xK>=3E*(P~7pB(@k-%2$UphD8JrM;B80`DTv)IcoYYca}D&~nQRydu5LcTa$o zzUSc(e(A@Q)yBhUz+^_(tm5kbJmBTCk?lL15F=5^@8(JQ#!*NsLuac+)zxDE^7H+r zNdQ~d=@qHKF|kq}`d(s7QR}*)uF^@{3H2WO+T)tr`X2_+A&??k3SIgoVU~5iXxWK* z=`hMQr8MPbNVC{u(rhhtotBKP%V_^E=7xu-=;f}SPSqtgc&Ws?c&0)Td(Y5?~lfoHv9Cbwu7m#qhFSPff75Qr4e$yK^Gtw;+yhWu5 z`^gh`l+d{SkCO}j?@+kJ3ng*)1Fa_q302eEdPVP#7#%sUAkbR zOMj+TXVmANd95VmURV1-gdFcpYJqqIZsUxBV02EUbEu2xFX}4d3~*gX@MdY?ch@46 z1yOvLL4KF;Mowu_yuUNPSm$LJ5Bg{JsI?Bs%T)$T1iEBcfM6{D?S))}|Jo`FU@uN= ziVi4BWV0#M-~iP}^Ta5GW!XmA0NnzE_G>{Y$HC6m6~U0gb4mEioAtkq7 zN;~19wyc$UF_`{)Wt`N=3ejfyV6t@089BTP20k_}4=*5+><)wm+gA0+$ zFoq*JN%O6%81GI9Uqg!Q)hxAFqm{0tJpr717ozQ<2HrH~Ja;Ua;vXG??KN5iFm7y27;_ zB2gi_p#Ty0{-fk`^!v-B7}xin8L1OHoKK%=zyA8YQ@Nd9P_^>lV_<>=K6AFr|HN6+ 
zU2B#MlVuIhD>v=v9W@o5ZsDdTeh?S+P)sMIQr^;V%zHZ0q)UUNUH_>R1;dOr6|E98 zpW3JaNHVd@FWF{tM*2n3{3(g$47B69tN*H%p%-e_%%!&_TA}Ea5{i?YP1d$Y%cE z`DFfvl$idHHH_dcH%jLWM`=CfQe;n%wkkUD_`OWnaYNrt@1KypUvV-*i|^n!U!ory zavm$^+i4Mt!a-Ks&?gvRFk^+vB zxLHg?*|XVi)~~XDt#k^&$=YcG!xTgwX6iZ&ok|K&0x}S}7w-R~D*o>i;fAtP5ONiq zKzWH|pE%nBm6YZ8oAtoySQ#l$Ni;1?=398$S713f#)yQ#n3TK5~_8j@on&N)>9qDHUq%=gvoT0Y61btHA|h?C2G z3~LW~r9AGz`}fcdZ-nv`zAt-?D!d~A`>5Wm4<+0mEmf{Ok?tKL`*?*t0&7#ulGI?u zitU)V6rp-M$6`c!XrgbVz^8#dsc_X&WI;_sqd>TYn5jixmus zT=9Y@u2+WTWe~x`CSjbN#+7`LpTYc7OOjI(|Fnd2h#qSV7Qp1yODDF)EkP49SF_aX z@M#VY{S^#mr_;=N;UFlM4LouLpe#J?s~N zkUO-Y|Ax(2D#})|fn%Mhasp5&ebLJ!ov6d~Aw}g$DlHfaU#-jzqd&Dv!Wj$WTY_iC zS?F>Jc>3!12N2o+Ve2fsq6*kF(~99z;Sw zVrYgG0U1)dxx8!Lcdh%a@BR(vtaF~{*?a#sJg0pV4%y<^u+k2!tvElc*g}PEahGe} zBw_i0ZH6KtSgo=@aWyoIT#MgwMpjnL^hfKTWxv%FKFz{f6v;X3y6%IjFXw~!&;Vcg zn~SnLm5v{){aJE3e-Wwc@p0~j=L6W5^h4Wmm8$xdQT+12TBjT*OnPh0mK1cC6zQYea+v-iS3UP~ zv~)_}+)s1gSpC^E3uO^cPoKGsxMF5nZ{cO{KV@2lmE3i2UD*&NeOd+8U|4sO4=z%IX_hKTlIdUyPt#j03?Z&ah(PV>UfsX$O(ZDsdv+kVWn|C+SV z_{E)~IqSPacKMnu1b#q>)u|BKnjioJ1FJgnOCtwoT7o3!#t1xoFZWvWklI&54S zF8+4;UR4a$4aqFciE?)e3cIB#zgKEnA~5|7s~oQ|HO3X3blEHaMTu67rdJq}%+Gez z&79ZN*}_cKjIt+E%G2@w0A(S=RFoyqEG_v&!?+8w{pS{Q5f;qxer?*^ShO}Qm|5|P z;}0{=NPB@xK673iXmF>Ewrvg@Q!@WECV+Q|7HG_xCsR)aq=}eG+s`)uaNISm2bGpu z{>LiQ{>SQPOu(Ygl_wbJ0M!jJv*!JjH+kdZZ2yZAE&_k`#)#BwsRZy)yG;{c*YWEg2zHHZIx4}{7uE-O$QS~qOi5tlct+l;*HB| znyRNwMQ2n%^zVgY3d?Y^&XRZM$`4iGxDPX-2Xj7Q*(t+q#?CGZc?Nwyf%)x1-e)_x zBLXTIgTkf)F}m=4WEf+495mP!zYa8e11oyKp ziM+vBEWMsAv-oi{*3;h!$!i^2PAQ6mR-iQCVU^0+i|Ao77J}2$Q_SAgMBC#fykV@P z3{=9n@=Wy}mr13|AMY^oNP*Sw${Y1DD{PqTgqA7$kyi!(u0U*8=@0Jk5C z1G_EuY}$1JFMADXGt}zK8OmDLi0{o`ZHX4lYAUnIX@M-Vdklzx9q_E_rSD|5bcy6b z@Cu1laGPANKQ&Ob`wT>b(z=;!DRD-l0dBFFK5jE)KOf?R{kpnT;oF%Zdsk8$OBxYUE+ZxLSJ1j4^YPhdC4BizQnjFsP2vs^}PU!82<`Ju{7vnake zwBa9$2YLO-9f4Qf2Rp?et)Rc4s)gM=nx9(52h$)gXy>BFN`mR zDkWA@?wn2Ye5*a<@Qcm;ws~7$UUXebipKpvPMiP!6EkZuYj9ys-P-LUahL!1K3Wak 
zzy-g32OQ0rD>ZZbOE{RPo&5UfsWxH@FmJzNqwc715&p!iKRq#e)Vwck`am3Rs{o7~ABx={iR0Zjk4lE1Z!&LpQ8HJqyI6+*p8v zD9L}dNg=STfeJo|s;blJq@U~wQyBi3ZqPJh65-q;z6Y8wl zTy~X5MO73R_!pw;)Om7*8(iMtuGQ1ASKQ$!G|zl1;_FX;|It{QfG1NB{i*ZqwQwX@ zvcn>tqRve})(=rF_=ja_@s%n+G(^cEru}O!LnS0-&z5A7xpZ(pA1qvfE zg{RNQaf}@c8QkUi>SFlIdmp>fz?xxO?nCj~#OI43^z(QHN)=KltX<m*~)gV4Fg5zi+ zj8WEsxt-?;kc5oVU0X-PosY%Axuzy{uHKfG^Le(iRFN1k?BR-UCXlC z_VqjAa>5OQak8kFAKz`4U|cT~R{3xNDQbG@#Yx(?H13vJzBM5G!S4GR%o0kpXki3s zfd%?R{OFcA&E*(MBSq*kCo1qGc6tUHvkNU>n$vKE-0}sMxfp6|pxwcUzv*ww(Y!|q zXqUEO=xPx0ZGl4fF^Yy*kBlq&I%6&<&J-*@DmVw<=-pV@ zi_qu|V(I+X3&4Nl6hgQ@gy(D3_Q7NSU&S^t^2Ij&O?2>&^vq8NIc3IN#mwsP+JN)C zu@s5*TUepqNj@=FgZ75ezA9=(;!Kmq7bf~Hae6Eb7YJQWRef$}G4`AF&JNnW)P{3t5?b{2(stX&+ z8>H;nz4~?@X7r#j9bwnSO!Eu-Ml-*DoVIN%A12j*XKf0$^Am)Wnqq2w&LH`#k{7&1 z1AR}>sQ6Ho(W$JB>|zLYwBR~Bt17xHOy=))%3WJ_z%`;!j8dVp4bUv#)t|mMvi9-8 zYC4pxgc(j%bxu?B&U?@UyPvgID`q*B_u3&e+KH{^77e*NJ1}!JDSU33Y{Rt<5YI_` z+d>tn>Z({|)BYv!ffGmXo)S>F+x=?w9i}DdsSi!NNE$Fs;3@ZYFa!)9jdd_3kSQak zJ64nL@?tK)Q@&W)AoDtrp8U%{$jwG`ZXWCRf>+fYyP;xTbuqdnZvXh+_!AQkoPp** zqTOh)Ao!>YioSyOcyH-_dmT@<8dt6=ceYf5np(C}luwrmPeYW^OwMu>{5Qf%AnYO{LSsAX-azo)~3L4FE(xoGC}pzGEys^a84hIYk+))GZ0a@s8sn@~t)y|=TzR|1TodSTdOw{pJ1KACK!Bjr5fmde|35|9w4FW$gxAI^X1x< z4YlR({^-&h0 z%@`h@QamKLEiu*6Kpy*302{|s{~nv8_L@Cn3-Ob+LYwkkjcQb$ykA6A|I!W8E#{qZ z0ZJ;`+Zf4EdGvEI1Q#fTJ-9%ibNUa)Yh9*bLx+x1_v~5A#rm`v?=rQlhEy3ms7g`Z zFKHzGBlW;NeGgB8A=WWxApyIn;+Rp-7qlZ2O6pK9sVn*;429`bc+GhCYCEhiS!XV| zw=7%p`vK0!wa@s?g1IjWp5eQQT6DiC?Ubny^_biuv@##XSb~%mC?fli%H)tQ19lkb!WBz9N({vUgUW7{ z_h>CaB9pIHxDV`L=~AWjJp_Nzz^KMdG%NOsP!Z-w>WBdP3yk0(Ool?`1!2LOq?kSs zZ1X^O^xKTsbgw&9`z9EKE==MKnd)g{(3phWg6K6ZYqZS`i1Emu?F3M)+DGB0`Ft07 zDp|Ssq&uR$9uvSO=D9e&_^dytQBW4RQ(6_>7%;LEAyIz3RDi`Zk*h?Zk>FuzPXT21 zPD+8?tF7PFXAXM!F@{NRXOYcFmYA5QB!~&m=bry$Tp55|Di>z6}(FQe@=!w{@h zAzGGr_s4%ap1h{Fat}RSED=JfBxR3!mj)7Q40^2~E>3Qk-1Jx{=88OO?MBhlE75Se z(9(5-sI;CH0yR6g8Xa$h7Np#Uy8Xo7zoPFuLBJS?<#r($WmhD1SY~N0rSb3;x=a}%zY(KT^1AQ;f29_TeVFblvPU9)l 
z$-_p{{#rS@U3ybb*yLrlsRV2m8uy<#tOL)Ocz|!_JoU^o6kKS$x>h3dx|!Kzx276L z-{&tC+xKS+wfZiG)n+lhWPLDM%J}dAnPLf+aHAg5EI#W}MDCqbu43TBseZDN{+KrD z6)M-4kVc8lXRc!^(Q|o2^!v+0YiVm|Y>`I)h$~H6xH4pRb`w^`QoX-+>2=Ssn-H7r zG1@Hbpbk<^iNc;SoZ5c)<0P4Rzt`5IGJ}2ie)VDeAeP%ChT`~r}4Pq1&wVcxIzPy{>gBu;lB!yyLm9i<8nZ5INVzo{?VFSSW2@FCMQ$wf zi7;_iQU!$^&r2QMU^0Bzlcpyj+W`OgpvL*Ync^+`iE_+iYb5=M_vUU$nH3C;To@9v z6RQIQk*-s2 z+xc-X-*3=Kb|Q!s_b}qtBjRcw=x|p^RluzgaZMRpTc9H|nn@c|XpD8BNEE%CNUKM% z<~w*%`XrTuex^A#2@ojW)PL(6^RoFcVk;VQf1n8JWaL@e-zATcOLhVae;r#KNFYP> ze9aKECo#7k4Mnk)+aGEa_?f2JmJws9A;Vczy{1yYjO{Df9qwY$qhl}&4wSe<@%XMju``?je zTMJDn*9%WwJOkx`M=E!YBm?HrKX)~azz=7BMk^VbYvBWhjf|fOV!bl{a*H7{=WHgG zxR7w+d~RyzyhlwiRnjw!_Ndj|&!3-0_D(F{X!HgHra)Bk0kmfJE0yi6K|l{j7CCvA zHrRPSkivy88x1JC?p?X4ViH@LVC>uCNAKxKES zj3$I@gmH@Sk<>TzA(N4__G?>#et)ueqXTs@xWUAkYiDllOsz31GZJOT~lXZoJ zgzQ}Y+ra9^z&SaBzBWv#OX$vx_QlQF*Jw@BYMbVM8t?_@2M5wz?ceTJ8^+Vr#OsyA zyOmR_$6B(T`Faa|iE-}HTqQb*llRwmKdq<24miUt6DwQ#gtiNB;0D*+(4RMSUEA5J zX)7GYZf-eR+hzYt*pzC2{ufwtO5h4)Af6+<^5Z+v%O0Qt!I#pHMbN>^JdQ&YooB~C zr%$e|wS1t9YxYD6^zhig_Vg%Hm?j>n!31Z##hC+E>iUKK@~CUK@D!f2;KN#DTTN#6 z-oJv(f$anb$+n@d0mMJmXPfCW~ z9qgEOcK5jUUk~Pp^_c&=jjcJVQCn33!ZO z87N7N8hZD1t|wJBagG@Q_TL%^_y~gf-mV3D=NSB=Dtt-ZNli6qGsvQ*!J6(8uho%C zTH+%LictqYFdot9$L#&iH~?ru%j!9aormz>-<*whdB2V;-BgC1S7iFlaZr2 z=c>`tDnTM=YT~gmhYJyl*y#ychaB}$F$BaIfACU^?x$5Y*5mcno%D>K&AZoC%LGsD zOj$(9=S`q>jZAb$N6op7;!9=l+g}ctdtXXvj23INDViEVJQlMQ@@GZe0`Y3f;RMBR z6`&;^El&-`%FSk~Fr`m?$;!SUIMCN%-IhTFp&i-Rf=VPwB=lYL@pT^<_ZtT*;>zhm z-Vj+x5q^$H&sohAL-V`2ZB%_MK|)?Pr)h_Ed((|(D&A09q(3CXsLwuOke0pm@j#VZ zYg_0?D!)O76&L~p^lG!A!I8L6X;+MhUc+!0T#lwoE_P}fFz=ZqrD04mBt^VmsaMVik{^^h9 zi&7%-f0pgrErp_3XwmLlLj(*Jy%6QOXJmLWn2%?xCZRrL8DA5g<-;y6+XwZ2 zl@Xah}r}p$a+`#5*v5Ehhi>} z1qJjz`l<1YNq-b2V%;}l8HrIUgQwFPa`ir-mQY#~PAc_EQnG9i8gg5#Q?VUee@5v@ zr$<$f0EZaK$O(98G{5iA1lc*F2L=5D^dK1sG&PxOi-nn6c~BKUP6eo{RN7hhaayVC zP!DABsl@aRRQKqJ3 z;Q+iJmPO;>H%rlBrUY(L!=dgU&+SE_TTRo4+m>!cqk2sEsh{XaLQ>n(DoG~ZHg1xD 
zubiq%Y5nW#1nAa>obbrN(erH1Sxr=PdTPgexiJ01jAq0OMbQ`f#l$~!=|I-lK4FMu z8P8(Ry$-Qux1$KyMub6a*5Z41%Zj_j?YeVLU;DGe_@wSW0DM-g(*rXHJcXJQOhrRn zjAm1)?LThF7@vfAhr6wcTFSU^B5c(Ky^IY(vjX}rwqH#B{{9saP=m^-$ahCopsP7J zvhOow4yIgNrgC7Qq5^u&_s-%QDvA9Bsst>h%PVe@5j}#hXR2C24NhV#8Ydk9xQ3gu zVN*i|$`iMAda?nX#mHk*ZU#Z~IL z&-tJ9q;3{Nv`5dyYQLN=w&>p0ujujZVZYN)j>)UF=7~9rExMDrL6DROI?+j?u6tVl zIf`_DH z!fE+k?lKh$2Ti-TzIo|%-<#859yPb}avlx;mcN#Y8}B|~4W%J6K6q#~8>&t6 zM7G*5Gp9oH{961?BSt!BpOc)vSk=*sZBSi^v*pzP^0%3S3{o-E%Syq9vnyx&Eo zSXKWTm$=B9;z#%Ix!vS0wa)dQor|ez23GFx1K9oXzQwrVAhlv|Tq{=2FSzfz{ONwB za5_xPeoI~Cj@>8?ye^$`y5Dj#oAYk?r~iDs_~CwYyh#0h?vcP@U9EZB^X{LtklYo9 zo2kG;fvtZxht5Z>Dy)~Z75`r^EQL{?T>w-@vP9^4^89<#l<`v^r|a5j>C}T8AIcNu zK^KObt{jVR?no-z%yWzG)37bbWB2=U*IYx)II|cXcb7|KPt-G&o=@SYDO#nDLqD4c z<3+uhipOvLl?TT|U*H0xF*$+?%W~HW=-4v))YYz{Xhc9(Q4Q0%P6TxGHHRJe@&?xa0y<}n}v*>z-<A|i3w7i~W!qqTx7d1dsyB=`Yd4MQb-6@=`WiI}YQTob^|rvaRc8j{__wiAVe|&0-HnYeMMSSB1(66nOlq9K(8fjlIWeQ9Qqeoi z=lKxkDVxC6N5=tA`S!oksL5@Xc)+xRfqkv#Rr1rZhlQ(!rYg#0-M6vzlP*D{_55Y0 zkt~Ud6!vG_&hKvI@}C330}mVG845z5GdNDMgs6Qi?{Hffq4KBfXCn??VYen(6A zDfx3;+LCbr!m&s;Zt5R$-k9#gz_-!LEw48k!)tf5 zJZ*lEHzeB_HyCgur~gotKUa^_B+#P(3(`Is&R1JD-5R>=)&LcD?!V+LNeVvI#(1?h zAx~2V!^1S@8H98-ar5Nz>L(2Xae9m2>WJ2Pu$u}+S{qEys}5S0zh($)*&Wy6rcIBF zh83X6gQzP$b%(dHD|{il16F@H0zUTB8>wQ1BLJyA8@Nlf>+$~$+t*q{6{)pT+LydTBFybuy*J*F>(^Oj?pM5vP%Mznj^PaEvOqiD?HTa<0h$lLXO;wx1G9GlHV2G-G%+O7N%g9;y8E*X25w zLm5(M$<_#$8L6%_=0J`ZiGOJ*1VjwPDu3G_A94~cFjPS;CLqD_y?4h`WpImRqfLJl zy0bZ7(iZ0X5%cz4HD{8Qb7ys1zUAm*DUh~8<;YPIv^iC7y`Id^TbIIgZWkSEiyybI z?i<`w?)iQpM_ z_xB)nj}%(Gxvast-YoD|Ou{o+;-l}g%{rZQv}nTMHR>!}W7C1=w`xcvqd_>D!N41E z-Slb#VeoiDMy5S_5;Gt=V#YpHqEepgi5cYiLKodY!1HEW0e#wG;NIJR!y@C^DPxet zwHVsC>nusYIE29(t=DgHmSm6kEtNM+bnVsad%|?rJhfB4NfOG3EbM{dW|$^-!f!1r zAmtO{59+;-gBiWovAd|y6~xHQ+@&RoYr3h}qbgQx;N255Tx7kB@?U%yLDk9p7m^hWhlKWhV6(AOnBS{`Nu_M6vtea)&7PSptXg;#?261i!0(`vW|C zXBHh>w_DmnfAGnM-cs;Dk%)6-Mc4D`o;T+k0w(7FWT9bZp<2w~O;FmL*HEu0}T zW4K_brMJF-lt@ukb}VOhvf9c@Edk|etJbSC#b;|Im5K>#8#6IwAQOiRG;s2mTXs>u 
zhh-nMR;M6L-F)r+-faZDJ8K<8#G{l6@!2;!r{NfMd1~iIh4gK8&rtd^z`;{MkTjK& z9O&$>E6+>#)g;xecjix^_ffq7lhdYxAzRBtc)%3M~AP5?C2G*W1+3Sapj4rJa?LLD}Lwdja`?I zxLEp~!C`%q7baxlrR`tM81pR^9|rBFM7M1EOQ!+;45}mC;RPN0V%+GacD}>?d%oLe zUppLO7t^zQvtRv)w*Zb@F9Ns;{dTSin;M~|m@7rk4W^;JgE_g%yJN48W8!#rIQv#Q zCBIotc^K@f{~;LJHnx*CaZBw=*^NGi*ge$i;^CyKhoU@<7(WsIe17}b#+>#@BB^U>;NtV3ZQ7@e;RcF4(!gAv%#2vELKWyn+nG0#J#fGVfnGgc4J^F;*3!jQf zxI`*t!U>Db?`F(Z4+9@=rg}rLp@%O1%GQe~+`5!&tDVPJr~OxrSiKwV`n_|LkmX&- zu+#8>DOq|y6ewwc%dGK9wj#k+ggyFDtV+&L-F)XOQ$ z*Ejg9g==NzA`a;|iPiMy@}Lq+Xi2ELh-cVNYC=>O!LW2bPYI$BMqP|>AS$n~32qS;+w)2djue?17-3u>Vnu$A$s=}^*8MA>-uA}jKq)NT}c@L;&X z{b@Ib$|*J3b_Y6wt~35|J2|TE3%wDp(y4JBX7@r9xT*jsY~wU#+_KcX?Kgc+1&HUJ z39X17*s$HC!H8+2$LWk0T?ARiZA>W;_7N<5{doKh+JWH3eeseg({1WFs|gFcN*E0P z_r#%my4jB(@nhhhT!ujse3W5J?5x`Z5w8^!8xvRZ#lNp2u{xtOL;REkcLW&llUVam z0U*mKHTq-w8Q}Ly+orbfMyrxO-C7rdIxlXU3$p*c^EotaF3;<>pJ!^eouQL{4dY%* zZ-eI25Jd6B21hsS>hmj0I;2ylXyv z9tokZ*}_^?;5?DI?gfdc;ben&${&<4IpsKsKmRMe(@;wVt?_prq{yaFDV|pBIjoH>pr5-l<%SiiM^#-OqyEf`ibFf#_<%tx3Rah_{6;+6S zQ#-77^FSF^>at1@e_G{GgY-mPGlI&h z&G&|s+u6LUb5yj;fo!t<;$HI#fbeFEx`?Xg=6Wp8y~M0yQL(Qmi**s|1zl%DbI)I5 zI$#O}VD(BYjYpM9kNPDRcms-t`*VI1I+L5iqt$T^Ya{FGM_@wS>#O|yg>T99S zvk{azI#>Z)V}zQVIqiKlG-*J2vYUAg+1#Dwp#fr_FR4gc@kfyUUO7^c&8Qo2yHpl`pqr&8KAd1$g_HI$3;toZ2xGpDqd}ubb@Ix`{2+ ze5`Erc81tu&&uBY0pOeIoFbdTq#T;sqW*yM;%vRm0e;!a-sGdIzW%QR;eX?(g3UCL zonQ1Sw5#1P-t0zu+2Cj9v9^Vh?PpJZYsL;;WtZ%Z8uN2|rxOABv@^fub>}$p8~$?i z2VuQfcCBBYN)laN4KM$yF+n>2>jh}%nAvyD5S1!+&rhsq6Dy3Sw4>D50(N&YS^=<( zqpDqo>grGOLY;T2QsGJW>CK)GL3caw{199F2ar_wK$!M;9%&QhXjMKV{g)G2q z=q?Qe7fv6vcf>Mf@1|zL&=1^o^RBmtee&CT6p{|Ej=X?1XQSqxs}1(`s) zp~gT5Ddrp)V@=jM!|uMq0s5upnDlX(W-fS--5VBS;)x3gVZbf`_yz7)m8(tC2?`)w z3AZDeQ}c$Uvu1Tw889I%LWk8>s)$ncgmO+y_}xmGWhqbXV(_=nR(q57Sh*S05dJ=A zU-|c7c;?@qGZ*oOYJ)A`#AlGk3|O8$B=MSEG;f)Wrk4YF=#_;C@&et{pjRvQO6VK+VA zg#EY{*iIk)gZR@L3DOM#vCJB<1)W|FpBk za_W;{Va=D4yfV2SU9z21r=9d)RXqV`5fBxHLpje?;+DDTLeXNI9lue}GT!HBZe@vL zFa8gd(4)cWo}?MB3pz?(TPrR%uc3H-&j49#Md=`s*Ty6Oy6WTj!q>{Wy*x1Ow|KWV 
zRld8hQEsOTrP=KCq?BGJ&Ido-Zv zwZ8*kkXtYMjO_=rMlqV#sb|o4EJDBMI4{XRH4}|p@0mfDRmCD0y2`UV!U{TRZJ}34 zHa>s(D#X~m2SBZi-^(CU83ZQ~;m`y*DRp50DHj4~=N{~5jX)iBufW?iwlUqTf|?jS zuh5Xd^-^>curh7i|U&QS1~Lv9AWA;39yb|O*F$?i^Gjr%lJO*B&&s#l-F`R zcF|S5$>qi?x*L}py+*YVkNtX>KuiGQ)@9J47=_2L#Y(mGNip}KX=*tS;eAVWg zZNawr$9>^8Qr>$pi4FSP${#?fH-kH^ydYA$*g56OlB=y+0qq6nJ+~#h@vh*qsm)W! zoial^D4_f3_j#khujg&c%f@qd=Dmg_+b9)feb#TC9Kw^tM}z4+{Nh<**b1Ew8T1Nr zV=zV5==Cjvp zbloQ*a+qZA!(3iWh&-+R1nj+wU%z(cG!WUE+&pJ3{yRUfvDm84+7!N7lydz=bTH=z zi~4(71LrOT8}YI5_3pFia+I5HDVN3Vmc>UmU!+kGF&3Phuv{M#c$0zync=jMvb#k1tr;;YN?{bDj|os#Hxp9 zQ5okS<6xu*wTdxg^O1Sv2;`2J2vKQJ>>RHBR+@=jqRu+j(<&0p$_)OWSx!aY6qJ+~ zo%brQ^9o#g@=vE4k{QaDF`Xv9-G{F}^qn&ITqxLDdoV}{2N+*ZFs!EJd^@Bm50qVV zRcG^}aUy!Od{%hd0<Pe?^LMcwTRG%ciE09@KgCQlCr|dGeXtX6a_LjptOr*bY zF0o`#a6loT=PulVqiw|0A~R_xBe8*jpm$Y5!WSxjcZOH~=0=NhS*smYFTWV50n>#V zw_*-_)spgzdvD`UHS38RrWvSTUge4DZ2j16v9rO9*b6P6u5C$=H)<{5eO#`jfWIe8 zvEVXCo%V)YfnOu{_aDwS^UM^oxkC%KCMs&sw@QjLbgmE1$QF3tHfOYyE1OPpUgGgD zC&uhOJ%!@hK+wf7`R)A6pthvIk)w>8zxH;;^|q~p5@0}22XJ)Q0ab!vifXR_UcoDh z76Ik3#vI;Qi!S@wn)C0E8lE)c)+e{IHOPzAod&J+&$A3 zfYkPTgVxGVcA9q@clN%Ax_{D?23rX_oIyHq)4rZ&u{;NN+p%cp&TNpCDgAOBTOE0j zi+dB00T)8z)c!UZ|C|sB_papy>fxZm>`kxBhwAD;F0(T+S!nzF2vq#}Ifh=~emvS@uQo{tf$wfSQ} zc{TZWpq&+SHMbq7!Gt!wm-i=$Us<{8ZYPOox7@|)#rjQK>bpko%76Y8mj);2)N7%A zRzY?!7h--K@|KYEuBv+_0j;LL6k8F>AuQu|B)OER3J@0epx>vX)n7~1nzhRjw)%FB z0o#MB>}t!Le5AsSbv5K^o4$Q~z%}X^5lSLN7$r;ZbV0)CXct|5%0$(cz>dqpqAvza z1^4e8X63-H|f1>w39Ux~z6?4klV<@$i_0*&6D#Cg6W$I+7lFz7ek+%OnIdw7J zvg3;rQES9a#-;3uQRlo~&+MYzvx1bl6R3h2!LfkiUa5&UIDzubYd5VsTJ6ZIz&p5S zDYJZ>i8h0?)ae&>zgKsUqN&;S(zfKb`}opHbPQ`pggwsOd&y}PxIL&(!P~{bDTX zXMi}XNpFy~zmP$N5BTR`3knn-cO?Lm#?~P1te!`)+wNG-S*cy z<97&hLFC`a)F4KbxF?tkzQLAFd_km8W$8Vnc8x+po~@YN1Z%p zs46RaP)~)b(!O;!RllL?4N3s4NAM0)?zCcs5spDDPvuRm1=jRe<}^Y*o5Ktg!XQBf zXCPgp6PYqLkWzvP=jA0w@o=(1c~hhSC?PqS?CsYuvOly^wAj(eG8r-lN{wZnpp1?@ zfqNS)>?1onrn(PB#~vd>c5G8ZIwxGfo8O#hMg|?#aXj4U1popK;nwhpbpi7!4<-OR z7Siz&Ketrzz+h4u|Jg)Nd!UYe9;5uEiZofdP?d9~jA6hU&$yDegcL 
z|HPOm1fbr5#UzSffadepS{z4W-weDID8qD?r388iSO=)Cv)kdPQLVF23YxIC+&J_^ z{#=iOV0>QZ<8=<2+Wf)dXv*00Y-xf?7rZ$V#e4u#0O0bEM>6+?ppi*XX5O%mNi32` za968x%WhnA``1y0p4AK_2OKAP=)m>sv=0-xQh#&-G4@+f_gN)&+4=cWkheAAC6uhz9h?{owac@Bj(zEEz9*gsaUkR2}3vr#vp92T9 zUmI0fr+bbR0yFNHoo;J)mQJh@9gZR?lE;^=rQvZB^klVL6S43f%#L{%drvHQPN@#k z${v-c5s#}_Q(419-=XhZrf@=n-l3WVi>*~4b7$i#h)xrnh>MXYUUmm|>JZ3-kuvd5iNl{mFZ!yxwxuu3*~$DZm63=zT%J zTBNjzdn3Bfj33$6^=k0fYlIz*EG#+r?IOg*M%0ppnpkG^r zatQCOExqo#^eKl{>%{K59=6`7EME9>Zo5_^{npUw6s0cQq*5i16XJw5E{$l_ZMz<7 za~>?OchjHQc6tsOYB(-sn2iXnUJ6PmoEJZ|mkMS(TMl+qr>wAxt{{V(%x zwf%oGr|iZTs*4xR-&aQbmz!l<%|--QE{f~&@3;hp%*6o*;nY7ikfEIxu5Dv~D^3T= zJ1ynhKqjdA&Q@wuM9s`jWUZcoh-Mj??7DOD0b6{PM8hK|&R8iCyzn zi8_{MyVFdI#8@ONdq>ma4Wy>`={lygKnU1-`_-oDc^+CdKG;30?Am>53Ipzya^^$A zL0MsIzPC*LEVM}b-@0=my4o6;>WqBnqHICgXDVA&ga=b9DXfm^a*`pOH2_b`>Hg8hbLS$9rS>m4l7T)Zp7&g-J#$L z!~rOVJ4~zk!!uaw^a#l`v9XcK_Df<13#bQD67BPk8%K3)>Ww-a>8G|68hkBD%UE!| zB6(5UI3;OCLSg3^BMHr}HG0#RXN}ZkZfMetjc&F|Y^IPmQ*gjwTA)iJn7J$v^QFm% z#@!n4FzlDIDH?YgUa8|8J_B?oq2gs?{i$)N_%0 z@R1(mUdj@!xyO>-o_AOR9Hrzv+n!;Uek05xkhpgE8DUfa#QLPLsZ<;qr$8zmm6ot7 z5zNvMMgTs^|I35(m?3sq5#CEM8bprfF>9@mFi0ULt(iVE4uUZhw+zbbfqi9pK7pdI z>Ejc5Wu=UgohCw$rIzSGWSFE&arcqp_7q}eFw=B>uo9bW@9MuVnlsR(LZjFh*Zpc- zK`(avl}9H%#B0q~u9wcm?$CfTbSTN0Gv0=`v$U<7^OIkK20&ktCh^%ME9nP9WDi>>qe*KEY|eNT)lvXW19R-RzVL#)L}RVZqTf z7ilp*7B_2O%H_5@@uh8*I=pT%9zK1P#HhLpI-`@SSk794dYX#Y2_V;^ZjAab!Hw3LvB4*d$Sm2#Sxn;^}@qW)ATumzii^UiFP zB!PB6CF`7VG_x8_GSg!l>J#^PMc;QS}MNnoi;JkRz3?YCn!EkCdf(dLfF|) z4GL;uCLU9Kn=2Hthy7iD7vI|tczHU=u;zi_6^|rNc$X{qqTJyphZf*GDi-X;BKj7L zK1Y==8ncJvvZt9STBDE=dDj_J%Zy)`ZaK=mWvc>ugbwilOfkYitkDYeJeiRFRZ|nC zM&(lfPv?H3jgS4Zwd1AwTjNE1>{GU?!%zg9cIJy2C%IiERV*A}ROv8&nI3&@OJjJ% zacUt>wON%l>Z#0oK20|hK5m@?5tcqS&xRVsJG|R{+cHj_O7BNpdh$+!`wfUSkZ2|2 z;H#*|M@4XsPfxp=CWSR7o{h1@MbY(s$$jk>CqRC=d%a@9z={_vdzmAKbs0U~DK85) zqVBW~y&n(^yPFA-#*@S1e~uAYES@V4t$C+O>%<$J{k|yzJe7 z^kU^GbW29!smEEPgPS=8f?Cc*SPC8_>^)M*r2&vo6$IWadudj)We|58kn4MH}nm4r$$YJ 
zv%)-cSf|s55tjgcx1sAmZD>DDEA2GaRgYUk9~je(Ji}PvLQz{9hdZbEjo;7 zS22h9s7Had1PKA$;KhTESir3hsEG#C*^L5qdo0)l4?eCAJZ)pemTxr$ecre0nm9Kz zyxUm&eO>L)XU|~F-S69ydDZ_->J@p*e7e2%0jWpfe{63hrs9GsS%_ zo+n-R@NdjmSN_9yJ4GeTWhLyp>SfRD!rlK#aG-O-Ga&=a%WZ3ll)EOGx)Sh)*g?Oj z77k;wrv9U|`u!}o&jKL71uO$*zWPU)6kV(;EvAX&8fat zDz%qykvdaX$$_f|?04Fql0on@#=3*giy&t)=Rfz8@$#NcEMo5{^tIZzNMj9YNMq%x zw^ieGS@=v3Ig-lOGY6q1bI#w6mtFCZ>vqKb+{5@_czw(*{eGzHoZUwNU6T8IhsQXo#uhQejbMI4i+D-WfezX zLU=4VR_-qCl`?XwTlSDbelNJ=uU{mfPNUNpaD!5tw1vnsUK zzCx89A`qN_)ol*3cl$0QJy#v?v8P~G!%Ja!umVgtEGYKI{e2BHK3ry9zT=JL>9Xmx z#`KLuQ7neLwwPXQl4{HY78}#@>+r-(w=HVtfWjmU`=Niau1)57a#A3;pHqKc%wg;y z<}F%{DWA~;jzLP0RFLk@p*sdqK)OM4=niRyy1ehb_rLDF>wcYY zXVzKkInT5A{_Sa6wOp#=JK|WM*d>|Be~Eqa?8m2b79)I%_|P6uh;sUmrQUz0o`y*F zIBd@mpSgsiPCUT4bv%Wi25Vyi4V}+a?l#8IOK)3E(tNAGzu%mOetj{bAdrBZcRin3 zLjk#+l%zWgq1TFRTfP*$D4(3dnbDKl`%r}COycKQXtbqoeo zqeNGhzxquzd~f^YyZ+V$F#`qVrW6ffbeWU{UZ6eXT~}%z>oMh;38p$I8w`+MxWt8! zK>Y?r5KJMKj2DyDaWbS@KQQl}qrYwlq$iRo=Vx23EEz*HnGOYmBPt6zATJ9X!KAig z>{xGc{6U4L-E|-iyxW4;pG;=H@G1=~rKw5X8zb*m?p7vO?tfEGe8-F}Wc0ripOEuf z+16RHGXnGj=)W}sa6j%lTA{B$EDqoNZfj^y+bgFnVsptt)?NTy7d>$*F-@wJfS108OB}8Fa6;WHnyou*ou^QHR5BY(u{8lTr#{sF zepVZ9I1AUKCeT5pYIqg#Ww?|W!D6eMP}_{_hxwACeLsO;d5S9azJVZ8HQ=dZj`u z43vF@@s-%lTsKz5pILMvn#vaM=fL~M@7ytc&Ioo8wjt&-J9Sz^kCEXF zb=ZiR66FlVMEO@_z^-iCioRC1os_c0SL>r0{qlI6`E`bq%-p)!zwHD+^y8`C{_7`} zs`)8!!}{u}rKXNihQBnp;af|uleKoWNz|TcH^N9Uw#biQC7;jz*%E&v4?P1|%tcFr z5Di$?h8iY6EYnX&0o}@=VbLSI< zL5P7o4QGXC>lt8G%EMJ;SIAf?(Ha+WpmY1EZPLV-p<}(V-b|xe?~EySS}%bHtyUFJ zGFICv6K~vPA)rjM(y*m`Z$tr||0&T^)C$L+ZZ$Q?0;e6HDPsmm4fCj$s_~8D+66eg zV>I)^_#ychAG7yYq~DXe+?-X?KSH4Qc0#H!fZ=#0nE;PI{#4I?v!=&xr9TA@LwUx& zpEETtsTF-iWDbA&_l6lbGIf)U2fJnh(C4tgE*xJ%WN6n$`#4W zp@&Sca7P)ay^ue9Ngb~+xNW)$XSu{&?97mytv-~a;r$rx&7nOGf@`VC8TlWMI?(JfBgu>u#_sqbTu> z-Q&YO_#WBDf6_s+(_!4Y%~c4!v};;2O6-DNB6Za_R2LC_fd@~_!N=>k7lEc<`6+-q zhZmjkraIJL-YoH)?`FZychkM~U6!l*-{z4>tW@PJ1l??exDoc>yF6{K>9f6=l92!R z;49pM{4`;`#HD9i%~wM^Pg7kKfFZuz#QX<4xxUfSn91#l5wr3w 
z3MMKh&TUtb@={bh@83-h0b%2W1nU|@Ai={j!Y_!nfs%FQJ+aQgsGoQnD4$I$qT617 zo{L)aBVJCy;!0F>%Zkf#bW3st5(V@p zy!JmXjrD1W^(x_F->g3mJ(I)fl>}99`ZTG}Gz8}|mpHU=6^6Eqh;(qVQ(>#T#m>|)9oo#rZ zRano{fs~ts+yH74r&TO)wU65xbxl$LsavNJXCXjHYfz28QeG|Wv6GHkZhR%<{=A?5jcq>q-=p7F3dzm z{)!;clG+~GaZtL@ukphCq4MTj1CYO96zbPf&m&2BHCt`8#<)7%c*gYJh1?e09V&nv z*>Ty|y+N#6hTG>Y{~%~|z!Vmg*~F!btWE1X&FdT-LZG8^w2px{{9V`4zije*DX9Q6 zFI^+t4__2GNuOATN?kdmC+eJP-2$=Qmu8169td->pu6|T>#qB&UEH8Mz1__Grea2K z9gdu1ku5wNJ9SYFJJRk%_Szw@_}$<)83L5N4+U6`>?*%!T1WJD&B~|TLoQFoZoDg* z5I?O<2@jfMb8{h1S{jFo*REwNK7R9}?&+U}*|Z?qIKNtFQlhp>wQ5_HW+`s(fq85p_uPYYWIOM5)37ymU(x-;aHbjhtVSk@(wMCe_7?)zaFW z@UbW{#=_K7#m=*hOAC({O?y^U=Vl7^V<%9hA?U24(B4#ZO0qvVVA8^ZeqvdMb$-F| zbhlVc58<#(jm2XKZ?|Vo?0%qcPC9%7gI0VS4dyjHTL8HVh=jiz`XIMDDDvGtO$P5X zW{DNvIr_K3%WJQ<3DAt_TAjSlos(tM<@Md$$u^T4YxVY^QKi3SQ}=0vZBwz+DUj-9 ze4#~A%s$?7Y@Jm}S`Cz)Q*WGo`Zr>Hj_#i1Rn#okUti)T!d#)DWW`HmH%1F(g%6U` z_lNZAS4Uj}OI}JEI%96e4l0lFGj#Z03+hL~0^fH@0Ev>C+}Tz%g5WL_tpY>(%%mCl zIFa(tA!BQ4!O&b@n6Wq67|ni+P1IopN`;io8P3n=c#9Q z@6H-jnCjV>=JWrJ!yq|k!MFi9oF$y*IX~yigccJA?6n;bMJQ-{4g-yOaH!LpuTI2T zKaQS^twn9eom}+do-dbtW=(c;>{0SfJ%AKgWHoaExM&fp%YC>kFcGcQPtPOQR2OH* z1f7?C2Mc)F+||r2-bHe9Is-2r1Z9rDkdcQ2+=u6wjQ@jctp0Pj5K_ffA zPYQlLBmRQ`=S~d|fL;~L$wf?r>iBTKd&3mQr0?|>=*O=$adv{m!_lGSCF4V1NE=@; z{3S|~UV)FgU;w?&of79s!mjn1ai0tkNF#&_C3wOt)4=i8{C&>lZNXOJX~n?twkWuf z3ns+#^{jET&aIN@?9_eX+PRXSm1;^rCr%&N$!Vdqh_4cU=ZFU~Tg%jvJSAk)^H!ZX8m?3R8Xl$baI<-P?3xVeQ z45N`+cY(iwxIR{uXZU@_8JRb1`)gO}_0KZ=sK=bw^c7=@yJoqEik5gJzPENFE$5TY zyp3{t1tGy7v_e*^iy8}pc^-Jb#r){X^d;QR!H#)Db8`OV2%PE%sc~b#SFsvo#~DtQ z$fGB@>`~SiC*Rn-rFpV^OgU435fy|~6&l#z(X`U|YHX7#+JgZb>xoTJwXdCR? 
zxl<$Cz@A$dlvZChoW6tdR8F&c5WS42YwC6$79p;`q+6Lj-E{1EB>H$W>(ROG z*9ALQPvT7J9GtB^<rQIo0qCZrlymM2^SFuq(F9qpho@gFU4Kjo;^j*Xm@TJS4D1(%%u8Sg6{s=f&S)q z#q&B}Y@Vo|S_LRo1fWi=HZe9U9t~w5>kB=-W{XmvLZ!sRj80*c5{AAV$ylUUsM~Wu zVy(J|A86tg%MUeSeyo<&P_ck%jG_GEjOoqZ8#$y6f#VD=V&)ks1Pw5@5X+Ho&Yb(S zJV07gGsFtKMtg=cZ;eCs@L$AKbq_#Dm0HfFn<~0fRi+OY?B~MCjHk}ng1jmTJl)-!1kl&@bS_mH$EK{csldYo#QQT;84YG1(Fj-G$XdG$tO5K9%m~d@LBVa*RpMD1eva4I zKaiqURopgMqSSML<*hBbi>^qYVi?{Z9HFkGtQ$LYn2V_Cn!dO{N`k~PByn`YL@GxP zUwk^zLVmq^@Ime{2C|z-x4!7uyqqmgHVZ(tW>PQxnM;z^#u8uZigUC~jZ)r){bRr` zw9!6Zx)Ft4^WiUO({=Jv^kuo+BbbRr z76y}s0HV=7a4)5WG5O}Tm-}&F%aGuA&8*tD3M+$W<$VeBqa8fGF{ajvA>NGphSXNE z1*>lFm&VJ6)RygY0@?6gBvOlnvXy34VZ}Xum=imAoMs3Yx!4yQ;3`fWka8yANK^aex`pvMe_%i$*bQ^!P0_ORF7sPCQLvB=yL3Oa${2)Wa#m zL@fLd&Z|--9MmC?0dk4K+ZvrG`p@YUjgR{%+nmHcletUAG2WSIr$sqJLQ;`f&_10V znjGB4Rf>Wj1sKSH5$2piz|(eHVk{}U6&IR{MaLk9YA$zx=O=k}ot7h#>ZC{=NjJ>5 zZ0VFZ3kJ-GKYn^q+*ZQA2AsvZuX57WzF?>%OZ4-&dGC8Pg`E-C@8ow$7WaL3gGBU$ zCK^m|mc&^+gWG9vmoq9%j@V)EqTwrxW`7F@GLHO)X8%ABR$&h=w!s^)f~#36t0D+L zWy=3e>6!086*n?=A)4^^brm~gT|8O+i@H01Q$8uk4!?}QbxYa7PQs?+?7gX+DiX2b zwK$0$Fk{Y1!qw-U8+$srOlpPBUmE|CXCUD82^78_a&ad^b10Xrw3F)YSJPgqc0Hzj4X4^V_C|DJVF4}e5+mz!camU!~WkC}DNQB0w(FM(HjWt7J z9nl%Tl`63$8&-gnO7;r=btZi_E84k#8f`l3PfRN`OuWt1ifAnofY;sW*4~?bc>_rT zyxJzy=M4yM?{fSc6sp@D1u#eCPz>C#d8Ed|TJ&>kr<2Q|_wPVY9SK5!>n1^!9DzEr zxd;51)uvp|zt(!Z=ogoH^?J#NV?{9o)vxt?Zd>2i%UM03%U1W^DIiCiHs<`8QDoPF z7d9;^SLiiG_G#bgoF^#t_^7Sx)o}o9{ocuCHQ-m!vf@Gc6$9r=CFHoyQVQI;qRa4j zC$MN-X_is-L|wk-X04a~ zfh@P|?(aOG@mxhGUtpi(&ccEN@y{J{%uw%Xc4lR|FggS5a|oek1&y6kG5x|cYX`qE z;Ef}PC!H~zL$2&y?wv-B+9~T#>+7b3AuE}~dO2a?!1nRG-CwfyxsDJ9ErlCxe$_@| zG)*NF4|H1gx|(Tte{hx))l~MBwFzo7D9T)RWV-nD18TmU*Z>vRv@KX@0KvVUm-tzV zv0V2$*gQVA{Qkx^IR9?6S2yfE9(k?IaE^r$_z?%8f6r=FJao?g3M1zw4gw4H)n-`q zJ@pzp+7qhH+>JEEJhWjeSW~ZMDZDVR*7BXb6}_>cIicw9E&gVxP*gF6teIvX z@KRY-Mi4hG7LJ5xiBq|WP7h1R0srwBXRg@lnW^qFiFq^=%=mOqc*S>FtC3fvFsH1m#*}tlbuf{NY9Cx;*O6voiQ7CEblvRx7P zBVmSbHztXFY_^TsShiO-$R=3}HDVNBXJ^U_p}oa;Dsymby1sb1H 
zEW*ObsU|Yk-C)j)_gw%2DyzwW%PGm7_xSGJbKoT#dq`Uj?3BR8*Dy)8fDIb51gN zzIL>Hw_c`|QGrQ|egn*rh8zG}Yi3bcJAqE{vB-Vj5UMuj>3`!zZ4e7JIAqrmf`)+x zFNe>`F3vZGKG?5(3fMb%Zpx`VjdS7!zwtyr0G1ebEruYStBad$y{1xxKn3MzzQV_C zAHk;VK0WN>63MbhYJf7|y&@vY9nqjnj6!F`2*-3JYbUpbO;JwFZE4%7`wE$eH~b(t z1Wlj}8o`$@MJ%#40YVfzOfnQbPV>`JlFy%Qre7q!!;P3x4ZHrRW}jiG6c@BRxVw7G zh~}Me6?bEw_D?8}cQD+69qX^2{3${JVT}35GW|ijyHp{{QuHSt!ZC+WRJP=A!ngr) zfh?9R^V8*f{3OSi8+6 zcCNgN9M4EzvCiO+LyEL^$e_N%Hg4s8D;S|q z!>UXUx+cFPNBcoTH9;|pBSKP1jN_ZIcvh??iFq#dj`Oqj?!mmJSY7bv`tA%>=_*2j zK-US4jj*ykGc_6a3`@ONGf71=DJeMZLbe$T9CCZ6tvMdwx2rQSJfo_+^WW__<~=L6 z@O&-Y{*|cbc7jk_oDM$=t7Ur3cSbS{es*pgYtK_BkM#$3{xv&rIu zT5L)+<~@agA`S??&^XAv-zl5?@+=RcILl+N9F_;R`O>1oIsI1kuq6eoE|r{xb|-Z{ zM!Nf_FI%lo?Uf;D@olOY+)xxwBl>eu;4{A`nZ9UMKed`5$t+`3ATw6`Z)9h%l}u(s z^~hZT?@@jMW;60+^;6&Be>t{V&{^+6sQAN^+~Q|BYgD#(MxvFI;}93~Qr^Dhfg>9+mvp>I1 z(DtWw(k4V?iKV|z_Dr#ls79;4S=H>#JN--8%BX47`f^O}j6EPvw})94=R@IhqQz;N z=g;e*zMqs})F^NWoq?g_tYzi|B%W-fr+$6o8S{aS!7RZznHSOJEz>N^)L9E4!d7dm zIxMghS-#L|&2~m{)0ul^+SfHaYuB;RsSmpau78*9x*V_Hb%$(ln^{#w9+|!!5>1-G zb@1y`p*G_KTVA#k&0Tmc-P_pA-Q_nM4R(#v+DXkMmBNoHW+xvH;KRH0!hw2e?ed)~ zD|f>y@4FsvyRsQxp!B0TbX3`mwn*)ET#mwY@6N(Pj_&=pg$fzCF#`OYxH=Y0VkMnN zvm|%#kAs4cPxa$jDirF<2(zb>$PB5zMZdofyA079W3ZnF$e@7DN-$_2CW9DkT)h8$ z@qbKQq~I7V*rkW2`2cXW3;7Bf=c>sxWe3{pxVa%2u;Qc6k7g)@Zi=2+OO96Q?-lV> zXhaFCbEYxI>@(Y)GDicS<4fEnp2>4Q(w&l%=~K;CgvRrMbG4gv1;MogIXiNLh9$!v z`UjK|bh0xR`FvBf8!3n#3;DH`Ml>uxpgM_=g^k)?RD$`fGAea+5%_LS?C)U`$jn{D zdI_QQs)%>GGveH7-6m|r=s zrj^OBne%f_L&c&!2mv97##Ay_ZgEJsT(~~9e%QEVh)_=i{4GJ<%P$|SEvT#%9*wi^ z{4yIYzOE?V{_y{f0sWohP%c}w-)=HTcizP-?**#%BJabwccvS{6Fy!6=CU`B@s;L6 z%vs_6ZA14mhyw2@%Hj}q8}`o^+jSHtn1P`dqR3!macd6+q$`-&TdvoFW5%ZhzZ}rz z5In?#=IXUAtYMY|QxM=J4d9Kppx<=Ggg)C5fAgpv`o7+SWK*BK0yTJRPF;wF7Elzi zx8SYCFILrYqD`bm!vNlPzZOqs!*;NvLSZLbOyXNLU`?=9!@eOY7H`=PWwj~xq(+KJ}=7c2i&1Mdkk zVLS3VGkVJ)wesUA1$%?F@UfM*->>>N_c#MLJB2(xcpn2qvUeBhW}5qQ<;Ffa8b@iC zqA5DfnHgxMsGbJpVP%0HVmS+%faK9Z4a{AmPft*@S{fOvw 
zU-pG=G}J)n5EQ5`7ay$#csilCsZY&0eer{sdiNrRhiZfrABKF&2A}ey&W=q)5XO^S z*D_GtN~5FXJKDlYh)`1_9&mdJ&)Ztnmy_w?OZ3(yJJ`#1V7C{>f^ipPa~uk%tQ&2R zU(j(YetBA){xPX@BVS*(C@kns9t8;4%^p$9geK-jYhRJaJVSEGOin^`ip)=jBwW}g zUPztPoIk7ty3m-}76kdn}0migB7fC_bC zUupM!+^ir{#8?XzVKn zoYep%kteFTE^DQH!75YPqUC|}qzRWP*OZZ9o&1O&%Zgf~`8A8?If2!eb|}cU^{l)% zwGBmf3g?i&!*XzAOkME$*jniyz7Hm3+6e9@g;u^gXOt zP;8kE@+?cdqCxY-%mF9B@T~1@VIQf`GZo8}@YV`SbV1{+g~C-|ovF`qj&`2~AqFH+ zqLT-%%9b!EnNRPJ*oIEJP5dUMkmqP8nm5(hE(sH4`@Luaaqd7qmJk*O66c{YiB!LO zI`&hm#<*hvGFpk7psUl0D0DLZ#qGf&Tk%)^J`S^2BW@LSnGJPz=t<|}ec330bZVCO z+V1&~hktl&Rc`g>&>>Beawe~QHeI1B#HrcN^W2eoY4IJz%Q2=qPgJT3B;)hX zzY@yh3dE0_S(khVhj-x*_vb5-Uk>ul{xxLMUEu&DvDPosJh?3b6KI;lzWL&^n;0da zq*U*hLv?lf8ipZArx-!di_7aU~Ic`Yp0pN}8@D1lJ)w+dybo}5@@pr0+7wmmh zu_RXQLU&Iu3LRF#Grd3$t^x9IY3vyIv7pc@(!fBK1d93xd$oKivdrAda}*JiGS$S- z1t4Hqmd3bvC_S_fOcri2ucJvB{s(utHO*w@PkSKr6anP@TW#p_BaA=B_P2B824bH} zzz(xUPk)h5>ec%)$aOfINt$&bc%B9;I&9757IhKgE*NmWt1v5fOteFfzx{K?m15!H zVezV?&X!vHcME?YWmj~0#ng6s?&?k>J7=WSDy6f(HZno3$*i^`@Mt@byW@80 zxKSVhas0#rTe|(sz)lAtzQXN*c?A|7&z#y*z~Xg)2xt!&p)d+G)LFqh11>Qz}Qh~q6bn(53`yT|5u z3BsfIO`O%z`avsiqcN!(al|2jC)o#B19WdWC6iY?U1ZV2@MsGz68YJOk`*>=SqpQb zbl>>Dh-ZBs-W@a5!tWnGuGg2WHTU-RXH)IkRCs`PstDy9{-i=?qG&B`WrUdUCd$v+ ze9lLhS9$BVaj}TWDRp!^4Bd|r8hFtCR*qbXGkkp}*A)iL6G#V+jw0W{)uw1P8fdq| zo8*&wdibxg)e!vOjjzZR3cYh_&)u(Rn(iz_h8iP}9F`E_?s_=_1CHhna^Z=>AgQ)8 z1RgX7j6e64SPOjOL>kqlSEZc^yWC!B5vq09222O!FLXJ|3dwLU>Xmaf%20;V7w4)_ zE`*?P2P#)e5ZvLbN~s+CAC5iyZ!6~9m;maeh>R}eM-OW_+e=hUspRa}muoth^E2@$ z6>gmlWt7ofRswAxc$)?jsV8wyWs-3?G?)pU`sHg6#$l^pID+sD@^UR7<$H29{5`@~ z$iEnS)}>Hkjm$rPn9w3WlkeT3tbvL#Kg=De0g^n6u)N{SbfGl)?W4|0|NY4 zU#6Xo9rHMQLANhHY@?RWEpkc@C*RhN{1)lvGoHrGQ+0=U<3+2-b5Zx`s1Ur^*qDCP zhrKB(8Z3sF-}1ASz~Z&n&UOXx17$3CTlag}WJwg2BJ~(e+0G=MrAkb_rjBSf3!}&# zH{m8Gg*lAWs}2Ke`ICrkj8nIh{LX*80HUI%N+#@cWEMYTba4TpFHx>n=(2wUzwLA8 zwDdYADlqAh505E~Q)=fZk&QVzR|^rZH7GSUG8OEL5HBYLJMUW7h^L za47%OhKbvpi|F}>WbzEw8BK5Ln)U>Wtu3wAi>;}Y1$hFECEFOF)xmx;)QVqKej>0rWitOa8 
z-;U{`uIBq6(wwC;t1^w>3lBLJ1qcjjQ$1hE7XT0@?3uwIB``p6@~OmM69ib%0#OSx z_$(JyBdY&8$Uz}eT8arbW0Xg#g#Md>?zvg$2!1d}&udqwJHSSz7H3Dvq7vFuB{jUgKT!_Lm)KUZ7MNCi zmxkfrRk9QEBAOaL3E{`1`iC}rO1u;TF0Ut=M*#gO|0kMfH%hyy9KPRrz#m-W2^m{F z`{fv05*RC81(RY#h9(RmD4or@2fY;le5Tb$9bz!!uhrJHVs2Bz}(@BgZk z()d~=#E-TAAh|x+;&dSNaSXohasLo6kVw~z24!x4HStEiH!3BfK9JK_0Uda+;}u8S z;lj_uk$lFKO_+U;!$b>f-y%1T=?K+YEK>Tf(BuVQ0#tzjYW$7M#t~j@uJvZ<@ou{p@(t)1(Q& zvQ3pnrS2V{2n#PJdg3IqVx?5FJ|G~Zw)0cQt3>B~O`LbvDS4I}myH8E4z2pa8__H$ z3m@>fFstXCNif44U@dG*kto+&>CS4m{ja2A^WM!Gc~YD_qfZczu8?HhpV#LRao)dGGymM>E6H@iwN2YwD*0)L6yKe07CKK`61&<3T7$fb5o=Rc+Af<{ zu~-lc8X%7N8i#Sw)WYENb(We-b>KCcM%J@!BrkJLy5ijL%+UPh2$U3l(X;xQ2Ytx> zW3=ljxrqVwXaIKtm*-EqIQn_935ATuCZ3Jr0N_q`#yMz3WeB~?>)1o>s#X%5XX=}2 z(fH=@(p&6m+4lBo>2|u7(2U#hcvz3>rqS`m%UFjKDhC?BauM3Nx*uKZMZ(2Ft;btm zh2~yJ{lnhAz2EP8*r)r=rqd)+2TTRFYI0Zwi@_wOs(xzP_B6x*2^<0z|f zyAGXPRHSd~q|=wri-b+i*$Jy&6)<-0k51f6jCyoTEx4G%*8=3)mOEUe?t60s>_(Sc zoY{9gCf%NXJbWzq&)N7tqZW~_gZlJTs%8Cy*1|mfs$;?cbRC0N_5i1B8l*y^Rhey~ zY-HBXquc4~wP!A)2(|pO8efOvLe-2}NXPdjVO3+G5}IwH-1K||#)4T~F|O5trDK^j z@Ry?ZG&j!om?rjPk+L2c8aC_! 
zOQME>=cLcN&8UNP#r|SJ7TBH|PUDkc10CD6hJz5a0=_j+fUxG)w5I!1y4DAFGY1() zP*uOtm$V*=KSJRn{dQE1Tf7Q`wy(G{hdnIvJz5|IPVt3=sDNL(qyJz!B?GbYLG>a= zqAS+NFnR)1m}>iG6UOLNLCh$B;+C|F4q3;%BIQ3HvvC_#V+cN_6qt{oT4 zug%f6-zP}7Z=@zEs5qeXFz>;dRhN5gPS2%Ehc>h_rW!YJM5a_Z@MXCwNYPI zVFRdIGnyjYBq+WCb{x9AJ8A>*LtEyFue)P==SFjSgAG&eOM5Yr_H{P5fE)taswUon zDk}*Jz!a0I{!hNBSciCw`Y?%D(uqAz0~b@}!+V1#6S(oP7{|ppa3LnUi}^W~l`8(} zh<|_ce(nSAN%2ENEK1p1hIK5-@}&=eqXl)q2s5l{zw5R2>oPxxx97t6n(?3$aA#?Y zGv@r;eV3p`Ey{9fkF&=_GD^#L;n%^L4lUJqnTyR_8u=QC>h*|*>qmILM8tgY)Y!-- z9va<@{wCkEls|lxj=J0*INzp~tnI|JrfeDK`f#7lO^8JE?Mu`Cok|A5!5Mu|1eE?= zg@IYsuvHxqTV(W4S%+)!^VaS>*)W0G6WTZvoQ+0E%4W(Vn!(c7L*u;KvQAwxeBTCx zQHky2Q<12wwN3_NtaC~Y1<2x@F-Yk3#KDmMJU%-;iIO7_TG}&CcVuvu&6MJi%-IB+ zjuFMxv3}ei_em2Bs-CCsuhkQ7t`#FHc{!;CH1qEt;%*)zE-i>y2QMaB<;*bJ1iv8l+g%|*6nMx zqNR|&i3BSU66Ki=RE@T9fPUO{@L)W$F&yrA@WkrHO=HH;j z78$0!bF;4CTNNi4MPL+lV3iXyUZd3018`8&=(h#Qh@1SH`%W6VZ8-ef(2O4oubv&s zvG!wD#jN~S&1X_~DKU&#cKTASU`O)A*`s|!GO8D4qZq4^dYb%PjQ6PuiSR)?;HA!XBg#0fJA^;o&CXT8Dw?q7GW%7HQYGuwCPV}7ciNW5S!}&Sc*~|xL zRg$}iqYjMjfprO4rHwMWj}n(0?Q8HJxdrV5o6jDYoCE+HD}CLb761lB7^TId?S>SW zorfLxj9S>>rlK8L+hfMbhQ8{TP=)!J8N;8W#pn^iEic1s=4VGjX2XGWx%keAuY7 zvr9??&KpbBBeFdG#TuFSt^4}P;hQGDA3LCxLhPb z!liF!pG1g5%fOCsHxJA+Btzb}9N-8Q!GC+1eJkj?OLGmkDxQ?H5pG{?(fhMNkYL@h zvdk8g9ONqXg;sBt57K+=3u_sC;Wl(6__ni!b8`-X%=y#M3xOim+7^?&(A-7%R^=!`)?~; zU3nE}zkl2jcwD|xIx`k9sUm8YsQ==76x!>SfTN>7 zGlydy5SnvYCChTtVwg9R8dD_d-2Y*Eg_<7I8*WYgrQ{R09~cj*K@HfTww~^ZW>|tJ zEe5_;S6~1vLe!qhmGPsx8gE_YHxpzhbDtOb^BLgq7Nn6Z2!#?8c?D+B(GU;j#WWzd z7ptitYl9br1~jTfp0OKI zihKnA+^~l>OpD#{7z#d0JD#|uxS!43F8f^kq@uZlQF-PEU2iQdvFue%nL!bFgQv$h zu;EMW+1Sq7In}l^545I+LS5Ilxw>b8vgl=E(Pt#axX}Eg&u&wFdVvN%v$s64o?v2p zAoEgF{9k_~b5of-?L4M4V~0Ei-S4Y%g%%22wZ^kdBpd*VFYX_|nY>4tDDr+k0qfQ6 z!*JD#eCmfkuHmGJugn_D!x!SCgDKsAy=bp8R{vgCs!V`j4*QJJ%I;XKYlALk>sZoN z8sL$R_Bv5hjAqi_d+Nu(R3v1|o^)4_zHqJ;uNNAM5%nxaUDRTSnCR(?e(+D8QyT`R zV6Zz!^5v!X-z>sKjFyf@-I*3Cm6fQYzq-D(y|zQAr8(*c>Mtm){%)5F^{X*BC*$2$ 
z&Xd#Wb+O3XG%?!WDJKoku_#oNH%&E=m4gAUxQ`g{G-0&2Xq*S$YyadIs6T#~N!v!J zzPFOv5lzgH^**joucF>G{8crIIS|zOj%ovcGjn5nEJNug{j(%ILen%P7$wU0?rrwe zQ?u}eM0bM5DkjqpOCu}sExMrWOq0b}?zdCv1NA1e#J%lHf7cvmC(k#`(@+6*+*96B zyb0mN7+wE231k)CiBZNlGSDU9DThy+C;hJSW0*SzoO63;Lp}{)Z`g&|P>d9j7LOMF zZh&kBBuUcwNG!Z7oio7Xo}NHE@fyuqt@q-wP%D02Twb4+T<}wOSwJUGtK4pGd$D8u z#b3sgCeSl#7R6mXX+JOI_mzjB$$z5~@uop1$n)NP7uDILzDP@wxvi6%$wc~`d>>|1 zb1SzQvPkR79#58e4d%C&`Doo}3!nkUa8!Tb(#qMDF&W8TgE3|V@~x`kE+l7BeVGXYo)%V!)vL_EhA&)L%@c&2 zbLTN>Zw=pA$=~V`8?gs$!r!vkb8Tb#NDhc(F|cGGjJ==laPSff-Sc7`XWM#FoJ#mb z;tjzPC$SEyOFvt?7jBrYPH6NHP@VdQ~w?X*5e>e~N{^ z$99Wv)5Rv^oey_4%%`i+q2}OyNuyg!$+nm#&b)U~^du(X1dmJCp{I1LF(kBoTA-ri z{DQf3chj6aRyy_^;W_GzW4Jy&B2gaX-MZC4N1_#TW0&b;8FrI>myOdT3HUS0@)_%P z*wV5n;IW@VN8S)Cd%t3moEUE%&2YHp{x<4DZHj6|_j{H46fNgu+gJ_;1Nu`PZUmWY zsLi5Hx>uN={1cjZv31KalULY-RYYTZq^ckUe&TI;=)Sa(dbte%|5}dK5&3~X7!@vo zgV-E6k+ zq%YZFSDnFh&TVF4XC!&TkleRd^J)N|P2Sg$qCFreH=CrjA8D0IJ}{_sI80Qrfe`sI4ttWRY-_t>Rnnc01( zNae*57u~_y0XK%PU)+V(uGUixUe~w4f0RN0@`}&=6uUA4ky(U3hp97y`l(Q!eC4r# z{;`LabG0KYv%o3=G$KKSRLldn@~$tZPtH~_Z{&yo6B zX8FD=?X$N@9}5e6_Bf-h^fR-k5I_qi#@vEzh0Y z>hkF-@$syMPTmE-Kt~7n@oma}51tN{>s9-E)uXZPIzXoqG;$V^pted_nL|qS8>x?3 zKl2yn@+(u3Cps03#xYfjP#3})+}jo!${E5iPa;MSpYFY{^Xn9A{dJqH zJT)9lkuzRkvLmnlnTHb0F-al3qlGF?_L}L9QGd?M6y2n#9*lnI<^mux^$r}omr-j! 
z16sWb1r73meA-sWs>+YQ+Yv494?jA~Ge+d7#tI!_IC$48)Zqm@ty_|;=Y005GHng1OsoaM}ND!H8FbzWxy)Pvt$Ql(u zFHaoFO*WV>wmv_wO^LoQ_5HCgmmPLG%bcexe<2vliXh}vIg{yHYU>xoo}*4gl3nxu zq!W3+WHhEX74O*ysxo&i1E9KQ#G1OA0ND-G08?=ypKR}*=C;TvgZNX!oLJ@zx#hII zX(n?3gCLtysQ>U`y+dczp4aQ)5%8qQn$|E!@L!-)M$%H;;~TsDa{-+WlF>P+A_2oU z8h)WP=o&K|@tsieyVRvkP$t7oTie9)aQCO4bN|+VWlxmaZl6ol>tmeUJh#{G`6$9k zgo|gHGx+qn0rU7fQ8C=GmM_SN?{hUJEm|tY6v15;WP`U?1r*BNGmFe{+D$Y-#b*I6 z1@cx%)CBST3@(!{sPZR7@UuI+cbMH}P2JCFkBZ!C#H-AU@)WpDP|d@JoLx~i*pv9(NW%_nx8Jw=`U);J+i)#4w`2w7H+i!0JaKI-E)9$;N`7HBxLu^R};4Vv9aQRkJ@*#|;FwTGH zQROP|pNb6P`);g}^ShNR!mbx4LF=kuLa!VH`dDiKui9Qi@l{ST(DZ&ayLb>Lm|902M@kDzwW&ehaAVZbS;~+9kqqQ15q06(_N0 z%tSw8pZ5B8`#5yW!t%A+QNtqh)!MG+OsOWc^)-KMfGXZhYU8~b!y?iEuPEU~umO+f_gUY*?w zS22TN^*1iS0L=F|S29ra>n+ryLxphxHo-xXTM<=hBYl;gAxacDYMViqGa9fWgJmmw z#--ye7!mD8>MOr7h&pO*3Y^(2z}gsnD}J$Sas9sd=zQA_TXr&wo_)HJT27rdt?<3R zxLXbwEkr+c@QdE4AV%#|B@_hgqszL`?@9*i-GH4lm2O1s{YcBY zWp)>t-_+>i-Dt7dZG&0cPHDj1koK-w;0@~T?!5c_6XAUGN-vd8rOEB^tQ^*{bkpxzudw9IrBalhF4l?yx)PI+U|_2C{uW_f%nq*a&wo%%VDim-2Vg5Krp}a zWh`^JJwad_r-31Swnx)k=f9L%k7G_>BmWu3UFq;z3dgBjix|~5F#@jfx{gtD zaC|gP?Vck7^gj5wi4@p-0ELKZgq3$N?n`{!*LTQ!>2+Kqv*k~*96E&(NiZUupatMeMrDt z5Lo3=5K}RYI9$M?2u~S?wr;ps8{7o=V!P6Q1~b{P-EBvIF9;V#*a9b9!VI8fx@%db zB?wWBdeJAmZ+DUH;}`&!_wGBPcJsE`s2a4T7&r2F1rBC|hYZ_cAqitgS5appVz$V; zRDzFTjt+6K6t4QMZa~9O9jf6tKP&8V*=?TWGXcG#)C({i+tu}8$93AH1^`(jTxSh+ zR$5#W`$lQNu=<(tx|(X9kqr*S0{bKLMZ&=d#k=P(0wXK;SwmGpU&C7w*4yJg75WK%V?_1hI#Be5>`Wq{WjL1eYpM+ll_Da6Ah3>IrJs({ca;ZNd5<4I`gD43 z;WX+yPKoauk^gWownc!qcK81*oi?TuskB_*6P}-=k^qkz=W|u$<-yuX||UK607@y+&q%+w2oq z*=9{pR0U5B8Tb!S2g=c@R0QtRZoGieb^63v#`;Op7c2l=YJ28V`|#nkHNQJ;Rwww4 z^y5Hl`apxe-xy+*La zD7jPxXIWzh2=?~g~ zbfA9~^`rBiQjbj(l#`S5K-=lGMA+JsrdpA)qTOi>Zow zv_ z>%ia|d0M3o-Xve}8lMRUuLFZO*+$N6Ev_G$^cnXJpBG1`?Z(J)JHe~R=5}&z4MA#|-b{J}21q2V%xrTM`Lg5wmU3}M$jB&Yo`z!ZT`RL^{UH>mLcl_qdzCZof`!AgH zSHSm|fBBcwzy8<%+V`7M{`wj>D|B~4-7=0ZmG8HAUg!n9-FcvQPAC&6Kguu!r&!1*5Za<$K0 zc6HbV~V*^|xR5_57s2?G;u758U-&S~#{dO3I~A 
zVhHElSAEwjFiA)#(Esb#K)(jQks7F+U9Z7aLN)pI{)G|X!he^7o?q|r%JvZ^uG}WY zO^^Nx0KV8o<_mcN`h3p!`IPHo$KCb|&b8MyxSad`oVF-ln%h;}^R=y8-S_9)B{>cNsN5+Gm^E@U3O%(jWKJF0ljg5Il*Xo~ z(k8jhmY+MFHVGFvLf+vCrG*rZz))JlV7iWSW`mpJHio<^IS!SxZ~_D882ePnPbq>& zWSHfzuCRT|5h*aEu#HmxvKS4L6VD(f0}}T~^;11t91_YbsYvyq6o?U$Ss|_?A7Ylc!Z0I+@&Dvd6@Gk6NbCH0An4mPU#!>s5KC-k|OUJ}N^_=|%}S-ah5!&toY zZ)cwl14cG*5OLnst^p;Ntr#23TDHp>KhuX1V6A{;0ew9r zb`!)C0G2Uc3au95Ix8q@M^V;}1G`2s%nq?3wsK7Vct8i=%pgd|2LSMXgLY8<-%TLM zCPw)t2KvtA3_xHCmDMP!8^D2bi>m5m)Xv~DxMLtd*KBFnZcPi6F_wx2J2tY@fI>J6 zO4bNly+)a(95a8#*ep0DCyWRUZfkMW#f~OAw|&C+TQqX*ij-(MI9;w|&&?&)x4~$D zE`#{IZ{_f90DRnU4Q8dx4duS!Z&unl;A;M^z+Ru8@ zN2FEZ&L1p$I|g$(cL)rs?Q#Uy?DmpXf-yFtZnw`oHw0%M`Alt;2-z=37M(cg-sIa6@X$N;--2(9!jc zbdsvB?IMTmp+l)NH=j08AlKP$i1Az17}b=aK3oOBkq6k~Y=Gr}BU|LT)E+b80HZLp z3d!y{Oe*XgcG`2R#|!0CiUKKy^Lq^yr&VB`a6WktAFZ*#CFQi&0|4Aq*k2iISvT67 zi-4y^%DI8^lN@q{f}IDvAKpKO+How+Ot*omg#YGVN^41Hm~7@FlK^1crC1N!7=SyR zYX!BgJ{JrgV4IFWyoRgfp9X!LduItgEY)*0J@w^>)1SWQkJ1Bo-J4#0%PZ3NfA9AZ z>U%H!o^zAhRQq7R%HzBRT&lM?mewVyL|Oakv3@9LwczX^$7|x8GLCw6Vr(vL)mrH^ z*AB}|gxKwjGPY>bO&}xAI32=3w+PSQA$M*S)$~tyZ1h1usB+&5B742JN?2nwW_R^TIf=FCH$+-j(shna_-f$w z5YUa0!&bU(;DGG21z@F64$WKPI>HcEG1v*e?64E8Bgzrm+QK0WfZqi8wg@HP1|~Ln z-dsl+t6c6*Wg+;lNA6#m_vG**n915QHZcy=946#3U=SEMz;i`lSI6U;%eMhp$>}iZ zO7kE^u#~V||40ek7@=*H8dviY#nZSc>Ry9e#;cwFv~Ji*M%PWcwh@M9dDY6p$dOFQ>@}s$ z&=^x5Z}l9l*^vT6<1?p8P80Oy+36XpeGeTzl=kl5pAKDpP1uGg$3*VkA;#zk*Stfh^Q-g? 
zrb^EhDrX3e!OD2iD{kEo|L(l>DBL5gq2x~>Ol*Q%hcQkWC$U`Xk@aV{9)Gbw-7 zpTdDo3zF&@hamhnnj5q!3gr!QDX-&Dv3Z8Ewn4d+YxoeG-$YahoOQ+t(J;!+z2@z_ zhp=;9 zk8SgI+rNuX3;5Lr*(J9Ng=33+U&Kyz{dAV=fgjv=WBQ%j|FN%+&-bS?{M1kVRC>>Q z-oteP4noW=x?V48&fe#3=d@hHp6mUcP`B7^c7}QG1;L*4d+a%-e9ywlm3qzj-;7gQvrdq zQ|Y(5X#L~&YoK2PFHQ}7;UjmYpZUI@{e}+q)}Q>R>0kZdzwizHw*R9mqXyJB)JMLN zzyJ8?pCma6^#A%b(651StOl+~0DP&NftMNpzSQ|%tm%tCaCe(tTTR?;eeKV?H-@2* z^k2DG)Cb!XF<$q6rcJepejrR(I=kEj@nEL+LAj^U1Vv^my7$ zu0lbO)AhBq2VnflfAF2@x;tKmu@Z$PN@1x-&OG^aI(paLD8!ylW0v=H(yHJuM$F+| z@Z>oE!d7E?JRO{sb7axgpe)d=XX&-(VJ{vD;OUTIZp| z$~dWgd;3t+b`29g+0tbKqgl9Cx>Yt@gr%d`AW^*A-AVm$=GkJy^y$W%q69We%%G&( zM|0=ZZ)Joo29acT%>hw|d_&ETs{o=ggL)%)dt$q^#Scd>JgmJ>yUS01#&RgI(bX9ltgHpWC5qcZGAP-hwc zwZ8zsCG2+AVMix-l}L$e9i3ZUXA{mZ3WqN^C-5r(3=#ooZEc|n767L2c)k?e%VB~` zeaLlobin? z=24G=V4hd3>J`|H^HiNjm|WEtbDqMIOZnzH7YGIp0M#bR0o zPc|!?X@(@(HylBIyl;#Uql7poq^SFs6q)SPAkFiLoNE-G-qJUomFrkxwnb+(UIU`J zmyl0(a~1X4;u@Ys9atLWP_lW#m~NmHJofk#>8S@EOzHGWx^`w?+Bbgykb(LUSgr7@ z?mq@!rZ3?|!v}KoSw&^J1W-D9b~P;vf}$qYqk%$jb8MW$QQy!(0bsFeDJN%NQF2#{HEB!)%yZ-dIZtV>H3Sy0kGWS$Pm#uwTA;~$ z8QBDyl=?uqO*LsXHwhuUc80lNB^}-~m2Sr=YKAqjzI-~(&Wr-U#!;IL0(fv(872?3 z03?uCaJB;Itq7FkDA8dq5&#xi@@wX&3h=fA=u~b~wc`LJwcc7z!|Z>Mn6qadK9N4~ zzCTO%1AyP~%Gagudc&JN=A=>|L@C}P1at$a)ByYo0yj|sw-guOd4Cl?n{(u$erpFO zJA-g+>V1Ur4a;c;74q@tmecZPm30!gF2XZZfT9D;$Ia!lsDKxEPZ;Rd2JJ&s2FCX& z*Ad!}`j$e!EAO}*9}Fh-A6@`n`v9Iw4KcvlK8!-QN?2SebSnyf3?Njl+!FPpOgt?H zzSj!Z3vG$Qzp}+R+2Xo^_k+sG>_`+5Hoe0b>rih42Fr{A)VKn|RsxGmd%ny-7XaE( zv}bqsPkOM+XX7sR?E zwi)Z0#)D^a&!buo{DywO=elTczsB)O>n@L#m9=ylRsJbJ@QG7r2?>5Sog_r`3M%0( z<^Zj)x>g%d$bYH2sUNKb$_qbBsQP)FIQH+|pZ4uLkY z#W%jbBL4aS_=;$zE2@y6JhqfRb!@o|lWm~sv&g6}rj7D0UUwZUwgGYZMR!~OZu{<} z{Ki+?J^Rc5`L`@8AjwFkcL-}ZmduYrCIL=9XJ z0RMfU<4)-C^;GKLXa5)4pCL~V%-VTDd9i(eyIp+xR`DW`_xyF0fzEjwpxj!z`@|OK z*}q_^{$!w5NMdUYl-x z&8yPbEN}p0mu#s-!b2feOV2*=Si1jHUrtZl_dq)O)KNm?4X4{)`>OOE-~DELs%|YT zr6<06PdfJCqiK}%>8JcB6Cqrb4GmExe6Tx<>Evd#QwW?j##0rOI>| z?VfWe$JBXu^8!bw4HHZGZP3a9z!~bx9=*v$t<+a7l$C81NiD)b5Ab@BHZlOlSUn_| 
zNx77NSYx8hKdL>clQ!9P4Z~`SoA?$`qyrdhj{{&vfG-N)DJ5%Z1H&RIYHx1PCMI3DD^M{{@EF3%S| z0JzwRG1+G*U*>}x(pFWP8$i(0`LTI!0FQNCg-eZmw>d1cAfo_~J~z}Az>SAgy%8?D z+hp#KI$}3mwqZgK0M>5|f|dJMATtAB1K^Bw#VxYZH*yn)(?vXT7-qw2zA!}AKoIay z+p3=mo`%X^dC(nJoO0-VBNuQ0;5uG0bwG~7?rCmRXlSW}1N33IS6!QiB|PgKu_qQ5(nmk>cj>XCkEcnTYp%QTs&sJw zXu9^Q%``uw{G5aqZWDW$p!|d6@Kql52yq(D1DabSPJ?m)%UCHgrx5x8L7g7Jumd2j zV94A6(yju4<#!-wnlZrS`1q_LNA1j+bn2<6(^o(J`Sk4lPo%wrGwHTNH!-gh4j0D@ zj_!F>PDcuPTj%8p^TYy<9Sb;gETLxJ1b7b2p&mxP+QcbngYd;`)OibdImAAcya3U! z9W|sHZE_Ax_9bDg{wJ<16{8Vb26c9gkYkYYVC#`xVj@+5LcvB9h)z-V8ul_avLaZy z+q7b>x7q1Ywa;|cod7KFdeY~}{2;Q;q%rrufmkjn-D)@$XeMM0O0IgpmYryBLbRMU;~~7LG`{mPTTSj=p6$7R)N3+0&3Jrm2laEQpXygszYkr zYn68EGHcUQS}wok=B{Kj^bpZI&*GtIXE~+ctj_O6cI5$*eeOht=!u!&-HRTpsqC> zD2EivQjO|bqyrEhAe==7CxMEjs?=p2=Q%mE2r4U7z4q-?0(RtJd~l(oHQUh~bdET* zwbiCp8Ha*4maK5>QttV(ws{%$9I6+}QJ$|FILA75ptWlYRrCh1e~t0KxV)B50Dzx6 zwUEx@u(7tz_sqG@!z>3qM=wz5|7sdVeZFgECe6*yrMX?Z)6C2+=B)`rWKZJXfV$l| z<73VX7d}M@h_zdDv8%6?EmN0nxx3d~n(OoSS|M@>k~ZN7T3nR20Ks}NGkR zqQ0TF;Bz@B5H`Jm8hH!H$Bktonw;g@oX_yDNUdv8Dc``gE#aTX*r6apICACnKCW0q}O0xG(^Yi|~upRgbNwmkZ`wQvJGzrtJi8 zvgO-gCfKBhzy}6P3|#}LSB5aySD!hV)*m~XPCfZdI&)%?ymlD5FnBI)t*42DyVITj z=#A;fYwk>^F{-a~57fkvC(t2IFvU&L9&%XLPS?|C{^~R7Zt@8$eBX87acesA%3IP1 zkfVl@uFf$hCkds?&C)1_uK~hFp5%C+`pC!AXFmP8G`D|mddu71nqK$DSKF3b#h_|n z@OPh2kN=l1Bm#b>U6}YMFw_nyG_DMuArrEaNy@Y#49ZeR3`B9`UdS;riHAe_q8Hu}N z&mV7t$P!xF0LumoLDT}uvVpT06rCrq$Fy23CE3hjlS8PLD&=H!=0L!@?R$%2HsSg_h6N;3N9l}>?O)2jP$=Cn@S1^hX@Tk&G zgX$}SX;K;IhgHDt7qOo@4bxD6L6O*ziXHgd2C_IuCf^Ow@0=4*MQ|}>h%Rk;E&}cX z+GsLsPvbM4caPE5V-f&Nxeb7UT?!B9{JMY-9YBU+V=2|K4Dhbb(x1XO9)_P>wo>N` zo9ZyPVYG61Zvpju{0IrJ9FtOu>Gv{7$7hNV(DLtN?yz5hU}x^vz4-_AS;>d3XMtKN z1Pf3IK%N0`>BipYc35Z|_%rklwXecL%RmwRS7W9CV8bnc7P*4A0dRm}hyE03$y#A| zBmKysaTPW^oAcOK=iQVejMTsYLWR9I0H)svZmC_IFW5PQ1^J{377!FDH>R;uK-Hm( zRsT8sv&&&u19xqOi545Ekq=c)BG+GYINfmFwGMS$ZES$slwVb9KIOyA09Z=r41lAK z0|2{kIA^jdLa-;mnZSq3F!BpJ_G)1ZyrLY_drzbgXTfs`&}%h>J{wH2`!0tvw2qlU6ED=3D`w 
zW%9zyQ9=1mM-;}NvPe~I(;0yM>Bo_E;%3M(ZFtfZvUN>nV z=Z?)X`hatSyV2hr&%om+Kwk6@{#o&>{JjbTjpEaxo|W5F@7az%;c(BuoPfZ5#7fjzTnZf=6p z1h7@<4?rY21UMda0l+}B4uC1~TLWOJq17LxBBGA8whJohcT$$tP-8xJ&m-yKdmcko zxQXKXNV;n8!89S&w;ZN`Ndu^fw}7x4{H{T+XG3P@I@eNGJBRW$!jxORDF+bsbvx7m zsVLJhWo!XbpF6dM6B!U3CF3A@fQJ}IA_L`Z8vSt&z@0l|Z+2Pvi1xs-y>ju4)$sQYjL@tR>J zI&Heio`Jp{l>Q3e+(5P5;W}hwou8t>baV>OEdq8q#I)4F%I~MPuo@U3>|?|NsIci? z)wHe%>PbD@^^S@;*W;wS7DsH4SVHo8Cb+9SzsmRPkk{0!#=ce0p>CFrR&#R4xr!O< z!vfF*AFi!Zy^J~bH8KX!S06EKj(p&{wvZB7>zp90u9F0fUB7mE(0UTe@v^3ScJ?)6 zS*x;>kR1Ymk%sJC7cmAjUkVtvsMk96x(*CJy||K2pE=88IV}+mdIQ+*eoEa91ARvT z8siwE$Hpg6R^b4`V`4%%khwl2O!g>!NpoTLp9|-qjG^QDDT85^(Px(bJO}6Zc^iLU z4Bh2gDfbKI8E?zchknoo2Dd4vLWNg+eW|vLI%&y~$YUY;znqMmn_g;h`eR)Xo{M3H z@gA9;Aaph7g0sT#s3ODgxttzpS4E!DywGrD2@R^x5>e=T=M!6r+(8;JcPwfT!#&u%;&%Z1CJRkUtJGcUg z^KbL^a6G-1b}_Sa?h3T4R$Jf zi+Z>mvKRQCPw`v!d4)Cm3s&5>z3pvolc_1<`k&`d_JV(Y*Po`pATRJ99P*-imib@ z0KNj6?oytx7gIGm_d|lpsjZJ6gokjdOeye?c&G%uj)$Om;uMH71 z{*%gKgph?*6dUb@vuWk%$#k4h&}W}Ind++;r~n5iQKub#<;&6?|L{B0_~E_j*w$)l zOrZS1=&DeOGT09RPDYhG4Lk2Mj~q`Yo*@kMRrBeF=DAPht8Tc2$)ml^i$A3V|vWTNMMM9yWlwa-_~u_}9qm7&%x0DpJirX9RKe zPoY&+g$jdcFeU)3-v}rO)?vAm`ghCfK*D_r0ALucVw|;FManAeD+AzYk^pQnTw0XV zHCblku#BT-Mcc-O^4^9Dt9L?KS*(0rLxg3_D!DAzVe>8q=3?kDNVTyvpl1QVO>Z1O z*fw{Bu_N*U=RFi!>pm=C8s+Gg*1L>zET)w(j!0cszALRDLuGEWvWEhBRTS?-#@1|* zc6SZ+jaZa`gLs5gyDj!3=+RJ!RfTmFfWfe@{EP!l6jDmAr@Xqfm2sS!+>%gS9?t4WitZ=kZ z-&s-0TS3MRl%cf^6r%Jm0X~Ham6}sP%AuegdQyE?uXDe#!Miw+L>~_$ezZe2>qnc! zX-7M0ODU@S86l8&4J1di`4?rCk1}x9q)%=rNTq=Y+!6? 
ztlxzquRwD%QuW)sjq1-KrvaEl%oPr)o}r1|6a+tu3f5g+KqCWSGau#UjXDj*nD!US zF#}-r+pGkOLTFBuvwqMt{M(0C>M8SK7fY-B*;kjTpa z8z&j>b?$7Yu(unl%jp0iPVczw=5%QPKGa?SSi%90uz_-$%V9;-LkfZrGwPf*wNWPy zYabBaN^6IT4rQT2$trZN=N->y%H0ddLm4{?SgEkzzxl*R(`UZ;nREc>oVUE?jcL!W zHJo0SsW#1#Q#{70Hyj?M=2iU}rUhV$wii^_>0e@#ve-f$~Kv?5^>(oKr=O`^YYmopl$C8tV`m6eWlL6mQS)CM^kHa6r z-y7%{)CJVivz6Weeyj7c;Behr zA>Zm;tQTq<_AB7KMID9$I2^ltty1byV_ox&`?@~#B+XokLeYItbw}z3*3hxh8Gv6U zojy@Z%R~}suhF*wuY)*JwAaW53;^z|)8_&EPPxmmmHXGZ)jj7i*K&HAfixFrT^^ko zN#nb4CIA2rD2FOP9KuOpgh(WnNkMQ;R9p}YGKke@9X^^Dp4c)BR(lEn3;u>*g8_aW zg>oYA>d+GF_XeAz8gswU`D*SEFqM;qt|?Tns&9c{UB}P|?R=I)jN8%0oLmc5)Sriq z`&<5Baq#NCkE=WN)`lRkmG>IsD0D)+o5P~(bf^=-W{0U(@fc@YOQ=PcNFT82xpy3p zs+_rYc$34D!m#V%c`3qBhk{vUq+hFVGv}#Uhk6ecajZ1)=kY?3<=~?} zV3oV)Kem-44Gtcg3fV445UwlFoL%Ix%zM%^XzirEM{ul~CUpBWj)4ND3T>+VxC)PL z#}@jyqOe4SiKsiTGqDi)8pqdBp65#>k6g-_b~9Hm-^mw8tH0K)xCT-D3eU^hnfe0r zdkgs6Bs^w^@(Td#+;lzyw92t9$E;ZMVvUlcf*z`;ruu-^ttr4Z=}Sf@P=RT#c3JYZ zBfk|WmeYv#<=_AgM_mN~O99L(LZ0$1LQ-!5fCpE&W|2sHALnrk&Jl;@P{Xw^?WwgO z#%Ekzl=XhQrQ-7|>8}rfucX$xyvw>wl!xED?|9mfBS-HD$hpkD*u z^cv^`;BR`BzNB{82f#0>=Io!$6;=aRWDek+b-WXhUhKm1g}msz6sWye-?&)0dT*xA zw~tE!;BwO$5Gl-q1;|{eOa&nIe&4kp+8CSdi`nM0ZTiW^A%g@!8AjMm!t@Df3=;BD z`PYVJ#6__&z~>vs7t=HMKAcY7`$TG;B_t^Z=I5Zg2VQY&y8gAVNW*()(n6(%L7p(V zgtr?a4CAN_ic%6chtuifXVU8FHNu-trGtm(P^b_Jl5me(%Ik+!TWSdzR;w5$r6n73 zbj%KFW5V`w^D4uoWO53>ZD^MvPc%!$LfXaQ9R-gB zgG*>oLi7nT+E9$aALIX)6oU;y_mVR;pihBXabuNPQ-)bNJZ^6L;&UDf$lP+-#%{y9 zjStblC8V_T{Q71;=atP6$U4mofJ!r1L%D%@p!vJHc6bIlv*yPn^6jWa#)xRfo|Z!S%BZ112@A@XNC0;k`i3GHBpr5#vMaz8{pCc z)F_W?h5k0e&r1kso1}NxSWAfS%wzLud@TUZLpD1gxM2+)1b{af3j&NX=1U1x1vseZ zh0+knt-dD(yZU2pl#KMl&?NAc4NhSeE)A2RuV8oAP$Wbaf$0pD%RHh_WLVE($*q#K zQc`7&KH_lV0$lD!p>B3QTq!u|%TnRXNUc1h3Rmn9&xWyixN2PR+AbG8vR9 za|Xa60M6UyT-^?@W=Nr^tN}y=6jMIu4ApZ0;Ihwd&(=9=TYoQb6aZL|U8F_IXQ_Y% z0aYh5)Yo~vg+XzXc~W3+lQ!5w*{JZRYBz_}Z{cKMd}37XvkcgnVXZ>Wqf8&7Y%jm{ zrgZal*O3F2`GyD!9dfMe^(d-FfeNYLwFmWqI6s}I>QB%&<}3kV!D0bm=Q>yDTn&Gn 
zn(9x$5K7dta(6K80yftOKmCzUel&gMt9PZ>y!vLs5MPt_?Gj0n3OquLM@W4j7mC^n z&q1I>TvR`z5ejFkMi1a4pkxKGL(BqZi72rG1Qt*qMnylQkj?zwYIFCY0RzZ3E5Kj< z;I9;re5N+GqlR9GmWW1AmD;Iv^}ea}2ArE-edis(UK~qMY$__jGfy5(k3RTt z+CRTL?cX;`INY%`F-c!QIXIAW-WmWi7w8cHnD^9=0WgmN0I=ZNfK=iCP#EYc0C-@G?I%zUD-SThSm4b}Qz6Tx z!gQEr<*Qa?1eH71Duw=Sq4t!=&|v;tVcshMx4~t4<_7(OP{2*)^X1yY%c~YE`jxJq z1b{UryziXPw?ccgBLsk*EIbbt#{$h!1pufJg1-d-YY2I0X%>t*L~B?}aM$yk#<}`t zgkksi6C4(3)b@Gk$UxTP0_&gbPa;~F36-Hn4rJ(N_@&cbG z7x3!p2Ir?x$S7E+XVdKbe44?bV+@F2CH05${CZ`OYFQ7CD@P7HZ0T7usc5Q0PsDXq zD1nQDNoCS^@mX1$i*ei6O!_Cs1+N_uA;M018auj{P(Eili&^eW6JkQ;)HD zsZDh4sAwyKzVs!oYpYy84g;}=bv_DDuD+lqcIC!(Vx4)SuKA%jU8ySqd>MQW0o?)s z%crJJ^qs90l=Fmw9#AK|irT#mN1nYCT(ffw+EMEJ9vykU4$F1mD~}x)Cvi1$S^xC` z@MYaRS5)CX_RM1X^zq33TOiy{h+Dv{POM;JPmSk{rhvcQeRypw?iSl%INVC|c6lE@ zb{8(E^xWoZ`oX<7rhoVHf5I5Hcjedoed<%6NqJ7xgf3w3wC_VK#+P1~<~ zmAyW$2;DU-u13`UGRv%Lm22^c;F*mvuq!^T>iB7%}%9% z!L`;8zy5XuVBJ^rRlt`^1NF<%uYnh?2KoT_g{#|_!k+s8_@&T_{d2fdYv76mz$M&l zsr25Pk4t$Lep>_J9{Ag>y9@N(u)k$Xm|ur+r`N>O1$6q{KqcRt%CLv=OM;fIrS-Ia zX3hEFs^nR#pdxBx&>zA;HG@HEtd`Q^!%wB7pSc^=?a@@{M@uuK=`f+6uOeq*g)oe3 zLxexZNZG{TH3HO~#0aaLfNLmd9=-p`w6=oMXy3tf=-@mCJVHy$<|yzh!?hHwQme@j zsPK2Fab#fISR-dEWtx~7w-Id}PwDo!LLj!%JgTrm9BXi6E8X*fzf1Rj_)}@%G%C6= zV1bOD{7#_~Z43i^G%*O*Fciy3K8(Ru0N83332Ou?6v`ENqi~*vzXEN7P4a|QjyB+N zQ>se=Ho%WylYu?V8QNram4Q`h zLvGBn+A>h28_`D0VzkspCAX@9rHl+=pO5a~a%LrH*}%nhfYaQw0tiN535XWo1w0Ei zoZe=mN7;WdVw6rMP^W)_$1bC(ijQGiLotx}IriB*){PQ%jBLUTa2b8v278~5ey3wZ zk-T>sYbElE?ImQFQq`fLd96M!K<5yx032KJ1%PQIJ!F_~E3aQJqdJAoKH5l1a4F(| zzsjYF!CZY-&kmE#zK3Lg&voLHJ&%ob`#kd)&D%(6TmZ4o-G-(N_+{8ukv7QxTLTgb zA_@K}^qUmy3OgLCp#W4;Iy+CY{t2o{-R8QF+=S%@B|;3|LXCdhgt&x#3WdE9soK&&*lr6R zW3C^A=TNZD&auTfmmp#YMtC zA5O1-<;&9n07wlra0@6qM28#)c8y3~CV1sVWA3%~&|4QvEs4mbYNE`r6 z3OT`V)w7g;)vA=CKvTw)RJD_P=F=iMSU>aGPo#(Lzc;=1jyuw8Uw(TU+rv80It!p! 
zCEuh}3o5%pc`H{K6SySn0w04oRPfN($_y=K9MsVc6BXd9{AE-}>znjZ)RtA8C=_DY ze`;?Nn?WM+0)clOOivPN;sYQ2aC+#GX9)qj7uZLvQ>j69PMoj6;1JOsh5*?DOl~KN zgJ8~Un#xdet4c38s2sn_HL5MC3yrTvRsg$?Qbs{Q0UYH&bvWbf5TY@pwiL{j%GXL? zsRAXS}C9uLj2NYIbVD~45)^%6rn2q%`P3Y%0yyB+x<~O`L9Xha| zb*~1;lS4o)J$&y&>9GeNN!K1Zl=khNOEUnZ2_j&O5;HKX^u};`<*?PqFXX;(FRUcOYGV?QLoIoXaLQ%nnRR+K~XPwu$WiK39aox@Z>1n zlQ!2XgtuNuizm+5(W}Z@&*X**csaERL*CgSM?1$Lk}*b76pggI);G|X!Tm5$a16!o z7}qId)69bt^!0J-gb>h!^yPs`t?|KU!(oKdXxuYFdt4aCW``)vIf~*SWi$QKZJz(h zU#VgUO!1YsnClURhOXntpnSm!2i#E(Vd`N}(qzqtnyozc1TJ$JdO2%!Lqq4a!QLye zwt6j+lZE=8YF|Y&^wgXIAmlUU{&f@+_G$I6rK!wkytnfOYVsC!M$h5IfrE`z9)y6# z*=bY#Ue_{&ryK?xC@{C8N`%r?b7?4KZm57` z499zqe_x!$x-X{{U0X>-8*`%IX(z33Y#RP9phRC=XMR-g7@tlPv%p_+{z?HnLQ0T| zLw-}wir}E@L#@Rg@jNt#(q6{H_HfZ+5p}M4&ogaz9ga24ZJRG^^ukU>UTd|k>9rh# zbiozQP}+;u5IKHG`Rg#=T1%|}X6{%2XFiPMWXBH8FLG3*p@spzBV0EQGaf61=hnQV z)FEz8U4}4MIE4Ungu;ZzdMO7X9>8Bc2r$6drNtG2dYxmUpdLY)J}^u>9AS2Q*(l=x zhZ@Ge^8X5u$2C!o&=4);^FQx}N%#B9_eCE7U%qW~C6#QAG5des{jBqj$8~R9`;^k|yr`tU;V*RxO$)V-J*xH~KK+{2x7f3Wl3?n-LtZ(&*A|Ni$U z4SU^bGiI+;jtG7#9zPuNZP%3m)GllF5t_har))x*T4%` z1APGe!qx3dVb6U4{8DJe{yAK!HSoLuaQ9B-0%w15Woj?g_~ulfJ?pnEGw0purM|yd zNiL=EE&JXX7D@ZudED~8_w!!Q+Fp?T_c`uP58UnZ5cqNl0$A|3bAP2JdfAd8dyuuf ztN6A+#!}V*zhU6g_A)lgC@6rnv9y-fjxD7vLObJHoGNpZsSb2#DD)`%o#wamSX74> zYw7I$Po+n{a9?`tiKo*k)HpZ1{#EI^SKgimFvzY$!8WmKwUob){Y(<_PPaEpXBz2& z`yNj#t6S;7{zK^y#_aL5o~BUi3;~)A=>#^U3Y6^-!&rlSd>iWLt)EKp<@J=0_ z8X>PEz;2w}e;B`~QQz#R{IeWnxm^Y#3}~RPCS? z8^T~df{|P*V0A<1sN}@MBki!-hkmdH1vQB0}x1lVS(NEQ$ zUBGGp76wllVEwItLjkj;PpK|?x=J^XpO2Gat2>{`QU*R@?kL6#y|C@t?Iwe0K9A^N z-mWF3OcWuE!TyU9>#sL-Sr>llbE{3W5!Aoko`1TA8bjfp&!b?W22$R_tl(0CikC14+EE^Fmfo&2*vBr&jJJjP8!!9+mRPopINOcgSFtW z@^pIlF-N#>bd6V0rx^oBQ*rX8Jz{E71?yR%!6J-m0lek6(Y6uF(vUtIyj|bfpe{vb zRpH^Ku2N?jLi4Np4YmBJ=!+tN=sR0up*F#)TSmRXvpAQTR?8P3P zUMfHVV29q8V$oyJG-dk{C*A`NRBSoP2pYy~0k2R7XGahERb4OugGo(FE^=K)USQfx zdBMr!D<~F#KFX;%PIFUR`Oi^&N?-S;?|4nR?fUDObD7V8VI5SZ69BD|k(d{QRtQu? 
z6oF9xMo4JoDc77OMYSQhwoyM7H1T%fNMc2>fErOA2CVv%YLPl)UK}Nq?$YW~`Y(U; zv2^O$#dP~Ex20FV{$&Yf2f#Uv7;W9^LbnSl;7*JEDBi5vfO#2Rp^yFlv-c)Ywx4CW z=T}vG@0!OmpPUSk8Nv`G3RQ$g*w4M%(Mq%jI?}vv4Fi#Dz$6x|L~3P z_x<1JeV*5n>3}GJzyii#7_K3qilT$Sz9lg!sXZ)~2qFLwj@+b6YUDo2KrhrQ>0^>8 ze((SH52-q!fT6xotrJC>RRFMA_X5Vn5v~HjV1u;&Y&wF7{)=Nhj`+vCHp^J^1NN^z zM|?EBbyQT}|NVbwhHj)Aq@=rRke7m_(#_D)4blUs2+}RBlt_2O(A_mONH@~WkN0=| zKL6jl?pkNvwa(f5?B{-XkbD(T!G_ukpem6a0#s^GpNz^~;ZWn@cH9R9=|wv6cQ1d3 zck4fCBTW3!`lu%AhTedl*Ytv-9gkYh*SpzHrT*qb_g2Kj&A{i;K_pKWWbCndTlz=M znuT@3TE^Q;{C0KReUNLmAmr}q8x0pwZ*rl18}esHX01Xs15Sy)he8GRjrBWv(bOns z2a8KD6|~m+mnkUG6cZ(NtxlojSJ<9w&QOwg<;xk9zTr2Sr}c>b>qyF|M60=wi8K)U z98h#rF0D1=!N)6t(&6xO6x*MKw7Q4DlOcBuLR`#8-4X~TO-*0CCAjfnVWcn9ZQ6** z=iIpLwi0-2dT21CCuYVbVn;DU;oC#nb)W{tL0xR0F*3;f0RN&pRV~tzXXm+D1OHVO z3Z^bQ$AXRBvnmXW^zpyipHyF=fl3$WpK#Rn?pO^$YlF`=m=ry^#F2r-%52 z^@qhFKG%)Ik4sLcVl(;^$jqr%HwmRIi`9E97lga-bkLt?|(@)nI(8Wi*( zW<(?&1B2PK_%-OTDiI>@=+}XEBgJ;K zj%+S7iziWBH-*H4BgoZw3q0BzkJhOX>M42&!?7>bR34^;a*rK9G%Ydxu;1J}O5`dX zQ}w=ju^HX+#g6uAGdxLH>`5%=X86c^77gyDX5uH7yg_%Ke#da=Ft9Gi5n}+KFRvb! zkM60$&)RIxj6nWkB|IR4AW6ZGJx=#M zG-Uye(pkor6?J*NHM0~mjdf}IbsGF*D`i}J`^SF9`eqpnh?s}xrkKm3CGa8XwW58H zFmRuUEK)Pdmujfd2-_lHE2? 
zHAf!QiNEJ5uoyyb2NII!Rs|(oT zheSr3r^jlB)ml7Vc2>9OfLnLJ=yWghjfjM^#|=nyUXzT!Q{&V1uKTpgYyp*H8}l>@ zC69aGIG6Izmo`1p6)HY4FwJpNGU16%{4CXN7BUp9Xwj@SXkO-5&G>&E|Ca?o<%e{j znPbCP;AANx33dSivf1I(+pf^d?}w9Ym^+r~qhL6C*C|CD#PPp_Zy)^r{MkoezTMu{ zI1XrI&c@$)q!Inar8N{UR&091rxj`(M5pB{k9CgVLti!b)`8MCtZ94DMm2GtI{v{U zsW92Pe4Wc}FxwI>@cX2&gvwv_H8`84 z3(cTrpOd(AxUb-w@1Pk%7{b&FY_LpW#c{v|g+a%Mnq9aDKe|B>Xbb}1HXiwdVb+4j zxbA#D&hFOQYE^j#E1LmsmV-~Jvb-vQ=pK<+0z~qH9M=4Sb=088u)4xcE4gwzU=^$h zs`c)B-T4~lvP|~xV!&cMr;9{i2+1@GJZIbf6bGeS;dCYcKgQ_(`sgiJHL7b&K}=%gc|(IOdFc*mR)8T$SzKN};g43!Xk=Q+hv0N&&Dc{2Y%w*T<2{ zO}P-unRG*;8gkKB0?c1VvzpRyQna5NXqgmGqn=NM!rLf-w~yYqTV>GIVU<(B`U^<6 z75Rw=7sz9HtHv9&9l$tBy{9t)Dv=fQL&*nhn`9%6F+x^aJBH>D=g{cz2HH{ULc!De^A>piuW)O#YJ5*J9>LX3=468=YYlYpCTXXx_~ zGFL)3idq$DKFxO*E1rf~%)x#F#{6izO`S&rv9Yld$_TKR>@Ky;|lZH1v(G zA->CtCKf6LUMGyhPEwW=kN&mRXCZA_4Dkky&C(sGK`A08#164kqph}pf6fV3!aH}x z$bD!bs~7*!O1D^H9)_sAE!B^Ykv8f&^w`lfjPJuVQGpTO4pUZ%6)}3A153bju1HAC zFS&c@OX(=Gk!)!0>B$Xzd$P zPv{$lG6|!-L1Xk`K{w8?dZScdlCC0YDrS+*5^FGXL{EpW`xO!B#9}-j;?w)^ogXvVLZ;m3K_re|F!cPqPZ`oW4mQyjjLpoFWsgy-Z z!1?)=`k%0{WpCZ9dIw$)y4d@wf$;)=t^@0`-C|^<>(+CKO4HKCjmB(g<*-Z`XkPIB zc$Ms=<%WI9BL+Vk!YLX9|E-l)P1q)Jia#^IQp#1NlW}ngnb!j}8Y_ z6YB0|&o#3gjgUGe|M}mR`>B#^3=N)TKj`m~{(NR=6TfXR8eEzKI{??1=Y!qUlqG~^ zxV@@_J)XX_D~D;zu7`(k?rz_gR2zqu?#57wk0)=xFxLEg{~l37{bkqf3?Byd{^k&@ zB)wFoofWONhKE)glVqT@UQjSmaYv`-=Gt``d8E>bj@RG$`?n_WKB!G-mv<&6Ey1l8 zzn~`F&Tb&|s;`rEl1%#Px2Or&lzDftuFhn&j5=BlY!`~23k?%wjaqosRXB;gTYrkd zDDw$Aauk$#F~y25%n41u&>Y6|LAomx4y?82`hr_8zwd@HlA#r3METndFdHh#HEL(& z7T$T_7n3>izV#4mJXU?UT-!>@Jf1Q(tf^6&?$LYP%q0=h4+T9-P@*| z6Uq+-x6zZlbF5Tzop~_ppx2qR)6)`jb3uME3(2JblHz@hXkVXF&U)96+T#QDDm534 z0oq+m5Dc^~E^qh9DhgczV@P%0ac2-Y=M+2>3&RT5xk79&wyn$(2KirBt(BA&7mv5&R+D1*{hnk{J8Xv`zXInkY z%SGSOhzc*C@t^$YP1gH^29^rZlnGKsR_siD)o6RgEVy%K`FYFk?QsZhQg(1X+KW!K znPGP3D5&cg{0kWUCK8ts3E@Q-f-|GSc|9v=HfH&AOTVUMq*ojHPm8p?Gzvv=_Gc@> zx(C~a3{Uecm`|hi=wN@M;V($Zig_b+eMtQigqAk`(US*=hKdGtlk7RcFw&?1-eNUZ 
zu&?sYT|f{bv7Ha(PP2Oy9mpc(dhoOQcub-3W^Oibyu~d0SJxs({@PYL#Ja30%9a(W*kvItA-#XBLD5zajGfg0c2sZXhr zB1N|zd24Iq?ibTKc?Ov-D)StnuA#ZjI;fAO43-=*Ubqbg2X$N6hvvTh#y|Uf79Pvr znwjuJBaDOq>Ru5L4EY;jj$%@1a++R(^*gTl+bKRzUKRLewmbLm1zevbX8xz)w|#7^ zaNQIoEs&+ZZJnbN9&m-4&e7(SbxF2_1yoUnUJ@9^b+MrHy0)jeQ`>*4g07R7nIl0Z zR|l*^5xuI&k9Ng~89@mVdo-N2p2k8~4##%$P;)McgT-P>JKJ>?WV`-szjsS(F<y%Am7ADK=K&1 z`&EhBs5tId2Q>aWbxDSDC>8xN=9l+W^=MshGAtb(K_s>z(V}5AQ1leXhTt9k(9b(E z=+(QK$X3e-<0c)0tgxupogDj>2U-YZ~)&q^0?A$;7+r>&+{ku3JwZHS6#pbXs z)q(z`-I<_~*y5}|h{wl(IV{&hb^If{oUAJ@ayx;9CFP$erhw=S0gs)Z8uYz=LnT94 zBD^zQUeXg?TfLl80gn=EfygdDOGr28Owtub%tnM^MGt&);|G*uNOXjNq0G-PN@C)! z`|0=Z{VfehYf4`MZgO`EiUp^kGI9@R#cNYl@@KLjDOH=l4KGkvIv%;EooleqmlkDe zq?-=jLhEsX-fR1|Oh0VD+aY_OQO0ev%(M8cqO*8I$v@XkpDrB%#L&9m!mr;`3BitH z+$L4&RG?U{+4E}7S#3pT)TNeNMC9(1&$1nlZlTP5r;M`9g@z2XFH3fe^>8!UG9KU- zq%B;&|KW0YVcF@a^nU5~uD{OpOc^?6a+_zO=(Xq)>P8kQE9+pQQbu=IL8s*8zi+^! zyGhr!9hh4u?WZ%Zp)s~x3eu_Z8Vfj=87j-wY2BH_!zl6@CBEDE|8?$w-T%|Mvju9F zk^j@VbE$`R|I@iO2xidgcd2R-x~nXtO4c5FFsthoN5h=GL($CL$)g$I?48Z7TsuNto~YF2dVnz6xF5hb(n2>Zk|4MUD7yA$`avus-g_+gb0c)N)#akA&(6V^@ZxS|LU=?R# zdXpN3voJnO3!erOV)iwrPG{4oeU81rM(rvU(BqTZ2;WZ>s8v%%nBEMKjtu8D#K*^3 zCwvx&@@LZRj*<1mXB$yT5i%n!`_>+8xWDy>`%jjZn2hP~62&BE607eiy!+tnAelR+ zAd3^S{BmkvAu(A~ti(%U-)}wL4WX0NK+0mAtKEyWL*Gu$L?R~@yuXbpR33c&ibtA= z$qwNf4a#kE?=k*zNM~7)OT!bct_7=A^C)j(1%ETn+CkSE&iDL_M3<)PZ|i3QMlPPh z0IbPTiT8EH&%c5$9uFUAokWEwm(}sRhAMG&6xOvQIlUwP9z=L@u1i^X;KE5YM|A8H6Yyl0L5I8QUtzoYTgX(#wRHRoiK^unv$wk?V{E$V6;Qi4T z`nLdo#)N{08R+A73F){IZxEwgS6l%}QrhyY25qCOe65mpRpO4|3K=(V0T>R(dLR&BEf z;qLD)2R865G0wq&d@9EVZlkASIU9>IE2yP$qRE(le<-(7!=HTz1j~_jE0Q@#g`+k; zn0e~P$i~Ihm`xYN6UyWynI?Txp_cm)ikD}->>Pp^WeC}#0SoEa#eLH@xVoena}{`* z7j`%4yL?ADr;JJ$NjoMQjvyYDf`|kykG&oVN^g64CBZmW*%J4D$b%phm7ns`?5FuLax;0qGDAxG1Sf~Ny3g3###m6RGdAn zzK}FehinxEnE9-Dc3$jykJhsqz(ls+R;fbdecnNPO3|~R3T^oY0JH&$b^kmeBxh@^~7n!mYk6<2vm>(78GH& zZZ7paPQY=m8#%uGq%EU(OlI{8WVN9!H1=D#2!pfu{EZKq;yyhFvdGo2Qlv60|Dl`q 
zEuOfh%qja;HWu$V?1{2Dr*nunimx{R4h%l4d$ULL3I$syOO~~OFMUHiH!4G(EX)Kd z9aO`V>x8TDOA0hL8w*CO*J?<~A%qdEN7Ps29Y%}qY=1i+3p6{j4HRV5z7B>@rd|>n zQGQSI(}q(wSaFD>$23&ET-A=U6bjBODBSqUR+IZczxw)tv6la)jct4W+)zI=)5Xzc z?-+)fhuGN}JRPO9MuThd!k3sq2n`K^R619JBEx6+%XD=IZJ)i->G|6jY>>@;A08Q9SU$&(2%GkYvZLh8AG3}9cQ@>KL2{>hY^uuv1YlDC6=BCtGL42& zmAOW6Or!fgTLDIL0SzF)yxE+tVmg}UVf539@i#hN%`%XsiTX^EuCJNbi>LC8TKQU5 z%S7wH+m<~JF|$TxqZPB|4!j)V;99fcjd}n+CnB+j7;=61c#Huwhx@E|$O7 z@sl@ZLgfH9am?MI*W%1csCS@dCXtt%?1JUTI%pGWW+mxI2trLyBj6p`ZCl_?^aPpo z*_%9(7dgB9Cf7eM>E^N?JD(npABKFHji0r!VzupE>LF=;^Z#B;o>fhjw3TqXQXfBB zpkcH^`)luHx^C<*hqQo(bP%EiY|Zu@*U<&6MMXzdW+us)g`{}4&tvyXlg!ni z(cF4MAu9M}0sN$*FAKIh{=f;f@(ol^gW6#g{l_-wW|7TZ>>AOCU~X^ACXZ-_Tq3d! z;&@0ci@dG~?$3QxW9EXi`;VSh`w%!e@}=a`K7U~qcj(_mSN$J^X>Te&Q-yyrBC4{~ z!c%o!D;ebq#}(m_-!{|_Vq*&Zh-=L`tCMx!!Txux&k=;A0v^l@Dyjh7Vw5nm13L~H+L z2Of5LI`ty{35{Cw^Z49KMev>8xlR9Yr+(&6e2->1(d{5z@*XI>iyk&;8z-hI`x03I%&M5=EI@Pd`Ao2!m&{~G7CoEw{e zX%*NPse&&Zj7N+wH@3Uw&uA4d~9U^pB%ZdHYy`i1!;rd0_ljgRy1@PA}> znDnVY3g{gKyW0P}VsiKFY9*_OwRiF)v(icE8qByc zU;p7q==G_8q3%{NgG5jyOx}K_(RkwG8P5Ar7*@^8xQVstvUP@Ap$>xIIK*hoZZv+) zmgElkGK4&BHf?!}Cp=mK>~Yv=Dlb~ExEnRzj5#+d;z*y~d0t@;Y5C`)0)eZ?(h1Uc zbAj(5Es!`H=^bK>PDVK}eA;P`#jLfjs=1IJ6KHKjl-q1xWDr)ls)AJQY}57M*iyF? 
zZ{JO{OQBEvd9&j<@0pwesRQ#t5XyQJ*s)>8pzZ^~{+F|=~?6~zcxQ#e6 zug#ixT&O_E*n5XpL*G}Y=+<~>rB#-Xw&v+0fWQ(hN(JF`N~#;2&?{%Uqrd-MoF?9; z0jD3bOF88^uI8(ujh*7NupZ3N$2-Y^ghEz91;=uUK_3lt<#jlWN(RIjAEF78W* zJlgQGKCp(yHJTHV)wI2QW%_zsMRnMxO^3*>B~ZaTP#&+)B(%olKov4Ca9N1!gDGiI z9H%JftG1wd%1h*&Q)@h-Yx0%wknfl0m?Q;NVn+l(WRd+L;+vG;vf8JBt?u?V-76$x zm%c7oX|eaV>frl;_zRnDIh&3Lr^%eQ<|r$r^x1c6pq|fnQqiESu4TdC3FIWH2WB0i zfa=Bk=YTT0$3w1G3y(<&i7y0UR) zik-TWD5iON`46Q}*2MzTfNPe0@YVPB;$VMc|JD5SXIz-XNArd6b^&K1fo`|YQ%}Ez zOgv@UuzoLK&DSbQI$49{U4G@3^g6UHCr@mm`HJ^>p6g@qskY7FYKmvK#KIYZWn^H?IIV9=SOjQMPKDd^M zNL|%hE27HF`u;$@cTXXqhMpI>0J3sM3j$XX^Z3mzQMO(Gob$~JMX)yWab>}R9|$dMo5 zmku7E$CMWa$AK(rc8_%|z*g3;WTG7$KyLQW3D^A0slS?~N-O1yBJ_{Ve=ppq4BL z6CgtlAe?AlQ@!3b%}tyo)j-r^{>MKGief*C83IPk3nThE;wlttK`XGUYvptfz0-2~ zLFwgW&JSldQbL?Kmxj6$3OK6sLPV@v;JRRh0mr2CEV?*ReU-+9-=-E%bSwA#- zm^H`9y`l8icb}X(WH+wXM57wf2YJb%6k~iO+<<`ZDb*%7vwgLGj(^>`%j0~5%M$Kf z81*Z+sa~MG2i_x8*~fymY1j%QXh*@basbMv!~iyP;d9KaJFf`#HXsgHe_VL&g@9<{ zzCmIE6?|M5ZuupLrUQibEsbUa$Qk*P)^0+xE*V;0?QRcP%-a?IUlyRPPn9v63SK?* zp_*?qxCIq9Ks}}w>b4FRN8cHwW;L;mPQNKXe&cS3A`T3p#l+vSK=8N=o6>Lp*q5-HUeXG+}1>}DEbd4x4wV3)N655!nE0_l|7V?hmfd5dEb{4~-WPe)OtzH&4 zsq%sTfC&iyWe|s4?J!0c$kX~NU+5}_md-Q2J*nBxxaag38zvX-+S+CVg?j4_w1gYh|Wr#%H z>kneTJ^rw^oRvLsFL|xx*mltNci|bRFWAw#eQr>u1J%B~Q3^Fz8m@)-;ZyBr(*e>` zJSY)f@uSB3$=a-ugzx%ZuG@Nwu6~EIfMVSy<86HYMvx<(cS5GpAxWK9ZDd<6DO_aG zS$`a9<@Y%;dk^0BM=VI6mtdDDk_;?bFQ<}y#}L#wYvNB&Lvg6=i!L!)KGPT==Hr{- z3irP4&WX+sY`FSvrqmg8P;uCSiT+dLd;Hye>UiJHUO;ZjqEjUb{96NxS`=eE-)2kOQ1!BSWrvsHtxv>Rb zGdS}lR-FoZ-bVWM=TgG5dZkr;Gdx^TtWRcdLQKPo?um%1Qt1RTSNiaCB2@MW$Ty17 ziz&`g(T3n9M?OcE5r+BX6YVGK_QErmb5xgydVZ zewK(eGg*?NiwzAIyg{tuWLItIV=o+6Ly)7*9LS*z+9F$|)OO0OOqu_@{M%o_%`Dyx4rr4%DL9G^ z8K2a$ejC#O2zEpx)C5@d2#BF#RoMJ?IWqd<+V_(#D7F%t_Nragi#Jgz%fvEJE9ub) z?=s1(?$Hr~kf>3-s~U6&dU`zhbeq7GJs2O*^Iuw^NTwR)^XT8IpiF2N(L3dr$2n)~ z@8*bluS|LLWlaJd>Et+z;^!G7cg617T22BL>u?U1DzMsai`r5HHgy8tfaXF7-4n7B zJ;88)`D6LBC4>L-#9ilOU}KE(QMq#J4xo*ehsy=a5RxhR+c=#T_9fRrF0$sKwB_^s 
z^Q7lC16Rkh+-UFd@5TwVGj6wHC1Fj67nR8^oDU)DJ_qOeQ^sc=Spo95cmK?>ES!S3 z)U0Vz)ntd8e%=3bn74mKe0Pc;RoIWbddFq^LCqSpaAo^H1z-9RyBf60G2fi*|BtTm zEKup`H2TW$^4}(+DAQZCYt@+w24kMVL1 zawGNwtA>7_)@v0noEccvjcz*o7HTow^XASJO!Cbm52Y#G5>F? zvU5Z|*DRJR+)`P(Bdj5>TD~F3+5Kw3MXxi6$358xWC$Ym!1%WFfqNh`!04JeY`Y{&odo?=W`<$3L4DNlf1br^^eq+ZU+hW7`feZ~7I1K1R~~@dh>k7JRBv9YM@k9kUZBxKV5lg1 z@mz0@ZqIk0F~zMdyWL*Rua^8CF5(O_XD<5Uix7ksDMAQeUtjyl7)Z@9|0L&W9>ikI z1WngaC;<#KV>!PWQ3D}Prcu>3Ie4Q7#e-XjibiDy7;W--5zmnvQifs$;FbKz@5An+ zqD!2D?z~8M<^66sc{-(Smiz>}s-#l&$17{YoGFlRH*~*bUosNH@JuCUkG++*OR{H% zZb6T;nX~CgK3zn$T|4czqSGpr-6uJztlT=8a8&*RI6$9Y-&6CJH@-!5f`WeOGDnuD z7zvtfLr8@w-$!RNDGX_3-swUkj<>=D zpq#@W0P%`$3624$l7JayQaz9Vvb76>$42tMgn4fcE-6d>F@Ljf_Gc6um`Q){6NUvR z!+~jewW#e^A&heaIOeT+jbk~Eh-1ishFi^xa!NoJqYqrPBGR32sqx~)j_EA#1F%by zu^{_sWY6Gl-{4D*h$sZ_nZq;#FAl}kcO#NxA5tix6tM&CT!%5*g&7E?%lHS}#gvZG+?@L@O0X{GSM% z$1Zek}S7<7gp~(T@&yk9L_^0mrpYa~1y{pDE5v{wr7hS?$xtbC18S*Ne9N z-8hg7x8B}?6)Pnx4m^mn%&X?}^leFb*}rQFt;W8SX-=(k`waBQks{02pL5|AcV(Zt zIVM=8|Alb~zYn+$4HSKNJbnuBxoTR;Xua}n6Dj&-RIwnnh7I}Kwr_N-xN?HKQoi2c zrIw}4^AP%!ind|jes$vB!tKX;sc2-AHFLKTr#PH~w8Yz)Fz#>tR!|Y4rejcjJn0xz zw)HU!LOt2$xysY@S8X8hG@RMrk3=A$tO2w;b>Sb$U^sTUCbdw>RiiH6 zYB1+D1E&n`S872aeC@T(R&E@C?9(d=NB%y`f&|Grfp|Bwp@>1*NC6@0;OW$C`IcU` z2+rIe%EVaKdCTHY`NAhJ4UvRW^kUm8V)|-QqM25-ewwg+>H$}r(*7UK!Ctx}UF}ji z0~f#jT0>la$qIJNVsgYRI`^>;FocD;DFP>Yd~g?HI!*4*AO?_-RJpb(HPzem3SLvy ztRF9Qo9RQBVU%&5ZPE8`?4#iTI5*a1)hl*T74f<*MLLGvrMSevM{DH2Nd_1&xK`x3 zO9Bj;U+hkv+01$4Z60)i-UT!rGz%|?Kv+P(`b;q=(Pwk#2OhiUH;mOpjKjc6) zXJ*eC7aDbo>7NyAQ~2b0J!2)^l-djIUduUIt*kUpi`yD+8o1;)UL7W`M^O*5*zM|4 zHz-lE7Fnru;1J&FL|v0m|8^Z%=ft(rE@wOZmVbvUjrr-tM_dHLI@iCt`qixCZKlzs zpLd{Xa{dCY{AO*C_j0_6r*2aFcehS^y}MUZ2eGPKk6-?2ZpHW)q7q*+xEP|mw4et` z{JnW$KJWNWg1wWFvKZ-HeIl}s_0}ZBC~L4Lk93Z3;+Zs^OOpO_D)tF%F}5fmN5BJy@3igrG3^ zNN?B)d6^b7fEKt4nuCm$xH}b|bG^zZtaKMJRzQ9_f>ME|}TeRNxybU<2=O=dEHGW6Q+(EyemSxX2KlJcy}TB{ha 
z4`FbafPu8$*(|+?%uwioW^ZwjrTZXSEFHNgU4sG0eboX42#73pC6KM_h%5Px+4DHC!%@fqTa(nj%Q&uW6}Pf+U!Is0ga9%X_+?_8@ffzt2?eZrGnC@Q+f@5CQG z15y$)LYY0I>6)d}E`w)pp(ygOtlj;g>t`xC(JF`l1;`)XO>;(qt-2+HD_V43)W|cX zi8A$XXAXqVBym@?BvxrwhMlvCNriq5O$apZD@V`-v&Fm!Wo~2H;%m!Tp@9$Le1NLH zL*anI4M&6yF@wh`g%jx+15zv^2!q{n_kY^OT#-T5>=I_6y3uxT{smPyHpO+Cx{jrV z^vwiq#kh=$wc5yVh-@GTdRkc{O51ZW<$mT9)-N!7Ho;-1va`Y@x?|%Rsy}gDZN)vf z6>kkTfrtZom5z>L;IAH6<>37OM~7k?dxDs^tK?$VvE5`aM*`#?(*}YBiA%KnVg&?D zUTsV;V|1PcOHr|q9Z;Vv0Ir1Ug~G}y-j3&V5ln?0sOx2PW3 z*z5A>LlHK>zSHuUlnnKeI}i2YtI*rW6+OOB72N=}6c+C;aY((K$No&pu}+`|$KPK% z7Ze(;+aC_!Jadt8#Ewe^3m*15d!IWuu->joZO5%KAkPy)MEKDoUJ7YL+qpWx>DThM*)Kig?7C1R#354X5azPh_j z_osI2G40$GqYtE;WDjYOcHy8u=*3d|=h|}bFUFb{K~dP$3{}4h^t{{W>AUuO##3cV z-;PH+_TM$dSG#fW+8e}eyK+#~`z;T=TrNyAzV0B)|9?fHE9WXJ7N}ne_AVMm{YOrF zzgIr!{6$r|9d-(ETb~GghQ=o?U-yUzlTYgW%~ab;?Zi^Jz3!FC%zQYo&hokqzWh^m z)3q=u-$SQ(M@;XX$93u*{S^7a;UQlMaojd5{xn>-!WkHUiI}&msJm77b5qMdoZqfC z$X){L)c#MECpxSQw)i|d`-@4obhFPJdnV~lioW6Uf#Ca#E|X-RKBvV0mIDs*gnh}L z9y<(%m1h6;JB9FgJl{&k4Q9y}f|V21llMQHzh@rl6$_}}WLeW)3#9N{HRY7~uXe_5 z(1VH$gBNY=bJgFJzGDO9k^NWOtEU~$-osA~n^Twmqy8YHUAh-kiukD<3OO%l(Ff(C z#DB)qY&3?AenAS;QGCP?dkr;D4G27=66E2_7M>KSr=5KjHW9cA^n8OhfI z4vaRe(ZN&qSN?0B?jS0=nZ3Y~?Q^>`ci!Fh$)Q$^J+xB3U2BJ^4oD^xJZK|H3_!jE$a8nk>{^p%YRcVajjglMm2pgo+dD@Y#;&3uUzI*P5u2ik9+Iyss<5;}f7+(>e8FueCZ}fBI z4BAbS`6#Y;oh@oXes`1ar!QIH@?UdhmC)0gQ#hn&Xm$&i@{Ow=ztZ&V&>?9RcXGe- z+UdMwQwQ4=r+Zppy(l3wHZsV^@%o==+rJm?!oohD{A+) z)c*i)86j-I1%xG#!xZf(2@J3KeiaLQ(+ZmXWhlg3pyAb8N6Q{mhV}diiUzcCadvv; zl+5Q|Od7sS?!1DW&t4^ceFL6@zK6G>1!nLhr~fSPIENsW2;j1254f<7e?MfKlk$#q z1e0S>mkCm483+pHg-h`B=^BQ#`q*L715OCm1z5-#d;E_N=eeUtM`MHlrtFd9UuYyiRiFI`y^^0CQ}tRhqs6Yz(KH_=*9@scDUf{(MMt(*&- zfIc)y$E5| zPp=|2@U4zEvgsE_sh!(}Dnj#U&bnOe^k|?MwAp;MUkv-oe?3EL-|Qe2lsw0rks20qWv+ARGsx#RF4;~&k#e0(jg+!-JK$#gh)uk(B0hw zNJxWpOG*#jFo1MPcXuP*Tt44*?+^EHIP0u)_TJC?ys&E$A;R0y=Z8@&2&W&aIiphn z4}0sBUnSn|g5W6YMP^zxZ)fIX)X5FDoM3chq{tTH-k0Z#FE0a>&p*u3>aML8+t$lz 
zBzVM&-sQ1d|3T{R*W0^a+$BCcww^xE*M5F?8!wi%H{lmg)j0VlP4{f2;ITnLpCCOU z?UU`ovK6;u68TZGEV=e8OVEvzGTNA4IHd7Idcy}PYyGa*Z~GuNF>#{KEM)v@fDOvB?0l(MEIcNTYt)yDl`Q`OuBsD(spvh5k%&Zo}?H z4m}8I!(5!Fi`PUPz9+a=c98AXC3i_5RC%(+m&_vbPDrwlXdI35J&xt(W0f%~DOpGI1=onmLXO!lcFlcM?LW!sIC-GMAX z8f&K;drm$uyE>c|h_;Lh?WIG+V$9h#1;xbyZH5_?M zN7zzstEk2Ob>iP3O!L`qu3daoL|g5FB(VNgv5%kLilO1FHnBID0Kj%(Xf)r_N4TBP zQl#@7j>uKX{QHHVP|uCSA;kmY$$b@FGHAfF{GYI|dSE?v@1s)lC?CY;{4AegOc*ap zzE@sLYObKPU5#^iHTT;E;JfxV@yN!s0MSlRx?`Kt)0A-kjKvKb!lx z<}Jxnvdzr743V2{aG=>v()ne|zX|f@?jEJgZ$gc&-oY_dSnJNot=OJWHO4QFQb144 zHf02D8<)8jF`Rt}_SST)a~^v&S(M=xDu->(r;Qmd@BD&RAsfJ(@O!k&A zV{T>mruvisj8Yvrhstm8foCC!aK?_fbjh&7#Rw5p?exVSb1Pa@!jcPm&RI^bXj14? zM`oFo{AQsCd_~2@h~@g12g~ctB}SnitT}yR%<(U~9P*8;>@=|})9R?Ob$R~9`p!!1 zH>sQI|HlI8*)KQ^L(?*na?vA#vWYZ;@H+B?HwPPT3hRjcfD1O$m#)YXIhcz)z& z0Nc)Hh>at@`)55X`_V4&6@I!yttZh@14MY~kBGJ3KBcJAZjGOwM$QFC{{;`DU0Hzw zGR`N(TUq?~^Zxy6Z+6ajD%aa;*OgcKol1$=n!}50IFCk3pKUKG099AtuA#sHNPUy)|vY%Dus zKTudVw#XG);<|2zc$)SWfN5c?2{yVF-dGINn$DYs(=$I zB-KlE$^T!v#46{y1qzdHL*CbA+MELds_p7_mixdKQzgWNcA!KwSv(8@*x?bm0^RBu z|9Xu6CfP0Ncfgh6S)FWQH7#VEMM5Y0-OgOU;TL5;_7PTd@Fd-6qRthMAz-CHuO1r* zk~f=X6$|_$h+lE$wa{j2<$qq8^i=z1w)KMK%hIbC%)7{Hu%dt|BhmTJ?gcMK&%`ztu8n<~kVu&>t;)7JB6&v~%}3cQ2Ow zw7RRSOfqq|E@x z*#Dt)78d^{&}z8PY3#TOj73|n)rcYBYUa!mwRK)PffLtIxKVv4x=uj~MR(OOM9{meO^#;DDL zKN6~4jm&170{v{6qQ2u1m{HyR?W#$5imRj9$@pG4$6q870-9rA>S(2=Cmo|9j8iqt zbn9~tTcVV~e-0a50)zQcx!*CT@kq zJIOtD`c{(PgS&ya@6N)d8&=WaGozYgrk25>Uus({nLjt&72p+9@gB5$tMw3Mzf`d!vOIE?oqOQM!?8mF}aAu#%M6iYwWyPq_&c zhBKjv(Q2r)2@g7~3T; z93TRc<&Xe*ilwdMfnr~T-FYSW&{M3)UYh>X<=$Vtt?D2W890^=D=+=o>8!lCI3_9Q zU9c|bwL=SW4I}4YNaH(DWQ`r5aY1&)d(H5z3$Fe&M1+pJ(yO7x%LU~U*Lk&zn7w-s z;XCsIndU}M`Le%4W)=|R7>B*U0WT$S32Z*D!Wn`-AN;wU4C~Rq2gVi)W=;y8-xx)s zooXg%{poOqJgfakcj620ADiKeNkn@2btD8-DmnMvI1CbqoXc|g9wUk2&+ao~XuGeN zG{n{LzY7l`$ktHWGAfxGxkwz0Ma*!nHWfpwD9W@S5eHC;YhJJ`0uU_{#@BZ2N;FM2 z*d@{)JTxmqwD9s&BQ=HJ=(HXK&2<>>@OK#_)Pnj4)-)XeqY06mdnYaiH%l1xM~qk5 zP%|0NR%_d|8s{n(rHa9!duuE=$xQvCe-wBFkwc 
zpT$gJ6nhray!fmwv`EcK5?``$ZK@ea%)|`?rzz&XAOYZ4`4|CMCs35xca&`e=NP3oIg+}9~9dWBDkXPYUXA|tVmrh>U=WXP3NCXz#;87UL*FKUcGxxYgo5~>)Vbzj!lUw)rY9*IbNQR-4d zavkP_iJ?AOs5$)*Ra_$)viZJkQXiSTEhcp`37z-_?P9A9X%ifMUfuY)hZvpZw56Se z5!U$)Z#A+tGulevN*F$S-^*V&1lx8=gWH{Z&`0UV>ZZjC_UkYG6DzD(@mh+?kf2z| zdfLm1f?{70-M-uXAa0+l$buuc;JhPMdk3KyQ&NW`#sW1OhWu@=7ur}qXrc=+k z(mWzGuZP?I7YA<<^31~mPNICxlX6~owIbw9SD-=lNe(ffJzl}uK~MOHZ9~MclY>Ts z@0^AD15H`JZDNi$X|J6h@~JAt+v@SkUHrD-ki&fFJ;0yXHbi2c>vq> zc!0=rJ9c6(>v=5;DHZ*K{M9UdA+KU%c8lW?q@ zA<=bWi9Akt*x?D;%Ip1C>eY8^!5fu6$@4fgT%PB&vFWQ~TR-+LAx{Ok3K-ey_`q48 z3&3HY&k%WUrxsPm!uZjdiVGLL3?=2->@7zy;jv?tL#0^#%5r-M>u`7#UQ&rMB?>!c zuk$ftAAMquOIwq@@q)+ib1TP8Y|K>%;VFw|TdMC*oW?#q(&G4P+vCNLr17fjuRz<% zySeknj)8jkWLX8`D^nhheysUYpLkh^hw^WKq|{JeEshU)YkjqC;Sspy?S7z?$rkAc zbP!s3A;{rq2u@(LT~ceq))2LiOR0dpD+I}*ThS_ySB~s1_YlC8Z-BML>|?^m1Z23p zP)v1x1dZmphi|;sp&6@t;Eh>LD=O38+($&kBIc$Qx6RfW`bK8dLdp6nWZNjGYv93R zWPqP%iqyYDdtv>b>nktm zv?SL&iaC;_CtQtjIvL;gWF>-6-O}X>;vLNO>7Y+$I?G_7S%!;$tyzWeQDa%Ny(Ekm zw&?`^fJw1m*l7lQCjoveHsoi#7vuG7q|U#0YJYiejo6b16l64E@reA$he2yrSJ?x7 z2F0M!5Vx*%nrk~ybR>NUW(`T(3lqow)75FA76EP>l-zaE-j+KuJI@Fg z%8ow<4SKeTCiM{DbBl1X1fkCfa=bBk&P*&li#S|{!?PX1Z{w;@=AEb1WRU)1YiT9^ z!&pU>ZOltQ4r3UvMZt$U^S566y#4(4VO2&o%aRm7mq?mn^`vP!#|6318og8(PsRD` z%imh8Iui5M)gZ++mRCmUXO7%K0axU^COxQnM~h6Rr|wS0?^3gb>+7NZ-fpB!6>w_CVXD|0G)J^+otrHi#aSw^zZuLN;WRNHC27 z$WQMYRwRpc9J0C!pTQ7Kjhd@I{1`z3xF53|j@ykCH;eG#zjwsz3P}Lda`>jPg#r0Q zLFyzzXTp^P$bgHka3Vf0TPTU!7diM}plt2$nEj-=nihT*x+V>{GGPZT)q#AQ{_}4v z5Ol5?_alfB=Pw2^retj&;hiwo_pTp5BApbi57{W~Fm?|vFotNRmA2^=az6`>Hu(w> zad|iwH5~Ez8yMW9ma=QF+-Bc0V}kdH1^XHmGL0gUjeVk*{QR1Bs-N@zy}QIsFwm&; zm6z9}hFhUOyhAOUX6v__3uZ;;az-b2^~LnT!N}s(6?PEJ@L2V)`Q`J+|J_zlLJfcE zO1uO_SKg9+9wiQk7$63-TP0ixVHEaGTohFoN&0*LXA^O$44Jvd0LVAc0AlShA15GH z4_h9>pRG#ewP-+M{^qlg042i}eq|0A_2dx+YC+Nb&+X9d4Lm6@7By9a0D{oGV%0qP zhL>dH^X1qtcocOrj8z*}>ND4JR$LK2wmdeq^Ay#VyHx40Z)1NZL zaj6b60}Sy=el{f%uyarg*>6DhTctdvY5py-kS!fe(t!v)vc_ilw=U;R7)`8>pB)Xd 
zs`OumUk0mztDwwI!W*#@4`va;lE{xn(p)BKN_H_@+VI7eb}LS(o2Y1rYH)(N4qGPA zde}b!wsM|ApCzXd>;f^5j{3sDxYL!VyCE~tpKV7AZ7grZd>18{7dlpsBua%qtwiW? zEZ3e>C3+_8SDK@q(<-x_MD3HK4@0AQE4LXdbN?s@K)8>=d@UU-?Hg}SWLM5bR-80V zydDqqPc}eaet)2uh;jDiTEp`jZ2vP8I!sBkx~Zf8FI5+nKwY zvl1Dw{O0j5t5h?xtIB_HIo&u}5(uzs*8#k$GqHD?2Wsg<@S?nD-Po0|^K=Y16Rq3kdaRu7=X#}Ik@Kbw1=SzbvrUXf?O!4$${yGr8XHXmSNLfL( z8O+a0+BoKFg+SSGNs)a@9Yd9Pi(c2 zRs2Hi&%em*wf}@t(;t^?yVmK@IY$hQJa=9<>Uk$Kq6?2Zvk%Raxa?uQbl3T)*ujDdUs z7Q&Imn;OvCU4Sv0%gg=m;LLM@g!N0|K3sB3bQOCfD{ zLvqNa=;Ie!$?rIN*tKOBN&lmu*>he_ioJ~qkO{8~r+0bc>@J{$e+6J9a4?jlq;UxR zCf5ej*GL5uT?Q`6o@ov>=h>;MiD$Rw<$z)_gS8|56o6UQdvc{$u4S83k#PUr5@5;c z?RknFfn$eh4~eX$b9u+t>UbV~k;HR>O`c#4Qxc0M3=i|!n~|R^Le<6D2~9osIon-v zZHV^ll6vwCfT8s(pb~8q%*D0(;|@fblG;FrxB#AlVO?mMb~SxJ79_S>YhjuI!(zQ# z=E~HAzr195Hh?}-YxDWH_hq7aSo>=G1x@B{CuEC1^)6l5i0;+nEK?`s41-C(#AgV5 zZ^8nFXh$M39!d)(EQL%@ohsE0-K5F{hM_p9B|cj95WQK^^^DETzub(@#B#{s*81xUrG~n^gg?6d~xVZ?v36|oGekFZkOaiT%Ipr8m&>-;*8RiKS z|7j=T8{pa2awO| z!Vf-5xjiP!MESb>I@@T^46)6m?80)dk-SpocoB#onJIlG5Zdj!0@#=S&?bgYu6U95 zNdD&MCWcFYmwc6nmttM(6*6C|O}Px!n8WyqF@PwXGQB+=zYfCH0IA1+23ECv^oEWF zE`J8({kf3d`z7c4>Xax3pO+ng^|m;HasgbiWsDSwH;Qs+x1{7(E%4=cIHd;ybnLEt z2p@)SdZqX8Z2<_97mvRBzTqL_NYUZpD2xh82Z>8~gpVvMp0p-6r=x{7whpxVQwMJ8 zcZV6s=do|>sj+%Alt&FSE8HXK546&!qfCZp*$(I1J{~8OIz;;l8skl?U1Rw#WB`9) z-fB@!`bvz^xzUBMK>6>%P!}Ii@2B1_q~-5rZ1!#{pI>{=^b^N?N)z)@6iDuN)&G6a zB3i+7@ZQIwHj6hBiikjwE=OJqltPw;NOnz6^8U5bdM!%D{8~UtBdFt^-UVpoGIwJ* zNB#Ng5-ha?HXAa#Cq=4Su-z?;S>911v$xpMPw_l`L&|fTLb|ckV+$t~7OQxCiC0Zw zIEx8RxiYeeMyyq>SO~xU+Z)k8bnF&;uGej)KC6O{s3B+cT;npaA-Zxkgv+^CDeCpg z;qm-~Oxk8+b?`8R@!hSwy*?QFwJuv^#^m7JpQod^Bkru{&;YpY8->=8U`Y;f8|B#m zk5{&b$O9A>gSNR}FUnl_{bbJmh*`YklUs)8&2;7n`8o8zJfq#3v3;oIvk7+H6pw@{ z-QFW0?X0i>$5-cXI#Z9d&jo0%fJ!2}e9 z+AFEP550a5Rbg)La9s+2_V)HpR6O5R(3Q87>B$;C|0nQ{lDcJ!Ue55n$aY)?vtBO;}+1S)QTBTiGtEx^kZdn+x$4DzTTwA-~pG! 
zCid-=+m%HV-M&q=lmL2X4|M5hr?9p8ruqqfXDv zi7?AGRPkObWPZ(4<4U9_mnv=|qVyt`D7mESu*V(M>(ti$@+GU`n2gN^Rv8Eh8rOiap(QWW`rbF zY)cy~v!J(7rx&!P{2hdZYQRocor-=Nh>ba|$A22bR8u{6^mC(y3uZvaen?F|dSk!! z@ndi$BQVR5c0qDBb0Jqq4qIw{gFqcO+ZS739=t&z4&{hR=`%&xn+%)BBKayJC}2h{ z&XL0W4#hRhK9c;RYToN>X(NLOEK|gRF)~XyjgZbvWv7L)Oy?WW2KIJ-R1*NJZxK@y z!3Y>FN{IC|dlE59qT<+)Lpi@0&>2J5eYJav+c87xB_a-K{dHA)>U(8w12-1)%2o|s z#oc?5tV#{!7M~p2-Pcfe+#IyHqc=00N)zqQ8HRyK&QXMP^ML}HMy_DDul|1MaNO8^7qhh=Y@XaQ63r)(r_Su za$+?=q7d-tI*%W@Y%cqs?WV4JDpvUrq1Tj_H;P%`Y- z^ifqdm}YH(@Am(_nqjN}dJ%xC&8m#Exvl6&6*XppvbTex2Ms)c>*k{bC~k zYS(fLmb>djNFf)aqg~Ag!LT=1uqF{cZaxkE6rs)zMB8UaNL_v*v?EM(8hwKZtZD5j z>~2%Rj_x?_-Pa>L3Yj{ylVjgX5rfdoppt%Fig|XhF0hO7v-;%rpOTmlfIWgVQ!j4% zAHnWMSZnL{ULh+x);6C)i@Xwapgl*F7WuGgy3mBf->z{_bMO7z+un;~w?~CkyyO=3 zgRgYw;#MdEUI%xY6Zes7{YV&;hsBIBzx9~~{NZY!y1&ujU?4V7Z9~{IQ-m;islAb( z7_%*GeT5YifhS8{5R7NHw*@^sIVl0vn_U;biRZ+>`69Y;mnTC!FNVv^8n3jv$n&zK zEYs(JWiJMw#}f_zHAI^Wmw~eox?iivz^r!lLRSV>QsyUn=lN*$Tkc(p2% zX+(Sih7OD7w*&sQ>$}OF7d8MKe9wiL?80bio@TR^tzHZPzyOU!DK|l z&S(X1ydC?~ImZj=4{(dt8~eamLETJ(_4*zu>YJrmq8tf{bcD!2i5@8)&t`r1*)slL z94=oEP0G_I9LyIe5u|$c(PI+3;vl2|)L3W2pNg!bvb8*XsXR-esfP9*W zzA|4a@G5;lqkbypr6BS+#x|XBFI1Q|akD5X4(vfqTvLNa%zphdKwL~S zf_C)i;4x+v@IWFqM1pV%;qNj<0Sat*%$hbs%b8q0Wh}N9(@DG`BV{`eN2hv4#;@%4 zd(UR)p~p&`{ThumEDLfnF7vBgNssrL1~=kbuia4)gXm>q&5E62#9pcD@epC1xsWov zCwX>i77~F@y91B_L|#?bU{q!Y<$zKPol?d=M;{9gmjPeZTEFteYZl)aIgV|rh`+8yod*=8drE+%sHkl>uuF2<3ve7@_B zN6s=Tl=;rUV|@5S=ph1-}ISC@5vu}Q?~i{UKe!relnhWvr{`f z|3btb)-F;U%Cy&b~DaiK~|EPA3$X0H8E-IsahFmSJ zt%u+zirJ@j&sTOrZ2$80_*T4~?sjFwQm;-{{E8&&y>ZNkTEAq8K7JqZD#mGhNmApT zO@Q zOnm%eg0l~~|G$dSD!c!Ee4AkZ-^cewx9a_?ML`&%WBK!%01+^QP(fMKh0D@j-ePwy zY-msO;rY1?zxUKPAgshO!X~CY+`HT{bPc!GxgtXa^iiXDI2%T80RV>AwlP1yz>%+E zWP)f(rJ)h|G04TkSlwqgznt!#VqN}%-=?Mte{w-A5k&Nmqsx0LX6E0;3 zik}#Y?foLtELRE%I-%|obE5~IADO#!ICG;o;Fm=(A5(#LGLsr+CY(Y?>JhfWYP=o! 
zhr`%FM`s{zv=t<0TAl8;&y&E6Ph}a;v2L4uRy$bvc#N`_}1h2!OK3hE(b8~FNP;BzCA?Z8a zRR^ZsgT_n=5yW?{f~GNSLW;s{n8emzCMdyTHvEzs(LIv3qO@Hj4(5%vqT}5o&~?{u zHUPc?J15c1jCqq0hB*o5sB!?nOIp4py;d&@trrY;l=3#XlW6VbH}^2c+52}$?YQLX znQr#L(SXq6@;9_!t|RfF4zm$K2opdlaeaRs-VqE|rVy9BNgfR0P|WC>&X$IiBb4Cg z%3?waYe%w)^Ib$W19Rgs?%(#3nfFfHi)J%iQ3=&xOzT@0Ou@+(fYG zQB;^7wmf{tqVGmlsaE5j%C;aW1U}g8I!UU6oEPlSgAx}G>94fEncC~1r2bvuRngu( z*HU_<0;bmq2PjIxlP>HWSc#^P#SJDLWNlG3m?hX5>c2&>Ar>4N5*_JqymeT^1yE~? z(xlv2QF}coxV2H~Hb27vwk;6K>83DTwZBGmqlXMh=PDnMPBBmAj^wp{AQFfCR9}BY zbynLT(6WNk#V7u_OLnb)+Y$t_F4CUCJLTLF8@4e}YDq7-N39|Vec25v_k#(sGJ8p- zq1XqAk30Ctr5N90{(90V1(@{KK@9*dc9cIKmo;<)jQ~&N?{1y^)BYIBKPd^(lx8Ia ztL&kId@IgYO$X&)^4%V{^ySLd<>%Yg0T}Tb%uhWjKarB)M=b#|(M$=5d=YY#(Q5;a!(>E0 z{S<>`m9kSYtrKlCp66+g`bTx)_@`h`^;QEp!A8(*lr4w27v>Fm12q&wI*TM}HC_M( zc8@4+WeSiTnw5oJHp?AI%aZ?`Ae?sX^FVxZjcHcP{s-pGba@N;;S$H`rHHjze7=vhR0?ZPklI=3Q-g(i-9m;Ip>&@IP=X2=hw+J>y zr`HUoDf6c&IGLF#D?l@4&j%2hVQPSV(lv+jkj7VZOt`@DS%hL^BT(LqaLr>vdd=kQ-gXMA~VDH`s~%c@JBBC!dyNf=kaP-tX;*ZjKHQn6^5vkx@DwoXgEU2)_bmb9qkmKM&U< z@x|k~-7JyP6W!ah9b@4hP!;N(mb`FB##%=u_vD*d(Id>J6DshzmZ)CqfA}n3S4U^V z99!BNtsa;=y}6D-Eo`B6tTSOosBNnIv<&eN%~rNwo_nkHw|35_iM(>{ZWHZdJNr?{ z|14S34W!SZ;j+#&sMp7T+WP_AhpA&yp6Z z{tih$G5AXl6f6GYtQt?zc69$7v}P*k{YL(0X4}pq=JzMFoxVV)-_=sT`9BuA;q1S9 zjS6ov{gF2>NvEwE<@;n;3~!fO@!rYKannLLY;fsDj9|F>am`T3tL}2mxA4B!P*lkF zOT7TGRx}#!Mt`eofTmVO&3p5VH&;+MeiMg}0;I%BSaiyuSbM6JwSYeyLVJpp{F~l* zl6YpgnDZ_Iu#6JI!rcAP7WUtyQ6^r0j);S{sO_W7p9uuMf7=|2+rX2ly1_TyU1#PI zQk>b6ctxkeGFbom-`m%!>|QQ4{$k{I-q>$B5VQG9Hu1|sBg2iU9Dlf{IApJfp1i&> z{?UB8cQ*z2oULFf!T4+V`>=VBNff!^PyvC`?I~=zobL4pOe*Ch(CRD4*&k}WB8`c( zblsM?yViQLNye1-LP$XP7AKWiohn9PR;i!kc*i!POTHL{&UTWII*q>3QlS5?8@xv# z`?`xBSO%OQXV=@0@&Z34Zy97vPqUH7f&zfe;0;1CfyeI;Tu8*^J;&>^)z%Czj2^yP zhDygPEFtynx9h^5qs(7;&aImvH{C~J?{K#QD>Q?!kbrMuob#r()I2wQf(Ka&z1 z%hBO+y+SVX6|z*GR2lqN?t&hN?t+EsK3oHIP45Wp3wyNG@~V9W?(YEvduq92N6Di= z8CR*y3pKzclBn4mWQeea)tjD?v(~%4)i#nTpMC*C`)>4;x9aDgLMh#Vg^j@kHx7PQ z-di7Lv&4=TCDviSDyyd-1P9$mA8NjF13PSrP 
z@8lhw+^=UM4wzm;wdpqZ$uG@yp>B2Z06dnBFZ)CRjB3p|yg&hhKRRw+5B?Z+rzS=k z`R(5zZi`uyn2DPB_2Tofo?FPL=!cPxR}Z=`Veu8yq*P>W_*hJ+DFrdpP=Y<&jTaw0 z4r75WK6pfga7O3&oF&6jFnft?Y_q%dox~$L=pkTLHM*Hl-mhzK;@3P4qzz&QOwqle zKXBOPpB$*OlOUkMry(o7Aq)r)WoZ^NM-k!0Qo!L6WA?YY!m$}Z9F(IxXFdaT=hT}M zA@HON@Obhqpd+~Jm;@pt?c?sojt*aE`|O{egPV@sN6yFje2)hI#7!)IW3`8(KRn9P zUSnew_^9iefR`sX^CVWWLApy&e;^SI4Ky%sHeu%r?C?&!AGsRJ_A$NS1r{~D_(vo3 z5Sv)U9l#ze2_>U^g2tg(RHm+#5@$mZ#v-b%B;g&hym@aW*ab}$R)qCmb8B#(+(@Lq&#Ce_Kr>GrrRJ!%=EHBX%BqG3R%ub9c~wH+V7oUS^`>(I^fz z@vt0{ zkfrR2(2s-h@hUVsV`+G7W#rYcsu8N;xklWDf5%&d+VnjExeB2sdIU22Wsu_)6YBnc zAjj12Mhye2Q@4D-C=JQq660_F3%4 zz6nxH$b7PJM~8QQg`x0fRHjg56WA1j9iD?0N0T$veKNf#pl*OJ7|pa12xH_;(A)VSc>n`XAom z8hyBlbWEGN-su)HGOfH~K8^p#pdr%xtPzIR+<_lHmrvaG#?i!Wt$G-Jx7sSWYXck{ zq3t*J^YJz$!@X8w%oYG6IWRT04iP{#lbhROV zKG&y+coy?I0cBgLW%_09`IwryX?lJQgZz=^ApHU1uI$l>OP7MCTJN?HEs=99ehIg*a#lLZR)i^c2l$x(JfpJ40atB>%AyQ+&}`Zoi!AG zDfq;!;?##C_8NM#)EpQ0LDb1i{0CM<^oPxG*pNx>tXTM-=bVs%Ws#%v_ATbLOU#R9 zCR;25mg%=9RDechqMIVj-I~Pa`U~7-zl7!Ca+Yx zeEux3+8?An?mGEmQN+v?3qHh{jlxRVYJa|OUx^Y)>Sg_}mm>+Oq$s??_p-!P{^YRE zVWa0FHrksy!)0320s?`UeW{k~jkR1WE-C-qOW}3+ZKu)s{LNmY*rN_<+-rr9X|`}1 zkJ~yp>y-ZXVf7JWouCXF6%~$s;dXv?l%JATbwTev2&m{2KT#&q5OB{2Cn{i-0`S3H zhHt>?))+QZzDAM%mMY>s5|F76ItM{~^&>zn#Ne8X%7yEgYNhuqo7%-oc9ooIzf5t_ z3d{f;7$t(nRMH$8MWe>Q&F}<1+QM3Blcqokf>l)KJjG7WG{ZPP(feEVNtq%2u zdv>g*)^g_%9?8D5G~-HicklMl6c-WNul|@N5!B$#@9)4aHB74aVjTxG^jC@V#OL+- z!;^I!r^6EXSm^HK$uIdM(t-|o2l{yfyw3@@>ebj0l@dcS4WH@?mU%J;-bF|z-AD0T z&i9uJ0xHL9*i{bwve!RoCaDV@WVD-U9I^h6FjRygZI_1-4q7iT5W7KkbX}i7zwLk0 za-G?%;)3FTyu$Z&h7WmKJ3h>a z($;hMoJYQ99>I(B-7uWIl)v1;IQ(DP+l4I^h4***;>zvtUnsoRpUIA!)L)NX=lNC! 
z^q!pXA^!fFt-%E0j4~mos0m4r$BUCfMnX~Nwucmal8s-_W_~ia9dXYI?-cpw&OuOE z@J_(3j^3?*jZ|h+?*k3XS|0mxFGn|riUYB__r~0>{WnxV&PChrVr1TVYIuPbnG)5# zk9fC08e3+2m}n*$=Se;AKqhXyj1V8;U{SPY-yywpBiB-Ok#a~-zG!ra@_YDQ6@CZB zP$S-ta}c%7xH}DwaR~XDZ1IG?fo5u~am*LC=yq{U&Y^D}cDVJ6%3xg#FYW*kgal=f zk2*k32?-AFReow{=O@~VqOeNLd$a4YXiVAV0)aBv)&XP4$Oc39OQ9{#gs8&kH{mVC#{2hM@+(wA0_HFcAQzKB zoO9>LVmv660O*zH#@UaMGTwH~ZXNTGTo=3J*y!8p2eMBxm$;9E`D_$>$sINBGFAjx zL=KZT#_7)%;suy|9Q%J>^vBuZ~i#MhxM!) z8DNcD0(+vR8nkqwPWdHn(Eg`bRTG(9xBH6!!E%n~O;LFpD?HW{@8>rE%yr`D3G5pVv!mw#tkhG5$M+Kk12bj=KN<+hHuyWCXo9Q&WPx?2D zW)o`adYRZ_r+|MjI)eC}W`&@?qD!jP_f#Z2RlxqIM!UjMoV?8|)ajh`4tef$g-4su z=LfSI?Ii7pk2talMzS-nWAX#=X&VmsdKVHX;|xoHSO}TtQ;l*6ag3}o7xq^-Km%Xa zC|kNTbIMSc!f~}@L>JR3z7mmrF|UdHw8Vh|${sgjaQ%W~WsvRm12Cug)HPB}#r>?y zA1Tmfy7@SdR`$0C=FdY9b(pruz_%=RVxj{maqcPBnoP(NR%r=4wb2kq*8Mk^^scYw z&DIzJARQ<>rki}ZfdG{`W!?%14HQ5Sn)9J%nxXakS%d8y<+jyIvPNm2c!>zoU8js0 zD-|TFWi^+*od#K?q_J_i5en+xTX;on@Z$s2aWuD23~yLU&ypb`hAi23NVuep3ZYUD zWPX;hGI*S_^hKah8aZY;9(>YDS8GvLX zSj|l11cq8xoj$^qjc10H55ZV!(eK||=wRb>qBZaN_(lygQ0R&P7fY61JY z@G-J&8{X7P+VgdcoqBQet$B7bl4c3GaqjHj8XC%z_&do!YPBlTXRn&?(_D7Xm3wVsAx}lZxt&Qq zX@|K-6qDcJIa*iyw%cfm$H@&@@5r3Y$|6fptl1m)4|j+Cqh-PyQ_D-uE04z?6=$io z^ag?Dea-i88}JX3mi`lb6k@Qo*v~LIvSor?k2ff;8>A}S*MHCzgtN8rm+3aUCdq+s zus3cV&PP|aoQ{UjVp@E=$d+MKzYw*~g&(Uh#-FX*xXM1fG4Z$YpW__l!y=b+#*gs?^~jeTb4_**fukAvpa*_s?2pDupmJTyMw z9*9?73G2n6=`AwZIgDL5I{b5a=pTlv08o>=$IqnCW9n#({JfvM1kr?BtJySfPi%e0 z1gj==nAhj0S2jEfb4|kEbSn`*$@D8p#pL>iVZ+U9+~} zeVNE_J{JZYcF}fuc1}4I@li<9XU=yieQOexiq76#0)OKtu&>H99Or+LBw}D&Xy5}C zO{PG*D$Wpj`l5pzSav8XOl#`~T?0%XEVlHq5dw~#a2)R|Du0gx3Ac?Q556HEHlEPG zUsdzHJ=!EDXn7}X|NbcV@nzqM5I8-zMvF2teRL3fg)7fe0Kj1%?^ZD!b zXDWw6Pqq7O@e36E{+7M+ON$MggNa8sbWAAbF;+jAxyUxBk`4n8whICt zCa#kcsaPW$uxaoXRT%Q%8oIX305qsY>Vu%5uy9>_BdJ!i8&SWgC;1VD=kfy$PVyK& z#1L23b9)^7fXH+GBO5RkhQBbh{>=~A33MvwJ-D`spN6zV}pSMA%Qk1Jew{DV>(lPE~wkLkF7g{W5D?9HjMqAXf= z?^*7IDA`R^nHwGaBWJbFz}kZNJ1zb?bOKnGhe`pk1%q$1cP1yMDajmJjhy&^vPRWT%I1PDC*2>9DvXHzVh)Y*Fy5 
z?^s$%Ajpo;Fu;CQ4FtuiG%>!{c@tapGgTE6eSZ>kw;#0r-8ILD@|qMdefz!>#Qtxg zVfOp7epZT*k}bdD_$-dB^eINxt@GVTeOo4l^Qpu@bg>oXBQu)$8FiO%DW14AN&n@- zEG?SO))UWpl8$9+7D69Em$DDU-Q9_l6nB;$m=n+I`LH_Q;mUnKgZBX`D}Hh6Lb%Wi zHikha%92Ro(`EE&#=n07{F@C{pqst0Urw>*r1!qFK$ZQs+mVdD2JnAqIt#a`yEa7l#38xG$&*ZCjzwV%D$v+lK^(&F$0 z)cT~H&;L^*hrJ;l@K;^}%Iz* zev%Sz$#4qlTf%|p7^!Mk&K+yQ1H0z{%-q}s=?~TCPkn-HaIv3TDL(WGhCoUIh{X?2 z%q6~<2VUXqdqr`2=lX9S!VpWS2a-ifUO;#rHlHU~(QHbmXlwwjDk{hEzxwFFH^XWf zkiY&oIQ)Qqv{JXEln6(X{mW&;d>8v)#EC(_;`oBmLwmU|X}oA#C?S`3Ivhl;E(&u-5vQfc|Jrm5+YFU2>uTaF=Gq1^1uJ>q+y1g{+NbAmKr< z&iEdkq|w)K+zR|nxFJwPLuFh7S1D!PbY#qlBq9#aFT|x!J5E6>`B9CBa?skO zv$0>fO@p(gnQ=;+oX?+^|FYc1FbRT&E}3lA^bL&w82XH6yv-+HSG2nl7lhC9ZCy$CfeDOPv);C+%!pws?;^rEk4!Kffcume4Q}$8$D3~JaRPjkJ^4@(pt|2Z{Hp+vaL>ht2%n^YDOCC zdO5ked_-m(Z#-}RpAa8uls8moFI=fPcMZbY_5B`ICZgo|=Ug@$2`8$I-B6+EWw6HO zq$exYz~io`={shAY}QpieL-1O=YsRkXr^%j9$x31Qvz!p=!6_w2RE7j)A^%1HYMNb zgcL;uPg`zAQ%%RiOqHTVmhX*SN7ZfnuE>7Jsqd+Q1-#h(ppxS1i|6>5%K!HQ+%yrk zC*Ll)Sb05St}12xhG!aIY`j=pDD4`Vw^&5Bam`&g9C@u-htBLrT&GQ1XP$f9+lSB$ zd_?^9o_KG00b5hCQ5F8N6zT03>Z^X~T-62#utmFRG4#Go)FKjC5pebyw+J1?ud4~e zDi>`M^vJ}slF~6zp`Y5I-!O2z(IJ>otQa^x9N${QM>mtkcn>uHlxX={ggB9-TaI`h zEd0}zT{WGH?}?MYo6C`vwfq#ra2g^#O?#pT(L;PiZ-%P;9{zbc9`#@P7(}x9dvDdd zY)feGQ2p5 z?+RFqIDVFGgg(fSr7Z`$!M&pa(x&)AsK=c)CT8cm_K?Lq{#5YaZ5QWje$+0R4mF1U zB#xlgr6)W1pzt9kwDWn)WjC^^@bRRX9#f=RKmpA|>UT|E!xu2mh^duzsfdmv>!yv% zN$1AZ02#|_G#Qz+7LqTmc*~>$<1%e$nc&yEk8KJ&b&wr0-H<&W0Jb{}@&C)Imcf}g zY^NVLG~c7ib($hDmQWhsTfir-E^ZHpf(Gb$vpJ)ma;h^@%W9QH;Xw6H4DXfU=Xu-) zv*U&lwf^i4h+CU`bNcD@z8tA#_G5MEAiSF-+~?xY80Ig~R9lAoQAV-r>h9h-#qSRC z4mu)S?RQVTKCStHMCkGLrxACXCwlq(xDs;uV`97*H9&`9xyk}(`iO+| ztxK!hnQ2(l6JRh0Ds~V~X^x&>L2Wj*cOA!Y_s7x!;5(%;!z5yxX z%mV++jnEIg1L5dsDTI;gRM*aFJcBY!QeVcN-|HsLcaVQf{fa`l*BDjv{*EmJGMj}D zE}|q$@%-(r`!^lo&KB3>>qF22LS0jcov)&{{^j)D5*mE(TX#e*xGxW5_M(wX#noKOs^4-Q5OJ{X@VfU zH55X=Qfdg`Qx*tqoHDy1A<=nONYVB%6P%O}OO!_K^V7>)C!$uvoWfEiu+dZ2$QFVd ze!@@Rk@aD)1zmUN(`Wx-HhSoq(T!Qw^atC-nWUi=p0$e6{+en19R7t@O@I2fFu{*z 
zGQb1;6H>P6Y5@14PZGJ{CC3@guZ*J6MOL)=%f-0c5@%)Ny4H=u)Wfwg$t{zDl+uBY zvv{d4Eshq{k$96fn%j3ATsFi%+w`^3MtZGor(`6M_pzz3QA)xcocFqoxLW=oyxI6> za;)|*LR|y%{MC`({533pxDvbKmK_IwZ~Rh&;MHi$s!UFA#<9CJ3-H3|W=?}=$KyFw z%X4JRj1jG?dWxMB;m`e*WI<@UEN^mcc38}M*sEMuRk_54-@P+!%zuvpz2Prh7O9RO z*F;SlXrGxKx>~;Zt~tm=WmVHPR3tttduywU0}g7=ope*qqa^*VF>@qHWgX6D-5XA) zC8L*~m5WZ1?~3bT&iQiur{v#HP2QB-Z!9;5?kh6$J+}sqatrwl%cQexhyU$#wws50 zraR(Z3ZIrcsQL9ba;Z6WF^?b967l}AFd-xQ7-?pUbIPX_RoGu8D(owBF@B6-HoE9{ zl}Ay)HOBwtfTx%UY_dU|<3?&zwd5?oV;vCA+WP6O_ig-46I(c&^wcZX&_mk#6LjXf z_I7~sQsnf0vGjTBWu0Q+;m5$d)}dj=M5t2|kVpbM=Bb7Q zPHz~OOYkZIsF1oX6LQ5gw8|Cphzm&3S)PS|p5*C-FZ)-asBu`W^&s2e&Fs2mjIjeX z<SDGL0>Ts;3yA9=EWS6xECjZakAbZb}Gvp{xnjfFJe>dNV@5pF70(fZy9zt zT1!AEW)jcs9Q_{f9tgPL26?R`Bw%B@m4}hz#2-<*=ZgXP>7V_k@l&)`iNKo?G5rjn zyt)g>47{+lis*0;%^K@mDMXAvVY~kCgcS~by5u*(=#EgkQcE_mV3IQ??17iKUxSaZ z#XZu*j}nZii&jJ=-B0lJDGh2YW73JeM*`W5ILzruQ%-x)1&_SfhYj-)L&9l+-PbO~B_7U<|G7|q#`Sj2DbCj_7 z1}gxhw@cFCWm$suk%C&PezYE<+fkR9b*?S<~G{Ja}oTsGzJ~S zDv+hhKHu8QG%b6{wZ1GIPLj>7ruWhOm}TdUU)YxKb=>nQGM0`XcOVFLbeknck$jkh zY5*VCQ7bs@70d^JplM3+EBxZ(TuQCMyEh?R<*Yw!+(-c*{I1daH+p8j!&ugGMryIs zUZy5K>qE^WCZ%}l08=X!$XY%UC+$GoyV>6O?fe|K-PWRk1f4MXm7MVinoSHnZ1I+g z`DrHk9Hf#0C_0+Av5kEq%4Kk6*fk8j;QV9fj)S6&L6(iv>ujgQ%LfOR?TowmF+&8l z-detf<;&modsZ2yx9}VCiOc(iW#ei=*V_vm5-_WO+L7d?=4{EO@$6FQ6{6&aEu6hL zHj7z|=}udtLI-XQ7}*ohhVCM))y96DZy7keuZ0iV2 z>6RQrXfFiFP@&1`Z6rYeG{%~7BT;uT2)?*XgLwFX9xTB*N3H)>scl2meCIL3jHrQ+ zpl#E%^bZE$ONM??ZjpVCXt~fTefpbIf`Ft?L?~f#6$Bck(03qF<6YM`*wYD%(fHh4miP4fYog=h2m8$V3iS_9||2^=o3d~{8JWDvJY^1wp1 z1SA8k;j*zjX8=eLC_P~9DFd@eNQTH-%N2v;%MQ%qp~4SMdlm>tWkLfmthtE_K)>XP zxbbi{&pG0;#Gcsa<_X1KQ?FuGCnG; zfGiF9;hnM-LX8?|Erm4@G=knnp18ZTD%g)9o@|S6LZ32+<)pf@M;BnbIRx*sYWpEO zFim@*RR2T6iP4BQL3$oSaFhZAxb(O^jQ@kG=f`rvUYJsTB#grVgC$L{EP%@U@GH!%S46;$juR0{!1w1B3c|v^`I6MtGWP|uB5L5fpDB%u7;BYNwp$PG z?8vuY+lJOx@~y|6VvU6=1_JS2x2#y#1+U=eT8K+H*gP^dLa)+){B&m&hsrz%7O5F# zN71L`m8=z8A7o-m3Nue~%yT*A3*p2r^_qvn2Zr~S^BhF<{Vnw7&7bIgsc-dGKhOYV 
zGhW8x-1#md{_`J{K*D1E*J^JnmjxLzlGAK*e8oVUMK?#c2-i;yXEJw6v$ruJ`ze9^ zZMoQg-12FJu{rLx?tp@}pXUv2C9qBRj)_Rx1q4e5_L z8N0qWgA%exzVgT#6klFaL(xWM%0<$!r@f9pij-UzuT0ZX7JfanxwV+Ry9)n(J(vqr z6QUJFGcOag)%L$>zDx~GPDdxj0lL=_9ydHqyB6g{w0@o=9*yBZrEuVobm@!1auaF) z%KOG@?22GGjj_|+aUT{N@vceabk8NLeTDv~o4}^)LcWf{k#CEKzdC+dC)?&^hxH!p zF(`S;22RLkE@6J)X59q2>iyI)J9$~)6ujn0K&&dPs>Yw#{rnf2i#ip2_miyc6>e8b08$1rOdAac25XmlN|;W;-E3|>JqsFC z&-veh3-qJ)6eyHD2Fv&CV~Ny#lkOHa3~%uT4ak#pcwJ7LJnm{0v2B-g^*S}5V_+br zzuLeJ7PS7>;l^Y;rZYqD>iO4E>eHIR;rHvR?fQ<3pH+&QKBq%utdqF?WSN}~Jr?eF z&Br2ZuP^7ilkQ^v=Qk$?^Y#r1)S@@>{z{?3duwEZZvPFbeK1A0>)hFKxvjYMD91Py zQf*qt%ULF3iii%168=QycVyJs_puR=X;BvQsrCQK;z3;v?{)L(n$+i=1L7-gbx6bX z(Zm0j6}lp znDBi_v5gT{v|3ByZr8Cfgv~otPWo*?9Qjevkf?s&%r5@TX zLv1&FSFv@DVWl82Wd`&9``PN-$`nD3H&_z1gCV!#pcXIgte=+AzJ3*)c=!Pg96h`# z+<-?`l<~u!OB@u2{*T0rc_jD(!7Nw$9CPQ^k2RSK%K^Ey!ny9iU|~zemOD)1a8SFr zHoBO3lik|&C##6Vt>yYc@UOg-@k$}_iV?dCwV(|8Th?77Iw% z@BlLQG@F0od=g_uq>CoTF*XYx{;h_dfyj1xS#}){5B29`ZBr3M-7E)1qpcH_ z8ry@@qusUC10SJL7=JbnvQN|c-UG#l8fMr#^395Y*qT51%c2ONy;M}KXZ#fdzgysY zeW$RJUi)J8NRwU!NXt}ytZ$9Kq#X>3Nc+@UDkclu!k*G3=6<1ns?;UV^gRWjSL>gC zowhhF{HjYlHC_ra;rj&)4CwCmjoN&cs~Gncj^>=lB^?7ZE=SqkY9cAJ55_`5KugPC z>}(nmwX>)B@BQnT1->-7z>d6XPnW^=kWL?@K2R|lRRXfb7L;3>2pr=m_K@2BRWag} z!osV19Udq@kwO|&bzjN@8BF~n`r`9O&7iE+Zk_k3#ClBo-MxdXUtWH*>Vx%mC68_o zM4IgD3+&UgBuBISYCUtUki}v478epayn1JoM~R7;Wr-^w%t#Ctp~S$79$pnq4ROr` zJ(J<18#(X;>l^{XZ)U-N?3CKX7^(NVTuGP)-whYZ`x0-8YyzJrxyWCGug>zK)Rn`Y zo{WK>-|)ru@5;O-j|B%wsexwaneTOwSf*u4f%eD`CyE)r93{MFC4`VTEtBu8m0_+# z+4kxSEz{QQ-bS$`fJTSrr3m~E!H#= zt)HFgyqkF3TIl!R+yJvvQMaLzbp(%Aeyh^%WP|K~ND%e@Ew8VWA55f#D3}K=juw); z?(SW>i$pWAbek4hVm}mK{bT<5l_+hC_A6~b4ARjyf-FX9yi8(j7am+i%&ug&iN^8J zC;P$|Swkbg2F$fA@Z0)4XA$S3NwDcys`6IPea-t5hYN~4{L_zfbUYZ4wj%_W2-yFQ zt!p@j^T1;-xes_tP+7`7hXap-o~dC9c5^ z$%_{>{G3DsSO zO6k+-<<=ED<3n7ah!$M#Hf z6ISkILt*my_Dc-pR7T&l!eKIC=?z+^PfjL!bEwhyff`5;pjnJNzr>?;x;Z0H_{+kn^pN@_tx1EWN9NEwB z{x|&YKC`^}Q+Eiy)U)>r- z(!E7ufxUEkWhw_K 
zJo@UMrmG&dfFslvwt(@M6hVdN)WmMgnpH`hmXc|a3FFgssc0CP8#KHVO4p4e`(;UVa$_1xdw=*{s-iStw1*S|BC1uCm`OMV=NVw*Rp$kCAB?2E8BKv_aN@vj zN%D$?)Dp%|8!m@U$v@|EY5ltPFM2*wiAjHhJ{cs=m$5TBI8aAR`Tz7u-5S3ZI8vD7 z86`xG;lH3==x0mS5GR(gW-4`2V@tQkbV_N8f2~!&E_$eaCBs1qEi-K@0$YB{^_0c( zB{hz0wFk=L9>>R=z$z)6koWxdC4}vTcW`wk>@g+97?1GBmY8CuZ;?yYlrCW`3JpW9 zRf1BvDr3r?$o=Xe9LuM{89%#)Pu`)of3ejU`)IL&(-Ky2ZPZQwYcY4PG|+*fShls0)3#iC6(TG~M%W>7w zZ~cIPbz*7XDGRMX+`iq%Bqow~v~y7wnYqze0)QQBE5=h$9VL{s__>WFIU#&&X%5cC zKm;nu-$zs}89lke-XhH5LXd%x#8Gj0@nMry;4uByz&`=X_>3J0hCo-5(2o-ecDc=3 zecxi;@ZV92+l~m|xX{&sujVBm;`!F%*}zaft)JxnD#q7E03@8-8S)d1i`$`Z6m!u5 zq83Mup57}%f^Ia$gd^pb4l>C_XJgj3w+0zSpiD)R`r#7g443ewV!nSC4eCvxy~Sz5 z6wn4Sez)aQ=+i<>fy4jIIR#Whe{byVz6h)=^TH9c%R;p97O+w0WwZ)wIjBvHpNySd zqCSe8VNoJiST0F${3q2bP8#b|KM!T6&*v0>QR#!E_9D^eJ?WJoRN*lHOKi8cp78!ywt=}>@_Y^*wnD;+$PJPpnh>M)@HJ66+w)-S&H5;uYB zG1p5nuKJsVy5Z{(;grA6){+a@KA8yWB>Hn(iw(eOq|3I;Qx?xJSnh;7qjRZ5-Eq>l zRm~a{INizpE`12Ku+d|Ue@&{vG@I& z@cqU2M0}5MCs3L|airexij?j6-RBT~06ZGS?x!x^qdXvfnL$TW=RLY-wv039>hKfD z0b5wSoh_}4XLA5%f(xWHqJ7zKuN%W7ZCp$efWOnir13BL^bnF3;#mwM>N73yt^4Ff zmm}zhDP$dk-kuH+hK4q;U}WngMr^0q65NYO`cmYc+z6&FC{=&8)W6O;2gy9SSOt~=tlyd9z>V@C)5YIY5jM|_;tUA zH=-qaEu{L#2;(;xo@yPxgY^AIL1Cag_n`n0^xAbmMUEwahi=Y=T@C^@K;IR*3jX_= znOu5+l+Su5T)5Wm1%i%{W3ta<$42t|secz~UOWI*V424#s1P$*SL^v!4mP(8&1`&A zcy1NQebu;S>KYfYNfsqaA8*V{FNSQOi=0*f&^EPYCS*2c_+7&;QT;~8@HVfAuv$en zc9t(S4;i;-S`~kqor(W!lfn`_%&Ya!>0ct5zUl-Mx!zhCc+KNR%TjoGNOO#@c0O&h z73~V|3MAIlYGpq=S#3#)JxuHeqO(e4dk&nqCd{p@y3Xk*D~eoCb2GdCykBhUyso(V zU!16Sg`ntli4t*yK{0b%9Zs^IlZ@bT{Zymi_8jj@iosbOo5yk}hKp-wxo1-loe#sB zd(G4n(ag~Q_X5l?u)mfZ9QdE#N&U`2#p$>9M}QbQT?}JT9q%EXd9Sk=vI8w_jkg^a z&--!|4;PC157)03tyiH{UMIQzD=ycg`#++61{E`2>sS4Kyk5V)Ht*FWqc7d}(RZeH zzHU>{K--sVx+9(|io{q&uD^CAEBa0{v~4D|xnRv&7>b-8?$fV6$+T{X`8O|9&PEwr zRhwXO-93l(i&2T3ds^hHTYI}*;U_*P3jy+7Ph-2COyIMkeNC}8zoJv;8h8*ENIdJ@ zLWxVrL$hhP>v8Jpfd2KmGSj?@{B?6suKa-Ae=#07V=;d5-==;|Wo_A0<=S+gb6eo@ zXUcdvqR%fX6Cv#R?KXF$rsjuiCk(yuIQLjs3gag4y!E=HmCrq+6LxoaW&ldt2~znST_eJyiz$^q?ANbA7) 
zA1YL3dMFokEr#;sjYc%+l*7~|dVo{X!gh78mdA`eR2rWvlU^@HT^ z`v$ccz;GK57D9MkFh%|m$LZm+h$%a|dN|B?!E-F|Vh{KL=1aB5f0eDjN}~bLK`-j%9$gw=^--kM$?7iTX|J`F> za@C{GHV|%cR5=-S>V7aylgO$BK`vuE<@uunpLLn^mmoTjfchBbN=?IqxW6jy_C-(x z5CH%s?-5v~*^K*xsQQ7o?{=W#Y))BmyDnqf#Rv}3NnMg027vQc5#gKB04aB-w`Z*X z0!>il59Fg~xXs+AU<8*z-SE7nA><{YY*VZHY7kO>i=8=n5^qB!r!nD&TYE{WhAi!X z|NQ;{;g49=3^XSu@@-1mR61)v#BpqBz#wxkovc_ik_l^R#XFTqv$`Ner-;(3OFwBa zviMncTwOHS9w9yWiK+Z=bqP!`0i9@gbnAMC2Pp)J8U7I_l@;fAnbQ@k!^7T(i~zu= zJF_t+&u6@45uh{9u z7sL0edCi~(_n{w_)vtD?7_`F<>E4U&dULBM3K~`)xCCCIKiDrXG?X;8aXrszvdQme zT(Vg;z)@*1sYBK{KwWvQkU-!s!rv7oA&QO#seVKW&XkUir|HX0=Ax7KrVXiGoI-+s z=>7zD_eE3^KBItVt7BetT8YmzZQsJ#0SBSlKkUwkliVo9-*HT^vkB=e^I+3JFqVw! z^MusE$3r5P4164c|3%18RgRUBWV0U=Q`*)4&?c8qpsEFVz9-OpbZ%lvjmM&`$-uHbEZE2G)JgDp@e?-5lmawv#Z=V7hwSuvY9H zOO$R8G7Ha5@!7$JiLKy)BfQNq0wrHF*_|jN0$w1-9H5%F0|ZwFKaP>qXo~1 zX;3bO-FK%aa(px48}c0T+#L9A5#~c@^8(o^!xnkk-|r0-Gwybxu6Vkx@)y~O@!W+~ zepr?gB6EWo?U&b$)iPgYE{MLIM;9e`9GN*QzUI@5Fn|EIuY#~;vxmv#&V;u{?cL*( zsWE}ox2BKo|Aot#ms$Tq&T_uDn9@A$;HaP*O}PnMiE9#|AYj{2g|bPpc?s^|j*4K5 zW%G5MRDV(RE=oA%4iJc`c?WKlDzj?Je5%TN;M;#2v|>U&iuv+l51ZI>neL-l6m3FS z{JSI0Q}?I$TZ$P6EV~)0ndg&5Y+lSGOI`fO?~^8$CotEJSSVh*(8=9;68Z9@+n;BCC>;19j z$tz)Bn*|x8=XUe-A^NHUy_4mAsmNSo@w=yc-Io!GcmLaX97Xd~zXyb0 z=?LO67$->@;zpCc^yd74@7#!8ZT(Ilbow-KQbE<_UQ@Vh=-rmfe<3pSATZ>)lRn=0 zxIk;qfAxnWz90*6XML6w#u>x*0qUb4wO%5v*8{1(BA#<-D=c3p6tQ#&7d4)Qj)plB zDZR<~0!To-XV&`4|46S!k;ECIV1CLq#_^pA9t!NfPhgG;H-}4zen6eAFqqf0li6=B z0MxMU7KrWFzACCTuwmHy6h>Z5Z|wF?@KF)4AFBi4htzXo(#$6d zS!q$cq%InO;aB0*;{RJzn!E(Q9->Xg((N0Ny*{6FMhVMMaoJ(M!f-6nlF1p zF2gf?`7+xmb_9EiOeMXUNFbGX#^{vTO*nO=L~;|=>u7v)>IiD6>4JQ1Se=tbD;bxT z$xbG#GLg#-;%>5>lD_5xy!*SBhK~7r638cT=1&cXi+LRK03seM&R;!|s|G#wN(!L` z0oGgS5xB{@Kgtl>V=n&kbUrmR+Szc8*)fXfzw_n{fqA4`pZlYK?yG!3{P_Vzq|X$M z(-N3h0tOH(0q92^W(UMNX_jH)6K|o4SkFRpwI=q3HUux%YuOi-w*|cih#svC7C^9s z%dGskiz;3c2u1SkkUFtGSwzH$=rk8b(M_b-z(jCmisES5fsgmj-lG6Z$JCnZvA z^h@*`apiif0_a3}16guGWQsAMHJg-NO;~D{3co*ZaS8}O9XlS5xiCi048oBK{KKG` 
zy3Mus%p3;TKN41pqmnzWPN5bdAVmH0MfBh*c^*I)&V`ad*Ni}&)MLoyHQ6SO(&S!R zY((IJ;5eqZDd1%?6A77koN;PkKv}25T)}H7-jAN_qtz}_O+VdVR@U4l0Kfasj8-Cz zZ5mDXB2CrYd>xwYLVZgWn{E4J<#c5j5uA+q^h>-~&JjR8&8&Q&x@|cjaJMy%v;#u! zc?s2yI%R}PX7@}KkrgWg$#)-o$-sPd!OMJbPneXr_v?&+UWPK&79d^-TyuQ*kBzzg z3V@h&8vo)-i#i7{u>hQba&!o?Ml{&Uc6SD#@2mF*$y1&b56RPc*(D8V5iwZ73*wih z`4B5San^jDATvVW@Iy#^;?jyqT!dSvhd3DdH%>nOWR+;&m3rXsTG_9Sa8GEPxJcxT zc%yJ;F_xjA`I4TIVEj9#zw%~CD0bie#c5^#PFtQuaEnczy$PsC0^J3&;cf*-hOg1C zz;nKdP|dqu6O!C%3+TEMXTVxBwzyU)~7gHY|>_>3Z24VXd(e@gf z<^9=T<=IkYD#D&rn);Bh0BO4(`EG-Ry>)O99VS+GGfPv@_F-kH4-T26`=13fgb!yJ+u_4i*p`=?;59mCv=3GFZdjSi&kRTR=!7=%0u4pd6&{b zVnjKhWW+ym>*(afNid4xy8oXMc!V9RE@RmFr;;{KD8KHCabK1C3`$5b-e=fMXi@ zkD^AedAuKy3-X!xE@l`16(`%q61Rju>!kg#sn9N&L5fiZ>u;Nq`9qw3F_*&E_QDI*m)x(ir z>rXKvhLSR!-R?D%=--$B#>b!d^&2=eScP9EoIfXpD0@XLiX3z<88m3BwMt!uwou%L zD>1$M_xP3T`111#_1bNR=1RF#0^_$PVb42a=qn+#>AT6Z?b8viD&?=)$`z6*6%Ul%Mq8m_0ZFJ*_OTB@X2t4tK4PAGJS9AqAWkJhlRP$Btd*Y(BFGp zA-S+g|Mhu#kW~4SRGVv?O`BdWplk6r{m{?n^1edPb}8nyrb?Ye@~ar?$>z+on>k@9 z6L9ut+1&lD?v7TBaqae-g+ADSeg$?0-+nccoG<2oC`X7;ck>@{4F2oCR0_pv(c5dc zKIhDsm%9dkQ*jUPrfnpk2kklkVT4cY-f+_IgO+;!FaelX>LF7K#pZ={NS?_-YXWu+!wg9fHh$k*JYbyvg@UZk9^G|Et|U z!&K-)(YC4DOW&Z8Obym2C*9gX7XE22K>kE`SVnh6#bT~ziCWQR_w%$rq^nxf12pEq z0cgrG9N-<1yje}t)BN9=73k63t@Uw(-vQ#;hDU~^XY6)HWj~HC@tL(rJNYkSRd{)+ zh*i_?zkh>=-c%tEa4adk$UylzF+wSIx15Kgz(gBJy%!!XAHPV>rOnH<>iSb)LE#YE zW-)#CL@_4Ui<7qshGr@|=d$~x9@@&(&Ek8!hCATnf;)wlU1r#(Dm3-tkrLf&tV=&u zSnbQ(Ch4d>qBtPKGDbbQMvd}pQhlui^hnp9kW;>NjZ#OypeZZ!&S`**xp-g~ll2Hi z+J?7cB z@xFh`7M)(6$!RAr4rvU3Ho;*%>G9O#vOIBog8_|-&7jFI(mcu?<{SMi>g+QZJ4h)Q zV-o(qUdAM@RH4OA@&ibrc!DW^(wT0%m&}3!(23rwOT$m^z(olLZ!7j=KOXx1>Je){ z@tYv^vj{YHGX&V1_2KK`*bx9XKLaQ6cVQC5wqnH86RnMTZ!QZ=t?39OkpwETvBlcEURh6!+tH7d403CXp=aR01c!iAUAtDsR{7T z9JE%U4kI|$|!$p z=cgtvNF71y+pV|m2R(>oOZlNVXggZUzjv4VlLF6Axq9vvV_@LG9Akc;O z!s0RM9I#MZfvs0V`Nzl`6FAf?u(vdiYRV76*KpzNC2Kj7{xEJsqL_Dhs&pL$*Xy$S z?O;#ZV;qHsTFE|Me8nxNqwFt&;mmTnAmGna)xcCN|B<|v03Ze|w!&z&N>;d?>7<7U 
zx}#~KmF3QWyh2}vJVvI^QvaxFf8U)OV!ya@+Usax+U*~s z+>lT*|Lq2kK8!>B8E)^lgzxysKz2@0jJcpM2O!kfP`^+yxnc zAcQB&ISJPTf2fMO5dsH=>zug2wQ06(lDF<3kzwhhOW&={0rdesU@kNNj7oJ>OG{MiPJ(5Z>n@+{uA%bh!uKUoSxxwz}5sNKQ zUyUU*yc@bxf_cwcKyCPtBG(Wa*s^48q%a5j%AOpb-=QmDAhUAj;-~ALXwb2-IWv~w zm`Ny}B9k4XC?UXqK%4$&ap7$bu(RG$7+7`+PesE=gMwX~Q3uyh^!FxUMp*Daax;!1 z#4v8N``x!as77FI7LwLmJp@%aefUs}Aqf`{y~zH^Cg0H0r<%!6x>Nt< zM*GhY`w23oHI9P%?oyA6n0e#@59&9h)c?}E2mDHp^-sS`hzry;r&DkaP)~DVH06k1 zBRvLe&#MSY=jft zE5d>!-SRnFt){K$dWmvb9nJ#6fNAe}U=Wg2O{(BcV?WP6`U?x7V}Mk~KyQ2M9}`dz>xf_>#pOG-I|#7n#pp9s z#ojPNg6k;6`5+p=hif*d8Z3kO3_Y2Tuha|v>`a7n%rUHvjWWiMaLQL>H$Y}(ndJ?B zCXf?1p~&Uk$8P5dQ_$zfF7FESo)ATHCH_zu1qWF4i@94#`LTO-u(|fMOgCcC5+XiA z>Q)}rBph~^ZrmrZBaWM^G=ts=C!xXCcZvghL7Yjaa6lB{ZL&n1Q|ILA{AzY)1jGi+ z0${{g1N_Mh5P}1$hxBeD9ESRT>q$f8l$-LewHQriL1tBmdsOEUA|cm8rR{l00fV^m zYmqS8FGI)S+}x{?dpt-EspDqiU*<>;D5R0gPZXjB_VZo-ynTvuT85g4HwsSdux3&F zK8eGbw96>Ihjl3!WATP*3$jvfRtnk4jj}^p2r+}LpoaTPIy~aHU&-Bn{vZKZC(m6F zX!6OJ@n&owb0B;e2DJO#zct>`mUOw8uSmf`h$I5jA}9hy{gO!DmZ7#FXhJ~p5aAI1 zgc8*yQ#Sf)=zrhWDC@t#Y`rp$nQIr4Qjmd@_iKxZFOMG#6)1#uZRA+1_G23ZoS=%o zY^f`&orQ)MN(@pcoAnSD?*!`wBWwd)Mh2&9E#BY=%CN`wyu6_Z%VK$xw>^6Qw-PMC z+h}7w%)0Oy2v!e0e(0F*mMz5_e9?KI4?x)=m;ka+hU1hxBn3X5olPBp0UX_A0(+l` zdU$~9%Y+bF6y8{HVVw7L78h{D%WKU853T>vZ-up?>n; z_852NMw`IBT|~tCIhME>Z=9V%Qa_*XlyY{TLj0u2KX(-|VolwM-Il8Nl8@DatoX#j zm*QEeZ5q**3Hdc~D{k(*Keem$=F3{Yg{oR|A3TP5ZEsafwbP*D24*@HS}i({b4VX} zuM?V`>w&SwDE5l9D`@;FDkzo*K$yVKMlos0GP>$(r1+Ujd}VgVR1X^~UHfRFblLu# zkPM?yBaOxf)Blpx&s5S{j$uj0A?iDw2h3VYxSnH}-Upaw5$&@BjtI)i&d`_xR3W-F zuDl1Nw=2|h$jSs(eqx;y(v8&6yz_h4dj5s8&RYBIeLnlT(2`n4 zF`5`*g?!a596E1JFa!8XyeTmMbyoik0uy>;T8|p~ix`YA7jAtGs#Z8-Os&4`_NJl9 zQ3%3D98yi>@D5YrZsAkFeh~klGyE!T0TYRWqYcezuW{&QqdyALaR}sZ%oR1s>MM&V zoJ$IRSLodVN`dL~_us!0jA6pakHmO?AL}onb_z@d<|Nq}I%|ov}qyOo#C#hCX zBP;Qr{%yBU;#9ZrIB6AFP@S)MJ1O^cr|R|yjyF}ZeKVOH=eBcD9~b^df!-CTe+KAl znLFy=Ts^4$ueWuoX$RI#O@cp&o)fHf7~t5nNS-bk2UN988Vne6+=uK09a&`r*b zKW%HZAJ?y$LA#E_Lvo=~Tuq#qKn5(d{&62N5+>KF@!_GzRGj_c64QjUxT)#Lew-8# 
z2G$eqcbCzk<0;`(`49h%z)wcvT9KDew`~_4`jR%LWENYC>;g3_|LbpE4IRlAY^vU~ zV|Jz#+SO_~;{Z)xUWz?$zT?_CYN-cmiZSa+xJka;w>n}B@d@kRXqjdj?%eyqqTIP+ zNF(1a&2J}pu8;~9WupVVTD$KXkBuDf|1S&Buld-sDhqyps7sy7gWN0_SU_h5jWoCW z=!)*|7EWeCPy{I>pJ1_nA>5G&eqL7S6 zj4Q3@9SRz1bn6wgmF-}3)Xji{1tYl$c_57Wl9~SS;g*{p2B9ucyAlMbw|<(+H_8Bl$>deD@gp%YB*X$%!HG+ zL+x;D1NO#|u3?U}3)p|Y;|oAR-^t)i;p`ZL%hX_eU=MTv5QOj9@O=(7F$jdA34ddg zcSG0!1|{^!U5)&Vv1JeI-x1*q(yupc9lgWGY0$0=r>u2dT&&iFw!5Vkzvx{YjOG6D?iEj!mHRv&EBR1_2d+DgWbaO)p3BCtdh+~Kn|KE#EA-w-=e8CefJiike(2Zeu z&#Jx)_A)H5tQ?EOH*u-~I8bK|mR~D5rx?6cm6w5$KwHvQoMclRRD$!r@F}*@##}M!H9mS@p}jQJ6{I^aKZ}R zj^5@E;zIfTO0g|cU#JXjz_tEjz!pNqlQ!}2vSng^j)sjTSmfQuZ{Fk*62@jy=$lf% zp!srm-U9pM|EDGSo|;BU);ufD)|uihZPs$4C}s)fTRIYDUnSsQZ@ie~!)$5E=g#ur zq7AdPDVG&)JOF3;OQ4cv(CcJHZ z6P5USFK5sw92B=vIlFv}VA9iG(Djj9@BI${d;I}f#JdHrqNBqqJ*w)To~raVtrRIOF{--;FJe(> z!6Ga*9d&I;2?;?wo@>_!H=(Vc0z9eiViIc+J@U*j7T6zPp4zuB$kkNiU8&D4`h%7O z$TGleRIOA>^C?$CqEwUbxL$#=U6daxLI5DPYt5&&)em$X$f3Uw&IqU6If`Pujd%n3 z$w;E0KixdVSdQAKIJk@cm6|qK$9w^40tN~({B)D!)Ru3azILBV06ZavKK`|t+UC!V z4sV1?#Gwy_IQ-42`X-}$dM&Oa*J(uN|H$xmMJQ?-@d1O;dBZq<{>aWpb3(TTThk&8 z7OQNmWV0MzH3>`}#ZA9K8SGI|#TAd3}RhlKMNUE+6IBf4o8(S?iM=$eJ@MZ@Iwrj=N#?7 z2AgLlpg7acOY)z=nl2Dd+@i71ray(MuJw#oTP>y~l-J0EV}I*sqF-IXaVK!0^yF1^%=+o5c&Y1XZrS74m zB92*!KS$jc^KM3MvO@jgAk2)JfQ>jIQTcx%Psk9L`iu(2E@bCP@sygFlgP?2E$7Hy zNq*k{fD?z40%UbFHaTwZZnuTHGvB{2dZ}ISltrP~?YAdCxndDsO7$^Qu2}-}K5b;y z!CCafL0h`iEt=F*q4_U|kP%1+3M8|f+SsT%Ol)OOZ2RexEUo)m3Dx)UK06hO`23^T&F4g4bmwpL*m(N!6WmB^^Jx`eU z`2S|g97xJ}3!sM4lHBd}QFMdB7B5@A%NiKo`ua-3X58xPSz1)D)+&@fVw}oGgd5Qd9`@&X^PTJEwn>?m!ISX5;vLJ>g_5w+^&E^)c3}+LAcr_Xb_mpmBTs`5X1C zW;{+dDX`lZqW6R5G?Uq`pL927~atWrv_0ffVujm4rz zbqA^)D7vj}ll!phEndFL^&bd*fix=X5UzEb7=KUvrOu8!umJeISg&f@R6dbw4neM~ zr*l7~&#fi~Uv$zHF9qL@Oa~sMHQ2wuc}_aWQwWDpfAwbH=za|^^Zo(rP7>nRVD;yA zA{Np5@QxbJB^*BknA~G0bes^-6BN)2UOn&b*`B=NaIsJG(hZwevnXTKSN?QCApNS{ zdo-GJ?(^_BUu(3b-=?=Es}m+VjZ~5eI51h9&GZGmNs7+;bY zhVN%lPd=vc_X-T>iGUb{hODAjgW*;Dx1lnOw<)>q$=ejftD>iA5O$$R*7ILY!fM(Ux#%b@V3x 
zO$b%0$M;I*bv+yX#mR2whx^a@nU!noWWsh3+>TvW=>f8Z3bNa*E;j;!77l6ryL8Cw z=_O;+o(zH|U33_v;aQ>trVdnY^xBH&xma=J>jTar*OeN%vYC)_;t7y5BdQh4u^Jz% z!Bw<{e0p(Vr9wIcUHpa(2#HLfDef31$CqBRKKXmw&pljv4?*qD zGBAMY;F_qhrVDXOkGeTJzk3o69&5mN^fLx8-ABA{k_V)|It0Mz(Vh_Oa&UtNZu$j5 z4(Hw{3Z$dw4Y87Zpc3%|eTM=<&rsl-jI|%0MAiWZl!&8pa=t&RC>BpZ>bn9IMh-2& zQBLht1gKe<^>_i#t3F$g5$^#3Qzk%@bI=mTZDgg_H3c?Bq3Pf+CN8LRblHBaZG!!r zxzEKu>bG_xKHtH`BVcb}A!f8c5u&!S-aTZ*4N-v|miYrrmk5(w?A2W|Wt2?J5o4oO zQ$`Fwg%z84fy}+@WelBnrRZGx-!#Q#R3xT7^%E~g>A?FRAAUJ*!#TGJwoRADQINaA zsqef3>`j3M>!Y1SxTN8Xw<#VfODGXIB}RTQ8Br9zf-Ak!e0obshL4Sg$^_+-)|9pO zKp?3@iUEPnXZ2RL*A5m%>6hjV!W*^6SW{NoSJ>=zJl+P~2??w`Z@sC%O9pkJ4fKg? zWYR8InLysj4_$sSY`X1XlS13q3Abp`n0JS)zJIE%b0cJdRnAt&#%Y$8JL+jzOT1BVzfJFQjlntk#+qiR<(Kh)H( zqDpTNrG5CYTH={bigAj-&q>|5nxmwb8y6$}5tLrU7_$|-6Gn030iYlebC`aI=?{Nz zn3tkO26T5K&Ky7y^WZYPkB0yKBg z7ELEG=BwNGorkrHPVS(P<{Sr?+)WSg-zIe9V`bnv=IN@~bGH1kyHgjhJuo*tG@#7|yVj(|SvXIX^$>o0a(d-zg;A z?)35Tb&H(4v-^KzKe>_%?cO?8^*nxkUG#Ni4#P3m^?=t@5Byz2ZAyr4>v(?^C3yqLUu~ zM+7Q)b#RCq|FRRioRpA)=WW-koE(hR!`3lYCExbHt4Kn?k;Q2298L{n#Ekr@+6*>- zKoprNZpRS}C2`yEu5)wq=Z__VFEu-5&UgTh^$tyh>>XoxtHT_BEGOJ8$Wv0!c@B3C z`tlYtIQ)oZV4aB@MlU9HNzb^$L$5TbHBuWDlCqr3^U?SY$gioLHj#06YoEr_`H3_x z@)U_W=SWk<{KDw@_6C=Yy>$;;gDeg%t78U=T6R9#HgOE`X_UHidNc+LhsluuY(^kh z30^$f#rsljYHD{&p}}pP9hYdD3V;m z0G1cNiMpgQ8a!a^nTCl7N7fd1!%_e_hOOACoa_-?=o)4*K0ITDjCBcK23mKerXokf z6?9FzD^xb5KF|D`8ftjU3Y&jJJYoFXsuToM&gX!+!p3;@*yEoj%HrF`dj<9-VeI># zM8SFXiK>hSK;|aa!r?PYjcz6lSdZNXiA3B9V$TR=!uE# z3^$G5Ac*Qdn$kxu6GK3In(>-U`WfoNp&iScSdSj~V;p*Hpq&fl2;!&o9E-qT`s8RK zF#;Vx>i`Ab1w~2)7_QUKoDO>DNrQZwFb+_S*d$04l#EhXQENN}CWt>p0My{(oL+;g za{btGF9*HilaPaMG~mg>ev1C+$kuD(R|>^SEDnN*UKg0_vBb2DUq;OVu;6@viUzI| znanHe)z7))D)#w5J)A(WTC5A7@$O*sX!-RB=IlL2>pd`l`Aqe3ZT8Apq@PJY8_7=3 zBO~2`3s`t~h9&yr1M_=WKsM+ro**WPFFt_FUXNB(VGJ^Eer#?nvY0iw93NzgOsbB*+s_jIi_eAm+nH|QxgUz#< z7AKwK>F=V&6dFNsb|C;XBmD^b)%!}pGSd)wd#dG>S#Kr~+^H@qLs3x|e$`m{Q}FFX z6t3kDEnmVk{V{R-Q!xnKbHI_BI{26K{F~YEj#*> 
zrPM|X#L;t<)Y5lCkWn06$4+`z74Zr@q4fyugBx;bq5d>6sROFb#9WuGkA`R;?XD)d z>{+*4cVF?2|AM)-i|j@Sq|VWAcF(+5gSDNv6c&UhD;WgnE2x}|k~7GR&70F8jEBAU zhaep5URpao^weIXi(rw1m*__#%=z5aindYL!}Z;d{gm12yrfp`8Fw)<2Nk$SUeN>b z$Dx4q&KcssySaLJ=G%SuD8lbR~wVR z_}*Wo7Bp7|kfUd^V#ofYb5dFQZSove_Uo_CTzFRu-;i^=kZy2hz@|5K4X(Fg63hf6 zu4XibhhFTg|LLYUuTk06#?O1?puXl_w_OY5f6o7DF7_s9wH*6&Htc6@ONHdvFSmQ4 zBCmd`_?ya$tBrAknkJLeF!CZCt`?}_G}FHB|5VmfORHB+aw8sN0&_(r5=VBQayrhN zd-26MJ_H;!mdayOJQ+3lbZpQl&o1P&LAeZz+&{lx9TQqfp}k+nbZKPqgPD5I$Z2(% zHA?vyVc%aa*SziJknqwO*I=QOJ~_4ZUn^8gDrc_FUs{cP{{-jrt8dQ}UZZhgulVb6e3@r~w>!asEm7^Gno;!AP z?INOYczD+Ck@+_?Ro31Z|LN-(#BRMSSmY=*T#e89i^U+n(C`a9VR~c=5N5_3es_m4 zYHq8Ww}|jo^ltiG`%3*z=ONnT-ud`InZPz5k!PYPjr@+U_)bWn$+_R%Zg zveUtUJV82AJNu5o87U3fU?7SPjQUro6;2PB6{ zyr}E-CcwO$nQ__vMm?AINoO+zYw_8h&3tbSD4Yl#YTchqGv5RD{_mG#7^_2g#!(#Y zQhgn#SE+1MQKK9*i8WGgRooyAR0X7wsQiQo3-+jR<=i&WF_5aNl9IVRmw>IhA~1P~ zh}cnK<{pwo8E6}X3r*u0Q`n8zD*=q6j31cx=%?g{H@ie=6lgzoALWhM){6!SAm(sZ z$Y6%t;*{1mdnyrv0j4|Djp-Fdro|z@-<`B*^C6Po=~~1c7>B!`;bBuBli;{MdofQx zRs8Aqr`MIhS9ytxq`hlQIu?<;LB=I)gKvCG_k&gc$TdEBLpacHZS9@ZCCrJMynJws z^AxfNqL|Sav*JZ=DIls;vn~oo#{u+sE?rVy*c`_inV;h9bVRc8u7^$RRMh!??2**` zX~AfkrFXu^BNWZ(BFZ52<_)8%i}Ngr1)lj3W8$6)?R27js||Fm2@4OWei(*V@6U=@ z7e+;Vm=O5O4`(6+LphfII5IBRX1bUe{G6P@d&#`oZl7vZ5JC@JZ_tU#x0*61RJEqF zt9*IG0P#ZvjRyS?5A`}%9voKxk=vh|Jr1~9yFejqSWU&6g58-+b#6f&pAnh6&)U^>e6v{EpeBMoM^fZ(pvVrwUGVL(^?T;QXkX3Y3zwRZTug&ehwY%lq`T$V*+~^b=nXY6kCZyy7_qrwy(pO{)UU@88rA9vDRpd|+ zqK$+0E~(Y4k=iM1I3j*s@~ zDP{bBl!ghYOadycS*3<$#k;6ywoPr40pt zL&{w`dW{Kn{fpb9d-_M$T+UG)@^CPPDR~--j(qsVBAJ8=I+Hui`32K>Q_|8l#Ui^d5uvU&u?{1-AJZ54$6jNtd+NT}I89R?mnnYbBZ< zRXyBGbY3B*v+jGMDlVz#S$Tei4lV)dX%$65HNB`2yle3ZHlX6H6IPUde$^gwwt0cT zc?>Rz@!dj(MgPTkloPp6|T-Gf(u_zK0C#|7q+dL3Ds{+?EN z(az*qYqr)flPBSD^M|`NP7{0n`=T$iB&>d9?$1`>E(zpFXo)J%eEMwRtb&vFc-LGM zsu~0iW`4pd)N3GP`t0`nPYUR@^!Ah^@21RFP4eAjtGesv8H`Obt3X!Sxq z+Jv4}g;UxO@`|Se8u#)3yykZ12N*Z&GI9FlPG0Vf;T4a2yLFTWmx~*5yTG~dE7{V1 z76Uu%r|qUu(?2^6Q?eoaVwm#I!m?lrEF0F*DOZ#V;qMw#J=@S9+8cv*U2a*byD0&w 
z&i@{+m)6vUOoE;4Q!4)SC!>=9T) zGqGZE-7yX*6k%o41Osx&O7X*Y2+PLYkY~`Z!|D*){MxJgz*u{C+)lD2(u1v^DrT9XZr&NLTII z0VR)u1qYn{Rz{&kUeMkO%c`PxGv(-rc&e$ro<$(0H7de8h2fi6;*Z?fjGaZJ*|kjP zuKj(_^DX_NN7dxU$*wPLH)bHX(Yr{}3YbpA;XKS8;_^aCp4K1rk0MK(HS1X|NTEV6 zQ*N+a`6?SCWx2t>v$DZ-X8UTObTdqhiDo5GT0YB!(R#D2X}YmV>{8*eMK!bS0G%Fp zxt(o=qp43qXf10|XH~#Fmf>8yu&&`1AKmOB)5Bnp+@-2UXul%QBE%P5QCb&*1LTAT zh2MhO(4t#VZ5({<=W_uC>Z1`;*eY9R{uMP@E|YW%MLNF!M9ftUHmBYjFWVW6{_+%t z!Dd8AJ@&=dcz@Ejjcx7xAI>UWzN-gapsbY3$)ZI?GISS#u{s?kKcUBvQBec5$h>~y z9x%y+8S}Rw*%eP(I8hOudJRopHb>)pH(rYKch605-fh8`ch=2j_ z_XX@IbT-Wg%50k3cp!0bMrB=fP@?db2;kY^=wey(ec=PT#^A|uN|Xs!k*`pE1^w0o zbruX?OB+VdkjEBNp0EKjq_d!H7*lgje*{9GYJ+evTpEa+Ueqh#m;Iye;sL1HW9TrXNU%w49!Mu+`J{%8^%>gc^`1Zqx7P>G2pFznOl)QCrMIeMge zT7ZuP1SnsfrX@m4KKe*9ulzRy&NAM!PvTrIuD`?k*3?5I7ox$+x&<*#pNt2a{#V=* zUIvlMR5b$AX2?oBn{Ea@HR?5RG=#tMdUaU-m^_fN&R@k_+7yZxyV<6zG_6D{RuR*I zR^S4~%-|01JOD%vwXCJZt~p%LfvdxBy$UpNLz;Z+K7uhnI$2HKa=~wn{3my9 zKeQpMR8h=hy2QAY<(sc7r5iF1S{prJ7&D`qUs6RplpB6%fV{@Ww;nm5PZ*g$Q?dyr zFB50ye9oj1zp`Iob>D}obFox5na_{pkL2xY}|yuE7f z&W#t{|8ew+fMflOnkd`hSqC23Ke4qN#F6ND*XpOUggwKYxWIN*asI)X6+GZ1S64#a?VDblN?! 
zohm6l>CeFRfKzYfp3*FP(E7>;hZxRl@4nwP6Aw=0fMb*z9)-1(&M&1bnka}KQOw)v zweCM%(>~*AuUSuh8bYSg2q@48hkLkc$PE!M;jAOqJsdxYjNHP&ZPA9Wx$K76Y7Lk{ zcwLY?J84bs?7Q2o9I|=^dRLp1#p%UbrLISBiJW0SPQq`RC!o(+gLiC(8}CFq+|oV< zG8Ulw+4Nw79Wge0M6AdXgO*<;4(S-lv)jyC!Yn}EQES{{8@|^(yTmw`5+DE~Y`FbJ zdHt^UldhY>->+X}4FH~^pHh|up9Fk`pI(T>DjfTo_?VrM5Yt;VSWOoRvWsII5L@`TKVQ(GF1!bP z{OSy|yiVi^kVXL2uY_RKi_PqN% zi>eRf(d>4MSxQ;9*-@c$zt^Ep&+)fGm@ek&x7VfMuS_^`;mv;=Dz>#N#0!0RZ`$ZNx>WM$|C)eZnp{#7mGvftCO~{YE<1(qhf5E0J9^9${4wxQ%gtOd%n)NUQijhU>U~rCQl^7@2*fCVZ+Vc>%iw zRnX5b%chBE{(SawzmIa_bx2p|Y0HLFcMyKf8Aw5~896TqYl1+rJyQ5;iovnYX5SB8 zu1XHc!Jhz20s3!US3@{sSC!4780inp*BVBeHe;Fpm4}Tj*EP)?cV7xFHQbw(N4nWZ z2y!h%3tWr5_4|NW>%j0w zosT)!Y@hxcAP~xwaf$Z9b&Ou>3xnDrRen19wpa+cusUI+iV3pH9h(T(I-{R(8-&W4 z-0WDtO*h4i+WH919(Aek0Vu}J9<4EA735@q`0i)>It839$GGNS_YRKw%W-xfgi5Fg z7$#PtoL`xSNmaFTw>j;v4AoZ<>gu2UH~DF^6l>r+;gZ@s*#$=ZwIBE0=6q*eDm2p4 zjZ=RCPOL>SZ2>0>JMBqnkbw(&W-E-#;TG~4_Ik_7O z?+J1^tF-1ZjrM5@C96S+PIQv-Ya~ez`04_mlINpDIwq$GtC4ZS%HvHIO~4G6;KURJ zCcp!!IE7J>(jO5_yBXIPC=tR04aY3`Nr0*dab8R1z%f!^^-`XHbEYLRE$i!Q|e^B75P=~gtl#WzPmodzXi>&h>yUUbVIZ?Wr-(* zO~Yrm^4ZOV@YE;_fH*0=GGzVWB4j* zY4OUoj!1Z#9RTJglDLe9TMyAaugtSP_VK>l*sxLcEqZu(+tW>gTdsIqch@0-37kj< z)wZW?HWO4fuSqtjowwC=C;TMqCg@|i{nJCmn<@Vx{jjDhwRq1&ynY_dKP`U(>(0+| z1ck<;qBxY;zWzfHVte8{Am$yj9Wa%>YAJQMF#i1mS@2rH@_Bz8)xZZ2gviePjwG_x zZtWmRJn5m!ybn77wU&G|%wC-mEAV8}e$iy}J9syxn}tz1(7A%2F_dvN*xYLd+(<`|gKN zi%@^oaRk#x>A^cHyMe>8mTvV_Vri!!=pCsIKCnvTZ*yyP-E+B?{Ut0f*kdgB z=2?^ULRuAVB5e#+FAX}(#{J=H|3(1}pRzyMP7g&scMq1#8EK zE@!H9P+$SiZo*wS+~L4z{nW=AR_ri`Uo52H!D ztx1IE{yS@}+n)xK$PjY)P*`u=rTQb2Xwxo00;NQJ_-~3+#JU%lB}W6HuMT!}^M~EL z&T8I%Q>mufN5dNILJ%#1hmP`&X-72QXViV~DXN5(7z0?ju`1q*?w`(bCuQ(hvf)Q) zj)cos)rYr_yx0I&aFAx4wneQm7-q<0GxW=pl7*TP0)*e)W^$*r?(eex$|yKDvT0|` ziw_Wa#{6f*Ga>>_d^c>JFn4|Q!~1Z_{|Y0B>PNTq*mhpi6z3IY#G5x|&ikLq1hg`X z?NXHZ)Wz?fUa#D7P0aCa6On7WMjzF(ANEKOsAO(oZ=c|?#2{6`MV-FKfm_Q;qw1pX zm-j@e>t@{HldW7phNF46-WZYE`v;Xu6sSRZxedWm9iH1J6lyKv)uHn#l*314SjD*t 
z0>^(OmIfTx;n>W&3nCTl|1pe{ohyi>@jmJLNREY?hmqh?(v!r7G7p=v0`ooO-VGFL zw|`aUfmeXl13sW1ijGUCMQW)z@(ddye6jq~1hWqBj2Yk=QO8o(n*>2c6ZZVKh4Eh4 z7?;=f3vTdRnNn*YJ32ty*i$k0-?22lK-PMgYL!{QFp@zEkKbGM63}9}I6e>TLz*l5 zA4ag;=L(+TY8(CrEZ5|Xplz%pneYp^=4!UAn%5ve@a0BDOtn#^_V=531IW+wfX_k0 z!SN!h@?;P))?!nTm=0e1D$uU2|B<=_T2-4rcvaHEa@S28D%#riAN$&(nU6_u;|H%H ztERs_n9oysZIEV?$iKm=cbpA7xz{hnZu_1%u3Wk>o4PCK-qRU780j%g*m{&pE2Jq4 zzG)%p#g4XuUCtQWoag;h*L&EMsA#5T$I2+%f@jxJNO8!kS|PIR-&Zh9*&YF@CvC@M zMj(EJNk3Q6@UpQ3UsS0jd#^c(#-tipBDT?6j2R-AB zdgw@d=TKE$>+lDDcgf9NBVjaw|ARjYDh1?$2D$9-CnRg^_d}GX+Fzo9*7J_N59DaK z#F?DG+=eCCAb2rhAG+4=e$A65xkR&Li#r%S$L5*Kylckr{{5nW zcDFa(0Nz4Ahc(<0b5jmY`i9oPenx z#KAQiQ$JB23So!6;n&1t@feew3nPKnoKRetf&gpfqDR!SNd@u~`ly!{F0?H7muRj$ z>`mSQShR~W)Gbwm>+46#UoB(32wmyb&Ug|jR!ff9w@dC-gACc!%B~X z{FO6iKCTfOlv-5q&z-CZGF72Y7SPI(@vllF^ijF4&+@iJKr8!I7lj!a_O}j?eQE_+z(z7hYJHhxN-p;5kx0%NNg(g zz^dD87LvvXQ}#Hy2{!&11I3|hKip^tezj7E+F!V=z?svppb5u~hu{B@4&J~HtL8|6 z2h>#tttyYbsk_0yS6X-j@IVNj0Rf8;Rk!22*Le-K8re6?7kF`t34$dWJY`We*Q{cP zp0sCU5Qz^OEB%y%O*%D@;ocx4SYv92i?mOz&7ghvH+uE^qF?R<+hg>k7A{yun&ETw zJHdxa3p3&wU*0?Y7e4y&*FeJT%Q%u{&fCemasB5ru(bigU+|!>T1wD+zCb6mgG-+N zhF9fq%+)NlGtm?}keDwJ$#+OK+enEobeIP4SB zD8)a`B7kq$yUc(Cw-vDElSy;eOi6;#7~u`u&GFZVkbhJ<;7BW+i(tMs$)|g+J>9eZ zU4AbUqpkr&;Lr2J<&sX%pWc$^qx?rpYmZmXzcFocz0!g%%S~6D4c~wD?QM(?s1Q4D z>3v4&<0X@1G5T*|JlNY>#EtGN8S4A!iFIM$$fc8%_+COQEb}lUVO0NZ>N~3Ffohi`OE(4K_;uw~ZNpb!vTsT^S*(N|Gz9 zl!ls5{$KHRJp^71y*Es+$a|j12Dv1JZa*!l{M)f*0mHL!f>9 z@#%PTr-;H#d`cT0nJmxf3UuMg43g(2bYZfP>-P2%f;w4G+#W16Lj|y`p>P17Ze!x{ zs3FZJ=Li6>%jHr?jaFjNhiSQ5Ry?b~Ba~N31JsilvmUakv-S=`bJl)i@70Gv5IeD5 z2x18~5o^@^YcUfcDROgv;7%;c$)mS<=P4cU-nMn)eqay53Fs9}RQ0dyYCJikK(3(oVyL)BD7PX@RD3vmKeDEZ!$_vnmlU87^& z$lgWCutqezeBppbylEWCpX7zF`hnMCP$ijUl?L`HO7}-b!o+SPnst19*JYJD2ib|^ ziHna;2@yTd^h1^?!-`k*WGOW7b1TNUJ)_zpm55#asq;aX2X+Uam4MA81(O;DvfR!|qUSz7dI=eG({x zq&g+CG>eiQ-!(cDk;M94PeFqnlVmYdIyr`)90Uz}9RVxFE%-E`s8mCxX0xoV)nuVKl0p%vmWM}4xw@t*Lb#a(F0QdcebQey^B|T|X 
z9kR915zf~~Cwv{=G3NPK8|?oappLg6#~k&OJN>i~5g&MSHbeChr8}eibhAreGa@Yp zY|d-)z-Xpe!}>42k^@k+Pf%bW01LQcKmfE%m7G}K;emOEa7X?;dv*2JDoKGPe z7fTK;XlB9(C1R&R6tV(PmhhcuC{RJ}s|&*Yk3Mf?cA=lN{3!3hxzUNBs!2}u*-sem z>mIrE6qfluZ(P=J`Xa}ANDvsfaeuAp#ARI!Krs(#wk+8(pd1q|Xn-0n^s6X7E*4LJ z=UMWb?ehnXRjbR^`fcXlBLT%Q0>pyrD1PCZ-+!T|V0g-HzErhg-T0h|SMkH~w-~3V zk1_DT$YhK%`E9SfQ8^RziP1T$#b;%FiR6PRctHS6Y#r@z;&zm%r)jRGCj_jyK<+Cm zBmKVI7`}M^*cL}a0Eo3`wT93P_UEwNHGjqO?w$oP*Lvm}9-dP(yS? zU%b^IclvN7ov!O_;*#lH< z4#%r!n1p(Kq@t~?#kKs^MH(N>2OFVxG7ata`?#}e)h>LkBt5mzjeq~jh0jS5fWIfV z>lNr0zW#wtIF3)X+0GlXXL>Bw@B*H*MG^J71eb~FD2nj)R65*2*<`;n+@^p} z*%pPIYIof?TX~PC;j5;;K4wXUab-e^drIxTeZA(Q!IsXWkGe)$o1-G%nH`lBMCS#T5B_!j`?U1C2=(%c~$#@H%A#q-TIWRB*#3Z=|3Lv1-B8-=6RH950nvL zz(3`Kz*5VLSKUNvVz6 z;Wv^YZgV&0f7Ev9p9S1%o>1?|=OY@$OR8!Sh}+|+qhc5F;I{PVhz)9^bNKuAgKD28 zU2+qrkz-@$pi?$=FAbVwa-XNstDwJK+PU@#&w=}6^3HMR?(KNC5&mqF{paPy1UA6;85CTqBBiSw7uLK{@e8)$bio#Ev2LrNf2pcypyx zm{l`AJGBEWrbxT?ef^N&wIVt$ciT9$@AAB@ZQ8gE_!7$t}RQe(r zg#w%EbXgW9=z9=bBn2~h6Vk0MeO@n{{OD$l8Jv)SG*KCBMzQLSkPgR6X5xrhUA{e) zYkIBf9gVcLOg6%u>|~ufn?o&&zR@jj(XYK)><8~sFuyo@i7@u552<}9+Lmq@neFJb5vMSpTVfa2&6kQfwS$2(4_D^6(*jPAberQ zm>I_@RF~f08AYt8)Ozs$SbJeT^|%j78z|8hh%97z`y(1aIbYM)y;D4|h9y*P9oHUv zeHLKQdB9-}C)mVPEOf~=)aiqn8UVPMhBp3x7k~-U5u)4tqIs-$gMpY#gkZ`J6yM2z zQLFU%MHi2M=MQ++zWq2K$_T(~ygx|gMw3F`jAB29oEZY+s5l?~=3w6aM_Z)KEo65K zh2eqU^7HrwSkdb!TIj&EtR+PrP)?Ukm*L_^n~ zS3!HVg?VIV$Qhv`F&P}EKSGkTli`)g2+O;>;W02`>)qATkgU(XPmCLAYL=*sqYo zZ`zZQ#G9mglZz*KV_N0dcMZ`E8!2zSLk<1<8Ap&$lx zEKMhA+Jc%=FhY!GMLCWh(ALl`w^e?9cYNNE`H;lEK@U0t%Gt0G5lGwExXP!!A_7a@ z)A{k-*6sqS{+l#R0{~V0Q7{Z?+?CTsm5B&piAP%3LEeJS->`55@v?vt&H$7r;yrgt zp`(zX0lqKy`gGP20-0@EcuDSrGUVO?4NRuo=&Hk}y$_aZO{|zlCHV6IFP+$m=!amM&X|O4hhD#9- z5^m*BWTRdoPxDVp1a~1mqxuyQn^uaz2AanWDHcO{)PJ_&abx-RpKYXZmX$dUUyd(_ z9DIo=AcXT%F*%!Fv#!EM@Nb3CgVj@FOE_TgXUpbG6!{_&iXJCBmumELl|z|+GJ4ts z%x&J#r`#a~3@^lok6<|EL+cnOAk=5USO1k2&AcwuuNHtt=qXSaQJId7ooHYujDXzx zObZaU){>o^GTZg=%Kx)QJ+1*E;{3qE{9=vRh#N5iUHPQ)4NDuzeant!eL1C}Zp^Xd 
zU|L_^Hf?I#Wjf-n&L8VOVRJEVzbxcO9hvQTVhageDK*E@eVZ z7l^4Aokz*WzWtkxU??q6d7j=`(Fb5{XDaXWS(tOujrh{oEkc#V8O;A1+$N97{P%_A zp?;lIQQSbYnx`#_!jUH`zt!d3SM{pT4IcIPm}hL~^;6hR|A^)wHF~A-KWe%CJaewN zsmlhwibdD8^yGy-QT0^1jdqh4^Fzx0f=VRD{r4jQB~*2*aA&N*hod?SARqDU*y(Kt zBX8M>ZvKG+Q-*octIxdp=j`O9`HZ1!(kZD!de)O#PRQ{Wi4l&Wm(hqflkBGhs|7%=K4EP;(w7Zw$gMScU0q&{8x`r z4sUi$J_{NJ#EE;I$iyXnVTUxdbV%X@r${@-)-QMMjzj;jn3}#rPxzw62QvfL$F47P zvm7GTCsGEHdCFmj`(1#gV<;wn&@MJ&#L?h}n{z^la>rg7O$;;6nwT-d?2e>?rXOQF@0}B+g>DFnq)j}w4iz9c zVU>#?Ho7mP5a44StQd|b+0~+Yxj4JfDXX=~;LCle8rkbf)WVO)fljAWekoq9VMkt7 zkTo8V=vV^)8^QocOp=A_>v05$)X#26e(@P?nBYqk*Z$h-wI+J-hvm19Ki@S_F@MBD z{qTWl>?y$yJBU%S`&xRwrln=1F=oKt8b%@t3|8{Z+X06X(EI6G!;sxCfWT;Ie~`iCZ|w z%Yl6_hM_$Q=c1BD9zshwR`_owh2?j-DD|bSW6cEry@`>+geHom3$l0w`EPuq3hYh! z8Hm4n;QAj~WH{P!Zp_6mu1gt2>rpae=`f-0Hz#V)cM$G-WzCXUERIMUMcrEYbzWK#C&#ks&&)FhDWxS=_)uRxR5*bwYt0o16DEfG|twaigy2W zlhWRb!o|Y89FQqm6$eSANx?9qd!5(UO37^TnmP_VDLXE=L!wn)K}rdKc0|WP(_Y!% zSV;Q_C~_gPDJ_6RkOqRo`;;#mwxR;+(B5yCq1X#i$87$cTl1p<5yTJ4arQ~TG7Kmx zYisj51SS#UYpByZ1we4tX?j}P%67{A5=c4h+NwzhDflyfL4uzfk1TI%P*)8%E4kVesseaW#oKAHe04$|3ZK1~~do z8tVzY<0Yjbi-=~olc9>IwxL~1-z%#W)BVrBH>bs;Pt$gxam+={&>SP$2S0hpqPD~; z&HTS25O@;TtJ_CY2U#5fa7u)}CdZ@7gQ9bjLM!+BO^3$^jui=Yk4waFXgmp85#xXQ zpTMkzr$s9+gx(O0vZaxokC-0InOF~&b7}Rw4}0Z;*NlJ|$Yp~qe_aTGOpvmZF`?y) zXxmPl_;7q?MvZMCfa)zhbJq!L<_Yx<{tawWLgfTPh#E>O4(Io654H@F5HxnLF+w3e z1^FJ^*8q<~tQQf)1xpN0-wuo_Q-E@%%4m2fmUL9nZh`_K2H%o(zGhtz@B)$Ho z8!)aQ1HlH&d`Rxd+`doW2l*y$T#&z!76!5qrT3O@TS|d;Hfd_aT_6KU1-bnB`tJRz zQV^!KmKC6dJfqj13xXycFuqUrUn_ouJ~EP-M!MpVy8^RwrLCS$3XWDUVZ2|})Q3=$ zkfLjXuSif2rJRF6#SHNy9AJ3$l)|;Jwz6Z}u3FH-U`o{W(R=73WC3SJC!{a!8^JG+ zB!JsJhYfneDl%{9bYxRxy#O7OS(*m61Bv2secA!W3s2c@VhB~YjE--e{lYXYZarut zaO}5ud30fwNV5&3>A(TQ%p6At z4wYtL`6;)z*`P$^i$TQ+S5$Vt`m>B zhtYuz52GMXebD+UL>cMS5Y^fj8Lc845%&6TiMPK}uSFWFUf{>eSl+q+vz@Aii z965V&bY(gr7V5Vf#!pXhBS3rP(8L%8X_y!ui+d7dZC!&xZ?`o-T!D zqbr6@!o=`fjvk>Le}Xszi*27BUjOGOnbX!J6Z;(sq9gAst4o@lZ`b;EqGcuWWfM`i z%?U>q5Hf*?5WpOHyKWYBiZYvAi*cdr`HabDmT!huKaA_Nzj+UKW7zb 
zFX;#{&kO8%tRFW)6epEfw+>W%xu-3K-0u~^5c~r{vM$1ME$*Z~U7KejraAfvmBW>m ze+P$^JFR1x`jVNSJlc^+X46(4E&Wb%)t`=?)4kTiJy^i|VvM=JEItXw?;}xgfG%F+r)sI0fndJrW zhot6X1~8|UE3Vi}=Uvi5*UJ?J64hZuh~2@(FV}{T=st`${yP}U_VH1fd(x1{nE|f! zJ&Os6Kpil=r|nrt^m>zI)-~EU>Q|R|M#zdqY1<}p(_`R^bztjl>YgEQiED4_%ij9M zAzY>hU)!ZI56(!AkG3%(l&-i8n}J0rwe@{ zq#wTZ1e5O${_gy11^1b*)@PGiEwNuH21jIIZi<7dm`%!NHC$%t!&0io-PpFoaxdPJ z!cDdI!d+$~H`kcoAj_z7QzmxyiVF!%hLEJ&KxxCn5xqxLC}SEAzM2 zG)`qqP4dw1ADzmae9F}*?ie>P{>=Xx!Y(LsL3)F7fohR*6{O@NkQ;HqYdw{T*Yr}m zaP-5(*k%Ago^iv`M}Uos@Gf-apHa3QGpR%=gGsSNMM=A!6;;hd68f^kP4j(hgCoft zi$k^2TV8IN0+9>XV8lCB$p5fGr>~#7Y+(_mZ{)$ScHB2riQ$ZW&*p4+=?qexudGiH zVii3oQf%3xUl`5G>&^EsoS_+ORY>C~3$ggAueHO3*V!3`(OYu(=pP~wf|Sm35aOCv zETqpU5_!sCz?*=`+&*n;EIauVf0U9-0j#fV8RYFE{NRU>rpc}z_-B9PJl#R7dkff} z?A&3&IkiSEM`-Tn8=TJ*h+-sWXJFw*i8bnPMg$l?ut8V>=uOCjKdHI&^jo;ew%a0N z-&sv0^u;uACFQg67exAORm^t~fL)H)IF&Cx#NxpWms{RE0q`R3pX*4d&Q0|#763u3 z(_rj0g64q@e+u;#H$sJ#9-z4w=H#Y+n>k{m-ycgKKCzcKUMm`$ouV5ES1vcEem=p& z7>#D`B8S0hObdeYex9|vRQi3Y4^&}*t)A;s?2}hx#e^8wc}GUF_v&2*T@)jX*Tr{? zWJgz((;5czBJ`@+#rQFLVdB=~|3E0_QpOGb3b?EV7yyhb-d#1*SaVHxF$cDPhiJKx z?lgsS5(*}QI_QN@+HvhzvO;3LrkvQXf9^a*Mtx4JYZGQ6$i_dLYbGE0W) zydp@VG3|S<@)2Is5m4poxIe$I(sg|&v7y&NCB{Gbo6!%=l@%4>e5WC{;RDn7yDFH_ zq{^-N9dYp$!U>0P?Au}e3-iiv$?nE!(pj4q&@||{&^&lQ2&Q?W$Qz_l>JJSww zr-j6ep^_Gq_)_VBY$gRpNLgWe(6EcU(7;wQkt{930X*wI4nk*_=3 zekCSR-j{%%WD`AvP2vOPaAk>=k0@Q*razy2)C&suiZz(4hC(|g0TU+=DM%h@Xv*!W zwYNvX@Pl6`OO9lEjY@)UEtcJDu@3XV|I*J)qV9A-P9vcA+9B?%C=O+}akf*C5OE>g zF8r&-4$RdvOo$;&Y&Y-iZN!nlfBrc>Z1V;JU$1)&6n%ebG?i6TSM)7%?xBGoF1Kt? 
zLGkF1o$mnppR`c&5zVAqUFJm%$EuT!+U2<}D{GFfdb2q&V0k(UIb7sUu9X3%*xLUU ztiSGb=DZ>Zw%G`J4}eqDd2OIPc>+_+$#)}-Jc@%;pf&8OB2Qr>>nzjZORfc}pUh~& z@15$k!JhV-k)Ae(KAuoWa6mBJD1(_xuRmD_(E$&4JX(aiw-U#j)%IW?{)lutfN>^4 z!=>0L$04o9h4b=z8<-o^)PbFYLj-lP6{q5i4Dncnjn7d%)!Xj(sXv{TppqN4a8UfP z;!Z{yNf^q;+N!cvGS@=L)>(KX$ej!i!62UH-b9nHwBA6Ky*Q4hvLj~`1%QF;FUFD{ zQNqA&#e`0@w!(_{(uy5LYuKG%_(`CHFvdf3__Ep-l|^oxxay3W#$OJEmh0mDmct)A zEq(gD{2Rm19VD&LopjgX+^M&55=mP=4+|>}Z4XN)hf5~`HxUD#=Y32Ij)4ygWF~c0 zT)T&dAEVZlI2+8ZR`hRj8&QK+`IhfgA9Q-H3h1tErNd_s6NxU-9Y1?(}{u#e_o-&T+8yKNVxkBGdCf zUP>5v-ylQx;s}N%2r3)1dNwljET2M#6BHp-^WRtHKc22axhNlEGQYaU0R(EZ<<2Gc z4lC$=JD8-s53|(1M~?0nHz5e<_>=ABX8z0wdU897l=08>C{lOUv}FIPbcK(nw7oZ1 z^XSHm>c+ayzSaNHYCh_uEy!AK&gEXcW2AiQqlw#6%El<}#$!G9`N40B&55JJlzNIhsbbgVBv6@!{>fRs z#A$fbHNHuePWDJwOc8(vyxBH?o4P-M?%k>GlkEMYXf1uiw*^A)u!R2Qq{L`?YAUz# z?Hk_OzZKdIus%C#a9pQRdO#%LyCl(FJZgEi|B*&+EEZ&l>I8Wcyo0Yy$o<9}X9TB@ zR=qHi(#QA&CcV@U-px7`o@U9}PI2ONQ{)2)!l8Xq;eKzY`EEOgGCwWep94;{g;W^V zdEz_yasKz>ca9dTNF$FkhT2=TnT6hN>;(e5RXM=h92x6(b_h6_syyzJMK~>84$KJ< zVtjDV=iDL?PloKvri?o2KcrtuoaN{p4qT=o6iZYtof%RVq^8-iC`895#HAma=L)Zi zW5T3`p>s?f$f#Z})FP{F0#vRKuW3?uL*^bl6^8}|hJg@skxC{`fZI2+g^0bUyoEdz zGKE+mK%YH8OJ{7v4={-ZStE16rKSE_UG_l0xV`k*{)Ow!(iD6T%r-{{#-!`I8LVUZoGu%hbm& zH`h+z6Rx(^JwnriMNkhFw~4&*KmuZ@r#C@z+-604l%Ohs{@pF7Q`n8UUSBz3^6*34 zr|_(=`0)?PI|uKzIxf&iwG>mbQQ6?v=dByQdY2sS&?o{VZl~p-6h+)0!_lEXL>6-W zTDz|+m%|R1hi>L-qh5($P>SALB<=wKHMo_G7lQW2*%;x&ztDKThZ2fwQ!V)-dy2zy z(1#UG+Tc|#vL$j?(iy$MCCbX|X)~kVL=Zff-2-`bA2r3ZP+4mU%yvLpO~KjHp}pu6 zc7#Us&7Xw*9kp8dXCCTMP_~fQJ7|nF!?MHhvAGgnva=IIA7<4HB6?B?8j~E|!%zsh zpOJZ>kJ^B$6J>5d_1q!GJAqzPtS$tUZ@M?eLKZ0xu>e_~@0SigqxTQ^j|rQ9%hZ}P z4?Tv5S!+i(JUd+=h0{aFp&ORCnX4Lf75|Vb_Q|{HQN=XP_PW*T~vVB)Pb`tzNF!q~%?JcxImq&v)r|B9O83An|!A zQkucYZw{=c9d_u6Xi!%=S+Q0G!pVp=+7X|hs?Z?ORkZg&Rp^=UT;eb$_Ja~}HNf*6 zd2sskKH&jB-yLp?n}q{3)5TGj*Kd{QZvm)Qt3qY6P($P&ET5i?dNCHw-0OW+v$Acb}dfJdH(JD zd?U*kvOk`mHyVCem&i5vw67+K38lAeCAY;tGTA2A8U?-QEtHUpJ@V-6_BeY>drvo; 
zSIs=1w$I}1U+uAm;z+hbw6gJgF#Fwm8EY{{_0E$`|CDQq;qyeEPvR|femI4lqWvKA zrxRanXWtF7czFtQ+%y4xgHe(X?;*8i3mC)BvJ~yt*_p*O7js(ZslJv;4rk39Ut01a zcN#w^9HHX=dfI7DTDlewD?>88Xwdn5!rl9AWHbF|Lx0@d@joK9uN>V%yGWx0VhJ)p z(Vej*7zG*rT=KW1!XHNr*Olgh_)#S$OCh1j-+kaMzDl3* zK6DjI6z=_Ro{4t^jvqmV1BU+f8}@oQw<>->XNC6T(2xz!Vl$Sf2K#q7y*<97`j6Vi zt`@{hjTzJqvUjDi%1d*(DmiKYk(NzLgh?9HejObf$wuXZIjBBlu|qA|LkT?2?iI-w z{c)7GiIOO0+k2?D{NN{fI=4wgED;y$>grB#VVBddjm*~bgtFO=Zx~072c=aTQKG63F)6(?(G04%z;^rN zz-#Any5488+#;dH=j_|84$kXI?)x8Z)@bv`JZETH#@!aCmvN6=#R?p~6?dNx<1!Ju zDK36#`n;y469F#&c*PsDMtERM4lfx)K)`twb}dfELr?}|;GpKq)p^=cx?q6zU4iGN z*T9@(<&EE#(L&46{#H@Ki>HC@Lj=vux|T7b-V)c~ZR4Dwl)~aj^PcPV>G-GXG!)KM zfrq4#`2>JlZ^2RIaB}YBmq~te{=i2Rs|6+Fhu>7KGqQpZ*mH|oKw#L(&|~L|@6OBY zV+-R5jpUrn^U*F}{DF5qO3L!!jbY@aeAD`D3gC1>J#^h#*4m>gbYNRm6yj<2-ut8?#m< z&zy59#AR1aV<+?lIeyGt_LgfpcZD5a^4KURRZR8~wV_oC>dG!2Yr7s&Jtr)X@2gy{ zuC+G{wG2hYu3bJ9`=MrNXzMw3nA_S!Flc$5la+`d7Pd!DWJUCqmCxN`&=%y=*@q(n z)pwv#HyFN9wlepz*eF6eLNPlGzBK5J5~}?aeAg@9tnM;`5OX(3r?!cD{$3j_3$g9; zH+HLTw@{i^i3J2cWYC!OaWnw|UaDreJ2L*9 z#1~sPLmBP-P3uX#ga5shEdQAO;unQ{mY_wO=2V-Q0)prMFrAvY6RxMYrGP&EM5Bs1 zVi=wYhSebirUW*0ld$ZyqeHJr#99 zT@56sLHqYPQxgw^1guW+H!2ZgSsQi}ioMyE|8_5rrA@9bds*A!m2){S{-q2@0) zw~v^*VMNJ-v8>2|@{LHlA&pt4=j4t#ccQf*8Y(_6zS1zrIH~5!#TJuMN+%ij#bdSq zqU-nPchj0)?kA;`3Y2fd zF9)y<#1GvPVZUe1d34s7-y*TM9YzC4D7*0#$uUWT(F@9bhl&m}LC7|d7wj!PwnTs6 ztLVK#A$W|$OsW(BFbKq;x0+~RAluLZdl5BP4zE%mCA&{MHu;{Rbo3#Q)iF8mIqfQA z*~UH#109Vx?NGy!XK>l%hCY3OlU7e^n0G1>SfWXbu!)b}6w&(`OEslfnuJ7p_D z2C)lstJ-FNk42I)K}htV3O4=+JZkfLMct*{aoG_v_|)VpWCVFP3R|W(s5~g;m)ap@ zcEXddukBGucX40ucb4r;?0%4&uSq|ygJ3j;6LgxE7#8up*{f%5yWDePH~`pK>!3?F9y77 zH!-gVfK&ozs`cdDMc#QGNI6rfwh1a=DeM)c`+^9|Rc<}?F8@e5Z}y)^Nu_*w-ZfdV zy$#9p@O2W^O@$DQ*I}U+OZw}A-9hM-!0dv(FGR>Zi|Mck9@oBXjMPcr=TUN3A;hvt zCxDJ_v*8EnjRdGm|>5yuZWwL3YLJx03d@Q-K2cRbPbvr)H`R<2Yx_Rq2NG2gQ zjz56#k2@Wnq!F$r53|~{TBD_%%R98RSz-I_sE3QF-KCo~S1s>CWG=+=(wG)2~Dl!dX8vih~HmSJEqNj3b=l%`Dx`kx5)n@#c?{%=6EKR z@%7NAmQdbqO^|bEj5BY;J3rg;%S7#U?ScW0SJczyX?UK5$>Tc7Le|UUPR}RD!={tg 
zSOdoIq8Vm)=UdmWT6ii}EUNDGUJ5}6!tXwl8uD|#%NNwX*Hu~K84D4jJ3rV;%XMC- zj$7znI!R0#pig4|8P+0puB$q2p;pVGX;fRLD_zxvq8a?@wyhJd`5C~2ncs+~= zW4ZCWzLDVN2;WNacp4W?ocE1>fMf4CSuB54Y;`!%_1JOYfu)4G<@YF`pAKIx6J)5x z+;eh&Fgg8UX6e>sga!oeUu9LWan;+%ZISu%rcAz^y|kL5NM5W`HI95FsY=iCzSucS z=tXd`^ljd_C|LNC6|fM&71)db52L!kG~;wp)IK>3QGUY7@Jd{AK3ehEogOUkYj2Uv z(g3=!#hs4V8;FyCBYCg^UT?}9+^P%_VQ~b%>-AJthBh%+g`hpbEo*TRCD!+`jL82e z3*6kimkGfoXx7Ze??)hd7*6L>yydiYT69UyIklG_~|%MAZloM1uwf|(7A4PiOSHmAtTmhZ`JQlnH~vn;u0Byf2IdP`y(rf#3Ro4}ZKs!6q-6D7a>iIe#NH%JTe|I`-uf z06@2)9&lE&aKO~`!@wB1G3AvnI81mL!=!re^nDt$6C-7%jn0YsvZBi|&uoG#9)Q$t z!kajSO+ND2V-G`MN8CW2RDTl>B?dL%|Fsw16JDNZZ}buOYsN1|@oD_TQCjbnDiqYG#L`WK(6(7h5e;4M9MG7}h8pqb`jvF@G-l5WX_`Hm`p$j;{ zu&S9bS!sd8Kayu)GfLjsH>9=X9n^-9QAM~srt%z(qY{d2de<)Fd`f3*VM0#y*-?m= zStY)GwpeO1g1PS*T?LKs@K|X^%-1$PL2D&j_-zrHG_?^d$P1Fv>h^ZDRupv`zfl55 zPkwovtlRW?-McvLbRQfeSN@K_o7AIsmf_D?M~x}!Z@(#2O8Pg6N5vrvFzT-m1LtElqGp@sRo>Iy87Kz@fyw@R*Ds>oB2Bbb*is>ih=^L0lpXiv9?q}q z2VoSla!qV*$_q&tHnNy`1tWX%iur+d85r;@vR^D;-VW;0kP+6e4Cf&sC5*FDM+`3R zC8Z)q0(RC&50N3W$X{md(GBsqd!+u&W^(Hm;Fno*n#gwySM@5Cu2_r_Jfsgqg#3}A z6dio|`4JxrD-xVl{VpblYt9qJE$*=j%wp>B(U}UapMOX9*To9poBfZhyxJDw-4>BfqXFKh%bvWHi-2OsOLkc*|0urG#OZv-lYcdon=*w_{dgM330Jv zD}5&pG~&=b9}Fu*2cX1b(qrh?1PjdDezDnBPTc@t!Rw3+!C6vDfda5k7%WL?x&oi!dH%D;e!oAXj{TU z8k~FnTObY+Fv$PCA00i@oEZz{EkYWYREIK^(NJ>$n)`L=Z2jI#dv2nC&(>`|+Ue+* zU*$Xu;0z^%&xyfSu7@aqQS#vTxUe7swUb<1e}lrB1>!w0`Ig@=B4BX|Bi^Yz4{$sN6> zcYmI4_8&T*sA>9RI&MLf6cW-q7|ISm8NPvm9@e4FtYbwd#zV$ih&8r9g5)FINHiZ=VuJ~f+xCDFV7OV zmMjP*Gj5mqB(gTCy}TIC9Rh%dT)M2Ax4CZ}SrEKL+%p1Bj#K$w>!RTc?p7eUPP~P=cvt}19tv7lDFT>uitKPJtKzR?Re~JU|btRNjO>n8(BWHO^Hm< zDqYORO;OuZf5d`0BUOd_g@Ert)kv$p;_21O=*{B?w zyn|+CWB;l-O>mqQ;F4*zyi3P6BXp=6fxtZq8E^5% z6={+dnZVpm59fElfA8CH4Qjt)8J_1QC4DtCI_LR^qZ>9pahnW~OmTtdnva+eVBQU{ zTE9!8v|a;CdXvxMC3LlWpHX0F$GE`oZ=?8!q}4J~_-k7Xym24!;}ZVky?VMlnkGo-Gv0fvgq3ReEMk9V1TpW(>wV0c9023fYNx&npFP}tH|UT z4G4NoAf0Z*P0P~829Rzp(69Q?6I$M@gW8Ah1abo2d^Ameh3)mQJtYDL`$eUr>W2 
zLfB_;H3WI0=>G+I;yS&9C_{)I(%?A2UZqHU3FyiZxcymoPsR;8%6Tw5Z7B}G5XLXhNKv^HD8yv{ zaCNtc7H~z3_#bfy|*~+7E#0EZJihP>Z<=;2E{SFR5wu3BNZF%M9P$H5vrQ~N#BM)cm zF}2VsF(Hbw1w$BtC$L2`E48{$Q=eq88~ezC@fq`qlYIo#NcW%~SumdC1#*lP4uU{ZWs0hAwBAi&WZ)=0akqb%sWj^EL<3xy#2XWn_eh8m4U zhjEnQcWgki{eQk1#Bx(_d|#(sSfw9cG1Y(RR!+n;10I*x>U7owt3yN2=uT-{Z@eN; zkm*nZ$SgB*PT^AMTV(R(U~2G1)LY1+rwvYTJb>-h)6EIfTgf8)<|cOf1}XI}0Qric z$>3GprCs1ElypaML-k*#S3Py`F0>;0E!}KBbyVQ7#E7ovze;B((Y$ zNO^LWq~YO>tRkOsbM05=fA+U$t_NGhXEDJ%4FA!vE#L|Bl~$kG?^mDod|7a$m32Q^ zjjjMZQk{5(!DjzP&9of5n{H1-zPpH^wAo7DG>p`=J=u#L>@aY7hd;gg0NWc)+LjE? zQrhmP%FuU|QQ(`x)J`EBJi$&4Gs%aIe~*(S3Viy;ic<J#njXk=PIb2qHl z*S)jv>hXLeS?f=m&?-vPgbqlYTu4^X_-)Hqm5TU_uQabyTo;o_xK7_B4}BvExX`(d z#PM?#NFpx1>)?{GeXUs5lPztWp#wBtyFu42wypM&7vxrx)K$WFp#u)pI9ZotbB6l$ z+ba^jsTt2Hbtj|Z+nkxUK^arvXmWu4M%j;#RkkcvL6ebkzhk}?zuEa>bz!=xcajj*y!>|8a8)W6EfU}D*@5SPdlNTg;i2_DYhjE1<4&W7u@26Doya{anOgj@F&Is<{ed@7+R3ta*`V&AIa;x>2l?~ z^-l_lS^h4r2M{qc=B$clEumOU4wg23{sIxLZ#SsAG)?q2AxNgB8v*f(Jc^%iMbmEW zxJ!LKQc2{?GbnQ6XwRpGshaVUI(4x|G%KmAOaH_~~YqHf$$9SHiX6V}Se z7dD>B$@Am-vk@+n2DS%Z)`~o1Dwy(z>VP@h<^pjpBM8oby`%s@hlT*WSb`698VDkk z1z;GJwNp~@z^G48qgK{vcKRdO0S!Re2)|*DpYDl?&PtXUvjk(`X5ME;8IM%_^PNBp zy3%eBeZ>c(#r)B{gP{QzOmGb_yf7Dl_Tv!%$lrNbfC$ZBEXT8mJJka|zM1M#BoZS* zxo}rAF(J{U&UZQNk5ty5h}s#kbSJgtewcLyAxg;H(0}jW{vn_aN0*1Fk+4!;d;`iy zIj(6?0FXcg1M}ODfRH`Pspn9yK$GZ%MF>EdwubvgCQbNrm6O!3l!3-CvgoG_5aK1G zr=!?O>?k7npT3WX&y~xF2SOZ#>F;*mi9xSQTq-qL$+PXq;1P1>OiM*pIJW!t+DKmz z(5kV_k?m5%t?N9VheVJUe$i2S!+zBXh8(<(lFKXI|3qLW>OMe51wKRuIige=_C&Ed zsy9)K9VE5u$SZdy#v9gB5{5)P0~`)PP`LH}W5#JiV7lq*nK7EsE3pASxEV`p7;r`&@*m z8I8tz4?cwhtJT-_M(0Nn1JVVC#pr+{)ES5`wT3su zNwK12c}0U)fBG<3av3Oq?Aav)1^{mJtVKgVCaR^!XM+0I3xB{D?(ejtlOf$*F<{%JHXqf!lOKufn`I&Eo*axi3-v$XL5f zVHh*!{~V!WQ1(AaDZ@LFX@Yaam*NZoEE5@Be#KwMF&OB4V+0dT6mbI1LDTg+)b&|P zv`L?%6l>F2NM=naUlvbB4JVw=Zh+}I5B7rM766I^nGA^Mi`crcXh%R__spPvSThZc z1Yj&VM(DuLtGysf{db7(d@+8w5Aa5UVGSqD{mDe|55pV`Yui2{BC;lxPs{}ph}$Qp z`B%Lh%baqKL>Fsd)O<4BThy=!0E#T|0Zd*Deb4KqhzNqWM|tGZ0OfY#Oz+To4?=Yg 
z5G2_Sg_Y)+olXyG=f51cq6gjCAB&knINSvg1)N@=AQzzp40q>)czya*w&3~8By{e7 z?>%L~^ArId@|Tx?NJc>LD%+WcqGlxHtAez*1dFZRZ449sg#Q_M1zy7)ZEirwd#lke z0rzOv-Ts*prJ}@K#&%qOYZwV-j}-yjsF8y!buCBs6Q%t(Z7=?!8xef@{?AAxhEPO{ z0#|muO4(JBi~B_Chm*Fp&A&S%St2+aT!D8=zMT}ytJ5x`0m0!CN|008!-DEMCX!u%fwX$k=z`eQmKVFW_a`DVEScUB#a413+V2l`e{|d0sNf0i zMzpd8Zz7W@b^VUCf|Qo2hX9I&V_U$UPNeem($y>xz|Bzga&%`k`&7r3;a;=yl*e~C z8QtbRJ#*6gEz5s^NLpJD5CCRr>g;4JWJ%ide2II$Fe03aZ*j*%h2NM7J%Tyr>Q68r^2F6Omqr3y~db| z34nRAKC4sFT`caq0DQQ%OE>eF=G5vDBB%P6xr&`hU(08 z0eo7o|25LnV!t%-+#6>B8U)`+KA@CWF4r281&_9+!b0fI#u~X1`t2*Q*aqk6lM@QJ1unr`0sbR zwEBuGjztxKJ9DK4UA)S?MFe;UhhY!bJ>soRoI&AksH4OlXr7&q zW2>k~cWhpit8;%%LptgXc)O*m-X^YW@&f$~z%z{A^S`4vPDIVJi5Jdp(dwGr9f0x2 z&DH=w*c!bPM`PHT{7mM7I5Pi|3o{NMttbs0pt2|SKY+dJWdwyexe&tL2S%KP68+Rk zYI=E&P$Ql~S_=g|lX3=?;!&7I8{C9RtMmKu^-+)Osi53?ui@MXI@fQf@8j|`+N{Cw zVUS`0av9?KEa{!}`U*;-_&6N^1;0nKGV`DSI$ci{gLLwxoOHtCb1T&NhwW0`!kbfb zbbnM9~iIAkL=T)L~1R$Fj!Ixvig3XYz_8HNGt<$m@eSJ$T z{P#mp2#+aG07it%zOUr2ju2IsOGc1I0S@A9ij)d+nCEFmc9!^4SVsqika4f*<{_Az)S|guYI%lb%6nKEbvx~8Z_*;kw0fB&Y!eG zOfuiR1cMZ8d1iT0k(g*hA+}LZ855cz_8fW48>1}v(Qx(0Z7fy3DDl&T7_qo zOTW!I+E63#IDzd_ z)r4yn=?Il!v)2bqNEXI7$oaOJZ2}~VQ=m1sPgM9+io?_0x4kVN8@;3?1tQFxpMGK% zj)@cyMU$!g{L;! 
zk<)Fa)34$eF)n@mls$6Q;_4rmX_?7>`l}r)G19nKWXd}F$C*X?C&m63eO`lC^vOJP z8-6~hvtg>#YH%R^?H&PB@-m5ts*;>QJJMMr2SlQWnc_}rbWH!D-Cde%1d0ow6?lXdjP54{VL^7HUSHkahw4 z3hxoy-psp|tG3y;hXY5IJ(^)9#k%8S&61izO3!Vv?(^tWaJ%*LTH7rW3pM}QzP80T zyw&D}>!9PX%!~|9m~1Dy$zkZtT(bP;%7jppGtJ6TchQo|cjpXw2RnF?)RL~AXfVW) z$k=G}4*Gxj1~G%N)TOl+E|`VS=)P{ju;zWySubK)+2WJo>PyV+3pT5YHK*-ZmEXVH z?o7n4kC{jEY@+8Ho58@#V_(({FDqZF)1|^^!hSVTxOo-6U6ZS(>9Y=5ngnDz_u z^*6ps)SnY*j=dnTo8@hW)ti{sTf>SBP0G>B7uA=mk4y6k&^e^@WKOf)!vi69{Z%JRg^282xQghzuKao-<7|E!?ddx8B-X!aH7sz2gu3XqL|Qe zQ^T%9)=*xB#$4>_{T`^}FtpFK)!W<{Mrv~C>|c$98!=cTVTtN!h(xM1Q{XdSGTx8h zf2^$^PoI{Z3x5cY_Y3s}B>rsf1wn;fRS!)>+xmadQ=(#%ZoBHy>JU@+$EJNbDsp%r;B< z^-!V09@;N0Fu?3Iezf^K5fe~8`V%We19;@jRiU9{XQU2ZT2>v?kiiYpL+#>SfGcTE z0R~lWS!?aeVO9(vNu#MkY&9XykPyMSo&F^^cZDt6k%*=MtnYFFI}(8u4C|uPry6r zJArcz7xA8yxUR-+YUdkmBZOi<*$O!PD@)IS>pEm6Yt;Kg){Pxh0z?Ffd8s4ITqo@~ zl~4Qd?&T&;it^ps`JmFW$HPHRm8l)0&S>fAS-JW3PFl@4iQ{ zgtBHM%=Fj4uWKHV(mCXYU;vY#eR-Ghp{k4~1@%xSJme11p*9wP-h@uS^RY}53c!)! zUqFiGnC8!ipS8O?b6!IgXY)vA)i>`wW@2DhWc8XkO0ScFgZDrK-t%^pHz|086G^B* zN?j2Phyx(=W~T?jUi9`BK%<=9a<4oTc$6s4omMluy8(-Y|yt>a-)r07kZ@K3sV;>}#TCm1#ZgnVm;ej$qUut7?uh=yv ztqctx%i9-r9x347kE*CjwudK=Lvk#BjYt1UgP z5_c^{1$OY~bMa0mQ}HG*mcZxN_O#oTlJ98Z%StZP?E)&)JHGUYbp6eOL5`F`F!5+8 z<>uHtEUvx}@qzHW_dyWz>nvioH?`mOfOZ^c`}2$q5h6ni8Z)CeTW9aI6PKUrL+zGD zxqmtvl?TvB8-M5Ha}=;cH)d6tV}u!X0Zt-<#U0?WWAerbiAuYoGMy^LAk>R@nQLnm z3Mao6&9{+6^gJgd(l|;93~xn&3F&Jy%akQRQXZ%UV>^f_h;fUV(;3c;{?$x}ML8FH z6W9Hsf|{0>_v{WRTC!qBkw3TrMmL=zasr_Sa(X-1*WPO;;3uJCjBO7oooV(c2DbL*-Vcvz&W#Ax{91a8qd|{>4i0EHI?%zQH!y z>w5?VpR@Nh3<2;8G%;~>Im_}2QaWy!aZ54;u7`uUZG-&kJ@_ksJe`M>EctaCz4hoi zsEJmsXR2PWo?W@-oxB$aK4NJcS(D*~qgP%1;FruChv*p3ILw*jiP!Yblp9$Y_7f~k z@sC9Xbf^KaY!H-ikM*N_0>S>9##8JjFuqQRL|T0;iF$1=B`H^OQiKj=+`TLK*j!x$ zPs%j^6=v_*$)@1@k+tyj-V8p)MC}(FUkxcTh5og?WSc)b``G05CZ`0Q@fE4F^eD~V zKik-fpEdS1I4MC@S*#5Vj5+cE5MAR?YXZPO3GbJ5m|CT~P{P~`G8*e|3|Nnjkv;Dy z**8L0FT6wuQx5ugHt(!Ic>5mX5qq?7EV1U#I5+Qe@SObZaI0!^%^vaiThvf-iBhfb zi?Vzcri+0D@34T$g;)T^#vPn)!q8c;R1M 
zy->^sOLRY?*A`KWf(`s65K-(0CLik73J^`(NGJl8&7_yCR-K zYT*L_Y`M+5r)z;&pFSy4N5(^M?;NfyL!9e|E}z^Dc#bfy*^O%#S{}uGjkT7A@ODl}!d^1n&hM;?Qu1Z9 zi`{9DPE6*xWzSWKgFU0i(`1FX)Q&)PYUA69U`;A|db0p>$$|%?>9__Mt5>rT$ zLe}-D;B9FUuw3;}walBmkN#n7U9Wex#Q4Ex-HY6py~FdfHfI}rhF-OX#%v_|+w9;&d1qu-P~UH0gpMC@r-Dtj)vL zemm^PVKpYTA$qJ9r~}EfDNIGL9F4}!S6grO<#u)(&B{?}4oK#ZP__4<)lc~cAo$-9 zvgxe+Q38LM&~xc6nyBr=9WUhqSe`~l1jTnJgX(jL zO-iY;UoP2?8cJ%(^N4}QG)D5w9hv#AKQ|%A+*NE&<-VVuMYWhR1sgJBe)7ZZi=$d{ zTToS2^&V&{qxGKN{`HbhbyXbCC9T#e0tCnfh<@0w7DrL0!tPcmHAd%+ODImU^TT+2 ze{;teI?(=8dYASlr{$niO83)%Y2*5fBtEK7rji6pVd)}gv7>rNgv(m|e}04v4Ut5A zar!s-s*r0v9t02R2etIvBBQfiMLOYaCIKo)6IsPq27<F=*mZm{F6CuMxOn<0Pqle4%VM6yaUvz*=fq7>7 zv*Ie!Xwv?0K`q*U6eo(?2}GaP_^m`)5{dKK;@mC))ofAIum(J8$@YtM^aN~U}g2G|8NbIQqld9=fa3)})u ztcEkdEc@7;m?0%!1Q7I?27(hEw$o+G+qBdID9GV(Im`%qHbPz1L>jlF)6 z41JL)6_;l|&^^qYfBAMPD&ylyBtsL>lMzIcuCNa}=$uI(+JY?%K8kYx z0vy&KPkN-5kTBxtp_}FrG72<&ChV+D=4aG20eVa+efv6tluH@IqdvoWg&nqEW~L+NMp9s zSsnctD~B8O`rHr=Kp@rpI=bS(kpg!?m~^fkwY0;WRQ026ZmofK&{B1b=1V|P_tU2M zmxvmKBsxZ@TlwOrtRQFvND|CEO z_H;3cdpu7Hvu89ZcNrjo81ZuX5lcZMyHlD9{kwU67qnZi~B1H){jE`OE-aMTCg-5A$bvYeyM)sQAM0zuo=H-bIFB z(JdB4MEU#DDPZnA07W0;-kq2*xtOck)1OM1Wj~0GNd#sw3jwx{Faca?wi+=vse@Qy zl!lcma^a}sc*P;s8eg;=eSzCt%N$bnHJTC-OSWv3v1OUubKJ`OachEc!Ojdssrr6Z zUUf%Re*x1+GB{tew8T$=k3ScL1g%9ZR{x-VOnpJtLkj}8k{mU!X~ zgIba;y_X@18)|)2_E-atBeIgz7AIb>KCpHTeCHXFmfVcf49$1mJ*c?+E&)S-N;rJ$ zmz5YC@@40z%D;CV%*9;H3|bIyU<$Hs5P*($^`n-GA1+~4wx+}$MSsUO`c;pnWx2PE$xfzcTa#6B32~s64Rm zvbAYRe*M9p4xfz6ok=KEa?6iPcfXw|L34qh@Zs=o7-QVIBuM{b3b`49VA_G!zyscG zMo_W^G;(4bcQZg!^cFXoJYf-+kNUUY~(St<63kzA(uhM;W>iKN#E{ z127XX?aG2z{JEP=VN|O__9+yr30~cpRiPT4^tF>%j{TDU66c+m6rQQu?y%oZvetZm z6y?=QJtLg;a9QT)KeY1nXqZfRW=4CL;(s$_JR|w8nDk3NlD~@UlaTF!Bp0dDE}9w| z8iIpq1FlWX98AGA{~7l`EzN)Pe-4-(R=mn=ar*mO>|Yqe{{HW=nWNt#*TZJf5?X7! 
zZqdwAHv=A%N<#F~CDwxq%3=rncSsw)_^vp(FWJE58+G9QNxV_utf40+k<|5Ci2I?u zUVa{ z=ufHa>-}#l7sbR1)?>!VNe!}GVfAjvGqKO7dOf@_aGq1-sCs@WU`gOY$^CLTFra|ib>ff5)KJvt z=jKBRmu7|HUSdM?#K3W%%1y$<%|Ba9&GY$w=CNHFb%XA;(R>EBHcds5e; zcW|N6jLX_+A;60!;RRA&xT`~ee3hGVJZ8R0-R+JYh*>?&mOAt;EgQG2ouy^F_8DM_ zFL0>N%uGW}&757O;=u*<5epdC{|g3oTn6?wQ_tAl4> zcqVwh4Y4(T>LO@42ojj_5t`GctPgsH`W2t*$h(pN%?Y^?An3QjB?Mlozn>N3){4=> z4sDui{;SXarj(gkaX$zPMl|&6w0W2+75z3Mia|Jb+m8A(VI1~n6JgmqUjQtd#f{$U zh=i>9DZ2wjf1yLUc1~-Pzx#@tP>_2fQ6gvLvtpf8BP!Psxon}Pl6wYtB^W1Q2mebZ z^!)I}Ocf_OKEGa=0-$lJC_mJF1`Mg~1*rJbI-Q7k=7}^gs3dqwE%Hz~{3M5a)ze}_ z-%CRPhod^GjQa~G8zmGVnE_!=j;tp9uz{T{OK?RC0ob6@(jSiS}l>=-NbZs$76kJqUZW98pWm?k)rW=v?68b zbVcuDVC8eC8Bz?fZ+}n4p;c=5K5$vzQll08#V5&})Pl?m}Dg>BVrNTUpnB6`n zenzJfkDtd9T|EGy;os47CQzIi=@oCT;mL=NT_cwsC686&z}mDj>N=}(67N7BDL8l! zG>+-4Y=BkA)Qj#{=JnT++W`YbEHKoSFiIzw(CtUB@cNlGYuH8=$AAq7&46?E%8Bz2 zGY#VtrR0)x?~fDVsBh*|#YVp}n5E%*wv)?vwU(J6&N}WUGn-9AQ4!w|%&)ZDR}hjQ zXi$6qlfoLQxXr!tD|Oym653{EKM)T^${=IP>u_2V6O{u#RWS%o@tc2+Ip2 zQPD5LTcSU5W*Y7Yf1=A&Bp`CG>p}E&EiYDDu*pOm{Qn znZvG;0Gg_pFTL1O>`}E5kSww6k8!&5Te4+Ng&dZ+q&OqH+YSPpaCyf~Pk-E#!(PQX zqU|{-E&AlKNUO@@ws->^D9P#00sbO@c`1SJn+HOJ#QopF@2M(Dwj(e1(2kq)X@{db zXd>mevM=7R*L_Jzx?<~f+)$WapqJ*pl|JZPONI6nMY3EC0jOJV3W%M*+3~>U<-C-c z=I3^WPGob@04LIe=}^B6iAB|dC{VxbtgftO?8T+V{KAEk-0Sb2_^3m#Y0_Vj-{ymX zS8qwka|D~~D4>D3*jFouxV}y|4|TQneDGC+snkm}576vp55?Anj@eyv4?waVqRj;F zmE7=Nu1f@tX8;A^_X@KeDWnw7IsmylT+|`GvqUqDi;C}6FTV5I;iw9};)x=|{=^NBuQ>F#h z5XoP`WutH>t6Yz`t`Z*%I__SE?xa6Y=Ar6k-4Lk^k-eU?@4l_eR8BO-++X~|D7DUU zUw%q4h&=1;kd6XAScGEe#ob^bgz_WjyQl`2Gh6E%9^~$x_A}9;`P%Mf(YuNK6B^Nx zk@WbASh-BOO!!{X&oKlrTdz$-qo2Qie@ch1_ZqVW|8+&K>^4&h3HLNAr5WYb;f^8S z?n9a5fvbZWxuG@xCH@|*@m^AU##4g-X#1>@t+Dq=lG|ySL+YVRbaVbiT#(kOPFWAw z?Ps2zPTI(zf9EqxjxCN^cYdf5hDpn5UaGduv^Tv23oeIyrWp?SGy4#eUECSc(@BLj!(0SPc`QIVGuS zOLdPbmpHtf&Mpv1#(ilYl8cDzgM4Efk}JZs!A9%=3%yZYwkgS^g(+NH@`;VB`Svy{F( zNB=#2rEEmNRkN`ua?#%+X=L})sAzS5@wNBo*!6%T-_`U{$vZp9YHQl+ty=SV9o4ZX zkFn10c6F6G2l!=u%O;2GA!0Vvqp&agD%(1JBK3R}3?r?Zsh#9MS& 
zmj#ZE4PUA1H9pq{t@Zfq3_b*~L*UVVz1fAcex&dIai@q}b%21W0to#V}fRN>P_GM>R`E?dB`)zih|mp!vS<~! zU+p`C3U2W$qe`QFtIouc_81+uw!;1~V)&ax3)4c3zHZsPW#$v(7C`UZUTpNG*iuYf zmo8TZ!NJw>l8#9BJGPB>$xbTP(f#)01#12L~CR-JCjz4ob#9GfKt1pWSboe4(z z=rWCMUKLor1rB?gRmD>Apz94fa&%q3#b1q3S%CxPAUJ5tNfBAt1qiRzrk>RCx2V`< zP+k_Ayp8rK;g}t)8uMq^Tz-jWQ1Up60pkadwx22tp!D)t2LEm!M8kUWe_jA-r)5GV zhDF!E0rUKT!NIp_WmN<;<&MR|q7My7X~Nk-^k>>+FlDogvF!p-zq-+jQva8|-Eo@j zg{7HE2A^x+ovr=qi6459chKloiWRK8x8k?JpJh=&%g0fLu%_NB6@nr(GQ%Sc;ots= zXHD^pE?Prh%{E3Aj&+Mv+wsray8m-Nov@**P!|dGbd_Ilp3&T?u6SasHTo@($?jAZ z8m!RfA${X;hw8y=$t*A6@cC~9mLi$@arb9VVO)#|S0XVWEM*ABEQs-K3P*$yV)shF zY#Ea0fs?#d)HV=;+9w5*FK2l=@fyv+S|gX1MRSD!QGc}Kzem}ocqen|pnfWjn#T4Z zE6uIp$56Ha!GSg;s)|ULDQI&<0u_6b6Xl!wM_S9KffFNwaH4_LI0)Gip}Yj0SK?0B zwZ;85*Yi$GDr@=5;(5RcP$RFo^dCkCtOS6?pV+`%4Y|SPl^WjR`fp&ZzcDOxqz=3J zd99^%b!JPrX`_d7IDH8`Xh=VFBKoCgfx_Zd;kP!y_Xp1u6YecQ>q}dr+OgGg|A>0C z&g>}P@oZQypqJn~4P0y-#qcI2)Vb_~_fPfFOO@g7E+>P6kQTY;M^JH7eY@}?@rN&s z3OTRS1Ny##6yrS4$B`!vaN@R>LA&hmmIJ71IYYbST}HdQz3_|Pjk*W1N&3xW`@~b3 zLf9DRbcY-SK3K-~Y7-aG*=0``kfyp-X0}sLM2yct7YhA4%_dF9ES>sg2!l2bhqe5mO_-2gHJ+=hDVUZ*?SSgN>1Kz}mnLf|R^=nD7MGqpddEF&hL;p3* zjY~AmM(zo$);bXVsg^ic;7KTN_3&J14wObDb(ih~b-_0@s`GL6x3fheIog2J)1$e?`v&+|^6> z_z?zVtK4X6J~4*s58xV4g~!Gc&oZ**w62&{LHIM_inh`RWDNFxLo6kbAmy*5O8SO0 z-S4BS5+evVuEwJGAFE_X$L2RxN~f^wDG6m_ zpD@%J^Kd|pYjL48OuDSZE`Gr=OdxTTZQ8ubm$~PXxLMnyN#SB_D5v7qq;Lv5uj9px zpi9ec#bGGSwo;>!dsVHCd)QLPHG3-PE(gap65?4TJ@P2xq7|e zk67blso8RjV{M*}flY^B?X4%B((xC>C~ z^s#clf0Ie|OF|wgfYD(n0R3^XJ*mif&VzoFl27$u&k0bu*OLKM{z41&cDco!OEh-Q zY@;m2=xfRC^hdCSI>4mD@yC|LGXtsf-(fIUkV{BD}GY@7<7i$AwEJ*_nBk$_;1ULv7N(fKLij&Pzt6Uuw!J7Q}Tzj9t6L|yy3Qtn}4wk?U~@z`uq_a+F>k51z0X3X*S zao|aTX3ethzd3!!8i_y7FjSGam^w$!`z?R(keotB5NDT1R;$2X4p*N5*1n*v85V#| zc=KPhe~U_v);EGNXKbNez@DfvSG<#^Jc{B<_d4t*;2WZTfos#yugr=O1P}D?nb8Cq zQk9#6?@Gb&GwrP}Qtd3DEDkG3#_jl+WK6$F%lOxpbHc;(jW?2OKN~as1_yS^O&J*^ zeCwrJY@6;Ljy(9ev?Z>P|66X$qKeI!u}{$|#>bfGqqJC*SEn_;_l^XP8_D|rKXH-1Vl`0joMTM&RUegdVS1`NzH_Q;5t 
z`Z7i`uq&00{B_%wZvSD|hkE~k#BKeg$ZF)P6Ycv`0jeLsOW`jdm@9uybvR~S+@8-c4>Xmq1=c4Yu58MW4OuXn%H1t|1JnBg+Ye$T@a&Eyd`Gyk<=8B zU$jttZsnSkuM~e^?H2gK7X`rUC)fYec+1H29nvDZ<|qbK8u_FpnW@bV=f!vN!uasF z)3^b>l8Ub$`sM&94!}uFP5t+HKMbH_2%}#Zdcbpvklbo2KC_$g#|{3ro9WTXRXU1# zEHh0}10FcTlywkup_A7iBfDoBrY|=!H7~*!60O*$9LYY=_+tNsQbStrEjEh_UkEk0 zML8{qENUr^a1P*>*ZKHSJKojxjq)B(COUq*g4tzv`3%3?3C>R1WMJA$&L$)U{Wr6H zrwP0FIDHU`tji%=II}tX@=rLLNI@SEv3&B__ZV3h}0f8d`3ENzR<+NGo0y%=yJIMs7A0k*C&{3l_Vf*HW1*8NJ zm+Fyzb;bhhggn~{9C86TNEnsv&+x_)SJ!)*hsl7z-_IaR7??Xg=`qn4Cj#;0h!;8V znJNFa5rX|1eKglUnQJ%mc zYO@^hWrS}c8?Y96vQNO9$$qhi+V8=w>=ppuR32#dz^vJx5C<(bD4fYfgF~V7b(q+& zkq@?^?CGpA4kr5hy?CWWwrL$N^|cPa7!G9H9UCk92jO;sJLQ-{#aMeE^T0yzR6u?B zR8xZPK>QWV6cJxck9Y@I34l`+#jwkAImh5gj4?$4Eo@5mj}i49NAu(awb* zXF#Ugy?=4}HMO}usL4Fl4_&b5o?xAB?lT%zbtIPr0e%M0{?AjLwXNkvoCGugJ3dtu z;fInMlQ>5%+n;rwbJ1ENgA9ae;twMiKkY zvDaBwg(OPpf^eHX?NyL%F^NQ=-pO8^J@MIv(VZn&`+8+y&uIqHou`4caHD7;C@`Z! zUGVMmaZO-Of@|!(g|Mj{6*0+yD5@96o%TU=hTya&StGEr5)EMQO6NExZyhUd3gOBL zWirhWqd8BLj6Q{POnY);nykdjC!B2gm3fa|wKyJEB42dage02WgHTF;t}wox(Tr@z zM%2kRoCnr1ToYXjT#+KSQUnoVI85lqU`WtNXObG$IqjmSXOy_nz~(}d90+!y^!Cy# za^JN+#cp8hSe#zgQE{(&+ob~^l z@B)hXaxHqpx2V93g8!147S7fh2=>rf&4gYVLS+K!BjXQW@XxI=L>3dIOK7X{Z;i5_ zjgn^6XKr(rmB@DkG;w~+cW%2anbq2rv#naW(O3W)M%JH&Gz~Tm7F_d6KeiT4 zN7&L$gzm9Xp2V;fdSCfa=zJn^4#9AZODG@R6Zv1{R&Du z{+spU>4@&MFgm#e4HrOt<02|ik)`?*)dxch9an$RlZKkh%X*Ih%fm%mB&v~s_TN*Z zad(*RY26@T~b;PRORzaQpqU9QK<4%vDP9IWcSDE<6EZ9f(TJJXcaXWeo)m`ek z1Frxfju2_vm?_hrG`{aK&T}1;5CRA8*7d-b^7j{q8@CqwY?Ve-3Nt+xsNUO_2Ob;; zos%N`Bn^3Lq{^(?4D()CE&pxt9rKL(mG<9X_=2)x@io{7d2%Qfs<=NY0gmt8o7S7C zEi_|P{5Gsh@oqEd0-D=ghYc@SBtMJHP zU(vXYea<`lGpB_l{|*QRBpID-w;O7yJ)I8(#`S0VmfDGPA-zdQq3s4*v69}8fq7y} z>z7K@$c&qirjpW;+d^x0Cg)90zS5Dghj~=M&XcdR&xAsD!-knQo=#a%VtG(HDX?EC zoVV2JZ7$WE*?F6ii6DG%*<}Tvz!UWocGlMhS8z9=yX0G#1uuX*S1q{$Ohyq9NRL&K;)i6`3(d z?LwzK#3Lano`#Eqn@YVj-E0))T$Sj28uK(cdg~;(IIr&Z!u96a*=&@$*j5Mufn*zt zS*^G*rE66{aB09jj)9&&bLKm{BKpxo+zYEd4^067P|0$Rl8ij{)&EOKOq&7@4^ z8|{0E$Hj^u)_%qB3Szhre!% 
z_iGnf8Bt8?=hO^sPqrD{a+e~pfuH9#bizaM&aCG!i}qHkIC~g8i8ZoHtbDNMj4Lx!;Ms8+z_o)3G-)HUV;G0sS8tp}y$+dnl)*|-5biXZWb+#H{;7iCz3#_p zZ@Pp2NfFi@Un1`J5eR3IffC%QO{ME*z#7@-k;Tt#dBea@Mo0=C7 zrP?KVIr=D}^`@jQ#%TO`0n8{E_!zJjcECvqOkq`%(wdaOez<~N2^=>~sgY37rfWzW zgv6owxa#7_sd)rT1s53sFdI6I!hsx^N@_IDOJfcDjqNB&qbS?N?t+^n6NfIrZCd}q z=i@Fdl;go_VoWooz~Z+L`c1j+{12iB$_S->rNxD>ALX;9CvaM;tH7R`JnslRpIgi3 zz;2(3IGL1o#u+o*eI(HN!-RMrZ5}$S;X`En;>I+kTm+Bt01~tF7liRk8fyqB$+Glw zR4c3k#mzLu55h~XDam3aP*3?|qSYtXHz;Fecu|rRAzTs8* z?xz&s#uR*3gD|0f0$`bhLN=XdtS(ZaX@EUC8CB`V3rrjq95Iqt0%|iTaD%|U%(kXTzDKb9^v!D*tTV-prI{42a9a1 zt@6+w&n`C@6ANyPQ-D$V>eNkMpG5i>0Jp3p5?lZB#$?`i%>oc!9I|T$@8=)fWUXTX zR6yKa?5xy2QK@#ef(1x%4|Wo+SP91SK|#5C+13#S{P^2lk5XgAdXzADaDF`RA*V`rfX@_dYT2yUnrz_M)4TcL z(i{2l-$DfJAcyDlo)RrYe3ss^e7jIJ?v-m@P~co$C0wmBR$`p0?3H zgW52aBhpXMJq7X;&|+}u@TYt6;XC!65FL2$)23j@jxWXP;(4N1wz z5D2Kcyx}Xaf}+TfjcfJDv*Vf3S-zKIuB3rc+ohMUs=`$Lb%~`5Eo!AIHe%}(D21Lu*1=X_r|jn~<) zAo&1FwBy*%EbJ9oF|fzwHo}R*z^96dgIWGTPhPa_MJuZ0H#M(Twv-y&YXame{xpXAKx1eSfekfVp>%713{k&No5F~01Z;q$6!f4fm0 zm-tSRSHmRp+4|}vq4qvXOqZf}~x~P$=M9KtGzP{U98d5H^ zrS%=PnnVrt$dCMh>Do~C=gpX7CE`xd;9afpVDs*7DLX1sVOr}mZe@_L6hJ?YyWNdF zYo)=mzUx`>R3M?iY^iW2vLQ@y?A1~jBN+j8e>)t?beAVq*KT7(pnbF4d zNBhp+JvfLyn&+46=Gx~Y(J-+a0rO%{q7o++*_E$7>v#+4li5Wql7Bxe);}3H9c3>U z7Re$d@q*%_oy;gkq3iIK+rWv*r^_NGV_WyWX$oOI0jJ+!7|vy9B~KXTsN536b~S^7 zn1>VNLF@Hy&E4#53eiX1NvroYRhS|^P`m!FXOlwWdfSd4e&F7Sh;cbbS;`mq*UlI9 zu+B=qsTA2?8e)TTEe)X`%wW)LZt{FAb5T_J*B?t*B&)blz9PxjT7U}umSsMJhx?a+ z=V#c8qxBI%!@ODO$C4BdCT6QjMF4i@vsC{&^;+KraFx2p1*j}o?u|0sF4*I3IE9Zi zFN%8yMruUFfqh$ZK^Kkq*tC6}QFpSH^N(U!092`suvKzL)(jK6)28ygb?8k&3RxUNP7ra_j0Tb8& z#7A}RnjH7vvAyE95-EmK->w=j8pUYVtL13ez$={ z>(xfm{Fyz^d>nAkjH_~0AoD=3{diA+0W~4-I0FC&d-ZOh$g_I9M*Kt7p;Fd|eP6oR zcPq2C76~=PJ8bw`U;H!Y(L_k^rQI8Gh?VE73%!inL)nfdKENOmHah&PsZDL<3O$_{ zFS0l!{pCNTV;7bg6Kh9+@#lq=OZY}M1bNJ-``#yB2Pvrdt_eW*8AflkKt!XUjQm%N znW6mbN)`0T07NIn3=B3XYp7i@`ClsI^2uMi{SD zzOK@u7K%MOFUlY`S_{04tu0V?cG2(4Xek0yLTif-E%RCzf|g2uNEf&rJ|qsfSr2u% 
zZOzrWopc}a=M!1%36(3g8V+KyTM;R4rmCGv)IB@bJT%c^yJktiuLr3Ku zQ(jwi<~&0Rw;S90EnVk}!|^H!uy0`kq1V`DtXr4+UC*F6Bc`pu zues!nkRy`Hssc}?rpsNn$r+AWa@azHb*6=XI3urJi&~W^vxyE?7?pNZ4Hy_B1GFAK zfPJ3Y46WDJB(H{q1t~_C6Fju%&%nA**ytPpA+DTg_8%!``gUy;P z(wI9CxSJD}0ck)+o3B1lgVrEGA8QvKo7o^d`T%T!h2P&n_QzbU9qn_730})Yl;LsZuRX;cJTSvvvOESk}bA@|yNel5@0$$EbFBdWiy{3$vatonp6t z$jbsjL#$C8yr>86^Q9Rhqh)yX3v1*Ujn5X8`U(X%H?wBQsAwfw_SiH@HpSlP=DdyD zC?)^$X_x71$qEHHH(k{?KArd(Q;z{vC0#nd)Qq5Pr`W zPcLGH1Kc%>N3}}kzb-OH6&k!GzOjG@mZ)$1J+f`~13i+2XaB=2XZ)kwRq<0~dE=ya4`^JatCm{OV)IffD9-mmwh> zV3wlUr0g>*2-}dMd%LxgT%d2scn|%MNMshJ-mw?Z9KwO7g05*Sc}l)GENe7H|B4X*r)Is1q_b)T?0Q^g4*Ca zTA($isk0HcS$V&^LZN|72(W7Vz%ki(bd$UzC=uz+D4zgcH^KAOx&4pLsxj2-^Y?f5 zgPjkOLLaD6(KP@H02#y@qX7Y%Y{sdN`(uxGfA+v&hA;jnJ*2i}_01+veG-SxmfWik zhn~jg;=euD+Okg*3cF|BZ9ntMseU_4tA1Z^btMr3Q(tX;%1X-WCSl z7K*r!S6s~e9KBJz?E&SN$GFVwmz@NVv^C!Z6mbPU9GysJ84qdMl&b^B>*e#YG!HCQ z6^XqiA#^I>Z5QtKILZ?_$+djEKyT5rMJtY*;{M419lJk2d%6uurT?3Z1^C6eGqRp? zNb`pRy>Co4g%MYlASkZ*L;R|8h1f-;LR;?#T0s8>k2YZSyt43d{(JwM3VxtvDPYi* zPsDW~)0f?T^?F-qGCk{J%Yyh@%iKnvgQY$yEWIdy>&m2**E0Ec>$N?XqKdDDkv2O5 zrKGQP2L5asKJpBR62*dH>#ApL!6;xMgrQ$i?~M{K4Snp>XrAl*_|!i$Y9x~1{T$Md zf*qTjoM9h37KfoJ@|G8BJs?D7d8xO@dWm66>O1tkkA21WuPwT*5ojI#`wvZq^RA0A z2bZ;X&;CT2Rt3lWaEOf`W`7~_!)k-Ew(H&M-((6>GQs?6H3ur~cbfka1Rd<;N zjMvZ7Y{>?{v&{sx0B{@;hc1TfW+N(f$mexeyM@A6$d#)dO8mb9$mep=GXs^ICBSNm zbeJUOO~~FnPhoGW_*9&S(PB34zG#f7hJ353&tE8Vx%*R)kOc!+Lc=IEuyrMazvJB! zCx$Fvl zw&lhhzjS^8-%ml=84txA^c*KaGN(jKG3;68Fx!0;4!}Ko z$p1xcjg+0y}bWj>xpdN}}h0L$Uxt{7A_a&=?K^$N*p@4Rq8H6w&ac z32ZD??EN(6Wy-5X0Uda&00f$g8Kzpc^8*z?+y5?SBmY(3{aq#;=8(+MA%X`X6BEf7 z{V_Z@_yT1AG86RbolfNQ@q;=Mlc8Y%&3sTgZk6&c2jIjFqXeLk<}V`|(VMsDgxW@R znR1#8!nsQB6P!*xA3HCrN+hWh;)C3Wgu^%Al#eSWL(XkN2g>2-u&**qw5=~L3i;&g z3$Wzt3uO;XsV^_{+)dA7DI4Sk-CO52`zlrwW;`o?Yd9}u@1kzePrt{1yTdpYv?#Q| zn!wDfFf+vs6CiCzQ?yX;bMF0`;tPD|;3>Z3|0Um^iwA}H$Fg2ONAr_A9eL)U+DxB1 z>~%;=CctimCqn{6R3`i8+5O=t`c!XQegvk#7dPiy;nM$~7XZqT9BE>AW0{ElY7bh9 z$cbM<8}{d9Foe*t4Pp9@ewk0Q$O_p^QTi|)g{rWSs|&jjrKu!$swbRb1!?~uS#KE? 
z)#FEdpBZ52P^4Q*8tD!Z0YMPy9z?pkVE{!5>28qjkgfrg?k)+5k?tDm&hP)M=f1hG z&Z}9o)~s3QTYGm8ZB{?FdOz?=Pe*ha~ul~bwd0V zJ^*S9Tph&&Fs}{ZH7_VV_}It0I5%JFWabUaLNPdX0XrFMOr7}-<^fO7La7q3pH;C_ zWO`C_Uje5BOO~HqE9AJ5roHl*&6*Su)Af4^grGHT3~5vse3B4d3lZ<+g|YE%U|w3z zGhgXNkKw)lFqT?l0IUHll4~P&oBUIOyy0k(+^!@t(jc-g%qpWMy;kxs@(7W$En96z|Hz(yX)Jqxj zfIc4iGzbB>hmtPjk`@>cF-zkgY8s!m`+i79MHST!oCj_Pu!L&M7=yN^k^)#TtKra3 z#ioywhv;Dp@<$KvI`gYYQA`O|pZzUnGeMcs7K!g3Ja6XMRNfjPLfSY$95*C@O*O16 z6OQXKN*qMf`^D&GWUj8L*ZbQKin8K13UoMVDFd*V8p?^U-rGdDbb8j?8ZlnP{qx__ zV_=tTu^}IS>P7m8DZtS*k`!*DKrGSshR&20gw8U8Rw^=`9q17wH*d_%8<{tr(wjpa z{HN$n+h(b{*H%thX0t<2*!HG%k%^RUuaS~h`l?anr@-i4C*9#vi+asw2#>*81T$To z4}7Ey;=m4gz2A#mHS+aCj!Ap@wb+k^hQ5{dI))3Ju|dwP0yP`2()+0!kDWRrkqy`KB~ zdqIp=%x69r(oeGk%YNBnlak4}<<5WR{LZy@Vv8jI zR?9bC#_dhI(j9{vcRmQqaKkIpKQc=1J$fHUOjV7wcnu_@a38}1tH+VT1&_$<9p#SE z36UI)2Ris9>*EMv3HKNKi5sMOlcTM@8jSFN?U(%a}^HVG%MIE{#-wYOooBVb@ zp|IrsP_cQHPHCgmS}C4zc{HC4%{&j62cx7|L~U;feshBUfU!7%l%H0D*jCix%U+dK zmCC+OMIz_>RJb}?{&aP6m4`u=9RbbbjL%kun>yMyI4#Mq($wBlo7ZoULSRNA_+(Lp z>$(J}6Z@SAkMl$9qjsQzf&$)(N1mx3CP3xH1E7+2or}EDo949|%pm-4bzTz76TB9t zQq?l^j8%InQ6IZE=WPlK2JQcvER6nZ_}f`&SF(($bol(?iEgydanYCAVg|LR>HkFT zrI;Ycm$=-jAv(tCi-gp1k;NG$i)3r5msAsBcn|ugwEp<;^q8Fy(HHn3JRLZo@$r-n1H0BBe0qUNWb(FgA zDaBkiNROY%;KUp(kUXi4c0mWPC?B%pk z+9oOdZ(;7;h2M9y)79SY{qCM0Zx^yJK4(X8f`pz(sFP>K7KyDI{99<{?A5F_bg5nh z#l12GJ(M2kQCyiCcH^NDO|5KF&CE?}<;s-ZlH|t<4aNt1N`~6 zgB^kJ-!-NR9&!T!x$M}e$>D>+)KFFhde)Lmf6J+jXZ8iasno!LQOF_fl&p~TJYffs z&QCYKn{A`A1g}lDeKGStkpz{}@bOXr+FAigs-Toagq1@5=0Xl{$jDs*7X?Z_83lxW z)cnDrIV~)VN09Fx$Fv{gZ^PW@SO)VLnCq9~6jzir3Qk`j@;uZLfe- zx9_`a1@((p&LaZ&TgmLe@V_hFN{VYfYROtgcY zPWoo7K55|ecAI@SbRmP!`_;JTOTIsp0b^c>ajb=71B?hFCqXm7OVc${hv7kT*4g{} z`c)+BSjU4WT<)5+ew~|BW)xn~A&G+oRtR>Wltfy6vJP|I0IJ^a}KUIJ_uD%u(F1c}3 z7dtG~+a|kC8~fLP&>dbh{tuLb%7Jag23!jot~##TcQAOoy543wtep9w@?GDJ`OMZ) z1{l2XJW%IS{4>ybx5bl=G2G(9&^h@5SdhOme-K+9;kxv_uURD1qRMJtJ&E$nPj25g zcOCT$cpN>~K?F!0=q!F0L^s*0sKGl+w(4*+%L+Hy4Xg^#NcuFhH~ojlcHY2FMQy1= 
z_ya2#?eEyjf;RXgw#2J{O%EVjF$K(9DLb`w(1?7&}35m6? z=v$K=>ng9eEkphRmIKLpsJcp*eTh6jPz&@N*wSVFQdMPq`+IsySZ515z-rKi#w~eO zdmfD0&iftjJNHfbnE!pC|2>~yQ0BhOjJ4|myk$Te@>4}stB$Ky(}LIG#fwhNelvv( z*?5uBnW3`q4UE0Y@>OS+S@GH-5a`>RTwJSoqtgNge~bBikI+{_vWo<3A)Q+HO(F`s z#TUY8uRU_LQU|!*5;zdF_?C?K%!7UA!*iQ;x+U>>p@&OC3728mX#ype4W3~oWOx}- z#-gY*jCz_Ps4Mk;Db_VdJQEEl+HWVKk z3PB(oOcn+7&kWh6AbN|*eCn*~mN#x$vh8m)S#*0L9L2o+i)G{T9XrFCp4PU2lXcJ2 zXKSX$mUE~7!aRho!-&)Mu~?nw!vL7sQ2l-=uHMC|f8_kXvXweYm9A?zMOD(|{jS6L zQy4R#o2t1b!6NKcjR_(ZEjpW=oEgC2NbgIDlt>0RVyWgg;3|Dhd}j4FyC0h@-Y{%1 zyF}_gZ2@|8Yy3v4v#Y58z4YqO8kW$-=nCZ{q(g?|>=`n;J;?J<~K36rz_k?-Z` zz8n@unv@{{&A2=5`b^RkS#c%tHq$~?CNH^_8^RX) zF;D??#8A--jJ4opt1_17yO_SOxlJ4W5371X%UsiML@q7}L)laxUW$H(U|2*IKSnDV zC%aM@f-{Ch#VO8`s6DbAD~EF=U06$RX168? zQvxfiJFbo3@!4{dGNcweri}0|B`|;Rbl(b7!%5QU>)6kPvR}hJ-T&}!Dc9W4m0!om z=$3(Z`Z*UmGU*s;YbKK{968iX`D*p~f4ujAZ);U&CZflbBJdi_W9cNBwm`jdsta%7;1TM5 ziysM=hqT~e#- zM@51k98Ru5AT?mN#F*{;4|0l(383l8bTMC>ywDR)Pr7QbUg)?b>}82Q;Un3s8tQAi z`!4%h%`HO<9rZ4>=vL6`0jgzenD%I}h;wqfeD>RCfh5?Q;03z_R3R&6-j5zrb6wj0 zq}ssqF9JzA!aLhG2KMlK403ZCBE>MEjmzlJh1c#iIBs%z5&)Gc>8tIV^miDZHVF2{+1-2h0h= z8arN&r!CFkP|XK&kiDh=Q;)hnlLa?CRy);u%&gIK@yoJgR-g^4^9Z%IV#j_pjK1aQ zrTG%k+woME?40k?GV7bnj~6b+OMD9d_`78LlfkMCj;5Y6bH>w1vbpk9^(=_aJ6{{ofkCyyE6oW`rM^rk!&}>$z8=|2 zNk>t&^Uqa14<)%XPZWw!KC$0btE1A7cW$UVZ7aL1VLsfO7XdYw#Ir?Hm(*_wea0nG(`MrSuZRp=5+JKH7=W2v$0X-RjNV=R ze+8Nh9)01+6X=Ot7MJea+@l#m^O_BeJ4Jv+D)olWR9*k`Bn3V;=@iig>_l#J8#j_o z?wYhdD686fa@s3&M+WwmslMa6Ep_|H5xQ?9_YExqgwyQ+c(Y0F{xX69k2Ium_t5zWDM2 za2mj;4A?q$p_~Rkn`L!^z6z1tY_yUqXv-iJSNs zm9&_0tpQXlYv6zQP!UI9lIG(eKg+8{zEg)ZRK8H8UwgkCmAR1Oj}HmU?frA{ff4`| zmIP}O|0xYH!U*uXg=DC=#tV32%1C2iyY2x7_@e|{w)O^d>H4!3N*dscLIDfb^Lr-m zaSZl%@#!?xE%QwKyqBu(ueZBB;QC;KU)45qJD@~-SaHB`Y~JbwLkY>n_n^@oH^rd5 z5R$>FJv4M6;~R+$q_7u%asuWIxwGqh?pcw`v>3M*E}_tDixvZIUsj}$m^-rQn_x}( zVOM6F@kEArBktuVC;l;0tsiSLVcmE?{=DrR_DFx|^EZe89N}BV0TGQ032|AJ9g>mY z1xLo1R?+nZZD7n>BzZV+3>L%?t#%$w+o??R<~eqc3;ZCLdGnaTND}5=!+I^w{5eUq zpubfIA99%)po#Iurt1SUYgJImN0@?*mR1=I-+Xgnk 
zpuE(cW$U!_ZFy*+*-7qf;x%&L(oR$p{e=Ozc3k4Ms19aDvSA)4WK{Fll57IBjD*$s zoAKn+)w_Lfm_Y5GMf1OJ`Qcfzq|t#?+8`6R&;4QIl~Tgo@OYo>31y=ZSB=J$!Qlhg zl0CS?2Pu?aDYf+=|8aRu{E9M%=Vm|iDy8FaomQ4~Mxvv4D_Hix=^!Gez{9F)YmjFIoY`zg=xz65+2K|M#MtN@) zq&iV>(2kXBqdFkzwEay^cx%j$nHMHm|0FeCiBbYWmZaFn@x-V8ETy*y6`O!`eKNdl zc;S>#Ds$X|M8L5QA0fU3doT3DpnPUmMu6^Vrvj1A%pKXDdLZkJ|6npfQ}I%dHCi4F z!hr_#7FtV@5J^`AU{lTeW3mtlC1B5qdSN)Ab?9rc)Hx0zg{z)067dLqeeJU{W~*UG zF3b5^vy?hMq{rH7=~AHD-d4ZN-9CETlcNd{(8vb+$vx=FDuSf|SO}N^iyHbf04vp| zU12A8+Ob%6vpGi0m3g{C%doq)zf|jed6>;=-}T6SY30-~C``0;{ziXGp^S&EgqHZ$sDmBU{u8H_;?Q3M z3@2Q4OFuWc^0z6k*=;a+M9%Fn5PJ7#WHfG$eA&SkzfQBQ)T^2ONDwTK6b?KS=I|I@ zV13naBTMM>KBQa_ezsGTT?t5*_z&0RI-HfL`Lp|tSbGzAR{XxRQb79N*CWKq04Mn_Nt2sFoyi^)rc_k^zWXv_Dt`3#qgbxS-pHMqt6L54XsRcH)oWHz>Dx?I$PPch^NNCC@^PgIh?xRuLZ@^MnY`+e+X+*Z0a zL4wk!qfNuK(P`bfu}$bJRIppEYu5@Et5BArMCI0(I^MkV2ss{rj#L@h&=1Wb41RE;;MDl|MU8#J}D@<;S;v3t-K6y*{wSdm*k3cB|ZmZ0G}cOo}La z<@}N-l}mO3Q$oeA)v&6ZxFmnf`^McWP)CNPIRIVtg?JtG7w8Wd*%&g5SKt5{G`SvD z=7msa2=i;|(U9e|7R?Di71jsS&W&O$>a z|8T%WQ7yrRTDzjUMt)=j3%d<*!12@X@}>tPp7S38UYV$x>Kg!hkJ49L+QJW;?fK}} zq};sv41mP-&*#!6Dt*yAO~VlGH8gsoT-@XU2tRQW&gi@6SL})&xV8KoZ_KH5vO{ac zIX#a0i(w?7roUcA_%6_h{U0U*Fv&54g`*->4xlho7 z@6xAxPV;r&nti;-AE@EYzoeaTV3!4^6?Lh4Im{rTzO*Kn(61vW;~VCEFV;LFkzhzl z?opIMFkg&4Va$t9ifEd606myLuX4k$_uK{3j=Z&y10$YK<_RzionUyh1ITYIPeDE9 zKUU3bmobm`Cb=0F7AM{rzoDK&AANm{^C2@R6*4g;Wx^=j>(> z0^C%sQu`aV%EkI_(?!H~DV`_EGrRk}#$)4^%b zBl~kS_z(0*-z%EMG5@R%^fPj*CufZNdqU*%4^$0UM45YWS6-+Yy=)M(+z{@cX=}a# z^8DRkzo%+cv8&8Jm7@u+Y<9D4xr^g+$)|mG{Tybpb+kiDUv^28DAxZCwkaUW^oFk z?PMU4_^(dK;5PHQY<1x^T3z66jTey#D+X7zc2TwOx+0VBt<;ft;5sPqgN30r1V{F( z%f{u4##725xNSmfyN>-Vqiq>F8O3yz2fpYn@C`5UWu-rzp9%QnpOm@eUz-6lPQrvm zvU)l*!mCz_Av7rZj^BjC;kUK?VUy{a{mE}kT_h$+VzX?kN4@wjKeZWDBk!}9#81CZ zso~?1`5rO`XWPsVz4=*Lu7HKY7#I%A)t`wMv$K2(I&0&-=4%;68#ehOwo}#j(tIo^ zieFzGRI6_ziix3X&432KxFWLc<+TIxA@q1mrkW2@<$kx&{i$cAX<84RE_qq=W(qw_ z@&DHXh$e`e8?+Nm`I5t7a4t%+3Y=v$pc4G+^*%*TRE`P)QwSK&t+*i~wO#Pde+5d!fsn7ktj>J58N-1i&o zkealAGi;3UmDUdn|+GD^MdzF 
z@^+_?62jAVMeZeLDiRxsF{a$dhPi{?S3p`L4J9?}y!-VRYqWX+|8%coq)l<)LhI3b z7N-~>vK}$Hs9x`HHlp3&a=5GGEvX~ryF2M)J+ghVR@(3R zLr@m|pD;vLV5&p-Qi{j_dT1(jRmyfCS#qa=wUF@f4h_5T@4Fh*PWfyO`3?inNDF%C zIzJH(eC~g9Z<0v(@n7LT(lg_;ruZmj$Hzs3sm#YU)rcf(luA0#>r}e%$p7#*!r1eY zfa;S@hs!~7e&tMYe$6Iv#W5&3J5b)|0nH>O6f-p0Z`<;5gMZ4^_^P}x>OURVe+OcH z)U&d5h9-KQ@_GaVM&`v-x4{ttSX5IkWBG*Yz3UGhjA%L<+?$FzV7gvBtla69 z)~jBySD?ketQ@#IKynl%%4+$}JY-?>r>g}y^i-TYKVO(`FVwuw6^Mdm}X}6P0lyq=(l6XtadY;iadR6a2v2TmZc~om0L;BTqj8~m|bxE?I zpToc2S)y|QB2wykS27Y~O*`k^`T9Lt}Yn1gh!*xYbyuo?FCqW!grKR$UQ|EQ@UA+YfYAK@5l zVdv&O*`%rKXRy_dhPU}98XA{@HZG&Di$+ieF>G>l$~r!Uq;u>_0XfymM&_D^%9{cqNjqdI9j@(tMYS_pkRvhzNn}BA^%@h*no}dzv%h*j#aUsXI72v=kA@`yI_uvq>@svfs$ToyU zm9tTA{vt?I2=S7;>p29*X6G@e@fqB}_M*{sk^2ib5|S|jfa6S9CpX1r$iJ)Ub=YE0kQDp?kCyKabDSvGmHGm z(u7rUI!)wZR%go(6y@QE&rGx_9&DHp*@ZkiEk8cG;YSgac1E0hP4I2_t%XXDDn^5G zov37jSRwrofI7Gb#HSyy5X%Rk0Ks3r5+0MZOTv^XX@%N*Q+CNIHnCF@0BjY!!(nyR z#k^t}C-v2*`(4Xw_bX4QF@Vqwntk1En@Tl8^3S(qekzj8ZkIdAP%&B!mpWMT4_AY4 zV+yDJN2$>|lGY8NjifrcLq+LW)WG_5fll;_XIYdly3_@# zP5{~cbXj^s+<}obPv`&MuG@HLA}(ZWzIYF7_I>z+fJEna7J*4J#vy*Vx^(_+=vrQe z<4NeaC0r0ph{T+m{bEc@BAFk=TfPdUN7kD**sx$e5N~dpjhg&9I!EXO>dQI)tH2)h zhVx8I;{c57i&~V-_a^}K^gNA0MK4xQ2o?09O+FGki{8L(UvleS8ilNPfH0JROhP#L zYrH;2uPIH_^p~}FN?(`+`cq0e(O=kP?P!fhk2#h+4uc`rGc9e2ohr1WkGNDR?qYc6 z^8@;Dkm6D|hWKnl4yoWqhmK%M9S2KbmKyW#3iBnJu_qnr-lS${GzQzuV@bwn#fGX8 ze`k3`Z0m`$1Y-!ZNdbwOg8JG+1+#p6pT=2n3ZU>##l6`ik-Cq$>8Hwz!xlF!st-6r z?yn$X@W2*ZbYGPlSM&ua6BasUBRCwarM3FU79Bivqv~j}imkPqa9UiS<>hCiE|Y|t z5c1XVDx+*Z=0jiIZfStMW@qrFUn-&wBh50lDmn0&9cXK#>=dQB&{-9qdMhV<>G3{D zJ<$7a<`m*#&H2n2nL6fISl~b8QRVxG17roBUO^prl@XbqE8%1}M-PWblLjlNNhH#a zqU*VGUnl)h>t?r|f@C4XjsK__lN`n;;i7Z?`(||F3ePE_8Pxr}CS;4;O>4pDmyMl& z_Sct|w}p20Odfq&_Kc2N{`vb~)@8qYd^mc{8kIO37QGG4&YQo!QJcZbSsIqw+K)KE z1^glmmY*BB3@~Fm`CVRKCHqOYZmY6i9A{?9EVzWg^e+LNdxyD$J!+J zT-{fX3+zQ4jC_K&&zAliGGRb%GK&%KHTxY1fa9+LmqF+nCx!F>O}V!Bh3GKluo&!z zSyokHB9-^QYN{*dM3|xLC}Z%9X+#}rww0E=9ogBVyV;H_0~U7249^GLA`FxVw#gR* 
zynAnUz7YB=ss6728Qgx%#iLmC@MkJb%!&kXC{0USK}r7|T1vG&B|>f20;vJ&xMT{M zS{_0GjXohG&xLeI*4Jri<>yv8?i6fOu)nhiz18XO=8qsFdKO;b-FwSTDmE9d7=1p# zE?WtJGqYx8@}PV=3lrr3=EdYtmkzUK*%#^IG`%&#snzyti1Ft9Pq1yqB|yxt$zX@Xz} z#bC{WjwT;qe%Mf#WZjNfkKAgwR2V_+ponxm22)bSX@e1(+$!MQI%M3$kGuXkr0c~xSAGJK2G#|K`uou{zp=0!PB+oj*I016P1@VPc|&DwDaLF zKCv4xsY`~rRp>>;-D@tX6>r1}k44Tqo!nl6V3L=mQOEysOuy_$)uTz!SJr~AeV~w6 z8=Vv|!BR>)|BguN2SJZLZe7ww#F|!96)+b72*^d5GM!M#(@OOqi!z0h%*}~<} zv>+6&kuE_tNKPL#I*{Dm{*C6BVE+I$G|rjOapDaHY+@cm<#}>~J*AoE+S?fO`e-B} zJ=u||ZI6>({GEMzN=F5&ql!JNgCajw%hbQL1_Ac1V5Okx_|m}57e&8^AricLgz474 zq;Qw?QQOdB*(x!Fu#|WRA$8AuREs-(Owiwc+9htFFUlv=?}I3iu{4cp8 zTWZ9)3vN59u#HJT7yD4B4qF#{2ciZ8vqRo(Kh%guU*^`x!++;L=+jc>fXTmhC_dKG zY`?6m^c9v5&bLY&*k$Sen5k?4iXJISp`6*5t$=MeW1 zmU+llJsRkP-VH-A6<#ZnhZK(Iupck=?!*|8`r#&4rO?F93eT;WKn=B@UqMPhP!*Wn5`|0T$f*P}yT>F9m*u&vb$P7R>ooSqEV2v&liF7L74rdmQ!`_chvRYWH0WB_{Wey({*FGA@!Pd=I=df%avtvi zlfEM30%+5I?T}xmxW~TL{1wW6n>Ex#MDF|k>>7>s80I!1Po@p~CAEu*{dg75C{e-D=19N(9m zTzOrdOi-7JB7Wi9PT>kRQU~5aB)@*A0^Zz5SjJi$dKVzqT*m4QA-v^-(V6$TZg&HH zJXeznZm9LGMBoJtF5twra9GhgwUd3Piag@Eqc`$i*BS!jM;FHri(S(aHu)mq(W}1m z8kGXTU8PQ&xIdC6+vwCjrsw)z+DENriH@6gvu-n|aTnrDNIApK!`Jm`agv#?;w9odv$ovl?*}{d$fIS z<)dcNf+#C?cYh0}fQBjF(-3RqJ<4>ZvWi4PA1^N|1R0y2qZTx41fOz2h5p_d^(Vrk zRadjwBRM_aA;89KJOr=Qi_^hTtm`uq>L6x1qe=zwA@86s%{U<=R)C1ON(A2KD}cAX zi}Rffr(_u&QJL-7MeF2w^(lU-e=9S{D7MYQfCPY`PDPAp2Y0oxWEE-IlhWOEq=psojvlTPVHb?Qs-6;_*NNE6C zCgWdiE_oD=&6y+bXtAkSp(s_F_qAU&(~JhMb)w|wV;;xUD1RU`%kkm#h~O3sa_vg1 z+nYlZ*0LkDS{)K%n&gQQQ=ufIrX1|af(EaSbP}NY zEKrP#g#oBExYXk|dtxX$Tl0T(#|JZ0huKesx@J4Xgb4fX06Q@i!`*0I4;W}e4NuvO z=c~E;HA{l$>8k zW`YdN8SXj;f|XESg?tFV)0;z~zg%UWj`x&qu)SO<0zD^JhLoS)G?DE>0P>s)SWM*- z)Sk@ZMRu&Ohw&#WnP9AscWDcUe`n`SA^m9Wo{}p3hv-1`S)9a0f=oj{4=pTsFQ_9v z#6RO|7PE!*^T!a-?zLTv{%Th-a}Fll-RhIbXgY{+d&ohwRZJqf@7^673-ZlAvowF* zpV;~qzjdl!$0*x@h{FppO7pRLkQS@^PphZSVyCa?t_JNcj(;ydEJyEH%zt=xEt0^Lj)*6Wq!UCKI2l6a)?*RQ~8^=J7%)OunNWd9X7n zjN^hzd$NCzp>K_f1Oaj?V%PE|<|_GR=d(p7>f)~FmZuSIaM}f%A6R0zMHT=Y5^TPN 
z&BM@Z3HIQc5JitJj=m_SY1x`)S#^}p@hM{3St@ulUWxpzs%16AYg#YLP-XV~n8_}B zZ^1}n_m}q6?e8XzCF-y1<*Plj-6~^%Q)Q9N%twh73bxSwrtRRaiFJWsuIT`hppN{l zVyve9@l}Eg2E^!v2$b^p9r?3M^8QPyVgLDiDiWaga-P=webSW$U`q?TNjJ|)O{Qt< zx@NW-+<6*|A9>F2?LE-Bx60^}L+SKw$J!8WYZ@DwwaSEC7tKTSjn{+wRyI8!QT*7c zCaZ76>mL(1>t|2O3sm=}$y(Ml>tn57U zLz=t;&T8Ay(W?nPK50wa+*MBV8!=SH*kKA}xens1ApkfpO#RP$69CA121qahO&Dz3 zJMENYXr&&#)!QoMONWR^+0~pPQq7ROz%406K++tQ_o3;5y{Oe#`bnf|dShknv7L_$60J$MtkVGt8M6rb%Rc{s>=s+HDVk%Ybm-S~G zZ^Zjj@4BqlxidjdhXSr#Qjw167<)M3nT#@()LrhUvbzi4Ke7x!5iYVFI{pOfuzET~ z-+|20_Cqkl={vh7=5@*FCH=>ISI(?5R>mBny?!xH20mT|j)_Wp2mI&naYH3J<(E~q zup8yxm|b2rG`4fa;(B-B@7M+SqCU2f&1hxc%Qs&A=@yAha#PbW_AZG2FZLnhKY`T9 zP)h@k%@ef7`J;<3HWsV3gzid*Ciz{Kbe>QbCdrd!p29jpJ7xdZ-5PXf3){PdC$yCf!y|YTCiHAP7jGg%YNloe2m9=qb{+{B))3N28c3E&-U+6ZIbGtNaD}&y-v5#R-C6FiTuvA_~p|u7}(fypszeMqB1@Y8}H5 zs#Z__o+l)8QRDqYO`=7s52@#sI-v)~yO+C!kE`eZEG1n^zS7LxOWz@*3kll~8HZYW zmpUIB&S47o>Kc8hD)2v5@_*`NotfRJJ}3OUXf#-vyAi?*cJsy6;cdh^Re-nNVx>+Q zET`k*W5$lD#jB`P=X~qj(sje`u;7g-Ln}Zf7RvMP`B4hZcL>Z`gyLBx8m#W@r%3m( z3e6`b?po$&z`-dX>rh465|~TiA=Q!^Ig3oC&_0Zm%ale&2k~ z5k4*qMD~=92RogAAjtUsan1bMbF`k5abg3JZ$p;ii|yQFFinV*a%OM9`m~rI0W<@; zPbK;R?Ro^L4%Z+A$olLz&mzv^&_o7%C4bpU0Vzkp3>4;Iw=7NZ@Z}_=CqaJZ+~Hed zv^b~4AMJUDF-C)X3lm%xiJRFWAkMFKcd7PqpM}0{`ZX?sb+8b*%$l~MVg8wTKl?^8 z?hIC52q%y{v|%)&G3L4Z;y;4b25cjjkMrS-6`}c4Ep2Zq(kGiw zCZox4RANxK%Q2e*zWKO?G5nGXU_X7t$pak=q1F-E)odUsMcgnmN*3*+jDyG;aLxo7 zIitlBXX-6MUgG<1BR!Lr${Le|E)dNVXi$O+hk2i>(qXg9+)}>V*Z9ft#Y+KR2B^!Y zP8exWe1^g8)9-|4#FrK_kwDBcyQ&@7L*^N<(m%xE1VG5)%owu9y0`BM1|u`L?R)OQ z9>h;K}5nM_ThVN%}4Lz5|pd43s&e4IUKexA~A_BBX z01py*$!2#6hrv#AF$Xzg_ro}_AEZ3#Hg@6xc@t7NcyZJp^iMp47b!olViG&nEZ&t5 z0cbO*pP_a$Ypbp~7=fZ+Z(re0SMb3`z)=!yQk@T=R!V^7iU-61iMKuixHpVXMRH31 z3e~(ZRZUD+)ell>h*8>m%sr()O9k&16Ue zP2#WCG3zkT3*Xddi_Hr3)m^w^Z3UBwzWRIB!Lwml?il%**~d1a|^-~-jda( zX%GQ;Pp8k%Jc4X#2Br7!I?vU>&!CT!fkp`Q{?|t2cvoH#Czjnm5#xY->Y1JSt_vGV zoeT_0(ftt(xp$aZHYVsFSSvt#WdVXSNbmJVqRwl)BsBy2YSvBPZL0a+s6Q_*&YJrs 
zI#qOgq+#I6L0&-2=ChEODpVh#s-@lm#r?K)ZmRJy^YfQY+6s@q+|U}6=*GoG^JajT zt)KB9{Aypdgv^yz^gTwTY7Tt}1KQoIhxEcP7gq0l6NkFKu(!G*p2zO@e$$~qi&S61 zlu{XL&I-SX%86?6t#-yAqJ=q2jmiy5TkH`9lA?817iB?i4W>u!cf&Tri)r2Z$Rx=?C|YD+5jLVoz{SiG3WTUqxFoRzesXFkeRaMLS`hJOpV z4)}oYhH-Nl-NQ9TU}lwRy7&xAQDZ3iC*#WJL@hA2l7ey|^lxt&_M54Wlh||1zEy7j z8)G%i%e>zUB?$z;^?xFy;2ocz4Sf>ZFLzL3hPxAD?F&OF{gNDBCAa%+`|7PEVz>RO z{pgR=3zxAYpSjlI6z(%~Y@|0z3Oqqy)IM!n`}lBwJMtES_s}cc5j!fjS8l`pEFc>n zT^MaM$wi><$nO@~m;HE$7#lDe_O7B{WCgBqcS2+3O41QO^bF=V)9ks9N>{z!Q5qPR z2$a7qMX@RU2ayGw-v?t$=?X}jGJ_jUyjR7=Sy>*cM#ZrIS?oeZ4;te7+V*3sRHSdK zvU&CrvPF)5>_l~X_L4mu6*fsbb@bqH z#{ENGS?B?!*Rq=2xMpWgV1e^j6qp2~CwLEk<&w-o10hP95Ne*##z>1V=WLvxl|&Ix zjCeUBOli&pTCOw>Q6660?dPtPb6p`Mrxi7dgzuX~NP8yqU1^0d0EFv}AB5RUXO(un z7)RPnM8lhqe1jVP<%u1XNnE1dVDn}E-MR=rV^>zei%+IH6fq+?U2Q-$j9Mly*Z+rg z-$m;bJS!wF%T8yd7l4VkVmxS~lcQKYOR`oVERPr&5w~|PdDM^;KE>Fxr>@E(QdEn+ zmC_A2s7@RNA#CLOt8FXdCROdl0O2Kzx^ub~f`;`|ux*m$d~U@A3f0m;!@`$U7Da`q zO}(&=0xjWX5t3wM!U0uY0b|ji*lYnAEUetsKjsI9QN#e`oxYT?`2CsG3>;i%)4h(Z z0ZEb`PGrlpdi`Pi2bkuCr#yk#$Bfu^Lmur3$e+1m!PZH9=^J@U9fvh3`D;cBu13qW z{b>^roa@J+?B^f)je&}U{Qx|kVw&R$1OB9TeBtm@pYybe04KpkHzIO86c33^87A7G zrqCTa+^ZH{_&;o&ubPrHK8Ug80y1Q$L(#`0a zbc51;@B6!7_p|#?*!E!W>%7kEI3}RD+Lc4`6cxvUB>mmGZRd=%H6CU9zuJr2lA?s5&_NFH%m9T44(Z0HW4s zyV8&cPe`9Ak_@VOyBx%+`Y_Y${=TH>=65-D;!|^_-0^zZm#Oj<_Po!BAPSyJrHc-l zb?-w7-WGxLF*1M?&CoV=Mj?m!;bGs%%fRIbv$f}abV^v^Bj;w`3rPqz?Al2NW?HOFpl`Fn<*IQ*hGI$N-bP?OPw(c@QD@Ts63%qUR=(D;0<8+Fdb@C<0_Hj@?c-^ z@`5SA6a|g|3~hsBeL3?02)&N+bX|?UUg@UDhMw}LeHhMiGZMn8-af6iz;vgbCh3Ma zsQGmpVBwiR{na_~g0WHodvm%p8_wQ$C7SGZ5*$CFj`5p$%|MHmj}SLy_qS`?yS5#sIvDq8-` zKqlyyepJYlce+vmm9UqMLzf8|hTHuBUobW|W{?j%fZGyKr+O+y;t#VA4Q0ltf5LCW zzJ(nqcw#sMU&=XDg$MK45vGQehBygP1zQ93uB8AubROFBCs+yxU z*~VrcP5K(K(+FO1<1Nd}J~_tmF850VmA85=kX;5uh5gzZZF8wMC`s{g0~Y0WrBQ=R zUCOlQ+LKPTidW$GqfpjIvWwDITT8mQ6Y8oM5R9$p8ETQh#afM~VSQmI838)5;|vLo z98{=2JQa9LS0yAbW+4Ek8oc*&?DOBn5EX0mFufTTpPxJjv9#r=jr2R&Tc-al^Q5Ar zRm|I?M;ARQpQyAUDEnMS6#vxw=2MI!PG&&+Sc1gW!C!K#lCr!T+_R368*@ea|GQny 
zkL=Vxy4y3o6Ti1NeT)e_KHK|H(ScQOExI13rcAWto>4dm0N4M~o(R`O(-)y8r|D;5 zBJF>Qjo1DS@irx$hRfXjK_BvAU+j&cewxdpF&7#dx_dUIuAXzdv|U ziq^xH{X*ym=>o7i(0w-GnG^MGMSk|7*UtrXb{LFdDloRszmRNVq>H~TsUuo z>1F2pRK*YPs&-M#7?(7@b#aut<0kKDa@tw%M#$e(SN?HE&-#LU^_ih*7uK;v^5aD; zxpA_|!zZjFG;p=}GuuyhZUdmZ3;=k%UpL7Lyebg*HXXHckt|Y#C4CW>;&G5bSN}iQ zV;2r^a`5%3B#7cJ>NqEZM|%6KdlrJn%W*y+-`KKyJ?CPuD4)FR@bl?ICu*|Y+%`>6 zyYHC#=D}OEbeMGZ*GEo8BX{_^4UuN}W@&r`A&LV6EW2v%2Ge3VTm&?|QT_>qYcZq)*6L;FKsm8Ad zx6ZZnlt)O?l~a=gvA+XQbT~P)qWAy3KZ#WV0B~Jn2fv>{9rfagas2~k>jF`H*YoH) zX;_ReCSMF6%b}vb^Lme_-SNe}T_1r&GFG!t#qJKtl#)iw85XD$;h~(OHqPypP7_n{ zwdwOn7=p73F7hhcJqus*lOX#h_rH{ehYlkw9ne?mf~OhuB6WB0n1@6E^v?hiUgS+o zxJsk?dJ&1<`+e*+J6$U-{GBmEZ!Mg&U@?q2;0S+#y7?Dgx$vYRN(DVIX&{{1h~7o2uF)4ilErtyKwnt` z7Vm0$W45cBxclGa5?!QC6V2FDFcYZXV8h=orO7RF>1XRvaqj`TT9AMpBBw;pk(Op`PWVzOyp){G%oj{HeDB5=Gb9qo@nSBZdjN3 zEfXW9oOG)3;5}{~Wr7Yx{^#$XXul_tS1X-MmF#~N$RdYfizT%E>R9FL|0Adx?ro-PbqRlF3Zv_8vV3ap~!((Wgh7MktJ!rlh532NDLGoSns2y7Gu87O9RghGMw~;ikE=_4O^L6H@;urV8yN=jXZFO*@;?NrG!V*ToHcVb+HQ)wU1hyO;c4@R0Az3Ul_rVpA0#E0 zGNu=I%CJpuX$q>=c_R@!6?oQRn}J=8OUw^jg`7=(2Ga3m%#7*?+`J709nxq5I`9GL zYK>iC~Lvc({Y7K~DrC!nD zf9fk8WXT_%|0XPs-I5|{ev(FrVxp~%J>{RO8*tBHNwRAcL%z`^U zYZcuW1PJ6LJd#s-{?pv<{MArin#(WWN|M^82~@q~?;)Jw%!ls^ko0R20k`5lYDHbd z_z?MKj@CD`4s2LQaL7WyUs5~E?W6jVB3+AT@(l-WRR=X8Ermi`hBLNzBsAX}ZKtW! 
z65~UKiu5@JWbE)!1M-o~*|nbB4i6y_Ao5OTHaN+w??XlIpu@1+vbNbB1(DOOdBmyY zNY=tL^F;mnuQv?OT-yWBT@x}yDBzND*k?1Q;LL?Y8YFjUtN5W=-S+k9b;NSTkI^Wm zgnbe5Z;inxtZa^*z`7 z0Nwn;5Cf2>wb7wW>vvvNT2UYYm4JS->Z_fyY4Wi2=4)~=_LYXLyqRg4ub{iyOr27b7y(XS zUx#E!9sjvt))lxsDc}uY|L@+`cJACoESwxe@^r!GJ2Wr%Hy-j9N8S5p=JLb&p1@Q_ z<(2Cm>$x_6&-7c(5w6*C=yEy-^(zs~D9U#L+)>|Y~237c%nb*}3sLJ<72buH}d zKw)(i;5%6L1-w}n)@*Y^{j)dDjYcoZXS2U$-`?2pZjUodTmN@&iyQP5gq-(!eFRDE zPPzXV>3-ogrl`vvbfV);;u(gr>lP7>ZWv8US{ucLyUv9ArGLk7`XLAJ<_2NzL;0tar320 zarJSGp6hy;n$-IyK|m%|jp_4}YBg#!7kO1sg|gEAuN8GbRU0R#hAs9&{MXsR>U>UP zW`Nm(*Iyyh+epyBp1HuvMIE0 zHU=YvLh~8?q~C6g*A3Uf8WqL*ApB`|?2rQEQ67MxPJIMMYWRA#%!S?)9G*uN|4FjQ zbC@A7%3_v_QIAqTyNYY$uZi|NwkYX(_T0yWveAiT2+&u82Mpf=0R)vQVQ6W$m|A5& zlEpFk7@t2YdHGx{3jKM`8-u4s*#UM0fJ^YCr~rU|j@SIc`p?Su`oq>kJIlvv0C|L% zWtrC(Yh3t!f)1#e1IoAWWUDjNd{-0l+{ysH5t}di9GvNs9%nM>01w=pkz-OG1Yq6L z445_*9cOfbvgMn2;xj5-ah460jE!8+K2~bL8cYor3=rrQJy`YJygg(H&;61B%$kdk z0~B8T=e7)@`QpZ7(b`$A_tw4Z2FVe6z!wKc)5JSwiTa?)Pnda(MocdhdG zUR0@?GOs;#4+U6>s~$I$7J|YX?DjSq>@=W*iotGtiCMFHwReqpb22P?816tlnx)(% zim4X~G{ip;j7JKy_|kq+>1rn5a0A5y`AgF1*!^L}_Y3*N05v7+i~9oIuWkE&vy>;# z6^@!yBhQ5BiHfnBp(IAleH{=B#A|RKdt44_z9(F#Py5k z?RkOni0v@5@WdzIjn`+V1ch;sTy@+Y;2WnlFq*j4;)pE9T)#v_fB5|YsFWiU_k zsw?PM^>-Xh21nVyzNZOTF$D)SZg2Z^WWsYl)7AUJNC;y%}flt^~z7? 
zoM4>hn(`?oigr@lP4z7h3iB2zg~r-BJOe0v>TwWzw%P>xR*a0yUnpb2Ff%dXe{_ba zAY^ezg%6YsNWEZ23y-I0c9%ke0Ray&Y3^8srGjjynoZDg7@;`~Xpu!DBB*WE&$0D2MnK}gcD@eR>w5NPB#Y{0SHL#H^WD(_Kl)2c>JRB1rl^8j+$Pfh z5n*7%g&o~@!9U>B9kz_a5LZJBREJQWc^SCLa&s;K)g-#|JU?+`ILy~IvWIqFgA4Um z?E)E@(O*3cC~fN41!>6t7E z{=r;=&lgxIP7o2GV&MDIsM;&XLA`53eTLATJ;cL2*23ZU9A1D2#>ah^a?G`bd8R9G z0a!&TiRIe~3i`LmIQ?#*+mcgOWFUk@#b3h&O(X6rF6}Y{ZT-y;?QV`t8?79+Btr2D z!^CGePxadBcz-u@w^~=cKy#-ARz5TF1O?b6!GlaZ-rR(kVA)$NElE%2#2)D-n|%Bg zI;l(;^xl9?rnj>3KmOF0XCviSmwldVU)TyoI(AiW?=%pL9UEVjCmoxXxBMjbit{v_ zot+ID?6t)=lQbu{r7v5S3p*+?f-;1{GhKe-w3U;#S2=C{#N!ntzB@+Mk)5E5c*b}a zM8FeP7ZoF5%H#6M2`E*t}U#7Ij^jChb?unPR>iS53 z8A`c8(CM}*h&@K1Zk^kTb=n;cp0Q;#70O6Fbe06&!qxDR0n==Lr|pmbN`q8QZ{wV~ z_L46Beg#aijon`Th&A4ORn)$pxI4v`*;qW)zCY!xt{ym%b96BW4e~R-*;nZJ_`#u% zt?NW_*Eq+g@5e$TBJ_b5fH%B^7$v;Cv01(M0mItOdSaAG6XG7nAN~36A95u9i`)(! zfmFP^5&BN)`?YdYp3>!@n-I1_ubhj~x3q0dM81D}Ouz9+1pQ$lw8d(OF0qqbb*uqS z>@ha|4@;{`(eXFZI=Z;g;C`lsLpDa=;QlNwZ~(b15Oq0zxA5E9lQroa>e{{zv(t&R zq%oMy46XNX&OLnb`RzGfp?XxNadndGp5RYu$B_9j z%?$BW5A%+{P5IgcxdV`AR)`HjYFdn>$c3z{LzlG$MrXE)7HcwF#9>8%H#V|(kdD3N zQIsdsLud?re!}ql<8Rl@mW#G4hu*@D526Vn=R)k&F;8170IA<3nEim;0DHL|QS5=y zHeXX77}&47c1vTC%m78BrA>vnogY%D8wxKcIFaHtYr`iZ8;=l$Ng1k{ZEr`d#3(e| zHqf!lQ#8}9FJ8=HT?!MxzD>!Hs}$tJDm|nZFPkH{ql%xfRgY^Tu4n>wj5-^P`C(8H zWVR6lfe!TYM~#Bc-lkYd{gDz+Fm1Ev>R}bh&(AH-f9^hLjYFOVXeF#u0<#BDwJ|+6 z*xNv@e4+zc!h;CJ=t`}Dg0DL*n@dS?E0jX%*q>)tGFB(GEfjp1jsZ0B>3BF&@JUhm zaeCy0&Mtj*l04S6+Zlvv7_PqGdk*B=aS%kjl?Z!d6gVdik_SD2-&uyq9+_O&-pbF-`f6K1wC_!mz!G%;o8qa{AJ zGF;yMlP4CZwlh`XRExJMul#ZBbjOOrTM5b1mnqaYQA7 zf>6;u@w>(8`Aq}3hXtR;1jz|@LmZN_9%H-;7AL%oa_y?Os^*Iq(D=5ceq%<6Xbm2# ziA~H@8a)MJtt$m&rKwrVQ&F%WS(#+6RK8)r%q_)u^ueAz-eq0}?mRtRk|McYluMg8 z^}kMWe$-{f@&&`ulyDCv%e(_i_&e$VNL;jo^I8N2=XYx1m?PY_)rK5R9vfsCLTH7* zwa2c4)(8#I+4_K{dqyd1j&3gq#ylN!Fx5+&+b{0kKfySQ8S7`Ku16L3g1Ix6cra_W zW2pS}4e7-+*sx&e@Am=x-Ge#whVNDg0$47j{;frlkJhZ?H)AgsqG2n5J6hb%R32{!GcnNZ{$bkEDw@JZgLwb41?**XYMc;)Y7WL!IoOyjl7@ z+r6B^xU4JZUGst)!bn>{zwczXcU=T6zpUa 
zt5df++~K7yKoGEWPo!00nR=1XGf#?mgU}kav}b$IkP|;#C)qJ}W>!5x@QT);^VbM> zR%>M_t*QQv4&0k()AI@JyXQAyDtymJIeP6zNn^cPj{Tm!lmzzg<+|p-yaR1AeS>BK zEn{=(fk3p47$i%_W*r(L#AA20`o7$45Iqg;{SyDo-{I(U;!FJ{T}&8`n=BEO?F5D5 z6RiGe(2L#i;_4=qZ#%Yb4 zDBPj0fUlHXFa0k70Q}5I_a|QB#5Z7P4Zor^=~gg!wqImsLIAU?O z_^K(p?Lplo&!>?+AsR=BYE5YK(dvg4PpiBy6{xUP<*R-D%Gew{d-+0JdQ8oly05f2 z!UNNm-|xEt^R2P_gj1)yXwU^y#oy)u_`Onu`vGz#p^a6CF{x`uTf2>dALjPph0 zWj?g#Sy;p2u$k}bSM%GGr1ypzO*5mvm2@3E3UI7Ok}}9+${TCSDNi4!8@B`QZljsk zb_Opu8%g=vTiK+PyR(VfwiE2U-;eDf9!EfZqpe=#b87c}>P6}gJw;^G8rfE6i?pr> zf1<<#*|7rFmWvA2JGKR)#DBj8B%9`ZXD`wVL{|knqMkfFA*6bGy+&P>nI?U@GFIr= zds>&^qS09Fud?a3V-N~|_CSWDJC zK2l|)_=Slmtc-4G83a!j^s*K~%5n(76yMscnu_YyF~3Olgz+q)j&NT>2qH$jM9a`9 z@N6jH%JBk986iTSh6rW0PLS>ktYjwy)Rj}=CG|fFI6XKP8gTlg?Q%dc2}BXCc$hf6 zRR-oY`6>g{-%`P}*(q@Pb2umcZ$ytP&KMizs^JNcv)}v~fcGlyqGg5#5{c3WfQiwn zx4G};JrBuFW*NFA4QQfMc9@$+7(O5zFjTS_HlHIiN1a&N$bUSx$Ocku@k#Jr8_JW> zLmqF!QQHKe79xqN_3w4nYJg)nNVdca9a-Lk&oJ1%RPxrW>VmfIrqTWUa!iq#x>6NI z{e&gv{iq_uf!PxS_@PN>W7Z%vsy})oH+sOTti=~CKEs2R zRo@wgm##}L#(jO5Kd1+T-Bd62FN9MbYMOtZ)7hDtZN;Wrk&lh$my=C>3%1!w(?!f@ zcMngrv^1HuQf+Y_aEK+bZCp7zJ5Rk4JMuXPvolsO=MMcj5Z)(`2VAmh(Cf53z1xjT zm~N&wmgYWenq}B`BM=jbOvNC@C=FYn^5Gj6hE=N@AgTHGiEP1(TeC=#I@<%v1z=qaidVX18PM z+$yO~Z^aMBysS4Q&{!1_BVR}H!@L}&)$17%p>UCWr?&%* zPy64VC2TDGXel%~FTg3KL``vovA951^>9YLvtfRayl1}AXU8!Ql=5&vd{BlDlliS1 zEC&I^wss7Ep#(-}nDaQ!N& zON{>Q2K5OKPNO-#!0$)Si2#!9-Z8P$BSLDKm9oAcQ)o}XjazsQL`Eh!JE*U+w3Ujc zM1Xwx0Gs^TiNSCb9PemHDUh6y&==vz?IuU}Mkur|k8n{b#!#=TFpYXHT3)o~lL;SW zUx0aTD)sR%8uuXL0pI&Kn2+dZrbdUSiY^yC`t2uYuJml@G71pZzw7h@`qDy{&OzvS z8n`$cIwN7z`Ta}My9A!EU@E{nG8z*B_Eg$2+LOKgZV}B(0e@;x_7ia8!%GE}8e(eJ zAT`5s4V9$KqztW^%TOBcdsf%(!kqXRXDk)16V*qd*`f^I4#@_)922p!458RPy;F`3 z@m#ln3;yP~{CCk2h*r9U?ri!I7uei7B7ZPH-N$tpU^jMTu> zCE%lGA}yRC59S?io+M?1>acOI4{YuF$Lw!%Z>KI}G$kzElELXoxO8~kcNo~#Kk#nV z(xRTo_EIs&$;9xw)e!V!wMz`Nz6$rwQaHptm*{$eyDzuV*j;W!W6>k>UDuc1f)AX7 zJs7OtoBI7sg?bpk$$-@)XoK?z^>e8FS{*a6JF~=&?8PCCG*GP|qGiFpu_zgDO2DMK zK4>I4oohyNk|Vx=k-ZsLJ)b 
zyO#d;DmpGeD(h;Gi03A9%xArB68d;96eS(Vj|E2WJh)0J;tUMfG$&L?{q-%Gx7DFk zI@2Of?ff10l*dZcJPXam58Jsiffw`zK{sow$L@EX04&JoqG}SFebV~2DOr3Gu_*n1 zF}2pK6WPtHHf!X0_#+*7al3n}uBxzlTkJz0*B-z4TIAq}UFJcLNus-R?i&ahz&O3> z#-j`ge9{~Rh6{=4{w=gp$jvmUmH?48I<6!L5AMVm2l(0RR;+#y%kHhu&18tlqGbcR z%>5~VumKnv4q2&5B_kD~UuKNAAnVApu!0ck23QKE<=R!P+$dq*Rbti)F_xuNfB-%q zFBY`8C~@$s_s`Bkz!m-GJ0~c)fZDK@RYX1g6XI}{Pk)kP_fy|y1WQCv_kcyi!xaCb zh*Fo4a zpj&+SC=4vIZ<~d?1DJg9>Zzb7=){DcnGbGY_;sXF4(B3Kd%t5Ny?1BPz?{v9=AufU zi`WgE3p{H+&od<>e%qG!#k&= z_X~Prsh~h{M5Q)yZslr{@Evlgem$~GBV}Rfdn+p}p;7?mo&vDi3V1i;|$+fO)NolW#Q9$bcE%4c^*r@@q#8CerjuIY^ znDb|3Df?mc!WV?DIBlE(S)YU1%rS^qD9ULYNw8xDG*^sxz$WlEMA%#6TTxzWkA(Aa zLh^x2d0=#5R4~9+JzZ}#JFG2z@;%=PBFywq+gr^op8Q`p4bA{hmg@AWO_ z)USSTgV{EDea=~`Id^W;ePJXhGG>HO4C@yuA7|_4C8p0g(PFN6a`(%z`0zGNzSdr$ zwIg0n3W6^W=UqDo5bJBzBW#J~p$5F-$ooo)Q%RMNi!TAAxh{-UyqOw<4Z#gyrnH6= zmTT&f(nRRsMFoXtU=9$%e>UPnWX2R}$W^r5U?ug`9(Hac7J;E9IDi9Lb)QJLifiUE zY4^2if;C;`wT==Hvac{@gk}JGm?va3tvgbf$~`lOxV(rddrYg#__Y%r)9d(;>8(#0 z1;JcHpxBI4$Ki~7Ut`6J&B)%g-Daf8;nRk7$k>w&xXa4~3SvV4;)7lnT^!l$-nZd%R;;({bA43?9{(Zd9x;qsKUWBj(+_LCAw9Ad*^zsx3E z(dbRjhP<~Qp_Zzk2#iHKfI_EDI`OCl3`i9CKt5#`;rlC?YC{19T-#9iOcoFsGGUAn zz}bx-{rJPCSP%3BWU>426|MSkwSIv(IWGv|}HEi|o%A4Mpm{n-g!Wc+S8lJ9x&I`^0e7d|4IvXxV#)RV#%0*kt#xPH6+PkXo{uBW(YSL29p{} zmo${VG9OZaJ|D29$xQ|@C|XUDz`(YU8+%zurB6)ewk2B0johyvjqJc1`_dl56iEs9 zr7K6T{E-x+Y|o{7_KM>p?M5683C@$6GNBl^2c41e!^76f$yarJGv`IgBw%8_&;f?( z9_n%92fa&=fGv)gDoFjTTDpLi7265<^;{uvq|0>p@2?*?*l|*5O{&RE0a2(#0I4in z{aF14?e#sAz$rbWO%)8vqScM)i-9AhkJQCYGyL~UW)x$T;yDT!D3^9Df zOyX-WXG3(L3)vr@KSg(i5L*O~`%G*5HO;L1J5j+&$c|oN(V`$Yb&J^83qNF&B^EED zOz(c$wK{j4>x53Up?Q2%;6IE+*iR%4 zSCRqbY3J$GSr}WF&%|D%{e9`#?oMB^Xi|)jQineXy3dUb-Nd-Tlby z%2$(rw?SErD~%o>d{1Bs%Q69H2SgIghKG&30P?}{e%FV{_e1Mut0CF64&hDsSO=%n zU!S1bo%NKDNQK8E8C3p$^7(Dfnlw>YQ`#op{K^fvv--CGx&oe9e(f<-`lR%%y-CvM zL*LPEt`*>|{>0!RKp%Xo27XjPF^TgAhFL&xt~-(Ye}nbA?L!);)h^P(*k~WUVl~Xj*C8uKsScCJ2rq5xmRm6mT8a*gMs7stF{_U$ zu&QV}Fq=@`+@F6oeu7H7lCsf&!q0)Zqoc-61RrczE3>-qXk@hkcqX}y9A*{nds^M8 
z*{?R3rp=@8AKe#xPh;z~@lSTUai*QM?q70iK#0`5cKpGhM3eKw1mF7Zawwg+3!Mg@ zARNhYqC(w+W777D2GtK09OK`o)60M4D4UO7-#$?t-CfG}QNv3GLDoZ1X3nWU5G+fV zDPVz9JYFiKc+TmVKQp}hpl3ewvbV-rI9lE@iaY6Ip`Z>@ zJHrGbh5MuTK2+@v76$CqHAM~F1W^wX99ZmjUtDu%vFIhNiE zF=xmJH*Y=5A$4Kf(8YjZjK<>{{^P($>P&;=V*#}MBuqWW_kO*}P7X!~otX2Lg+k^4 zZ7Y*1Ez^mYhanYoN-xpTZP^z7CIoustaaN1_6l+{&cYJrxfkF7>>#$XvN^El7=vf- zQ620+tA3w|Vi0RmrH`$Ptf%vM33~}8RHC#d5)IT#XXicB{OJc+cUmLX>!gAy9+{V@S2ofsuJ@?D)#Lh37(B91M;+3Ats4?CW8Plpxx zv3^Vb>OBPSXgj>^eEg`}ePR{l#4#DtCS0w@1DmZL1AYu|d3DJ}PIU)^_@x^ojVF#> zrwTidc8wXO_V_bWR7kKFh18}!VfZ(&1ijW3)VfgwcytIA%;{Fr`+ojw+>v>gx+p76 z+HbIEs`0Q%Imck~W|~tQR}&CCPsTZWf|h#Qy8>^gD_m(9oi43!Ok&RMd|B%jxr`6Y z+IX@E)q`Nh3(-N9YA?%?+?PD(&Ue@*E{76h91b1Ui0Q=xz zflj%`Q++{=Ug4p&l{xkN zJ;^-IKDmUpLJj77dbDJlpDIVp2jq7?Y<93ufDw8kPdV~}z zxn@Y#8E@rKKj=Ji@W`3RYun8bYlUv<3Y-VVwap$>;{&{M23;AqKjr-^03@|u&lhel zo4LjqqN2WXai*;6ahn33`l_Qq`dwkF}Z@ zwX}wr4}7Q_EG@6#xE;Rsll0Qk<(wAKaaM&?&2@!4u4J2>#{21!5sFV`zDBZ)<$94~ zo)kiz)OZ8ul&3DZCCDmXst1jj7K#LH5THg~My!X~qOL48?}5p=`2XhICtaK%_)@pc z>f`lUO%rz|=6MZ~xNyK-W>Cs%Yd{zuDo!58_@)8PSohO8&sTrEd%8Q`M&BC$r*5e*e zYiGmZe(dB54qyvz;{halOykK1Zl;*{;*wZ1c-nfA&aH3MSZjhBCeJkeDeENdlF>-ezTD&|2t6PlCj)&c@M3ZX;`qAJO@c@ zVK^7%c{R*RS8hTPU^ri{3*9sTR|C=JLi+h)2^fr;059M|2{6y1ANW7x4bk5 zX|R2+-HDosCQ7q~o~&Lc>vPgs&g~-l%@^o)xMbA;dyH<8|JMUUjF9oonWgd+MxPk8 ztSZG4zr%8bS3a&V!{EchEZ~An*9KBg<1zJCMHKnpU5c|W|HK5CV(F+Gxxl3 zN2ll+IJBEL)@aHKKeBC?3}L$< zn)$zbxzWUy?+R>R0UXi_V)Byh)M$v>w}O zXt3PDG>0@7?lBi`j?A7Tv{JxA=2AwstZ>hR-bFb=BD0v7-?&D~$-G_F()*ds*}Mq` z`lR|Ep&PQY!LNdsaE|(Z{@MuOV~JADkNdfW@j@4rdQeStGP--xZB}i}xB~%|sfV$& zJ#=^0JEFp9eyEw6L{^h)1_UCUKsAaFfzt+rfy)2Rz9ZIhin95zTev?52EyexZYqQ+ zNIN3^@NU=GS@*lPpFfhFDjoN!CFMHg)xBEP4&s-}7-DJ|J!JKV%h zdxLmy+`6|0of;c`uxs7X?NXX`n1hg6Ijh@%;cMA~_>P&?1fOgOl#3|49KlKp=;A-z z`bXGv1%M?$mfIyp10$dMvY9_ut>dT#1Th`5;0I#>pN8s9G-Ufsy2aFxBcdIPZmJdj9e-W6OPNaCSL3e1(ORB|GV zs|Rt!HCpg4E$nhU*p7N$Q3#-}b*Z0ou(PJR9kZaBY 
zue_TO7$Fu|BNoj?SC$RngZaNvhT4xy{-LT@xd3{AIPq^PIHc~J`^|lAb9+eghV4nVtN2TG3tw%ltGPfXm9r`uy!iXHEG(sbiGP+>CVA1u#n@aiM~20hVv- zX-X_DiN1_yOj4x3m#0LLH>3ZP{D!cBV2ZyFZcfm@h&l?_Ci+`qF}<%|kX{6y$pd@> zIUvYC5Ss~0#PR-(u9XIKS@BA}mt+i8=IIt_lfi5*67_|_Vklg`J z@Jw(`rI#wCVGQKzlcm?4*3bq#o^fI-pPZ5R7NVlY?vnq%WL?%G?JEy)nt)rj&kDd-5opAh3=K8<>HaMn#cQh20_1WerKD%>N7rDkDA)gIqMaa zWGFggYSOV%61^XMRDlv1x8(}JMZa(9qDoHSb?tS~Mp+ zny@&c_Ns`RVD?CO;N1V_(b7s9 zw1JNo-*pz~X<$u8A`Q9e4$$zJq?F@h&Tjl8Ea&$@;xRN6?k$I~<^eIyTkGG*@{ ztGBN}e_iC&D9|y>p*{22eLC8|MA^_B9G)ltSnmJhLU1B<^O-=wMpPFiw(9TXw0@!D zemB0ynS2o~{#}iw9IAY2P~24!zTQwO)U0+aV%o}Yd~xqKH7ArY$@tN!OwfypSZ{{S zlb(SJ{-!Q#F~8{ZeE!*Y@wnwXKw?r#82%hi;x-R-+R0o*#d&UE0hxbs(B@aPx3}kq zOLiy~0LAEQTv?r)UoR4?KGD5YL7~a1N`yF2E}v(BHANCw->krjZ$LHy)Wz`vt64fX z+@+b&x@9m}S*b>?2!=A>w_CZzORTJig^FvgFV7Z+<;~`|o-gB;!h83!r<*)jVruto z!}pr6Z}3p`u_-LQ62xaYC5w-8am9fz%1!)t*5}mW*2^*w?p+Th{qZTwrgup0YQYiVLHLbjeWnWf$qF-b2%hLv<9*7IV z>$e&vU=(IGtSIeh4WhObvZaB`&wVlPiBHpmGo#r-9~%)OGtOIrpm&59F8+AC;TUJ| ztpXvwDi)S*iHD$2O=*RSdCZt~R(qe;2j6%3u(9s7=x)3DjqL_p?-Yy=ORvmKK0;&@ z5?^sFbIw}ChJ7x%h_fX2mW4ecPhikUVLP5U!ExL+cS^GV*9F)c&s?Zn_+C$7{FyyA zOW@ttZ5A*-wnj}kB;;k)lm73})5ZK+=s534e$m)G&XvlsS0g73TFQOCyprOyNuL^#2K3)iykY|V41eXNWX0*mXUOa*`uKVDZ`BVw zYvv&*fpBW?k7(vxFGk$<&mt(YaLFbv9b2W5CUpk{K5A6&O!tSWT%}jz9-QMBp;V*E z0)8ir=(0N^uNUx@C;^^|0ie3mOX_ISiDI$ko@4@E9Rc8_58Zy4HZ$k$HK>UF8$&ye z$0t46-@`TS*Ng5kXl6BZAyho`Jq-?~EBRdZIU{zgL7W51pKK^wg|3La;Rs5$Y({k683|cB4jKu}~-soTICl;eg@3w$;^Ci@7 zWj)N#`oT)_B?$v=NLQ+|5Nn@y&i#$)gShJ9T!N0B@ltoX$-22wkWeZbU|qr0!d8`L z_Xc({#ltc3>+3OPz`CCaX6Kv7Hxxbu8yFwTx0iJShqfqvKrkH;B(Tib(JJGcx>(}x zP2!@RD1X8kMv@4)7YQ-hd638IU^$-}m916vcNv31E%J}{#i(cOWR71M7%qvHlt9eD z*Xp!V)s-{T@^zG&QwY>x4^WAI-;XI-`iXu%;$f8WX%zu8rkbfLfA=$6>$%pC$>xBn zWec-V+Fp~B1EH{~><>zfXd`2Z;Nv1f;}ohH4ut{DsTb#Zb7`i0aD)Zk=q4IL) zFv{bL_C6j$71StO`MmD`5Oo$_QN3Zi-a|?w-Kikb9YYDy-QCjNB`|;pN=r8i64Ko; zNJ>jLgXGZN@y+i$>zwls%$hZ8?b+}9JlB0+4?z_us$I8NUW=${a*g5OZdsNurVG$I zn$rg_r=7hV!wZDxLE=AogG4^NNmp>kyIp26xB_}TwQp*4UT{A-{L^(ivqpiRo_${0 
zf8+9UqXGWNt|RX%CEB}*NT1fn{m7=`;>Ukw@Wt=WzaT&9dNaj-AMK24{ZlqN^!79G zo<7je&^;HWxYrS+)v9yZNZUI$T*CmKc*W|(kLG)(s^tkjT-fsO_LH3=YSI$h2-2 zr*SAKbETIV3w6{+m5eOF5I=5Mo5wB08smT;;{`yI+Qm7CQs(ZWoq_Htk<8hMIuoEC9b(r6M(!uVRn7h# zunw`)*z41niEUpe)hkqyc*d!;pTXbPJv6Ji-YkS{SrkX4af5eF|k0!&TT;0Eqa z8-r+0-QSgd?H<5tjfC3k*W-DWn88D5hXynxau?n}cJm05sCl%Ekn7o7aaf!X@oMQq zOCbZ_K6jaFVPE4j=N{-hm#?+`RoPcnGGH6Erm%X&!^q?9wx7cbI>vj+L%yqPG%=)R zsx&sYqkqjrHffD(&0;ITGb;8U@7?y`yO<<|W5c*J$@>nTUl(=7yy!M;I?w?o5`d0< z53pTWw-ZR$L3Wy$s{_FdJvBQ3KtQ+CT1p&}oUnP#c36CnFXkpiR z+C>^Gy7V0y{eVATsvP-Codt+(o;3xzXa+_zN&I3vV62+ClmY#5dsU~1dghKv&#&KK z!%|*2X?o(};11#P|Dj)VKx6Cw9;_qhAg&8N7MuI>$STXupPFL{c926l;KO(Wl~q$W z<6^L?b;dN>N+bUaohwWsNyl;|6W}68nvTQJPf{)Zs;-XhHTg#vnGfkSb?o;GTNFlGlB!3qu-w`3S4<^o@Ru(7~X>t_YNvaItGl3S>OxO z6HO?Xa>xkCxj3p&wAGR1+=rXWr{;K=qo~L3Q`qVm$K->^diBezgWSCZ&RBksVczDbTRca_Bwwp$?AQGT!2m#Gq zjOSN6vdNiP%o3vlXb-RZ-a?75^%3IB#PV~<_iEeoj|c8Xk{ZbE-O0|={FI17EdNAs zG=?Y-*zX`^^|wQR&GN&CKTOc3527{sLp9y+(d7lR|D1(25cH_C1O@6e)f*9;^8Z@) zahFQ*f7fLnFJCUTG`yUlXGyLdw?EKM)A&Yz@U>814aRE-j`!GguLT;U;#G-HeM8Lq za9vzXDc$)u3j!5{_b37cQL}~RK;>y!XH+JlrLmZIwn4#{4d$+SWt~|+fEi}5N#xUU zkiS3S5Q|2cLMIizH2V@*tKk!vI8Ga>zW@E0h>RZ-S?fr%Pi!RQxWctWACoPnnO#%V zpFhpUzz`@0IZ%xr328ZYE<4*iF36Cs*wnz9hgBUg+qV}ti09xB2PVMti^vby8jseu0)-33%s(7#pDVH;_;;m*LH7KThy0 z8 zZ(o9YWSM?6u;;)p&?OijSHI@$ABt~~kINV51>fa-FNGtv;5Gq=j%Hajr%`gtEQ_9W z6sPgeH}PLcuUt7!d|iRAtxmt(t&eNBW4@ELkAG|(MjRtOW{u&p#y&1Z8Usy0dJ$UShkF!sp-R{d=qPc-sX+HZ{cO?))Y#~?e4~G zuT2AP5L@h{qYO?UCVoRkkutcis_Nnk&E2QS!nx-q$ElSg&kNK2>)Y(zFj|vz)7$yE z5!<<(tCl(Gql_nMmVAqnS(kePps0ww!8xs!+JBsJN+S3Pp)Xtm`;-=Ep5Hu+ZA8L@ zIG@v|n;Kjdc9_YwdAo%8sXd?DIB!Io*1Q7mhOKfl`Y)6l<`0Ek1AGDwzf_qXtb5WR zmeI#L?i_he21r=npuy-6(&<){Lnk>?HnqcfE6^V)4Of>IKdaW$b|sVxjKah^7b#g! 
zqI}dH8hC2MLY@V4aG~zz)`dIQq`AOWvd`Gnr*rJc*U{?WZ)ccIuwUda*!U$uDf*Bl z@(*6Jb4k(_25TxT)Ue(`a|&whOif`lcd6f+j-(sDTX*4FbA~b_ST{Kd;m*o&6$;?6 zt$l*@snRbEE0=;>gWs;Pi}Er|gseP%wHW$HS;*?`ao9xqE~_o;EgTfR)ksZ6?i#f2 z7lVp#TKVwU-l?Zr`ZSM$0KInE1ABFZ&kivE@M&Y_Ku!4hZE3x~@kG%2L8S`l(9oY8 zz?HDAj}AhX{728w)VI1JVoiy1fcaTSdF0fY5I!55HQ7k%N;<*(IengyE5N+&&9kdR zV$vKmgQ{cNa=SZR(-tuo!xKt(&j?yip6L6Nz49?5MuVoXT;w7sIOgrs+<{V^GL~Cg zv3?f??MBQ?i%8nJH$g;Ssa@pE=)~JF`o5$E1#DfvMZi?^H(C}8)&N=csBO2s-UU+_ zvW>{9gG7r4Kj3yLco(2)o=@HD5ul3sAcOZgviJ9l1TyqywE$da#LMU@RB!Q*vT`%G zIRo2I=ZIe%{Ah`$749Du&LdgN;*CZ{@{ezXhACe<-!8|iM&WwtM^MR!$+jOWaVJ^9 z!S=0}UE?YhS@%TM-#-*tjsP)O9z;rt{Dh#W54=!jW(yzsHwY5%tve2oe^Ad4*%yC7 zeoMBdB(yJW_Pd5kw1{ss-r2KSKpV zE33aIR&fj832OdrC1o`?<07zverD)z9yF&?by&)t=y;(UB}og;vv`@9OOyeB0!6T6 zDY$7nO}K07dvK8b0M5P*lpO5)h+W`1A1ig3&CtU5mEG9u%)Tx z6p_s26i`u_SAF3-f&AW}mMd4GKYz8Zt!eO92d?!y-Cd9P3~xwCSo1mCZK1A57#U=P zQiPbhMQYv2pF*O=-@}r&GPQv8KK32(T+`FQ;rq6PVlR5l3lEdH$mBQu7YSrj$i9X2 z4)v|QVwd6yo;VY7&a)F+XsBN_LRwzhyge_3yn~1q*UezjC0jA%aGhI22)TB#^XZs3 z#s;ea?Z2IN&NVN24Rep_^cT1erQ@wWnm-rBAUjFT6lk0A=6&miT(~b^qr5|hNm);x zKagbWhVi}aNaoc#iIrB?r+jhn1B*%sRG<~krEI>-uk5w`mg!5QND7JFuIgjc<4(ym|06M9AAeUN=!^TqTq=%5#%9(O>Q{Wk3lCRyjCTzDLklOG<}ffM5e^#YfE zS}(OQ&2&UEREBzYy%gcgVhBRtE^+aj*R3q| z`M|~P#oF&$`qweH-~yA{WtPm?NFuxSrxK38%48v0{jQ&roWceb;CAFYe%<{Rm)UOq zlf&X3HgHZo%e92)H9f7kf0{{n^l+%TEpMI-+fnvc^`YU3(>2d!>G^lSvm zKkr>s-Yl!=6PXk&#Kivzk2Nl_|Uhc&#t+tN;E1xSA5R>fg6~=4z*jr?5iNQNhD0%Hnu*rf5DYsyAfYSUsxbD-D2Z zQB#1wg&;tEH17MPSwwsr`6uSyF6ED0k8J{UPd_?j6WgceL(OtL$2n~f8RG&E=5pbU ztN=gT#n4gI)|nsYh`l?cNW-C#@N;$${n+6k>JxC3TQuvH zk;g+vyJvzdK0@GKzT>bHkpUh#Xjt)@rIJ7-jj@vEj`vpQO#OoblJ<_`mg~1v>BC-% zrIqVK{@K#rOkeEk$RSd97%ndPl&;N}a6cqTd&B+fj zxDQ}=CtYk@_K>?fFUV_rRsD0BW1PF{VMnjf_VJMS{Lark%pB+AHgji6=S`KFqM%8K zy*u+lpT>Vn{V>EK0h>(i?a^X|ql|696=hyZ?&IgKMGWngvK&3nTD*?+9=OEwA#1eA zV?nORN0HMk+Ia=J>(_v?wAs_H-s|}WrIkey!zotPoBa#Ds*r6>H67t#^8v}b-nEdo zL0g+uM=7393re}!tL_LmIH+<*Vpj$}geAy!=ZE|86l|7m`jp%E6C-c`H{$zzYqh$C 
zS!1v%Al)NcoGG2wT8`rSlocWmC;*|4TOU?_O^*xadcNCRb4$whF+p-5+QMbU-U*YO zVRDGj$;M-2a~EA)A{*nQvbDMUmmVwPTW*e!IHHAi!LHrB07Y2OZ@lrA6Rj9s(WCNi^+6uN&JMOQMB#EsVBh`Fva)W6F|IrF-Lfm6df=4u zwV}N~02Ovd^#9zaFn>M>90Ih^*bk<=iBnz@bGRhL8Gikn^&z2$6OV8JTYb(Fo33xf zifZ%b%;S*Mj6jylJdM-gRX3d<=wzdjCpmvZ{uB#pJ5NKb0n=J53iaBbX)5d-UD~`L z$mj48V#~$&Q;KS%RtDhKz)Hv!+0Er8ZIF7R8qoEE4);G>)tycEsEkUwZ+ssjGxK31 zF_>ws@t7mBTn$6_fc_QwcK-c#^GT~keRP95c=bR}fUaj}1Zc$S;QB8s4pZLnw$9Es z>K@E$8TE@92{NV7d>jS1CYX@E>o$7715WZC8WrgZU7bHiME%kMq~vg$eQ};NkH?C) znGo#u`-F&>sy$>O?giB+maqJj_c-rf{=sgcZICOUV3~8KQef?4aI$SL?Qp*Pc<$e& zI+t@_$H%_?q{tn=SE9liM3=(hG(#o=s?VponNAU#4TmSJQo$=*9PMb@ymv;e!2M#XMXWUKfR9{bR492H5l= z<48_fIMLL`Vu?++%fBX!6@kdf_(B+Y`O!t7&2dZ6bCqo6O9;EIm$jh8vj)^XB;M6;Gbbf+zQgO)ys-y!-K7QVvMir=L?EUT890 zcI-cWQ%t|EE(JlTv+>DQk$~-hcDdlb(A24>b0c-ymm zG1>1-Wpb$m36?f6$_IfuT_XH7w!ptH_R}((k~aWzlugu6+93jV&+scB>tH(XR#_A_ zXK&@{2E71#P$}eBJ^x$zi8tbrRedf8XcngI8}jmHwtGt)r9u*4OG271d^0cW*P>%tTNKQP zgp`M@XaAxCxMI<$+*WaY9x+={zM$-0V6U#*gS(LL7}wY{s8xmTWOJ6<-uW1FeYiTN zbXYlql)JH^QxGWkrb0Y4tsSNa`_U@$j<->fb((&D#%b={z4fD2Adv2|Rdp57wbMWZ z2<_8d(_ov!Opf?xdroqh6&}mSHwr@qfS3+c1W8X}2ZF}njS9Zp)!*(RoBF$H+4jTt z4-@y#8rbvT#tK}u2|Jr`9PY4YtGOdv_xG-4o+qqj9 zG16VP3wH*{Q=ODBzOmo4WXYusRZs=1JE*I`0#L_mTrxGT`dWv!e!Y8>g0(G(c&j*l zVGCJZBQO`coyD0eDid&CT}#V7510my)T{rI1|N@4ZRpWrdK-` zj;|9o4murRFU#z)W-4=pwh&QHCLvlMs%?ftwe zj|D~W*3UInvrhPA{a0lQYgWguhZY+h%~U-9ORoL2bRv~TCbY9qYDR2h$4wL#dwXHrrQJ8TC3}8)4(>$Bs zzay!0UaOV1dLDqEKcDq^J*Eh&o%GGX5R0G#+Z z$loyJruB86BZ9c7eUYYC%@!)71gk^`Hr!&DD}2JB)8B0wtP^A{5yu!W@_SUE z(w%PcyQz5KB~9Gb{O{AjnreK@jXRKwO83`kn-fw=8n-+ zS@Q{Z@}ACqSx~H?+Rvm>nRU_f?y`^)73hhoz#7WWAO!)DPdYR~XUJ+1A9@<>HT-G5 zqRlo>v5|ymM89VL@M;bTDz4MNeCQZ^6on`Vnm;vs7$PU+e?v>I5^n$JnwJcU_)Fl- zF`!pMgPcnjkk{Vw3%%lOUquF*DAb#HdT{`@!%rzd&=RqX95kD+na<6B2oK@}U=^ish|DUQ%7^*{YLemxBw&@|?b! 
zjHv?L%9W1bzn?Z1g@!yScV8bUpuMaU8HmsDPkwD}K%YfV{IE)Y-&)ZnfrACDrowph z7d7iuI&RG)P`KACnn$Jq;50;R@f{OL+k3~)XI8B2alY%R)nP~4%ruuIcm=vAqyr^H zw!-*Bxgz`>w}v)4NR&^m!OlXHp4D*;awJ-ZfapN?SjWVG&{>WL1&q^^zvnb=l=VPf zGMp?=MwgdeCDC>;?OfOqa-qfLzgS5DZMyz~Q<@3&ZuG7G$Sd@V7YSYKc^3MLMWYWN zdD;7J!?=qWLbhG4bw$xT2vmnB{Q5i&Ej5k!tEe}Q0*MA3QBn^jKUCu~wSmsabiOwg zl5H7JOL!`vfDGPzTkr&Z=;cBo!>Hf}j3GzZ9|Cjd;!u7H@kVUFgqBa3P!r;oz5e(A zSb(f?9l#v*1wg5<$ifB;L_l9f27H=_O91HJJzyrM>Y|3h`H z*q;H>yoLy5B@+we-!TSAFV8ZW06r&|4+5~Oilm3Wuy37ud8K))MV{9$>i5*uqb&W} zY2M8lXA;wChkz7F(E(f7+%<&&1D#4gAi#l~Ne)~hiX~CA;|;G-vQ^&5oAcq6(SH80 zGcdr^@I*mKM#8dZ&l$>)Q+rH}d0|g*GVI3%E4w5II5qfjemU-8GYrx2R6HuIkE(Uc z7Kzof_q&Y^tAgih->vz!MN3kfBucW z!%1uG$bGrm=u-*}2xVutU1YBkG11qzK~cltW7er#*eqA@?ji>C%i(Byry`EA7mM)f zp54}>dQQqSNjyyC<_~^uSvUFI!w|dIs8NtxFvnWDONXVj!<4@+rk$%6?X(u5dUpUC zqz{|v@gli@ga}q;@FylqpSj;KIeB_H2!$uviR`7ib^sH)&;b@8H2w$zb{^hk8tqEJ zV1E=Gp3#t-zabfd*eVEz1pX0C0MJAbgx~f9!50JG)DPt$V#6Z%_Lx9(Zl5lz(Jf>6F#mi%e6UC7c(T86FIvg|lDJw}`9}wsf z5xc__DDeGu@H43CXPW6nh1Zn4nS*s~#<+%~w4-k1j6c)W1#7Nu^!U$!F9i5R zfip)2ak0l@f;(%s^KaZX)-id4XHTzHgMKCcsgq0M4MHS1AZ?j}?PVoHcwIiKcWuQT zR#k%6yMeSRG^d4YYQ9}-D^q`ewY54tzSW7|KWXjHa6Z~qYFcqKT6`nvy3wac9)*;! 
zn1%?$iZ*%eAOk(PV(?4m9OC+} zisUN{%#T4v!g{!u804rrLo4Riz6k^$xAS>T%}z6+9X4lsYyh2xen~%EDUOYsX?{B+ z(qZ6FCq;|l6i_aNoX-psDDMe7+PKpzWZyQ1R8Plw-pOjpZIoZOxbgM<$m3PUbjl_G zMEMye3cEOQ4(ESdKz>OSN8G(CrQZ^zAz+<1kFNatcPUA?(kM^2+%RWxbJyWWeN#`1 z>obs%oegv zg8)HF09V${s%#7|?4VL-BgZgdd`p8l`Ul$P7HKgFn`I_4z#;8ZGcJe<#GQYVP73}d zGJIx5VuIhfBMiXg8;0>BsPTYh7zH%duH?Oaq1Yddakp)O{g*t~{*td})eb z+AxW3$SNB*Afve(?u7$q@*ZMXtt@KLgHC(G?5SM&5wL7c}yii zY$Ss4&jmL2`LWw?k)MdjDY@_rP;W>}GqLDWQwkPohS{Mk0{Si601^s3Gd!usei+tR zALQ2N$loJWfR?s}DL>S{vDlS){0h(1=9(vx;*;*;#}C$A=>Cktoltg?p8L&` z;H7jb2-lMC^s1yJ)qJS=a=&}ux0?YrEPZbP+8$M2{KP8hI{Fy7+^3^H%c^Y+UN%nd4lg zt}|?Xnx1Re+v^VlP-e*DZJ;!r6p$VO9aAPSko6lEVSuuo9LPT#7a7-C2H~-3{Ky&n za&g@9%4Lg&@i73$?*p?bnkNob|4n4*V2;R2XQhf^pd=p;v7gTOtjKjEwSExlXuaLx z)cPX4_4>4XT0|^!N}G@l|L}Vi5)juJPyer*(2@J1*NmshbN??VB8z7%ydAXmxs7S@ z*kH_x@oxTZ9rEIiXmD%o<&6=R)zbfL(7pFZ`87aT|e=U+{HTz^++vy#&{QE>Fde9 zk1a~8`r2|nAA5gcWP}-KFZl+L?8pgjDw;D&gJK@&j%SPBqGr%seAy;L)RfSDEIG)Gy&+swTYyg0}j|b zd}klXAI`+Rau2^l(|x!>%uKeGIF%lomNRwXA6ZeU_~0U1hD&n_+m~WUp_eP476u*A zFPBkO0|_pmv8RMua*%Z$KuG!B#rxghpY2K?sfTg(EBIG0vkr-_6RCv|^KyBzKmSt@*s!6~n$B~Q|kWGG~b+Yv)m(}vvhk0jR_DG8yJc*KoauaUp(5cOw; z?+==W4#|+b@QOY~gGu~66^9olP2vTggnO;}@A!|CjOt+h z`6NhN?J2NpwDfAe>$>atkq`uU{#+Elyu+GY;*>Blh^rs#Rh`pcu0HY4(cu0y!<>bX zlLeG{{Cx<_P5Nc~Cbd1F(_@HyTv0@Azq@RTuxc&&X%_=eb-KiPl9!w%-(SYKcGuj8fK7Gd-XvDE8H`SXSr{=HKg&s%f0Y122eD6c}{!r z+v~icX{4>7RnLcc?~vxXI9|TcWRDt4bHyxzMm($yW1uCU*Jkp!atLg?sI*?v%g`EAxC9cjwkNyM59_7|KU8-`mSJ z9Bt$BkEIxoiL@W>Y9jUGC@HDcI0JT-2%U~Q8ZOKCxPgzG?sA3UeX1^c+wYMin(Sjg z9d0ySCCtFX*Tif7otJi8ArgE|0hYI-!vy-xC|4{%d)xK_$B#h00sqj<9AP1P^IlCx zEA_axScy(9=e7$6UT4Z=RsR>ov@w8iWJw(wY=iGN8mXJ*R*5o6dZHXZNs|o*>`lRJ zrSL1C-halFkvfG12I&I(?W5Dtj4I_?GO+Zk(nGgT?6Y)qiGx9PCdBY~+JN`oA(6=e zh)jfX{_%-%oLoukc96QS*L?-$UG|^|L#pc^#G1gEybp zn1w^d!UmzL`Jy9w-WcK7zmKjiujAgWYUjkeJB>;$g>eAYCVcB&Lt}biy6BC>Ygzi= zM-bsO!}Z-OPH`EEn`7wH?&JY~$DP}h|CH;VbO_f+?- zsX?3T^?3iVa#Um0ALBOg5{v^k?83|6v%~&afM~MN`RrW@$d>a&D3&HMQL!b3b3emN 
z#d$a6RNs<8A>FYV7zfH>(>lath*h8Z;6s#E13R8VYUL79E$3LR;mVcx?M?*V%E8Av z&kmvSf1ePr;82S@{x;uuYU-35DU7VxKid`!P-a&Uln|VL@%?Ua$h2)nyig_&U6$(T zsP@!ky9E7wzFg#I1qD7>CZADKffD`t0*TCH_542BB7OdkW#4I2R@Y_zfp`L$4d z)k|Z9sa?UMvhg>~S@U4boAjR>motlbad=Iwv?K|p$PR}ZId51vfHVT2?w5-o~kbd8zJro z)kxO)qKLbbd8PEO%V|Sqlh^rC!k=3s8l3}U|j?G$-5m4uaN1zs8GYcT}59H}|T zAZqlIItVx*PDmrw9mDVxIH$4$0KFUC7J%S2vSng>eUO=U1@h2}u+h4UsMQ_`rR_bR zbwN8pjYfst-a$r3N^>a7uljhsX^4Z9GrGw0k?Ce#5gz6u9DtFNTFwM!r$ys-!Ec^} zN_H5dz1E=1=To&J6~uFr74jlA1R|HXDmd;fJOMc%P}SS>F9{G>dJQNPAK75Jy^*|{ zFt~CLa`Ulvx@mgrJL}x<J9lfw4z=e11*Bp=PSsK|!g7|cF2imJ69}bumEx15tz{(k_vUm( z6ENGhWuZ3ScI`TU-nbIs5~f;={H)_we-P9^y0aG66+d1UI zfTAmHl@ki@vM-54_IrOUy*`)Xyq4~Oataqbm=ghH(16}d|!ii|mA%$Xgn9$&}a5k*Tw zm%j1$s|DK1qe|TubH$j<3{?L z+^`4*P{VO3FVm?EAH0c$p_hGZl$$MH4jxZ4U3-5;?EuWFY3`))EmgZW^iTRxW@)Y= z42=opwS=GtBtQ`EDoW8Cb@?&ZI)!tOoA8m?O}O@}xhhl60?RT9nf%|I4^;6cs6*nT>A zN;S>I#{gj&UCZhGUO#Sa^x_6=mf*>8()3Y~gO*^6UHl{zuleZ2cKI4Lm&vA*DJXI?6x+<_b2%%0}=>L z;s!5RjcsOQvRkfFtqfNa?bpni?U5zp)C=!}nRo#$1a zxwaty)cEFq@xI5KxuZE~k z*?Bdmtf!*fjz(w@QieZUL20k6eP36vfna?=S3{FiF)}WZFoU_ZGR8>r?OE9rzwI-F z%5^M8!lC?zhhbcTgKAuI%AJ3H z6(`QoF5yOk*C!$RzV~oSd`iDT+&SH!^auEa&5nJTKUoe_cxxFOhF;FTos}ubHi__; zV%J<;lFTs%J0g=gAtJQ~Ubf6lIom!^S;RyT(gQEZNhDn(?a!h9cJq=KJz_iqmFlw? 
zn74y=r;naUQe7w>^XA2g{4a)kN5tga1Djkm>hQ&jWytv=wk#FQomvw_ziR%d`;g&Y z@iE?*aAJ6bW)bIp>AJY{69!us8SWTbXrQ23hs;Z(xuSrYy15EtR*0I0i3iEQ=~pHB zEh<5B?8fPXcS=Dpw3z`fOeg<$pp_Gw#V=9Fl{X%bEh2v-l3u=?7MeI8Pm%UHwpu=n z;=JSW~^WtWB zoiD!SON<;$>xBn-@RXU!KhOU{mwZo=|7tYq3YAaD@#Xr4Bjh_d5|d2#*3Blc?uUcNeruTA)O90ADBo&C?`sLf}+L_IjNE}HXt+yszh^k zd%Fihp2litB4h$F?6PEX=HQ~yZ%6^vRQJ=pGS%H@@A+p4^lxellLj zlw#GugU-A!a3@x6g2F|6=l{`m$hUsrwe}_*Oz!-R`&HX|B9sDJSW5F(3>hUZang`K zdmvIyE35y$)w>MQ6yrGg)#m476IF>nD}K^lBgzSMSAa|2PzlPdMuk|oFH)ZvT4#^d zjIG#Xqq`A{jDGIuFBPwB`E1zFrLUA}HHL03KWc)UELmPzh1JH6zOs2SVRJy@R^p+f zP6`RV`SEU$^WP_TQJFEd%iB+A8#X^fSC_kFAT63LHJkyFIWDF_wSMktRO%>yS4{ytPrH6WM|@%R zfqJw8aksTAYI}7Em$k^CqNJIZ!(r18Uu(mfS~o%N8N=mVw>(70laaYQCxr4c;U=({W8Bg3Q=avD zOHL3D;P`3KCeYlq zm1=j35Y?_OPRtrQyzzw!!C2jjt(n?SNpM)%f)e=u-jQll^1!9CfA+}U&qtR%A zf{CBDPPZ)WAuF&|E6BEZ(oHK;Uy_fGPlOfxdQ;nZB)so0m&JzFh_$-z<7Z-VgsqlMNZL z8@+>|P8;@8AUvnlbJ3!qU0QE-y6Y0k+gc4-@=<|!44^WPMq@6vWWRT}2`9g*7MGrL z>2v!lZadmgCEOq9cPu=~6Ov0C<&3c?NnErbTwbfHU3qD_a+S--at};zjpN6MZSxmP zl`T>XzirrxA-KY9uIQ>r8?|~>ohP;R_O!*?Imgsj7(nn)tJM@ao_~LKB((hTA8D7+ z+SK9oBj$P6)$je;3Z?(9@>9{K7e6khi^aWV_ck9_p<>1-+gYxv%y}O@?hraP$NUbp zPO2*57cpjd1w9tWTGX|hRR9~-h}@Wd`Dhe78T>UA>DIR@r}lL0fTSS zuHTGv)KJB%ofQFSO*st~m|MB@ca|pjj*;d9d57Xf;yknLQ zvMgcwi4uJy4uUSBVqQnr2z*A)ycqCs(x~$}T)edZDH*NGO~2O=h3*I1tN}BDb=@09 zqVn!Xj_B7ufUw-AQGRQ?f6k!$meaUEDvAzJsm|VrL2UA7=g!)&qmTep#{AZGbDZi? 
zXchG?&<(hHKrkMt3CWZsK8w;t@VPL7@GN+>NfedicUhp`8cQa z!NN!W9v-j*Z{RGTG3snvbJO+@FPom^y&h{3DsK9M{i`0hextdO+}nlau0as3$r09E zns+PRpRw{Aw^`&y)aD$6tk7nZ1WSlTrCjUc5iYd&+t-C=C5U?VT5AI0O(cYf4J0mV zf*|D47p3nqCxf#e`xMgOWupSn&sOfHJgYrw20c-@==_pl`vhg|h1wo&=spD|Vv2XNIcle>njywkH*j48KB#TtN?{+|Qv)^HB=w2tjAU_=!QD=O0vV%0I2#$1 z?L*PIYKv!7H-$wjy(Xj|MFcdTuh(i;e0(nVR_lXTGN)g|@?}*0>I^Mvzx^fv$jAtb zDxA^XY#?T!k`K0`nqHm?2Dt-0?ftdi``sFDP+Hc!gTwS>E$qwU(*xNeCnFsq_78aC zxz@<)VQi-hZkf7lk($-|c)av)+#Fd&rk8=m=X#gbrg1*4+-8dvI$mn z;y3tG*s#y;qjG}^Xc_y>G}U*RBHaLV5N~0DTzB!yg$FFWn;9?E35>HzVv=hF>AtF{ z;k-5d>I^F`u^6MmhxpGgn@7~D@CX9EWSc|hJd)*o@6A312Rx0n9zu{Hre0Il)Xh6D zu&oGq+f1KRE)HB?fuz3adFEsrtwa{$bagTWU2Uor9tUkKm71Intw+)pogu`9cOXV$ z2>QbBGuMgi)k-JY829lq;YUY|+^FOs^7_71k`=c$F^s!w0P+VD>TJ!$5AjfA|Gz9i zyhNN2DcQ2c2=NNeXB$Spmvgb{JAnUkMu3r}3f zi?YK;gl0K#Z9XuVQxk%|j=61!e_>T)skYPHR|jBGhnmkw4_{xK9AB{Asf1!Bd5o!I zuvATDrenV}^{z3c1HN92Bbk|mr+s6CRR4;^!559d7EK(u^jJ2AK*@oAw)dIAp<^$? z0``%9)^K~*3K9-w)*Xf^P421)vbxEEEESm#YRZG{H1Aq$Dk4Dn`@K>=Zyojs=StZ{ za%1olD~c@5-=3=vR@FN*0aQi8VT4laSM8(0x?h`HEltdO(fl$s0_%d9i7WVYL6EF$ zq}EV2B&{XwIP<{_QK>5U@MJQ<%{TOvOW}hwt=w?{@-s4690cf|urw;N4kpKPOv@Vn-WR@`572 z@uGfqrU0FV_+kZDF}AFgN?vowBN>)rz!W?##TlEqmJ2>?7y(QRVZm~EfzJjz#pjE} z=?rpvp{r=toU{`i96^`e-pA9~O&y2M@Wm-pHbgCg+AK1nhvqd|qy#*hjS+Y<6%N zabby1X@jf+7R#P2tJa2_B+hd#a!H@hO(l*iRp;AmWbW=}ikUTM z$5#;_{y2*>Olp`4RV8S41VQ;Ez|4jsnUhgueu5_L^eYeNN|P7)^X49IDueXG)KTAa zQj_7xlHPZ=+0zSfr_`z(iE_<4!o=(9T=^h7u6L!oo4tD^_l;Vr;y+Ff%YSRcpAXT= z19U(q&h&AQ#?k-Kbk<){HQw7kLpMk__@E#m-AIYj-ObR_T|*6`Akxy(-JOyHNOyOM z#L&_(#5zv-f>pmqRH&Fa9-s8em07u8R7aPNsO zIeEk7enp5=us?8DvM{)q6B42uLe|mYpgA#|RH~!;2SZc#&+kT5GXiFc$C-{@PdC%G zP|qEs3{YnxnQ){ZpWJ!gdL35$2NY{5!spcPqJZc#2lO`yboWWGC9g1#J^8BTjR2ym zt0MmlfEa(Vb@pwk^=b}*i2C5z}YFf<~Fn2VCe32nC$U=-{fB24nyJj2ohY79|ju5C88$6 zBNZuE=U(DH{|PdGh^9&(t&6V3B+{qBA}1{0})_bjvisJ zB_*dXb*Z`ka&L^aCf}h#d0wq^0$FN=jFRda85VjtTyCz3^Jj%|uBCE{UaXWl%_Mu) z&`yk1M%(46;sxP5*5S7#*Jwu0+c~nc2cqTrX6WQb<1i|>y=9P6IZ^~*Tc7ZCAM`Px 
zelDAwadL;EIZ+&$23=#*bIh74D+J~P?ya_BTg4`*B!U$xbix3uMx)xr@OZ7c7@#r6>8*m4_t$3b& zD!8fF6U#u~HO4rLNARUaY(@Fxs8G_yQZ4Xfys9*Jb0Dt?81;tHmBfsxN@*8RYJUx}6=+TyM1H|aC&S)huf?ETMe z=G0^~&G`EzcU;{%X^Y|r=6yC8oR@rU(uWt zRcXP%HeTBzWZMTs4Z@-fcJ72@Vn|Ow1xccfgM(C3V7UXK&uAHQp%*#ZA-eq%f)bZ3 z9?T1N@+M`Nt8C2cE)xiq``x_>`L1`Twu&BV`1Qqdv2K>}5Nps|&i9I>spyeO zzh=5$VZFd85&qJ;;f((CvTUQTi_`(R4WHrL?+u(x9yM-?aZ_ofr`ftW+Gf@PHi@ma zrcO^$Vf>70{`jIVZR@bX+wvro(mr0oX4d(5({fH&3=R)tfy3=Df8FI#ao0PFR8vq- zrM}bqDWD+sBc5auaAQgcM`tUI#f(WFMX4fB9v{$-YrqvtxG_i~jFD@@{D@)IP zd6TiLuCv$PyY5ikaUHZ!pVf%wtEy2Zqr7QifquGPzrvblAoFPk3LBS*He*OLw9;$6 zbk>m~g6EnKPBU9kfFwgUzw)P^?`uDjwd&zv2{NPLMaC>&LxMH3)vUWH4%w#ck&Ni( z9-9k;{RFGuI}z=}AhPl09PyNTK}K(sEaX=-7V7=BELWo13rx<;4X&wsL&<34H0+eS z?v}1Ui4lUN^bJ(tiOkBycuqF3Ta733@vTL=RUCGbUqnTrnFd^ioT!OgXpU=9_}^zC zrpFJf)GWokOI{7fS*`6|LETPC-IsDlg`KETrND(4KUR|-<*vVL@B@xhdqauYAPdI# z)#ZC3ytEY}1U-YT)7+{EgcldE=+I2TdKkY~o=s)(_@@&DG_!Aq&MMQ$ba&zdHIy(r zysHRXTwX-tb$#q`Q&`SO42g%S1q2~0-*sVl_wF45NI=$NcUpW6Ml~m=OV@8zu^Xh)a-)1hN`k| zX7vi{V}#AKr|w2=B@)&zi-g20WSxG&K4CBC|3eFwJaLOvQVYJHnMt$FvQwvYZF%7u zSR;zP*&2wr_6*S!GYh<$!=C5L;galt)DXJa#6SG5%YJmQL?knz#+YF}ksF+4sFv;3 zHE#=NZGnmN$DisxcR2;NpAQpX z{}N@p4&O<2w_3N?(OWKmEc_U~Fn^jUSEbCQwNzi66;Ls9ewLwNZxo~((nmPIDe+<9 z@r>qK?|#^}>zZioc+#bwk||h3&jAG%sbm)noOLL6KN!wgd_{kd4|7Bw1Cc_2}oD0TUON}U*`;*T*6smI(+qsyPa$`#}j zg!^H=342z8PLRT}`r1q8kX9NTN$f_HURK$$(X;XGoZMJn=YRaxQl#NRq*%s6-c1;| zBaOe=EaCA+0P>&@-ZO+obb55^2B?Xu-uY)d ziNN&hx#wh2VHSa-%Dq{5EC$fS0vOYnV?lF&?5E^15ZG$0^0nT5c*0z1WPVs~7m7BY zzdE6U@a$btQMe#}P_}C8X;WoYI1e8Wdyd(W9OzIuUL-A%jn2X`%?*z-uVM1Pv~(Uh z@y!x!+HptJ$;&CIER5?#`Q#axQlj-?)5h&?#Z{AGc_c@czSzCimpuC4rir++t@o6% zP15*~2qipcO6lSEuldzSbg0{hUBTb9kulUIwQ7m?K2skI<$rZgttm!wnlZw&A?8cl z&HZj8e%_=bX`i}S$4^@;$a%B2gLa@tq>WKiUov7kG<$%a$Xlr!K)i*{D+Cmr$!v6L z$NH(*Ge>Rn0Edq6P5=4Zja%Ji_dCMr;eYT3@2l_pf?`R`se1(@)@5&U%uRuwf)f>H z6fq6)8@!O;EfjQvO(@LT$O3Qq`6G5xMK4xe17|;lI&=g3|o9nJjE;`kZ+#M!{vrc{|{Z&ha#k zYg7n7N$QguU$iCgX{ix>O=fBEi-aQ_$~wZcGyr=y46%P4j0vBd!2nXhv`^h=ve}@% 
zSy|1iiG99|mjyvZ$^W7j9oxK*lxa@aa}t$|W+Gz6Su@X4w`bxg(U0ru#fO2MF9Q}P z|DzqC5ha1+xm}$T5Vi$~F|)_Ep*h z%cFW4@CAA!Jadb&EQ!91C;8ZDRlV6QG!PznHhU7{e@!pt3Y0G#HD(60=qkpgG# zreufW4p9Q7`(dHX?m3_f)+j0Hi_twi=M~M*i3Wl%Pan+oy{F^|{e%Z6c{P8b0{+#| zFMMzQMqyWo7$>);ioCTi;cxorKrLl=3Z~AWQM@tjdTDaIgF79)V44!v_rdn@fOv7G z{PClTG>JG$?WB2zzljBJ6Z^#n@T`}t)9ft1)R1Vw22+0Zv+dn7J*<<7N!Ao8kf30p znl4KeN`B6p~M+Yj+zqE6*}lzq^5ho{lHqvBnTX1aHD z!MPB4l^i;^n7&hi+v&V7&uy8%k(+I8f@Oju!;$_c>Mh&amSHzZ(?7I|T--I3{}6fv ztZ~XxuLNAj(3pwha-cO{&ZcQ;6>U_5M%+dh5^Tk@_$tx=#u1TVp1ohakmH;#Jb=%J zNOB3c_tK4srfVQ3CS&0O#e2-+W|`K#=8O$U1;R?u@fh*gmtB&$?7q@f^kO%FzI{Po zeL2}S$5^x2)qZu;4qaT*WT#|&=U@rIviPUyCuqhf1U^g2Rl|XAKS>G-k{f3*EA5W1 z#ij$@JJmh~8rDOS90^n{KxoT+p7tl6m#p$y2%$NJsYn0U9@d_+U9^7s|CmZAR~|S#y?jQT8(6_ z6OgGC_d%!ed&{Cq17i`_Znp0yvS-+t&M#&tfW^icBU1HQ6rf@cCs2Yfe|bn1HQF$C z#5{JDu%~;B4tnt3;eUs<>UwC@@Ucn&)hWP6N=Fy>-v@5dj@L z2tk6fy$ItupY4MMWQV8tbmsd+;E4yku{!X$&Tydpi!~y$>o(Hvc)1Zyl70K36*&hc zd!4uw7!vINlGHux8O3Qhz1@!eKQCOKH}~e*|7!G;lW`9Mq3}Vb*vpAfKhU|RP+G@A z7(PuyXY5b7|I|l{!nV^+v9%z?vA@Z9+QI`ZMYYWFMvovT#=Se<6GJCrUry3q>RyK7 zJ}$&|JwUZ>iNwVJxDo!FM6Dw0&Piu(yp3$}PO?gbYfYk437%d0eCXQZ=ypfd_QUFjMj3$EJfGLbe3V3AcC6qUfF&P(m9# zhN=M+$LIdEy`n40&SUeX574xkEN{d6o9dwI6L!&W$%4pR``1QE;I=#%adnxy;p=N- zBmB)=&E-B}L%#a?5w)5t92H@cb?b73kI@VCOUosXQ7-=yZAqe7pu|e4FK2c&y6_Was?MNH@XC! zZf_W5hkKr+L%Re-GWWD2iO^UT4M3V0`E^0m=pDF_(u1)WHpOSK5=o0P)V@@y0W*f$Pqi;J0j%q1(M6a+sm;;O?x1 zl>I>$l9p-vV}0w_j9=cXUrz-YV>XYX36>bpFz4Y#!#= zgkL(N;c5Jgd;Z~dTkCC4oefW6Vt@Cw#e0M149LZBb-mI%PiJqLu%=GZqh*+piTLepKEK*Myg#Lpv{K{%1e>P>~Nn*o!8llYCb75?@$O@Vk$!> zfhUXQq=5mzwPC5WcdvKPmGpY1WU(p3po0V8z_JB|*u5%6_i1D_mBQ@$hO#vwO1(1<^~l@!PfdxgQZmp&@n2Ac01NIbY${Z_F^i@aof?OqVrOA&pNImOO($M zW6zpGW|7GZH@zo~iZt=)&!Vts-oo~<&3pgRKTiNX2(S(8AGau{l*^2!Eq-0^#xoCS zxt$y>H!-P1MHg(V1!x;~KpJ5}h3&eGDteLnDJUA|>E^K@565%`Dwh>1M(xFb<#p~@ zyFhfu0kmpu&v+`6hGr;djhE&3%>qoch~<9OxwkpdJbZ8R8nTjkyA|ze7+niQn9IN3 zHJ!8^SAjM^wHb^!Lu^)vUGo`fNP_3oP+<@C9umB7@zi39{Jou&+Y+yvWZ(ot0aC*_ zoHMzJg%5}7FcLhllol$i64{xeUxH`P@4gFp+XLYxGX^%KxEm0^z_! 
zGtm(x<&9ywfZCFLuPp!Zdq&Sjq5JB*HkT<^Y)+KCC)(R^s6%s^z|XYxNl*$M>cE{9 z&%m|8wDQI*zK~Y^gGY94=oI&4<9}Yt4aLzntLe_2*F~NaV?ISoKVkzD@)L+UDA%g+A@6&$S%4->%e_rnDn)P=_}jj3za9QB5zEZAhzTvW+V z{xiFqpnSQ@)WnTq(W3NfM}5PbuG9r}I^yLL)xW#MYXgA~jRWveM$T>z%0v02@s@#K zi!Lh!o)eFQi`-6skY+s|LKKb|(u;L2;X!tkkvF{|+a!Vc4D@ciomSw54)lQ6QlA~^ z0M-$!`r7&U9j12v`FM={%s|DC;{T5#a1Z^xC3_n$4-fqEu2BIHaFeWzR>K8QfE=B=@GXkPTFwc=0)Ur1B>t``BkU+IxEvEBK&rDIE zD=y;oGokplZo52{}9oj&GYeYN`k7(R`xsf@a0o*L3#(rr1iyHp~AN6 z=ZG6;b_7C`;bx9{0Yst?xSks!DZ=S`)CUIl)*99dws&7mZF-@Vm;J>jlhqtDBK>bA z&m3GZT+xh@r@Z_^GyGoJ1E@f~+9!cqBagS>p~0s5B0qr&{>*%W19ugL`*=|JCmILK zsgO_2BjvvT)sIXqmOr`R3Ve0K)UiOts2NYs(y_}hu)a$@)Z2YYxtZ`&k7W5~-2aql zVHgo~sDH_Ha)`Yr=Br2U_K98^09P>>pzs2tYP+0BVtL^B;neuUlw1)3Pe_}#m_I?F zv0@A6eJ3`s!2lPS5XZvi<_bDJIGpinlcVGC5H>^GRJMz~?bZqw?077&J)TKquWskf z^qM!|+^UWB&p^IOs zMts^ADh#$CWvUeg07}%eb&m;g0`kW#!r;`C*2Ai@)1y7+%OiDD@rO4tV&ZYF%5eMWNQk9mCTW!>ZMFnREL3yNLj|-q;zV>Nb68 zf1KI#J<~VmJdPaSlen^V% z2&}_jSv2k#8HJbE@#l|WMyCockn!})-TSTUyz%`&A7@2|z>5KbQacsE*RA%L5GZ;& zxeLDAxh&my9(@Tq3Fnt5JX4TFe6ODgA4ep-u7(H$o6{wdkCl>0oX{a|vg39y9NHZC z;4>}a5S zrIDpSAm8#q1}N89q=0O(26`}ql+wR*P68msuqi3ubf7#!f{V7$IJEdni)v<|wTo$B zs2P-agAY-0?u7$HwUxX81TQohH?={~t9C_0u-N_%2+itafBzzA;L)cbwelOy(vVz- zNP)6Wmp5&ya5!4lDDIoAFuG4UMlpg_T#SK4dj*>J>p{I8@T@DN*(8EHBFv;@Eu{OM zM?v&IF(H9=K;gfL-dpU;x(>Vb}d~rS)Az&3uG1d#dz1=Gy2QQsIgrAvJSer zzAZ|LTHhmHc^jgZnz^1KQ3qGP6Dn2_nJg+L9N4m@QNI|YoMAu?HEgN*_^@B5O$;6s zw#~h09WyrRZzwfHFQScwwx4j(A}7_B-c34Afg5hyU}+}juokE9TQe zQ;KA_y+%H`_U9rD(()D_!SwCFLpcK3Tp5hK@7hD;Qet&uXTBPd`uI#zpO4TAiZJo& z`Nan{Zm0tjTXAN;L{l||YTqs<8@xP(lG&UdBu8p zRVGJ%_#T*27>uOqTueWOKdz!*7UX1jjwmms`ZV+@54WDKIN=w$WGbPZ8PMXyf0oFO z>n?NU;>-NA3`l{5|6aC>2F4`j+T@z6Z8Dzzm*Z*de-UkQTV{RIn?o1wpK0f?0HBT4ks>3S4PxUHq9ET+>KsW;GC`|ntRphy;BG+uHDzAbGJFRiwd0|62=CF!?PyA`u$BG-97;^bLgcO~7%#f-e_4NyI|D@Bjfi{LUkU)Pu+=<7w^9W_uUdXWjb5NO?a*dno z@qwsYPyjIAN(3Dw|LVQh0Kllr1UBEU%Aoy&8=GR+sve=33W{j_^I;eN>NM(GS*Xmw zA4NkkrT;tOGON>#`u|ygGIB=O2URBMghwiXz`ex6AN0mz*Smy~C!fNNK=@QX?D{sB 
z_7Y=u?KWs}qOZjnh5RuoN7rLX3vp;LEmOU^ThqG?M{A8GjP}7zcw@U`Q2yv*6_K_2 z8LvH3>ViDyq*Zyj<532CWN9%(s+keMWNJl$W+F+57-(7HUQX)z9IryKJ&@CWM`_my zhah)Nd>azTA5{9ag0aTJ{f@b zE$zzP@G4aZ8*Lr3;snRy`KDW*ELbroSQmDQhxI^)+M*_Dy}jodJlvyA$=vEpH)LtI zBYjM*7viyx>`*aM&r^`WiJ_R*1` zcPxn6b&2?Ue&Zvi18;YiGa-u|l*REPfyLzmCQ0Li&-Hie7mTT0FB}(o!q(I8YxLkH zA+z4oQ>9uQ;A`L4n@-p~Tu)vkfW)g>0X0#6Y-87f4tQ|6GkgmJy5w_Ip;E9BeB?2m zodLPd%k-OyG)?5}y#0gfeQ7WO(Y-(M>1tC`+&Qf7l6ZWWcn(-@u$#%nE?XX1P$OiJ z3ruLFJ!|;4iyObBJo~y{oP707oT?zK8TUc_w3B9<4gF)1N{tUw^vxAT zt&zD1>x8iNGqL0aB>AIeFy5+x41n7S(KFDQDsflUWmuLL{^&pbs_WDx zm@D%Nsh%xx`gNJznf7!haB9Y10}rvsI)n{yJjE!zYq=-fYnh;R~E%U8wZGn4p!&v=H*3=HQuh|DY}UEu!nDF5xadEtIrIM7^ss zBEYzB$<9DBOi zWboF%1e*^Re*!4?xB}&>S|zSd(V+hhCQD6fx-+!e0GPYxU5k9QaAoV1NvjWoz^=D) zk(f&hms5vNF|Cycvf_7d1mT|O#oAtbEBMuukg=sQ5|gw)-kYr7J52B`AMNDJr*k%| zxN}!)w@I&c`h+7Q@qH0wB*)!?jWtf-Ih@{P()j|Ve z7_p2?gteGV95NM_8QDXHcTRzpk&gE(zl36}Lt5T(%*ftbcJXnL87IH22iO0dEq=mw zrN4)h?o6vKrqMao+vRvjRMH%&h`F@BUh~`%nqIs*oEhq1pJ8*1q#q35E1prETT8d# zJTyKSYSn_G0>o7BcrW;Df)*o>@S*|Y4)Yh9OvX-?cB254>Cxjd)KB%;<&Vko$I1hw zTsiCxiJ(ArjwlW(`*2MP4w*^)CI zsorD73TU#mQ8bLXEE54XX|SY(SD2$51OGxh(*q-9GD}OZQO{9sdxu|>@pq%~@m|jq zd>(%sklSF}lTbE-QHUwe+Mu3RU3F4qtDa5ux$cXXl4wy=7RTM!Y~LkkDPVzXa3za$O!xv zT6HyQz3MlbnbR|V9d^cC$ZPx-V`Sce{o&(~IlCiA%AJR}Nog0?K3T-tyX3$ihjhp`L6W`$N{h!x361%fG zDxm}q1_e`{BE8j*7{^19PmD$>)&#B0jW;iOnn%wTB=r}NF#)UalO;H`qhf+y?+8CC zP*7v}#?8#pfgq#a#0vmIRMK`78S0<<8py4Rr09bB>AziUn`pj+G*Rlyw&3Ho@yVhI zA5Gxvk`#~#EHiO)tl+M3+)UOr9FJa%?@qFrU3Yv$SkRNElN=gGX_fDqT3dcfF@P=Y z9=Z$@0fRqmYCi(pAv`k)Gj^I4MXE_;xgS8FZ=5VW`h2ql^h1Z0$exST%hplRQh~gW z0)J-;G_A`B+*P^;ek4a@_-OnT2$CoxyM%_}I?X!G@f|Fuya5i&YZ?5Dn1qiF(hyon*r~Irrja@$Y z;0cVvbr)2Ceu47es!WZ-=yr`!?Gu_M6*tuy?uZ@$hJ3&WQ^H1#D!g``PFns{9Cndg$nBC$V2W+=wec5zpd82FaY+%f~7Q?BF{MHl8EA% zSCLW&FY=8zHm^zMg==~*UPoO86$C(I(}A<@kqW>5mr9#WZ9>I9BiG73yAA<^Vt?7` z?<3j-e6ni~T0Ea-OiP!maDoiZ?r#Sqv(}M}d-^ks|3l?rB71QkV?(GOVti<6l|I#W zoLYcXqgfLPXJM~zrh{x25seFv1ZROvg9q#!?ngaMZ1{qCNWKabBOs;AH8+*U4K#fjF@xF2MOLvZk^=F 
zhld|Gv4yd7os;h0I()aY+5&zy$Ny7KRNE~Mo|tIzKjw*icdph%EhAT{*u~42qzXA| z<^K;UB+lLm?QPbTe=!ojojZ{g{xnKc-OAdrXML(-bh30mxe?G!7km-wqF5bxT75s$ z_0*wOI1%`2aT^n1|G&e$BP)&STmUf)fskn@>ucFOcIQO6%~DKzpF@+PTYylE3qE&R6*t(e8rr zWJ{&GNNh6;BO|lw`!>FR8bfJ(sABIMRA~3ShnA$jX~{<==St6QA5u&bdodMf{zu5D zJ8m>RjRP>xA^8&v&~B$M^+f7adt`QIZla=q1S-tfjH5G|nv+W8_tI)v6NB9slWjGE zEP0J^xft(V?H-i3s8xR?6!Tf+UIdZQPT8Qbd#&(axx0-*wD1=x?9A~uN>8^IhqGM7 z4C1Ac{Lm~>6${<}Xsd-&U0+SpB1n7v>;_1RZ*KtH13QgSlLH4E+LuAG9{AJb49K{M zW87p6w-j#3ial?qGq4HAu6?s(`}@ubjH!+t#^iaSPz#o!D3JCV0;FgP>5fl|jX9YC1|KNq7-|h0L}O;)C@zF_Wx%H6r*+!&uuE)2RDQ zVAg!eKWsy-&1ku2H>{Nd0=)6$zT4J`qDg5>N!XXC<5&1eutNRqzuzYhe<<@4D`AXr z-euRTvosuFH7wBs7c#0~-YEZo7)sOG$lDSLYhDg)R4)CBIUyHx1z?M-lW>6Yy{elr zPet^-LC}Qi0|wAiVB%8?#Mb6I6^CRcrvxouMS+MxYX+RNyga|~YEVp3(e@k(F}%&M zt@XL4IR+jo7e`0!2%Vq|`%LD@&}8_I$!S~j|CYBaw4ztSiVjOTXU zq(1iDq(+ue!}&_!E4$W@P`7t6A2Dm`(7dp*LPeB=QMLc(prF`v{H2fSCSL8Pwy~vM z6lr%ZR3~Su)05KC3r877`|G3JSk8PVf4_B8t(X$HEe4NSIaTgJsmCD2lQ~1lCq*(R zVyAwAXNVuPL9ok}UfS$5(CF=Mu1}o)&g_VmXu0Li>;($_VZPF9;aha3yk^fj6(`EL zP-lTgXKc|I7tKzp(7gYQMLSf%kzjTd)&Cfd=!B%Deg(Ok3ZVeWw3Werw4T`Z_jPim z@m^mcy|edZ<)m?MfW>_+g=mv&iApDwHqTSG;kQ1kf<1d^`+HN(bP-Z-j)pkAUtmQ( z#?{vmrBZCZS-21(fDAFZE5GglF@yH8TCiRDKCiSeH~qvnLTk^FtNmj05m(gOn3*fC zLe)ADr+tE&(!phkZ*g6|iaP;^8y-zQj9f`vcmGUTcPLwD0BYu0j~@1GC?73Oy&ZzQ zu}d1q^tym2abjon3Yix;WFA8={FX^*crtX=3pCBtVp$!+rM3p=b9q>Zi0Anjxw}1( zHQiZ^!q4RDV(e?VA24;|$ABtm8y^3m$8*?`GJ1^2Z$Pt+9MIoFpHGWG2o}+mVH&S= zs=#yFv?l`JG%Bds6maoDm_Lz-m8k${w2yiK18YfShqpIlKii?!QAjF-NDZsn>)1XE zpawrE>z&_5-19kMXj-!I7oEuotV`_8?57%rw5?)IDLH)bl}5Ma=nwCuO?nWl0|mOW zRBdVNaUDV2LY(72zsIXa#r*d#SbZL|(zR7$iL>GU&?6~Gy}T%Kr$}(2+^bH`pG(lZ zakdiL6e3|oG(oC6GzCZ?ZeuXVZrG$q;X;#=VBGk+gPdC6baz3aGe zbE4TV*Gv`pW{;&F(-*o7U23?UPgm9HJcgPmT=T!Vg8oIqGd#u(8m`39JhR>UsqD7< zqel+bLSP%)XqpmR*lO}dO^$BE)~1+fA?4y%Pc9v#QZ2JL(R!xiwMumLvo||IkseNHqC| zy&17sWjXn?LG!t!#VlR(SyK0~r#iE0PhI@kvX`|G-5D3u|eul&)0;1+BE*)%APD7`e2M2KNuUw3yVqXH=o zdAQs))h)Nf;ZBD8LJ!@C(?}}B0-+)vlA)4{p>;t{_uM=|Crr~X?R64FKl<*~8sAmW 
z%WS&w1%Zt_^ZT=Oi87AVVaIdEcA&)!;=Hz=Xv7cN#FSnfQxbe#ql`@}3$!?r*}*#r z&k#jj2_zq7-Bs}x@%7d+1Rn>~G@TYoGT#X&yE?|L`v}%E0@z2dE_eB1uQGfEV5X@! z?uy${Tiqo}s`=fyWW%&VU{~`GkwGe@$!^~pH4lkk6uY*BG1Ajzn(VwS>>7ufF{Tf6 z01W-EryE4`YC$nH?P~%aGcL?!HXa|^P}606O8$46;M4T&=I5SnPL|9J7V^^gN?d}swri+cI<`TSvas>Ihru2SP z$d^x`QCuQAml8n2wp%jEO`N7^P zG`6+B>X-yBSV*?fZk~R!60eI#8xIk4oxcft8Y&;BY`RkLHW4=QQb17bELuFgr(@g8 zn|b$#$ddMG@QUk>PO=1KBrY-+%qp?bn(Zl>(0cOx-H#NaML&-cP-A#bw@D2DeKipE z87Gy_fCJP5$@)?+Ivs|IMFBK1+`N~>R(Cbgln50ZRgG{i(mg(vjl2lx!FR4oxGWgC(n42dDCQzSbhw`TEVlBw?NHI+r>Ee%a9&b1Mgx|=GmwdW-_;Pfx8vQ7;_{+ z&I-m~0R})cjmGmw!-Pht*3H~qBSB_Ml_dV-Y((h|_TluUy3sNd=2O&PRc@tXM_SAA zz!j`GbLDzV#*5&dGe#BEd=NQ*ktBwdd!RF>`Io?0zRzyn@@J{{ zRoVJ`Qh%z04c{A|t`VtG$rjYX$BDWd7Ty39e*XVfp0^)jH z^tQf?jt$?5B9s1s&x$Gp(?-sgyu+!slc^Uidm=$Jm5B~dhQZ>W%YtuwS*9D47Kl-= zTI#oItv=tAl{iS>+eF)py)Gh-IR%2JlZ=HanX7Lg=E!HDeJk}-na)#0DSj*?^vqKn zh1HlZGmyU9FMfG(cldnp$O5;50E0xZ&#*@+BF{_C|3gJKwz%ej(A^ zh9SCAGgitKF#N#3I9k`t5+}E-qHWfF$H&Ea?t^}29T5uN-f!NYR8%S+mfJYa#3UIj zN~QqpEj8D)eO?9A8Wlu+*)ht-Y?`8Pm@Kzo$U8za;~#$|`$K@ipHyJs(ZW~YUmA$dFxbWz zl@ggXz&{hGl7%FpwCb_u(ACHkp%>$PZ~AgFCFrQxS;eIyi1)SFkAyAVD4~K3OfhPR zQn0JNYc8Q8AM1dVoT^R)|HAv`<@caM&(p$oY`Nq}ULnu2h1FrULkQ;`k^j?T>pTd2 zP9}W#0^-d4?kc!8ZIK8aci+R#=m30CTtQsVQl&u0Qbu2MJv$qC?T0%*4I#XF9Q)yd z3=k4)T(n_IqnNcEgk4T7bB!I-`Vm!iJ}8aL-wcqweUvNCN?P|npE&aP4>;k=`WNMi zIBq_@$qT;H3r<=`t`AOc5ENaveefP!u6M5a?J=haHjj(B>L9OsWaRH)xvh726X3f; zmI}Mn78$VGNmC)gT`rEtnhSLkRuSd~=hULXteYuD{Pev+AmFx_;mH*;;lI93P70(F z+stxFJYLSs`0Zt;sES`{FF5ah#W#C-c2kylQ{)l+(AeN)uPpu1F#VSVrnY}Cq zPf(jd2-{4o?(ZF;U0e3;5ewGCW2WalAAR#eAaa}Yr}s=9 z1lu_&_}Y1P`usNV!mRT#?w~Kk*?W3hP1)tn%*B-d>#1gRKnnmX%-oQ8uDgs4zVAaL zd3Np0Lj2e(FblllDjtx$xa?hu>NrseJnsrREuL9;c8+L#A~Jk1Vz^Ve@!3Kq4;Vgk zrx)|TJ_8hTtP%Sy!-n&8ed{#qv6u(_9)#^LOX;x+0~Ety>8<4eR2e&Y zy8^emDE6HTZHz%zoF=cq+_&MH>~_KafD_Zl60m$D#U=&xwN#q?af((3ivBkq>7VV? 
z0QoqX{bEEkG=T)OMtVvUYoZ0K!q|+QD{|+>Hx58oIPE{3w@!pL&CF66q^%byU)(sT z7{|SomJ*w-8HYG&{kwYpeM2R)wdMBwTcYvyUHu4BUIUAh*P~e_rCnU#TcJQ_#Bcd5 zGlFcSEl<(wYpO0)@G#Li6+;c{%zBtJDR74g`i};G@ei3Dod;S=ZtA-3g?z4h!5=$vGkmo*#qK){f!`P z^>0!x5OZ!9b6Tm95g_oN9+nW<_|^O;N^AJCe4!U5FttF zZjQeT6$kOVzo3>>SoQnY7-Gp6j0R)DsJ1&gyXt+`7Zf^cg7rW6V+_n}(SZvj1a8Ng zwx3lc)H>2KBO#WXnruBJJ&E!laSL7S=E z%d|b852_8jM+hv`iP-by|McI~%^*0VoSkzXCOYVJ+&I?(zU!2OEcx}sPxD2VsAyF6 zI8%yUYw!cB0b>Ov5rQ+unpQp48Sg6(;&i5em85Z^C6`-bU5~vDTX``Z1pg1!GVdu1A6vY`P6&>nZQ&<+fcQspk*(~o#qE45UtxqUG-28xAQ zN~Dew$Fe26?73znV7y1amU5r%_5MKkZl9l(+gH=>9h`QPwp!D6cDvkvk?bh^3hO_f z2C``U>s-43($Le_a6p`4D(udeV3{50=RAZTkVnW9^CY=v5m)vNopw~?xw-7{prOt^ zM{8j*1SG1!#+z%XNn%mxHWq|_UO@`1vWkHQWyE!85t#-5gl@3`OMz-BHnA3QTgwhC zawi}Y7GBVhrJ)^V07C)WL%D!Y z2jK%83#`oO4Gt9P@k$7MeA}GY{gKy(bM=<5Vc229C<5HDO9~pH^39m4(V!K zTGr^XsG=ANfZnPX2WznWuH>pt=H}FyWPkcUC?$>Z_fJs%R9L%*{XW2jB3#q4_XPba z_|94)+=mUhO3v&B^8TdS_j6mlNYL(Rw>_gBlB+E`SoVrcow+!`L#4a>n|HI1!O_YV z@WpLhnmfQou+u4v`;|rai z4SNy0#m7|ktp?nUgL{ac^#)Ktx|EQUnm%(gb zLXpeVnGpX#d7ame>k-7$7$t)LA^$MphjD@xd2;dnyOJmRx5+z-%beWBbTqvXdDjR> ze61AGVuGBr&Rn#?z6ZOR)+i)XxN+6^FBne?!HsW)=|1bTDODo{L0bkLkw1R0*>6Wh z7^?}Qf_0tp;Jjc%=0Yv~fsuN?iBrF&ze^ErA$M9BfqVP})OeV_RQj^I1e4gnqLi>` zB2&S8GbXA20bRk0PFcefQt)OJ5ux#sBD<8@+HI~i;jzr@p7oPsX=r}Uq>s{ ze14BOQ5sJWHfy+`Qz-H<;SwMiC)fjvpv`F}Z8K-ONi9U5lthn%lEmQJ)C$Y(Xqu49w%7bQNT4E2dYPaCIVDT%#T0#5ag3p{KjWtf_t>eg}7OTEhj5}N1z;R6o zpOWz6G{5LME&mb6IlbX|834A=8NsMyc`1W^q+#8cqYaAsyCrt2nC@bdT{Ih$^cZL{ z8u_+r%qIMPLqXhPTKa|n2OytRiA%UlgsHlqv)%11RgI@4{;j3_$oL^XqFLG+f5e-~ zQ9qw9mgzfsgy$e>&SnC){gIyo&U)G|3s57`tSpd?<$b8hZEkk>ZE+lB0bje<`KDh} zD3ZLtpBJ-U8^j*}zAbFs!o`|%Iq^7e%s^D1ua8+Plx4qdcj4P~h+3I3%Ar+Pe`WaH z$F3zUh^l~DnwLmRf1I!na&_Gp;*mtjo z9JB_8gWwV&h9E#J@>{X$QA|K=U3`_=fxhImA&9)*9`SrM|5Q$J@@02Q1zR4)=ex#b z6*2<#+a%qRlfH<;XI)U_=eWVm@5{d8&VVW1_5+=2Ua6L@1tX;M?~)?;9|hv^sXqJB z5wcCQ(XxMo3DJE!VZ-G&q%__2bl7#`@np1dKYux-7BrnrLF7ARYgXNi>HGF7aJzIw 
z*n8-DQJGWxNeR@2K+sG1G+qaD{{o%ABsE6%7i|#hzPs5F_McM=r7d;bNFSKqHBzPfsgu77Fnls$0NB75TlI~{W-oyA7=U~&N0=M>b zc%a^TO7z|2LV~H#Vp@!`cmed8wLZ9wQBK_^vO@`-a>2TdR)nF*&Udi6!?5rOdGmN? z>4y1po5|67e7K)`z7c?1y9AY5x1aqlDv|OegAb(W0_r^&Ppz;`6(2ea{A)z1y3Eh? zYRDn`H2cQ+`G@Fb89(t=0DR9aB9YeVwQdcMWO&~5M0g7BstOH7n*Y zj2Zr!AKq?)UTk^U5Qvd;E7RIjG(!Qr(h|jrY>g1$*qi*4q3yxV02Z?Xzs|Tzqw4$U zwVDCdfA~C879Mc{{+ilVu3L58!F*}(|9E=qsHomAZu`v84bqM91w^{LL>d7B=@>eP z?i@r#r5mKXyBkEhyGyzen4#g}_rB|S{yS@(f6sl^zW4s@>%wto6D2G*nyVmkMTPDB zXM6OUmwXcL`MZlI;S5*3R2DZKCUfl55EqDgF0E>y5#La}`>B=e&y4Cm*YNxPcd1>A zF`A6Oq&3rWJl6Bm)e{OSxXI}-_a3parGqPGH27<$^Bbcp<1z|7$p%y{(*<{Lba`Q2 za`?>I!Gi+n-^ZuDdB@%l@POXZ$uA&r`JHI)Fi3cv7(W;w{<1=A`=leASU?;BGOmAK z<{gDTqDe1=V&K5d~ce%qgP4I3(z;Mj?!cz5sxErSBdh0vUL)FsX z94tURb;rLN|KGers|d+z@@P8|Vu26)S=8e$)qwfyi|vhXz?sWi-+i+cTY)5AAdT?h zQBYM<8SedH&G=R_LqxHW1x@%B%0VL`p zbjH>fvBu?XGA%id5Z)W@Z*GV-Wsp1{l#Q;fB#VicIsul+Bw|*F)YUy4UtK(prx*;y7tMYQnU; znxJT5G}+&qIOv&-8@Cj@mV-LNc`lx4tU`Tc0#(ADKqt?+=HX?$6l<5$95&jkg*N^I z3#L}tGC%ei6oF`*H)L7JO_+#|j2w|=%;iDI1(k!w&Z>K5Q>Ck6e z8RyngozR(%vX*V$tpA#X*R81SC+|oDkztR?#QIRxoZ47OaKum_?>HK6p?G#b*CNE6 zVcId+aG*PwqJxtq)AXX(H$=jXQ@8VKw2KdBVjN}qq5%btxcX|8j}JIN26@o(h>-&2 z4qwHV^I_70N%5Y8YUYgnY60}JOHp)%e|I0Pr{pOo(IOat98f?@jE8t7XEh(6ol!0} zL)T~*>0Rh!YC4H|pagaH;~Yh>FVdk&A%ge>Tnae;_?kLTfh{qsiV8a8H{(d++n&Te zq?>j`kd!H&kJj%hCAChHu3GCu=;@As5@R+^S>ql?zlcdIk_o9NNMqbOGLjUf6v47L z93cf(sr@-LoxLCGlOSr`iMs`Ty>&Bf1n2w_GU2cAW=1@e2qKxkvms(te?S2tk^zNv zS}ZG!C=7y7m7ZTjM!7g7-c`!V8Z{Pl7h{KU}c`5T~3|N0;MS%PB zUVReNt7tZnx_LkPI~%`fXvayr$a^vRd%NW?5Wp{Jq@CH6N`CjVFle0N8memW2Q_1% zofijVU;JG$w^u0GF6WP8SiC;sy!ZuBKKy; zFUC=6;)p)J#Tp4HOIuT;6YPsMFGI@@KI ztAUOt^mmQYG6;8R+}sbvB5yfXKQXkIX@{8yB-9!2KE{5{Se{0PvD)$l`JgkNx9U6z zt(L)UjTz-}_w1u~XJ0_T=U>OYBWiR{3{wBeOWu9-J$1{phdEczr@oL8u8P$H>^hAW zn%%5@FXA%O1RNIBscVVIbP%M4=`ZVsbB!Gdk;1#l0TpWHOaEgW9ArKnWYQN6dwM)q z-romoCD4AiXZMJ3?ac2oiR1+`GG&JUmAxchID;04r>v2GU$cD2Oo7|6Q)@2miTfdL zY6HtFAB?8_8SYz7Q?-<8$L$GCfjeq5)cwNITvCw?VJ_l&%Rrf8DtoC~Rv*N1zek8fdXPsX&u*J{(9 
zfp@H-?#o^ex64zdfqTSgV%KxDwL9`<$m0Qm3zNekj+xnd)guN zrk!XGkG0X~c$$ErT-*agRSHt!zx~g*aaW^3PueZFzYJa-^qk>I+Y93eZ@Tms?mb9K znr|d^1>axZRVoKX2kxQVgM)^fcb^}ZNb@Cc?yuSRCS*iVouARx+lJ^wS(+3HA0cft z`4oF<4+qS?xEW8He!OS2;rHVB^A;z~hzr{M7vG}tK|!gLgvBE#bkXGQeBFP|U65PWc+=C2EpY)9e=JN79pW-k; zURCTj&+uk_s65&1n+xc)aOH%$G-bu zohtZK3G4Q=T8quEj`5X>zk4C(d`ddWa5*zSwFc}gr*CYc-$)#gU2 zQKZca#T&{KcYPSkAQx7%iq9cYPwoNUEaBjz=~{cHQ@k%xYaG59f;cgeZmYQUSwj*Q zg{{kyn78}%{9)sABUS%+l1!yTp$#>5iN>qgPvXT-n>fsFF!5kSLjk;wUUR`@yRc57flZWZn0r3K|4!W!bCLX%wTDJwr+r2xLKKh01Pp!N__Rh~CYq^StqFM009Ik#*|)xQPso+`*Lu ze$S<~uY$o%ip46y)q;KJ`#_2WL-I%OVt~nwzvX?yIxi&zzG-_C{%p9>&^syRr(mvs zHTO0-xbs-|O_o-143-pT5x8ou)Bz`8)$G;5f-Rg@?vv@isdSB2fvKhGbI1@XWfy|XmoO7cSORqeo638n$b-OEY!?{$J-#npurYL&aG1U&jtP%cL?NelOHJn z@|aT=GN);G?`fOal_~+cxcJ2nP@SM1*LmzwAS>Qp11-Dpsu`RR8BdmdlsU7N0)Ds$ zP9CvQ>D1|H>-669?%6aWsd1V%=78@EcmEJ7i0C89>%PH}ef1r?7cMv|fXlK+dZ)jx zWs%w#bu;rvVRkgI&yAISSWCG=oSN~zO&{iwBsIga6dmIJWuGkR;%?KYuWb2U3H5 zP}3wkH_!qt?RtkNxmD4g4VT~WaahAPvzxZr6*&0Zo?_T)#b zt8y^}!Dg9isL>}7S@`Z;Vu^ABP9Itu9Mvwt?7lC#0IY`-VyvbFzKJRri_2!<*L2ud z6+dibQLfKKyDA#xAydzZiosprX=@ZV6(kOu@#W%29?EPIl+6*AlUm3B&Ob@N@FizZ z<}xUoMV!%MbeX{mp6cet{swhw-`TB4Qdv;YAeT^S-CB&_=MNuKXFIv!&|zp}!H@LT zywdccC7$S(Vkfp%K4yfb{h8DuA3l3ks@BZZJo%mCIfgV<^`FIWy*E696d=eGho2Je z818)^f4OOmPg?S0q<@(HkTESbz)BY`9!$aAlYv_G%lZ{kSBV=N(9{@U$N~q!Y_VRB zEa0{Zz@W4kO2s3P2by86h8pY7YHXBwZL~|IZ@fQbO;&6gz17L)VSd{>`I_9* z{@9BDY;@cU-#9=L=3>UrC7~wC`yG&?@+1923TcqXHX7e>@+V};+QXoF4G`0jkyN?o zd#;qs%RnI2(3!Et7FaQ5n>_osRr%c}VUaXu}hn`p5 zul=;Gg8GUXJ&erfkyZ%%Og226o;^9<)k@yi%69r)zgQbyOwCbLRd_D~=KiVHH8=-t z?M$sL=*^hK{qfaa(9_<*zo?*xz)BqF+lM_#Nh6<~gh1TIy;%qnN$Y?H)K<(|xL<;- zp9=jCAJ@{2KQ+@RCt&2Skoj~~IQ@bUtGB&&OmK3J7yD<2sTG>t?}RT z4D-B6%#-Dohpo}P`r#z6bS7fEU7xe{U#^f$v_EgWMf7NaA=c41q>S0mC8r{(Ku__K%1-C&S$;l{YaK>%0f zmRf+j@6w{$2Q}ZZxT*1uzhys-g0k!lT?P{{1TMOVlAE6%be*H_asQMGt-$ao9YVn+cxwI;k+*O28^)=W5S;E|=R^XvxF6b+&|oB+0R zM_Z{T_uXsG{c##4s-p{A;0p#+>L-#f7Feu5!qi2+`3 zei?a1OB%Q?B2U+EB)FQ67;tKx!4kbf&QA+E)KgO&zwPDK%v8c<_cyxusvNKyG8=*B 
z>&QNv^mRRGZ+N{vLL%8S$sT{=v!kHN2Mj7l3^8mF^c z;k~xl-@cIe&GW}!mdH4O>B&DG3j=XuPDe!AJTd7|fP#Wja%EpXG5=iHI$0W|qc>!{j0(m-3!eYe3e1)@IeSlj zbZeV!8;rTFlLgM1W{b0(R@0T--bNQIACr*1+qW##?OsWpnWmmu?J)^m$NiM&U%Qy{ zk{+P2X`z{{@1W)xh)0t|SfH)Z-}Hg~r2Rl6f75teAQE?+AviA5A%%Iw0$8LqVQ5o( zCn$h5olr%uDDcf&R!UY#V7L*^aLX=NJ5z>ZGlHueLI=TcMNvIux(jf~OY}Sfg=WDL zeBT@&k%mlk1qkWm{HJfY>HU6u)nq#{b&sIIY;IJpz1}Yj>swjM;>haXYC(XC`i~V6 zL};7zg>~)>ccb8%lZsqTQEeRPx%{Yf+ol8pi&nI87+u$gO}&+cRvvtH;mu&$ZmQTk zPcTYmq6Ad!X?|I18Y<8Kj16wa21Gx+iP~&Q0znE#aohhn^B_YwxQ>ajP1$5Y=vKr% z4pT_?d1M2tQIb435=Y!D;=c7XbA?lRQ~xpO*lquAaUVB*SZZ*NfpNvOO5c$EYa7&i zk@K`tykdFze^=ZpxkQwuf;g>utIt{j@yw*LMV7Niz0L97PE4b&F8%pcg!pH_wuudqO zEA~WLUdW|ksa)y{znIV>f?rk;j;~fgB}%FNz*#?jW{5$>Q{mgXYWJonvO;EjCTQO20fmK6=j!!|Cr^yid^>cO2=-a~%#2-fCRGJfc9s zu~lK#{E=MNpe>{OBzuY%_7umfF`%lpHkm=cvs3yeS;`djo8s=Q&2#q1;7A5Afk%iJ zV%LDL3y4PEY^VikI?W#>!UH0SE*?#Hr!s`s)_Pu_ip{fWsR-T+Q5Qa*QR&&~QqIUz zegY$TMdcZ#3Kkj}c;`G{3MC2=7OE%_8c zpYy)@se-;4(_l5GReZ)Ee$FNxG1W7G$Qey~qR~M_lzXkk^fjEQ(@D3wE3(X1!&jHW z>@wG1OSKr?ZEHK>Js2=N!&F3OmAoLMm;fjhM~XR zLF7{ZajDw65Nv#%M_eN+kE_H198OiT0FckL=v43Gs=1Bh4qHXhG;)USXj;)AkQ3Nw z+BN>#rFOT0a5QWLEe~@XaHhPTskdWAmdb1pfYW$=nbDqCmt$~n0=@D-xkyuA57$-6)OCf-f@ zng5;y+s2NNPLi8SGj%^WmIK*{veU%QFww2E7g+yF$7vrKGEX63wnG*OE;?PRXBAgA zN8n{X`|G!QLVsxu1&+}AsnmQQ+0N9gDYh_5oue2Ce3iM_XfT zIko@W^xTilI&&-flV6$IKVGvh>#tisMv7CY8J4|)>br%ypUqQ?N28+Mr=R=`z@+1n z1i`5NQ+G5Qs_rRk^ZSls9v<5pS5q5SYWbmUTN0Kzn&&60snbQ^Tc({*@RdB%4T(;< zUdmjVL7b}Cxl@hw#m)id+aU&ek9lL*acFTW3AQeaIAfQ)*%J)m)j z|E-&j>ef2zx!+eFu`(euS$(&a7_#*k z5NG|~wB>$T$Ci+Vxd_ltGkX8#=a-{j#(5oBbEsoKTSB;WUy~c=p_X@0s*jog5Llmh z55rGybs8!jSK`mM%K2-`Vzrt_4FFHUepy<5s&r)15?2fD%nZB;&tgBT#Mw=31G5 zR@%1GpabkT;5X`Mde>x`IwI>B`6(Lu!V!Z6@;DeTtM|+(?xjLXD#q9^ZxW2emFt;(a=+&}oS<7HLnhI8^|4fW{xpZk-%q*uSn_Q&OX_3|!l*1ca*>D) zf+_b&SjV{!E@4|)IJ4Rve>>ydrpQu3FrXEgy9?oY?R+t`F+RS9-c$i!H=A%!Svq4A z@tD2$-(W-L%RIUF@_~&e?eG|*&j}+c6BzJe{gml14AxF!E-Rqpjg4^k%}f4#LE=f{ z@dzcS{7~7^B*)n~npnabd8ra!>b{vMA4q6HE$_Dwp+8#ULYJaFpYre-XjHeo6Sz@z 
zzc7Y{9|mxd#YL@P0SH4pU_sTkotaUn`6%QyL#wnOKzaAYuuO}gMetNXC*a=$U*)ea z_Wakae#_GfWg0oGWab)p<83jLcun0vUrLmqMyzE9B6mJP93f#=mK>@gBA$OZw8e97(WY)W140U>*CjFQTu00%iO6!Vt%A zdO=EWDk3tI_G0%6ga`JU505T>8To$WDX>1K58lW_|4_UuE{YSf*VJ@TZwC`mtkTS>G;r zdT-7n;U$PBwCLK47H8pn@n%zyr?N-yDz25bF!;^we

65yAF44m?4Y!6|(c(rsi_T_0_|LJOk6c(Z>8^1wI4 zls*7(U;c>R#ky{m?CjL(zk)|!|Y2cnIJWH8DJh4&& z6NY)tYrPydBWY_Tgx9jZ6W7ImmK3(UGR_8nT%s-CUx0P%4|}Q7*Q!}h+Vg0ZT;j3-40@z#It}^la>}=C>%E3@*wvGcJAHHmi4xecHCdzSgoG z1+JOD$5$L-s0_CA|8O)O|JlOsE&wxn&i55iCDe_{qa_d~fTCB2j*p-b=WXT%3=t6S zQC;5QvW95GDDvj2>ylA-n{@T8GDI2Sou9^~pT<^$`Mf~!sG&Y1;B8rX$QYL*pPm{$ z^&zHW-AC1|lUU{0SJFBrqHhfTZBYGV{JLKbzIFi4Fw~q13s5=qw#fNspWtt$C6W)% zW6&5kpL_d8dfda;lbdqI-ldHM1$OE60T9B%U8i=_` z94M-}E?L{nbL`azKKh#Yy(qzmU&6f;Eiaoy9Viy%F_yWTvbQ*MmiLYqyd%L&EotUI zUx2~wo#P}l)814k@78W&JIfuSK_0=Pu2EH2Y*1?|;*( z1e98Sx+i)iFJckI3mbh$CB^dSq=sWoY`-%Bxe5(A`LDfM$JlvW-%8Coxg;AXFzm$O z+2BX^VoCw>uU9roFK{p|2cC|4>CL7Z=Lm9DU&R{oam zBhz5;ki&Y8&p|Rp8pk5fq>j^+A2Adyb5V+;z~`cyxBfFc{m$1)*k`$p8vqiWCL!1` z_Mo^Ha`o?idKpz55cUBizA)17bsZOar)v@!0!|v~3i($?UQ_z{CnWbsfNmEy9j)yz zq^cZB@|A`-AvZ3W=sV_U8s-)alyTMiA1G{9&K7b6(7Uv|Z+2u^KB>A-L69rwM9MN<;6a_D7mPx?Lvp^*%Pcpwtgdnw-{; zIX^s`?iWRP$zn_uS&m4!Idh>INvf~$vcxj)kl&9ER`br$gCzfbue<0~Q>vA}W`QQz z*|6H-^-0Xj%BS3SXAI+-?`CcO7Nc|qdk0?Bx^^$I;K%-90^VH;uDdx{r7ca4lFg=X z&AFBGB5K_8_JcZ5*N3N%>5|#}XEN7?Ew$^j7=-m;t~1;;#9Iy)jKayQ{RQ^awNeO8&W0%HL(!}wv&X<%!wA2PGM&^_jnMs(FUf{OOmH>KOvXVT zmj>_@wZRlo3ou>N4%Q7SbsxA}OLHz9`9v4aH1%ht>1l3ZH&jd&q_u-{WB)=T$og?1 z$T@$>K)Eg!%bjSY6#6mJy{xRbCeE`WrT-{i>9%NqL;_k& zLU*Z)4Mb-`ag$upL$m++X-w2s=nY_hvPsfhYb~FAN0!#cT?Sk7Ut-N_)L~m8SF%>% zIPUhXC%rU|ROVKT36s7PyLP2B@Rro;+`_fQL1(H#ypNq-7^pfrh!Lr#?PGAzcRRmzr4mr;v;>v_7mT^j()hl6Fuis9 zlzn;nyy2ucglGcDwP>Dvs>7PA?^;ZJonNBam2;ol0WO(#vd*_ogzVvSc?2_R9wPM^ z&RJ<5h*?B=;uX4}Y|StY4rIX%p^VB>VHU&n*71(M+Sp$Nf+J5bJ-L>JqByTF0#`lN zKR_n?tBn$Uov2*wVlXr57+REj=!Y|tNJK)F6U~hRQacBKCmO9E@de! 
z&a>%5*@iYeU27I}#V}8Gvd_<18`Mr-oJt>GDdEL)-|7;(_?hm%-2e+CZu1sh)^cfH z{SZSW-F}sr?EYW|@Egdiy(-*RZDyIHz=4&~B3gN@^0fWk+Hm#t_>rA%L-OWYrA?$= zb9|ttZ;lMatr5XyHhYxO?qdmt&>N{XbOzWp0yUDt7xOs4WUG?1*ppJ6TZx*I!iW@^Pl#b|d*y zplS+n)yL`%sxLD0TT56bmZY5nI?93W_RpV`X{N>g$Hi3KU~Y6$397R8`nA}`dwg!{ z56z@d<`g>Ib|2&o^64yZI<6U>+UtGub@F~Hse|u-lJH?iZZ@NZmIl7tlsXsM?u@-J zgsKXpCjKrWJMo%{0Yz zN{udI;QMFIy9UShZclojn$5EyM(?3D8P&9OVc0+0C7lIlmeg-XPOzFxV^lYcJF~lYqhKb2&-e84rK9kFF;80|p=CM7-4hdk8_M$pD-q z|3l$}GO%ZMwY7FUZ@FJH>f~wX>!-iags54CrDq8`UEW1IqVBPGy$5M8O#J;1bX*rA z+BOiPgd=~r_bZKa>t{(?UvQpzOMFd*JPXP44%cV6y*w=k8Wq$mI;I6`>?F{C*>)Q_ zpddVK6xO@JxsgxRV>W#C*N`x*ca1igvdV#y`~{rnSz_jgX&WSzW=1%oVA)~j^5uTB zv`P0TZ>o7nOjzZY7GljA%(STjqvOR%aYC??T(89^Uty{sY9*62a@bAFQjzjXE=7b= zSpT=w_=!B9Z+iAQ+Nh}PPz^RD#6~!Akxr@VwyLG=s%h6bbZ9k&?V5UDaZ;8~k z=DW~U>y_dEQvWu(OQv%~{_8$a#C-n{u2e2ut`EET5&4Gr@|+{TIvw9n z3b)$vaD-f?fF!p|x0(!q#e?RG%UDsXKs?{L`e+L)^YznGZhuUvIJT41rg`!UZ>jP*ijCS%pfB2j;*7LqKsOdq|-PVr_EBaD9 zC~vk**(I;o>eTQ3(05#fe5Y@C-jhVuoM%&<(+PF4fN+MuOqLTIwQV@DTgl@4K6{T1Hx0tQS#^AXUA{>qE_6=Tuh&L%%e` z9J+aM@ZU_Cxh(@`%3bIldigywa-326J&8 zGjjY4o3gM=TE(Z@H~^c9Av~&Lv!#j43`M}xHrlXuj(~Gg04XmoiIJ}G*SiQ~xU(SI z1P&JgwmzEJ7YhEgdH`^|aSSqMMd}*U*vS6jXMyJx9<+jW&(912L6jU8AU~1$YuX| zNP6Qxo)KX?JRr~*)w0TNArYM>g7Sd*ne>LkR(Ut?^Qj{UAUUTu9}ogzg7AfjTMMnL zUfFbpIG3Tp`Nx8IYDq7jLL2@By|fVLvG>iGC(`Avx*!gTK0mtA&1Cjd=MlQGJpv0D z1t_M z+Yn``_G+iRw7ong71h+;YBuK*2*yYRSS@s*dgA<{XLj%!dG_Aa!ZuKWeBO)Pl{{Ed zlvXJ51vWT@pYOC%6Wj+D^fj#O;|c`PO7N4d3ZSS#J%N|Fp@k`8rvVFZcO|5<;}#Cj zFl3`ov^fLNo-g}DY6J7rSaC=~EGt6gcrJw70>R^{^EiJU6DbC0?Uf5WLt`*M2( zljz8NgiYo*fg9qe+4aZZLf5P@Ec6WO7=}*}{(}gY3vf=X5&>6Bm942s zgPq&A+%)}80sNKt-sNKxgtJIS1OeyCJ<}iHMRIFTp*gCgA$2Y6EFEkw@~x}iF}N0L zc3>xM#hWmXdBQovqJi4F%Ttpdhk8MTih>_rqkj7SI$0Buq-YdmHcnG`n}1S)qbiyp z(bizbHF`t?H3Kz%^h%bUHcuj7S;PK{gG_c+f9boA>&gb7P{dPBBB$JqLRdpb#83R% z1*-cAT5XhyvzWJqGKpoHW%(OTy&FYaqwZARdR&`0Emq#vd}Nj4I%jSH{MHSs6E|cfCMB$cger-`)Xg7e7ek*A(YbmkU~_&j||FU ztwuCb|N6@KV(Gd6!O;u0W`8_Qx|djbxu57p)xOh%X)G@L1>HbCcirIbZ( 
zZ?-gO@29HMhoYskER=u6kEs!Iv68w#tyBHn%L2oRs3ClWPA17Y!J z?5@A8)Jo9X20G)vf_(7Z^JpwEXut&IlJK_dRq%8ImaNmS>vRfgBZZz|?A@W&HlwRZ zY5Wz(xHCuXe}A(2$A?5+go%&*Pg8e5lv01qu3@(^Vdj7xRugVuWg%*+WM|EZ zJKC#q++F&f;^Hf1unHn>3pwzhRpRz1YC?kJ>ObbZ>CE#y_bGEV#lke-U)LK9LxxlN zT?M$e)(e927~G%tc_xlFnyBJaGeLjv@3;x7DCwNyH@~-QU|fWYb98U|_Bs37B3irY zug=yAQMs@~(#n)uJ>SljNkO&;_jLajp=I*w#hfTXj}j`Oa^rW@tkzR8QARPSQQE#{ z^`QaAAXqy0AL+?%4K`?L4RqVZ=(LHd&vv2BYfrsr;NPH)C$E;zqlXg&fBy> z&qNc7G}RCFGRmsd6 zpEDNLyHkrpBl`M-kF%m{-06t#7-Aw~33erFE4V%CP_yONVxp;}DhJ$?IRr-w9v z7B_?IKiF?8{HQ=tAH-VN5cK0R0PhGgfVyEWupiU?@YQ4FQLBj9s!0iHMH&x_7Hq4zBR+8wA_gGF*uWv%v9|Z>vq-{tmv#W&0;=pN{TqUXtlj7i z$ctxs#0QyzY=R!n%M$6a<_%?=RVA8+-$p4KsYw`Pf3yoOjS)JD38fxfSu!>gwJ&BM zhf9PJ^7k~LZpI8efgqwwE^jUJNTe23l%k(vPkCC!e_k?B;QN~Ryl)D9i3xN-0af&6 z`w(s+B0vHqW9$2dpHZe(MYvO8vW3x%CPHB11q!mT>Ua%CDL7l3jTi7Nrrvc zZhZCEgEV+g>_AJ#=0{d}?bMP-4`cJE3YUys`xEQKr5BU%L34Tao^_c{1E$;?H}%h& z(7(~a>B^(`iuqSsEoV!43q?QWy2?_BD>`C5g$s=fc3o!HaR4&9icc`ueLOkqjJJD< z{vj-dpLLWIH#eMJHaZkWa;1Lh-am8des*3^=Vn-}4Z6B3TF-y6$G$X{3=maD!23e5 z1posr87Q)sWLnuU_{oh9d&D@~D5=bw&HUv&T-5Ok2|yl_`dYqx-HY?vn7vJEfoH4; zjarM2{e;V#P_n0(7%_+|6QD}XVWrRrFiI&&nH*8N&b;QO8aK4BV#&}n&J7_TJ!+n_ zspesZaM?**+eU?$yJ~!MQ}BT4)R!v?8U||KZTPL9`CiJ?2E=QmmG(7_rA$j8KVFN~ z8Cx@?Ee<8*ED`lRxFubO6@*4GSK;7SnjGzO%fAt4t;XaEGG#JgzG~-~cf617_S?qx58Cy`&jLT_G zz=O=#FF`0Q;eB$H5Jf26pf zSbUYk$bJ&O#CYjg?Y`U_tuz$LfZ}e_UO46oN#uIQ*(Sg?^(+!3s^G;A@zGY@oTOgX z{9x~&>Rx1Ua)h!!3%6(0$-7MmDgH3;bj0|`;~@%ODI=Rp7TZm)OwY}1BO^7Z@c3@+ zK$>d^#_~!XyM12pBcvOytsOesy)f=rnO|zGFrHn_VSFuxMG|X;jgC)<{TlfTqwsgB zF5To_)6jnDFVvf`Dh#z`XOERXZ5o5yMMK-58}EXX$8BVf zKvxRz+jgP&)jxOJ4c0IJq5KmA(u;9|nb`7Q>n!675g~=Te)+ej*nnE7a7HVy%hrZZ@SYW%_5=%@>=BO$zaTCe<1_xQ_jBvjBTnqEiVc2cbK?`+-}2lsmm7ns#CCFrRe4Ifp>6F5ZlQ~a88H4_1>&LmWKu|6F{GG9F zu$s6t+v9pT!|aj6icSaiXwMs6jkLDLQqF+=)*zmCm+gthhc}VbqKSd$p@DDGMg0&z zq3fP6l>9B)XYZE#)7Hw?@pVZ!+}**2=W$u5<0ZFoyJPaSa}_KwVH5FxvjBMh8^Xxv zVXq%CZr@lMl2MEzK)m-z|ZFPBipCq$)37Qj+^zVy_Tgnbp z9dX6NnnV>=%P!czaC+moIp0Q}g@#mdeyL2iHT&escE~@l?Nk)-=p_TEyq>rJ$xphS 
z!^1htneRmsU)1oE5O>e=fhjie$3O3gCfua&@S%4i&hs`)ZqpobZKWQo(nq>Vt0sS5 zZCLcJG?5PUuXagXTs`^g4m7saHVQ1*1#4r~>1WM?%SmkNrAkqrf)>D>ydXpTWml~A zw9WGCs(J#DSjAsNVcwIG|AYJCE2(-mc#jCCR)}*JatjaBP0@$1`q=nP;N9>wi*UK^ zA3&sWuo^YEh!jPaeUPI3dX8tmvK=-96TG)-g?>9A=3Qi4=r=jAzEr z0{cRl(J>59orFnXMr@R?yNI7F8;T`PeMLNvy+sfM47O((#+Ef}9G{(%dXb<@#$K6h zXsw`Vo>R2fzv6;>9LZpcQuK5+#G~LC0c5*F4zO1?BSB?RIO%R)EJ2kPA>C4hD>?Ch zgW#eGI}w3%F+F{^a-6P^%zs<<>ZnBO_ebp;r~eYLA<%q6Kp6>x)pd|Eu3bgZjX6m*=Ro2p&Tl9mZ+mCg{!eEr<| zw~sqyeg0Pvrmfz_q(-OEK(6{IGz*_Eve<2Fp?Q<=9s0lJV$Ym87f9MyU{@cRz<}$R zF5eR!q9BZgs6$$=vi?jWJ6!Pnqp~{yWw9;t?R*pT)uo`hdDh+Yp>AM8b~%McEGzXL z!8JZBb%MG*nmT6kfPhe_x_3$!$_;4~Mln?M(>icTUBSu=eq>T`hzq{rUXqBE`0f=Z zg*38*j0HNb<&fg&-;e+ifKzW)_%b!nj)zz3P#x%JP`GHq6?;e# z)BapAnuTwmPjJ%T{DxlnZ+E_`fP>2!7T)4XcO=9LsL4 z;%$h(5lkYpaMu$c&(3)%ig_m~?UV=lI0!^O!bx&kqc;jTKhNy79H;}r>KXbt(y7}r zY<7CrrN3pGTDKF2+`dUCG65}RHqHMqwFAntDQTrYm91;frOXT==dOF8MQqif2=u?Y z3+&5$`#E2hc)9@U@F|>8s4IF5ZPp28r+l|emB5fHzkEvtnb!*~`VZp3H;p-Sa5{ZC z^LA)UA@kxmJiprS%Ux1gA9G^i)NON^ZHGi590?8QWO!a?ygE=0XN9RI->5{?@^_U< znSCie_eYjyNnf2nmb&-Mif29RUhB7h zwC8$FO&#N3Zr+8RN+L|V#y?$J$=N(yktB1^bS;tuAw*gxw0sXw(*$k($OY4y0MmNZ ztji_l#6QOEp<&=$tJDUCqQ{@B!jcZyHc1&E1W7owo6<88+A%q1NnA)bg++>Xe2p56w_$jfywG7oP> zT5IeF=d4NSo85E6snL(n@wmx^&*b6ZhL-tX=0HNo1r+zRczN>a3%CkYOBJtQJ*mss zcQ2E%rt~0=>dp&vgu<#)K{4MuiO0b}Op)RSmnNQ6J%nR&Uj$BySOS(c- zF`{c#W5)0NH>8z|R0`PaqF_o80&Mw|=tGmC>0UQ7$aG7@ZkJX#Il5X8}vRrHZ zC)5^$BlX*DB|D~jj!f$NyF)amN8r{kle`6~1-3v*8c&8{KKZh|XGtzaL+Vr;`l%YjBaNE@qz zk}a1f6wl_DzCdO@o)?)Ord#DJ04Y2mU( z`T!=NiV{Fo)m8sDGnNO8Ym$?$p@ECgg#36MfH9y+NK!d)tRj{td?%$$M0pu^<>1~Wn zp-mW)F-afWcEe;(TT4s9uOzxA+0!*mq0#7FoU{{#Ik6_~H>SG3CeK;tA8Rl?a7F=3 zZlk>4OO@Vl0UM>YmcX`)Q9R@w{yMo96r69Dz#*A#ZZ)mXSLb4Z5j_Ha;AawswiwcJ zyX$?g=VDGrnnIRfa)@N;9Nl#f9RVK~aH$k2+(_vkQ0x(@*k%Q~#WoJq(Nl{WRER_q zBiH5(wplNeJL7dCZnK2!*M67J1+B!0Y!npoZ6D14Ky(&pq8YFn%0_Z&BqPbJv~H!4 z`Zgjnr7T!7S62%&j#V~8C40w~`})fMit<=Go9>0XIBC}{-)zm9GM6xt0osI;Y!JQZ zMeWo>`B|!ugU(x~CygeB`yxQ2y|enT${N#ZL!- 
z;C8iAbbNjmj9n?%Y>A=8;viDiDrzsJcA6V*%&AA|mS59_*1sYDsRpy~s*dmF0M5H| zK<~W>aJa+pgRJ7iHesmt>S=y>LTnSF{<{k~=4cZIdOzB$WK+##hh@?D>E#E=N1FcdUb$aCEnoOzSYH!=Ibe`tDtOUmTt_seSf|?R{iNnDG7DkP+|+vdD^|NlsS5C8VZ5GlYLHaCX*co#KgJ0{Nx*AU2E~6o7ljn zG1{arm+SYbK)?_XPxFf+VpS!ocvZv%(_I1##DtUNrWHmhkGz2FpD)h*HH;uMyPqnD z=y(i_Q?iRyI@J-9wsuhn^#iVitrMKHh8uoY6?~S0%HI}`;o!=t^hq1AibV)|J!Y-d z(Pyny{;kR6Bnzy|=}K2{|H3ZDIHU?@z7&3wGzC(ZM2d;V8zO^NfJ>0cRAPcf?4Z! zgpQ>ipWp09AvK1MOW~M_PO3aCA%Qw-jmFE49_hmp_OmgYX-r<*e%g>L_lU_r>M|k} zMv29_+pKpp|EXJ6FqIwzA#IqpbITmT1OG{QiAWVu|RbenZ+AS1tM-vDMM1KQh6yRHR;|QGqX+}@H&n>0tkey z2$hmqAW(RHIe!{e)RN5BOP+_UAr2^(8|Ig+2h*BjaUxC@>+=NWFj5Gkx~JmAf9SNF z+n~Pw<%iE@wIU>kl#buMD=_<798GNBz2PP#o0%*Nz{|}wVXU2Xtbt#QRZzPl}A=Rg8^!V zGsGcKgiO^3!5bwAJ^gOcwQ*F3@-XkUR^D!By2oETG-Je|^yJ@apDV7%+uWt#x1tF! zQ~@{lWa;iO5&CoZO1UmcA9%JCMYVDdnqKpGD86)iZ{>KUjT`LjKPqhJeq_Q5bnF!b=bHmxf9&}II{6O{w&v6 zzKlo;+;%m4j;D_M*aFpA#{NTT(e1t0{do65Il~dv6UFY08@?YgG@yzhCmd*pHm}tr z)Q7dk1!Ln7$hU5|NqW8e-&oN3x?gWrzE#+V!yfF20>7~EPB~X`Z>X9TJGA%SZx0|2 zXusK`_U3;uC{X7k$9M-wB}_uqFIW5cAHEWX86=?gJJHbR{(L%Y-&bAwN}qd}#X>db zj=NZ!P?o|r^xG57;dx7az0?Qy(37J=*6PWT;gY{&xOfIj-7m`2%?bshH|a@<_Zha8 zeW5pgNAGK&fr&2EU&Pat>goUXHvXeKzAXZo=pcN|VcapeV3Yvmg}LVD7etBg;5*3S zYHI6mT+IbS8hf2`G^pMde>2KvHD&^Gfhce(_3Wx#Q>T4^8yUQVLT*Ix{UA z-nN_%V+dgrr?0_SH-LZ`PrW`u{#8^qv2JntFk}AFG)ZVR!;Dr^#PLjnKns0f^AqV9 z{8wnofZxo}G7^`HK{rYe1#2Rpu~%U}*rkJQsNY^|`R}+~?94baXH;?vAU*%RW#tYBy<(CJsjkUhO?w4A@8B9b3ZES)E4B~)nb-HTb^Whaf~ zOd?b%fw!%?fBY_p+Jc40el|7qtduE*2SXJyvcFNkEwO95r${?jzUZQTf06PH+9y)! 
zq;_&DeSSLwfKD8$gae%AcssEMti@|B;Rc} zH~<;bf!gOe0pVz1&m5M>FZb5CJei`%7=$uHJa_SDtYgr7TmrwoVkqq!OVlgQ2-fsZmcGDwLCW z$4zb@j0D8++iCe^X?txWx<1w;u#!^!6l${Q%X#>7v{B@6*Nu&!J$a5b=^OX*Gx}qy zC+LJBF9_ri7yT?3G_=q6`MjR-Ts>+6+nRJq`uF%YYd_Bob)*ocqTurko&+0ZtIPmm zg{qkWWwdrUtMd!@z*GHj_JF>_uuCaA$3DA=Z5MHXGXMr#e@{jKeoD+Dc#V z6t?oi5q5GFM~d9RBh1ouY$|)UdroAZPJwp|2xK{P*M7; zP$Li37Toyt$e&iE7QY}U4PE6@J;=Ty2g>;ii@w?6nMcpB=*?#9-h{?>GlDNs>isXk zWdap1jfsQPbnxlif;bi}Jgj(u5PDf|zKBHlq@OZSleeg4-pTO`{x^+9dd*!&G|0NR z^DS-F&Xu0FI0nro4f`7(WX%?{k8=qqX6NEYxdPaM#FJ|KNWKRXzNsx{iv{Xnjq3bROX6+}ORq0-efqRAO)}oP$ z!2cxtZWYC!ho3SOLTXPLjSElaDrl@$2w@jj$O@`@K3-~Wfe4I~((`rLdvRoeqmEi^ z)J4diuJvLwo5xk|@=TXtoixx&Hc^cJT^J$1S<>!WT?~r(_pe&&&-Fn&PjdMJ(}R^e zSSYhtxFbex58FI^b=^DKj|!2Yx#909!_F+jnNjAISN^=3LC6DwOL;4)s<>Hc?l&7& zkaeP@>5`J6cCq$~DENvl-2f6!E{;J|)V|A=e;wPUAtHKR>N^y9>;;M8qg3487aTKsvuyCnf}jf_-h86f$6I?7yPtN1 zX}ovbuQfmMdUN?>WgK*?yac)%$k8v-_^$4D%Tr$aPB`J2RmbMU(7%XSw66YKogX~j z{GGFKILxa1-Ns;Tgtnk7TiH%-Bx$52iS(VcDf{?K~IIRto;!^;-fDbo;m7Y zlW#hdR!JI9nm9lia2h|;#KMFcJFmD5(s|OJ2dm38mvyg!TFUy@Ww|kUxyA6lCal}f zdFE5odOXPc*uL5>!3-*I`DKo+pCDqWc;}{P%SC3GdJRj=2`rvmEV}B6c~bzw-IEC$p#c5%=_7l5!(?hbkEU!9p7l#2Jdp*emC^SZSge! 
z3^v)WJw*Dhkp>vrMm7uhS7W8+=*#^(dkSwV!(G>;e_|4KX5m?4M`5xe_!rOLTKex0 z(?Q|!?)pK;v3vNKDn~xI&4u1~Z?E3PAovnbBBrSKZOgsny`$BSCRUnz;q8J`Amd|O zn&MrfV?Jo_k`tLoF4yQ` zs2KlI3jr(VGI+4PA6uB*DPsb(s~ZfNJ~K(~`11+miR(_v7`?eqcD3rQDUFEl-_Ow zb|}yU;I;|omXSRps;Qk0#5O5`D`It~RoSlrDZnRa*Yl;&|7eW=I;r#_>F)3*0FJ^S z8pAAxr}dJ)bRqkEgrs92#r2()cPV3 z7jp;c^%yy@@p1*Bqb3cgXV=f+4E4km0gw>Kq2&+^3nlRvPk@1j75ZZne}hvi){l*V zN!+BOX#=M#*Fa;Cl_5_R%b%&h=gG$~Et}6pPElw5b`KX;D8K@OA*rJST3Xpu*?zA_HaTp#Y2lJ5Q4HITv_uP)?fL&_G)o92%r1t zseAg8AOJGm`cdd8d&~?ioBh!OO4(;m<-#i$AES*EVMj`6g(M(zivH&Y$u@j*WFlD| zj(PBD((fzI`ubv|yBzo(+gH8ZWc|mxsOsTw(0qxZ$Ss~%UDjwaG@aLdzg-&bBY2@+ zOfkQ+ibNxinpc`Gdt;8469If@))Wku73_4)9hHtl5MWs4_p2i!KmMMX{w}0o7W$oFHoFQ62!D#+sIj>86zK*vE#oWK~n+_wPdQ zp1UijE@v3Ivbu)8^qcv(IpAzWD?dDFN6LWu=O2RO&S?&QWX_qATImG=A^{fg-V?5I zUK>J%Rod01NZd{82EA1Gz0WO8Jag$Ku0N;bJ!7p=Os8Z1j;`N$T8N`J`}1?o{Qe2Y z0r4#{Y?}9k>jU{O2yR*g0L#)(rwqw85?d!yVh7+@af*W^G}ZhH&Ht16wFXv z#msGx;NkMuMuprM_VQt;LFf!2&XPWUCTOgc)TAhqhyWI0&*WI|#9~b3S-4Il%Bj>& z2z9CGa|fMrn?@x-=d8yqNj;#?G<(uo`FciGdLGWj9bTlk-M29MmX5`&=DiI9;=+?P zw$*~7PBT{`Q}W+N!=DT%>0V35;$EfWRBruXUCQz<`Nz|f?UwT5MX+b`xV+)Ik-cd1 z8N}4v7+>`V^cqbPSAbnrqD!Yg}WyI%D9xy2yt*^CB^Vvse7#Gpz4l>s3&W{ zY!Fokh%c#)76=gd$b=dIU1)7|SrZMR<|vs)M4+4Qd6CedFvXY)U1Z2Xv`Crwo5Tea zTI%F5xJWe-{M&B@GOeBKvSC~sd-DFTmFI1K_I9P8LM?3Ym%RF6;IjHvc1s=1tt)hD8k z%*Xy(%z7E9`=`Si!H~z3M}C~USaWPWBdLd6oKbqVkvt$(%gn`$9Vr5Cs5zwB7ND5^ zzgd7L{q;b&8tTB>Zo>$DEQi4_Yeoj_g;zFCdP6ind1md%C8Q{t{GL@Z1Uy+M<@Hh^ zh2lc=Gw>?j%K}1GGFw&|S+R?6TI>qgAZ%kkx90dJbVTN%Auf#fKI9st?(CLe*P)MR zuRUAqLnQtbonx8Gj%G}v58~H?4OM~19q44)Mp7uGo^ZM{k0|J4VhYaD-F!p1I1uE% z#7W02E}qpBx}K9zJI5>h`Opwu5WQpuHLb&He z)Ad%!rso5dnF>dyK8G!{lFOJ%3YGZd&#Q+n?E<+R$vd2N?(|64WUH#-HEmC*l1{MV z_yrGxhoLKByb)y|k(Ed-xjY=r^Lu|xGF0(7T|HUlsZ%n!dvCnY*x=dXUpFGx-l z+aUhlR?xS)@@L?p!@Sgv>5AOSvIi8h$4BRRso zi2zdbIAUvk$_;O8u^`?OGV7WHH-P?ccjb4x&Z%>5lsyJhA;wi`f2M3U2@@yZFS`p* zL*2JCwpiT~(YXq~O39zE9ySl&?Sf-V{~b^KB#qLXt2={r_Gc;7y3GmfKK)ty@$FRo 
zr+06;s;AL|$%_NgtDKkPgUT6Iy{z+6#Sjn6?r_hw1#_49^V+54kB_}dF{iwMM9XYXq#Wk9V$R~OpNxObbc6Oq?(wtI0;oJ_ZKr9Od)`v){F-2zRGJ?KnC8Ew1kM}IJS%laeLC0XKXe~M&@+Mu zs?Hc5rZ!r2o@FnRw-MKmQN@#-BQqX(4;V$M}5Emo1r(C{?l!tm2R^ro$q^#pF!ij6(!*pS~DQFxdf#@UeV7MOycN1q(CJ| zGb>uKzx4gXT5|l_ydZgys>Y}Cgzy8>eaSx})(JM&DukJ7jJQQb#b;JO7^rCA*Q|R& z%Uh*0io|VQUxN)WplTijh<#xVSMR0g)Q4 zBSjLQ#3k%i9n4oPwWhY)(I9tIC;s~xx6jm%p3IIZiXfwqqY9^vNsq;Luy?a&pKmNH2rZyYg6`g}as}pGkG^51g@QM}K*TkK3Tyrs+qe1R{ppgIb}IE; z7trNauOLCsJEoNsr|>wc80mI}b!m-Xxt!R{l2?%j>R$|3A8yM4K~__UcBr`Xqk#z3 zP3#qW6!f$g3)xJoy*Zx_AA21@4)Z|H?*%dwZt0D=8lFCUGAPGy2(T?kF*cZvpH)vl z$u(eYR`Nug@`D5yP%avcC8pKH?||sscst1v{Bgs}Ln#E(_qM~PAcHz?2s;CrIg+ncDQwyU1nv)Z1F&-X!g=IU2ubIQOP4~04--1HL7^lmvH<*zSFN#s|xz^ zmc+84WND+MOA3hJnkuhW!>w!5aI;5EZ1?`{+E;3?&30ow%G1j(^jawH!bb5%c^|(T z57!P?UCs(~Wo7-rv8R|SB~0q)Rvf>89{S)II}6|LKRG#w3b)Xl{ghEvJ`p-~;!aqc zBf?7%%y!m-i<~jVYq~Kd)$b~AClWpc?Y%EpAd=JGBV&Hfmw}Dc&Er+h<&^!YWiI== zXR(^glV?z$vp*tmDpg=4OC@!TYR?!0p6crB(%XR|maATi3y1qD2lT&9lH#l)GS(I? z&sr-JZ8TzNW(c860X(?Q$88!c1Wc>c0L!GfGI$vZ^>p0W3w>)=Vv9 zc2AuHsA~{L@wG=SNEc+dqb=`eLGj;RM=w*^(HKQDWaXD?K%{2?)8;BCF@6L2Jy6lv zv(I-rxSa$8GZ5%Yy0>r~jwjTBC-*ZAz5gY*T(u@+Z$kVOgMo}CmHBH2pMx&vvPG!a z+#~Uxx0t|V)99Wm^jc?|!NPDz){sGy-g<-JV|!gLquqt)@tIue(bB`)q7P<94XoRd z`C8WtSvvOpvlm`;<*^?E39cCPbv$1YRnByJOu+(>3O+xKHXGJV(v;4Cm{hv5ip+Bq zPNn_LCd%_-q<;=`?^ipi?Obz7dF?gsRw~T8#9Vmj=g;(IJhl|J_bJv&3cQ9oMJAWT zL-@6eE5C0v#ENSecU0;sJ1Z)Y-{cK5C$OLpbXIew-}9YL@lN@!Nmicyx&G1xjxu#6Lh5s-=_|y3p4l(ctAk<*W#`o$`X?PIk#{+UtaUR z>~iU4odNlE*j))$3jrKq8_w%;xxTPnhvMcy!7)$i^h%r`HjYnjS+o``#PyO$q>;!< zbQ|w;$(KXs^vm-{q!1>+00RGg5wX^!(XN^5J}#R&lr@6R)K53u^omT&dxQkmXmg#( zhSdzT&#ovdznt4%q4ZwEvsW0S)4lL-b4IT#kXH8a!4V(f zGbYq8LAqszpUL~nhZ5Ud-?ZxlG;*O$L70=JFPmBnQxk(+vdMWZ59tkqIb6vU7W>Oc zV}wE1Z;hb@_={bdB>J^h67BJe7lBDGtUX|Y#x;JdF|}l_&00EF;K3y$XB32*S>qcgx*-r*IWs2TY50g_TDTO-#LdGntGp|fz(C?|A7T_1)hGRWX#yc{Hmoi z(+?7j)$Be~Km7!`RiSr3beEYP^c%j+rOvHyJ#f&uXIAGqh<*H!i;NtwSU=^z_KamH zMIq!nVFf7?2qyQuExy|M_ezFaa8SV}Gqk3HDK*!ijEWq9)vRw=_8aPHe8(|ww( 
zcI6MIfJL@-{jD5nUvE0AyEyCCac+C=^<3WHn-m;D7sX2ssJ%N9hwgmGKBC{ztdBpn zWkILoIpY7#yZ%4FW=Fia&m91fB^?eE1TzKmGM{o(jO(-K(36#t(@y;+DuWcLU_*>i z#&L@zE<`6R1W40@fc7*eMgcIMwKaTe;Om4|phplAoz#tgY1PCU$yPUyM!?>dAb{>m zW23brvEda4ewXSU>5|;7>*?C5k^*!R20HKq%omUw3CPE^ZJ!HTuw4&)6w3T-i31=y zzP?4AM94`_Avtm=R8WhQhq{6wY(bU(KxlmsrxtxtNN_S^c~fIS7vYvk=`==Vnqqw^ zQagyHQ^lpZafgZ4LCS&?!ktt3B^(6?TGM?}XDO_unhittUpqyjiPbCPENjj~ZWA(N z6d>9=jI*jromBv_xqR_0R0`wP4pT*oGgrP=*YRZKl&;dhcpMjo7To|wokff_r?AZy z-#|XD@ILRv3wp!MHKmK<0K*oq#Y~ZWO)Sp2XH}K72&k~vT9ND=9PB@W^M(_Ap+~X`H@Dm@}kkc7to&b!bA2ShX=lXf2EWkM6?h{NVP%_z@25LCyDsy;^IaSr9L_Lu5U5yr8;O&b zx>yTIib+TpW1*Wepe*>xc1bLiGdmSbD9fF&i>HW&TWYpZFP$6s4fJ%-;OPt0D;{tb z@jE;`{|qpjS!eR7XWBA#h9VcO%`h%Q)RyPE%NDDs6P;E#vEc^8FZYv^YaDAto>wgz zbR-EZSG*X8e!Y&HuI#8;P^`7GC5u@58n(X7q~0lY3eJ6bbvfP7RfKJS*tC&u6xTr& z>LfvwiB)Q)vm@NGUl5A0h3d22l6)V!2}s2fKMb+{R^u+k0`A~1e++d{SZ_tL>U1^K zr5k;U5s0<(DRVB2F;L#aWF5oVpK5~P8ZrINAKFgYFwf1=DxQ-myNn3rvR^SB8ui_V zvSaa$y2i|$k$^?M6L_II$lx_lc*uL;JZUtjKxEg}({3=w(LQLwJmOloB8XzU$Z4i@ zG1eYK5sOd=CeO`yQTj%!y6|Tvc>#9HTJr=~4XhAbouN7U(4AzEFW0eQ?`%c9|tG>>$lJYujYR5=`Y)(BZ_Fz<~tWY3O zTa$m)Xt~_fv?@WbZ=;lG$UZ$6#8>V?v!{?600#nk3I&<;#D3C{$g42&X&LI`)w=gh` zFYPe;02jI7N%YT(cW^~2Nc{CZ^)X=&#TN}ICkd$RH%8!455+PoK1-=7Su z#~xdw^4G;F+~SvzdF{03EKo!P6gzZS$#=<8Km@EA=R{$Vh5%OLr~;*R_k<9_%QMN$ zob$U5tb*tA8}|IQUVa(iI@SGtpHS^q>R$W+-5?kFsc;=y>h3~=w26ET&6Hmlnf%k5 zFUfSd1-W)AX+5JGzUjzp`mvJx##`_>o->x-3e}t!iHLgL4K{Ayq0`?cyI}$PetXw ziE)^WoAt}C{juac%M%(u-ndP9Nt4vil7Cq|D zD|1BeqC~$}6C5>MzBhu5hHM8VG=Q*nwAb!zwYQpRr7L5)u%}O&$h+DiwNo=&ucp3H zG9hOt8_arMXZmP(PIw&J@v6q2d zCP%RHi*&`cJ^dWtz5JgH#74>UTyqZIBQ(C=R;ntpiCq{xnM&E1T7Pro+Sx`Xs&4of zOo%G9V)BM+wbY0&cn5^si(jZEE-8;@HVEFCt_W-&nFedJ!*4w(a|S$%Rk3c!(dprQ zax=1J*+`p@L4<4Oo;~L$flbHEES!GgQ}BWPifUaKc@}KPWzM9b7~@*Qz_nkRM7|tz zToc2R_!B-Pp||pjG`;kgf9DPT`3U@<>YLcPE1$bFB!=CJ(O#loWaQ!9%evc_$W^2~ zE>#GGnXJH7wH6JFhAto3GHc)0^`tLfLsc$8lB(DCH2oy^(mc0+JC+Uz`u`U5eTGA2 zt-6=oYwZJ31a4hkpxXMX(EEG&^&iAJy||;0qHkRH$!>NyM`h#cl4Py?lGYQYwHtd? 
z9%lc&S3AgE3p`aEZ$AD?{&m-sW%MzPVJN>M^N7BmOg{5=NwG_{N&4^O2&}$T+{wi@I9B z4e#sjQ&|7Mu&zYq%^Eg)(P7)S`f$pGz4w{D=^w-X@3rrnp+x?)1gWaSL3I>+ID1@h zTB+S5Vx{ogpRQSU8F>&dNoQH0bl4ZkvN~VNqtO~(%0M!hy)6c7XjqmBQ^>4{1R(j% z;cJ@8aVxctJg{-d+?Dx;&^Sr`k4*5NDKC2qj>(E6Ci2!q_)S$(MheObhe?^6^>{ae zl=F;--h5y5^RIiUT#Jvk?MDucd&?9vwV4$xy& z$5T=gfVMRj9*56@P_B!kX!|u&h8{;7rmvjW$!O6;+DZ1cFB_n(5@-t>QK=125iJ;Jpc3o32F1Nj?=C}o>5Q^6VO1ON<_ z;{U|}wjQMi&nAtGsU?+kEo^BZqp3|G)$EO8LZk{XA=zQ;7M^wa%a<1%!Pdj+PJup^ z4ci1*%oY0?U0j(tzPDN()PoaQ);}3!;~{zG1Cw>d0R^m2-r_(MWE3f!f(I4fHiR?A z#rKM^osc?Fn)>{QU;rPJ_x24NmFn<@+8HWKGdbpb! z->pm5kpjU4=+IsA0aA71Bo1x7COBWhA-S%>bs#pOLLq4aLwwa0F3*23#O^VrdGH!P z2dI3+gaOBb(KEsyAunwEHasv@Fh7%y#5*)XN46`H=CT`HY*!f9H4s43B)+UteY>Ym zWf-j#%?A$=3skNYd|L6@Vm4U2fc#0ad2So@d7uGbOuyTBTsTtb^F?^QF1hq2IotsO6D8l zbK}i-t=H?&@g9nmDagNAO}IUl`Bw_Mm%|tr!$zKbIfyu5PF)MpBnL65vt`*Z^)(mx zEz_W54_mACos%mFxxsgdyRKFzZjhmRzeO4*sN3mdwELcDtKTCyM&CM^o(h=0$F^+0J%e%DzcBY zZeBo}i0PX`9;8Hwdr(Yd^SZ(UCIr0Ajs3`&$M2+MB{|cxmV+!ZI_@e4i;ULT1OU-& zEEV0YUn`DQ%XksttHdzdF@)sR%S0BC)*u1NQhr%@7Yi6{^tHB z1eX&*huP;)ZD1Am55bP}z4_#Un$w)Yhc=Km1I_KXnIDmq#hu3=(_II(qHZU9_TO!5 z%9$29Q|E)!2*ycci39r?@e;5S@YUJvh;fHB)~eje4FPYJ^0twAM2zJuxx_7dKZYRU zqM#fd4rzmlX`u)pFMFy4{491wK4ZL$42W<=SwiC0zZs9*-z)9Ho;GJbnl&kO$aRnx=jRZU5?XW##lHkB1@>08+Sk1Iy*Yi&jZd@wv(^XZbmUN z;8x%Yl%vnk=CWDkl5Enkj(b*<@aqA2OkFaguOL_7kOB@_PJ^j#iETPYljt&3%U*K3 zH%MMy-X=%C$z~|eR`YROz$uR>fsx-?!1wN_e*$60qkyWism#V7fkkTxe26gLlj%6- zHDpF|xuf$qIf1WBH0Ol9E>o(=i_cl-*4gvcO(g?+Pbq`^9Hg`Q)coK(zh zMIFi!P)`}d&)6U`AAEy6+P%Mmwn;0n4rRnCWz;?8^w;-M{v{4iKGjxq^cSL)Fv1bT zVic9Ol~Bv%!p|u;?3`3M+Lrp$MJ>bl%E^eNHQBf{Ygx5HYOmAQ^Nx}gN!7$yZj^7> zJ)Jb;R$)5mLL6d4it-92G&IrmVpk=|%pfP}B=qoQriK>XbF)%E9 zvA2exF!N1W<`S6X3y!dNHWzy7*oA40?Tsb=fs(zSNY% z7AC2=^Hf18!^kluvyc(j!v{x&8%vJBFz#`)nqDG}D!^YnYwWKbgf^>_zeJC4g|ME! 
zc*F)vWf$FkCdCQA-8gNFp88Gl9~b$YFV7Ms&rg5MZkB6;c|kp$@P0+ex|;ransN%8 zHWtdM<}*D!Uu&DoH`iUZ>x&19A2ww;+K!EI`5Gd_XE|mjO=%zAs=b@wb{zC|TUv@< zl__$2Vs*|ljvK@W$JgkW;nHt!26#2Q{G(CWB<02n2ydT;A2w3_ZblwgWN=kdJuyW* zbApN9UoW26PW=Tx4qdFwm7Pw!aN|Uw*j(UqWksu2DasgpwZ``RKP8^?8tmSU{T5RV~W)X)?_*zR0_K5nmY7a zv+7O~a94QG4agFE-Z;>zRs<6=U8h!GxYc^oyQb{siGA6Q7Sq545r;iq>gavrJasy} z4*%0^{%;BNPfRz@?ngZ%i)26#?^!C`YVA~qPyMlnDXHUDNYj;PAykouC(V( zspGDA*rk!=uoigk`5PPXGua5ZfAovE4Ss_{mQX8>7{hT0;ztdc#3m-3Q4$#*keDZF zb9<5BC_y#HZDvR#1CW!S2a(jDkv5dixYfbHYjgFIMm=}|&WXta z6LL#tP3v6(saUA;!+~d>KMHb=ps|o#9!glu&mOHs@ggfLAUUg-sl;_g-$4w^XovoG!Sk~=~@^yGm8P_{==}jzQzH! zkc}OqPxxe*B)LZw88vTCCwilFuLL!SuQK4C<0|xzd*e>jM>A$~Y9}=H*wVT+i;ySe z1#rZ*}gSJH51(Dw!=6$lk~mR?A=#K?=-<$1=%*m!Tx`<07bm^ zn+$`sRh;hkUVd#vtC6A(m9Sm*CWszMb~wdLs!wImjZ!Wc9HJ|3}xky#ZCqofg9EJx``Dh7TP7#Qt*ElQT$0K z1>V)xIE&mcoqDvk<1jN{+(~5X1l5ohA6lC01lfrZ(^spf;oz2^tHM`)rk;z?{Oqt# z1w6MB%3Rj+W~DT5nww{3ehtl=47R^p`E>m6tpi$Xex%Lbu=+nG`YG2X?zmuOAI=)noYtqTQE>~F?=@0EfSIi+c%Jx7Kzz9wUU}ZxSsge- z^l9?GIy_FO-Y!#1Kh%v?w+aU4OTOUl3-O-Z`#0c9Q*95d_Ltw`1V3UIYC)5;T3fnS z-;2}F^o?KTe3jIcUo->o6be@Ur9iY5UXJa@J2>8YJ~(Ek32b}nAq9Bidv7t4dq3Oy z(5-P#Gvz!)7T8$^)suB}3TUb`&?W%JeTJO$jb4Jz1eZXBp3&_^vSKk9d3E&LbA0QJ zgA`|9mPz+vNtdc}EcvDa{d4}L`SU#v=!D74B;}*9*-pq0RPujYQb+N4ap~amRW65b z;7C6H{*Am&%^?}|zV($RPmJKq#oZV-b@W*QiEsUYCB!+G!Q{uIHruy$_1f7ymPLcg zV(xU}`&tjKkiWma^L>K%YRvRrmA=g-W|!=+ETk2MfmpQ%SVRH5ffHa=TRPs{BM{Zn zM@hDaE>flif9#9x1Y$-miI@}SEQ~UwD3Fa%$sHNRxRiM+p3+Rfl)syG|xxK_G`S8Ztlq z7iAfC&(L&mBrE zElnH<1G)|!pl+$19;gCC$1$;V?G#hLl<$}fl3e3*W1S@O9RLzR|bL>vf445XwctlPWJ^(j)qcl;=f^Le>J79tL z_7okKTMj|=_?28NxS7}W=lVk2%HFGMEt~eQE1Pje@(dVG0vlm+!v>@wmiL^<-)8dO zYE4^VQp9dE9(wT|xlh(o+TVW4l$|Ce{^X7Oj3&k?^^!lPc}A}{Wx+|`3Ngq}))vjH zHd;xXZv-T8xNSFa609tr7qQ7`GrN7K?Kw94x<;%h65SSQcZSkoAZY-W$ssrom90If zzi5w3Q+mg$f8M+B@_O2(n&a$wYi9pAnYVIjOt9Z-6ug|WUbg}y0zWQ;PARcw% z=dyhByD%k^6}(p!JZ#&J!Kp&fgtDf8&1g)n8^XWqdF*EDzSi{P=YN<8ce(mQ;j8$F z)wYQ`Z9&2G$SZtju~_T>4ex~D+_E=hJh=s>T$euAeZ+JrQ%cHQ@Nnz@kabScbw*(t 
zJ|{LCCylMfcGB3k8{0lfnxwJK#HBPue91!`d$XGatTRTWuI?)<8EyRmZ2FHdPp4_4uvvt_B&;~at9>!>`)cJ! z0EkN!2)6DM4Gc3_8y#}BdL3%Z|oW&!>IUx7baBPOX6u~Z8T zfO3+`sn}M@LFnOF%qgoCg1FX9X12-er26Kv(rqc5V;7=Yt|VF-WuMNxwN~sQY=CeK zvjaaJl8EZh?`MX&)L=m$j(~y!mSSB0p`+-Jg8+68CIwF`T^118JU9_y=(*Xnk^70N zVYa^@NOYymm8WH2OQx5iRw1!)Ck)FhT*z-jb}-!7y(v3vbBcF%WeQ|DYpj6R-bMsU z%!g{u$$~dJGzzG+RcMEb*szo^LTXH&95%`Jt91QIvIUma)y1ETmzWjI5?~e6pAjwW zueE&{taiu?&TM{^tXARLgS8VqH5JpZkOnvka>Ul+l?!I&NlnA5nyN=U#NP14hYi~3 zu07tDoZKh8js{zT6aGt+)KbA|Wa(66RR(OR7DSyFiAbNdpWJn@R?9V>QokJIf0^nE zF8z!AV|SidiV4tmydeRAbddg*K`@&mF?_iSvu|iE1W)&HJDYnd3$~8k!V-rWM77`J zlJlO^178e%6b^ijTj%0C@-&9mWj9w3`v4(KbNCe4_LAp3M_UVQ->0Sld5G|@npY}= z(L&3)M7ne;<}XAWrqsWc|E^!zwmWm;(UqsR0RGJ;G-I7 zrRlna(!DJBbIb61U#U7QG6~C$m>SpQ?O z#K-s}RF&-ru|<16lq}0cTemRT=wzIKL9O@!PE$rCtv{6K>ew$!H6VvIam23~FTbEXPnq zcGt12gb3$vo&3@3>fE>M7XN@7GZ^|ztcS^Gd{L&b?KCv!)_fVzyYq&r1l(Y0qP`_w z1MoLIAnL5mh(l|bFpZqHb^Ygz!fWD@uH1|bYL2Fd4tz6%4L;tr`7j}}s<|~;G`fEH zpbFZZ(syL<(3>zh*B^MaE)zikxYb>*5|of0S`jW3>IGAmxW{rodw zI_SZ9C@qf61h`SrP^*nYwV`Apvfy^AWv~>PHjnwtMLdCd=Ztwr_cyp>W^WY7{U{#a ziB*HQ5-Yj+@o_Q)_s(gYZG5RFQvcOge- z(1SaEE1nkfL~#OqpOuSwipV6F-K}F`dF`+s7${C~%>!?nQy9|leDp7x^Lw4^lF&bRCJx|9L{#$l;JpF=?QQWHkI90xpKF*{x zM4K@4kBVAEw)&AzlicU<2iB{#{8JyK`V-&~mR=HL<6&DQaKiWR;2s)COY@Z09~o;= z7hrVU7E@%@OzG;aR9}Dj1HBj_R%^lhWy!HClvYImzf_mJvS9{%2at8if{!(5?|onl z@TH5-O1ZkD=wrIdr8N!DIcMYHj>MRWu4t)g;v>J8%B_c6f**QB2pzl<_c^C=#I%o zY*qSRma=l@QL2%s1?e^7+pfT&=r!^1)KceT!%2TA zI>Uj>L*P^_h@>M#k3{kJl#|OEftL~sq^0qvVV#GW1we#JE zgcH4s%OY08$-zoN-c|Wqn6gA$LWmb*-){aX zGTZ3t{D{I!vK(A}n9cXD1q0HaOvZF}305sOVfmQRwBn0>;)~Iux}kIZ|FX-Uu#F&C z16oiQV)T(RKwQF$9H@f|A_E$?J`dNOoXC4KHH4P0qiuIlfGiLdvOH7a-2K62({K;} zq{GKM#iYK96OElU2B-n#K@qxnRzH6cRm;K|SZ6o61PXrW(*c1W)-B-v;Y+F0A#A{3 z%V)syDp+i7bf8YGV}*XVDTk&0zi6kcHX#^bi#39<{D;jN@>=Qm1T(8vLzw4`Nsw;M zE)8%9efy7op;f>~5P#DkKUa=$a*ffMT*mW%yd7Qj2b zPXkWt8A1VRtOIM;lU~QP z7H^>`N{IC$=jUmO*1bmElj;P@52d#m-3?xYATm9Z*%eM&LE3K5!r`z?OP~#xSazr6 
zhRj!zt3%ttOD~KmddZ~MGQ}VOR!L-v7#T2)dMfOj!O4}?dHwP>7r5oFm_aLTM}Kq+WCr@I{mk^HrJ+d%?cOK92n0n@U7crL)7jsFNWqUd403I z9X_UEor?oA-+Pn3M6~axlZC#is-?UC$#wH9L!G+h@o`R6X+p6Q&@4@x)1 z46&e>^G2tiJnnl<+w5&tR(%c^Z2EgK7lBV-W49W6P#A`45ss49+9`Xb^t?SH*fQb; z7v=e{ye}^`Hpy(DzPzF)tj-|V{*UY%oy0CPFx)f>z+VX^A$16%{__%6)$IC!ic%|;1#WHSE>}K-i#G8tE61l|#q<;tx+RLJ--KmYb-JD)^Yi)^ z#I!XU!U-2a5|vHHAV3rsTl~)@auD%yU+3}}r1+Z=%r-Mc`E>Un7M=#oQuS>?A>RnP z6)rNF*f@Ml{^j-bwZbZgBb5cYY_#vm-}_kPkD>efczFrnTK4NbADtbaK+e0$TQd^T z#{d_d(7ncqY*GyWTp5$Y5kK{{==ccfvK6=-j5jXYvgnVF8nWNdtymR|6B+2jyfn8| z!zS9!x(}2pK55B7RmE&Ud*tG8&=5H6ZxntNWI&h$S|L4$dO+H)rwu8d>#$Izu&Z4- zjGpqxy6Xh7h!pqnziFyCjn-aop~3|*T6^aCcXC3MJ0WJFX6ER$`GrO}AVN!+@N&&x zp%Gpv8HqgmFdvcYq#6qh8YkDmAAvvwRa;5GpILN`O69W@E-gPL_0)|yQP)Vt-q_cR z+(Z6KgXG;4C_k)XZD8m`BG;+!FyjC3rF&ZHxAOk&j6Va}GrwfQS;jB07? z?$vX)$yC)%#!zr8ct;}?iT3uKW%Y-EWJdYZE3UPJc$v8~PK{75u&FlHwI&Ls=G?SS z?OKpNWw6>Vk`ukvRAHyrO%3I&hbELU%4(SvQ?W^4r>=1c3Aw%4hh`>+MW9felU*;a zP{vR6UoHv}jL2y+a9@q>P3t_cuPgTc9k z%WA0_K~~%Bll^Vcp^i{tiB1_f&5hMI4ZIyy=u8*zunh`QlvyQ`=JZQ$0z#`>LG zwLOa(3lG*yy~V21A%si5VnZI9u9-(BsQe}F9qhB&k5UiP6=$E{=_K~vFOK7kOEi*q zo7eqzNvAwme`)ByY2>9#Zf{RWp3JVo{LO5dA^;k@{!-BJw@f;Elf4YL*V*}e zuTcN|m%(qp$!E~vDRVw@KVupwVcLR@P z(#t7f*l4V@k#FDsLg7L!R`{@rIi>b?%WKS33}WelXYz);f$xkFILg!=E3}p7i;j&=p zP^JfVf$waM{|@8tcJaQSUfNtV8f~YJHtmg7Ygaa=~2oM`;BO2jqH1=z+Ev29ZRrvQYV*Oym@_g3CJ1jkFa;clc|`vJxd7(?_fB!X0XEbO6l`xlX+P-* znli(MWYRbE^xe{V9HpB2Gq*MggVR-EK>8n4Ax#(q01u4pUh?_SgZ>>CcK4J)Bt|LF z1Mhy6-UKD0iG+T$3U{$))|4{k5Rb|gVAt|d@AX}sEo69tXIDV)v@&tg3YJ1|F4pW4^IMm3_=_7SRqM7b2fWLDnf zf(yYP-?Aac3n}CcQxH2bcH@6dTH4u0EqB_<1=iYv67IpD{!UxwL~X{+Cba%ZXwXi) zb#{TKmh8YD@t$W>*9gY+s>B5Bw5-^FUbt)V_Y{kkVrqaJhJ*Iy0lD<99q>@ zGC3<^PT8C^Y7geWZ_u+LtMY^M1l#}0J0DZ%_@f8|F8J<3X}90%d4SKnefr2iz*l!f zF`izbIc8Ag^MSsGxZbGnx1PFzC(jk_;-~%z8qoxbpIRtb_Saff!CTjjyB5|>{n_>o zE18(5jftpWeffykcjvJnfWe|k<0qy2>_cVs@AG`9hQ}cC_u4}U%VQ>eXW0Coc@6R$ zLmT0=Cz_hIz9)Hva(PxPxn_V3F*0es_SB1b;&JY3pL2v1Hb;7MR{xF|@b@w+nUY4! 
zx2LH5)h?Bb3iWf$mpT#>n6lyFa-@8YVc~B&?>2if=w!gxR(4XKVF>%iU{`JZXux9= zF30vj5?i&2+a-eUm3QGxEP|_gKe?k>g5=-y7biGK>hg?w{-EX~f=<7eZvCalZ|m~0lAu+V08 zrhg#DFM!^q8$^#&PKWrd^)3gV9rFAzL3`zU08W=>Abb& z%k~kn^%cm`>d#sYN()mWoeu>@$ zo>lo;no7ON=@0hvMt@?V{REU`h{k_COqNS&7-a@ks#T}*;9ixt-W6lboW1tbNr$L; z;~~i3G{sOVd^e@?XLV&#??f+?qu3Izv5^F6#${w6s;ILWWVkuk30Ud7gxZwp$0k({ z^*n+%vja*m0_8WUgMTtUcE$6-N8P`fzYBEU(%_&zcV2V$mzbCcnURVe@JHO44rRIXf>rZY zcPOUvqL7yfL1m$)n@%jJT&_zxW|repfhT(bIt%G0;;GLIoy^qMCoH-FL~Y`sa}wGlhbOn0@uWdC|9K0;{R3d z%iyHv|4~9w^u2uk-L#9v|JtoX+F1^PfXDVeU&mvNvg{*MwAbgpj_y>rR1iBbg1+le zsH&x&Cj|F^jVh$3_zgDUx&J4hloWpKt*R;+i8~swT}apMkiYy+HQh+M@uQ4}k%_@& zCly2U-K{L?PLJGgy;a(W&P;~zN9FsmO8vLyI7h$=mZr<^RCj6KmCuZ z7>k?z%GzW^<`H(%h#eIUYc+O6h+T&gr#Px4pvP#mHZj1K!H!kO1HlqljC0rHHu_2a zx1=>PPVaO=4|9EEMxHCyEZwiZrT5P}pLdU~u1VOZfCdKn-v$u+&%Ic)(eK02 zoF$Dh-O|$1CF(AhUoUH$S-!^?0)I`~3|l1WAx6^onD)5j_NPX5tK%ZxR9HR({+k8( z9oMwXA0oAUWg6E!4}~PmS)JPa1^QtcHP1}~%17xM*i7{@*XDM*()`xNIkUf%x5?-S zy>gb;Mdh!Ve4mluCPwXRmN>79h1sVv$&cldy;=ufW|jzbmTVkYoq-61nIiVhlu5?u z%0QQs$}pp7$}$cpAa7cLWSUD-W(b8U)4 z^@mgt*#-+GP!RjpKo!VHBqP?X90Dk0-yq8YY_j%H!}I~%=1qlyz<~(Wauht3RHE6TX-H<&X%Fg;_Xe_;#E z2gKMKKsit+;t3z#%%~^^&}cpZ(d>7pYm=B}gwmr8R+J;ttoS!$dA7bhjd3(i6SNHk zvVDLZH#%3MbZ79;7=WC1?~Ew%c&03cEe}l(r02c%7y{y_{aUwli_3yD|CCbu2i>vE znM;;)f*a^yYWz`hw{@kSkdS2NYs={}m4;y|SnXfC@KJ)giWRFrrx#aMM>?p2S+|W% zVvBXZ02>F0mmvP0Xbk=IK1n)inhkss$ehe-NeDLOa}!*2C!k=t_gJ|%3M%bWXe7wKPD7(mP#u4S~*Mg4RGekqTV>dpBlc{KYAp{Cq#* zlz#wA5X7WJ-pLn~`w`+G5ReZmxYvAwY#5E;07$}vsO~3G(mK&9-kN)|Btd8mVpW@u zGAv8uzzQ+RT^wb!+)8_oMRn{T)#c{Np5&z+8!l+9w=3t^@V;|xeOUZ-Ht?&0ArM_! 
zFaL=p2nls|A!=jU-pWt-v?H|GsNAWnR%w-xyI`R)Vmq}_&G{a(YX@h1?&zsY)Rr%!>M(4D0|rpu)!sUlS!sgl)1br`}R^x`J?;iy%@wU>qW zcysQcQ*w|NYU6Mvs5BGl1z?$Cc=<^1iv3s1JQFtJirkpQbFc?<)i1W_Z&4xZD{@nj z7ck$I`P+`3Xcnh!n~1(yM^8xpOpfy4N}H(S>L8jrid^&%G6;fpo9kNdy0f=lwi8$8 z;2+ASxaG?+($E+>c_3PhNvQz%HRdZM^Bwg%r|G3Ag7pO-$|TH@%*?m^bKaEjwdVI$ z41fQNPX6P<|5Rp*7R6LY7a}JMKOn_YqyBH}_yu65$20iiq!1IN)!5F%;YZEq^dg#8 zB402GtO!jDY^%RWE6*{tEIT7!0>pNIV0;951wW2dmLnbMaH>CvG}I4GJbZ7Ft-N^@ zfCO7+V{nlqHUSb7^WwQI9tUzeNZ6|kmbXc#Vf`xn#hP8tRbr7GSA-OaFMEc?KBxb= zlU=~*{WzzEw>bCD-LH;Sc5u~CMbWn8*Y?(xfG zWJh5E4kJ#LD8(phD7mR0PloLBHp@+leytE5RhMr#|NI;T58rTjC`Ogb@4&1cvP%)$ z{nry%PxgIh42+n~?CJi#0Jva9h$gvA>)0lpRNIh-29qH8Qzj2wEpE2O`DESVVasO7 z>|zy(_NOelD7WM^X-N;8AF)3RaQqpv+Sy2(LghmXrGx^s5|-E{B%HKb6`O%2X`GQK zDZZLa-N$IgW)CIepk~rW7`Yc<5S7;N!5_9w zMtRJo&9I73iNC0`j&$FlwAn*!k|k1nfxk!d$Mo>Z^;<7!e}!Qsr-BlXF281O+Pa*q z$aDcVi!hn*zTZSEBWVrrI|S{m_+-6EGy_OZdB6K3&$Fu*S`tH7WPTOCXsW2_%1iJP zllC-^)WY4Fv%wgW;kHS{K|5W_z21UhOAU>r=;yk8I%c9mJ)M<>?JKW-ax=|t1)6&D zGX2yv*Uuc0Bo~{t1Ex{Z$|Lr0fwya*DBTg)AF+8@g?cO>{;AMUozBBF*rn92=1Fe* zw21=3`%Kk_t$^3}uy0kxbI{+c5NN2Y0%VpMuq@qnF?F{P2k6eEIOiOb5AQo1$bBoQ zYYY`6-9l_vl3z_;&KqY!&r%mA%Jrj+?_Q@lE6n4LiM` zCE7PXTgT|MX*3-?#9aeK5gUVTlhb9kJXgK$iqQ^e)u8Xpsl3wZveHAha_{N;?T9^O zKQS}Pgx-Hfm_M-BK-?rk!t+>p8b?hL6p4Ly{_dH6wfw&^MzbFNm!qa2&mgV2Efl%J zYD0Jq&(rqY$^Ox$G#-|jioCTTegXi+#}%Vm%s($E$iE(9*!Y|__vaEbJyncg0wum;_2oFUaiGj@|LAoq-b zJ!3i6Z1f(-PIrf3;1A4MG6nSxYY>y=ewqfw@=tpDAKz~meiSFFwC;eg|J}W>D@07J zIXD=TVBpOm{qm8@myu|sgAbS+aQuS;tXT{r)A|Kb;qXAi0BYIJ8u4=USpGnZl8&Vk z9!h|f$jWrttL6;iLYQxrq>Jd!vvyD)>;h628dJv)N;H_nYyeYv`da}7PHfP6ceUlf zL)bJEEM2X*>rAm1P?@;7#bN-=?tNFv^SGyI_BUvRG{jCSFtF52Vq-eZc3AX#yfwmS zS#82A>UP1AZJWWiY5s(y$}W%C%+%51YxFbcauqf?vq>Boz9z4}Q}i;uVRXgVS& z%2(@B^{$h-M)TD@BGLTo*8W4=Kd?rCT~Lihl$sAZ|_8YPCF ztR?zswlIV!SY_v#Kl*R|^lROg#?w|Csf)Ha3KKJ4-_KebSGz73JI@6-0)@x{S7196 z9LKRVXz*Sa9RM__)1BADPp?aBV6l|~iq;%O-bLRr6|Oql9$XQPNs9~sZid{c(Yyb; zhB}~FvPMaU;x!Hx+U!AL2Vs%9Bzoqz9v?A|7s`J=KJ+#5MR@iR5vHA3;~ 
zYSosQoRx|&6V-7(vqW;MFdobmMWIIo@Af^0oN>vjiSL6;M}G@CvqXaje*R+H9U%H{ z{CP_YDfr~N-lAKDI@@jYlVjRVcSw0iyBmFQRW8zL5cxuCYZ#XCwLK?PW#|yMSN#Hh z1j)a}pRN9oRWMrBe}`7O^sOY~kz0TyQk^whD93PQgE1tS`1J3i#pbm4Ou@xt4mZY# zNleC`GT+bn$_W+BNF(>Aaf4>kWu}YF*iU05&>c?{6M|Z-9wpKT$6dsij zw2`55Ht=V2j1F?EoNbff@P=jW#?+k{p+kDYtwB zH*uW*{YxEXPHpyr)pSqO-u%T=XYb=m28@#t=#d2tpo6#$Mq4;ldU32BUzxWI&IyRg ztpfJmP>iOm;H*4JGHJwH^=M?%?Q;B_D_!goc-tl{tlD@Jdw7|2HhB6TjXqfCFHFIH z5lu}<6q|?#N!f|uw(0G(O8Ec{aXm=mgvjI{S5zLe@QE{}<^}=8k0R$1-kD{QcYob} z;FaI^*x0W_m&+X=Oq-7?TfHhB?tk4?(n?T?B1$9vwiy=RB>}a8hI6oHw+6Vm2-pNXyg zpB=cKzs!}*?0PJma)Emq;=vUH$l~s&SlX?&-U<9;sdgn@m6EBgg;veEzdLcLy|EbN z^D!2RY?kmVP%HNNB5fuHbZaRHaZ#x03ZCX`_D=c(qp7DA&~`sREX@J z3{bm8Vg{AET07DYXEjbqE*;*6@?wk8v!gzTaHINuuQ5)I*jD2!H&5K}DPgos9bWKs zPFqD?jPhF9U8N*sCiJ>FS|D>rEp4tN+e0~YDMl0)j6EL46j_DgR6pu1Jf9}TVHNly zCkZ=tRD&(U0uv%nq+EP_g-7C&F$k+puQq39>feSp>&>#~7sB)#yjOBIobYjsS4m-I zn>jhHBZIhx1X+dfw;sM1lqL4JnwO%vwW88Pxt*uN+PwxyLUr&j&m|=lVb1yZ`RKpC z9(@rn+`rQ5JE*rlG_EOy7`Z%TSTXk$ zFBV8ZRbC@ncHYoU-F5=^Ca-a5##ABq72Y~t5dT>1y{a?hCoBBfJH;*!sg|CRbpy|l ztuSyS`Q{iID%-Ch(kj`n%d93pWm(oTk2^oFL+O(}tmp!VM9^_p*gHL5rn1pV8}fQQ z|8*(h)5qVj-=?*R4X8&s;)K4OKDWx*a_Dv8Cyi}al6_%Zr?qRyWoCc2bk;Bj7(^>2 zH#D>z*g_r2TYlQUMD`|Q=f`wys!{payjYX4UMkDJ3Lm#tGUhYQ{^UxZe|LY5p39NGDGRe`P%s;Es5Is(@s8} z4Uh7c(+uvrjH3)*?=HKG%+#V&m4%A$&6YfMwwt`EFH>LDkDFFkU$2W#^zu!npAYG- zcWymfSC&-He)A22OJKV2&h*p&^})LU#utvaRtbE+M(n~mme(HEG(?g=4w3W73iw6tNGs;N}|Yza(VXVT>S=PF^;RlgF;ypP653n zVIxC<21?Vul|dAK?-}Tfm8?;r#8_7bz*9?~(t-i{5+DH-FJ1H16>J~^X9$Q(2#~IJ zbWV=3g9km*B95N@G&D>4|Lj>SN^XgrL~S?$ zTQDBL6Ni|g@Wa7fRB1g9OH`^gu36ySfWPzQR6CAKD}2iD(8=L(UK84G2Ng+j(@l_W zIZJ5d8+`w{`gNoN!9!Y0eYsgGM{(3&Rl&_GfZOc#rymdmd%l2|a=!J_seH+$T?aL{ zst{ZNF&2IHI24)5zQ#7yxAXXG^d|ASFtDND>d7#(II^kf$yi8f9nc+LTEW53_G=ms z);f>}(g3o_zhlKJ)OcW4we)3D{LEo5&%+T;=56>Rs++q>tM&skg=3uDQN{4E#pPv=wa*`CzPmD+{7PtYx4}H%2eqKLrbO`ZZCZ}q&PFgYtgo%gv(#T}z0NegH5 z_RX0WyxWr<0*2#T`(I2h{wxDXJ_C`*jpevF0M2EpW(&<=*;(V6w}cT+VpT8>FS$Mw 
z8#gr@SVN*Ke-2rw;Rq@+yf=QlEHva~%1e>oy#B;%59}iu%=)h}QTcsR$|BI>;$Zf4&wQmu-yAl%?Ru zY8vTBHWc6WN>m5m$%FIghz!(5h%3ia5gf>Jy%Op7boW*XrbpyV#y3Iklkyo%03~!T zpF?iM&Hn1w38jt(7jefw97cEc6G}IIajXPr@a`%j*v8dQ1%a6 zn(=McGRB_xNgg70nfHnxRYH{ri#>8H7hLT-7+c5>6n*1pA-VVzg4&EXEY2rAXzGHl&{8k*zGXOuL{~~IlQ8M&8 z3ePxDp1=h4jn3vW{2|xsrw{}nCEP(15Vs2Ve8!&^IRAA{8D4VsTEncq@fQ)y?2F9& zMC;;}Pw=`lPTRGfsM@4X@KSOFI-B1|$>~XQL`OdHb+q2)m6aB@I}2FfuQ^s_D{W)) z($fyOLX1=;-x#LIGlGkMhk|dEr@Q-}AEfB?H=Aok@aVh~agPJ1J4w~mNSlVa-tzgC zBb;H3755LKrBB5hVO69Sjx zW&YEi!dsDW88-Y;T2GY>$!^7NRkA%97=9wtO$IJl&&ZnY$+t7=mbL5$^GGY)Kkxx`%^gh z=I6pz7*h*V;Ldc^)(Y4r!>x*+5VJCFr3uF**+6uiQZ&IeU3{e0}rL;t6CS*wcX@`|waZ8`J#1=6t#@5*;nT^1t2y z*?L@%r^|j%gTHk2mF%Pd0hnW{4nUD+Wz2sb-%q_wH=ORR@Q2nepESrzK8?_?{m+l( zX8!D`+qY{@U&aghXzwqC5Z=}}pLFw27BgdMqGGFE?w64E zYX6*$&d=5KnnDwb?+G7LIqVB_rR6Me+~p%WL&;ZEV#dGh0=h7^E6}Dt=|}F#b_fmM zqiqr7dC4n^}UXN5+Al)x@=%lF1h@j6EL%;gz+ z?!BljD}>urZe_QqD#51!fIB+Tj0qgG0Q!C>Pk zI))uc<|~Z91`k&~+bJKfHvDdaQyNG|1F(41{TZmkTMp^gkW`?44Z1NyC;<#Yrt$Hm zxMtweS_-pR$RW8;e;W9B4YA#`le^93L0hpI&%l5O7LMd#dneUX<3!JMpzosd zfPvJpF8{!`qmhCc1=Ew#PYVj2ZxW9Po~_tn+C$ZbKqz$IlM0Fe`SSnPLH+RWHkref zZ^&Ri(!GqupFixq#cf^zu;($5^x%wr8b#E)H`X%%zmzBub77f81DfWc<=9ng6@p#v z0wP?&x?5iqrz^$%cZmJF`Z-`He^Ho2aj`@t;CunPajw{`<0G2EBq*1EUhDUmuKWgJ zDgHoLedYs&^ngPyz~G9^ZD`y1eyjSNY1;iwR+-)PC@d4o^*f>p$9|7n zKrtk^UjsV}v_s1C{0l0F^ulC&dT9~$2TM9x;5Ck2`L=L?`OEfO1c_@m(nahx-h3DW z3O29fR9Y7gg~SsT0O6UoI&&Kbs8Ovb$hqPi9RZcucX+6$(^Ud$UVUBR9@f{vc-MtUz_8fL{0cpfqlC^o7V`RFKiTW`(I_b(m`NhJ%Foi%vBSV?TsuF`x=@$8-p82!iU!Be3n$1v z9(bXl1O-(*r6^&A+Z~7CP>fviDORU5(v<8o>O}=kO%4`DZE0+{2EXLwM_O11Qbusa zal>gZWI}KM`EJgx8>KJ!d)HOzwfUF|8$h9Aot%IoM=%6?LWX^^;jXTcho4Nu@<0)Z zSaJcWL!hvv;Mez^ATBN7OlPJEU=U}VhxZ67IEnvpahrMyR_`2; z0V_kcY`#5c2?@#=&Vt2PGcZveFI|v*&}ZIM9kP-p!f{LoeQ7`po>ATAqk{z+enXNS zNC6W8^^#q1HV4YM!hasD-f>&nQwN%IZEk#hxKa4z|7O)Egap*lKj%8z>+|RMxFC)U zigfb$cixj(_zt>d1;(fG9!hoVZSBcodvP&`*^vYaXERL#{g+gptQ>rp*xwfS3?wAs zC5)AwREOrij8y#YU$2ZT-=dM4vic4c0h~)T1B7@JE^1Phw9m4zjTUiDymfD7=?GnEd%YKO 
zeVz=|UGyPS%2aZ}uFfRzYU8iA#lLcv{qdWII4{l|;)#CTtu%spOVdi<62CFAuwfCIx!;)6d?KO zkbP>C_Yqm~#>$EMV`mxa4K8p?#(6R%+&(uYE3>o1^vviK4RF+I@OZbKc5R#v*4p@Q ziP}8jVg9{?RK(WGibaWA_hv|mZC|1&&T=zCeS)&{T`m6{?xA4!Rj}Z8;((a6Ml`jD zoz5P9bxhh~!jZj==D7tR1y&%Dm4?2VCdFMlPOw5 z^x|r7$YQ%tEp+Ysaw%4ud$yK-3H@t|m!g+q01ucqR`9bX@3`a=>c$$Ov})?bzQ1CynSL-uCz`Y_kFeM+%?9 z0BMJ|mAiAQ>YzbM7udZkw@pMT|)H#paA7g{F`xaWkY>$G5u;_Wu&Fq`_gE!z@%|LlpLk}=~t>%l~&(l zahJmGj=hXi-`J7yVZoi=**UF4_tMKXWMaH8uEcfz+x!zn*G)`ft6dxB^QT6)6*OY2 z-|dCluGvCW`rVl6$J6$ z(xl*rjewm;*AX-Z$LUW#$7vcLCBZEfp;4}+JO3H!Uc%7%k;@=pMyLnI{y`_(*&9Wf zo4x~{H}x46E!DHC6^+lKR#$&Zz1?F?-mF%eRMfw`zP+aI8=vu7>r|$4J(01Y4uYmCVY z{f1}t0>S14T!B+c+%|%;VJ0#IEJI7cavC?&>u8?14dOLo0M~_p-YS*D+>Bj0M*MTA3-C( z5m*rn3{AP!7K06M&YeO|6v5e{zBl|}*y1X_AVts5%PO_}d(z8E7+Bbm`p0eH{VBb3OsP(MP#9`}VZxpM%v`xqt5OlJHuGRgfE**;2NpD)2YTNqRC7k1a& zzAlb~O<17Cw2!uR4ju%=xuBD+2Baz$B-H}9(dv&xBTiBfJWlZhU~` zx3lQgG?=5+4!%3vCMQd#f$!}NK5uh*fFB`5?pc5zKu5Xcasz;_)X0p!8itDB(FI;u zHqu~_w3)Gy>vF?y&wd7S6K*<5>t!B5SGu%z4Qm%_lYmwjI8(?JHQ?>G=vV)1<{$L9 zu-u1)=m@=Hjb5Xc@QJwOgeB#wnh4t__vn0>_+{yw`ppRV-#-rK)U^=$eZw*V6{ADq z-U#KtG%Dx;%RKimBkr6Z!^H_m>UtaOE9!3K;$tm6=e+BIp?QLP`!el*R!n+t+iKPT zew}6-d+YgaMi)$iwL~9i7|Ueev>)k$M!L&cgGfX__H1-~MLMMLM+?aa*e*?v1~@h& z>^^&^#J(!2`@C3Uy-v&c5X`v*T&Z<1-Hm%x0@56iqY#&(6W~({1gU@zI z^KaK|ZN49!PysC0plr78(lcD8NS8NqNV*tk%Dau*{U1Bdn@Oot()P3Av^QYhrSdg? 
zV+CODK#1f!=J<@9a2${R9p&3uYrbW}{z4$deU$^%U}}J^Hdr7B1VjbxN(iM=-8}O5 zd0z$hluXRwd}eTri1Q^Ts1h6`6dMrjP$N)7;H7-I`zRZ&jJMqf)^=I^cz6A2g`Xhr ztKn(oI~ZH1%@2KciA9IEH(YjgN~uej*`jT@WlMCNAn zNl&WD9WB3zX8wb#w`_~6i@G(d3JSO24hgP>1b0Yqhv4q6!L5J?2=1;)aCeu2;GW>_ z?(T5tKK*uIeSX0HwD!K%++*BxjD}bs#BGf2rxaIgv~-6_o+ebzMd=1UX)Yzi*{XOz zm`N-@3!_?29G0_2_F0y#eQi*SvoZ61GTAa%_I1()bch)n&G<@EizOKiyjPnMfJvbi=4}3n$j&>xfqF7yxX)JE_~_ zs<%5W`@n#OEr3x%j&y%q-+StO$2S@Ik|I;5;U|`Iu}5?Oo4g0%F$eX{>6jP~9NDbC zxAyL{P@q(%>~}2!xSUfyA!iN){yUdtpBIbgp0{g{zTT&sW~b9~Tsg@VZhHLYUN~A& z8)X~%sD<}F;679x)SGaksWKS-oJ}9r;hRmD2EB#7%B-fM z!jx9i59EmJ&qsJSC85wt0*FE6i^J(- zFm~kCM{V=rsTlHHFqUJqv#426<9qm1Y#H%#?G?0T;EumfHXiKdWYMO6*3Bc)1L_SBmZ@wxgYZx#a=5z+# zeudfjQ%_t#^DFI~d!M=22YqY}4z+@qd-xHVn5i!Hy2{S+>`}MiyVT?#ROYE<>_M89 zYf)`y`mwra?IfsY{)AgPRqdV^CW3Fz4TO06kk6C|Nnf~JRNXLp*HeUxM#?5%Pn}QW zGDIThy7#od{pS1`8kslo!LI7IB&N}Qv_0xKw^f_|1l{WNQ7XKU&{@KsDq12ko4mBO z%+nX{>;8;840scAq~p{}B>(T)L&QuYcU)5B{a=Sjr;?TBDKAXb!a*at(arDtoIt6g zke@O4xIub!Nu6qivWN^#R45MMd|c3{!B8$`-P;YN|IRvg-2LSKDI9HmHeaI4!pxGQ ztg8kgP+t)0?mzC6Ae7wslaB{zWc{&kQ!4Ue!}hFqW7lD{HGbib(??%ukiP*t-JH{BF|H@3b~YCc;PbM z;oQtN;fLj>u&!|FR=%`k zMXsyTkj}~KsrR!0ok7&w3u?eO+m&w5t4Fsw|Fba&gK*wW)pL@ZJ+Ad0%B{xGuFh8K zydvr`7b-r{eoSju%uOD5^3x{yAUk}vM6$PHi{sUYdDDJ$g-YG>3#>$apc2?I~zy3He@l^zj7VpXo!; z{5Lv)U0xHQn~R}_@v=T}H4JL#EqwscU%%7t5c7EYKcy0%Iv5=Y(6rEH{?`GXsvyDf z`Lt>4RMSw6tmMWjq6LVQ*ig^0FM187+};ALqlwYbuUi_A(*BmvKAZ;529eCJLTd$; zQ13gw@7TysjHl5A$rE+Y=z~7M^VF--7`0TO{Z&?&4ke_`9K?2HMgGC7tQV0HfLa^3LVQ@r(8#YW({O8^&%a2pz~6o-eXtA^t7uAi5=3(@ zr@5n#3(bSDwFP<3&y5m`l1T7_N3VN2UKOU9PqLcgX_luyKt^qunEV3tslsVIH9+oO zoJLgJ{I81^I&)RTN)fjgCHIiu%%eU&{qkfVml1_*jE{vZ*FRix zUDer}Grt>5T&e9syCF*da^dQh_<`x?kf(M)x?`}gMhg15e zb$QPE!y=j31wCjV`~FMxz>?4tnQ>KG`WP8~Xws;6YbvT`X>&C65VKDkAfIG?h@5~1 z_}%H@#n<_I=g-Y;@TM3wQWRCX77fhrrq>7ynxPpl4{Dqq`lgGLXBTtm?f?9p^!eKL zFKVZ+P0lDuf>@mC|K5K4?ntg<~WUcygWjXZ<9ojDc% zc~wy}U0|Ftdk2YF6}!I*(XGQ)J}QqP~c^_eOwMtsO6%&%^9` zk_~%hjfjIv_6lzhG4(q}Y)3%{t5{M349WC^H0&dU^Ldh7IE}Ur&gQSsV%kN*9H5=1 
zuG4Yh@jqTWX&v=p6cf1eh;US1k#fp;qzF6^dze|+#}ay257~dndPbqb(M0*bkWIYa zZoG}4c(mTOtD8DjO&qa&ABTn%4k5(BO`2zYQdbmxm!}Cygh{w!Ch+z@y}s)WkP!d` z8M)8DblI57;%9`Tx(6Rg^0N=@GWn6<22K6M{AQbaCSXARra(63?e_^VI$o}6axS#c z^I*f)2!a7RPC4(}<*nE$4bzTqwPy^!86L1|XtFLiW`M@%HGxn}*JSpcqc7c}OKmxo z-R9}=As^78=rg5D*no)A?vMpJXX8up=hL^Br3;Zao;TC-a6F%fO+G~WCK^PH^)DO< zP{EJbu`|b7O9%uOP zYr_ZBH&Kz0i%nZ~OTM42RYgkKPO1oQF_|a`x`!RrN?BG=zif8Z90SM#rP&=aKx6a$ zPMOuzP@hudvU2m208Jp`PM@ezoZ&q+X+E7gldUE}Z||MofO`088i$wOFZ)-xNV-5H zLkBuQ790-__i%HiEg94Ru>J%RJR2*k4>@F{k9Wcn&Ap^#OK^KM*fO!po`afIAu1_K z<=Qz@t~W8z+EvNwqwT-^#HGHK4KnJQ6@NMP_|Bs^9AKt<-;V03uvWSndfUC$v|qi` zl{(^28-m+!L#>F z&U^1a?7>xjoTqOWOMk1(8c?S=on>yId*3_$$ReHKNw5As(%i5Fc5gMRd>TDX)2aGH zuRx*s;%45pDB(lTL6GD;G<0q$4MqtPgvPy1UOj8?EKOnR@3S$<)^>iZ#~05n-S446tP>lGgG0JI3bkwZS1IQ zIWsF}Aw6p@Q`KSN3=C4FWqC4*Ax+AV$(`lg-hD`N!h;US8vNt*O?WNN9#xq$pNHO# zyL>(vN%H_gcf#I7<#w9=+p2E%-fikAnwkH||C!iW_{ti&(Oygu{PEu3F%d3-AqP^7>b(k8plB6t}WMY8yBV@_*~^xS#lp zc+6Y^KQQ1c*jd!n#aBDVi5Bh6bK9)uwOA><%I5nWqO=4#BA)f0M%eAn2*~o%c?(tU zp4^M{S?Z)%7rOi86{)$~)4IuOd!}y0u(lMY*0?V!dgcC%NFYq`=6|+xKYn!9pK^*$ zD~&6mFX4)MXHs5)$b2Jj;s^B6&V{>15iBTl1cxLZ=u-bA+bStvGlTbenP^CU>eE?k z>ZIKwSWkT#(plpnS5e|0EX`f9BewOiRr538}f5H zc1?xAa_P2{tqFJ3c{39_FvqZISFnHGt|Lc)t&2k>vfk;?p-y{k0DAy9MBqu6;Q1vj zCa+EV|HpoOXF2z)hta_S>XfNN#m|!{;lzA}w3|EM$^x*i41Jc|j5FceQJzv^a(;ps z0M-ivM6>w8?<<`6t07#jd3|lzM_>T?4;p@))GECgwIU_tePb~unD|m{^KDS_dMjw_ z*i|iWv^5P?RwX&>S3ZhnA8UM)#o5opA=zJ1C*FOYkw2Zmgnb4aPYSB`tvJzUU| z3j{XXSvlm|Z*ev74aT|n*<&%ZibKYTH+mBC@nk@Ffwoer#&Yl8JA(IO_35|^QvCbR z+=>-)GwQgR|G6F+$5;IrrR1V81~ILFMROCqz%QXdjlwo6%`ELWK?m2uPt&i@NTD0g z`&7Gf3eJLrn9A~30od4kE;`+`1IsA5vWY_2(&ZE%@Sw@8QoX3!i*aP8gmytCZ zm89~bYXcZRx6`|y*Fqc7+3ilpCIk<)b}{64Y=- z84cjFCeY*pHy8au-gkED`~6NU0crJH9J1uo%NW8JAwRYr$*r6OhSiQXET&6Ni3Bu9 z#BI7=!*~FikI}&*y!7VX8*fW$lH|XThnVg zY`uV2NVp^)HAD^^!9jRepDQG)0K^t;5n$|GzG4Ujh`gBCb#YKFum*OOuJpQHZ`c+` z>%70sjTS33bt7)gEC-`GJ02*Mdf75yi)Rvt^%}lBzy@wxC~3<=CExgS`T%?GMPyPS z*HbJE0)W&mUH<$7(hhCLh>=50OpXzufsJsbyvuHr-1p)mWy$MEP@jqh+f@eW?;QMc 
z)*DYhmgm{y_59NhNY=8Kq^f**RmpgR^pR~1Csm747{=}{fQnP+GX{fwiRE2`CLr_+Q zq6>J`d4)>LNT8l5m#eGHKT2LLT*A_Kly2WP-w^*R9;i?NROXQ{$WaM90?$B&O0Gyq zr-^~~ti=nf&x+eT* zatUpkl=pGC@{&XnI*un9A0Yw%x%_9-b33LWwiDw=Qyi5gM8rAAI&i))Ma;y=BKj$@ z`%N#q0kHI0=N6|b|F|deBqHpmP&91kX_mk2rtQ+0|Ap$$uIQ6JZ;fP8`OEbYKu4T9 z?JzibOf2+;xmW$EBr8=qrdFguK$XPmq}BCB!}mJcvG2*yhpX}W&U(fwKt}#L5ftbn zqb}N`3$==!wY7kwLe>{G!X~ajQ_gRR)MS0-^K+kKzHsSyUy0f~964!xg~{%!W_Df7+Jc~G*3oW?I~JiPx|d9M znwlmm;WEDh2bK$%*zZbg***L@>a&_IrPXYc3x~z8t^8zr06MhV`(h~ENDq<3t7?NG zYp8(Zqb4yIzdaYSc5{W47HLr!_Rh$15 z7}{k!cB(rGM4z=A@m!(k=wC27--e`*I5Kg%IhrFU3d(N-^|xFaC3t~-<**jCTq1fVMG&BLhWYPq)zg84hq zWw)#SAx4fI-Zz#i(}iyZrr7d3Dg6Vj*<-myV~7&=1T?MLTtws$Ngi!975(b=9@ zt^{-@F7D#C80-Zt9vcQ_fUeltdj)Q%_orY_Y+G}8DvGDu^FNwV4(FM-Zuu@_*lNap$ zrM>>3K8VX0n|(*~E}?d=gQi$`HoAGCBYhqf-=;kfc2tH6@^9=L{rf8;6LAQ)1)+L7 z(g<)T*XklVSOzo#-(xUq53(}?Mp3li4t4{-);oHVp35_+L5f$r`3XeP*WiGN^!%T& z9F)LKvnkwLrzcO&Z>R0^Il7fye3YT^4J?*;f>~ME^Z9eaX*4DXvGP9-Xem8KDIL3l zen@!79QhWS?-_t&fzUdB%^=juE-iwCzD_&;0u zL9*p7Gl;ffzVynA*0qn{EQK1{*Xy-zmlOfbzDq>mqp|#Hrr7|e#ntY0(0?~20rWf{ zg*~4Po>$NOI2~5o2}CXuMTl~rp1EcOF>2(AYd+py4+jA*NqF!)ZFStQc2Ze;X;wbx z2p14bFJd^@s2P38D*@^k6Cs+km61KN>|`<<+$r6yd4D59+q0H21guAW2?jMQlLA`J zl>{p5-VJ|9Q0z{|$W9SDHV-R4GdFW*zQdfiu3}FO;G~>^60V=f4ecqTl*NI;jqBHA zcPrNB5z4;+-Y!BvHIr@zEiXz#Aba1Q!g=2F!K*wpTdUDwy5vNfmj9*`F>CM_q<5G^ zjin8l%=E_AF!p^<2-3ehcI0gBz#pnYK`-V+orgL7m-#DW+i?`?^L;qy-@Co4{)|Py zkO?0rjU$p)a<15^kKtC9nkdS&C_GU8HN7Dt&pxP?kNfC%j4E6d9+sOjc(UfX6JFjX zlLiLc`M3J44{$#Yyl##Oqvm?;3ulSJuCs5%0}@zHCi3NqIF@i=nFWFoi)DF95WGN( z=?`mkY)+zpJR%svZ`5;Hwm+kqH_8jxc<>1(kJQ1|A527phbNmTs6i=N@PX9ho zN-4Z3-JK~q#z{>1b=^*dbviu#XG&^geW&5$D~@K=_B9rY0^p>52f0}c=5PFDwLYdt zMOL|Nl_UWMB}G3Uah@@#gw^65siH#JJT9(h(oV9Y=Q=Md?iac)nhFSO@uw2h$v%|1 zXdW->5BPfUPN0lUM6r-nL&VY0T&8S~RI~CB&Z?!)@ zr}$mAQ3QjKCpkAWq`ko&;A!`rc|82WOLf%r5vn){OPpcgsNA>i3~SJiH&O%;0(i{R zT+e~8vJD~s;RX&1Kw+w2u@#UA&~$)fQrcQAK>vu<|7;OPoqj&;&lg%1*=a+U*U|!j z&BUgmA3sjhh!y@mo5722V=xU~EG++OjTODZLW8~9<;Fw4+*x4O;}%_Yor`$Sv0xv2 
zLvJhIpq9IxXX-1csNaUY7qFd7*x@_$ZzintAk000)_s~f>3S-iG9ESYyf?XA&lu+ z+AmchNx8htmwv+L^o@dtFc1Nda2hLKu?4>@EFv8kRqGG+#CA5w;#^we7Pv6 zlWP!jp(gtY{A~VQQ2%vNV^F}(`PBU1Sec`3*~g9#e7t+;DeyENB?bcneidGg6<29oe|;Q)BI$N+(o z(Ty8aH&(aU#-bY3Bk%Pd^-(F`lo{TGJ- z!uaJ7!h3HfC{=p#V08R%ry(>;TB?+JuhbEHu}k%0c-DlaF%UoZY6^XvSX*B}b8>Hi_V zcti{{^@b?_MKj7*jwhME`A|;XTnzgrn`7Y=)e38|Pz0d&N1g??M?Xl~1GKtpCFIUf z0IPga6%G3A$lNAhVKc2k*#ahS=Tje7CefHC`SOkYyYk86$Kn6OUr>3{gnsLO69%s< zDMa;NSt|8;nJ+Mv(_^7ptFWWT5xT|k5L|X9*LKv{5A<|aZ|Ecx(O?};RY57BDpH~J z7s4PBr}`G35#jY>GlZ>Y0dYIBYu&|P%UaX|pCz(V3df|&a?&?OelrLt^LXlttpFH4c$~5#=l@@5vSCb}rU*?-pW5HJWvg7RrL8HGAPi(J9DpD_K8DL;LoX!| z!a9RJnHKoxa`7M6C{X9@tK1y(-#!jcGN+1;8O!vpi~b1&K-2b5H`OTgM5>4=@~A}e zn|K=Va?0L2ZxZIQwY27|Klhnvu9q<#x6DHSUs{{V>&H|t7oC5(6AbrnJwvcfl##jFFdFdjekfwACtMU9s7tY(4oMqSwr*$tjEb8=$_a-wjbrqnGKa7yGO z8yq&7tda&*h+As zOurcvGSNt%apsyJv^d_EQHjPiJjMswf4lP-?Q>t?ssmv^#Pr4kEf}am(7a%ja07jB zG=R}YKnA=ik|W&+Ull@E60^4Gh&`Nk+gCIkql61}?Ih{-#0A4=@96m@jRr?7GbIsm zR8A$>zI(KOYb4b(Sh*U9{Fm zURzGcNy)DmUtI$Sg!qHi5y}LJOJ%v*Q18p$jW6Sdwp^FWoP-!zQ4$!7sH{)V;8FER z56W}sr{A?oAPRUo?H99-`{l1P2|wjHb|WVW$scm4x0>8Nwy&f%H;0)1w$DWc+;fUh zbUhRprcQeVWC;X_k1nx98%L83Nn+Y48+3eUuhehGRS0x`L19ykTpsm&4vsN)hjjkA zq729SHXFAQ9|=apu#Syk&Q3~GdZJ+xN)G|Rrs0JGZpE|kCg;?R@HJ{{^y&PKORjyW z=euXe!n>;dFEWdh2(L|ZK~S0_PkI1h!1CmcV;A=5No9%bR4^}m0+1?NUU3G8WhX}`s{jV zYn>;H+Y+vS*qu$p-4LaOEZozgzAuYaEQA(Prc689j#|y82V?(`> z^wi3lM^X7OTu*cfhC)BK;K!mub|V}o<_=vix%{;FD|{W5>oA-G7`ir2Q*F6|hiHC6 zuKcaoVLpph9kW9Zl3hmQrY}XWb!Ec;l-DzFE`D&IB%(b8+L$@C->ED=-P+ci&o2F? 
z%X?amO3N!!M8(R*gVR!-H`OsLbGZB%WoR?(at6!#0;zs0mr>?hE68|JU=)kKuXwVN1;ZrmhzY-;?(& z%uKrhOJNV~LyE&XMl`?2!%ZJ=|zUunfUv3 z`~^cP?1xV2M40i5RVO{pAbkPedj{nttiG$wJ6r5>sZ?vE3%eI0p1ssJtNYH4-iwtv z+mM{^w@OQAzvuj(Au&(&4gKYJaayJ2YsYJTn8Aq~v@Cm@EEN#p4k6+I^C$T%4b_dx zzAv@cG2OZ_138f*L6@UC5Spf>Q*Cg(Pi6Ur@7YNOS^~1-G7sH-CbaVC`O*Uwo^`!S zU+*bA_hqM%h|;umboh>7lCth8p-uv=Udq7BP#oo?$Tl zj|v#c?81>j=Oh#SfPp4NW~&wWyCkv5^zzti29E@RXQ;XvziB#?rzB7pT8|Z1Z0Tp?=u(r&Mzb(nzaQYj_>5J3ONg;5>$*rtAto5lm`KsmZ1i zlE`fPp>I{JMB7*Bh%juIx?dlpg_I}A!SggqWs5El1AYpT%YO~LLcjBfDY45r;OdC3 zZ4hxdt5dkEluny{dxV?dBOWT3v1y|h-iSUk@qBU`A)tP|T+1P&4NocvIoC0YDWcgYCB{cFmHujXKyyb+r{Wt}bVz=H&o3#FR12ZJbfi>>d?J;=(qRlEIlsf#H1 zh{@Sr^50hV%(Rn2b}&2+y8lUz+S(f`C_qGn;3&aonV_x#^FZ@B;>)K%HMw83`=)V? zXvo1F7@Gv=vB4x&R%@P5Pm44o%3Ls2-|V(eohj~|lS$pXX6c)1>^3D+@Sz!FT#4++j64%>UG}G4aEcmUMkVC9#RA>w*k|2R}2s~ z9@}a>x(AD8q@7=_E}~I0NYp927V`|9{;v|skF||L!5rqJWA=*1=`*BeHW6)`id*wz zW$meE&Y+J6d4BdRU6x(Syxo|V>HVe}M7d6gyA@E(K&j<238;OsFe8{jB}_V?jxc0&Wi)8#${U$6){F)}4t2%= zX+^B6-?dmrNdHQMPfcwNV>LWmu9vL5QJ~>(*&f0~*+27v+eQU;lSO$;dC=S>A;nPe ztVAQk7P?EtqLY(7x~i!8cac{tNf9tFEc3KS=--@;V>CnzBWgFd-?$Mk0ooOQ0q-2| zV)x1unVPOIAoBZ>gl6 z?kr!Y^KyC8u0FlIBkiw8T|`$N4l*y>`Pkp6=SsaZkC-CaD=9ZnHLH3H)QU`WcW~8d z2!LK5@$k^}r~NQ5G+Al@AT>lTeT}nbY~84)si^{}V<)-^6jPzGL$hXg(Sfa|C-M_C z>{X73aDi;uo|^wT>RZNy&ktepA=JG>+`Lc$^7E?!UZ+}%=_y_$m^F{dGLa?qLDliz(QdQ$~rJKF$H+Y=&X%$dn!D3gXSyZonK0i z8e~sp3wUh)z7t8>ew36)TF&MR;HIq_>#MebVU*_n83{xF67;8fvP+DH>DKDSO8f8w z)Tm@+KP|;CU%<>=S|zr`Ztd9gAO<_n*0|Y0VZ6mbmHXQ0VyH<{l4+reE5XGjC4J0h zY@7L+VD1MsyOSlY!1-4YjL%B%XjMqlo!Sf4N$=Ly%2JoOEzJWSfvGEb7^5NZ4nA)A z;kIdWoT3X~0Cb^*e4m}oOE#rO_YV09X6R&+DQBvPKKz`uNjFLhnm1rtR4O`Ii9FKz zoQi-cwEQ>5KgU6cq5$7~#71#;B->MQI{_7rcaDUwQKi>_<#^h`pUDT44{l&YxK&W|- z=KqOD==HxG_l($qv=9g_^M8s_JR)@rON9}-n$%-WU1c~3{o)WJ+hHF_7UF|+P z$l9`bmdMKa8Tuo0c@UX3>H>VxCTUR$V;}`E6o=mJmq-oS=|IUyS+iVON>%qN_x@7p zgYB(j$;D9@&6S$^0v=?Uk><;Q`sB{GNocN5;vHPzmx;a1od8bnMmi~8o#Qz&3 z@hoQ%sDuj$O*g;&fj@%|K3aWDUW-oFT;r;q=H&WFOV0HR35%Na2us79C`DQWL0szx 
zZLP}Apf{cVHe6FYd{9{P1bX%2n3-J;C0 zunw8TOoCbqa?-o;5Osjm*Wa-iT6Celd9jEnlqWoKc-!r`zpEE?zu$zYMlhv7l{1(Bwbz&d=u?!=-f3@ka0MuKk=)R(!nJfq2n>bk=9<=t~$WXCj8FIIw&m$1dcd z2-IL7l`Q&Ia!W#dfFmtCM!INb$fHM%Y!owU=C(dX@THnvVPn1CMPB5&r1Rn?cJp^8#liF4=4iUw`%dN#uZImk?EvmXsGda%*k1XaL;JBq9L>#-)d~>} z6gDwZ#2)^&L5+hBNaL;H&?v#e+*Aq{?%tuf=K~Sv!il?(njf)f4)p?g&gj(B9n3 zUzd@8^mN{M+x1Fec0~N5|0&&w?lDqfZUZ4I2#mn#p)A5%L~?W?7@7c&r_~G}g2tE<*J!RA>F8E9dSdt888-b#m(V{8nwTG6m0pW~ zPV6R3`Vhf_QfwOc&g3a43cZ(o%Z={!H6L7E7TcNSRl_}x(8}fxF{iGlODQaZD78r9 z2YcOG5FqQ}k7J85Q0%{DzVdT2ig92VnWSmOMLJYpcFq}3A>qsC)RxYgGUUOOu`w+h z=Kkm1pxZVZUwe6%?BFoaC@uqu14d*B(+ye@OHvlx6{+sk=lPt}RPL*9)K_h$w@!*k z%!qeVZCgo!6dct>E(kuE{I9-(SR`JymMh$(Dyrkf^G;N%Q-lke`gZK`VO&)pJCE%sUc{%wc-_vGNqzqXN1oP z?$J=Kc05e>adCcSdeMS@T;AYENx$CnO4qB$CX?Z7t)4gMcy{_X`wNzt0{ti)lRW>& z%RXMN3d+slj5MEgUt>U8etIoy4Z4-EuRL(&_-i});7PWxrn1z?*Aaspf`ur`!pg9l z=>r_Ptt_*m5~9_~2i*)(nJ12-3}c5po~JONmTxG~C`X!$0NDy^O`X@^n4>46I{5_3 zxZ9>e`^5yykom-~L{^u&Em$GI`7xH7MAYI|SJW^#GsGS(xc;#lh0R;u@|Q{}=e@nx znCsUZPc?g|y;tA%sumAZOD07FK?+Fwr8YyHUc@Vr^|Nh7{1vj_T%z1~3qt0Sk_Qhz zh|5!kmf1+J5xJ8OcytWoV#_?L5Ue7Nc9Tgc(ovk>TRVK9(aOwoIT$)fl-~Um*oy%n z!CUah#;rs90@NE3+*&(FunOL_ry%aj^I6X!7Vd&@E}vhTSpfIhXTDOG z-I$xIuFR~_6MfCH^rTi~K6zfZSUboMNZ9a#dw&rlPqcn_qq(2A;s6{zW@XPAj;aaQ z*1Hq^q2}B!*QV~4!-lf#dZjB?QQ!us)N#^t8tYmMm8cM2T}x%3=`)b|UJ0zJR{PVu zj^=vP2;cm3b>8i1*;iY!{LzVhcWa6?%U|wJ_5?80R5!630RiTAjfIyu0a?Vjhbtxv zDIv-WYw@Y>Y9g2RmFYH8yJzY`=K+9~19ba5`Iw;wzu9c56w;D&gPH{}l5Gx=37R9f z>A|i^xt`&q-luP_6=y#2{9pZsR7)PS?Z5zeRaprc{jkJf_7}bx>MrkL+mPTDR=E4- z!ejs2Kh}X=H(^zMRcche_EdWlv%mjfdKfI?^dJAhr!RY^Bi^-c^Wit}cqY$jU5nlL z5wj$fqjV$31upX4HH+Z3{Rc9#|Mc#c+g zDSGAM#^blKn+6ks;oZ4pl^B@#qs&9EL(I+IBWrf5`E`@@N=!5vD5cAt&p=+e(eq}@-2>* zIuU%s(g)-G^vavw8wTkDHwx~sZq(F9dR4<4AthOFv@}#>SH1mn?AvX-W9IUqN8V>) z@h7WijgNt!TMR@TAoH4KYlTzkHAT9VTg6kB>P~$B%BZP4WCMQzX9lOl)AL%PhGx0O zZy*p=vcx2->CKSYdUeY3lsI|bzL6a2vjSKbPkmX<$kElUk0Us^m52}l?ZcmZO-gLH z-7mHu>a^|NdB1pQp<;R<3bg`Rz6rPj_6vUtAuPVA0#F?z0tn7C^F7UEU`~kwF{01R 
z1?AQ;L2Wb>rV&B8jkB|xm4}g)%Ux7_`?{bhO;eZel$8+&t{0<8hsWr$iyrc@v+}eY zwS+DWHvR5mVKN@mN#y5;pb<_}Uz%bESS$0fEp8gXT57)g0k34s0aN zxh4I|9r@REW*D(_MYj~qcxLe8S9FRsVF$C#xSwIJs|R3P!bxFZs*A9OArU1?K9MProTbH0KeSSA zGtBE;4=*{mKb8aCD={@!P%!k9lgQTcVr@C9cdOYzT6f~xSbm6ehl6erd>`t*c%wF> zKGEA=^_*|XUXUO^dza7Qe8)+3uYI{051e}*4M(6U)`E2GFfI2MWIsfUrP)vfWt`k9 zl-%Cl5m6OGbx7hW_bh4I=A!IelJj@3QLgG3;tD=Cj&N;18jO#7QQ9EYsyEmCK`y}VbNXYvZlPk;`<)|_1!rDMX?(I;a zBWpT9+=uPq7X=uvm=vNaf6PHf&wLX95mcZI2A>f}&fP~8W1P&er!h33Me)&Rap@#qIN~L= zSRZ97kIg4Oz+h>uIs(LfDLBkvZHG3DFnv^U3{m0-GXD4-Ky{3>B`hH0X! zFZEj!Spg;S1jW0#i!OXI`YEBPfV(6}0mOxvyw6Viu!k(e9y`Q?ff99+>Pwz59o(Mr zVbmJ6vUl`wDon7((s{e`gHFHoY%ap|d2wA=Ioie8t_HImOK(sbdZw-kl^lYXmW~RVS%dQHOzI#g`mLjp-FaC;5mMUDbMX5EiP&IH) z(k9bjY^d#hX%c!17)=SR42MsbHa;()CnYqReF}y`@r)u_0eIt(jKW-5r2O9=3M*bGUVeK?~8p0FEgCy|D2?SU7Lu55A>T#d?d zc=t5Y>avQ=vxv@G$xB@Yv?U1m8Nn%ub)2%o*Z>`2iu@S;cmj`u7*(Ijpb=OygwL9taB>t7fyZP_H~_BjS4h zG|)^fxaPbxuaXEsI!emRN&`xnN$ZU4SC%y^(=YvKvv=ATP7@m(XWi}GW@i`6$$y)U zWKj%vGKwB2g=l;-ctXk^xK!8hPc@}1y1Z_?zbHlN34_99U-c41y!W!3A1Xv*{(7AC zXJL~UDUSD7KRQ&MVirp+GXXr6hkQl@PI18UJcDSNOINbSWdhbHTZ__U1jmUZVS#VU z)a>AANds4Kh^s?XS2$zFo!28Fh*bz8_{7jL8mQg{x&)kX_wcAud7JsO_)a957u zsCrq{-Fu&KKJdowO;LME&P{mEC;AZ+kNDJL4wZJ){u%GFDED=eGCEh3@3spoQ1Ww? zsqvxEk*LOZ`xkoLQ1y>r&9Iqt3X(I!FMg+_%6B>M{YWEN+n0E1yO!&H(>8Bw#a)yna%VeGggd`f$ct(Yukt=uHsteYyG7Q|hqU(RUOxhufWBfAgY^*B?`92>w*qqVaC&e|&P2a!e*%k@+)igngIs(W&? 
zEd9{_(X~%cdcgIVyKlEGeFzA8H;+*BumshH)jVIcgb6@P!?@$HkjJ2K=7|sPGfjW5 zW8#hOOc*`II^Iv5HzPVnyQ+cB$FS1KRXUD8X{QGb{l*G2z-Jf3z7FNKQ^z0}E0G!2 z{YBK;@U>sobln^khWj*^z_$wEG?u{g z{iBeHR+}8fa*W68(Ak%*98ecX6*Cl(1}gbgCuq2(nkWC~*J5+YaQY9BsXOrFxdQH` z|C9I1^ya;C_xVr@4*SRTI!)-Y@F)ntjsMIXQ4$*@9}_ehoWOB{Cx@>#9d*~f0X0t| zIt&9*-EzY(|M=Dp0hn0oi~g50*x`J|29QHXV3RjuPxi)DR-mmOmqE3}pM_(qM+u(n z76x4Ptwc3PMY_7sM8#@5%?`5xn$5BeKr zbGL&q(!Wsv0l=zHAM7Iq7spKh4BV#^;ARQgYPhQb^wk;`LI-C2r>*g0_$gD2 zSEcddSTN&n8Rw3*>>rq`fPm)JHM-x-_GwWG@DfXyqNHa(ql4x)OgLOuQ7t2aXzr&4 z>3xIO94r1&I7G0|;Xn)p2nZ%(bcYkQ`1kCp)#VvRBC(4H$iZo2g2Q}^GipcX*sHIiFd#$HbI4vdnJFk`0@>J&%8G;H{u}3~C;$8z2xUr|aV^b5W36UbG}`E`6bfjKg$*Ub>V{GPKg` zzc{$$aOZsJu}2Yn0A!Kv<)V5=^8X0xG3RyRSpcjLcnep-AwLm~+UVS4o*If}Iuh>s zI?RQbANWP(PJFPiYO`X2h5%!MB}T{yUoky1NEY>F!49?yfL;MV7b+icI#vvO=ctlI{It=3y@~zHs+wt(2s@iVR zj9Lk{kyOeGQRSL>%AlEz20H%iM@;%dEo(j3J=>yFAN(`7riPU&qweVW=h0qICi?ZM zMXwXVDYKyW8P#pnKh|YY#zj5tF36Py0=^iDm0F!cLnk4=O}f8z*S9}Hf=6!-Cxw0K z;S?CoEUO8SMr#1^W-PaI|E=q7(Zh;Qnu$m< znl`(#aSZvat;1b@*#q*v@t2zbR{zbpPEvJsD)xd;i4`;3p5ok|qAq9e@v!PU7it3r z@9i^1E>bF+^0*{vzq5VL^p3yy(WEGeG8W2YX0XM+t@W6-%p{u|GX(CZ+$qG2vIk*@`s-EipahR7D&#y1U-T;iu}&%3i5Wts4*QwPo~%(7nle z?i4AnojSMJHr^h#L|-Px60NP8{gQp(j}4^KUYG>?XJ3faHNcJF`I+16=tbEvq}%#l zTxs8JHtFx1>DqYX2t3HvYSeB+aFD(0{pr#s$7>NCYsrsF+&_gcRKzR{-M?3c@RkqO zI(S)axG+I|ACBKIg*E)>cLhT;%VyZ;(UHa7gw5Z2i6Ik?HrLhRpu$CY9*?(^;uaMN zU$^8yyNLDsJT@M-DPwQ!zqYfvTzQm(D0u9hnEDqVcZVKJMxD)q^<8E};^&g^2UB!E z=WSKq_UKdnBjBFBY^7^EVGy@e*6Lo=crV~o{Y&jb(~han7k()-y6w;V9{jNFUuf0Z z7rDT?A5GyQ@>BNXV{fYWUS|JD;Iv)z?{$io)>h!Oze&?$n4%ql@r|#KKmRjo5}j^) zi;Baa#q0(a717zf$3cXEGP=QG&qqnJo6)$IlH1j5!|;>Fx=I(BmXt(?mH5e-W}Kau zm8hw+yB*iVuLi3U3&8AOi)Z{k))Um8lv~BwC+73P#Lp#S>{V<5AZ=OFo-1`j`Lk4% z>+}6CP+$YbU{@X2m`~Dl| ztL6E)fN3YD9$uYJyFLem0Kq_`MspFN=rbSE(}(Kmx}#$^$&Q?_9wU|p629duD=WqS z?X~|OwBtIGNuV+!uoEq6)iEK;L(TAerMZH*U@qVZGZ9@LR6g)bw7Qyw+dqLVf1QzGX)_3z724f(ENaZZiD&Tjuwki1XZ6Z;j5^DqM=y* zj-$qj%SJ}TN;^zuHgS8Lda16B2G9hS6vf*oINL2g`d%|r9z9B{^z^KWa)NHEPzFp&vuHoEFM@oePf&n5&N 
z`p>g6*xy0U)8{(}_>F`--Ydz|I((X&)}Smfvi+3WX)%hW!Bz1t*bYPB&cu&k1_Th$ zf^fnn|3GxIrs64&>8$!wb)XyuQ z^gYfl6}DNd1zL6yy4ago@*oa#z!4gJ&I4GW@Q*6@sLNF^mXt|zF zb+!D9rEmXS91o(}(p#~{T!F0{8iQt5Dm;GKdN2zyUJwX@`dr30{R7 z)yIZRx$XgRGk?+L$!0ASv-7cR!sEp~^Q}ae5qc9(HOWCv(&MdBE8$YgA84>--FL-Z z;|hD@Vw!IJQxr(|XL+y3^j@1r-5_2>>J}0Ajpr0cbh^yXgZ^3|xqy_^*CT)*7bufB zR|(J-OAM?u%l{Vx1RzVzGZKIH1E46Y@3GN=Oc;n056bJu2!jd4A%5dGI!rSC5$U_X zhSGy$gUc%y+hyd2ZC=#pZF{Irr4nd?AT!Wc#KZD3Er$rb(3WXVGSve3gzfL^)O_Z| zGX12Udv+?bw580zuo3ZAr6-#=8eq@W1TMPV3R#cj7X|2SZW-rRL_m=wquxYL(aK7d zuvDxBx5J`e86_?vbDR_6L%%Xjo1zMycl6Z}qgmq|Z1n}BQJH-wDNhxX3e)rHxlsI^o%Cb-#$)a?YjCqQ7M za>`QC)4_a2-I3l&Aurk^BphGly26+ewW8TtD3X;Tlm`*g(D)Sut#zUU6ttYs4KjW` z@=PTCE*BvvLo&?kgj z`p(DeP|kN7_Tu%J^K!uY=lAc5=3F&E5PWYWiqY4%ktn z&QGNs8DwA1{KHbmX=C0Vxs?@*kj8CXTFZSi_PwvYvn;s@l$0iEM&7uWvi}g4X_xbjlt-`w7Zz7=^seCT}OK9!Z4zH{Uy=l7Pg|o!&86e)m9AfSpKDXS;#ax3UIiPI6nM) zq}-UM_=DWPtL&YMZd6XEZRHNi1#8gDVv1zId{LsW4B|OS#Y{<&!h7^}hj%L;^)}L| za~HxpOD70GWhuP=_!-)IyZh`fJkqfOBQW%~P`H5vI2E}F+F=9MR~6aMcjip?`i^!9 zGCcki5EpM$hGn|}2kg|!ZaGpqS$V)Y=i3+TT&D z$AvFly6c#$t>gg)1)yT2!%4*68{g#wKJSy;-Ky+$+UVvsI*OBt4k5d)7{k7^F23U_ z*X|>SN3H((8&cJ$m=lT%at>HPV3+QU5~Js(7O!%@Wrq3$Wq%pai3nl3eVr(k@MqHV z5C8%(&)EFC-|24L zk_`ow!9zAhhjp3?UlDd-I3B|{+0}YyHH!`+$M+kX*9ZU0*8iID6D36THF>4o5q|sz zw^@<{4y>!85?FE6zm8_Ob^AWUy(cu}WRXFIe=s zq0jUGO9j{hi++IB=c?91d0%d?wAEg-@x#6Aw_$`CZCE|(ba@PgIuc@Vl&87-U{oY* zppec$8hyG*!L)TVIyz9J$)VOaSi>|7nI{!C1zz-P(hbUlzZVE@s@CPC9}MnVzJa$l zvyBs&vs<4Wk-ADK_^wHVFw!5{H&tZBM`mfEIh?u@B(G~Y!G+sY^p4_YE_&0s6QWjd zqPehDzLAj|3fhglrkVeDA={_lA*;MaQx(OM4A_7{7S#84=1<#*gSiaXB^;B7)rV{A z~>JzCaW zU6VK&=Jx;G2dWIQ1|HSvsQeFI%gb1g%8bYvT~#i}5>H{FS2@nHtAv^%&XGvFaR_B-pOse<1n< z3HZ@1Q~iqL52$6}=wc{H1Keo46Av7#R~>(DM_;7=>G`9PB&ylzQ*K)ss@P=WWfa7( zFS?%7ZP!9AQ4oo6z4@`fG)O>b71VBk~+xv5S1R|w2e8Z zi^^(cC7A3ADUg5H)tR(Xwf}^Gq(vDS-fJ5g?S&Ss!-vQu4E&og!h4L{xH!3<&pRr1 zT8fxjQ=H}IR*gF<>S9Gr?Wl9-ZG@k7&Pt%dWfv46QHP8FdJLD(oCe43xap^rL1(;u z;PNCHbUIQYpgg8%H}1w#@_9RIn*Qp-@_dm;L%>hBDw9p4o=k)ez?lwM%t4? 
z0LPT!v#+zx36yJ!giw?V=oWtF=(KBwJOrF^ar1O=oP&ee4+1rEHH}Zbm3(GwdQ+2+3We7 zucnc?+?eMUd##g@c1nf`f|B+`S_da1ixmbzBwJiqPN^Om6eWNE=p>vVw$+cnRXRx6 zB#kFM48=jSb{0eY$|*kny74&b?qsk@hKHa+4yd-4x7?e;tUg7_i=KT+WS05C0_*;I z9(u0Jgs1eH>etc~)2^aARo&ienMy#;D#%+~7v-bNf>lPlge*3o-u&`#r5}6k1{o9e z;6}74ni#q9@`{hk;qqC4R(h);`0zJAZ1n?>g|XpQp?*s^$gt3)HQn`;eM58%#oYxG z2%rniX+Il6be|2bigPb88g{o~s=1tfaJq?E4Xuc=58Id_p`$A_l{;AN3}c@`NM1Aj zvO~!y@>Vq8OAGoKn4Qh3$1^5h7jpw)Jkf4CZ5i!u&FpiN9?ry9CVU0L$Lo*jW$R@s znw=B|s9pWFv&P9nq#RpS#SL2(V2d@%qU2}Xb z`W00JpRNB6Dd5uk(TErxDT3*Udk|N7-KzRz^GRxWN_b|4qN{t#-~IBT-@$L0cLs*f zx>O5+!3){mVas~DkoUM^*@WPGd_)U7|V&%;d&Z{F(>=EXRX19 zxU6Vr(-Jxw<@pefdSnP@=x!Rt{9KS=NZ2Dzgb$srgVn3e5v(_%gXtdT){?Y1{adP> zml;4`=%&0@)OWf!A!|6zyK=%Wxx!)|FaI+zM`s$S{05*khRGOSMxp%~ifTJMPVwjE zS!=5j!#^KKxrbvLphX6Hw1;wEnM%Ly*LjL#88|{q>6|Co9_}@H9W^+DW&YT`@=<<% zzbytf`~wByZ5|M~E^!ir-wpp;jWnL88~m_g0quTIAq+=Bn?g_NLy5JnQf`!u6o0XV zD-nnw3X@_Mv}XkTe1P=0ARa(LMUMCFVqVV$K*XkdDWqa{2_Pm{qCy*M&wvtNhdc_> z(5r@)TAYZc%Km>xF~2M=g$-LhdHfS8H;HB>aZkZ32}ikO{LRXa)sS(F3c=CL% z+bun%C{Gh*GEG_;n%_KlXj8VejRBAxF$*$ht)nIwgri*4AvJ_f` zaB~V0wbRuRr=|2KvwsN2646E8BEH%jC4T94Dy6l)D?gUMwyAUYbDpN4VX&0c>mlEh z0JN_sb5FyIaTNEv{6~XMvzXG6cp-|H;vjO2+0ql74_hQ~P%n`Rfk_&A{j+fXGR)nh0c?%Ma$BTeBQ*$(S?K79>PO@iw@_W! zd0b_fd=N6oaDg-+?2)@c`ioO*G+jfPE18T8j8Ne(F|?qRGSM@*iyeY;Dyx!}`~8t! 
zN`K|E!NHYeV3i=zYr%96+pH*HtPP#6M=)>{{Vxy5b z^%b1OJ|hi*APmj`XGbL5T+9aut(TC6atU*sCyB~6ay*oG$KjRo_T)Smiit`{ic2Z* z?rx*x9eZF%kp`Uy*TS5Iaqx}}sp~DE-QTD#8~JYUI~tV|Zmyk5{~`gx!&u|~XI5XV zi_&3|((fYdzlhtx21va?_P#toi0>E?R(9>^j4;CTfO2RE7$fDW3ABw#?&0G+vs;v5 z{1A>6C;u!;C`Ke92K{$z{lWHw42$2?mA>px;^|b<1hqE+8zmpk<}}r3<$=!LGdU|% z?Xkh?Yn3zY+Hr=Hq3Zopsu``MmNk*EPqkvNuuSVXcNpHrw@)5 z7u#XQ5%S#0{9N5{2Ff_oJ3O*q{j!qY+*8I9;l%aL8-gho6cT?q9dL)1w&%~8quxt1 z_XU})PK1+b-k-I<0=wty+<$oiMs*jb?Dmnh#v4BbiyLpX9*GV&TL}C*cb%0{wy5b= zha1QOs+ll;La7E&2z z-`PkSaF>43KE?Hw^X6c&q(wvk(4s(P^BH=a*!X`n@j>dDDpcAT`h2y#F7henuTzc zeme+bE~CcVTVl0h88^LX z4v6|hxCfl|7ze(lh9AymW*WYhMk=H3`HqCm5nVgY&9F0*x5*-?FP__6d zSrUv`tUB2;Ji`y@R0V^*5E48Ls^B*#9(YF{dDMrxMpLXjS|I>oCh@IMfQHJKW zA_ON1Rcs_2Ip|vz=CmYuGY-lf#{l&BL`ZIM@wdOdq!Ln=$`QK;;8J)B5UWNL)+s@C zle9R0Hu57aT&e)O1(4A#^(5E&5qSw`xljoeS|w&448_pirG@IGqnM~QA++$%*=ZQ# zo(^zB8UpNDTvy>1S%{#E+}-qqo$zIE*+B5L`3IC}xskzgZ8Z&zZ<|DY>ByI#DREGZlZx4C}Ws=O<=cc5R$wjd+-!I@r8n?1Ls8z zQB}1BDq#ES;YvQnww8D&f(%%8EDnWDjk&F&q%t@oA0$2)r#RyW{_B3)*7mQ)qO(BS zM9t|URX2s_{$d8m;2c-3zw6Jvwo^m|yaypRh&OVOQ9kJ!y`M*|$L@EXz55j%jLOr2 z%4<{t16jgjM2iRjOR8S4rv5%)g)(ncOIk|4FwF z=iO3|XJG6CLG*$ik<(tITvO2s$H~*y#G`hRW^LjT?^9yF-=&|R(kEvP`AWQ4F<9iG zqoEmIk%@e-If*2`!U7Ya7^-xUx|1Vq33Jrh!VwOq-oDKH)KLsp9gdw?NST>Q9qU!- zhnppIRK?SbK&u~(Z>`{$AnQmZgmBLikLzBqR7~sT*QcQJxZ&9mR2hgk&!#i#5t_ z$)LZf=@(z{W3-+UM=o8ZB8Kt%J5*@_?T&7otLX$f^bC&ZAs+U7qAM}7?fbJjc*@P? 
zN@8W^g(ts2=tKWQiK_7Ae2PtuvrXjeWn1AhCrk2&+UdmpivAAQEY;y9oNw5Gqn2AJ zb=ygDKSue2YLrVC+WY<1_)8>URkf?-?zH3b)>v|%$ka-19qBFxBsLf!$jhIDPq&lQ zq_s}p5;({y=c9w4;*BBL8?z+t_GWYO7dDY8D2=b|y{1omqoCkkN5|6M#41&Tht^z< z+>|Lelc~LM%GO*8a*+s*Ep%nQ(%WGXM%nyxb$mU(PDU9W^_hH(e3m_WnW|>4F$)1L zoD%Pl-WFDIt+bf<$8WQE?>-fWQgPMtd`PKSdD=X2M(}Y&b8(bMlM$0|CDUONfAA*e z5u(tb6foS)*K|An#TO6dVb)hpT}Ac?`s% zn8jh!F+QsCS5rKqRYX)oBqs<{eaJ;_yZTuSk6OSvad9}*_I*hi@r3A>`YfRxqj3d| z*!HWjH#xq0IHIahZ>=@y`JwA2V-NHB)F{8}>8`3U;9=NEHbp1z@NmEJqjlzRYqjkj z=loT*W%jW1y7ueL1zOww+o0d{6HVMtP)qoPGYOAoI%EFe3x3di26yW4uX`#`jvI)x4^LbB)}F>m%InJkenXfn`Za< z3$JueSkk9sj?b^&2vx;21(i3JJO%@3@$~?0{8u+lKrw^wWzE7$cOH4cJaq z@?U%xf7}G^hztPa@DO2w3s!kCpCdM`uj>uVVxj?RV1djwWu!pSNcOu6c!<0k;;Kx* zyLiS~tk3H;Pp99uA$-wJSTP#diDvwEOm6+xG4=%sGe=wn-dS01+kB;33J_Xz!q9z7 z`3IqGTYovJWy=nD66!^$it0=6VzRd07OS$cjsWj zIr-BfI*zYOii*w=)ONVrByp#Nh!axSezSXjKr@5BFaxp^T@9rS)s@9e#ko~s*_)I6GcN=kmgx?qps&zR zUz4i_k>g-Z)&4nVkNE3z;Y#D(w`3z{Hb&o55^GkSQTuj^UpniD>jWo^R=W8<+*Kp~ z;ASfi{DO{VW2_C=Lwrl30+&MwL?)PNC@ojwx0VgP_}myh6_6p21tQYvCVJHWr6x$u zx^f>!07m4Jd|N@lCKHDAcMzf<=@`tBXBPg}9gS`MLvRD#PNCU{{y&r7$+l&D@Sh-! 
zVEkA+4Cy=`I5yd%Gc*rk-nxrSzYx+=19o#InyOG&Of<}V6{l6iVTcV}=o_=-KVP(N z1p9t|hmS*HSjN&~5hU=@7_TSV+vP3&vsI8cV7HCfL&MJy^3}zXd*%7;X7YG3_;Pal z`{l6Fy}q|*i@!~^A406gHYuF(r+p$7;o=aW6^ve&_#V@^MM9)#?C^*Kg_MK`0uu{A z)Q?;a-H0S{#m8UJ5Ntr=@fIb6BXq3qnxMChtv#bx80C1*j^y#vy(~Fnx}EWFtPMb> z7WS5XH0$zm2XE%9iB8cvy|CPDfjnM*bLEo(xbU1xbvJ1ymXx_?X8dz^E2UEd>$~*}<;pU-L060+Lofavj zhG%Qil5XoR+rxXKbdkxjnmIX42T_&h!Me*NRwMbUE{+6HdGyfaL@Ik1%hm(noynBc z9x{!mR?R;I6%nFhKTf8h%h`73FArIj62=$fgZ_D(G6ke*NMQ70wxBu0%utak+kHQM zs|>(=)4_H4+nbIn1#X(qa~FWBT?2un0edCdo}5>v+ZQ-Ts%BGtGLtyp z7KazUr(F+!LATGx1@~291dj)EQxk66(L#yBpyi(d{I{Rnb0M&nO=6Xg`h+1hFWX9X zoaRbJqHp@hb3V=BIsSn-m$Wb2OMc|Kf43s8gc^U6o`o@ADIl(7Xf9ES*N?ot|I5=D z-|O{~@{>F_!#0lP`>gmD5G~M%CTJ=_!^TBI#wHle-s{{400$JRcX=l4JgthdKaUs* zW&;ZmLBrsMK7?A;= zmam9~j>W(wo8?(#y2>O>Py6IDMY@5gF=@F3P+7HR*ShT!;fgK zxct)PCGq^Mu>O6q9MIj~#iK`=np7?Ogn>9P#JV^0G^2Y?wN`e~nI1 z(;s+4`IDnnrLs1gN|;XI7$?{l{q``I-+?YhVq>w(mmXJ**WfrCpROZW9b!1x0U+mdt z3KcUV`Kd-4?=WVsj3_TOHQSfthUCe!ItJ{5Vvf+rWF^Tu}o4?*3BpmNMHbi+oi7 zr)gPoUCi|cF&v$QTUJ47#p!9X1{p@jf+ z)sxC`z!QoTh=?3+{)E4jF0S8TpwL5qi*>tRo%S5@fH`$^wc=XHrOxv$`%L5!mbNB( z04ZAD&?(A#dES#Yx>TI6dgk3W$ewxBlo7=&UpW8GcoeG4wX7~IUbxDyz6V4ZU?`q$!1p6gg0 zfO>i`e#zTyNVCRzAVk;&G}0pA4bxuoC4D zn>;T}b)#@KNSj0B2CDP*RB=pmP%NPeWlzh?7hK-WpGqbL8D`rw277$rvgCR5_tPlk zn`)yY$8VhJtGE(cq-q!I^dp9apB+*4P6rPq4X+07N&WjF0+9^E_Im1T$T?QTogm|B z?iLQl{?F`TV~=Ek#$fhFRSW1sfM#a6&{_iESSfw-CsvsSW8&*V4lgjmeKbNkPa`4^ zzV`yHpK8DB)pL)4`B-Ar)V84&3CRSK`dWgY+#8VBJe#SkwxcdjCkoBDm`uR(_}&|8 zr6Xhcm6_*7)Nqv%=5(J=Qwd!(L39=(7Qp;f^aW{%m-~BSm$|z4PdF}YjJ(ky z6^Pm6Mf$}~7&>Af@G^^Qvk6Sw!+5U#BK(@>!OY8<1`ddO*6V+F1zsH>Tjb`JjKCR?h zKmuwyED!(OIc1%9wGZ;rCy;^oC`y>ddcB54jp*Q@1*8R{8!A^cc@&uz_>|GIVuryQ z{uVCBJtb&>0){RFg6kjB#iSl*0}g$``1GUcBC^~sg&6LXb(Nmf)SY|_K=tC1b77=I zsuYCOImmFa;fWSY42+?1-^LBTyvY1&Fz99~D1{XNeCmh8=&DY6YPDxxE3Fh;F%BL}rfCC`FA~ zN-Ym9V{$Ds+1<064?C9Ndg0wyH*dm9y~{7LICqIL5CI;R7b?_Hk5{qx0M?F=&keVS zm4Lc&5#oicD~t5vgIDJboE%Y{|6Z6R&N6|6uhQ`zJe~CU%@N=^Zvb#)sUK+~Wrj6k 
zB^CgU5t=s9R~xQG^iB|znpb|h@Eo{~IQv9yRq0afH693xM|7|uFT|YdrOi_F-)}Wb zrGvi&fHnjjxLqRTS^A>`^1UC&F?lBP;jX@6{Gh{_jgY9opM+23vSvRlq+1wPX45g> zeAx8&9c!>b&i~HZ?8I zgc#Z^i3mO-jH0k~cQis0qZknEZ=QAHRClqeX-z!#gSp@L2b@skKPAmvHi^7^zeHgF ztaqSi?VN-U`2Y*%Ni6<--u|d!mfGy~#*4*-Op;91^3oKRZW^`IkD}t22<7Q`7vL^^ zZEE5Dnm2&(xR@H4`sm-JPV!NBbj|`ReQ`c>_oHH5-3st#x7(GZ3SSb{O{~M6n~f0W z_nRu2W+i@pGl9Mp+`miaHV{&T?@3JmAdWJT94VUFayi8)_DhLB5$$n*w$)kFV=`#p zVee`@e=!N+m(kll#VMNNW*Hj(!ENxyyOLO8ps&ZHo-RL~mZkPgu}=Mj!h)~CM`NE$ zPF`3P$br%7ZTkYls_m*AI)GHnzbanNTzKvz9yN370dUj~d`4j=W}XYGI)KL7=28Xt z7-AX^L~l8!l%JlB-b}4NcuYNSqR%+dDUK)=4Swg}F4tltL@1$$XA+2ncmc*iEXoMa z**|adc${;Q#07%=w6em}SP9##J^F09@Uex?GTb+m$$a|w{yuc{1iSiNsirqO@ps%t zjTf|(#k0-A?4l`dB}p--obV9Qj|J$;ciJ|&5a^A0*YohxQ~kR%*Fg_Y!`tc{%d{}d%x4X~=aGYK zrs>{SEw)N5dQtS^y$y@`lC$Tt<>1U#-o@l2_hkR(F-z5PFu>VW*za@gxFP~GJ|!=8 z4(C7y$Ov}p)|&nsJob=+_E?(@g%^xr{a^&Ur+H;tkCw4PBHnUY2|Rj@?U785e@7;L zpnJAp)yPScMUum-ar-vyLN#iT&NIJ+b$fD20Qoy0s4g<9D7n7?b$DLgcg>%Fw=RlQ zEP{NC@oWHH&HgQ8NS6uck5uaY=GnhoBEYlm|2>%huO^??%>HLTe(~)VuMc*xr?S4- zA$qH%`*7W6vL4MexBqRS_PyYc)q4*aM2P&VzTl6a8u(a$4&Rf~68+uy$A{ypuZNxF zzBnAVE93&{{P4?Zs2e)s2Kz#PZOMFSl9(igokq+faDyT3nfzi&=dVFQpJt z$m!t8=*(|-9+pQ1=a4x6pr@cMCk@_ckHt--k{^@JMs6`dLBF*U)4ZuRoyWvgDmxr< zFZnG#Bp#p$7(2YbaIxHxStB7eE$6#Kw^pUA7Tn52Ebm#EUU4Wm3W*QyykJ`iXl9W$ zuK8EUQZzK8CZI0T%f*=^MIDAhWu?zi{T?x*q)3FC?KjyE+SoZPx~d0k3iBWy78sz_ z(@lcF_#~Dgl?uJvG^R%dRJ-Q!`h8^jh*E0lD|Kd|GOrqxo zIA7U88scCO1g`xI{8sfH`mLjBE^_ALG#(i{By_cpq=@up3`0k75)C5O2N}dmA{>bV z)O<+p4?Qi*%?>G3<$IU(I$@1E_5xagQE;1cW1oX*K_J1LQqVdO@&1H0+WVeQsbAm! 
z=EN~yN*|Gu3qU(xG){rnhu__XIYAM&(EvTZh<(FAdT+8)5nep3>cUpR&)GBB3+R2e zNP{139Q%YvfZ1EJ-ql@B4Q)^bp|Z@oY`MMl+P;OYAq(%D7=O_fTR%a?=X;aa{zbf+ z0COT&W#EMq3w-}(^Y0V;{dbo~0SMK8J_Ka1Wvht1A3BjK9uKe!{Q_YF92}I^!2DcP zC9Hi2!d;mMY?s{N%4TkmR-vR;47|e*N2>Py(s!PT^0j7!m>}2XG%;c&sD-IO-qdm;*!bx!QH{3m`#r}tMUWRv3TO1ry^Nt4T z4}!mQe2CzuT;Q-aRK|ho3orA_A?|dLvFTSdb(DmI5BH`Np`cr|1H#R_PaGXlbyQa| z2C~g|`Ec9j(sNUhU)cFdY=Cv5z&vi)7#`o!jKdX;z0YB*)JG8rT&gmiGoOxZP{4>^ zDQatbp!$>H(vlpn)^Xw0wfnxD9N7Kv8+1iLtS+^Bvz>rasGy5^=EErgo2MNA2 z2$_5@(6D1Y!nnXorkTT>=xr)g|NEcon!~~19_hX7zKZ% z#|7{TrY1}}9uA}uUa_>cUM$~n0(J4(#(g7KPbIlNu=EeKxfF5AI8o_)S^BK?ODKts$7g>oY5*_}i_s(j?%5?h;o88)LKP3Lj z+3L?g_e2mrb(mF6a!u3*4xp$MQDV-zKCkj;H@JU99eZN=33=l_@PIG+QDL)2V^FjJF&lmP za@^~DMH&Gz{_5zk_a{blOr%0mDy~98<-0af(hDm6zW>F;PmoM#Q7$i}e^O`S5nW>5 z8UqA0rAN7ba9IxZ1^kb!BV~f@M1GHB0ncxec!ai1D+1?9zB`Y{IIJoMh%(Lf z|CblQ=#?Y4m3zjiZ;W=&lphgsXZzrEhL`QOLbwquIv$Wkul{nIT`}z5bHKdR%NFWY zuu>;p6V?BDAoS~~OF6f@qWd;4^}}OB9m1>sT6(Z^4rP;|niqC^1nM@lxunV6ba_R9 zuVF+_r;qXbAv{`O%?IS2BfdE#g|^4|c9vTO?U6W-TYyMTnRCAX>P>yr)xS8quF%Lh z$lsGJK%ICpemr+!8D|wK4kYdRj=pUApqrh}Etzwsm)eC5O%&~a=@IzdiNwe|YhJmu zEI!6iquR86Vd;?@#<1l{6Se$wbj#(VBL4zJoF#( zkKp6mIHd#rxtec~m78L`7r=#4aEzGTgsbAh-~^!Fa3A_)gG}kEwaQ3eXDH;XfPDQJ zrjssKoxX>?6vJ#~qkyMgETDrXd2x!IBHeFh7ar<$u-19)^A)1_2V??AM|XGygsr+2-9 zSuw13dlwJBvBk%9jhV^|nS{f+6p1|xGLx?Uh5;El{~oXMMyhAGt`Gx)e}3;rksS-p zb8T;{T87%}MLVZd95qgfOMf^DljQfU3h!lsN@R>Uu|r-H%*R>~XmuB8gyZWMcq-or z;lVC7g?>C0OK)=_TBw*H`NBnMPJ^o-Po1+^;~(z@?Ii^p%#(uc7VrBRqh|&0yKRP= z4d8vR6kGuQFHc0>!%ReK6nnwgM((Xn8&aY^2k{KR{5esI{GVb|-Lv#aTCuQw12j#15EO`&!oY{H7~s-68Yl93)x^B>iu}Z7?oqj6;!#Y8 zIqFz0&4y`QJ6~S=!o1Klvx3epgqc5*yG~Yf_ysqkVyJ^liTNti1<0!x^CJSrZaC^+ z|Hk)6U-W6ODAX%=k>5!lDD4b-vl(L?h3V2K5G(G=n~0V#a!Xgu-6UTCnu-=C%CPgQ~xduJ0tVRbhWr|K9A751604`3N7MV5&a zDaHD=Mxg4fMrVA`>$Yzm9*15WSio;y>~U-hXkC3`xewJFyc&QNzyG6+^vzYo;wlMl z+feu`1@YnQJp8AzmPJsA1=-f~AN!hxl=(9K4&~a1AgX_jFpx1P^YkoG94D<~jQNzq zFI*?lH~q3-x*5Jt{xDn@U8*<=e**WN-hFd^$QwUX_q`{Utp$--YGZ{sKlTwI>UI`>{g{u+%;yNlRu{*HZU86p>h}>DtJFef1(f 
z8<4f1-Ol7lzFV%eYhg8W<^7`sMYwU+KoSYS<|48kz8;fmWAJ_c&A2S~;|3pTgYdz} zRtAo(rM9+sRZ-asE&QoT8(y`e$)F`7*hz7A855vQy8^sBw1kQ7P;mTyZMB59w6xnF z4)Vh|jJxu_mNTrZe+hX^JiE{e2kC$9;!-5C_aYx>o<;R$VkYRKf&^L8HHW90WlE)^ zbO_a7RZ!&h(HzohgRh-r*4tppAoXMGV|V9T-YyT2;+ZOpR+1u()?sQ}FFQ zweaS=eC(kL;XXwz4-|hHzFF%`cxE~hCn3>=)zl38tXighFS3n7lJonIP#duP72WyI z3VM`ZY7a>c=ejymYv<@THUx(kn4a^C zMWI8^6$!^UqCYHGn;sAwI3W-^cwa6 zMXB)So{G4*&k-I&&|$$gK@$1LzOETtSbLAoq|N~;VOMhbD+qmu>e(OXmU(ha84Sf> zgaa)6xPV8DpmsmD<~1ydz7tkQ6--v)|0&NR=G>=YAm~`Xpm5x5=Zj(M1?$UuG8tTz30ArFX_eF)SF z19$3iQ%KjWwd2)yV_THuxGtsd5fi>zxTDPgi(Am1|HIZUHVFLtead-nLW8}!BDVeb;tpG&9oLID?d`>@tm|amSRH8p)2E~&t(lcIW z->xdqFOlvumFp|DU)`(opTsAe;fE^Q^77r>UK>IU=Vevz(ars62EO` zAo~>`!(nQ!;JCc_jwBOBry}i*e`4l;UUwwWJABUkDDeT2EXI6ZoAkPxD$g1HvXtw> zELk2t_>m%8vW8w0UH~tkveM_>Aq1u(C4qS%`O`H$8Q#+}nG}M;k-N{C%{gtf4icWv z4yu2Ivnz%NQ3{16bN~dR!u3=7!6xs$qI{F5sBZwUIOY!oZ3;e5($S>xl0j!&r5NFU zH1xZ#{S5aB241wSISRCMLVto97|-@UMLw%QkB=iU+*nUrlCh|0GWd6`O>|YrX)jWW zII?FR7XdOO+F=4NFR#MXfWNxIK5L-cp*-ISF(NFRz=m9L(_O>CJ5t^dZ)DB}1J*>I zOr5b)etNy05q!7H+XW`pjPmbK%1}I-Uw&N$$Ltr;#Iv^$X#_{A)HJKKP|X5x`OH;= zZ=O%NwAXt)HY(A%{Q?STuc1w}1nsmv>ii}$^}aWNAwDlPZziH>2(zK?$;`|D#S%S-Lb&$&Y%nk`+;S4#d^BDHF@1{A40QQv{1F&g@Y@AM6| z4>hAQkPP-Ovb3w=t|;ee|Oz$ zZW)0`vTJ=STYTI#awgH!9)m&wFs8C0L zYnG?IjfdC_4cHWzS?5|NfQ+6GO9SxQ!#lezpL<5iRQXsW8Ar>+0iT5WR0 z2*@_QGkLvlZRuwtcFeKiNRb!#+^02g85!(3A7&&HYU{{*owc2|H+&J{`S^ypx4*6s z%OQHQf4}llqCTFcmBm|OOy`~m_FjIy=u$#Tgn^xwXC@r)R%WL`H{&A`Hr^X7Zb>%J z3R7w@*ofEGKITZn;akT7+6-qXrC@$Az2_@92-J6vGyrvsAP^(g*I41 z-)k@sXW(51s|E8?AFj z8bxbxJFq;vYc<{&-z(o*{MeYbsXd}e$43<`%=%vVL#CD!RtWI9=ALiorpcqs7^8=@@5CBlk6*zO2E-$%P4d7?!5NxA@e=R&PAX#mJ2a`)m9J zasD+o)tUXg#VBt{Be2s|r$V-7jYuApsg~W(&WHb~P++;k^^C;eJN0gSJdYXPXd1Z* z%Gi`0^Br7!nL&Yj7uZwi1a7XR_hc`~RbW3>d^DR`H~sttvZy>I@UEP~IpRQP>^d&k zdVhUq#l_*Jk|-ZYS{p>&fU3hstHT$Wf>Nh*t&0a8E>r`5=1{|<$??mpa}gPoWVS(a zHfbRvKSiD%y{{UP5BYI zDZxYfvCv=yqS}?E$hbM1u*K(${*g41oxhx{_gq8MFL^D8zJFT>P>$EKgs!;KI@e-f 
zYqbaKW{v&xJwH5or^^+VViF#*Jo3b}F@ajlZh{12+Bu2R#0=zEtVQrJM0W34sOwl%nU0!b65*C{=5jv`Wx=TJc_BIJ}*3MsRhE$Eq zcjBebp}-v{t`J~kRT~fjQEI8PUu149G7|5{hsKzvx^HZ*e@r$8kzkz2xdaXhK-)O=eOfnPon95|r7zaB+?{2WWFe5?zR z15A{Ley53UP)q*W-weqiFi#w@YVJ$$%0W~%IR5H@?_lvxppocFWu5k^qEyhNUzHqC zKuoVG(8&%TY2w2=b{yvD{)ag@yWWT!gs=saCcWr0Gi^?bQJOLxe+o()+ydn)Vw)86 zy4BG>m1t|`Q)*UVFdLWc`m`IBrwkUi%UubpHdh3MZPkc5oP0|4X8_21f;yapZjL1U z;yODa=)T@s0_1Q2Rn0;bx{kK3U5-2NX^uB3_B3xw%6JYK;GJAW<-ZW~xdOiSeK47e zm!IwD!MEgERPDf1RHG{&AQ~C*ULBe87NNxZrZU(En*s8GG7+ycKLO>ej`?t5wOn8k z!U@8>ICr;-U*6j0YZ0ZdIpV+3*HWl2ChpwhG1cyGcq(Xq9Rl`%(!J1-wCmS!tQI=F zz)5)xFl#)9$+SBcl4R!{HHm%l%&@Z7zy$N)8XZO87?-wlW!3vB{~Y_(IIjnlV#<>O z>BL%|H8*y%UJp+3vJkYTNP2PlC&*EHUTFzYsuxjoUvR%IdMMYk#+4DEg#a0eGBZ9< zhMzbJ86zym90ki@9X16kC*IV*8|%#pyRjjRcoREHoBL7oIPi&Rio_`G`WqoLJ8PgL zi5RnngR-6*12kG%zyA}Z87tMxYeVq7txOg=m2X4U4y;Trv5W}Ot>W8|O%VH=4af$I zzq}=NL$9%X`KAxpcmY9Fi{buzS{lm^vdqSqQt0_ovf|p)u7AqvW4efJLg3TY!l5SD zx(-4?OxUv86A~Dkmymtwk|CngIhEG`dJYlb=XAD9a=NfW|2!s_ zU}V?oc7|TcTQ1=s@lB+m`%hIy)byXJ{hI*&A^himhKs(;W+(TORwukU6`Uz%rujRF z$V!R~B27048uhSWxs5$?Bz;Azsqf_1Dy5aKXzQ~Bh47j4cx)^MTiP!mLc;ShVQhLz z29JGnQzHq<)5()f;p;+rq1(;0M<1B(=6s)+Gm*HRcqC}gp^ zRb-21MCLp}i)wzbwNL^6Nv0%C3ZOMk(?f@yrzc-g=15r*ii^TRgjuj@`7@zNOM;(T z`m=>ClM~Fqgh;rdq-p(&u!V9)f6xCuVy82=tFZhsSu79*uVslJVlq?=S2{A8Nr=x7 zO}tZs^!D?_OXK`X8>62C$7ZHY<7lA=S-zn50$d~jS!VB=v=}ueW&!$TB3$iYzR@qN z#CdFmlzIKqX}n(=O1h3o;4mQ6?RwcH4uMLfcEe!g&49oI+| zk6`ut4=N>Yh(>2WzzC*ysHv)6NiQLlHIx;kRCz{`WL&)%f@gn`Lkj*tT z{sV{AMNh&;!DELR`!ps#@gMh}t8`f=mXN_- z)q1`wTK(RQdI-1!!9O9a z3=4qyO|k+j+Tn8oxLB&n)*<6Td_FJ$yPm{Sjyb5`2vI~|Q@`8T_OZvWc$_r^Uoaa0 zyk-e#=liDQ$V^f(A@^tKUyvae!ta2x40XiS`3J(2Vc4()r>Akr1BG0Qv)V{@1Pb;P zzD)j$PW!i`%a+bSl?$+1R6v+$C0Dcw6~0teze2_CgEYjIVq{$mFh6Q9imc%fF2odK z{&~^1Eb!w+L-1n5_f|vm%dsbpK%&z+@~?2-JfNfqS6q*2P=ZCPpl--CZV zt=8fVOCR}@|K{|G6lBlI*?hOLH{4KZYTJjH-!=v=d9ZNCtcV2HLFi}_@U%dHAA3*G zlQiy&W^MlTNV6VlUVc;XsRPw58%g@;yo^&Fl?({z_(x*}C;vyszz!of))cj>_7$;> z^k*v_E6WjIwEgtuOtF_hY8x|XI0RnIUTGNpgwKV#JL77F!zW4${V|LzJQqc~cNhG& 
zD6Ww}>H?lh3)LHv4m-e*d=rV!>9R);y!*JX+NkmR)2BAEULwOqbN!gLoAa62C1X26-QQ`- z`f)#Dp>0D@EGT7s0Bhvw;|@PxN7CU4fX9MtE%|f$Xkb9G5A)0hh3zHz^<;JOHW-=@ zpd;o5uSH{#tKsyqckFG*dU0%S2jn1wt$Wh$WDk6aPzYle`OWUk=#2`h&h~8oK-GEV zN^Za7k?}gHCKyL^q4iXNrJKw&bL7PFHunNsF|sU{hIEey$RHR;JT~ex$p^6}qqs>i zfuCJ6)a9?DT+uitaJd3i0prvYczLfpd51x46s6p~C$Er4&4&qNI=#mC*HQcXu;~N- zXi7n(c;4o^5nIE#bSW+0N+j`K)RMcfCObsrXr&4k(og6SO|v3lGv8}o0u!$CU;5jn zN$uO?^qb#vW6<&2Xloe!3J(OsqA)=nxJLmk%~GH0So*j;Gdul25;ebux`~qB$`O3I zN(oSvc_%xVQ`l(xaK=z-e_ZSX{C3qcEH|`N)Q0W{erPt6tN?x_k{bA!x_}ohTOH2% z_3&JNO(k6t`vOezH0J4#@sPNl5lh)pqQ2Zw2Yhbt^rOj&vDSQUi{P#vAuQLGNwjN8 zvx*zSD{l!j*H+U|Hm+RemNej@4{S#~suXsHWNpvdo{`bu+|4J8|Msb*`(35AmVp8% z&)VhI4UZGN^b&N)xFo)v{k)uSe=A3^WOL!g!|Inj~_a_SiG(1eQ$B@adp}$Wqab8_WXqjT4{8bx#gxMAc4-vZ1N_9&!Q5Lxqi( z%9mwM6B&#S_J2J-i!rKxQeH>=Z7^8{SBE(&KJzD*vQ%y?B8~`a`7DiUQxvjcd^6_a?)m%Xd5< z|E1cC~Wv(4UMSyq_x|=MgXV${hYF$1)`#XDX)k@>v`dZTDwYXS%0;+rcg! zL(p~B$kBDvSJA$%`%wT>XMHk0m|?^V!5bgO3SI;6WV~`x!p1_^2{ajz`KoEdnJCkhn@oBK!fH>`Hm3V*|QQl4p6pSGE)Al3t~cA>w^cmRki z{*Vjq*0Ip%j2CfR8k>8VI2Lhx=ClRHs{4O(O8*sw_VKb>H-7yNG#2^YVX>-!JkIBs zfO%-`#ck2|Fp}(fXq-t(4)0E#Ok103Q?S<<-i>=kSTN^j!D&qm6e06DA{(+#;ISO>v2^7t?yQW=4{kXNZSEuFqedbi+%-Bv zzgO>#F^yC9Q@h!5i*xxuEI>f%mudSOqiyd6sr_9H!eZZ$g~F}YWtel?QUKV)UbpSY zff3qs^ywKuZvKj-MT~Y3F=BOhIB^aLHHg#lWw{>$@6AxOj)c1{C9q^pB#ruiLE-;Lqy%YSihwboVaSd_QQ7S)Zqvi#{LvdC{?5Z zfUGAjY30_pyhQV!i;}*qj7Cfg%*$#Lm`}3#U*}%klVZ5#y}K2B z506QUqXxt`va#A zH@$tlS&0r>$i+l+1$|k`Py0&$~!v(<*d%#>wqAnFPT+!_KmB(d590OImpbvbH}U7$i_Pj*e3dNK;}$X3-= zwpCDD{COr8f2Q51lN0g-y7U`8&z{UFgTwdx6gZNGd#g7kv-jXcLD!4zp()z z!i=CQJ`U!=k>l#P_w_2#G}*eJ<|z+^AA*OrLw#o-0TUq9?gj4a<8!qY&CRkRb8e7w zma?lLyd|^^bn|`YOP8|^8W_Oo#>o(#{Xhl??+tNkq+Dc`V8h<=0`#{!8j|AAU3lOy zk$Fo9yT!<>sg117^m#QkdhrS^>#CfT3xT?O+yGrbRzP1wp~0vumX{*cnYWe`J8EJulO)K*bC|zG{h4W8ND2={l3UyQ_z#GZh8(`+(SI$3OrUB zB^3Jzb_y2gIr?pNLIS`7;Z}`CDh>otH^m01&*EF5Pm5h;2=LMUle^5@J_w8@SvK2I zP_k^qw7mrSgtg&p1{xv*z&hg_8g(D;p}RBR4ds7dW(L}S4* zUtyoD8Sa5~A}B`lWyKT@E{p>?5_*iZeSfE&wE2=0*PcaQAWvtv&wQgHV!obcL$1z} 
zFjZtPOu&N|JCBV7F*OB73^o5wGPviOCh8jPw(I6#?c0gWs=<6aJeO_z2(A;g!wDwMqZlJW~$r2kqoa&=S4F2aR)Z+C2)pS@|8VvwekXx|dwmzZ#=30F^jOKL=~ zQESEw%Tq(d$&!MgcX}ejE9r4-%q1(2jZUmQ_enS=;;RcP!6l^}0D1n=%8@ww?d_J> z@p;?+O2aRkjbsqqNTb{L(4|XeWvrp&M5P}=C!#oReR5-O2E|-&Xe#_u*0u*b>8(XPQxi*z8aVrK%q7_7mBbI(H z!p>(Bry|OIg%$KtsXars+PP<`+j_S1Z7R9O4@v7Q(JEUHuE>maw)Xe3pCIOj_C#nd zrfYGo+PYBDuML)`6m@d*`KXGHq7?K^Q4=zkP0E`o%PWtcm?;tXehC_163|E`&9HREVP{DUvUgsJK!t{4S9`d1;9iWB0CBH%1Ezx1>BlT*sw zB9o<)%6>r2&HaS%Z9EAnu!FLc33L43#5(Irr|-~x)r;*(d?Bv8mK=GA6Wy)y+=C*{ z={ae)U2#pnmw$U?)xflDn0B0?;FfGRvOWR~RXw70+n2UZvOcc7%B(j~l#@07dR4v9 zs$pd;l%+YgZ(SRm@zzBJJLV;*m86ItQ6u(}5Sgu9jKIO{1#3v;Re2-24o0o+&rlX6 z-7U9Soh5mqQea^u51H+>%{6tL&_VFSSmFh>W9f5K-wB?1N1hLp1k_?4s@^yw-WrLc zyh_3x26lblh$io4dj)r#(nBMI-6{X?3{++w$v-4eNAE%hwK;&OKc5UX^I@>)dHu;n z^I{z3v^=jU$2rEVwuCVfG3|8)fSOkc;xKx~6ij&IG=)P5zz%*0ydIJ|F? z?3P0B4_K@C{Rr0$yuS@TPcDziT$_`!;vxHT?#mY}|6EDU5eUE2frmQf*l%1k(RG){ z@v=KQ32Hw>pzNEcm-2sApLIXZJ3kX{pAoANiNu|5c{_zoDsHfH4|5kNxY=#U=JdEZ zgN!OE@v|U|6H>z7_2~$TDwib9^p@A>btb1L;#4dpn3oT_Yyxa8VQKrp`A$zDoASWz?4Y-+CUKG|inFMb#`!<*6JT)rwl ze#sXoW#$Gz8Ku$8$(8QrKM>eZ>4p-$0NOF%Bx(<4l^4s+1+>~x;p5=CMWpSm7O3IA z(-ZjZhvmFz@jdWe*ZIghWGROb5Z%JyK(EIt?Bc;Yu&a>`%*TMBJAh)nE^&?H_HGD2 z3TIaUyR6I-A5gEeoXLoKh6I36BrnBWJi0e5GHWlZ_8lrc*;0HSYUXZCaUjmM<%7|EE02HD7 z^0J8B0mj)zwUKUnQdLMb0fITy(xOcH2dEbz0CvBW0;a^oN@GCM;bt0-)Zn<42-_1v z>Ut+pcL?RK67y!D@Hk&zGE+Yf?(xs*xBr&a+kpT(j1Vi_Ue;#Fhc6lz1*xMY&abM- z`E*>i`sM(0Uqm7dakj1GJLZn}DA*lDV{hun1LA|Fj1c$r`Fi?al@UCVfi!9J1Jat? z*X6OB8gpNxOlR3*uBxJ5=Li7lW2GRsG56TD5aN4yb=(b1A)+4v~f zRfKu2I$$Vs{}K#P3x?1Bbk85VeqPUcCo^&F;(e*_RZ|;#6Lk|WsSBsEQPT63fAal! z6F^qG#hdoIb?nF8fVJC4)zRz&Qm@%)xkRHNU>wo=rJ<-ttxhWM5&PFY9~YJHa*rn~ z)Ik-uIH0cEYa?lHy}NMLic!QaJ6X{|{>MAPJRU0g^OB@2PA8+$0?IK2U>vVj$@&7? z6ARC2O*}i`5I$Eds|R6hyx6R>$#RNNlKp$%uqb;>@6H@R(YU#z97)uegneW;L4ib7pk&1YgLMy}`G!2p zXQIP-zFK_!K=Z>kude2|zilm>$|I zB;rl0N9z4y5XLp9*lF? 
z1MMcXKT`Mry`j@j9_=w*Rb&?`s=K=rhnbY}S=eeY&V6Ql_V#9K#rqcVs|W$K!PJ3V zW{jd9eJQ(Iy(rba{Lmh@eZ0oA##H|K@p1m5CH`t+ z4)hkahux(m$^C*Pswo?5lL*ORzqtss+(0Uwy&8tFys(JRm@Ye*R{_J znY3Y8LRkD^o;jhn>-9!$QZOmzlr`U&MK)}cJ*sp>jCgr%_;e3%8Uew(#Qb|uPMH&p z`vA2PmREVLldmQCDo6TDwUr205&s+q4?3&Rp9bTZ4(-knn;W_!#2=|w%tF6h8KAFy z=T6hMeUUCpGp#l5?&<-KIHkQAiRmZr#jr7wT5e=PDxR(%6nvj%xO_c3J6-cVZEwTv zTu%<)0UoEOXV2!9)G)zcJ5F0}sh)Tj@$76*tT6MR`uLl5Y_3t{L@bOmZ)~27GyJY3 z);{41d${?$scZ4k5gt~Hnq$iF^4Jcc(@~J>{RC^-R5nr5u2RF}Q1`FD`{-qC!uo1I z6qBJb$pjs-z{`p2o}vDB72KaEndbxQ_&@H>^_=GXgU_bV`}Q0 z>B*x`Le7gj`1LYfm>>OA50cya(=?c?{W-I5KlYu%r>m!;JWt=t_A+V(pVo?|&!d7kB^#jfc?$53#=5sp^-@;Ch@}V2!dC!b=qCvwV z#E3J`YRYft?q1i!p+ZwiaIZGyBs;V(T=-}Uca~-b0iqZ*-^+{aXE5%Ty*}{^JGt>u zamukFXM_A@`3}0aV}=uac~wAsTRLWc-PH6Ha~y3yZcWe0aLc$+lrAfj_Md*RH-h>; z=bEzn=Tg$=LpYj^A#%|uGjwzvb0eukim;baayIg(I0HqzX;-iu>TPgDDm zj$+W)m*b8k4gCIDZzTlqfE}9m$xJ(w4TtGz^L;t4*|PU0KDLcIF>J@9C~$!P(cf?f zL&_VPyyX7^hfH?!hxr;3uI@HEkLHB@ce3C<q+)BO&*1oK)tFR)EYfzwh zD)CP?64<%Ddl!3lf{wZ>m%5l$UV55?FN0-ga}vf56jy%2NOnXb|Ga%vCT)j9Y$)2~ z2w)A&4u=kct>gS%8uDS^ZD%$ewUbh1dG*_3^;^$BYPXfN$!pe?>tzA%v@wr@K+AwM zj0~n)n-WZbxd%7YPs&?$nD~SzF3{E%3#&pr0h14Dn5wj;99d5hpUN-@vW(>@7#=cQ zjUtLcbO87oy(Bmj^ej(B&n!yz*KCypap(TKIxreNke*_cSpcdG%Hl=^hk#O5IR?kj zQpnqKWmulj#Q%U>0ns0^!rVYUA19OB8^wR#))C@Yfa&ROH>maQG+`jvr=`0Jf~?nOFL2&Iq8e3`=!~Gd7xT7 zq*Q8HvKfRAjp2Z!SeoL%=AW?-5}!negj?o>0l1|{i^ByUc`7>x@!J)0W-#O!e-7Ot z%?nu?qhd|<%B(H;kw0pUz+ol!^GhMCL zRHx8PBH!J~73q2W3TD3L6I@n;K6ukjX&^q&GIL^jYhQfmVLSA|~Bb&*k9 zTr+{gq-elcB_i2`CPg)t;CiNl0%QYuH>%6UohqW!#lis=B~HC*NwUTV_6$-qSc;0?y^e&Chq#qZL4NJkrVis z$Mqs_y_!;phN>YF7z2gU(dF1kD~KUn4F(O(b*bdC&%OEQJ#WQuTS~M;Ldt?fT6^`Z z5`cu3R9bUS{?HbvixtOqC)ReRW`8r4_NWH(|$sTC#`XV!^ri6i` zA8&GHWXh@lIl;H@+9Qs;vb9xh)J@2K9v!93^>=<~CZP`AAIde&O03sp%dH*2(~$yY%}%SCi%m+h-<5H=pSd=34gN%z$?ju&AeedDCqF(crc z&V&*LR$uW3ur5bEBkOnRqEJI=#y=F=^d-NQvFMG=F`*QaxA#3o+5a3JkZyr}5|B%Gq&@O>2u z+8^`2Ah4H>^ge_^XL6(+o)Qi%t-c&~IW8cn{x8pm9ZiK#3Ri%4#MTajRU%s0K6O7mmxlczLXBJ-X1Ii+%0B< 
z#q@Sj3U12cBflJNb?Pl(ni(v*FTJ&w$M-1P?w%0@yO_G0AQa(;z?QAO1+AzLOW&Sd zQ8XQt;+qV7LjO*pOrK^IwqQH@Gw{BNo?J<+DEOL20)(eR9||km%d_%90ntDSY;NJ| zH?k-XzbZdZdD+PF*qtwKbNMT{*_zSOZX4%Ea9jO8c53{=eLq1${BLqx_4&DCRNZq( zKTD?!slNR!;4|`D0KlKAVq$Mj&{@irYBm*F>wuA{z;{|%RCyRdEFT>Qh)D7ML;4rT zd1(|8%aVvJU9k+4htGE=1LK(a_r7CNWd-{g5^VWh5~s@}v5D>O*0h zgr6ZpPsL4(g0ju-8dD*__JbzjOPp6Zpfv-aEOKjOTt7)6&Y(HQOkdxPV!LeUl-nmG zyS0gL&UUy+==LGMGNNrvh2{7YR|kgZYZX!?V?Wuy+5-+Nzv+FuKOR z>vLG?NOj5i-D&0viR04q`S!{>eXhmRU9P_*e>?W$4nO_reoM2tuOv?75pF0(`=5rC zXE$v{%;k3j0@nTk95J2xFXRC0oZ$6SVTL1^AaEVvhTbuVH4FCE%{TQh`OtrG$P!O-_VpWfvW zV!5uq_s4L8{Fb81i4c{|z| zDy`E1YC1O=am?f!wU}bpXNH!?%Fq?{-r+nfEdh6Dx6%ZwjGa{OFzD%fRX=JI2j^&M zWdn}}Hbb+=v%c|A;5$+6NC-rI>+X-_P5a>~=7;rQf+z0Rr=q3p6cRqYU6ZbyQ;^p0 z^x4gwcWUgoe#*of>U+$oIk|0_g~4xmy}XscNln;;O&?C~ekaUN@-`Uuy6Bef+1MTZ zSg|3mu-UI=>z0o6G=)#7_%SV5vTpNUFig1rT))m{V-DkNz(f)BVIw;HwJ0IbO@oAZ z|8Bmk68}zOlVmwqX*0<=^{sIKV7jmN#@HXqn}zEZW4HL9m&3wb_a7-c>MkA5lO8fnj}cMResp0;PS|}AN#og1C2h9!K7Dka0++hX%{ zXm!Zjv@ey;latPB1J%4&V!hA_ca&kWE}_KGwHY4t8ZlcYb+WyAy$E0to+U2numgVP z)3hJBItgAc{Q*-|7l3qsVUgzqR1-?FmYAgd`eYO4h=vi}kKI?a_N`n;L|UBCQe66@ znQ|lD`+r#rg*mnePo#Q-QR?PunmoUZQJ|3;COJz&fCk1T&W{;#iS{&lZ#vXUANf{_ zeVAIO8uA&{SH%#F<~gD;;=Hr#HAbvH`+}C|>w1VfBdphlsk!E-8$*ySN6OgO|Dg05 zU76^B#W`*R0Qd!A4mI(P2*z(DKfGyri?EB*xTy1>5b$j)CY{H-Jnz0FCM?~Ht5}7K zuj6giCZhq8sBC#OVm>wdH)^HwV_qy>-^sUpHB8al6)Vx4U-*T$ZAv?ESLBtni2(sS2__W%E_yL2jXR#3%x%7;oQu%1NZ6!+GWSJ9r z-S+|t>J^(EF^GIjL&lpk?$-`uKO5mzuWd=MPn)SQ|0lDl^JiBKCcGWPr1kAHRPsUC(_11Rq#ES>mIik zRhxu;^h0e4&O*i;idVAm+3+5%IUjEeI1#pKcAinH3`E>~^P9Ii9IWv|6XQzcp6wKDt60ycR zh0z@7ZUt3`xDg7})aIu4&A5GE>Yw?ZQvL6tMu<@%j$x`uUlAFg6<8r1p+%XodCcH@Nf~eLk>^6=M&cI7P@jWdx|$NEMyztq~u&rb_`L=EPbntqzkwfxln= z&;zoV6o0{4!W->QVIsCCHca#nU%d7)D{&IPP!3t@swiH`2xZzl7AD>2$J+2N%_WXm zvkeP_0K;Saa4MoDjpcFGCLutQaY0)EdJy8a=a0rQ>i5i=kmX7!d*-dG2N580+_&WT zGh}h)=&ha>%Y!GFCiT9+FG<@W) z0RD(#`*$wDnVFXOx}=L|5?+iF3#wY}(T}%6PjS`2Q{G2>%tQwZ;V~@QhHR8`3p}03 zAG#$~A*ruM1`{RaHJz`pTAJCv7F>Zm6R0@Tnq*%Mv&P`UkB2kbtg=~MaBpiAq99e;#a?JH+ 
zuoM$R>MHpGjTNr~6~t#n=8;^><8(%J-2sW=rozNq6T zZL8%=&-Z?)0RNgl9D;{a8XzH@r1Na{e_8>(?S*MOr;wj$%AS)UR| zRZWh~{}Hl1o{ZCPYgZnyBA2yO@}oREh2OqoPwkNm=T6PZtev=b*S_2rInte4&gD<2 z!97AzCp4~N*?Nk&?Y#PahXDbLk{$La^BP4Bri{SY*4Zo-qMgS|*0I)-_n}F6TDwCx zc;6(m{yrt77IN05(hb4HIQ#oC+4A=sE&`?_vz8 zJLiaBi)$11zJnac^DS})8JDq>Nn9ZQEPF?SXE7i*_F!RU)enul=Krt&@~MZ#p?mQl zAllf1=XSrZTa8=yYQrHv=CNgwBsf}3_$%_y$$C9yLk%| z0@BjIt|qEPf{BRP%{Otz#Bv;u8%E$|#N7=J2=P@=HZo;?Y$M041zh5>wT$XA8)>f& z6F1F9zpA*2$PutSHs9lYYLCO`8~2EFVQEr0$9D5$svXQ6r=7)T?vT*$Dj?#|I3C~x zVbbjPyLU2wqKQ7sAev;7LYCrDoi?VjW$N4D!(U)=HgRi+^3>g@}2iJxE^8!Fft=p!y zITZh916D4BpnY!Png%F8tsTwoIkf`QKvRSZe=Ri2#JIm@Yr~37f5$MF4-tD5ncJb+ zykV+BVv-NX-IlB4DkUrL2a>3($24orfkX|dPi~we^9KsUwu&wV-%&HdOk}}5A7uW_ z(qBC#Y3Y9aTfAk8NO2gt@qWD)14{U%4~*E6LJjHSn3dLn0=kIdLUn)}mY<_UHOyRC zDhZlL{p0~1r1Z7ZMCr2-f>g?8ao-8L-*suI69`u>W9F>|uh%wK^_xkRd&PUlUAC$0 zINtD!oJJq}N&s0fF^7>^i;Nns!L4TuKGPhxd2NU_OuAS&4`goNP6t_V z>?V2v!JDgFcHrv7%yvy@gp4Iw6DThE>MjTGH`{P8@Oscql;6IwZhI$soi=%JVQTmR zcHoF#a()WP_B|><3g=Q(?#YA_N8@gV)X0=)>~p&;s3LzF5k*#I;tfV9;1^y1ZgzjR zrQ*mk+v-1``@gTeR#+T{Sq6tNL)c}9If`oJyo2X{Co&L7K|rb%$-0%LK4%*+!ot5B z)k7lumZ+85b7>`Si!Zf#U)rA+ipk&GbL1YWQ_0J!!D&!|8i_qho2)6idJ=)1o#Zh29u*CA>2fEse;?ic}l6LcifVerS~hqdQj?l>+J|KMRO zaE>GqD=g9f{oaQI`f>3POGiq~PB$UjhIJ6yT# zUxA1xX4zD$`3JKFXApMTi@6vMTfx6`5}8vL?d|~F9uw*fR4%OFU$aN`q$*~7TERO@ zJ3b!CnLTzxYrC_BcYnMShe+8pnjCd&+UE=rd-<|PVnLROA52Smb{i=WK@o7~tfAU?!UoBs$!~7zf`y|$BJGZI(qOF-Z-l;UhS{r@cT<(BipVR;eaXB0u6s2;Q`q?8?tUm5;nC-sOYk7B90A4345y)x zyG!K${P>FHv%4EM2qGl%HbZ;;*}=0(p>;2{)A#Bw`Mpi=`qSn^K)MtHGm7lC=r)H( zt7gQu6C0oAxJ%aEZN%n}tIS$`rwMJ%Ji>|BD}LB?b%*}+D(vZ}ZU+MaET2rP1i#|w zz4eZ82NZw^!n(aca1OtTw2{2VLA0no(OvUR!iv~bwU8^EfRbXK-gfxO$RY3bMO z5tckmea-Oz+DkB)4@XF5+cSZWW-sz=wTZij&(;$=>7YQ7J39g9&i_vUA-@6H&+ygE z#&qOZMg1EEKyAt#UCi=xINkpy-}10jNBht427Q*BQn=`R*_I|6fJpgU$o(atLNAOt z2nF5-1W;~%i!m+>qE?CgQFK8XN%RRb*cZPpSGMm4p5_f5%5SK=;Uf<{Mv$&-XkbPj zd9Q=Z&$^`>zcr3{x36_1mod%uUeT4MTfEmeyPx^ZOlz#lt8|f!+e=cgfIJ|et4dAp 
zbndhz6N|DfQ(vzrpH24OQ}-e{dxe@k6p^AZ`cqEi0l%a=7W6)Y5MZYBom!i1n4{@~ z1xgu$F_F6xRcNON`nVmSBbi;*%~=SAIuR8GW}j`2Vg+IJ8dErZs0uFifK*fSd57pl zI82)31%AvSrw=)2eJ`$Ww-$ghg z9&pCAauUU-_rzm3m0(#GjdR~3eG64z!1;$nG!+n#;}`&HXO&v~c0V6~QK~q)E5L6Qd!->W% zZeKdMYUZGr$QQz_0#S6v=TET>dI2_HSC0mev|=Ok%Szb`VPH(fHIYTm#ka_q%d(Pe zwKDoe#*PPWw5F||x-uxSPN$)r-az#+5CFLX;EOQ!8HTTs+CiMQ&3SLz z^gp;dtEf5xW?9ePxVr{-cXvX9y99R&?rs|h5Q4i~fu(T2JnLV7iNO#JfAL?ROq_vvyZm%@Of-EhY*F(;WQbj1Tzaz2vU0VHKJi z9b#wp#{ukUtGSw5p^5)nniZvvnuJT~600V}EYi{xo8OyuLY01b*D#01Y_i?8m4? zRp=jXaFdWJ;2DK^YfixdRY#-*&MzlI#R7jK)%4*DKPq(LBuT#YBt_4<>p~3VN`Q2Q zVP|*fSMU_#{m+RN#CUv7QXi)aot6$?g|Qq}dbM<3@i7~yko@h}o-9i+`DKvPl+LoiT11`2 z%WT2!=1&n8kl~wHwKnH97VBzox+=@U@mtCo{s;`cg=h2|`gMHxGHt;F%6&CN-`H(_=SfCzz=xWM1QQ*@R(E0j7Uw0*O*UEg{&~UK=c#Ee zE7^)0G0b2oRYt3DVh^eT_p*{%FgPR)Ee$Icl1-IzRZdj`6U)LnO`;l|s#k{z)%-nX zZr$#N)A=&~Gvv92vgPh(CuL?LqwFB&we?MK?Qz#*+R_kn)JO6VcDvv04RrU24)ERa z$ApiWn73uZL8zT3G7C53(jS=)LEA4>Ih6!LgjkkU=4OUkW$cx44C1f`hfq%k6XO{N z^FEE(*nb*KMr*R2bapHw73bq>zf6%RYH?ZzdA)Oyo+1DvU5yxoc@gj*mBQE;i*W?^ zhQh9{_TDuvTAHW&1&6O|ycR1WrL)vHGMmn;CAPC^L6CB1c6gw7+lgfpfM}0l6l369 zBq9O#vjZj2H=i7Tw&vKW!xFUPB8IUSd8gmDRBR(mE=f)1^6BAoC12ryt`bhxYB% z?}`)XvJl=wx{*U?oZ=Av8H*|I;+wiBbGTw;$^|DX^36hm;#+;}6QNxqX$>|X)GI-m z-#>b|?7MKdi5WFmcQ^*d4Z$F_9+95M#w>Cm&fa`iKx!Y&wv;M62Ii3`F+RV#F+~vv z682GQ4&;cKZ#>apj&iI-lK`2#P`8q?K8#ek&6%HZYG*oxyx*zV!YxAyKYnvbMvsF5n_9L~ zJv&p2=x)gbsUT@3kh9b~lw_yduX@#eCXwi;oLmZ6487M<=L3ibs)*sLBs21tcU%fy zVfxa?O-Wl`M~ zn|)e@37o&X>*s@Ck?YV&ny^!{^p~&)djkMZQ;4CcmQ@c76jPtmNn27WP$l8JZvxfY z`9s?*MkhuR!!N8i66f>CswXsYFjYBkue%s5-(TNOzQ1Fx)0Tx3J3MB-IJ7jAc2QH2 z27mB4BmQXM%vsGlxNZZ6^pR=Z1iE1)ihiX)^dAUj zvc*n+pM>VCegFqh3o@4MS;%eXWI`N;Tfje{Ize24Fi3J#nUSz&&SjFd=Jq*6-z*{H z^Mjv$Y7I-zbux}52$gEu{rdzA6$7&Dn0T~Fg4&Sab*L}VkznBLPz5GCt%M#yw-X=v;M3!rrpoL{mtyzQu8 zK7=B0sp!48KRN9)_-@B!n&}Ukp{c`76d~iIf6unV%K6*{i)xX?t#)#}J%?0cnt}}n zb{-*Gfr5aBdtXc_^uqJ;fq0jl$2_3UvmrJp8UXF7&IKgzgo^)YYfuzGmq-#pz_R>^ zxij%H>UQAKB8d|M^Vksu>!*e%9^YM!G3!<5AO&Fax2q?C)%yOab&Wzd&$ki)UqsoY 
zzZFcrWU`wB_>L(Xkj^%BxT*k9&iF(Kxh=qlKmEGE=?K~!_|C3}6CAY#7I!m5qA-L= zRUq+3WzN(^Woch`#>Rw1?L8j|2}avt!^aB1DWA7jng5AkQbRHV7~|Zz6LKDycct!b zrb19|S=_!+8rt!7bK-_O=ZBy4?M;Erb{=Rx9Dy186D-QUCdj~<2u+DT=X%~J8ET6C z0#m2QBiNUs@h)pu=osw;b_M(G^l{lVQxK>S|N5I;$aT6$W5K-+J;cmZfX8acU3=A@ zv~nPL=5N@4l>b0o4e2K&UEs@5o$9;1cWz<7Gtr4pbW7s@!?a@CdwrD88w2a9BS|%# z2Vl4Rzo`LT4WxexZX^o&n|@}2lpIknsr{NQML_rJchoseAMyHB;n0(vzhAbU9dLIx z(S^VU5<|xVxV`rUXw$S+aFQ`i>`;+MFsZG#`zktk`=H_N!yUYtQC?|Y;B>;6{=^S+*=S+Fq>XmO0dO_k0lYbML zM-}27h+nfE=0MAx8+f_*>$SbSGP4J6DEyYBWS$uRfzoDnPE;aQ!9F0jQ2kL((elsa zuk1l)_hHla$Pbr2SaqK^;mH=A1eE>kZ7T~}4mU>_S*1^%3>1n1AfE{5`QyV7rC*_Q z)-DI^{>ar)OC&2No3@P#hBo4|JH)gHHZ~_Cw|k0>PAoG@jHH^~017~TPXRy0R!@F+ zhW2P87mD&B#yI-4dtm$Taw;a4HOFW!NSX2bt08z9+18|SH5tSch7h%PB~ZR7u6tXR zu{7f8Wty=CDZLLbseeuqZQInW)@ZNvE;8%Ay;jS4Q~*KXMpf6P_>Fhec8Q?3Ta3!@ z;*+l*>#zEctV$PS#-i&(m}kNk6st`R=2pTIA7ncH9t4?GI!`C&evO2@SVjw)`iohw zzo1$-p8l?%KbSYLt}G9~Y0X+j)FOzhCeR!tD@Hu`{syoflUyYR!$S?-taef2e;d9M zaB(o0s=Z~JK0RiFpFN*Bq*7faT5#K*D^PEB(hxg05t#9`Dq&H=K8LyTm_?Kxi`j-D z`IG6K`Gr(i5o=|>TEbz(U{JE zZCZZWZzL{p3+wFWJI#ZVv}MZuIt?X2qoIR=;@j!L&TMjBn;K~6xnmDvxFjsL;53gq zrm<*0?*<#d`JNl7a(Tr)=M8Qb=HFMIBmatrlN@GqR?vGIy6gX2qppxab@F)bae6}g zA3vwRRZMrv!khMGGws<}2bG*~%9+F^|1*xkBwAa{;pA&=;142jXF!(Z^||){ZZ~^3 z=?68T-r=cT=23*Gnwd*p-8sA%+aY~YHEvh_IuGp*1;IW?2cIC;a*FI2V3cWGU@jx< zTy^*l>=p%77r`}~2f0tA^>mU34#ezZfC}e8!P+6Ch zMv%m7$1nPqKz#>njkgHBXFi#hEiNsH)B71mQMqkV$^MDX61D-^K55Nx+qXeYL{M63 zC(=!OwJnjgWwR|n$+DE6rEL6`8>7Yh*=sP9mlY+$HpXo zoF^LZG1+{_0qpL@Fx;Un)aN^z1UPidMT0vTYKQAFaP*xQ$tP{?G3Mf*EGr%OWrY^2#f2pu zbh;vgXgoJm{N=1#<(AcJM!|T8Go~ij5hb2p_ zsM*GR+-nRK40EkwiBMUMd6sJnoH?`5z}wq#`$0^;X7aiR2dA@P8HWw)d{05^()dBP z%A{70bOSWh>pwJDWY!$yMaT=>LohS^x)D^Uk(Jp}yiiSxWt@e>?ML_8rT2Q$>f6%Q zJ7x-wna(}_JIPPDWldugy!>(fMi!siE(ZK2ea@QiviF20|K|PMv_}EK*7kV=n@&JV zZSSnN-5cNf(u=H9F@mm=ISjpO+$=G37ag3zPX-6{yX zz7t5t5lUo6@s=f3FT~ZQjrqMbZd4N>pqVz7-P3Zr=&lO6#HI2cY1iZk2g^&tIzYiU zAWShVQlJk4pfP?qS&ECVUQ+bT-}B_c+?WkRVv>|i7b$jpl=uvh?2aX|2!?J8;*rDZ?Kg6^lz@*z8@d!8pkM;kw`K>EeM+BtnqrZw 
zKbQujc2vlYJ_(pHMhVf&Bsw`DOEZ~*y4|0O{8Se-ES`iTxqNqIU5gL7N*#_EBg~w3 zhUyQ(OlSp5LIRkpza8}YT-$)Uj^JjwHgu%;O{4f-VA$f)S4?G_e%iBftFo#zy2BgZ z&!DY`{Cx!2oOxLEp^+iYLJ+Fgu_4zsHWz*th2BHL>>ZK!;;b86;Q?rdI_^oqhoU32 zETPyR=N26P7_g4f|E{YQ>*#hB4Vg&OP_-o}6GUA%5-;LzHZo%=G^0M`W2$Y})}mI~mEi;~K+(cd$5iEsZLE;`6FsrD!c28|UhQb{h1DJ0%h%m0 z2LVmhT)CJbT5$JgU?vN@qjC(7#bobq1OFtMm_@@^NIOYANY^QrEYi+wz)fl%UdAFU zUHFEZ?38KN9j3};Y`}rpUNZBnw~ zf#or)=77)sQ0OYH8*NOjYhhY>{QTH9_PXFFunu>x#+wK7UU{`J;?VC;m{47+a#hH6 zvczUVo4`?d;Lr4bWBBihf(nNKN5<10XnWB2Ni{T2V-gvszWFyIGZdC(qu-@GMFGJk%3)VX~Jg?gCok#Q#1(F)%k!izPwe*vtdc7dmnnr6= z#y@?c{kOOMhqJOHy5#h)Hoy0T)E$f%yE>lzL+bk+i1v_fI$A2F^bUBdq@%5 z2;ULQj|ll`c_*p$>QLv~Qih5;$`ZR`4P6B}XQ^aq5pl7q&*KfWqQ8p>U??m93Kq+QqmTIQkG zI`eNyJ?U7oUqi(VPd2{rkn}IVzN9v@h29To9jpD25XD)`HoBL3;GZJZBy* z{(SqO&Fz0OGwOHk-}GvC_k||y(WCxh25XS@bLBDbPlvz`m`B`w-Q52?Y^JP%1Kfaf zwTz3FJY_OH@!+{ekstjV0h|pam6B@Q8rWp$NHH73H4NQJnAnv-6|%K~MjW*owuCt0 z;Do1AwH{|VnCYyWFTx_dCo66hcmtLtR)uOa7Z@J6bQWrRVedY8Kf}y)VrT9sZnSZU9_+2!k^AMxb(O1#MripD;cl0i#c2$DY zUhQ}!MKoilmMT%bX-q#k5NpRQ>f)1_Xwz(|uC;mTJT{Af z53tdrE@}$1s4pz~YfKogG_l(`;8Hi#^ZvQJ`0>Nss9%XO@BeMIwfkywx>dF|Ijw3%fFdEX`XpJU)HU zCn?;bgI`5(D)g(r_zfERM2;kuP)`K|O^Z}FU5aJvUd|i$4|#j3f#Vm?%(FgzC&KwI zySJ!~8Lrn3=DC}uhVs(!vh&6>0dd*u-w}t0&^Su2l$G9DC*knlCl=+vy@>W|)Z84D zS$2$ZFmTuO&W#8<4v(el?LP9nMjD)Z+Ro(ZplgHMQv*Gb4+a|?P*fvcAa(& zZ-18?G3~$|-IlAn1!is`hGr!XAlN6QRs~UN@L&!eCH1mj9 z-R04N8Ctdl*&?eP*+YAIT8pVFO#1(Z;p6;HiVp&aO=m>CYKYZ>@3vmobNYS4&IWe( z#pEZ(T{hrLFyXnsGv4wcuuMSXNi$g4-@Ns}Pfl6$qQ`brLUs!n&5geB$SUWe%gnfw z&{E&Ws~BEj`DHROo;Tidb~4G>!>B;R{wR>#))EVe=7)wCqH@{==<9^pU1RG=yG5h7 zuxQ^w+zJZvfqsga$EV8ZvVdFa&U`1? 
zkIIx+8JG6^67e719anO$H!64?l~@w8o6NYIlWvJ-lu+d zPeK59N}Ztl^bWz_O{~1m^7b#0aa`~AE0f$=$=&> zSPeWN)dF33${(B1Yb__B`dF3I6CN|~cl5XOJ%GHq#S+e1B2H2l_752jEQ1bj!2y_q z5qCl6GV(IFTAR5Y@%7OuBsB>GK_Fw@6&DTjpLgIcgq~?hv)80L_Q1B)@))M)h7OGwsfm~Kd{f$MW`>e!)Q?$eu6s*=^XX60lQ!((|-<;a%N<&B|twpHQ z5y)L&mSaUkCj4HG?u_D?vN8nS-Xh8QVTNN0?0^JhLM;zkwbN{r?hZLf|LdT+*-n=o zcqbHWP|}J`^KvT>^_jbvxObLMDt>KHK#nID|Ko#|#Xgo!bHFNCopH23UK|5qN|3VX;5|Pd7(=!7O zW4IRNQGXp}jEYYE-H(dAAqVYK2o}{EJ%?{Hf$OyyrE2^;hqoD`@jMI+OHB zsipgP<&@Uml_SJ1{p8W8nBq&7QlG9V_Bo{JDMd^CpFJW(Khy#H2Qm zdv|V<6XUauYY}3SkD&E?yOi)eitgCilWF2XQyn^9=d1IWzQAA`XbW%ni85MH-TCK) z1nEjp^I}UQp=kzOBI4At)G#l=6%Ubg%5yR$9|%B!Jts&^g$71RM<3kmzbt(81MaVo zLtohqVUX0(wL0r@4MJ|-MAuqAi2nRGb@5Nfz+_ERWGhI?r!BkpDp>gK%&U;Ta8Hjv z!rkzov?1}z@1Gxk)@3;2>mhz{=PX!;!%;juWA_lZpqedazbSoj&cqY&JV zbA2K$j0ZFYb>iIruJ==7_fI04p}eu(t;8B|UQ4%mHaUA-b@5u5DgquX6^IN&P}e#` z|Bn`U7X_WpNXSffr^+-uyfaFpTf0AND?TO=Ohq~FV-*Dpuh~i13o&GL-5Y2xGDsJd zU-ygp(S8XySF}~jIRtd+h+_JBSy`S`W`AI5C&JUL= zc`Bz8$Ob}uILZY1ZE#R6ia#&>S44ou*96e2aiSMYl{bLVrHb+S)^-4&k$OUV2MXAT z;Dma*II3(zp8+8?HxrxI_16?EQdsE=^+8l#3}M#2gTfH~XUR;eRhPxJ3&99TK>3R4 zW*1>GqQ5}-z**Zy!*7{pgqk0A%((J3mGII~)`v8yNt6Q=e|-pt;M94!&3!oebWf9= z`d2p*jGT?u*e_PMn_A7V83KHu=$6a^aS_-U&h4GQmKg2Jczi-WvE7{Wi9=w3lJ;*S?$QXY=wbvju{LVx>eV_$thhWNc^xv_4#xhgkhNvqo#A zUD0h(T$hu3T>jz346eQJ?f&=J!aGHvb?b(BtoF1j#c`!uq&ns9dXJBLjsToX>xrkI zgMs0ps>P^`KLYt8*;K!izUFo)BE^I^zrM8;f zp~|Iob5~Vi@#y@VI6jFAtNydjRchP1_V^pY&l1@?o%J;+;- z)fa~=vhslgaFutOdw!OjDj^!QVhs;0)eDG>gwv1|9g5CY7Q&uRtT(JM;@0_LyeNpB zLql$$>xTzmi<%}b5_6d4=_VBq-)eqMrVJC2wDwc)ugtBi!frKqIh@EK!e@{*oVou(uWyTuz0zj>Tp$92eV`{lCXvFN@%?+ct{j-?GQngg&XNG3`UFnoZBpOq&Ga-YAr^vj__Lg zvIL8klxRe z9BY!f^kbrT&+~~~G3t(*PWn-X791owews);kji zD{jfgJ5$V*pFyl0iZ>{}>V+vzq)1}%)*R-imUi>IXUR`m;8oagZq_+u>d~94DS>`0 zc;P?JdxsHY3$kAmmLqN@gc_t3mp*eF6VaS|?LvklAYH!BA8sE+oRtn;WD#v<*0W-A z$dKx>wjzTuZn#IbGszgYZf56}QtwZ1@O4wiZQ=wZye(F?mqhu`mY;H3WQt-=IU%A2 z!M7Wa1&v~~13&$6u1d4t=5cE4Go#92nAw`?5VS2^F@EGpFmU1c4f 
zO-r~19JUs%pGAItz>iY!Gw_#`g)O0*GFy8gJv02#=~jdObdMI?2@7c@t9m7IuhAOd zs52<5nT#(`0^XIv{4+N&_M9?U6{C^Hwp%I1=wagZWG_Eai%MQHoHOHHF~pAqRBSpV z`SUE6HGFqlrMxJ=SAcoj{r;aHB0=^Ui0{NrbxTX&`j;7O-yzE4I0uy(OcOeo(h$5DxHVQ*W5W>O&EAvGxIpZ^%+(q&#)+ zPrEqLg@d`ic2T#J}RrsxR@Alb!VQUAhPh^Wjo;Fb4bt~}?1uV>6 z1|KqrC}e0{i2lfUcsW2d_t;h3eRWM=j)gE#Nz;Xhk8Obldht;*@M(H|zp}gQh(@Bh zum9*DP>fVY(-5B&e>{nOsz_x6LtyUn>&}gur2~a1jy=Gf1!#1eIUGYB_sC4xo4L1^ zG2ah!{Pf&X38pkUK7+p!d>el3)PUteDSLA3^L%fZ(q*_`6{0FeYMKJ;p=Y=l@7iYO z7=9D&C=wSTk2Ga$wT30At;f%6_9K zM#|t)x-~V06LT@8Zn#A#q(3v4zk}YyHRM4BguH|Zu%v+QU=!ZxW|{*&v0Juq4sYc5 z`soVF8}r)Y%hsCr$S=a(@Pxi#^BzSE{7Z_WAczX)*bO^pv`Lc~hImh9&A<7(v*N~v z(7=Yd;Z9)IFG>w! z=Cc=kp9-u@xl$TDKe`R9E%haUIKyNr_~4%I4rdEeNd@uLdex|mWl(kGAKN(eBP$ka zmdv9ov)jNFYt+(@k#(~5Qj;z#VtccEL+`5yE!)FJXa5qQN{qY6iC#No^0-IKP{-dE z{r)h?hFz4k*=BetMJMfALS`}o!53b_+s}*o3$CWA3-OXiYLBJ^@J;Q^<6FI026y10 zU~9>xhhp+@K0mD~==e*OeAZ{{>4|VmT|==*Us0x35DCg$SBSnK-)tevIo!oo&a?6J z@m_$%!zs)m$K)iXIWpA!DmY-$U^AQ7h-38q7y(uH>kplSl--K{&FQbt-EX(;O;2#0 zcvli()nd$lp=8FKg}>mPYsrfKyZp0SMwPw0nlX&HG|B-(PF#OMxhy=!N0I}{D>Gto z>DbPZdgt4oa57D4jxeiQ{%vOg#_$~VYOErzEVJrrYcLx(ym?RkeHS)dP=}8=fu)25 zHr4vugiGKDD>$v(UTkGMW0eQaob%@A@UjE^z&Yws2hQPZ!xowSY8yUQtvogr3cr`) zvDrL5ltoi~J{Z0{eMAQ@iw;XdmaRdYhJJXkfXUUuN(c$K{mM96$CWOk)~z{Sp9Y<` zlYCTnG_5UYgR6V=C8jj16%j}Ot}H>yD^Yyb|g@h)A;^@4lr5J6v73Dz#EZEU!{QmRt546+{FY5E2lKs^hyn zjUU4mte@!BNl*Hw5Mwj`lHD@8T-Q=tH(*EztRhhrNzsX#6C3lA>87NVsRlDqsJZzn z-mqrjjZD=~4!c~xCZ3EO;Q4gEmTcv?8FAP~oFx&1=n~0IhplUkE;<=?tApyE5izgI zwM-BV(^s1~xuJbK2Djf_3A0?x zV!UgG<15#?%(w~l+p@7CW!A2&;6X&#i^tQ1xu(0x0Q7g3!{SrpRxH^jMufFvFSMhq z@~pCE__`SxQ>T@f_|4^vw$PPcU4=9&n2^? 
zYiJeQlBwIM%7;sNoDIUAwS8Ct2gb!3kbM^rSJ*UW4@bC0!#XFV7|IK?{wWizJjMWH zqZ2<8L1~h?13ogeZqqw&tXd4?xeO|^Tb*~L3xov=^TR{xBnk)v$ zj~-eS1$Jysz>hutt4jjWQ8yA@xlq_J!}E6*EJy1MO~0R%16!8m13M9*T`UrzrD%=* zD1dv&YxxwqG{!P8x`O`7M3cv|bS)k`fW|8LVaFn~P2j=Dx)#+C)xWAIszmm$ z+gTelvQ~$oOs#li`=3QbnzJL<;=7(C-grcjSfEdGv=jkGb1n++=1F>ii>07n-ovjW z`OQW^f<&*Q6|gA{JPIa~zNu(YwW|Mn>$Gf}vVlmbSyfq8f#NFtWm2q;!te2WMYw-a zAnklYev`DtB3Wux3;6sAfZa>5D0euHt5y)h%)S1WdoA+PPOU5xXB{PF8BpAk8MkNK z$Zc>_#B=oE9a4XV4s%DPl2y-LraxY#vDv(SdD$F^9q~Q8+t06Zu?+{12Q%f`_)LE) z5kCZ!6i=a|BL{AdW}Gf4HiMbyhOv?g3kIuIl+9Mj`L@W_XY_QrI28i1}l+eRErZNsK2ztAHt zguD1_8y?iLYRMivet2nv(1smmO-rXML&zU|tBuw;Lg@2-7V?q3G0HS0F7N|flAKr* ztg1~empJkov&)9;hRVEegv~Gi{rQf9V@qRSeUP#M93X^BIZPO>8HxS`uj8Z)0wCLB%*-9~_Gx+vyJ14Q2Y(m#z6b2V9XX&U7P zu^>Y8- z^}rgbLY1v3Gx`=$pOge-l{8TCB~c!^QJbm2N*quW1^XgYUB9NnpQhLI52*U(`NoAs zKL;q{Dq=gL6~9t;{tk{$$;~=fr&dR(v+c$g6PBtr!&E65 z+W76>diZiqj%kgTY^MQLlG5hsOH!cRY6~q|jLj`UQV!CFe+4O>xV5#G$0^KG(Ql-< zWD_!I#zU+k-b68H!i8T>!UGpFSUWH+S@q2A2XkUS{T0}bM@e?C-mL}?EWWld$(_k!{W$Y5rp}x(Rt0 zbxPxrj~wq?CztoP_A1Lh-(Pgp6A309KDAwae4X4He7h&@@9RdzT(C4Aq6prH&QQN* zLoMU6@=p?+)jOubu5V^~US&j~e5hnNE9Qxo1*dmV+(w$^O6fIf8db$hsgI^yP@!jW zCt(^vvuXcm8oF$)w9e}D@+c;L6p`O-^*Gh}vvUltU!NT7r#foGz;B_{DRMHHWVxjF zRYdrlefz!-6sd@a@UFZ5=6$2fR=BtIJO=|&c<`x!6E%;GxI&I?<{{hE6sw8<*Obx{ zu+%~H)jE6fi5{p*a4$t!Lx0dq!u*p?KZp0@VE-w71LoRP3R4NTVEU&~$H4i#0a3Yq z-ziOQrFN&Z5Y%zbv2sd=2uI;I!Gs`7?t%szFuVhAy_xa@YE5}4D3Er7gV#|sw28DIW6#5??J&hRM`Y`- z7vb0YMlf*Kg7olVO>`rsLXHd2^BYtfOpzOYGC1`+%UgQNd7D8h7doSpmx26x#0;iIDhFM~k^g%_6AG2J= zrj+N#Fc|jPMdQm$*;zvL7Nx--tBJ-=Oz8xd*41QZzCmcjnRSWfj&F63>GRKQfeyZR z^vJPx1T)07kU$^kT4{IdtJEMQcWcPBMx1-W8IP0Tnyp3#Jz7)NrEYQp~ z`B%D_y?hg0;8*e&ziYk=+G&1Ko%F}rsEtAFrt&F!^M3QzBgy<^N1;6~OEbPvp1-4+ zkNeXvcV1qV6Mcx8`a{aA%x)z?ce1Y{>^Ymw6-N9di4cW~#T`kwY;n%mq%tTH+MOh6 zt?bMz{lX^vpy$zTx+LDMAV~QiI`L*>+wP55lJ7)FoT#Zy;&vLL&wlwr#Gl8_GsA^H zE1jqRXSJ}o5e*{(1)P3TXzdQlcAq2bp-o>#V4?aol!}RTNe=cy6mszO6g_>3AnohM zBx+KcbJ`er#_a}rJqSALxg&K0s)|Mewo@;0j@>K`aV8Qng2o5`)*B3S0|bt 
z{8U=P%dZftL&ninxxJwNW}CVP&eU&AIZ{C(1R0`6JA%FsN!r@=wKw`>?n%aMY1%ty zQyv;*6JcfsqfnEj8~qFCCUzr%!8VUT9)@y-Gv9cCzNyFP)?Ml+RykL^M#&|?fJ};4FhQ_SY+IPq1tw(pBZB*ELmGmn_MHEyvE%b6j!~bb zuZNR}8Um?yC#EtQSECSlb2KnZW+r*Il4#h{l6B>@xx4f+el(r~OW*j1D}e6)XRqCY zzv_Fpqs%PSfn_xyC)_6ygkk|;UD$(P8?NC2=YETaV>azgA$KCK^=O}`%0ZJW z86r?!WKW{oXB~>&@sAPz`6!i9t<+gaw9prdU8RFbVnE z^=Cqbpe0ZUIcWzagw5)g^@S(B^~Eu&Gde**P#(zx$Qh5y~4`Ezn67HM`Cp) z*QJk5EF8JSr*Qphs`>%rAF-1m5*C&{x^U=y62#B**^BczKLutQK{f$Jc)c~(dRf7g zksR2{sR4Cu{2dl&BCZ(BDuBB%J7DL|1)U{U|ANK=X|)tpK}ntGbeKTWsuTXC7Vuni z0q0ak=g3Cq1dA0ey;c8UV^Q#))J!J|f+Hl(^;9Hmy>gBzW?8n{gk7pzn@$Z0IPLwS z!%1}VDya!PmK2tF<L}oW?S+ zOwN$(U^(~1D;H-SmV58J*Oq{3huZS85vqL-R|+@iH8hxypdhc({L;TASlEOjKhEAx z->@8>j}2et<0@cvysj9&B08L=J)CPJ;E@g@y^L_p!0URjo1ymY>a zN|p+!58lpNC4Df+R#0t-|E3kANM$-X zLotCu+pq481fRxa+(2Y`ebEY}%r_h?Mc7K2k12 z35EY^n&r~ga(55s^RXz(4*3`yhY;!)?!%aqf45bd3 zlQD`CWdCF{oE>l~U!P@Q$S@oWmGzD4OYuCPj?bu5LCh3?v}=^=#AfZ&>aib<;`j{8 ztTPQg3Y(ulC~e=0=F=fFp>RIh5eE##Ro&-JL$%)oBj(JzYAHAJEx+x5%E$0NtprZ4n(p&_+xwVNU{P`VMWW&6wf`2~iOeeNScA*QmS!^4mgY=dcjE%Q!Fqi}H zv#*pC_*^@OaePk&xuz2{e>th=mnQI6V8Tb*t=pXAZ#|H0`H{ZWjI}inw6S>~_^v)r zG@3+iLxgeV!H#wQ>t1aj__6dzsu?TlUhqg}EZxldM%0$}JoH8z5td=AQ4%)PrlRK< zw*rf=sPWnak{`nUq_F%Woc}Ph@||aTCHLLIp3=r6n}5WgOQSI8JbnC|OOU&~TgX!T z{yU%2NwFFC#L1VhFSENv^wRAT?R8vty~abNz)vAslh!PmyWlS0Tch*6?7{y?REQ7n z?e$N{BHXja$MX4b)wF6um48~y5r217X!2a~JJ)M5@RX;styivuD?W^Mm|2~u2x)nd z4zM0w$;7+KSlkjSm7648G00f8Z=TUl-MN4}i4ng31__0wq(&9Z*QL!PmRzeIZ`d4l z8ZrHoTlae!Kl|_F#Q$0}+%q)+t8oT<1T~U77(!#kP+@H z8(JuW016g75eza34p!i!nTgByZpMaikQ@&|v?cu%2%bqs*t|QZO2+f@oE$XZVY>4N z3;eJ(i(Zhu-u!@1o6Eha(N{^ zsS`&xLe(*22i#lm(wOBSGEV{<-Y_>K93-`HOR&~vkbbmja(CX(ssoj7~`1o##ZyH z&A||Kc-EI%szm6*5Tkuil@W&;%0Q?L>(JEoeH@s=23^H1=69U6!`TRZF>$L$Kq3n7 z#4Yz@@96)ylWu}n3bM4vJ!aAk z&P8*>POuy#@4D<`hpe{MFS=o0u9Y?*$AXG-K@gFRcPv$upQ6gM4JD!$hO$B-YP#8T zC#h_9|jtxIU~rqdT#gv z04%Q2M8oS7Nc-1zCrvDkK8Oa2nq&^2ZVN4vg?v@m7}(GjNrrRBLDF)%9RV7IiRv}Q z@@Bnmghx7d$vS<5yMpU7?e-?;@PtTeMcohojO@ao|39wIDku)-iT2ClPH>mt?(QMM 
z-QC^YWq||<8r&hl-QAYpPJ%2BK@;2;T`pDk{vYo5HqTQvT|IqH|4z*d1ROc?V}!({ zxK}Mh>`hI``$958>)BLwdn}4jQ4Ofm@3*|etZd_+9Uk%RWeq@u!%Ybq;;1vI6uAK5 z$jO`4pSlj6%K7d7dc?(j;zB4NUd0PqE*Md!TkJ|h@CNo;lF^en%BReK00Uc|4>cS=qTpdIw0k&QJRG^vC z_ypn|9WyH6)AUp&QSNfXOtF%ZuCQ26WR?PFa`q8#b=^&O0LK_?8;&f4rm8qaopohw ze445yrm!|8iqP+>f-*rlxJ*^ul@f9gonUha_9B)>gu0Er&s^eBaA1h~r5#C)64$_M zJvd2Jqiro&O5vVsQrWBBaY3;!DX3HF;&m{ZYB*^#M0_*!p2K_&r`4Tvl+WvGW+ssy z#HpMg$I2Bxo0zOWM?LO{@(B(RYCNeD+5%8r`NP|w8(~vOf4y@2Dk%tX77)c2sU0!L zGrKr@Iv=jZ-b^^=G^Kag`y@3Y3=p(V7!Di?VV5YoyNsq9fz1ds{mraUdD6_Ysfkyf zdMp?d%(^_@t4MKIKdAcLd5yH!y6=_gz-6B?&WHH)SAIaz;UnA>VXR;+AxH#wACFFj z0-NeR{7c~T%=xSD`oq1iak1)I0#V%v#0+L@+!Kgr7I|vHQh(-h#V!|r{LsM{ESz}W zvK#bzDjD;VBpLsunO9fzayi*dcS9c@OsdDsobAdunpNmzPav5}-7AqNG1}UU`CN4( zvvi?E@>h9}7%r_12qOHHV(dHZ(&(aTn-Rx5dGXna0Dj4l86mJ;&%$;qP?AzS6tIj4 z;uD|D`Gbbxa+b8j_h?Ed7nNQjCt9NZJx^)&*DvA?+~fmaE5`W z*8OF(4`rm;hcsz|9pOl@MJ;36UvYbRqBhM(C^g34ty#J=c{1>a+OgT*xJIGJY1K~d zRINu}o~;tSr%meI3jX%jFJ8?pf85tIwCN25cj)kBo$zuCQtU1iQJM%*MW)%jD>|V< zyby={jhMWj@q_)QE1I{vWvgGnw&A{=zcQhK>Sd4xb_(|YpoEy%_{^5W7KGJ=&BW`5 zpvUdOJANK!v%y>ZW9atpUo$*63Q9aTkk^8~kS%eRAyy`mb{NX5r>1KIJ6rMFNDeFpUO2mk%DWElu%Q5RzHcpRvp*Op9gU#$1+}3 zi_FOA&6J!3IUHggR}&doU7ArE+c{1PkD^I_GBmi(UBZ04YzqgZ^X*2-LMF@J2RId)^{QC=_r z9SCmPGWr92h$m41Te-2XZt}n!^K0VUaikG?PNsvpytHq~%(QnEq!Q_QwiTuuP5$AK7|X%>Twb*5sUq0~hT$ToaEPu53f#TK9lwBwMB zJ;mOPsb^OPAWaR?VGJRGer6GeqLl#AFn>mfFaWB&gj3r9UcR>ti9}JG$4eWe`VGm_}Ep$3z7J22UJpn3(PNJelVeb zOZv?=;gxoK$sFKCjx5*&-O64B76+@{o*nXjxn8Zdb_{jKIP$)&sz$uGZ5yYJe;R6vU4N7f7Ga z@pD=d_(n!{0D-X8K;E;hwZ!P8oK?jUEUR%ysI9^x^6 z@zUTB=DNk0Az$f?%-!)AZuOgZG>N}L6w-NHmwE3uky^(1 zZJ`{56pn$WT7ctCP5VWb0thSw0B45;O?$(wS$)E+kjtJ#Lea;qT=i2!>3!hfNpI@K z0Wm7Lw7!Xz%>3R+IR2}#jFvL06^R?vz@L$HUF5GGV#0kC?2nf?@k|Pe*qSo_ILvyH zETlv!K!FD;GdgoTfIom*Bdb+^VEkrQez=iP5AOrkPw2ZtLrQij5^}y)_n; zYOZ`a^98jAa-)b^6|wU-XPOfDEFM)!D{zKC@DxB-SdIYmbhC+l7aSSEdk2p?+jUp( z6h&ADlCps7K>h1@fzFz)97-({gag&38qMF2Zd98k&e?;3addIA@hPtThdBtfw8PeN 
zy#eNkR!vw9S*}gd#2t5^(5Qvg^VuAbp9yN13)|yZz-iL@CKv+l?<0tn$%`abDd}iX z`WI~D&*q49&Cw(CDH2yHi%!dMP>!ne2>UxJJ-4k9~;Ielsq? z49w|kLn7xWTJXDSHAQA}Ct+R65efx)VmLm7Xpa}EeSn32uXQlLUez4?<7Ocy;aOZsrijW_RpuU+@olBIk(khWpS!ovV&O*m)Q)w!aoK-HM^1 z-^I7>3I0yJs_1GFF$vSag`4NJuH8I(YNm@xbNlM}0WP%qQK~6fyu*0$!?9qi0h?`j ztKw8klPn_mr9dJz{!m%yBSn;whXO(Z{i-{iY(a0at?5SoIy1E5p$4TTc;}k_aB2%$W1>cqvQi!tY!cGTdwWND zAFnHc4Vd|yQ8Vo1u6rz;(77|21P6nhi^ba&J`=0v;fhH-k0Ka>gp^TNo3$@MJ| zR)k0#@8AZ}j4Mh?20&Gxtt`3x4Ccw%-3}xMl)h@IyPYvk7zz7`zl8Q37f&oK1#CVT z8>|&5&L#8C29#LC9ZE!whWWY+hwgta!BLQ0J7fx%NN3~Q@`vn{R)98@3EMbXmCYa6 zP6jGRhR9a-3 zIvpx1bPls{k633Z{%T7#AQVNxS^H7d4xMg2G%p80xzYWkL?aZQs6S$*WY_-j>5Jqo z<#CaYygDXFQc=-HmKVV=NB(}L<1boQ%69=UlVSp1J&sWhpbPo0;jp7bhDlr*N{*1qy)zAn} z!{XD`*+{z9P&h+TJM0AN_xkEg9mjlkj^jDJ`WItPmT3ai-%>8m$f~{wMyayNskd?G zbjtIE+wPJ6Qu_nk$DZozZM;b)=AwLGBcS2zk8$>{LqclA*KU_dAQNjB71xrXm46i&?n6fKe9#RSjhsBG!|s7h2#85QBA(KS_(TNA=+&g; zdrFm_jo;q81j^ZKQ*a8vYCNNkV9S&Nt2p5&FgsX5ktL{V+A|eqvgT%>gX@9$5j1Qcz4Eo zTCn>R@fHRalQkMiiH(|*>vHlj{RVCvfl#!w<$F9G zJ!b{$-}tTpG}&=UdDtzAd*7$6^-X#>;N8Y9Kk>y{E?q7)x-u0ha!Veu4JF7Uxz*f) z?Cb5wwL|dZ$&m7t+BBI^A$c~~Hw7{adTs!!U@^qnyhZZLA$7?f9I3FyQc!k_sIs9# z&*QiEUnda^-0H$V2k)3&p&ucank~3m>HVv-kY7NynMuay=9yd4A)Q0n%S9L)*K-;Z zR#K4q0`zqVrF- zKCw8No&OY9*~on|nEem2+^{@>zqXQ(ByYg?`5V=(J1YJL1LSZ{?R=U6YLYSHA7&hY zh85lwzg9NleB!8wC)4N6^H6NW&)t{j&jNjIp zIoA=E%M#h3T@xXhSXtYg*!4zYiS)v*t~QSqO)!UM0iI5+Q3j;L(3D*XGf3wu-8p%6 zVp(K-O-P#ml32--kgp8dR6S{! 
ziz8fh;;pf9%$KEy`+?WTu%Ud(N807%cXqRd@R1**ma8b4A#PD-jfIUPZ*x+(S@V(wRRwNh|V8pPAv0d|84&wkQ!9t|)`e&+lPA1ys z4I_P91t2?@yQ7zT<9JK<2Xz}MNic4+ye@mOm% zd6|(*G437n8f&<_{L%X=m#Rc+!14)QZD|T>NvmyoxGJ?4lz(7OS!6f#I#42wRui=N zlX9ZdB-*MPNGEC%YkvD{M3%g+R@TdbInhE`u;Nx!v7A_uwmaWeZt^Ga7oS8hR>JVf zK^L-5PY`>_!@Ho^LIYjNVVaU8cN5bD=BHxgw+(z6IP-kIw;L%Eg_Kx+R3aHX^+N@$ z$ZidGr5_VD_@kh77JsBs_+UmIaK%>RuUMYfu^hxOi;6YNhng>atw4h1yP3ZM{&)F8 zGK&C+Ojy0$SL}RxatJugYyM3hh_AP#g2Q|c**!7!o&l!a^_|H`UjFtN{0y`&npHa4CdqC#pZw#KqO6kok2EA$DgJRx;NWA2d zEWC3i;Z_(v*@7z7q0HWHIPtC@G4cXJOoyuYKeKL1O#Ba;NHig)3=GFnJ`g^Kr@F0& zp&)UjesSms(UICq|NmP-;N*S%gRJ3qt5&4P!q=Iwv`7>E`^JO7l zecI}c;r!uaFAwN6v(OklW8+=QEBw2610)x}we=~JvJ8HgnnRXU;^0qFN%URdXJ zeS+17Om({JjS*pK;3XHK zz`?%71^`FwR$7#SfOrimAKnbAt*@yGNxOn|`3aNBddPqrRg{@{bpM0*(WAp32$f=_ ziX-DKHJFwwF_Y_bx^ppVY+4j@aZKv<2T+}bF7&}(kW)7EY!_SIWMJl_XJe{GJGDv| z%%3rX%4PIu$JDvD&C)K;o)>(+Onj9K_#J5*OSjDGV1`Jf1xq^DU1Mvkf@+Egp>mGD zS8jO6wU#p-KpEXZFf_k8>)D?&rX!^vL%QQ3e}DxafeD21p|B*R+;W2ShssR^D%Qqy z1dMV4(CMsxAyT5cnc<()s7)b0$p}MG%pdewOj6nd6&Ui1#J`H@Du$6YHvXtC7e-N` z^Wo<*!hf56de8=R{9rAY$QuZR>b}&K`Kid}mNe$pD!eNyNxi=xV<(tkZvve7Q&RZ`N=ju-y-R9| zdvo!O46KUdI1J%7G5eobhcs=8yRNo}Hli&wdK;?Zs&trtr(6MOl#J~D{R_zi@+YJ! 
zNFekz)f2PDkJD3eO}`+wqN3pMj__zR4IIDIE&W6)>|>Ix((As5mch!v9Y0D@6Gd@2 zx38ENSTRlw6( zYiuLQ@*yI+FR{O3jQ3|QP1%4jiS-XVY1UYO7=b~}clC(`_ewYhx8Mo!TK;$JqMQJI zsPSEepRs_~Z@I>#fvlFD8+WtUF(2gb$vH0Ltwwg5~C?HYB+?VTLeXk-xf zr8J}b5uL7h$z-L* zK0GWVVoBdqklicC6biWeoGGO#3vM#Y#rcNY6ly8~`Sf8v$$MvK5o&)g3Nlr#mH3Vj z36%Fe2*PwjTt#3@N5JpKiR1<@H@2iPqv2NgF1P+A=6zBOl>g^#-Q!b`9?mUZU zT&)aLt(w|K=&|lTDK;5mb?eJ1kn)j$jy2?`S7-vvMJ7-~Ze}*tF5O_${fW@cFv+^D z9@6^WJI8q1;-1bZK2DAso~yR{p0}Zc`ya%!S7HenR2~QHB$C<1e`I(!v)t9^-VO8Y z>c}d1?b6gF5uquiql<-J67CTR2T1Tg4C0#aA_c|~M{BfXE$SjlY3pI3&%dZ(uIt?| zq0hZxP`AZV;h`PDKaip0k{vufVSYD>Rxe#7G=E?qg2AglFb55Su&cQQlsM{G7LC;R*6%U^5d zRDkFQgUiAN$>Jx<~{$v%X4h!GWU^}ZYQqMOY4T!IFWY{g_Rg9Yc8Vgtgy|5=c{SF#V!xv1o3 zUwY+oHc!ZZf@bcRanub_vEKbyd?Ym>rf<^>(W3bD@4Cy11^c+~clPciP5x~9zggZW zNZMoZ*xG!73~r9BzT}JE6X#HCxWdsFCvojNJ4NE(t3QvMjG@H$i|B>dhx z7!2-r88jxG5YMIrwv`q%9cMO>-R7?UMnd{<~$_0Oq3rzUG4H>2B z$Y>)8N&IK@rqtY%nTW1fCHwK9uzhSRqVYc=Av0IP4*}e$L6Ps@-uv@{F4MNL({iAe zC80SgmILjsp&gnk{IX(MJa_}8W}TZDj7g0A@t8(Xv5q98dYJ;PhNVLuFzI3){W~31 z_kzv|M}!4lsVv;t^)38xiBh~o_lyFhWle>gyhlqc#^d{K-i&_RU*e#g(aP$@mre3n zBhRH~bgL^M);R>xzQRXIbQ<(WG+JA7ZtB~5@?W>+RsRhPJj5T(_O7dK2#EwC-lzO> zv6}BS?g;d&ukE%urR(61BJ%;7t=#ul#VRGZke%^SqsHD5##OE_6=otBDB@pRsvZ6R z<=poE+`>Q!fbQFu!(Y^hok@a_@RKBdM?QXaIa5;#8x#rCf)>gASm?moEW#TSunX&?p926lK3-6-bFYI&upiLrYfrp$_c>rFy5? zI^}x!<3LYVDlowB;7GRs1fh6;5m7;I^~@=)jr_~RZzA! zDQ}eCZwU&3P;}199ZyP@p=#G()YT3p!q-!e>Q@9xp`jm%8B0V`Y8M{?Fd_4A`ZcNX z3UHT4hY=RT2Lwzy_jX8#O_cQjXW^TZL5YaFZWssWlThM2Pz?kTe3lBP5ndgRY+YWB z7RJ1mgM82VnG#RHMd!YVy|rr%TuT&ND2O^$2cts1Pd!W$M;ZJU{xO9z(;<{uEx2DG zFTp)0E%mi;S+my;J`BFGuaBg?vb7^PjK56QZpTTmpAb^F)zEWtv+NyT71c#vI#U^B z6xf)`U8RE<13Up8J!}$#1%1n5wX-&Jl5pEM#{@$sfl-uP1BJ&k^FHf)y`dA?c_M6# zb{l$!+kf>#%5CzlHNwbin(LXaEe;c&vyyIl7W4bX_b1zh%rk4^PIN-YLzS=Rg{~s! zws&Vndi}4@x-Wknxqup9vhwhoF}Cvp?4yK_fq;vZcfePfG|zrX@*k!ZZksFJ6`_3bxEGHo+f43X?+DIyiX

E2%d=huBC_39 zw<}xMJzhTjx59;z5{cDtFj!CKBOrl zJLw%W(Gx3hhtC>}M((B4ZW^tENTLG=Lhjg)*QOe8+D)Xk;Df(dPqhmCf>jSdS!tw0 z%|@A|zfZ^?cSreS%%7pA(A(|w;J)g%$SWeZJYMe(njQ+!XP z*MP6Yarr}wEsv4c>(AlZcQ<)ch1DlitH)nG)rIZJUtoG;ANmu> zgMw^QPq~j5D9ba#2pih(x3$dOFU$Bbv}$#H%Wh5TF?-eS0^^E_yVvf^`n}$~mKxVh zrG>530>=;!9Qv2VR)djfV-9&Irh_(x;Mvs!&-IWiR}AG8J|5nKU856|K$3n-*pf(p zM%rD@hih%U17W8pmc~Yy#Pt1i$TDNNnaMG8rJWstFJqR(F|?Z7_fhZHe;SBj9kvVQ zw@{OrUg^J^>ar^aM!L-nXhI^FIpw|^mI*8kLqV8QkjT?s|K~^1%fHYt*<=aYy=;OWgd^sdneEeCLj~kz@=ad+y&m}McPHr@F+9}~x5Ltg*!VBV)Ee|-b_16Lv zKv`m%K8j^TjJE}exPT0rqA-enMAHZgImsj_Adc<%TPpXF)-Q$#Mp3fK#>Ob;w*sGi zBVSGtPn+t?d{D+QWN>s?D;|K{df2{8rZ>8CY?mCPn=A`ihKGGcEbekfOJrJhXfOup zeS>NRTd8a7H=BH=xnUeNkVtY;2RE_LeH_pLrX%$sBmxwN$M~ccG}tzbVw2oxlJ3n} znXFJXkmdx*1lmi@WZpxh7+`4(a@Z?Fi zo58Iu8e}S;@U$`vMl!4|BdGT+r>u7ttDvLDF4lU(BO2mh4Y+$WEIcv9NY9I)pfIAM zzueMIw;ldg45qN9MIo6N-5;OG8oFK=phs-ch`iA z7Y^Q}IKtm{B>kEZFcrjpOVBue2geN;-gvrv^2Ogjj4||Xo5roR=BODr1l$r`OpKCc#{3a;wmW3-OlZHC>T0IumoKv)2ItjUl5fK!wPgYA0a zpX0cM=OwGAaQ5N3HZo@*&~kAKQPi(301j@qS%^u~Ps>Zi-`Sw%C!YRjd=O#!g1SX| zi88}?JnX-2ST#hl_{p{r<;%qzn?GI$Lc1jh>if{($q#i&x> zV)cu4eVac80pPKP{s`O(8Wjg2C~QR|-WU6OyC;1lYV5`3y1f@rQe-2639t%}%GgZk zRW_#e5H?ll5}E7mRyR?dqT$`&O!EVbarrRp9i15f=^^xu@Gm!ax^@e!G%{Odpe8^d zCD6Nkq}&3kE~B+YJ3=KFn^674ZA~pMjnQ0%2{?|!l%t>5?bA17ksRTS#I-!R3g8an zKg&S~^!rETbW`iISLEj_FfDuBE@xxW6cq(42P$x_^!GO zK;LB?Ihd&GFoRo^j8p--OkU{&y>k7^?RpJ{fxy>SS4c2i>Zssvtk;m8eJ9S*F9CZ> zosfxKdj|mVoCIcz9%yzb)TnzZ?&{^Au~-I$dKJdFrixG~Nz@9)}8&$_9-Ujo@$Dqi zD?digcdC{fpjNQ;>?~2yLsX-9DVeeRS@At9C?Gr?;OlT7T+ir;NNU2X^7rf6*K%v@0#dv0SBQKRS2>8QBU5dJ}>$aWqnG|`K>RW>qqB*fB+>G zu$P!;eP}IDcrTU+CX5uNcHEjL8h-dg5&t;ebNUYiXg%gwOM5SsQSMVCCQ6T7ul5FQ zZ7!Dl$CTH=KO_5D{ovK)-{c0fxjw}tHF?-yh%1|zxDP)+>}5eQ0Fz-v4xzlDX&|AH z$%K>r=70cvPHtL_)hkQ1jA&yjz-n=XprOXerJ~^aD)2-y82<@MIRrj3jQ_^<;mtQ$ zY39h2Nz3XaaeJI1m2ow7^y=G3>)2> z^BQ`0y6FF04E@pDq7cFCh4W$*E7hbzrt_U47F*GEa!(~3%-EPXb-g=0>hZMM+kwm) zO7;Lj3Zg!hrkiKeNo!kjil;b*W1!!i$~nVvSf;H+ohu;p`t#?k&nX`}1)Q@_?rmmB 
zw~sM&f^jD7!KP!%WQk>*$)gWZhS|IQ=}S8ILpF zqbB?s&(|1YJnSw{iSrm@tu0u5+n1qU>?Xo?^vizXTjkPQ6L+_l<&H0eMeTqAig7ze zS(~3GI+K+W<(#+ArKG@@k-PucBEo9fFDe1<20KCDUvENJCm#!n9M67Uf0?oE>8S@C zFK5Rt`!^8!^}QDBzg*T5Q;)GCHQZ+%d}#uoA%c4ULiEJm#&2Z&^6BVTxQOncSNWny zn&&cglCgtKN>wA27i()R6Z6;=!Q(@^RRMA%7t3?~yjeC(xI264=$eZ+3%d=DKX=39Bbd~2 zwgRy51|!}uSQ$*=xhQtMVb0(QG6XAf{IJUK$&}DEr3I0xY{bKBn!-!aP|1E4=nM?w z`D!0e)^@xO?D}8OWWPEAy5s;hjFyk8cs+2`hyZ?b(>6znh8XIXcYubHhaR{EZVt=x zE>~bUfy0!PC1S86mj>=+WCp>~#^MtYMDR%)z%`&7{WtYWw@(zl8ei&5DA&L%jU5K}$R-b6S@P=f+9sobbMbKwXn<+APcrYHh0IDWNEQ zFmsWdbTfxgv2Qx4BCY6@J$o;(m9HARfa!@rR2 z1b{8WZ-C&Gv6|gsDuQJi2gTEl7y9KX&5l93FzjKqtw7@QUikUIOc*!UXr(OXSMv7* z8)cNu5pnE1f~$GOh^$6D|6C%%N9Es9zKg-X*4uVFmIquUN^i^TeZ;;oL@)Co1F7*x zW9nOP*fZj21D3su`Qlgf5t;Dxs<@@5Pe`ThA44f7N%m z^0s^i1fmX+$eFs}_6svm7cVr-6p68kpRmLarA#tvbjdM1T7t6Yr`?gl#Pv6Lz1f)Ux#Kg=^)`E%CE`2Zpn2u~{<|0LcpukJTk?3)d87t=$e<3vsj zmn8n3IrRqw(l3RWzD+Kw6}OY?)bea7z}G7`N};LEFaLBMqeoaL(a6XRe|l0 zvigxtHN9PI*kKsq@%hGAHDWJB-*%5}7uBO++6!D5k<-H7#E zd$bU&5#UMRN#L7gz1>RK^xT}Ljs_3|0%{=mdpahP^ywHpuzf1_ZsU?sQB)5ba(aNW zAppwpDeRx~`k?XxF~XckV(X&MLCLPZiP>=fx#v0I4%t>}@hkiwCD#d)(TUXLRm&Qb z_@McKRf?XW*Pd)-)5BAUtp*(V?N|F-g(L%SXS54Ueya2>=0oHI* z53Q`+feF@R+(dBYqV(?yT4m2wn-_N_n~&EEg05%hOCJmW8Eq@Mgy-tgyfT)yrvVgUHWpb~v$|{#@7-$FTo<@mx6X{CafK zmQ5WDiZweFHrFNP=~7pWIpXQsu12f(WX6oQjc#jusGlhrKYS|s%h`E&{efhM^Q8Ym zZWb6CWYBN>Y=|ae^S1c?EMc6ae5`(J8OA7bAJ!uz*B zdHroF`U=8#MXpv#O}OouOJ?l~#OM2!Sp5&1$Ae+2FIz^~R)423K#Dvxrj)pp;VhzQ2!b_Fg@DSmV@V0ruVNpIXb=F`P^Kg#TwC}I5`1_e0|qQY0%NVr zeZwfMn9>{Pr(^(u<6w#{UJN&2j9FlMfaZ5-5Q32)memVQ)5r5ZlEX5rT{qZKX@^HB zTOtHvJgSn*1_!nrpjR)-`o4!mz^2tr&@!FFABZ&reu_cxbp#yepaZ7P+ibpRZ^`01 z8DqnFnmo-MsET#;k|#-)^_A)OuUQ|%=OhePc_wH#(oO$W$(~3%WAj@Bbsd2%l-63Q z$yCQxqP|BQrJU7@n-`5q+74;N&*K*T`zNV`FF=z8>{&vo>ih@ zsuI3S87^3)3Swj^N7@y+ zefzENll!0}TiUtDiL^dtNJ)HOI-Fnt{V4!%1f(dHS*e9O?0Jx5ymDnEg%Of1pfb`% z`pPTb!~wwn@FFl8#@J9*)z_uW+-8p%Hu-}6>ahIT*CZ&`-t3YU1K zo|uidR7IT<`*00Q{q}bllWn4cQpJoECG@FPG9yK(y*WOQa)eLHvQf)}vctMz<@!U4 
z)_;PG?$H&b&kIh5Rq@n^c)I?Xv8|Ow&y;t86;G0haAR06GOtUaVs@iKPIjD+KE0DV~|FvaqMXJ-V9vg)y zZHHp2Rx$7EmhPsZFE{}qCscbu$?_XNY!8kY;`f>x0S(--Rim+B3uYD}VP+@G@{%7x zoiDm9XStT9DL5jDr(Umr1YiMx?G%&}VRLy98$iuy*DbJATX<{KF*BJhxMCD+w?U|vFgy9_<3D^`j3yw{o z;srk84az#X>~1;63=o0{TRt-E>P?wFe_e*Xak%c2wTt1lS>Woiyy++9~anwkjN^|M8j z3XAY%4A40gn))~tsRgv&cOn_=J0oCB{M2L~@B4PVwR&C}k$m+;>h%0u?EGV99u;3< z7kShxp#(>ph;qHL{IVUrd!CmxQp2q%UIyP53!D^m*z=WsKswVUNdCYO&@hp+EMdas z4+^TFW1aqEn3?(lhu%%G1`2u%+Bw?K>hYM4{qER~?sP2Vyt325g8JBrtUeoV#_)#! z&~$R!l9E2S$vjY~33(Pe>$_APT9TfUcLD9uD)3-W>is&*WvU`?KBFW>HfDUdDn_On zCUb*{Nj&}ybv}h_!g849ououjm-Au{Y5HPj|Kr(H*)m4TiZvJjOZ0syW1Q}Xed#_JFt`ijxIKFbM4=<}yPuz;VPF0h#7 zmky(+(ZO)PrEL;Y`%B9{azLpduqIYkZya0yx?10KM;dG7_97;rt5?CRd`0qd6?5(~ zr7^Q_q-S|@0Nh3GKjwMQ3@|&q%Zr^%I@~8mGYP$A44NH%V;9cymOK_deANfTMxLq> zB6I?!`%EVoCE-wDcxltF`GrY@$3=MBvm!;VD-4HOti^TLh6+t=fQA6@=Ga@ZDY!## z-7arF`z3DF!SUE^xi$YKwp_gAth`)ZYc(l}s-HrJcbd46X1t)SYNUR~U|`D-0yO~F zJ3Wy;Pj}V;!5gc6hD^MjHyB= z5h82{;4X-jsc~CC?HoZwfB4NTjUkVu*FjxnVLH%(%D<&#@iyICMB15~te4~m>#nU0yiTtua z97phGC@M4WBQdxBU6^%8-MUfZuo~qtQRvY$ACFn`Oxg2_bbC#DNw!cy;rzzYlyOuj z0>S5u!xkBaxFseV>7Cx6rUM!x*1kHhB$q*^IJA;moj+QK(>A|qTcLN_?Rl)pjnQh1 z9u`B-hfbk|Ljy~Egx_^b_2+`40ryJO;A8}g{IyAu?f+C$7E%=21 zTHqhXg#da->j(Www*%CWdk-aPJ)?N&knNmZ^PM9q@4?ylFER>7^)7$``MFS4#7L<` zHr;!6Zgh1?Wz6w$gP+>-JLYn1G}#@EtpvR2GM7j7>_b3mE|!4|T8XG$gxghq@#^(+ zZmA47e!)du?Kld5CAwNN`Ny8zPQ!q61Bsn$0C)xWrT*slF(TWGWaP^IilGa@5;%Sv zdVF9=PEBHivp zl_%pI3J?578c^d_U&zESfoyJOACN{O0thRDcYfB(Ah1~s#PDOGe{!Lcp}cuaue@Ka zH;#`*mud8BQB?PgrascHac#8Km z?K*z6T@cB@$8-NA7T~gyYAbTBT&&%!bahoBjB9PI z(l-u+w?nWAv?vm~<3F#0RLXa*2B?nLt>7g4Dq+#e17^DRW zX@*i7hL*0E|MRYAz4>`Q-Zl5!ea_y$-3H)bt)cyULd>X#38DwjV|jYU2wV=MxAbLI z;G%Cj?()}ST85bK5_*foo52}tqb;9y_jEg9M|R*_WGuZ)Gt5-16m_M@37{*8Z&&z!1y-8TCKv#z&V z!um1>x#Xhy3VYe1XHt^%msXv>Cy)!lX10~Q4$TgEJ4N4bL|s{r73Qtn2}BJ?4$BgpS_ zZJ#f!!62M-8xZhY>7(HQAn|?UK7i>@mXqa9FQ7a0y&#kt|Q5V zRT@_V{AJTlnyo_Sdza*?0aoX`QQ4+l%zMo9#;ie?yml$j;z{pd#*?6EmVz-5q*Vrl 
zs3r82GA4~_$j=9$c9nhNdUeLpk;HbIP9jE~v$vLB(}ayCEb4=fE~WNo&qQ=ypLQ=g z#BzM%OUx5YJUs^992US~C2!=0aAUrdEeu&*h=*;$ezOT``Bct|JGM(A(=+ETjxR7R z8ZM?d_auCN7B|bZ>YcJOeXm%(Z0%b>oyy*8VL-EMzGhPcba!DgP4u=}PjtUuAp)hO2Cf*aK>E6OcnJ=JDCv%O`Dvv{G4 z0_Bo^HqZA{nH37r{qfaWQUg92O5+cXIANKK(?|Mi-cV7Uuxi=(3^$qi9NM-AA~{$9 z%J(PWy&?C5)a#^-JgR*7(2&z^(b(M4DmG3I3>sg{vs(+Swy}Jnu1DU6WS_SF`^i1h z>ga0bJxz3AA-4wyU0aV_^;o&?oy7Vkf0~`pRpj-n%+6pZdrnIUO9Po%$PPQ014mo* z+Ex2qkJV3C3{_@tL$_R>%wR6D4DTJ^HwK|}5%=9iNfIAvRr0b}f-uA!lZ-J6hURmHskbKY_1Fk`$1 z^MYw`73G^ogh>}hr#!*x6i56o`|k69mP%krckB;RPT71yO%zcN+gpc76wz}1Xt4Y9 zYj3`rGH6A(7Vd#Ka|LJe??1id=x&R1jA+n=@L08^B?v_Q-lSWyrkNX{igay9LqMen zfzxst517W(F~zM9PJ1abz4mFi-d8*lW~IThaJ#`r5@wEj2Dyhc2axj8qdKBZqWFxp zeu-5|%GuAEd>j(G6fuL#<8Ts|l*GkGIPe6{srN$Rja(DB7zd!_SZfUH(4c?gl{nZn z?)xKBG5&n~8oRdv`J+dq-Dj_pm(SI6Qf0rln%aFcKza_T0aYI%@XZDi%JoD(-jW}^ zAlUW|qCYfFw1|z1?V(f%s=>hzG$?2h);_axrIn3()j&1*otVkpr@&}Jib?`EGUjZj z@z-DPf1g%!E(6iAJ21V_VuS@W=ymooxH;a*jhfaS>j4;1O@VcsE-34dqr_fg^34$RrJH^QM7)$;XdXcIHI?;N*6OXZuO=5aRfF=4zp zy(8=e3V#>XYtX(MVk>ViejvmeFR(*ckr3xZaH`+A2%5DGVZOl<*d-=LMPpK5atJ6i z4RF7(2}4MxLBVlkAOYWgja<>eM*2BC0O4p&$C|0~VvE;t_JDGs3=O}1B$8)VOhFN| zJKDX-6}b$b|4DCe!0J&RFh$dnj|C9Q-#U?w6XFi4K5zJ*8$TJ8d$qelPn3bRoeDy5 z3VdY-zGE=)NOk8KhCTt9OMuToaqH^u2>|gQN4PKAV=L;tDPog?+;d`vzdAqXgjR*j z%u#T#oL8||VgvD$K9{q>ZQrr)eulml6s@HlipMMP)YIzT=~Ybjz*i0XJJXH(RAran8cp&r*X0;WlhA6=<-ZVCPa%G5l1;rrBnIhFl4?NCIu^0k;2ylQL|E& zoYKaeFR!`y`R{jzieccrro)l14ZD%Lu3{7@%WthAq6{fv_gB__)9i7+S2uQ_k`U|r zfnk0zSum2Gc-p4cj1?aTea1KtLsL>9pJ;u+)s~c)oHm&#wyksM)1pF@oL|@89j)3? z9bpK9eMbG)`4P_HSp`2j(0*Hg+kVoYLie=Hkz7;nnRtP_6^_%KXusc+J^Cx}{PMnU zFAFhd#TVpk0>zs5f5|;`x^?;^`Cj0gdl0tE^={SHNDg_|?dp&|^M*r(fflfk;l#CK z=yZ$L+QMC3e}uV8h{+?4NyU1tcaDJA6m|p)HC~cMu!lW%18YQETa7C1Js(mCEPRC$ zP98BCpNp!`ST;|7XZgN1%ieB;k?-TUj?5IRg|#)t<$mqtpoM<`DdA~UnyI;CvAvXI z8SdZFm| zmTA#ISQ~QuKI_Trdf5IsZc7yEz)1`l@%FG|SSW?- zmuHDaJRURZ7KlKkA4SLNeBMu$vSLJrlv#9q}YKMT8J;V2~0w=xp!Rhn;R! 
z1_zK9GgQeq$?+$_c1J>Mzog7$Ue#JW_;pDOMDc+=t+!$Rfw+y{= z+v96C>oU{?4y3k)e@)o-)Bk0ZO4Q*ezd()l1fE|@dCd1*nUS*G?4zzG{jUFU($ajK zkeVV|Yj&^4i#flKBXV;J#=DVs&vHCwBCQf4OVcgY*u@3++Qb@qL#K8fAHZ}){gmvWLxztMno+=lL zd4^#NxLXY$y67{r>SCTG+B@>dwVXaJ@p+?U6&`Mnp5T7h56$fNWk%4iN&-tD4}Tw$y+6g*_%0r8m%1Ig&+i_fRa_h( ziO;&unk|2NDL*s2RMpkg1mH8c0f)LvQPLleX}e;&#CES7yfA;M4J?;%Ph9NGqa0*C zf>y;A1kBl2{*Uo0Ux8uHdz9)F#!m>i$Q-I$pWgX5t?JV0@?Ku~h*2ZkKU%h*cKEw) zY&Cz{U$0VzU&u2R{t$Kw%f+~^zwy62cLLzT*jer(G=e}vhw39`7OE6koh+Zs@&U=J zf_a2gaHy!WXvfy$q$*!c0Zcm>K8Gh^EM@)020=JrJgxdrjxYq2+E(c^x*;#II&!&o zVW!P!X49>$#Ca0Uh!(|>NUz($1D=LiQTEKe{-poQ8tWs)qv;;^8COa!O994bLnH2# z(`5gMWvz~|le_XMv&h|zC2-{AgUXj=MjxoQh zqdSMQ&ek9*SvaNWKD`8T8U7Vi%6oz@Vdurc-}`g0=E=0ii5ONh^Q?5^nQG_6+63w8 zK=@QsX9$)#{<+27O6|N6BzKqp4^{qhhybWg$=wk2zRa{=RviqWeAqr8TC6@rvDeUY zU~ZGV@q7ErjFt{-Dysv*4nlxP(f5wb68cx9FVhhcZh1xZZ|#?)*~w+kSumB$PoKC} z1Bc%+;gR9}(Xx2Np2TKSlNOLM`yph%cQ|@<9w$PNh=0kfzCb?XnQihz z(dZfIC)t8e|Bi^cui3L8kVegI8QAb%Bkd<4Bh+{-gbO)We7&|JuEjO>Vd9uvRHaaU~PaRL%kggz92M2 zp;B$ykAB0?2&R(otRf_4KXTbE`*FeJ0|(|AenJku@y=yG1_S%1#2IdUjq|o#ISLZj zT`*+d<*9+q$+&i-iQ=-NK#N#oGgYgf&S0*r8K>G|pV8m;d;DhREA1}Sq?-dm$kaFd z{@kDg4veL~M7(CBE6v|R@@wosHSy3dGInr}?*=X(vHNt&LI_slwc z#sWAJ4$fgO9Kz`SXi=vk)pp>U1)%I+p-*Ygq ze8>`A@jR8jiHUl{%zKk%AEtX@XY?OrTnOVnWI4p!)b$pmIf`!xBiGGBkN#xARu=2( zACH^jl7;y@^mDoS7{_7*@3AyC57c{Fx{XWQgV z!2aHTUQl#kXSz=Ew49qKp~$}YBLz2bpZ0z~vTv$~E{Du}587l*Z2tAlb+m7RyjzV| z6{Vr4`9N|^zd%QP$KIR^ed>rj52R}4vQ1Hcn6mrIwVjKNC7dCBLGl@a6g9K{l;!9`My`n*LuHq3}BD0VD0zu zbU11IoudxFkZSG67AD_}3nNpU>C{bRoH*MO=``^uj?ap`&9;bf82F_f_op0R(Hp9!e}Q9H81DxjCZIFYnBQ zgtA66hYp^0vpPd?vti+}z_npLD);a`_y*bKIA*PAoQ*!x5#dI4-F-; zI%>DT5WSU0LaJ;#!KCtpKfCZOslL~2uZ%Dbu)21E`wxDL9N2HI-*WS|Cs$pL|D|vD z&hd5Mf}a=!l%7M)S8<53n4c+Y<7jiH6RWK%!~i{bD(}fF#0S4~ziwF))nH?pMCbri zx`c5HV1##4z}y`RiqKX~M6xRYlkGT_dN>Y@t&#c-1jzMq$#xrOHM@fl`GL;PcdOjX zV1hm_ZM4hRRqW*T<8%dTHZ|M6;Wf#Se-pjf$C&o(F{RjVIyH9<7WmMh!8(({m9V7D 
z(u-ruE^E5lP_vu{u!eqCNJHe|%wvdn_0Ng#9z(@E;vM6u-^X4N=e|9)ihu#Q!CIZ!wz?2EeK*>RTw-6L))iEde!*$(QD~H{qS2?Hsl|Kl@s%e(K`p0NLc@ zOL@LYd_)3;6{(Q{x9g*5qx$0q9#28 zRW&!X^TW-YT*ke+GAnQpG~s=^EIRs0$tdSK(VZ%*;EQKxR+(jOOhXi^1;PO2k?4q_ z%5hoJPU%d;e$?D%Q6J@>wm3`zyyvB_OTkNBo=RsQ!MXdPR`|tJwom^S`|SE86cy6~s*o(NPqp6knCQ zZ$vdNL2at`qS!*V;h$eVORS=ZWXJ^04h{`CYsk!TCwT53o+AD^0op^|qdWF!zc7~Gt z^Q1k)6`~MwH)@5CiG~DBGhwUfOrL*Mtlr%-@YvaxzE&-_^PiJZ2V+7%u!NTdvRiN) zFza}Cv({$z;=hEU-RvpeOQ?<4Hm95Wy$_F~Po_g-%2>x?Rrq9s0!ol7_8PUzuJQIOJNXt5-2NJru&EcH zOa&x*a^f#Z_uaY(y?9a?)*MY!L?|(b7W+^n%icVO(!d^1eeLd@VX|!)5`rF)Yqkk3C!`3m7NrB=K~4Du!=^v|d>O#b!HdeVYzUG(LXAKK z8^Y6(TxyU-%0&Nz?H2f;k3sj5w8skPZ8huC(K!Z!S5i}b?eoR3jW3X`oQM@NXxAV< zqw$pdwamy^it^J$HQqxUb!Y3V#;JSBn-4Ds&Pv_P@mHDoOIrKQA}6-2zActSZd->v zmz~9XS%hZ9B~Q>6gT5Nk80F`?ANgzlR}#vwX)+>tq^87`SOBN9>+Fx+iL_LT{WCU? zI*MN;uwT>hDbI`VfAegKmY86D=!p*pQSYUN*N5UA$mt+3580xXn>ZppMGNhF4ut1I z2hVnAIm-s?zmWz`;;Dq*M%YJoG0?j7#Suu;cA+l{*r|x}+u~oYn`i8Dj6nI=QURD0aphE1yWH2>s1-T_ z-W4WKRR9is#9>V0@`4ZtV^ese@M!e-BWh{@zumCX{#r+9&=wY%^EHL4mxBQ34ezs8 zZGu{9!=ycL_cg^&$4+=_RlW35&tiZ2q-3^#1{`Cw@J4X#WySK9P zC+~gH6jGG-CRWNixJcew)9t*%_}r#M@KE)DSW%Qwa8bm{QChaJ=F04g*e%yMa9JjF zZGIC|;nJiY{6_?_{JJPWEN0MwD^m?uT$6=-kTwNIN6~c-Tb>2aQ>exG+JK3!xh$Arbcu2fEF)`U6 z?Jx_$^IVbA{K0tFllgh4rmeM^_$wBF6)sFe!_fl{S%wZynifnAF+sB z1JA=6DViR);biAI)e~VLDZJ9a&qd`VLwb`B^epRTgr2ms20d(=ovPa$r@PmEQ$>TwpHZ&rdZ27V zOk_PfpnlZSG<#XuCoB%zUWcQVOY?J$v?J3Ah9O8f@P z58uV?$b2=r{mnxkpy{P^95SlA9T0;pHoAXHOABXbisyr-UDT_47oMD7WbN)8cvec! 
zvROa%%c!1909!39!^?qS!|UZO#jcu9%*}J1T_4d3e24zCp8j8ByTN>wdjYJA0j*sxPo#(sFY6m_}UF zUK=(~8X)$#yD|>5)MpY`q(uFh!3B;gH)6Q`+$$&90suP5@>@ zZXViLx|(CyC@!1IJI;LbGWb@lzbd%9ZQQr0o#f;A0vkI#d4f+ubF4alfFJ!sbp0rT07)({4Hx zR|ZU%?QzEAgmIOb?T)$T_HVE&(M3PVuZnm2NYfAE%|t4F)S)d5750ON2cY1|wR${$ zbJPA?`*;4eOAF|CcouY`ewIMjJ{Wa))Cxd>h-lgyp^;+!R(ZEo3{&sZrVf?doe0bR zWM@^4d&Y{;7D2_0!%B^#YMtT4GvahN+~2HVfOPaKb6$0A8e|qGEtxN_mhNOs+w#H*45kH=bWG3b0Z)Vp zxIPQUb^F`yhZkhlO!Z)YDN1GE|Eel+*MCr^b}3?m@}?3e#X*+KW{g|CySl~|bWga@ zx`XHE{5R#ymbSNrbn;27zDV3Ifiq)yZaJB6CF`Jix;$E$xy z>QlU)3i40SXIYt&Yq?_)b_plEb(3#z*Q4)VDUveEYx_LSeZ-I8BAXoy3>;luT^H`s z$%$|OO%2i5X&gwPiP_n+_II48$JMiu-q(s=r4V((PLlDnAD#o^Gftc~+Vz=Z9V|%f z7XxD)W)=9u)q2X9ZQl>?zmNs`*~Zg+vYV#r$vV6F*>S!tb%?y{3Ye4xk?6*|$bQ2V zfELeQy6B( zYL+g6odZDl6Ov%-&$elyp68w~rUr7!wOc+HHLR9XyoQ=x`_O(&JBu?~e{=e8AA3kM zixSi$k%xxR{5dJ9x7xz~t5!}h{y5kdD-@#z<8m+7CcQ9W1qVo9uh-OGKjL4)m1VB5 zz&hfU$#7h1cXgZ6O0{o->Gh7vDsw8GR#x|r;jDM&bkoUq4qxIyW~BKTcQpcF_3@p4 zt7-egBXLN9AQPJ#KH#3OvI^OMkeAvjKW%O#vn)F09(1>-KcXp%>rgi`>6qT2dUPeu zYRjp?G)7nHCj-3Y5$rO?Mln7<1KHJAsdj!|ad!=4&t1pK?AAmjpF?DzXu-iO)@Qh{ z>=^-s#PqmBn4TkVb>wV7yUq$O@9ceF{jBUQ;=CLq@T{?B-le7! 
zlev9>!NBf%bjiXmdrNxcVra)H@Ql{FgdY>dE*D4-xddIS&^@Khh3 zTa)vFUUu83OGj9X>m;aSYz;Em^f-a73nt9vuBx2EO2FOv+ax!65_}b8-=lUB(k?r+ z>dK)lCQVBQWGp}a8URr)H>J2NtQF7fZe6fC&Q~b((5a};|50ej_{SN4@2`O(t3=9b zif-l`B9DY@7{o|QXT>_%7q{)AMTZ0RwAwh#3468v~A(z zXB-;5`*;Xp{id&1Ir|iid7zu6B}ns_u)s5mq;ipv5ai zfm)Lsk_;+@?xQ(2NXwO+DAGsMc22yYTJlG$4Y-xQNwCY#-(z&4138Rt7vTDK9lu*z zXaiKZu&PqFoQC^}{uu#QRL=xX7z^$>s|Z`@8Bn#}Qg%^n?nMti>Kz4IMC}KJ3v*DDKA5>T5LW>@B&J6tmrA+whwi=R5*kuKttO)Khasy#kcT4TP&}*_h5M$ z*axzhnAz@5g__p+NnuRdH#w_^#NDCO{)U5wFJ9DsK$nb;%KdRc*hK7EtQ!biq0O=c<}`y-)8R)&dd}2+l3F%?I*&R z>jaZC11LK*`cL8%mjx%U&UNYadmg?R7~_BqMle<(zZah z>dNPuM?$-o;FD32i8N|$b?Ae7Y}ppD5s}&&Wlev|WEf2^jgh#2@~Pc-mhAD?en`TI z>rn9pDMyMnKA|mHIr}HxR6_WqE&m}7TTB(7as4$3tuZtpA85?@Y1@`AKC6h+&cdm1 z#u|8|3a_6;X9b0~*YY|~L@94H!j5Qp(_f~5{+T1#s+4M4>?K0f-=F(tnSzX-i+G=DN2(P7~+i3=6u_!_R(c2 zj&0S$&M`)uwdsDd+% zzqV&DVU5&)#lYCt%Q2um_ZZT+=r4WD1Tfc`J7n!_izK%9x=&i-DS_J$7ym>Qc<>8N zK1}>JpKI~2y0zN$w{%mAes*GVVx2^8yCi~iLN+0N{878PE=?_h9>D_m0~&0!{c8Bb zCIac}nHTE1s}$oJKhf&Y=b`LuAF5vCPz)8lN|k4S2CoI))e5wK>Y@r)xTu>YuVM1# z;Ow=p4)NlwDeKPqclf(;Cr;Qom`-kjGXL04lrTPV?A1GrEMOFHj#kVlVoT*?wd(SvHd$J4bJn%OJ^t$>+kfgt(9sRFYAw1+IE!>WS{A<}>IIra06=nI00 zm(GQLKwfK$y3%!Ky?&u4CNQju@sI0D<v7vlG;@dA)z<+6?wojZgIT8)g^s z@9E_ot1-XorbDe@>^s$6ldlN>vCQIj% zy>=&PXJ)h@EeM@~o_@1g2Hp-t-2BY_{z=hm_>Tun6}e@aqL zMLj}IHdrbTs}>rwP0A|Fnmiu<$kok;9MZ;iWCb4$i=m?AZj)r*eHszIk@CF`znjRv zB|2Yd?sVfOVpw>%_jnjtZudePzeG?ZvVi!R&J*wTf*q}skv#180UZVL~-;9jH&}1-Bjt|-%c4no{U;H;Ot@#H#fcf_LY8R zVMwyFUzSMy1Bcd3<{zdm0c-JzurE}(Khd_VQ^;{HL755#Nu0&s!T{j*YN z=9~UY4)8dc56Phc+=CoWk{C?6pl7QC8yIK>kM5XoECwj)*O!0*g5z&JIwcNn8g>R# z&thl2M^i22>{kLx*?LF^zq{yVnylF^{irQTY5%=tlRB*aCHG|aa<4h*=$2VESm$;T ztwEgJfMmv%mWfi?BD+Jzadx@g!-Cdmom?$3HW%?!fB zWADh|8M=Bd?l7it4*6B$P^<>54`fQFCmpu$TFDFRAs~Nj;*Fg#D}*Qp4+iRxkqN`n zyy4(aR`BUXe)At;APP7E3lZ4}wN6!HwZZm}*)gi*c8;T|5%B?`3(OeXw8y3P7Pza! 
zGo57+L4J6Xw+Wxx4hV$d;0LKiosDTHXeAUx@?Ym!+eunbP9>G4-qM}b+120IH^qPVg zeIM+5{&~$Sn376C{z%U8SR~uG4g*>`raWBJLE*d!C_iR6)pssNGKN=>5%l3qLG)5N zAxT`MUk>mOTR-v6c#i9jLGNeBS3PfEMoS8Q|M{I04MLf8UIn?qf=&ivNqy%>WBCWK z(O~4hy5$fRjjhsWrY%r^!G}Q1EhF>+Rw))MrCaR#*{wGDjVj;sOB~O;p&GjrF z@_h#HFcxcjeXDKoS9MqdFH--^L^MLc5b5D=tr%qryI zbvA>nid1A|Rrm&txh--UM*xe8E|~HZ3c$&OU=Mn5SxXf>Af)NRCp1;#(Rr3f!)Lg& z5kod?DW7*OZ&`hvZ2Mg!o=)Hk@4$=j71Kqlh6*B&xnJ`ZP1Xu8?HdNZm6SlTo~mf= zZ!Y&(0YBX#kZIPqHt+o*QM4QxNnNae7^*)_yeidTJFHF{Q<*-ZJ}2LYvGACDqylV1 zJWsV>NHnkU7<8wK{NS?99(zzPBaV|`$`%UJIg((C)j6l?Y!WPc=V=UW%z&5oEl10p zW>v&~eFhwj9;C6mateOa+IxyJdod4Nr>CA|?npjgy=g`*gkDNj{Ni0W-C{eAe$73@ zd4>pKNzS*U!vbC~ddJoYL#eld4!t`hWiE_3A$$3aMH(VdgH>bD8XvTaFAQP(uv=f0 z-{?RFRCXF<#`6g3HmyA#3LFad{;(HTJvy`YXwEX6|9pP&7V6#i^Q18U5pq1Ff~Lvy zbzMQsZaH)4Wq@MSUl#={qWV}S-A8pXLaAn+UF@4BZ{BPT+g>;^M|uHs&4EOYZd+f~ zW;)xPWJXF5-=b@|S4PiotyuNSVMm@m6IYIQEY+GfX)7ro1FkTy&5F?$fR7kizx4gj zX6_PDf~$w3(Y$W4NwTIbSAR&Zi^$-N)Jywr7R&O40WQOSKyk(M$e+HYoo%G|=wQd8 zbP`vhE61Ao!Q!{I4h-^X{vR6W^{l(J3Gkn$hJuYR$$c!ue9isceBHYbf<1eh7=JAY zxQR*#I!_KN>Lw)2^>SCFgT!^ka&9pr7TdVvq9cGp_26JI|hj z&fuk)N5Bk~J?}`41saP4y81CPGAMhg&3~AIR~GJ*K8BQgSPOMpnrh_ZldY4QU2eQd zXkoO8%v0`L6{;Iws|lvZNM^{so6kP?xNi=%6O(J}!Cqbd*!HXK3HPvLB5{eI`!)t^ z=o(WjzM_^U+r)iPO$7_;na8sBgQa9>cW20-x{XZXE!j4gVXb=6g5gz$ueo=GFlo=t zbtkC^YkKNj%u6F7ZcUym(5AhsPV|q;B~3uy`)kJ(?o5{KRH!=B+I^W_qQvS6j*`{T znAwFfHE=5^m%aGK>d{%9UcWX|W5GMV2p)i$Jm+Nha|iy2P=S} zz2P!w(8;jiV#d0l?v~Wv0%&0`V2nsc7{#xKcX~X*Q&s5d1AE#^XGWl zl8y@;B=cAZeSLKSM0FUJ>FX&Op6Vy4{y@fs2#ZtWMRxUtr)fMWIt1tRGs^4<1yKxq z8DWz%RUR&R58E6kKk~;luf?lY5AF)?Q3jIi z_Q}0d(rt3PC?=gJnYX!?!O})1Mo`_sYZXsPa04v03UoUIVx$ME0nr|sY8bknqHLRb z?f^}!-E=VdS}`Nk{q38d-}-guFURoxeuj*jAM$>@`=>`r! 
z+q5*w#xxS!aX-p=J!G4>Yktk0t~`cPMq9hzNx1S8dp?`n5H0c}GS)6%y&Unn-vF() zbpK*t$bijXe?%{(ouSO9Z}~Zwa9!R_Oh}A4?qvf^yDp}%_tlW1z$zr5jlBA?oD~&S zwuH0^z392v|I;IqQT`Qub=zX0S@E%KjZ2S9R3G}X123b^DLY%MsZGemq86D3O)B*K3T_1pIii8Dfn&<{HHr!KLKKx;-_LIg?3M-8&%`|RT zZ=9_Zor+Q3hc#XtSZoc_0Y^7YxIX6I`<5`|KbL8YpNpijLe!6O7$!hNS;0_voJ=}J z{tHA^UFbmxKrG+e`0YH!Y?S{*rHuu}%vS%D2KgM_IE@!LJU@Y>oEGa|Qp7>Mw-ntN z73DLp7e+z-d>^0n3MYLZpGm!I(hu}wi01Fj$it0=T=O@Hz5zv1Z1B0C2M<1*Wtc@r z{olXM;5KRfeWuUVV!MeIqxLp6_7{h;=p&=ZjTrv9ot;Tw8EUNF+7#oq^`d_JYg;07 z94F|2kL(FU)v%j1p_Oik`XSoCznNg|$FMmMd4Z8EM)D|yMgqW%b77TpDO#1SRsM~fQ>+PN|q)bZ# zFKf-Wh}l4Mm|{IPGatFLr!vEdcgpQY)3o>z&qUfw3$@}X6BJ5#-o`3jdpO>4S(G{{ z>|fc@`XFr{P5+Bl+pdSc!c|~10!6|q-aDRjg2e+NAouVAAiKaJ$}P}4wyhv4U|+MK z96(P~KM}WQ$Y;o{Rg-IRIlOJ}s(r?^88fE6hL%ZS%B&7H_YL*2b%@y+mUS*wQ3tpF zYu%Z z?=593OWajoR4M7HX*N#m45QJ4BG2kwM`4vdno;ajEs6c-!1Ykzw_Z)D#=$%YvY@yj z_I>kFjQg_w(FoD+RDkteTdg=QT&6 zU@d`&b&iEOH_V(U7r@D?LhFk7^DCw!&AGL}3wPW&QUA^uc*>CiF*VDrjxj9jXWdP? z(6D(0&REMC<5PIkR$96xW&jzr<+N-Xt4h^M;&@6uY%n50;_z$H@~S*Hq!a73?0uE2 z=2^HA((J=V(lRKc)4=b~x<=ri)|e?UL({Kmqno4P8PeHmGhZJvQGu{~;S$sczR*52 z6b%gs82MuS!5oWzG)JZ_HK!x0?Xke#~5NypBRGttoN zPuJQQ(EKI_LpO)kOa&r^-|a zzqzFz7Rz3)W=_%iT9tCGe0D*#Kk;z6&N^tT{rvF$b_IDdBRMQXIt3pf zy^%z$1cZkn8#c#x|c=#&u3lCDIgZybxqF%Pd5z4kXnv>;;T28_q^PdycR_F z>1vbXqOl=7LpSpIpDgGdYIem5uj`=ZE9P)&{|1g3w$E>aDHm-@u?@bIYR6 zFN^hfZVKpBaJY9ix`MH2rHdoU)fR!2Og{l3!t4oZfDb&euf8yY{7jeLq3*pvoxE77 z-amo{{UTGrT=q){$MhK!vvK*~%NYS0q4O#O5$ za$#ywYz6P>N!VDSGxgrQH7Pf_~N@Dj@7Xyq*nFD z2uIO`JCK05MQ>5H+>5N~4~=*9lpceuO>)l@*SAM>XZ<=q0F`fz$u8A+r8tz0Xz>6d z7n~zbXv0kVMb@4lGyp!Cs`#g&wh|V6n*U=v;B}%XX@@T@U>cq-$$y=DdR`RXt2m4q z=#Z&RdGGoN9tYere7+CKic&F#dDF}21J=ks35os9IS)>D1=&KviKTpGRZ|p7&=93^jA9zaat&oG+%_ei*Yn-PNEO zlshy*zaL##xMn5=rQtvs0sPM&4ON89@36QrT4lEscY?k%)Ut&5;zaKZUBIZ%v{a1E z%5bO+Cimud%2u#g1z4iy3peqr=lH)4etRs{3fipH*3JXZxiwFVWON$2lK6Q_t)9uA zg*V5{QP2fnWdvhELDF2d!s$R;8CFg{CLoJ3E}(Ek%_e|@&$p*#r*@!j|2Zc3w*r$s z%7#cj`Srva4y&7$M@@um*IS&Mpo60Qf2p94Pt>O_2h(<&U1>3c9osW5y>cfqX;!HL 
zo*C*VyMX5)hhycEt{u>haHiz(YO^IpWSO8_r;dO5YjZ3{n{r0#bj;AWcQf5jY%y{e;Ipnk;in z8jcvPzv-O8TM+-z7r#6YE_rqhn%9sr?^&O=GrZEzXNj~vH-j6eyDFtsLDTaOCvHy*4P1fr}M%bp{(X3BC5G|nls~rnb z+#GOf5dU6ojE-lsyt(yI?wp+ zAB^*R{ignE?X;;NWb6Kw9n-uTW}Y05-{62F7pq$Ak2k8iK?t&PLQi{kUXP>HZ^q7(FCi^j6aSy&McX%_pQE=R9UUF=utILyY=*ujj z>x@?G=bQm2)&72*ynyW?gApG}_?#X1K-scMVxJU+NfJ0FNK|*eW53VD`Ip9-)b`#F zXZ_3`0R&(G?U7Be%c96tSRc-(shlqhZ8>(q`Swj$tlTnL&DYnF3iVCy*LoWyMQKbA zUm4EM=x@kk>NcZUPolz%UGAL>@>whA{)8?x^fy_iv?O}TWaZZTS6kI%xk$|jirUx- z3j^azO{%0zJW%A_2CD`51D`tX!fXU8{C=C##o{wHwda9b=G;8aKi|z3QmhZP?5Q-h zZ7hs@)>SK-xk|6u*U_S0>WK&bCFjCTN0It?KRG|$p3%NJ%58Q#JyxPEb{jMBKOdQD z(AF(j1>H0!w-)(OQ3||CeZcJQLjVuWB3**QqYEiH+yst~Z_+j!w zyk9^s45Q6Y_pT2ln~ox7OhX{Z%C8q(a4)Bl5c$cKht0!<(3_h3)VRA*u_R`-Ty^dc zl>f#x!HwL*t=!zo{l5};l_2MgQn947g68q>#S&jSe6R1e-UH@O1`{}*>U8QAdxDQ9 zxa6ZvI#s!6+~5l?H3)FXJ)MS8pLXE88%ZPkb5mg_OZVzXA>Q-LG9OW$;4=pMJftZe z%`R}2Qv8563DXy%>V5IWb=Z@x4c!1LyGaxMYJb(uQT6IYq%_-==}YS~ZzTjPJkucK=HH0?4A<$0{+61Z-yXv9Ci!xh2+_WXx-;Jh zl121eQI@a4mh?9gY&VMy`L09DL6ww8NTJRDpZE>H3(^3@zfNA($rR>u##bOZ=g3vS zHnX8eTF7?~AQT832#EYVG2S=TK-=rT|Aa>VG*pE~$Vpct;FLxLfD4a`{3$Aez72g|?EGh=4fIX?f3S7dZ&Ai!)_-Q`kOt}Q4(S#p zq`P4VK?S6wYXC(MkS;>SN*?2=kQ2c$Q37AAaPEK}y645nX46OMYH-brQ#Xbc2*q(QM0V^rl9-fDxY z`L)X-@VyoA(VV0>(`Q^6DYUrLQc?@)-Qs%Gp#Fdua_sCpbQUSRAKJveF;)5 zX$gSi>F*K|!IL2V@-fSDVS?p8-4#f8YZ zGOQIGj-p56^l|H$LHV}Sz*#V0W8~M@DyHl9c8@s58@pwfb&rm(ls)LmT4b0cR9Hjj ze^d$L?Q;=$Y)sgYg<3KRvMb&)HKr-_UeE>@xJbI(m-EL&-%8Ej-{D9+FckCSV0&9#BowPFJwZtWfZd91mN!E5M!w)QOA4!^U>wsu)J~gCGaH*nu8ge&QdG2D@4H zJvmVC7!4C6$R${ZS6LtMonV+()<5|c?=Nyj9h=HDv9~Y5&(FH+kONqe`}><2mfmX1 zPn2Ea1_}U3s5JP!L7KWwEf~esyM}ruTcGU5YSo8uifMR8K#E85@CU!=+5JzsZwWvL zR=kI)?_NB;w=H4J!M8Z^=SwlMbk(oPsalo7=F%oDK=HtAk7G-mqFLvcZ{bbJqi6fR zV0+<0EFI3`T_V)nkPFlKKxl_d4fk(-8h=Rd zfQoivR-a5rULrPiiA}xA1=W-7{nX-rNmTwDLZ{s}!Gld|E&-J*Gh*Z8)%F<;WM8a~ zB=h}@%Q}u~>v|)|+$kc@^^;en@Tr|x+5F-W-U?x37xTpj1+72S|6-^qR?xn{$*uCc zIUB>syhHh+{&d|{yL5%1K?$cYfaWk^>=xQJitHG1$D0$Y-nRU2$6$h7cF>&=KxtiW 
zAI|}7(BAz9U^hcfK!viqOovttPwTT`dBc%tr@W=-hhO)tdChkD9gGi4IxBFA>2|ZRVF6QE|si`&2~{Jf6N)pU*4O?0&Q) z8em7C#a|m8wAbhnu$ujGfYfz8vMvQ?< z&E8(srESu(TGQR39rt&KD-R&x780D%2iiNV z`?^*!1XoM^F0fn}VjUeI;%FXVo4#itp5h$eb!_+vUuHh++a^y1dFG3tUw=j?^~D2! zraB(3OiYJ)=`|H|uth&P)91L^qRN!?`hls!(ZcaOKX)tZOQBtdwUS>!Prf$tH;6<` zIsTce{_~9aoir^IEa54a-pWfk96-+ORUoa-JK>LyN;9D}x0wp+U6Vq6Wk*`E8(X}NF(>V%ksxJtdF=@Co}BUk%f7s}^LY#oU!Zy=g)l#e^)e=gY)W!FDf&FB#Sq%?@wAR`Hfn zFxRDqlcGa`*1q|(dQzU}Av1D)CY$PELZW|B#HF6U)brusyFKta*>Y%*lj;@xGV({g zOJ>07#hd%;psOEVthou7Y=~#*Xyr|=WSEQ|BjvecKDIM|i^tJKaT<&eCb~L8Di{~# z-o;oVFG<}2i9$mR5+CAST}vHHr&v$2nKPm(6ZulluQ@^%tN9Z`F%hwCa7;WuAFKiT zI}wn`{?_8lOe&Tk3Ab+l)`7~WYP;A5U*^n0U*`Mr3`&tzlwoCXZ8^QY5iM*X^bEKd zY0_Hmch&T?0w(>zgl&Bi3wecLEZ)okHGO=p&T(J~_T{Sl_Y;n;knQjL3Rp5b8iRCc2TSV)opZAcFG5aEc=#CWr z-fPuYA*a1qvkpDpK+o(z`FeZwtoW3vYW7O!LC-)=uh@LTG{5mYOvFCt8by((MwOai zqW)NI3I@$llzZ_VY5Qv7ys8bmOa$b&uILFJWl}8&mIe45ZHG*y3-h5(iHu}ePkX1s zv21&~8GjwhLMhs2ng2!92L5e%deAbAH*dZN6Xw0WBgxNM+ygDt;shK*0k1~%f{*(P zjn3pdbol?AY0%M?NwTD7z|b}imKb$5@s9#Jke}W1wR2lE_6)32*hjnNohH*MJxFE&l=>fU$Q*H>}^yv1bcWu-0mlu7!>EQhV){Q9Y%rp=)0%Q zyYN)j@j5apW6YyRX1~!`=@CEwVot1GSwisN34POSO-TgVxLB z3fJF(2fcj~v_Jhh&~q88f4=jVZD{>Trb^?(t==Q@wdHd+JB|sc_r$%N0qL=EqqVRI zbm=j001|<|S?WN)yXLVfJR!D&OvHzXjV6@diSQyG$nfU?+0U|l;A@4iX44gp3i%S1 zdnP&B6lQPIo{lMA*)2YR^9^|n29XA8fbpC75JQjWwgnt{%OHqrFS-O?a&nCqjITC1 zJBPMo%lL=jY4TP=^%&b;-~=g*IEXBl^t3*d#35cKNnY*X)P5~0T>o(~6i^A~UV45p z=uBD6fDzus1*AF@E% z?aO+Eg}>lXN*1Hq1DJ707&w}q2pe~qTps9d#b5Dr-Ro77nWh)vuzUGU_+D+NXO-ypZg!rg7a-2m2#slCRcj%PZO@9n0A7#ahLd zJ}(qpIuOzvf9qU3&gEfK$OkiVTZj+xoGv4%s?SRRd$Zy zS0>9}K7O-Jw%drw&DuQH&?r;lBM+y5RWX}C-tn};A|lT{(pkOLI7v!b;YG97$;pE) zoNskwtZUGa3b4A-qfblC1|HZuEWlzLY0*b!uL2*){XB%~a+4J0WeMI+NiHfTd2@hc z)bO3+{cVlZ;0e*IRF4$=DnrmUt*^KFl$3GICGB~NG;)zCqb z0&Wnwlw|$zfhQ|oxx@NS)ccS&S$^BzcEl36*bH1I|3#dQZrq* z-Gs<+>)RfG&}lJfZ8A3o$*~BHb&*|cFt4$0>L~JJT~bktEUd)%qi;}e z20NCg!;t;Vi^=;VLgbul;1j(Y 
z_}%zrh{94FL&c5g{)3!Vn`(OZ8q9nc=CgseY)HT^(ekA0hal;M``6PGG9^W;M$69Zr!@R3uevi`+CzXv~y!{+dw4*cL5=%$L?!_$fulztXGV zZ8;M^oPF{z>OS<%GZ){wmRqdA!t%cCSn+-sT&RK@A8Ukw>P3MKDN%M=K!x=c)px0{ z^+F#ui7L(k#c8$IHS{06-fl(`N{5f2jS@p+*4cmsr)-j2j4N{Bryo9mNkoB4Q_!D% z5$n=%QKOEH!7|1`A59)mj8x>p&>RsQ7{P!#Q%=BZo#|+z#Xv(JY|#C>sTGA>)F9uU zf1~f)EC;;+;w5rS=;(Fai!;)hz}exND?hZVszLU3IKe+#yhp3Mu#VsyQ$EyFW+Ve( z4V;gDw=Wgw+@3j89B7kNfCHk?F{IP;aKDd2K+!GTUUaIh)RKGuKocEy+5YZL2iq!# z%PNG}V%>#%B@=sBi*NVS3i(4l)mTtU;_mzc2S<5WIkI_3pZCY*Q?~PvwI{eF(TZyE zEz#aXUY8_`MFbX4^`OwE1636ojcpSAj~*G`#(7r8pBo$-8zO1XEzM2azTO@MN%uJ@ zqu4>NRt+vZ% z$FCdCZ_?~lShE5|FJmxr0%;gOXpD(nsSCaj)7KwAHb-U2&FQ!p72p}U&K$0$h7Eh2 zy^(UeX>;8)8?HUAe>+KFg#j!iU^S*{rU9*ZEpa#4lV? z3?FzEHm8J~jB^Pj6<>c+`};gwx;#xlo1kES^ z9M+Gp&SfBq3_AZBCsltjkdSM~66sr5GifF5_rr=>>gCBx={)5h^MEj3%&G7~LkJ|R zWJJ!>5?$1%LLF}YbA!6igzgiQ#6Oa$@{pg|G$;P^pl6)e^nePi5OUPJO&-!&(y1*g zjbMUv9p8xofnRGqE^NE9gM$|+0@i9(8YLBWy(vYJ+QDi-jC2Ky;BaVb-#wuylr`lW z27llwc7p>nZz12X-uHuK&)TnhlEp;uFH@++FsscwdN#t`ZAfcAn+m+DmeCZ>9qR8g z-Jd?JR)z?jqK;8~$8|^s==~C9@hVSP<5Tmc?DnJV20PO>|GI4jnf4PJOUUvhzqtl}prcV)&Mhn={}Tsk-@C^oD!wV?pV6k9;=Wfq z*;1eFt&GPO6cMs}T9S)))cXZ;r@m&6!{g;l>O2(6721_?H>`{{`$Rrl)I2~8iV^P{ z7B&zv+erY?%T7bo%-$|d1q)Lg++`p-D;a{aM(OPXmI#GspLx>e$Rq`-C5fDr)eLL< zt@M9L+7`-QAq9B`^d*!3w-?|r?Z}=!S$#V2z5@R$2HUc3skpKd87TK>E^a-&&u$YF z$Qtm0M?1^BI5D=cm@SIAxIiYFuogmd5{b4}8lLD4#GE&-ike$kHyCa^%yFz^DJ1-U zVd%wjj1{)qN@=bXt_0+Wcqf`oajJ&SG?!L81H=w-% z_xV0~U1#jGJD&7q66@bTy5<-)&PAUaO1bqU=D>2DT$R1FNj{|UJ)&FCv@CJGuTJ;w z-DMGY^f}#gyB8Y?uU;UNUerg;28b&-$klx09q@bU{J>8`C;8S~Ty+pub->o}CJrpipdF+?R^=5m%M^r-A(DC&k%Rw(UAt8Cd(}{J=dN4R>~Yl+ih4LjJ|v6}Yxz0=`s4zJtL8 z8&~sj!7qLr&GBbZ*7EF7InHppApm+xTAOUZ4I2oI^jCd!xyMfPdd&}cDL7QoG>Fj$ z9>3gg_MMMEIHDc`&u3?AtPH2T#XMHgBc4*ZCJ6oh&b=wQ;CqHY)2WzgP2Uc?@y*y| z?60-mADdmV6lqjL{4?%fL5Snmy&;2G*2Ke?Wm&^LUR&+!opy$We>s5BiZQ$043h<& zrmCl6NJsg*+PmF=5TzC4Dy%j;kfR|P;g>b`7@qliHWJm>J5L4V9M`XIDke%hq6$&d zd*9B|tG-NybpgdRn`H>#=}PdFtV+J(vrk#~yu zQL9Tbsk&xpxm&OAeZ`!(k;Hj>W#L6T_4e$Yeu(3p`|PI5;lH?cR6~vV`hW3Ftn)E) 
z(4mbL4q|Q}pD@37Yxp|8CSFxghx-1^dO*E({n$^m<1$_IRx@s!x=ZdplVXi;>QeoK z^M1+y%z8aSLA!qN9Tfo^WOWj_=!MO7VIL&J>{npb?>mF%Qb9u5LeJgCp5BoIWMFDr zT5bfb$7f=T319=$uDZ;RC336_cbe;82@5J;KCC=cfRoyt7N9uFBt=#+#65P9Tj3(PL%kLmuJRjvMPinOQ#K$;4Xj(kqX zzWP0#8C{?)Du0WOrc@s*#wlEvCA9(ECW^Zsg|o8u!_g92lX9ojc0b|O7JRO>2)l#1 zY7sx9^Ui=P)e`qN|M_63WU4U_zbGw20>p?2U_)sy|7MtEY?#Hcba4?Bx~?=Vw`0iU zHU3ZTKBXfB2;#2T68nF=JK@Ca> z-lnmBBV4dK{8|60pU^pooUz?afzh8M0#AB;K;nhJG$ zxE_xy%nPGz3>o+Q6|1|uQ?YxnLhH&1Xh(^Lc|3dWbgajWtNrlq!CvR+EsK>{`q;XQ zWH2RYk)4+1Kp?D|%C`vUJOyN%8S=DL#Rt1ZfW?GjWQ1wzgcmL#XV9?*x5WMmhF<4* zJM*vsCSwBce!)TMxH4~5{`uBS*&JG!*I8RwhHBzPVjV3prB*}D)Wli7n=48McsWkU z`F~iQ_v4cWVpz{WjVw*`NZgn#`FqC@NKMeD;SKERz|Wl3+oS5B2M+nB{Y!dc3nbex zgluIpz(wtemXI@D<#rjwO6FT{nR@}h-vSc#Mvcl~o&wg|JizP`Aqnp0uqo|&7Q9@o zVT-4lC{XRX5*BvM!U4PFHxUr!p?9+sjK@1CUt#w3N9=qh*fYKS3bF?SJ7=_gR@mGl z|460B)mmRo6SZN?yFFc)icbFEBQ2ci>_zBW>#nb`HjKu^Jeb4x)KAvzdQY-wtJY8=YUvS}wKDD}JUCUIzD z)AQu2PDe`BORj|-H1d8^f*CZ!zh5rqF7&r_=b(~h4ml_KS`Cl`zG2$5YF$fx`chyd z)`f;XT@+9kn(kBXGvdX^U*jj}=j@i{pQ7reuis%sp_32w5PWF63eql^g>EO4Y3PNp z!5hQf-WE%zlCfRX*jvsy@#|({WW;rw#Vtx9$AM2AM_wdh+k2(D?csiYvuJfcz3=#rV7e1yLtT7*MNbsgScEXm zHMycc)N&ZxW*1FrG_vC>Y|L7$1HVA2KBAhs?2I2aCy{G$YcP?RLm{Bpdz$xmblX|wS6oMutB{$}!4pQFfshDWtPlX$p zs2Z`@_1DxE6C?#z-KK55KsQQM)iHT~@arCJzqr_!{^57y6o##b9JeFWI;U4+`lOPa zxPD_)qsrr;^mgooqP`b8ly> zYh)tWRtBWrMPAcb7k3^R$qNr6(|?KBPUu{IVC|x>cwNN;BLn@tm`)9K1sIq zzVnHXQT%#t?WyCWptaWWE@xEiSc2CQgWi^^IwRh=SqcihYNmpmjTX#nE#YcuT#dLv zUV1g(%krijuJ7|=(Bie)=q{#Ev+(y=oChjz zRiaGF>4E@-5SVqXm++mK4aOlFcmcLUJy+xDu;tE4(b|TdF@p9DAU)C z{U0*D5gu(LA`J}f5K9gV#KnGLFv?)n#<%`feMmm!s6*W4koY7(5Yxcb_%Wtu`;F)Y zDcT@tqmR*Ws*CtTx(c$d&f@6lRvBXT48%~29Nd2BuJ%;|nJU;_9h6ia-s~pD9 zuPAU0U04`b)Ph7X1tjg2sEZps6&v^`n6Y z2gJDs%`efJpQXwOZMVvJ;~PM&P7Oy#Gj3EY%KrQPd!G2Ud&hiD$k%@!*H&1bO7p^D z>L0PP`}_Tf6_DtWe?NsvN(7QbHO;7Mu?*v3mz)adFPz8kT1spD@ed*e9J`HD85XB| zWO*2O7S~1I>}Xroi6jjTJ<%bR5Fc^cYk*e{{|4E;WlVO6^$@;TvizG z*ce+t07ssQa|uD{9Z+t#r)T_D0860{EFZaCkx1u?28e+p$;135!%*fcE*BjP!4UC7 
z7M?{+R0@=7i2qK5L4T)~PN5x|b-h{C=KXMt71-4+2d`DJs(2Y-fiZ{s=-}i!7cf zIubSp>q?b!5_LI>)Kr_>+;qBlQQ*|+6;O-m)7Zr+<2`$nw5u^2wo1PmCJK8skwcZ) zm#O@CZ`zuauT1U^Io;|+eWJzzw}wn{8%&Dfc@luQ>lNrP`a5n3790~9$B* z+G(W`@T#8Bh5=dp`lq`Nc&&qz^z0lfrNBl(u2~RTVnpgQ&bw8#s zC>D3c5IiFeMxjluG0TU7+jKNCuAf;?=)2aMci-g3fBB#=wjvR>V8~V>L9ZL}t2s7C z2BEzknVli$q;yv>CR=WSoz7}q-nxsva|+C97Uc2F5*}THwo4*?Eni%Dg zZ_^rUNif^1a>H!JFq)XVxo+lss-bQ5^N;+1-a!QomTom?hgdN2JeicYID5fJDsHtuX-JWC3ep>8c zI9llsJAAJmBkz|dRDvPLPFO%sV$o9t?o1@+8$CZX(~qbSp^f?+?+t#%XPQuzQ&U+| zU;@7o2lzUnVWKmm_48oVsl?1rWm5_WSWijc20xD8@O#}d-qfC>(>DG-+2e*KO2r5HnpNB57*R(**6 zv??i@#InIYgJpr%D=(*GyqFSh1>~BX90P?f6?qKTRxTD7-gg%-`n`bjfVO0UzB5BG zvy&JWvuUPb5R!`t$J|CsXJwX)57q^a}DAIQc?Xzp1H7+&)1vVp$Z)3G7tfi9Fr(n0ovaoOIFyifNbHtXa?KkxsePmzGvL z&2*UO^!v)m0ji1RNk^<>9hs|JZ=Z%DKy~wKLrzU^DRa^$v+G^0gO?^^G~F1EhJtSw zLS9GvtT*CJpr$=1LEzkaQvI3vg~{bqzg``D=5RtmN=&&^2Wg-g*x+XL4Yut$fqhwV z90*!n0-&PSZU-U+D7+JJUmCU!(clvWuR5G$8rZBu1ENw@6(dX&Ce4)Qpa5Xh2s(Vx|y_(5#Qr-cbq{O5U- zz96ORp4t$Uu|py}9da3-yGV0r@u@!u;wSXwEh(4%u4xiC6h+SF$Q7z1Mx$24aN09C zPrTInJgTiul1#5}Rvy9^`FkcspOsIiq!n#0e*BXkiNs}la4m`3V2Q4!1WHCNqjIUV zqxz#%V>EQ9JQ3%^45EchjVu+6*eXm%{p^iJkaRtZB0D3)>4*^@u)ezC*k0kdL7ARv z;Q{yUMvwJabKu-Z?&P3wq58mJGNqGd2t$-pto*Y>xi+k^`R0rVHK#he4L7?x z8tFHoUAb%8Reb-`;QkmV3vM&pP=7o_qR+Fi3hN(bOxnV^)32^>WZRBAnkrjkZ+Yt6 zKcj!)Mo&Qpe24^x)H-zlH(bcw4hA&Am|fv=3ADj0NE(XWrS|I^87Ef{BR9!LY4?}X z$LQkQA$AM>Voq51!@S~RI zit;T3cB*BYOZ-!M1>Lbf0D@&XHBeItp1#-U00$)HF3N^8ev(CO;^PK&LI{Pl0qBUD zWU!yQ@!r3jw&GRCx@5q^w)92!s(2;vMpjJjO*o6K5dw^^;rZ6yD*9!*UJ9KY+G14E zZx3%Sg+3dL&L3z${=Vn`u6cuWX8S>&75O5wh(nNaFo9L%WC`$k!$ktn!Qt%qN2O}N9lQOrC06f+pEFY z2B4^Z57+&2P%{7WDvDuo^E&fvhyOgaY#82XuP>-|pB8c_ztnY~6k>yCybdRHwYc2+ z!o1TkxR}%FJN*7zi1T5#&DA_JwYh7kN)ywsN=H|kyms2QU>u`7#q{L-6o(6b3d$yz82M`}-cS z3nki8@{@K59q{b$}GinF-p6znZ(khe|4hvH;CdLYFO|4OXSB zANa2wsU$K&Lyrv`YW3Ii>B}dEK@d>0MVoqg#RcCAcKYz2@b4vq!DWyN)u9PK7St1> zpPwcfii~XJy90zg(eXc!;#2tk9Ckln6G(aJyE@QY@t?%p;8Rd7`F*7e{(P%|Gxhn+g5&SmUHp+fQncJK){R)f_TG`yxF&{nNw;Z?pvw 
z62_DdwJ7pceXyNX89**0|P>Nd>z} z(}LPIWESnm;Q0;N){VFhR3iwXEvcL;!7ezf_=+tO8$wpPgQIhDnCM05x3OCUSySUT zh1g^-ln!Pmt0r4+^ok*W4G(gF5laJ0hkCv2`PK!R*?z0(tiQ(s^=qGZxZ2&~oRt?N z8Jn=YqU)2DI(qi+!4SOpea7ptQH?j%Kd3{|$*^e8S&SJszNsO($>0!GmZh2#vWYsw z17lEkOjLH(>cjr!!ClwY_t@@;?A^&E%i z#a~XmI*#EkLqj>K0M#QzMJf@_9?)Dl5wT*E?iO*-rex6U#@x{%3gM!%0E1C!Lcrx zFN#R~CFXAn{~=DF)SW z{1*QH{a2nt*8aLauy`pO#bRPT2tFe2bbRxi1DC+atZ^YHaL$20hNhKe9?|c#WXXJt zJZls&Qhd9M6?}vS-$&5P6%>*?7mn+M4^Z8exNPJjyM66*J$ z6PCPy{v=gHo-Cmuq35=$i)pXOS&-UFiNgacRZ;e4UwRg`IvL0fJ+J@?kuGQHW&eXt z7wIDgRs8~LVeK9@O&o-Udw_Ax6Xdev-H?Jf%%?)lFKTCXGNCXjsRwI8LVEC$2!FPz zD9#`8I1OM`g!t6VG5maYE|_DSss~(E@#x=ZybC?-z<6&1stIBv2$+CrRflM=;ZwG) zuO&PGp1i&+J|8#XKELfa+&RP5!z-jv<%>6C{LhTsEVvA#GlchO6$CAtYgBOgi6@X< zH@Ysxas}i?L1<8S!i>7H2n#I$#=5gOYMi}FQb;44{#TLa0n^Rth{^9CKgrZRl_N!s z2$T(!JqxhM{>(t!1b9B#5XFQ|(2I;DAp;-A3DXNj>_-8_>#&l!B{lQx*0(_|UtC-j znJF0g(KrnYC$fNz@`YAb&flTp2rOu#KQJPk*g#UE|D^I&P(dsrG58>5RP2Mz7l?L~ zs5l`}NNAK2k%pqLYwC;z58{X@dsbA1MIQ#@+uC` z_+V?v6$ErC#Ju^sVJ;n$Zd7y3-qPhkHNdB^gsT^qI953a)Jt~mQ3Q0z1TjV9lVT8) zA9-SC*!9wXA?Q}ZV5OsFLKFhhPxJQ;-ArK)DeL5Fv+l8@qc9>MJUW(dUWxA!K-e+= z1EgTSyV-0jtiEdqaFGo16o?7?FIQX1eaTvl6zyI7*?2Cuvh)zyDE^NP`ZI>sDpV@) zdvF;mc|B#Rf;9r8jtX%&UmX*4r4nay*(v7c_trvvB2BE#kS?WV6S;cT?|GWp9{jew z3WU&|7xi0H3ULo~y=`B427=znS{RY|M#D98)WI(FTWmsk^f2qh-#oJ-6f@(F1RGt3 z33Y}4$_jJKjPMYl7A+>b*ZefCt@hSeC+=->{W~^VJV(}LW1d=+a_A$YL-N)PZ4lSa z>o1y`R=;HGVLI5Ysg4Iqm+OuXd*TmejVCW}hbQH?K^MJ*>#n;uOHbwRs1%G?F+fI> z)(lN};_TR4Bx7vN{9;kdg&RJQR92mts#Fc5sEREyZu7X(_m>!1A91&2 zvx&!Lm^X4}1uo56DDC`;oO@>q1ZS$Huz4eVNWEw?AycB$hq#y=&Rw>2gI>ylc3PDX zwKZwVr%;;DrrGY}>r9sx4JK5{2plP*(f< z%qEYe&#>66WsWFAfSJzN<%+yRlMCM)D;XLiTd0gHDzK=g3&pAu-D z?Z$VsS7AhIB`&yP0VpCJ>%TD`e&B#?K6+>BHd$C}hw>4o^uB-Nc0i&kWDy4&=a29+ zBcXRSIyIb429(-(G+}e@KwlTaHgT5>Qxu|Ms^6 znYx)d@XI>e91`Vpuf6S(7ap1#E z{BiEzaBI(|pQ-aUs0SUP-PI%WyVj>NB9Ekr6*9hF2YQ7|HJ{Is8_=B-`0J$;lnI5# z@ZcVB_xmNPM_+rH2F7W=+*YNZS5}=QXMEtjq%%lHDgo>2RWQTL8~k3xN?jW=_fL?) 
zWO8xkZAfF!a=J~-?#;@wS!Gk#YY5WmaovE$e`nrc(o%G_nuUC)w2S z`szQW!ng3PHW&3Nx9)<^vT~-h^UeA~X{=Omb=dMK$xh*ftSP>jw9I>L*?{DaT{U_q zf7qkUT&%u+T4mh9FVMd4R`-CgR&nL{Qzqq!skf|g4^j_$o@I#22^f~p26HwU)LUKX zp-}K;xY4ZsVhF#c_7X|a^AW+oOfzX0w;ijMR!>4+H~Z{kNvmbc;Ee0-%J*N}pvdM| zkqsnjW6--h#6L%FBFgGN8pUB041tY>#;(u+1C0vZtJ3YO!C&2i^^ixFFiFyp1I z!iD+TbG#*NTn=uj;R>3dnSl1Qv&}M$|Fyy1o0)BNJeEl@V=In)B;Mi`1~w8#7m&#M zn&rL>;>D72^uMfeSmAMb37ri)OUNC&Y&9Wo3;0*S<%*uDeqtR2APdJ{Eb9=SfBagn z2$q@!B0w5$I1M`&fMEBe&VzC86XCq!wB)0Xj}Wp4yxS_KQTTJqs_EGVJ{E!W&uS~{ zFx$J>*~AyjZ~~w?56Y}KKu$RHN-s5?g7HiKIZ5sKHs~8fuQYhKf0X57&wa5_E1WlC+JR0@WDSiNNVfe>2gAQS_I*vjO@swxab{JjI z0nf$2r>7RUEghM88kO}%7@WkU-`h1}#S8x!8CW+m#$y?M4wnsG{XIQF&qRXY5y67> zey76Fv;3xO%vemMaY*TF+-&BO>T=%99Hs0oR$5k~_bJ-`))@Q5UKmgr4?;&jY~q`} za#36}g?N)QilCV{O)1d*-h0v}bwN}qtX!sKDSb>p_(d~TM2|GW6X{pW}a@20j#R^XGG$*NFM zCMU>~O!q8@(=>}91X>eK=dBdq*gCCl*p&q6%ci1bE$NX3@_^iAr+jz&4bjOu)uZJQ zI&FBP2T%G4v8*OWi7x#WXFqV7?6lEW2awQgsSGrPV5AdF!{OED1$$#ijit0^ULdiU z(1w-YM5_(-{%kO$@4ma{_VQx+N4@v2aVGi4(=#0mOa9gd)l>4&BX`a4FjTFfVTM8w zq-)sj=~QT^32ZzU83MV1)P3N!BCpVsIdy7Um%vaQZUJ}Wj#e4Qm!x(^p5<9A3cNT< zbGRQRJ+UQ^RNxHo&^12qx6WhyjD{9A8umbGR0Y^<9)LPJhu!$G4Ory($ALrf1XE5~ z$;e7oEVrz*g+a7J|Hkap6x55ePai?KfC3|Zq%2<=NDv~MJNkA*G(Gwh`6@5M#!+Nz2pjv=% znchX+CvHMsr(^o#OxF#(Bbcvli6Zkij)grPi=R2B)jU};1aOCo;oc2;#*ItgV4(`u zp@<320S{9vV?AXG1u#JTHO^9=vKnN#>^6Zs9cu0&m?`RhJO2JsS(*5Jc2P&1_H zXYB~GaTo2p=|#|n5_f8x)S3P#j7s#JA`)CCF!Z=_RP|rA@Osl$)8e0+Y7vmGyi}|IVoq%a82D=50VQ%Kz%(&vc@u#>QhJ)NJL8ps5X!-ao~ik!j^6RgZ^Y zjcOZmn#z;qauU*(1@#Lm)a2LLmA+3E@aMK_;T#OLKCd^KLrh5VjDt4!&5y=GFv5Ck zH7qAoL~O9dKu=C0vii+Geb1)v9Wz>4@~*izBcyPJ=~)%|dpofx`$LY0-!q-5YGUHr zJvjsxQzqW?ts{8YFYwipCDZ;9?r2wY@YDP=e^wkQF#OZ#x%StIvbw_=gF~bpw>s}ykY?5NH0LVcGH^553t&71o2)fCHo5#=|6SMTB^b01 z_|}hWE?P5sK|}?nI(uZXDt&kJUh7o+DAp}vG|Hc`syJ>V2DI8U;6J0ZJ2LUE`P;o8 z6CHvMNgW$En;%z=>j*z(Rh=RbQrI6=$t%VmJx|0>?);BC|W zJm$W0!Os^o0sHy4(ri=|jk#mr7whnyPz`q>*1^+Oes`MNympmUqp4I~wwYKVhg=yk zcqvW4{07jDDkj;C^CLI@Z3j6eZV{2tgs~6-NR&&2eU5AZ4Ngu#+YJQVWE%Z0S%l%K 
z8om`PC=cX)Z8R`|AOJQ}wI=X9*H_WysYe1c5@G^yB3v_CEDDv+06YCZpM=A+Qza!J zjsMu!Xi_}HYE)?8GjfFgrn<_|&wOk)N=x;=|ewkqNf4Y z=tJD9hD8L{9^ zjZg&pKLIF+6ynECP5g6(M?qx^0A$B2s))%=3jOrl49BmY`qB_F_}X6i>7c6#&&gzX zRmH4$!NuH&j8MuS?9^u@g?i)W3WWA6WFPcnS7bE$!Fug;f25?(Z(d6Kos^GHkZg_k z5WR5vGi{g_KN%IYo0w|r%^k@n%_E^SG#YM4Z;Rg@h>=R?xFJvY@t9jL{v16*dHXZ+ zYM2ALmEj1jR<`kJK=_WKVKNhTV3!(&@iIa(A-1y6TG)R39vhDyZWjv$;ZYppxh-_{1XJ(fa?|UHzNxctFUdm-D;`+zAtb1d>?OshzMDyit zV3EW_BOOvBX^@0o2GYA>Q~e#;Yd(;ea$&elKvhj?hAfOrqL~CWaKpVw<~ql-T4(^; zbR(g?9RX~-rAlC3tG1#<;x40c@7)OhtVhQ-R52qd5Ur^VD#?UKBW7{OJjX!@WpHTn zN~4V3cf}YTXDf#oLx@+|{p()6IXH7vP-I`IQg}eA(5mx}a`j3}nJ&-Sxfvbc`Uwpi zc*JcrwUBH`fF3Zn*f9XdGPJBoxSl>W*PFT9lo!|BZ{Aymuc*B*LQtaLyA6y83;!TD z*|RkfKLdbupW%@pbcyWJNf~2J_e&L~_&w)IpFY3}3yPuj5xAG4+>PmbqQnPM32)N? zAfF?8P-LeI*9G#c*nm^8v}&ITaYa*P+*ld>nK$>8ik|{|tyh0iiou&4AgH#Kx)b0- zkF9_uy_&_O%g%=Jh*vKOb{YSTOT94Kht638Z-&+0i}t!AsQhP~@S~$Lbs26%G3H%U zFIfSY73uhtwU--5Sihchq{4sDbrw!hMi0M#c3HZ+OBPT-K#`V`ZUpICTDqib0RfQ) z32CIeLl9V68kATXC8fLL?r+}v&fNSLcIG+f?6*GK21be_tci?D3jU^@!sfVI!!t{Z z>RCr(HN`Yk3nx0}G+-%r2BCo01gqsFm>;+YQPk7xED&Ouoj@s~k>^YUhUPaj)(qTC7uRsRUWH06fpdQ}zyfbm3G3m|6JsI5VPC!i$=S|x1tF~KwM7Yg|f84Ot ztu$Os2=Ubux%|ua%P^`6Tpsi@7olFaCfSaOZTp2skQ}3$*%kUQm zqMC9K?cKSu^-4hnc^ieq34im?x7S5Q;f}#>gk|R(K98c6iBVQs8I2*$2_ge58sub~7GbL5LB+T}E9@d*2>-@zheu?grhX5UOhhs&HqVHIp!ICu-+CtqZAsNMf66;8}8dunP~DO<>j zBo*e&*fpFCe76P-@umH$uq!zietl?>G9Hy&*`8-d?5-+kv$g+tjmJS_w16gY^wyBs z_E=rwR#;ToT$O;Rva)=E6zBXq-PX9w2Hhnlcxveqwe@lr(4FYEQXp2J!c;AS=&1gz z!@E|~RzH0p>Wzq{6NcSh9@cg_e);=eYKMBEMIGY*+xKor(4=nV+AGLxK+RNs5F;6H z2K1t%MdPm|IeXn1b~roF3px4kSFH@MjD{8GDE_a~^?zOGN3&C450>BVy&z|F3jtP# z<-&i=O!1cWQIi{Pb@10)C>7KAYDTdmlTw3)G>vLqygWB%x{%dArQvCYFG$q`sjI1LN5!Vc4xV`iDb^8s|NkN0A$|cD(@LLj)V4zVEFfcp2l{j zAn+sq+j`0%{jV15Rt(i9Js*B-YiK^@iA(KYUnR}L*+5&BZ z<%i+IL-XuuGr&*Xryt>;k*~}th5aH|V|u}?%ui=NLFa$W&~G_Oo@h@x);bM=uM$WO z_UxY2H22Mmi1<`lYgs`3T>t?XxCEPY?R-3r4{iV2 z!>tcqDI98f{!XrAwYSP zXR_<(g8F`+yvA|1{+AaqTio-w163OC)dOhb(`D-&2{KiN@aO#Yy49Va{L@!31XlC} 
z&MJLZOV~3`4f(6$pu33uoV!iuh30HAi^qk-J8``7x3|ga{@sCpD1*8l_7Cr9Zx=00 z>l7`QGeos#4Wb_qN`r2Bb8tA31N-|xc|Zu>nRkq%7lHWh@K*`MkcL-Lr%|80W`Dp* z^z-Vy?1KxD#zjYVmZ#+kh_-O^5Leo!aw_E23yb`w#CrXL1y=pnjlAjz(r3PUh`uKP z?|BI>fRH75b@FO?KQ1eMxok75Z;;b;dng}oo%ge3|{oc6|PJYm7KD?yd z1Rdz##red})#!{1sKI41^Ah3ZJ#bL(NmE2u8r;}KT2M%V&^zKLiMY~zmRL$QNFFb( zzkqm78-v1K1yB(a?+b0Y8V^BhuED=<%k4b=ik>Q0NN5zEv?>7{y|L;FPcUc?ho1c$ zp-TLwZO?LCoF79o44qw<%7lNQ`!VYZJuega!M!ljSWe-rgNTOVVe)~0ma;(#gPD^5 zF_}YnlRx}LaGbwVo;qQWjWIjhENDzrsP{SofLA9stfSZuBjCfmHEi$K+mU z5C`w5vr{o*{q)vo?s3zW@{0Rn)AjvJpKw}#A zUl|%TeW)K_h52?vAG|04@BU4*ZJnuRyvdJ~dpOv1JQ2=z!_9iS?#<^Omr1dRvbq(Xk0UPVG_IpS-#y|6&M3aD#zrUOCSX2etOkLwPTg4eo0e>b22^}xzL3`Rk$f=q%&`g*5cfM z+2IgYKwcYs3a@>6h6CGMjL8t-du_*m!R<|WaPnA@-x@foKPq*XVuyZfgc?+&*>0cQ zVv7ri9+H>&XWU3AAsn~Fcst{Maiu)4*Cj83LVO(7j!Rq@h+WepcK&?hb#c+9F;*}5 z1f)1(EN*%jiR0=pWOD;wk(WR|kf}+Z-Qg)owjT&bKgNz!u{OpP%v%Y^A zQZp@r42|Ln2>N6lGHUVK>l#0~Sq*609|$@a*jv4i5K^Bp5%&Dmah7be*Qcw-S4>_1 zTrB#SO}Yi^^o6>SBYg0iYwsmF?Fa z<7ahN*&gk+iM7@*(LB5C4&~?Dl3bT}*2=8wfB)txN8goYl>=Pz^7_iEFj#qW!j*Oq zm#rn}pod}hLCbrz`OQ}`%A2+(ny;{UzVFewox z8NY-(B4guD^do~Ir>QH0De$J zsQ4iPK+tzgg5iQSe>Dj%mpQkO6w*!rD#j);c0N^spCq$a89J|VXgcuQgB?b@d%2$E zaBaT92NK_>U>ZJe`#442D2Hn-AojJEuUKxs`&u!r)4cP8@a^nZgV;}>3Z)B*Ot^^A zO?okO8aN@fS@i5gX_9d;{W$h0Vi~4-uwldzLl<8fH4s!>P1n3->eFpa5m8axiB*pV zm|4Zrl;ZN*g4WEKZOn#elsC2UsNkB@A*5+UfB!H$R1yC@7rlN?t&8#*a46C=A?xh9 zVuZOf6N~RJ>4hyX7yS?ydrNV9=Ip_Hb;gB(%UbGUj?S6F&aB{ubBGE3;~C(D_3Ks` zT~~*~Y!_Ta>4&AUtWH0U39}*rDiu#uFTO3+phgjetfW)bkbnQG?(Wz%1hgFlqx<(7 zWHi04Pl5C7519uIT5Up|ZTOlr7}$R5)y3T-Zyi@n^zveOivTgPr`&CFM_Wx6k7}mx zmQ)m5UiiO+H|$=)Lm6*=NYEpLJQVls59a|*q1C^u8ZGP>mjoAos1RzHUq1%5o@RBbL10?k`Qz;G;CgLk^1_$%`=F*#ogaXJm6>Klps4FJ0B*j5*{C_?>^)2En0OR z8$Pm}7phyBT5=@(;Vuud0JzckM*GHD?aO+@(t2y>#+qxc7ZsRs;vuc%J6Zt}f1@5U zk#bqwms8nyp`p)-%U?M(<=d?Pj?3a#T3W-RBPruc(r47R$m z^!SkB41tr^sLK_HUQB+y{z0I!3=5hQ1kWxL<9x86A0m7jEVaf8SR8Ng7GIY>si)D? 
zfbM>w#9rlj5m-8@Hep)PX9eWdNs1~O;HG`98$G}^GFTQ}?h1JEgM6e|sZ}{Zunt2I zyiRsqX)umXlE3pWv|!()+E6odLUD;SjG0B90x|QnnuDUz1Q)TG9x0&in3L~?H`TeZ zl#%_(v-9@?$!zAC!ZtB^Cu85ax>)}0-V6d;#@k?M zdNZ7>QTDokZiA@Rp~hn3#^9#1y)m?1FSxdc>X|D)0`6~e-17j;ie~JjhMLXF} z53lM|6UmRw=@UA7#NI5q+|kUslVyxbc%#<#&*{)87-d zN=QvoIKs;%w$rI?M#d}bjLWG9FJA@GRnP&j85rAtW{331kh%Q28n*gXbDnn-Z~(UH zP5V%iur`{cqG2}Tb&iLmAQ5!032r1jPsB>xP9=$J)r;i1s`eoX_iz+5^sR{TiDy2} zZEBu%!@MyXd=trQzw5I1*eFF*E$V&Myo?H?3CJntC_O)-Z$j+DS9=eg0v+ommY)*d zoA{r5wJ84k^S@o({|TRI;`*=XuZp1|6peH;KA^YbOg**-9(-=U9&ye1A!K*{d>>LK z4Oi*v!7<&`O;xshOxjR~tyj|lTtqWn&z8`L%De-^n53y)6wiYc6`SX$DzuCHqrtB5 zPLG)tHd})Nx6(omIXeM)7h+GQ>uVj!U)-}m8-ZNUy7!=34vp)Pmk+Uyc|xmGm>OJk z@ToG%M1Cs(whHMft7Z##POv9lrDYW%A9TT&R~!KwM4?w4@k^y~xJ7UnH-}^%PG{Kc z|D)`DpLCj9+0N;_Pmd!o*7M|R9>MHuyj3f(vcA334gi5WN=fU&Xs5`?NXmbqWigkO ztX7$~JrT4<|Ll!0P4fYq`f?o)-q&L?_;lX)sy}?$e^TFB+L$!VqAB|v8D+yW|9H>% zh&1s%e!9x{^7qdJCATRlQ}n`VFsAg7(b^M_p_BxdcPb?dP3Meteuq9&=`_uJf$xpN za$j_97k+=y=6&2^+!+`xc|sTchrTIbSbAo`BhEv>v9|w4fcHZL3brM4 zo0q;^w71Z${zjnRV0^jPT;5}Gff;l*AQ01QD#pJ=fhC|LhW%#kohmVco9IcmB&5(e zY$SBLed%RFJ#^OZ35b{ff&=OMHMj&-6!cz}a$&+tHL0qn0LNASW))qWbiO=T!e#NA zHtBEy^*iy6tOu5Fr5D56W7O2<>CS)lDOAY|)WT&N;42vSa-WS~{ypb)c?pJp=ez<0 zN4loTLrTa28286(QTrIbD#LrU9)Sf(P4w#W{(_KV<{?H9-|Wg1L^C8JTWi#CcI`}{ z&FDP@Clgf8SS%u+0l~7t|A|jL3M=-QTP4~&)3W92C2Z3_FP0z;Gy>}lih>CsBxMZ6 z;0`eFSu>m@R!aFUBL2rL=2%areY^vw+oZmv^im3^6#!eOFQpdy97Nq3NZtR~vQYM& zyscT_4`me8YMmaKsUW*mcd(d-j2#<^aTyaHT_H5wBG)@>iF2#=NbJ61l-k*p-T1A? 
zDhqr#7dg4(z4vWGB2pYj0+9ERJmHrSCm|WjY|rj-@5s{_4~X})|9*tK31;sx7_S#gY=rN>fu%y+{{+@OC$nOX;HA>a0G}z3=;OayN{1U(DaQK%FOZtIj zG4!>8mtXkHD(YkI@S5=E$HeuLjnTNBt0D29hjdD+1P6$iOxV9(=y0U>Eau2O35r}% zTK+x#8Pw8lU%YhJFMGT5c+B2}6EA`U0Y-R(fnZg7|KBYsqHF0NE?IeRI2oBIQ+?h9 zoBZ-Wz7;`32B>|3dt%fupiwNvfcEYw{)itsn({O_|81p;vbvyDBKVq~enc~XALB8D zizcMp?tMtExKJ-KjO+c^plqsX91(8qmbd-V_H=eT^o8CevVwXe;@gxZsu$f?resh; zQMYLaX=@K}VCg1?8fJ{|hHmPhV+e+)(wF83g*(NH++J?DZ|$)MJy1HP@t>*Zc#m*L z$7l=(Mr?1fAty z#09S9=wa?QAJfrt#2Q=2^<~+oQRg(&L-(>?Ux#N&1Wk?^|FG7I`oce`X&#!0tvWiI z?avqWX!WP2_N)GdqZMi=_tI<6^kK7le(T|^90ddG>#WUM61(_wdk_X%go&ot>txf_ zYw^%Gx>vV<&Jk#%Ji5#dh2lExvpfUon^GZ6%ECk+=YHtr8XHz@fT$2{n6_rH@!MtA zZ>v?SSq&0%;jD(Akb=#p^y2=@o9Chd-2V2f=BR6~KsxtJzNL@gG+nGr__;3toCW*n0XG$Mjzozi+KNEp!CYTG;&R zc<6Q&^>h7n9_Eq$63@X~PhKNx^-yHGW9Ff6An79a3f(4u&9s5d$IVb-WU9|TunL6u z5%j$DneT58AZ;AWut);8qIvY4_MnvDa+ep}v9fXo{c53`$~HfU6Utf1%aHN*`S$jo z+c@~cM-Ue}CQ87z3SO5Sw}NBY@arPjy+NasE9TQN(5vwD@-v^Ba%B`;=yWz$ot>LG!hlK zzE=gQgBd@&rv516kRtlEW1pm@L(N!A*Vxk3WL1)YIgZB$YkavCJm(NgWBYS(v!q=+ z{)dAF3m!k%l)W0+SYA(;W|05gBt!=XrR{33nt1(H;3sZ56Ef{-=0gM787K&!vBm_n<^Aez8GnIx^?7iG8 zQu?|(d^wLMFK%sk6HBz07rXmyCjkdmKHr!(yWbt=+$GNNhJ&U_>M=k7>6LTtbx@b$ zPst_OrBmZ)JRrtIz|wLZ zWx$5|4%&}#;YwD|0vDYq!EeC)6f)r@W<<4~XHmo1aiGE*W=0QCc+1gBz9E+^k6^Ad z$mmG{5Hj*n076g>1ka&=LxWmOY8lqAZ{#>xPlTI%ZTyq5wRe)N)datDZ+iQ7`eikb zK8s2Y#-J?ep0Tk(jXc3#Eg_w7w54~K8cB=z!_G5HCNtum#b+DwG#^VHr|NVBLPE|x zOfb_5Fl>A<`J6wGSAO-2DQ$hhTb?U$Nc2T_Lu#&86NDHMoqn@X%%y=>QvlDZOFC`J zLTq7rF*YnOGl&K_{tUN8nW^vn{3vmZWK8wb4K;_IAEyFdf&Z zFMxPen}e2R}S#2dUx0%t_sw$URN{ zq}v)b`09dv{#3Z+1Q2HwckMDcpS=ZPca}f;j{`?pbp7dDEu(0x1lv`(o_-%MMAW`g z{UgY^+-)~b8Cn{nH2sZrMBY^`oV^gB2aDUGd735}G(I0QXzv+w`_2bPGK!y4!rEts zr6@G~QWqzK;`z)Q zk_`4<+YWoYt1CjFW`=k0NrYu8e%(LfXuLp9I3hh_44~jN#)C~iZkSB0i2?`{G`&H6 zk&eXgB-q=HrIdO*{0Np8o1D`t$Rpgt z19X!;#oW(Y0*K}I#%>vEg6}c+UqBNXgYK>F^l}=@zko9;t!6kuol2Itj;jWLyAt!+ zm{3+ghUBQngfTY2YKZ9czgv@V=se~L98ULite-#PH_XnKp|%)vGHG*~b;etm=A7N% 
zqos7jI!}>J#C?a8_s;%ZpI?dxTye&i74JVNI15VTN~%8Q#lf;g=V|q0vZ_(L?c<-z zhlwJm=mp|}Y)0g~?WJcud-x0gjXizT)=^aSwEn{UoYO5R&r36?lZib#-~{6GoFgs8R-R_PdFNF;U$uRSA@S$C`-(Imbk*w|3aapt#r~2HorrDs z`904ERekYLP<3hrtXdSC1dl3%pQC@zs0pN0ge=t9YAMIACp>F=<+~)O80aobns`36 z@i`SX2vg^2>z-eae0Cro!ZS%=LaTn6G*38GL}PL14j@VA-njL&K3fynuRaofCb-u8 zZnNlTc4vZsrGm=bAcH&muAAuW90Ivt1XYRctrbc3mvk+xZI8GoT?zg%C9BYl1Gp%B zD0fR>CDAUN|An{q16MWy47fPTH&25A8{6i#ZiAa6qekYJIx!RJ1hB>o`77`YFW-T18ucs=_1uP3StUe2BEhYsC69BhwWR)Hh9G2XCsMAT!co&#o&Isj9ZvAAsy3uR4z z#YVh9YP5GFPKo3r8Oj?^LmO5QYXVdp_dmvf);zs^XPm}#04x9vTZG$&wsg<{`sr!7 zFV@5~XDJ*Im~1DKxfJ%iU66yjyI(Z-=O8fgwbdIm=vAsW^lgS|;$(ugd3cl)6&HSe zZm+g4UuyQiw8*)Z2lqee>fhuJ*@0}cW1Yph^GNv-1DVe?E>pkHNuJ0tY^nyY+KYqW zit$A5TqN3=AfpYBZ+}V{R1ALS(du|TW{z4a61e8RUA2(>n(0G*xDo&eMN{P{fsN@{ zi40mmy?9U4ZZl+*bOQd6bV2OU(9#CLb?7sHNg)pupdaYyIDiuo{0XnF+r-#nW$`$t zfV$W|D+$4-4#j@|RgLj?H}t`wXZ<9aMaHOyfdC8Is4IkP?e~elP7XUA9YWW*e#gDl z(y7H0(|}&=gqZPb=ZBY}QcUA9_W5UCq34&5^$bXBaIqMi3$|ik7}4e{V?zw=eIx6Z zL+PZ%WBV$?Vq`U>q;H;JQ7xxN&!bLy*X4-oa{Jh|4 z7rg2}o>z78SeF8_3`sTlR`QZA`Dk60Giono>@7q)Qclp};yaix-qH)XJkyDxox#0S zG&5R?tUYcA!~5Rh+uJ3Q5w{xVk8VkRkdC~OJo$KgBr04gdR4{mMK48h?NGblP{nCG z)Ma|j8)&UOHB!X3GhND@VfwXMC1F?VUwEt4L82g`%I`*g#SRIAE&yX7mOvSfW(rnU z`l7Cv&Y^Gdy~|T;_%fUR(!eIpuK4dxg1p>VZ<4@gu>V|jw0aD3YNQ2?FRbVr8X}W3h*D*KBbpn7xTND~SJ?M$ARHzUcthq~%6(EF}5tJW}rZAYci~s@! 
zftqTTgOd<8#Ag7Oh)e-K3@A>NccB(TZMd4t*FPpcjqeR}Eh%`dT0su7gc^)=yHTt) zMz8ZdlXxmwW79o8h{>A!P1%$E;N7X@TBNv}>EBtRfJ0*d7L+O}&N|=FFyHsb>~?Q9 zyMb@kd4Bxi?@CjW^z0r_woiyoS(WD6FC z!SCs3TaFTev$#|hgqn6h^W^Cr0c0My6FC*U$yxv*1{>Q(|F+<)>38$NJEDi5ymtZM z!%e921pyV}d9;4hZc||~W?!DQ8#Zt?$G(}7V9ie4lR7jC@Ul;cXqf}t%&IFBiyyl& z-L`?Q=}KrL0BpVG$06|0B6PlSub)oxEC7xcxGet-|Nn2w*>3P54KR}$SW7^T=V|+{ z?)+TL-_x#NP2%BshEPz_6^+L*fZeN6d9sZ2(wZMm-7PZ1#v93V>4h?{7U#)S2wn)F zFM~C5f&5h9_wC)YFo2wBoqX^j(xY%b5kl)$^Zd*&d3WTK*iMi#$@7lX+;^}AC9ud( zxz7pTF#z`wSui)b<>*d<>T8f8COjWkqn0tbF(upzOr;>Gq?88t)^blJkD{N-AMPnC zL1s`=f@VtQWOKhsu2z)Lw2aLp-a|w%F;YXA2xPo#OoFKa7}#0LGs)fk4i*jov!AG{ zap-jP2%D^%YVCC8x#tDr#LOtGUK76m{K3GG;;4Ph+6IdtHD35*pFUICf{JRBZ&l~V zGt=b2w~$Y~_**Gt&?b%C7+5?a3~>Zv(52JJlZVfXfYtYnvo72v9tHO!f;Md`Y__tX(Q^-> zm-9RUsDqn>vDWmqG02uri@)uXiDWzrZOog42vQDuW!&aA!`<%l8McwxsNwECfDm8! zelBju>=Yp;r8+ADR)x6N*OaH)C-K7NEB$icGu|Yy%(OS7F&Xf~6STi9mO33i**@g@ zyq-@Cb)#^0iy};t{DOIeJ>(NVN?Yj%c%kW4++-d1Fi7ILg#?aVi9b>W>tee00-iul zepeuwViCg!x8&A=#Cr;@e$Z;|H?iw;`-{8d;gnqz=#!T}m5&@cN-4iT>RzppdL!bV zTC4MM0iP-_P)9=hoqVEHspLH!NU=6ptf7p?74nRns0z)$oVdY=IN#`HU?K;r853@3 zel}B6=rqHRj<&DP*d-CfhmD#n#4jM5q&|k#OI)ZQpC7-vTjs6U-s0bH^aY0!i{kIa z@UuKW_)k^2(c{->7|e@+U^>9FVzClqLVJDCpo19y+de1qE84C>yi;{Y5?cO*<%(|q z$``uPk~VqdUng&?4VQ>5`4mkwoIwGhpd;3h!HOqP=F|g%U?qJ5jPjkZ^{fA z$x9oO-`t}+KCnbho1WxRNb-V`%*jNgQ`QtY^i?)rPa02S^86|mP82+Wuf!l_g0|W{ zTaZ^}JPI9{@L?k#0&2sZH!rK?tZRfT3S2{eCw4hgVlCoLjJ^15PEt=pCR}9q8^=XE z$661ijI?e_S-!-S6abXUu1h7GsOqH!tL8_LA^ST^&SJ))DVXsok?4sPc6XD!!N%te z%3Urtj3P`_d#n>G1Lt?7m*rrO?p^bz7-jF$B2bCt$A&5!J~X|7_;*EIWu;|}(0RVC zHB24?Enlg+PF7?GydC`026oTgv-j)jLgYxrqLStON+`I~cW?IJ0RG53U0e;yw$3gK zQ9V{(ffC{?y2N`UH{F2Br=qh(Kkx=&w>0c7&*-$8OvC7RuvOq)`jU+2uCyEsOH-+i z&U+1V(|twNp%rdNv(!~Q&~GxgVS~3R3zsNp-0?S`)Le_n!``_%(1Qo*6_o<-!~1=# z-K8c)uK-Pf#CP>*2&5Q$hUk@P;TVsH_5jZd2|0zdycOJN+mvf^_zKAeA<+whH3aHC zsXB4^^Te|eB`_E>AkQE33xtDo6QoTG7G+#Jx;zC~YTMT0J+!2?8l83Hf2J|zycP-7t542_;}t_rP*71x2A<6 
z2+Yw<{mUUAfVcbv()9DF{;B)bJ^*|1ad$2QRSFE4YZTrpx2}_aH;l$5(yG{-tCc%Se)rWASkN&$)8fWr1K&;#uxqm$#{`pSF4 z4vmi9q}33{llvm7g^E1;-Q@A$c(OdD_`Q3IUsx*;>KWZF`$d#dQv932 zfvxuN7lDi!_FL?dI*E6Nm^Y5&u~`a=TA$aut)P7idoF%SVy`D`78d3!Uh&&HKLG{o z?_uu@>pC7jEY-jX^$bk~D<(V(YH8<`nRHcXi&H@GV=*w?@GS#q;e-B%T5auN0m%%_ zXe>n{T57}r;|d1QaXYfSJ=`6>S2@1;fMrk&Zr$ZnKKuR0wFNz{#A7JSCNaKu-FE|3 zLjA2WBxMQ_Oe zXW3>W!Zdj`zH?QDe6wQ~*!*EIUl5RAAYz=77#fo4tMGdA4!4ji(Zf2RBFu*&trhiv z1a#2BFScl@pXhJS5)1`9*xQCzs{BQFciyGY+kaYPnP6%v8{VGXWTmYny&HGaqhE`V^rVaa7@8wY(iQT8Hbf@xEeocI@bTQo3VBcKh)q4LN9eyZ& z^IKQz{~Rs(ZzFlNIs$!}9N4P`Ke%5M4I7gvK#y{fE7R_&`**V@4Slsf>)%BjU#w~~ zWEv7D!zDi!Akrbe97wuxnOBhi&H}K)v;GtUGIxIu%s9}61tCe&uMNjBbJY&9yM!K& zeyWo=`_KYZ0uBU#fHLguBtK@<`G4fuFErrFL}>!*Q%Dm4Pc?g-dCT}*8n92E_X()+ zyfJvM4tkJY`ev$Jiw$R)#fjIc&Ex#d%Mt%9%Yp6bC;zW$}!eV>}<-G{NdzQffezSG++W4Ph}_lCMLihdLS#S8%WJ>>=? zkiNwKIyO@D#-QhGYeqPajbz(myj$!G+#s~0@ zqI!LjqB(BZ?~%gb7uw_AVWfm{YICtme*3e5_p^J>_g)efo#(rq_BoG>q%-*Wr$>31 zpyqd?uW5#j<$fDXtvl=eS#_;%YUKld8*&8zUgu#0%pXi3&+;#7Bv(6L~T zV7ZqvK(mn@kJ+sp)8K#tBuK1C6bTF=`69Sm9;_UJtLx5RTKh+HmN+#f{UoV&j z@iUaM&EXu#!&#)}hvPpx{WYR3>UBY-NWF%9>z10w)`+S1wO+d#zj8gCB7>RO9Aj{c zJdIlRCO7z|>I8FV<>Q~q^j`$8G<116ua&^c-$h9ff)wLe5Rj4SkR>GkGUKGYde4p~Y5k_U+vkz6+)NaKI~p-1PCa{=$d z1*04;HE1Hb`csC4YIXGqrPA{u(P^$pW2IG!T@YBLzobr^opiIT&Zz+EQ?G9ia# zxOxLb0GEqgO++B%6;{KCNb4I@2UAcp*q;`AzI&-p&8~di5ah~=KTM^Uo4x%eMkHzK zg{fpRjH<@|{kq6C({JSWF$3(?rfxFl-Whyo#w(cpoRQ|B`997^3iy-NndxVRYPlv9 zC1B*X1FK0|W>!r??c1tvg9Lc5SI;x}x6W#R0A8ak&Z$sSbYPd8kv&8Bjuy^wo;;pf zvs}@ip3VJ-qesT`E$p5ANBOvbEA$MEsKK154x)jcd>T;Iu)EaS={e7B4*k2FQ)kvqH{DGuez>RH zY5l;aQN3ay8hYvK-91pYAXlLcWNA~KX_7pRU!8m>*$BS%WO}|hIi)xUXJgFBhOn8g zLz)in_lk+^e=4S7GL1*GG1`Tm-h=y`IYZ z?$t5KS{{WOV?%0ZM`Mg|MEAS;oo@QHiMaN0T-R-sM3tFua`JAMUk$$WDm?JE#Z)IF zSrD1J_vjClDeFV3hrdr_PK+bC7E+RYHrfSYz!3V%jf9=%(u=zwF>Xqp;g;xH@ie^w z%Q6Tbv+k&XBM)54A=xJGK$@Q%(>pM5`$GC# z<^|C~PmP=G$qY@~9>2};X+BF&g(~&#Kl|Duxw|Mf;R{Nj39Cd%LOE++v7kCZjQb>W zmJf4c%D>W;dtdektCFup-n-id)?WmwjoXDfnkWmir?ni*L9KE2UbjJ3?=h6VLH@`G 
zqsIdrbW|L4o$8r#vvC=)+S8;^x1O5q+aF%ES%IDWxfSfk%BnV54Tjg7$Vn-}!3SV8 zVd~I9;l)cR-{fdEQv6K5uIZ;l*z43X?G!~xjmKKd3mLVUHjdkJa*72!B>^%VS86kS zA5E%b3nhx)fzPa!GIIN`YI1%(6db(me<;IVSy@nD+GaD9d|_FWKkAwI~&MujrDeuzbMvGW14V1Vex+qo$(zvQ5y;4 zOZ~!>_r1_qR^!J61=GwoHS3~&Il7?3?g0N=f-MRO(ycO6eQ>U?dfytx;X$Mu3TU7B zlkZ^chxdHAiC7$&@L(aYHTzqApS{xRxm{fz1I4dcN_~>PhPNC7i_pbMKA&U+>?A)v zN#E>;vPW2BVfMMC9hh6G`lcdo)A219K`_>V9UJL47F6S? zw{mRN?q4d|WRhHnO18nGIdF-#dHOtzzaaZ1cfj5szl36^KL*GASKp#vCQf;sHQt5K zeu<+Fv9rZadFVEeEa?4#k4JFS9y%56h1&3FyZSN$1`Dz zzv}g(`*+;`u|)RYmpUo~ewl;5jOzJPQ{tGCVIe>Hd^pJ%*xxy2$5tbH#q!%PmMLst zypQ9Jrdr-djF3}vdWCN)B&OVPWjqfF8zt}4F^9<}&Q?|L1Q&a?0e} zC~A!{;e8j{&ry{kUMUDVi5q0$b;(mtZAdUV?8B!0C!$+h^!F_lOffDPE|C*^BVVRf zV^II2#!U2HEJ0k}YMtHWCia$hF=S>NH7Tb;)p1t!?4C@_V`71@Dt5JbQI&XR*ps$@ zMqs$ukL~DHDuX@>P690Si^%xT9QY{^M`KkOUYC(cMiY9ueRaM2tVw;U$<<4b+p&eb zedASE+;}qVqp>a_>1BLYn_loJqpzO+jn8@BDZYWdFF`{E!#$kI%Gu+~wB|vf^A0>boTVA*z!#q=x(o>^IANuiy*HhI zsE~Sy)NxGt9k7;`WE!86Kh;rABI&y=T?F9C3M+5><`?*S-CGypgvKSgU2A>{k5RGQ z&loptq6EQJHG|EKXHpyrUzuqoZS4;g>Y9y@{mKdUDQXQ;{wZl!wO*Q+HtNn4QaxNA z!^_vZ6Cu&vx6bQCjnHmfA`_z2qqFQIj}HUk!I?3>&+8uhx-&neaiX34a{kj8>jaaC z2m7L$cN`ty^y;RLB0&Dz$i>KAOBL3om-@@qQJCLg=cm|@To8O;kKFhL8L`?_^JBax z_{9HOO4T8c+WgTjnrLc_1khc2k12kPKZj-KE;Ao-#s{>D;G`Ym_0YjiBzH} zh|7oWF=dJFF#deTk=}(g8@Go8PwmeIbNcRsA`0ZZ3{9tVjMu6(2^sbw(10huBh!dA zc3VVG6vfp$SD-%v!-9o76rRIYRo5LU9r3;|>`swnrDd)fL30rt`v8{H5Gfr7z>Rh0 zQDg_e>l4E$G~V!$6J59xdK+OzDMTxQ@C41na_I@kY zH#ZSsQAdGv`XP(t1eD-!r`O0XlVIVZ&RLMRU_3&;of{ZhA-QF1Ipm+P!7oUCj8zeB z=Z^pIT@!qj(PU<6J7D&0Jn=)lN}W;>HtmyBt|M6s)-#`N<;(a3PH_5mU~kt5;O%q! 
zcMq#xZue7v@-n+=en*Ub)cm@I+Nz7jdm`AO{bBfbD!pHc_{mv9sqCp(LKB(82KL6d z4mZzvLr~MjHPtQJdIO=xA3uhbD?6DTa3m;oLOwcWUfMhREnpr}02j;*TNC?ge}Dg^ zyjwrW`Q)sPiTi5h!|3iP?jf#W*m*|($C1qk1Awf_I)`~RAoycBZVzS2v=4TCY9etZ zyzHsg7rrk8xu$kh275p1HWTbJ%5VJ%N$f6_JVlQqI=qN_)JM!WWidK4iCYf!NehZ6 zOq0X3GZ@*;9IM7Iy!Pu^+76m4mqNKCOoGKqYIXZ{x=;g(n#Hv$1A;eb=X5- z6ErFSxeE*o%;%7&J!YUbjBtQZ^?N+C6Rv$}pMY55xNRtnbj_fm&%={Xp*b?n ze!BUYujaiC2x=3r$K9ndCGucOqp=Fz&_UZ@8s?&k&|S^H@t!R= z>VpsyQz7WUJ8C}?%Hx*=XGb^|Kb5gV-!pP{GFql90|_U?^X%^#O$VrK9sj{p zVL?O7oyFhhTCew{#f%r*Dx)S*_?{&-I={K9l^;&V3Nnl$4J#LWE4G#Gd#V{FaU&+_ z#!=5Tm8?NT4f{uCR0se89pDwBtC?EXyDV75xSMR8qZOQXm1wt(T^^%e-2C1sX8dw; ziKlvnME%b=#HQ}TJdrG^qSH*GrDGO52vyC@Cfs!ic6B>n=f?~l{(buNs$u?YszL1G zOw04;B%?{@Z!mk$3uk@?$h?l--hQKCY9fZ;l?Yjojy`Rl@9eS)|Dm?D!OC}k#VB{p z{2`m_{26^1iJTx}?n}vx9uNAdA*d2KMP5<m_~17b$KZYe4>OQZ z`)WiC@}(L454z4WDyleY_h$x%VQA?Fk&rG4X{15AdnoDd9zaDTrA0~+r5Qv*Y6j_+ z9vY>jk!~*Uy7yi8z4yNN>sjl3IBT7A_J8mF{GR9U_~N2W|C8g6Kaj@a9pxdE>*F7f zkk=x{>3+?$8d%PO6`e^C^&qE+`$Dfr`g)(#Js~l?)o6ZhyG^^VvoAu@#6L#=6hEof z-d(?HS|2!P(26;B|Njr(!#zMygdxB8O{Qu_Yn=n#Oe5pBH}W9JbUZd;R^M2}znbbf zOdmIPHe8RSF#f`kO(}02EmE9ULh13IWaVH zi}#GV4LLOYnIg*yFRv@56>jF%Tl`7w-7>FvqY@w{`W@I7)jzKp=*YyOXi%v9 z@*BRzm?*|h0)pGJX5I#1yN_Ta*UW}koGETsY&)Z`(B&z2x^ zk1&%Uc(od3cS`I~s#&^}6$m%0RReSL^>%6mgSuU~n@o~9%eh&Xy(!qezHmKVzDXt_ ze{>$x8Kgl*I=q%($<$<%#or>YbM@N)JEi+vW@~-zoz=GU9FYzL$o)zwuwnyIw)OY# zH#+h?{i{MelYa#jsK~CsC^P599C&wjQP(ywCv~7n+D<$@ql#4e;_*@{vC!TIA!tsZ z5p+F;BB?M;5xY0F{22rGzhT6Bdud@0flf#PPll^F{o1reS_h2;Vg)@slf&aoa!D2Z?d~7q-Z_Y zS^c@~JLu?G_V6U3aw5MBbMvnXr$TjVuBfMmFzNNnx363ZI?&r>lC$nue@Qn2a7A!F z;1uew+!-a5ei5XJ`7^xiCkc*tqgF%%^BQ|Qzns(t3ez4{w#m!_jLn)tOGPj*$c|Jw z-Zl~yzIRBl2O_sYQQNTRF2gQ^xVVIWng*YOXC3Sfs3^$S!XPdjfo@oUjF5*$ICyp~ z6rlq-f0GecoyPQ*h?~_A7w}rv9e)UZ?vitz!eOacXl!RksQB-%TegY}u7aHK?{5-% zHUDNFKg>sTx0gB-Ns5guy}#BI5EC$6OZzlmIxS%IL1DqnajR4rS?HamHiQMnzkUVz zGJPYM34U%o*Iz=fLx`a?hGTW;nCX!A|4EOjUzyOJo(%8{G=a*gU{y3rA)-zE!Kp{1 z>#6ya0z1sf-Gp-QsRPS|pW0xaT5wMT-&O3)*j6>YUc({O$$>X0# z5xafg9|V 
zd+8KV#(B;jFgE$gr9m1L1wm2rSSMXFBGQ=6Yx71N6Y6vC?5==If2abTyeb`qqgn)2yo>a>Xl z_r&c``qpkfrQC8kAHYpWeOFr7QR#=TZOW&huOSYd$*Jh@qsL&6R)^ z%yxZI^MV=t0Ot3TMt~~j`;SYZduvonjh&6g9@Fx0J`#1uqaXG65nmf7A2Y>$vo10m zBR_>2onZs((p5xA7@#${e+A8W4EHpGEStS35RN!{t(gD-Y44{O7PE@w7A9ox zo*L=dO3MsH=|o|OQY)+FU3sjakulo820kVV=JmveWM6?hjEg7fH*)6KPG7MGWz5%b zmHN5!t7@4={k~?~T9J9>-W;%3lXu2gKN7L#-R!l({zFxBs&~=lRm3Rmr{LX9hc-%F zY$}rdF~f12Ja}EQ zip{3LX{tW053(>A%febxCrJsnoGl2a+AhmRGn_X;K_N_QZTiJ3Up!~y(yC{>K0ZWT zDoZBNpR|Vtn!7TI*nGgpr~)?Syx&mfayc!#2hvkb)_}m(gPIsNg%OcAtz~3QxH`|v z{7{=(!U9!k^J4_VZnM-m4n3fglvDd37l_#CW}7yJ0Dy(pU1fjwTxb>eiA13 zm|O?_RLrJY>WZt6PG9uboPu+nTIgXP$L_?;_&MICRUF;a_&(c-nWaY=V*=Nj=kBUh+ zISmhBhoOlr)oEuNCGM!?925grM@ONvt9#8NBQ;w3W@>h71r}&{im#4W`RKj=vVY1@ z<}B#Sc!@aHM^K|T)phWMn82ib9cQW8C@|1`ywyBq*wLe1M;m#j7{_7SzCX2*C24&T z5UY53`#+(r|F&^I2zQJMBm_939BzzAn)4Bz$A23cZmxwzBi(0Rr6yCzES|Lu6yw`~ zr#l**z{hUjFoV`<2MqPw@F2u%baZsyZE<*F4Vjz@aaG7)7VH)X2|N4^+1u+!Y9vaL zs$2&03ens1+pzl)`onepQHp91kI}tO04q(NW+(NQys^Sn-M)h!FbWsOunjmsu1Zxz z13KJZQnu)Uti>wuDe zdy^*oyX?BBSQ;~6L~sv$3a|wR~vbY!i=tb-D$gb6sS6_RBdBn)2LjNLiO}~*%`Y?Gs;1A9*KOe zZkXc(FU&RUsQ9$xdEhLQDG;u!(EyE^dHz7q-8=bc=Xi@>EZ%~koE8oOHqX$o#uu>- z*Ok;|!tcO-dx3WpG&`daA^kJiu)^|&&c}sVMR=XZF&jEQ}R@dmjaPdG+3 zA+!{o@yw{_zlXd4ZrXJsln1nFo@K3PWB!jVnhii+KKm96_yn(KWo5Jvqkai16rX`A^=N>YaR2l zGhn0Y!2k&0e}eCqr5!aCElb|J5vU!4s~hYXskB~vRf(G}uD}OE)kjD))p(mOqv_N+51i^$t^3rSo}7`!VsC3^Pva{r%3VG zM9?TR{G+sIfs|}fr{+$OJPI=pgo~7E{$>0LSAbVVf^|eW5?=b+91E~G(aG7cG5mc(tDiFxRkL03w|Dj=q+-k3 zkq>&e=QEl^3myt@adu~A*0$C1fIS_tNOs@G*zEGE)*qI|cd1l#ks14u^1{JMY)X~E zmmc>LRPC@p)W3yL|CkOu1qXLcTwy`bh`mI242g0n2;L~QeT3KM8x+yI*m&EdY5e8! 
z`(g3e46~n(d^~AfpltoBm8wn#8U7Q{j8<}tY$9`RQsv(0MQ3mNlP4>>3y*{S9DkKd zfys1MH2P$@bHpk}j%t0U>gQh&pUF5S?}WYS%%(Kw#F_;f6L>xv2}%Env)H|7ZWbpT z(l~&>w*&g3$A8M}TZ<@g1a{#&s_Vu=zh>hV2YWN*!`iwc)G?^`5%Jl-%ta%pY_H!0 z@C%YtTO~ondoex~|7o=ex&egf@WvVsAOtc$yW4kG!AE|Sa?-U%tzMru)(0Zc6^1gg z$TUxLgC!|djpnclK5hNZxuTa4wKG>fMYgEUo~FC_E3q>M*U~3@p^yWdbn$Z=?ZO9` z%G;G^Ql_vg_1WOe?dzfu*%ywUNkz{NWnAImmlJl@b-I{7r3q8o50E5I`P4BM@7-Uw z2C2Pmq9M-=g*G}c$I{P)o{PxY`e3m1rVWqVKV3)k0|Jaj6YX{G-9D zI(RVkXczz4;g?f7t_KgU%SkC&{dKLEdKa6!a_G!-`klW&V&FY>3XR{5L7P7Oy?p#5 zEnrUwR)o3y)y9BC1e#O%TN=-mDp@+M&-2!!t$aRG0Oz>CL7Ozak&mP;t(|Xo^8OqM zmVvbuDmbGgV@BZ4l%^ZLvLGItZBR>dz7SR-QY-0TO+^Y^Awz=p=MUe_fzc9Fr`)11 z-2eQ^Ww^Q&{Lu8&#r!HnciQwDN86!8=*UgMu^;V-Q}_w6B~AX3B@MfJ!eq89UUKKB|Pkw1~enm z*%?HMoY6`}n4edE8(I)Rg@)FfXpb13W?5Rby?E7{rQTK;$C&yU+e70yePnUxGe;oJ z3#kZ>Z?ih~WI&3hE4U0DUj(>}F)hBJ2`i=Uh*N(}Cs9@GIlXqT{|=y3x7PlDJ<h7iRtvq*IhJ3vPYOW@>8H;K;Bo?4QBzkytTE*FXHdX$0<-zY znyeofCk`u=hj!SF;fKm651yy53KG}=2pjYgT}QR4#u4dZ;#0Y>t!u4z`dUiNRN}E@ z!oS%kF{b{1O5Wp_R5AaU2CwBBviMWjLm8UK5OILlPn?7o!S~bM+RRU4xD}5RdvFDu zaBNiiNaBmRbmSA@!SPz?MLx`1-!$Rj*B)EZpy^*O`tgR`rKci3FHe9@Ji{08k8iij za%#Y~>Eg@)rhH{eg!C`ZENKPVe2$32mx>htIQZ@TVF;EmJrCvKc>Mkyc@5A3$YRT` zxs@}wmYe@PAOw)N3VetV3KRsO$;K?#L>5 z0oRdD6#V@7Z?1?~y}rKJhq`t1%KV%dt?Huym|t9B5`uAx-tqfb1k`KQj_ilSK0`xv zvT3V!t*hp@IWhroHYeQlBzNtKJm<6~sZ1$pg#z^cutYAsG+aQba)oZZBLM5iMyZlI zz&yjr@!@{|1igMYUwdXrO=`myucjT5;XTZq!gT%6(+4o2OkYMJcEu&Gi%G7NhD=-s zXz6&s3Lt=JKMU1=KKoVVF|uBv(thS4FdVlP(;Y3t8Bicc$R8)r>IO+wyL{}9tn~cq z4}wn$FP0zvZpz30o|;gDK^^*z8nr{cc{G53wU0lKyMimQkQw_lGqgISn9V>NkX3B9rZAP#hM@u+O@~&TOio2WU^Kv;`<5 zm1P27)fuBd#O!;#h|29natSK<_iuWEln12@M8t_jq8h4+Fh!v9;WhEXgd| zGI$ED-RJI*@6Mi^^_$}sM_*AMRH%QXfIy!SYp9xXfH1(5Vt9pX41W_EJBNinw4t?* z0`0UZ0?4hq{zz4#OHtq;M=In~(z36TYw#1p=Zxd$Wh}1JqG99RHg|l(Bf)?2*1yJ# zpMyCS>=`hGg@tmaoI$V@n0pqL|m8**u<*axgM{u_m=JDCzJea>?v! 
zevr%H0am5gldJikN{jz;!>1NQcf+R}+C1}O!Y%6}Z`bB}|9<}dG!Yi(?VpbtlA?ABOP>)8>p z`{ku39Uc8KYk9%#xa>7_J%wd7|UqI;9SoRPf zOGU=zG(Y=kF%eferS!_;jn=h^D*~0T`a90Xb_0u4@&i-yC;O#I5y{T|gc_}WB_v1M zRvHFnH}Q(Gm|n#=Pc6 zaY^h1z{I+CWmO>`@$|SK4{htN3_e&>**}i?jrr300=j zKPvo~9w{Z?Ma`CeaLKS6fREI@06Gz+kaf*n{7A|=*M`CXHERKL) zZFBL}eOgH5x8`!HzNW8IlohKCTkb|dk%Z>_8KN+HU19>~R!U&SeU)SzSz9Er86hxA z(<3h%FXt!}$zHVCm|wv^F(&~fFnE3gyAwiByqOHaoit6c-Ri9xjcTu7 zx0x_A0xW#q9}}yA*L)~mxGY>kU%T6Gl`No$;otS@tNxGxYmdifu@I<7qC#G8BAn|b z$Bo@0_P{kogwc}<0NDs;vxpy~GxB*FWRyY{;RB!rOd~glt&m@>zEJW22aF;TXRNRC z!I##qEMJ$+ES_VAJ%^o^P*2l(V!W^P!f4&Tp}^Vi=WAm(j|;wWbawuFkk|B}0^Swl z&qhS@07(pFBTDWAfg>)V2HS*ja=;%v`Jc2=^_cwJN-Qv8l6;$M# zw@yjx&Sh#gLpR zY!0GKt^3#RBuc&wX!W0CZk>J<-6Aw+Y_9 zbSFO;0305_Hg#2I?nX{8W|JEAjC-z|=cz$p{@9oLFm z`aap}E*F=8*YmbzV^^Z$*$U@pUFEdadpbYYVZi<9LIP-fD7+stfyxRuj&)ye)Jf|;f+=!j$Uh)D_UFg^$=CS} z6C7OWw2%LsTrwJj41Wk=$Y+0zKycP{LG~C+5=-j37gAY^$IVttqM|Rs2)4DnD5am? z(d}hV3nO~vSQ5J|G1HO=bfr13s0n=ynTi(Xfcb9knFrM#?re`uyW;P?8~IT7MW zhV(gr?pH1HOwBcX5ULl>jaNh^w84kaTv3cAJKyi47u2y}m*E`-9Xn|f(%d$cml@Ct zaOu2O2;Vvh(e3($c|7DkJ3OD1$HUkl8@!h6RuS3RGj}-krP3pX)reCUtC4{usJGs; zVlD#}F~E1T$}2fG?cORtCN0P*h-k$T=<#zql=~@{(D5XhzCsfwp-!rQMF$8(yb>Ih zw_P;8KfapsYor|6SN$D$F0FXg&8u!XU-158zO>L$*}J!+o#>Nhm-u+L-6Nizid8&E zb}^Szmtj|`$GL)Gk?hR$GUCsxeap3)rb(uqf^dQ1?9p?rUB?XZcE^bUcU78wpOoZk ziyGOh=E(1VCLYkqUZh(#1`WwHh7H~0Gcs?##o+wb#dFCWdL5}sx8h1mh}O9 zcazBPHwUNo=I@`Vrp5njdr&KD{$$Lt*7QN2vUe2COKL}$_1Y~({0`UX;Qimuc6hb- zrX`=`OA!Ec?6@f}HjuH!$^gs4*GA_=z=0`iSp1Im-r#)A2(s(+Mc*)e9?~-}bhbN* zz+6K%)G77b77?J26_kSgGOGXr$PB{_#3bPt(iZ`B@}ay;F`OW6H6E+|YcU9d2`8C& zXRmEp^)y4vzQ!-&ZX$x|$qrA$hpEih*x%R1?{Y7a2Ck2HH3A-VePEQEXkUy7-QJLG zux>rkFgLTVA=;@O3VC!I(aWwv_2XI!f0nO$_ui1}S!XVpC{@K%efD1EE6BUYhlQZAy zqdsqwUj9}*4twi|G{uKNN@Rq_%j0lDHpmT8iJr2PUs!6$=uJM{c++t~IutXjQrEE2 zoNC`Ue{5{Q=L4MMcZf$m{BLXGe-z0Gd|m)(xlkos#$};zkv2-2j9Gb*P5!~b*I`oT zsw>#M(QA&zuNk|mU4!9giD3yH-ZDlW@x{Dd2J8v~$XZn8J=NIDBGRKRhEtA6ogQ;0hG`18`d!qZyvLKY29fsK6zOcdtFtt~uIa5cauQn<(88(&Jd=H~08>I_m`a 
zIh8=pKM=Vh?CE6neOfh4OYQyJhbBEQ?oURfQm-=f%PMEO=9uQ@3htuOy=4qaq3$Kh zg@J_ij|%%l3#?z!)JBm%IZx0anF7VI&2eiu?l8<~lm2Rw{43AQi7ydrP??a2uz<+r zJtbRi#n^w#VoD3xnkA?9LrXpuPV}a5!;a*PHn{$J>1|b;FjD{E;O%k~IBr69{?qHn zZWHf*!;&nqTNV|jyeBSgK!I4tL}%LVIe+UfpbfjXt$~}>4q4&()Dm`IlVm;ZuwpBJ z;b`dC=*!VWWj7Deowmvb4QrvE6E4^DqV>lpgl@(>1x6WOJ{9ovd!_Y4b($tdKSl!0 zgo=Q^6j5mOunc7<<}G5|Tm=dmC2M)M6eMP}IOz#-aruXR0!g z7q*!rO2ut`ec7%)M23xf@Q_-(F1Gs1`pe9!zPYvl{HXx?#2kb@)kt+Imr|JlXYj2e zO7pD_CPQlQeoAIt$f8brKLPLX!|UBVBvQ4yV%r|`yD=@?gVox$sPJYrhs|{GI!oJm00Ft|mefO- zse4`M;UEbf|6;$+;Yd8ECrBA9x=EC$h+3(ba!shjJ`|xw$91ju#9cOchY|pTu7rR| z1fjW;vWDKA{4*#bnBi_FsU^JG1)I)X2C+D3%0}MDe0sTa(}v`AWgsr( z)649f=&K|*{NCQ^Ih%oAHnpCA%3oj^J|@p{ETgVeY5P3(W+7e|Pw-6Lpo9JyrJf4w zqQEj7qMI^1TM$SR{K`?)p56qXluaSWzSW` zqVh9ry^*pT^V9H@OI4qt=(FOj!Vt`kG4)sfQ)=^sA3iXoidjhxKJ7R-u9NEwb*yO= z`hLYQP@`=Lek$HEMYF-sepr=JRxi`~Vz=D=RMN4{l|NrR{%v=PdyR^@179X}Pw4rX ztu<UxcW+JmX+kM73!tmgg*oNW_4OLIKy2lrKwIH5hQst*%VzeIo4cI?(IJ{Zs)a-ZuDiOB2R8{N=R%>uU3p zFC81N_Kz=^Bfm-~B|9aW&j|H91Gp$wlq?wMPqg2il?aMCcLTQ>$vM z)tiILz};WI{HFV>>8-b?k^v4O!=UAQ4?zUv`4KM&ow{)Pw7jqAC3RiujlC$F{r0M? 
z?bnRVwPQ0M(JL9|O6QsLN$WG&%PJyX_3cE%Qb*k65VNGPFy4ppc`<$X|Ey3&hv4}(zG-FV1tf6qDN2hsijBJkY{z>Bt~FJQ*(8u_8G zVgfTQO0NJevOAVKF8+SvTf|fW;+b=;>#_)d7H1xLTFT|Vh}UCJJN*~CvFqr0ogO|b zz5O}AYJ!~}n4*0rp#w9P)EnrHpm+Z4#7{HFj&dl*y8ahsw0U#xkYP1>YrdpzeZ0g4 zO$`hS*LHoKs`mu?UFL>O8LrwJ?Ij4{-Rtwy~& zH0#8`eAaTZbqe!kDZRvC|c;q5CLrSA*mt=(VSi2NNl>nia?U*itpbl z$;_*0hh$=*t%tNOzWyJ*<9}@?mr)-0*Ji-^G{mJ=Cj~shfc=xo@x|`7EXsXtlj1;o zwFL+%2kT!v;4l5%s{ShBj{Tk^{&Q_eILoXD8BpJ`h7!YYa9O>*RDMGpWgR}gg|h}T zJR#mFo+hiE!jir80wXBiq+a`S7rN2y{*!CjiY*mIvVQ8i^mi$Uj>qFF#qk2y-|&NQ zqx}uhDwERWg$<|hKF0YL*R?K+t;p1WMB-zh7#;bi_dXJc?C1!vyrqvY5SMZ?2;Oc# z#=G5#AQ__}r0GyH<{%{*N+U2MwI`f=<4`;eyR7f%H=W6EI4VH77g&UTV2+=$-%p}Y zADC*ftZ`2=F8;D90b0x$lZbMx!UDwY6(Cb%V93bzyu636&eV!bJFU-SrI8zN#TX%= zt6Br32rXxE+L97h+91C>+~0+w=$Csvr2ItPz*3*IT!4cz&dS*_b%K=ZImT1>M@|L` zTXjaxuRcoH+db;RWh5p+%hw-NyTet9H>NW;Zagw+)=nGQ4PFbxvU>9Y) z@JefYp9dr1UtEsdZYp7zTkOj&{Z+W0uL8g!>}yVbn3TCmPU?5VPP5KM^-ss%aDnG% zo_a3ug8>w>-C|q{8lY>bI2q(WWzel$4K_|cu1);x{Qk0$0%RCeMEmBia;ZK3K)fgvFjT@bfE$67y`p4X$(UK0vwe*< zScTkDuH~QoJnyyHe%)T_UGg$EAbRnYfyxs~*pC2EMw4wFhe`0m-rdLcz=YsSQZEVu z+a6PUV(Sl2pk1H4P*~^FT)pOpCWREr%XnN9+Lrb2XPl{re2rK(Khs{71O*N|rVzP~zTK4G zo2j#}kY!_mS@^AI+Bf^ZB6?d)TM0f2)5JY(&?uBF3 ztzAs)?r|s0_CDjmMd``BgF>sO%o{9h86&`77t5G|$_R1%b z>^`O1T024wW-!Dc8L+m#5#W7kmo8Z*Dli&Lf5|Qa|JNEF`ZR%bsb#VHR2_&QI#ySL zoGk6?(Fph^c2ZnV)uU~uXllNjcFe`AatLd`Wz}$MspFnOd``>zLb_{9FEl*MHdobz zsrc#Odgh<+Qa#b? 
zc;xkf^~~TH?I*!whrovEHy)W!qF5VdV)j(6lzTdf-}dnoj-25OHHfLG;R*_cL*D$R zxqYMo#5u6k$?|NJDr|9t>sU{0tZ5uu@vzN6MT*kUak=sUdR90XM$!5Kt!lY{zk+n; zUiQSlS|K7mo-GXyPqiOig3L?Dzx4Lpu9D7fW&#fIfP>YNR<8TysXACyW*-dfPc^?& z-A9g50|!4gDmTt`J?mQD;uhRA?8=>5q1S%XvN$}h zwaK*45|N!{y3>Y^Uym{lY<%wL7jP7iqgshfSh7f)AhlNFEsYJo{?}3BPf)ZfEa(t^ z(-Br3^O0A%)v>1W)4Oyc1C6G*6RjiNmHOAHGGsR{cGV%Bo0n6kvP$etLpvpx2=9ot z$?v~!uzrTJ{@Dv0G~TqSr3Y?Z0{Ue${y$^bdGMXT`7#|A+LRg{bBFzC->TJ_spNK_ zYmfsdmIebN;1B*_V?%15>(mBDnK`9G?ca1!pa--@Lkfbgd(E;GzXEVaeh5JS;F9N% z1d)w}wzacQ^g8GWZXW*Z{48gJ22FGfW(00=fZQk)UOMUEaT5xU1-3kD7fph5;94$xv+jq!2rsP7yDa`is<4V9s8#lgqXB=o{QnsvP;Bdf}f0CMyj zXjo&MB{Kp|!$ow8`_e$}3Q%AhdPhzdFIN@Y3oblZc_UpRVTEU3wgeFefKa(fMXRWJ;?<9H@#q4|u+Xa0=Fvr=>{Ft^Ev-zIGwFjS=s z)gi?V^Ta|B!TvJk){Wm+4>9*KloLa|+SqbNWNX}RA0DB-SakKR#p(0d!{ztghG@vY z*K@Zv07f>yrjN8_OR%LAj}rN5rExU?eZm*j+OYt0f=N|LD%@{`)~NsiP=gh9$eh4w z#PMbG>1`rp4#UaPJO>3@Y#R2%@E&dGQ)66_0;MySMLVRWa_>f8hI+i1f~OEB3hy za-Q`ouPi$aJgUI9RW1KV2iwPBYkzLGnH`py)VD+sM2fxE;xGsf$7f^zMG2|aP@Oco z;1IB*yZhc&=TUxMq6wtp)#0!#eB+mi5;O3Q7R&6-DcJvKMm2kOve)7(;qgO6AR zqkQ#+JAd5-#J)=L77)Vhvb;`2Sk!0-lymHe()&rkITMCSL9eyPW<9z8)H*|lV8Krg(^@oqv-Yx>3k<5wdixDYM&HSB--0 z_%Bf&@#_1$nY3_i$o2a^9+x9PpQSe36t-d4+#~L6WAWXu!`4&jl7Rk8cG)C;j;IWs9lUDl`_neU+UFH)v&cBO*f1th zj3@q4V|;sF?{y0Qq1|H|);(SGU?$-7rFemuO94=o5+1;G?-72|`lZ#zo z|M=`dKQ7UmexvWjt*9PKc=PRy^-YGQrAd7ee||0AcGos4gvkM$?F4k~Tj^fGzsvQI zS*3kGli<6Hvm}#gn}HAtI;E>^q=qFu^-{HdRvYzJY~dFA0>0CiYI*H#*4(im?K6H& zJcmo&C3zrz-4e;KXQUVDv}-?DiX#vgy35e+A_F=)3r(2J0-{6j1J(0DVUzVK1TrAt z+TEmWMddXdC{mA}9nd1|%98Hyqo-vX-J;o+@xj~_PQ%Y4)Zdb|S(Vd(hWA9N(KX+s z_cC_{ZL3*o`|q~B)jRboZQ5Hs=CbE%zHBMNu#IeWF4@SgZV7_f`joF?Uq5(M^Q2h> zfHTxYaj5tdhLp&~#r5}Bg`e&zH6PD#1Z*7$x|qu%@BQy15Bngg(yEFI9A^Db+fBYw z?A<{kzP^0e{P5<;g^Vmjlvi+w`X@Z`ebFcR9^Dqoc$n-2ADg#%7m1a}7UuJor9ur?Lm5kT>Pxsn}2#^g(+2CLQ-}}%1+Qa_G zh7@#@6q$;B+`imP#;f#P&L9%0?N(+F9ynWjN=>g>LYPq_K~>m_v{^uFJ$$8hW{PUxOkD13r$wg*5i$RKhfn;galST& z-*&nE?b`d#xq+a_?N?kU5dIm-{aZ}g2jKv}#swUhz`mOvmUaS5bgLxB4nMT|Ay#YY ztruLH+I#qL5TQGM%enLI 
z6?G67qzpa*Bk4tCT22xh9i*ebb{pV4k8h{_7Hy$nsn;WbWq#?VGd8l@(JP;W=eZ2hlJa&_@RE+4 z!)42ftlK}l_+c-mixA7Vb-}8;a-9>dW5tc8*jFrwzvw6^O|5XRQNA|`+C6|?nleTU zFv!6p+B+AVKVU5J3zl`9Rbl0Sv(Qeo2qGUp_tzJ(wH$)B8t3);hZXS8s$8%0g&zNQp~3;4c8lt&reYwBAB!bF zlIzVs5Ju@#4udXmX$eVHW{v1wx{x}(=oe#M;#?|evjgeMJQGlOhsXB_%i@*q;L=!k zy`hP*VSXXbLGiSKUb{R3yGf!}m$Xs!lIx+m|16}+Uw}!1tws}Wm7sBwM_U|04siVH z-)0-Pn*8)!Cql|`&ovwGd3vSgT6A2a`KQcN(vumGt5#xeyrQ-Ko1`9^`?fABwbx?%Z#&uhBgbK5f+w1)T8TlGZk75Jdru=gWjU#v}YZnx= zUtt+l=SRASCsdceI(l#s-ElA&9t}G2(Axp-eu%ji@c(W^uTBJq7ON(;QeNFbG>_3 zSxQ&%nz)@#qHQ5g+qsKLoS+m!1(+xPWMo)FdpuOtTJNKS? z$OxZU2PFJ03*x`hvFpcG)Moy;pQc(e;qTD4FbPZbt0uLe4rSPtPWT~nsp}xUbsz3{ zb*qYMCFp4Q>fiI_-FBPbE?4NZyQGMSO*I%urr&CqiekC40+zYfC<&T#v%md0(tdO% zxsKJ#cDV3S`RBW8!x>^&!>bA1`QI|wEsW23@L$F1JoiRhtKl?xa8^FHk-NfWb83_q zauQR29`rkCDdyFuxUYvnahVwW?K4fuG;;+OO@#Hmkn`BxB{_SgtY^_)IBRdF85Pu{ ztYb(rewtWtJFyscG;~m$K+K8LIEF}#QDyF9&#D)osmje^vU^k?x3yMbI@&zTat@Vp ze|!>jDCvw^@3WvEEN-y;W4*wH@@&CQicqLwB>YxS&p=Z@_VZI6l8*s=E**R=emJr- z{_fQtGYveplXngEJ(aca;x~ZY_qHn_9QABWoAs_SdJXj025J-{u-zv~V>SL{u@haZroWeH4}zV=6N<+plT1d>9`q&}cX|4!fqnod*yU z*gl(W;tc>$ry=iJqfU>V>Go+gooZwEc5)lrK|^4)sgg|KO$p>H)xaYF-b482vg8Ne zJX`jw00r11%+LAkP&Y(%1G_rNH|pex|k#8<0@p(HHe(q{VR%V zZZA~%;GM<@T^c8bE~$k5b#;&aiOET^@Ou?mr*4uN*{Zow&~lJwU^PvzexD=$bG^H$ zx}&GW8VX77-{y|!P7Cg->>*F!HDxTk%4lbAfji)6>Iz{JkBknqpb3v{H&*d6cVmjO z=Cv{~2I&bJM?VY}K!0HlrZfo0gj|h8SjzET8L(Sk{S|zMoVwqTWKh53e7*ZE%6-^i zRoFsc$iLo^ir_xY!2yW=(FJjvv{`6PqyLoU%E0VhSny+RXgi^@(TERa0g^P+NPC)`ygPTO_!Pn`}&iWKr5P^BPL!XkX50)jIM-%9Lr)aP;- zoU^e5(|PXbE+Tow;g@F)*is5xPqBdGoDuV(f7yQCE%zgsikwtYS^x>)H@yFY5&X&o z1>$wT!aeB_ZeM(u5e7W)>JTlt2CPU90h3W%NXXiDE-$p?hsLrIkMYcRu!5!Sui;FG z3Zv}UkXC}w@R+9gL&Fcq4$5_-ceDJMf2DS!cvsU~fa!5e(exl08?`^6r}pk=RDOB| z>0y@*2Q=gfzkX^W7MkexwQRsT4|Du_B6vo~PnbZXZ|7}kBR-qGUe{bH{vz|QQK+0vV|F%IvcJ^I#C z=TJPPDR&@}?ZdXkY)5r(^w92qrsUF~@pC)9O)i8k3|pM>fsBgVATSLq0FLp!qXC4+ zXl7m^D6j&QbkPqFa=R)UEq?8xe^I}cB=#V`-%hHm0#zNKw72=@~!2~JdZ zdOGEIXld$8Bj{?$v$*ny#e6>u`iR8zml`ZrzE^ihK;w<*J5hw>6_JH=O&|)x4$~tO 
zbzTPW!;Fg8`yQZ|rcK}AJ(5YY`?6=!lA;n0+`UMCw+d4Y>jwW_O7~4ih=u1SyE0Mgwc7<6~TP|_vc1Bgg>ODNq#Hw*#-(mgPg(v5U;I2ZqQF3vjVcHix_ z_TDq!?|a_o^*t^FTlKQVP$_GX=0&X@&L6H_QMpciBVu;0upFlVXBrQoT_QOOe$V+*8|84q3{;75p}?vP z#@7KI**9%FrGnf`1dPd7Da!Y3_`fh0s2E+fqUwHr9+kyms+R5V88?Z=fK74-|IPMU zWC~||cge5gGX5sR-^JG;p1V(E@DRix^(|=bMttWgyTD$ty4kkTak-{U(6Pm=kGC%t z*_@hPf6qOo?;}TJ(?v@1n_aijUF;-@BwN&+z5DJ@(G?l9z$M@O-}-`D|MVMi5Q2N~ zJ(q~%<4xaJ9%iCn%GZbx!(Kc;ui>gOFE8S_+pkZ3A4AnQ0;O{<7{ zo6=Ip`2;T9T2PTe)mEJ|q`!N^62NH_DAD+l_V%>Rp~7Jv{kV^|?I!zxBQ$ z0#^->Z@AK)h}8ubY3@Xb!A{`!Q_tZ8nMk}FO4qnS!O0j-b!c2O(m`+4FYGP{aIsa2 zlQUohdl>pX%k%4-k?MDY@c1Pw{)d@NK7aM%7up2+lI?%Vg^dIeu+?>7&F82)is8e zK5kV0FluKMrh`w-z-1rlX`Qo0(CXBSqy^2(F-6OVOr~WQ7ue>0%j3}HTj!xceE7&H zuwXSXyd{m-e8t)ht%w&_#}=%q9V7tT{Etc;w5Ia1;KiV}mfx`JcN+3I61HW=%Uaah zm7{uzFgx`MUDm3()8A2xhG`Ac^Iz&gW^U(kN04oWqMc9n)nOrHFR&w&^D^clJVl9m zAKdqYIsF{XY3Cta-Mhj*4k~*kylobVQ5q*H4_d|ifmLpO9K*)& zb$8ndyUy|HcUjKF1Co5_UK}iEB8ZVYs#=fDV`^4To?mhhRa&=6Ch=1l5JGjexF*z4 z#NULKmm}`8@!Q`44(=phZapp~H{_pLo0k1r16=cprmtmu?5*w7EQ)|2cl@13<+QFW zfV-ta1?~I|m#_D&)XjnG@?S%D19U(D;?tUWZGUD`IY;#{m_r56j6U^5>t_UX@3ww3 z|8Yg}#onUFr#+L$_@hztx#kO@f}I(wtMrOfkR1Ugmw9^YuR4e^hNjgMXtZ5*c(;kI zvDA-vh%?5Fjj@v>bg-3IAa>6lZ{J$SEQd85==nosxML{38KAo&a?<Ez- zA!m#tnJ?j*UvhEpnix&j&RB>VE9k6anct*GndC;xUVUSm#&wsG?(E?oS)k2soz+^8 zjR86`@FGzG9w?rT*6elX)OG0nupX@48ez%`U4tW@b0j@+geRnqGMJFV+i%A>mv z9;ahf@V>Y3oX~>%_HmGJMtwCt#VM7b6LJ_eS>8>3KEZ3Xw|7!h!3Ctrv~N?d<=M$S zilhr%wfznryTOoHCO^)mtF%^Qw7sbHDPAp#rE<2m{i30_uMSLmTzUta|m%-X}#pl&+;eT=Af0!Kdw-TVh4e~pQ)s+ zvbWGB6Pc2AVq67{yhMoLLIJ^1?EdfM!AooTvDdUo7A1_onZ2C0E|L-|Cop+y-v21rpZS4;f+2btwu(OD_CXT-Z~!*-9_uXj%f__Eg{v`+Ep8)=cS z%d^nWe&f9m13oAVa01pYe`=2W3Pk&z4_;a%pa5R5*)zmW%e^aCH4jMi*?P5_D3A?6 zPdSH106;?2(E;*A2>6Cv2oSfi%kWr^jav80;DyoJrQG)ZuetX>7cSXrCDDL@=WaN0 zJL9>#0Y^Q{e*&fQ7g9cc-2XfJ2Hr|`sO#btl`F0&B87eVmSd%kf>bBfcA8$NN#U0d z2`~|l(WMOLKzHDDrqwFK()ZMDsyszaYDqg^49? z7s2c>@Hv|VGZJ35lxk@FnX*#=O?M?Of1)gK==Z6o>y>eP}OfxBw}raMYqBEa1HyYu2y;iUJy57tJEgeINp>Y? 
zh2DJG_hVy%GbFGFPj6beHo#-(8*sbcS@pc8OSeTZctnu5WSCRNCycw7sKElUdLly3 z1)cmIJFib^s_3bfrqTNA(1F{OigF_iV5v6yKmfoc76SOQ9JWD^Jfi)&tH2`Rhxof* zs94|$>aEEXKs-Sn5`wONs){RDs;s9@afiNJe^X!5|F4RUC#h0%*JF+^Z{73>7HeH4EGMUj^+e%CYCd zB#6dJawXUwKSuRB;z*XTWKbmt7~Hmu^1>L&5>K^dksjmMQ;U7|Vpm#gLK^w{>OEGU zIl+~%7@T%hX|L<~ed_qnhx}C`x{A?AhqSxIKzCZIyRr*Zb-_+}1CL-*IX%#1L|29O zcm&-0T&qF}Bw(l^df%yZuES*%v&ikXX^kP9n}R;^LXkt5zWO6hnvQ9kI*8o6kL96= zktM(jzZYY=`e2_D9`LJz7&;G->L(|y5RU<>BX!ETN-1oJtBxlC0sa!?`x5*edShmI zWhA~qNU~U~fYqb|?>W@JOMVyyKl37to3iV}bBJSyyGKew0sWjD8ss^LJLU?99Am5e zf=i_adYO38yDWU8_TTeOQt3hNGa1QO3s(`{`DLtVil98o8f+je6<2X;C^wcf5`P4 zx7&Y40%>JrWCZXgeEl*Yv+RM*rErY=G-n3jU6NeBq|?or3n|euseE}iXHnD_AFM6@ z_y@+H-5oAIb6FL5qa^lwR>EMF3I~`u;Zrm>aua+Z8MzUg?}COR2)|I;88-Cy}CGE7#JDC_t=Ct$eXdB!2 zClon`Wxs=z+1JA%~Qt1>iB~vmpDy6~?pg z>gNz{fX1fSS^-26E})dFyF*$7e!dGOJWzf(V zc&xC?_@^W}=zOX55V`Ag+cUf5vGK}A&`n{QL}lTjPk`ro>f&r}HN>Cdgs(MJC1x3D zxpG$CwMh#3TAdFWz9h5De;(44pdGA<4_u}v7VUAl`&b<+8i>vYe)VZ*0>PMy3S~cT zdoWOPq7c?IEkDZ2S10*q+DgyfV#3pq1VA1K2xtphrXmBxxDRk>3|QKo&5*cOOw7G% zM>VwR-#qvD&3gLoH~%%h00Ms491H&6!*63hSS1NIQt3k5?{g{RM%r`ufz2eq&DQM9?4hz_0%`^$yw^*`nn&*jY=tAr;r?nvFxnX@dgV$zHwKEQM_m{5{-XN za_|80H}PQM;RJP3uZge5e$D7^+GtJbuu?R{0>i%zo*J5F{B40yyyr_55#sAl^g#&V znH^Pw-$_ixC!kyyL8F0qZa4mz%T3@c9RXxN7e=H4~~63zT_S ztmvDiFm;k2iDsEvUJg{k)*v*qTkn-vxu5_12T(@5yPp&^WYl|zE*yt3$+p$0m$=;e*{!E>uK zlY*G8wy`T`MGZoMXe;?Cde8ncKiC$%0*amnw!`PL+K6a1~TwY-Ad7G0~Ps??(aS{2w_VP6byU~At!Un-Y$^GL53dyE*t(HbsG`hRZQf1NI z+7+@-CCyWtIhaj0ig`z|-7tY$E;OXcFBpJu!7>Bc*yN}nXu`m+O6{%gLDyL z4y^V;br(UBuEf&O$OO6&KCa2Uw!!=J#)oAC3O} z=&(UaZ+QQ^ZyO$7s7)@gtlDMuzI{mtbt1IIf^fN=d4LL49}JO$>qoS6onkV5ad5U6yY_(| z(o-$`x-PgU9s6N><%JZisrp46Mg9RDTp@fNCJPG1xs^8Kd)@5rJ6@ z2u1#@5XqyW+V$AnaA-Z0M;!TJi}q)gqSxt3o)*fyYpK z`NEOeTh(51;~!7GyVYZdaVj#bl%|n51rk3fHiq}X?T{Npe$h4QO`cf!=?_lu zDHDS!ZYapg<1lGvpJC_Weqh{1G)VC$BaU8br6GOE$GO777^EF%PW^igh&hRD5^UJ` z<0uOKqOJA;PSSB!%1@gWk?{KZ4rZDpw5H$+mQQ>lUl%&5FvJj0B-W<=X2&z%J|Vf% z8j^FBkrtKrBG_TW!lrx2G9g~I#QC6J_sp2Lo#L-YRsHGtl|Iqnw;9sunzOEr3(f=T 
z$=!4}cY9oMJ=rLvzHpLa2FO#wZxzj;xzXCc_f4w=uX*4_cW3qbItCUDm9P9WyHlz& zR!oj7DS2YtOlmA`EP$C^|6rn~Jy<{W%vAoxN!^#VUEa9>tH;!Qxg`T6cbazIU0XG6 z>w9dD#0eiIv#-bgUk1@P-u>Bc+0Rgnkqm9*1Dz@ zuh5f!TGJ{|3d39##kNQdKhgyPN+51U|Hvn=E)!w<+@po0K-+zYV#>qfgbURxkl0*$8yH=SVb0aJ1 zl!dV2w*WbftX&0kU$*)HRyzA7WB+7-_k5<87Bz8p;z3F}>oSkh!xH=6F;9wkSGHpv z?@^WEg5vv*YnF#F#W$FShRkTj&INtQ#)JLAO=vPg7pH!qCi4MMTYY8m=lna5lQmf7 zWy~6vdWiRm$>@eIsnV?L9{qV~?MLF>ri0@`+*_L`L_-@Qk4#3>x;YqDp?o(gjvO=M zf*F8dDE+?jMkfdekuwK=vQiPVBD5Wp)N;}&iPf-i za$?cc0NIsC79DqIg2{MrvaczIX(e?jp05tezn5^{jgZ^t#o@=nBg(T;dL@OOfPpSOoP${w{b9FyPjO9WTuWcgo0#f zonzx8Z>}fadFBhshJko))Q2zkfA_>$nUTx5t<|o`FIch1Y1e&c!L_-)mUw^x>GR7$ zq&Wc&kYZt120*88HmYpB8u*eccJ;VnsTCp-IY_f#Xhf0BY($@bwXBuS_6^>q`em;9 zW`Vx#T|6p|u>MXqo!BG+q#$BSrgSC|$v?;5Y&dE;lQ`(-Hk2@#l&wFUHVd}dyEz=) zdI4aM-ZuPn#URFgFaxpO;}Ji~@>K~5HgIl-Kg+%X$xGX2NX^!f5z!DhVTa5gIh7UI zTduup#P+Llu#eA$%l%X$G{?@vJBnupaE@J&eh>91j1p~n9&ug9l zU`87nG`I@f$~$KBc;JN3LXv8&0C|g*i``u1@Is9*-UaAYL&+Z;{cb-YRmrvi?X|@|+4DAmRJKQ{GQ1>U$uS`RzsR zet}YEvzWt4<3|Sv%jo*jztHv#Z6-t+0_CxzbW!ZYDDALzRE!T4mq?%e40%ix-Fx4x z{VYoID%LlR`%oPKTRG6y)EB8R8FB`@-wl1}Ff2a~)=C-FZfQ5|PJVOK#k*YF zEfcFpjp>X(V|10Mce6DbU`?gtG>5%ROTN|nz#9M7)i(#kMp#5&<+4BYv(D-38X5)j z6?wE)v=TRr8jwDwRILm7Ofc}6y&W@tbPIs9_j2{y6)Q^H&wDORLV{&$8fssPQQpXCSM^P$%-BE|oEc&xu{i}-<7 z7s~&h<>djVhI-V;5?&aC6&pYvik&2IFvN+>xtk7zmwA3Hm;7mA>b7u@xWyxrV5;Y; zyJs0Q-IcM_#V=o;xzhz%`S4TWiCpC(s12!c?L4*q=>SuMx8f;?SjH?(Qq9?4_aWf( zV@BSoyiCssC1Yg}LcNEPXzuhhImbMZDpKzkUNT{dpWu`8bK?W?7^*{u)V=!m-RDdi zc>>0IwV}R^`}4zgNgCGGyei@joZ=TxL1jxi9(Iz8bZ15>zI<(3uuBQ=;Gn|fE~?RNNQr7#uyEBD%v^RLWNm-)8PDW6bC z)bH!kii#>#;*bdz$nKu%eU}1SsOiOqooTB@R4E?UEE=qK&n@IZzuq^ zBc1wZiKon)?%_If`(y#ZI-fkC~lUB;Aq zHQja1&%)by6zN!`TP%u#CHQ01HjL@V-hD0Tdj8b9s5}6}v@`+A@Oz&`m=JJSvTk|Q zw?U{_uY@sdZ!0i$Q_r-3?VC#tbZP3+6Kl%|4^u%93GNo|)R@u)3d|}F-$KOmT$AYP zG|9n=uch}ycOV{5v2CfpJQe(9j#NV@5NwbL8I~58(zWgu@7ueYT&{VfrP0ZC;b-gk z*RF%o(jt3=_eUgD63W9f=Sng42SIg=se#&Vf{`x@!?PuaG2vyK6yl5^^cLf)z_LLN 
zb``5vh4WQB3Xf^+qhszksoNiz@Y=G(o?3zubL^(rQH;x7k4Y;pBmWI2Zc;F?LH;OO zy7-3iGYy?h1E>l8$AU?Rq0w&gyPq`$inzZ_P2Ib@sCuSr-*~_DV_3Y&e%7he0kv*J zwG|qK#BnA#x0H<~!<&LZxJF$w6y@VjJ|kyZYJ-3(#jtDl*7{@KG1%GyV3?>xQ>ylKw+A~%Oo4p z26v*kLdCZvE8Zo1PK~4o$o6$%gte`>C$A%roTR#jBc9=8Mm50h?Qh`NpMaYtOa5VK zb}9_V+(lA0M+@PXO?SmcH+I`DoZ4}KiKL>TZJ<9udyl65!6+|eRn8XW{~b!U$LUf3 z>jzU7w!x&>KYj+#&GB|!NeXe=k8J;h-z44N0un$y(o1gz-E@v8kv^6V(}hk-s6~S$ z5m+c&&Xg$9DUpd_4oPoY-u=eCfwN)O-a6&*7nTqnd1&lM@<-@C=i|$otoro~d_nxV z^zlg#i_6ylBb)D4S4cQF&co%8RIk(IAr-$Xriaek4^z`AIi^vvjuH+!n!byD^{jt( zUMArHWBtg-hiv|=+3tLN&%2BaWDc^n;GQO|4EfAnSw7EnXjDhcwq0cGwG@*rWP2ib zh$=7%Fl&^MYB%%dg_&II)vLMxz#Vd!cGNzGt#xR<^71n4VT@Hm;@IZV{D_?~O$s>v z!{l-Go8MqbKiB~`{S&r_LzQmBKI{hu)z&Und9a3jvt9&TpAg|pQ~&8v&f+(RfHzL5_s2{ghx@TmH!}O@M2-q<$Nf+KHSftY*%pNSIY3bNe2S|F z0a(a<;VOQU-E?~fpsBwpZpusMvDDYQ{0^4?nHo6v&VFv74$i2B2p6HH7+bYDDgb4? zya1w%wcMZ|s*qw3z_0!OqLsdn)04TA(H$07KMFuqMpmeNhG(<+!rlwhRSF(sm{a2d zy1rQeEOXl=>s9^_jV#kjD;*XhLn~hu+BcSS_tcMV_orvJ|n}|AdxMnR|DIO;{%(fG{=`>?Y0onr z!dc0NY&X!c|Ki>p=+$rR_fJaH-yZitffZLt`|tIv)~8(8p+|ysI73BT$!LdTd2_Y^ zPw}R}e)X(RoL-&*eQM`u4-2${IX<1L!T{;%Z+5qzv+E!*k?tm@qljEjnYH>4+1jYr=uUR|h zC4ZFL*ymRS#WDq1uiwIorOl`WthI13ICdvcWcvrnqDwO*x(9(bje#LP-Dt$G^((?4 z&luVUtQ2gz61f(1EblVbcKmt2`yvq(*EW7ztl8j_UAd_vMT-3I>|J5KGs$-a;9MS_ z4v+5~M=Txot}X+GRm<@1#KiNR=G1-BW$)o@_<+~|g=?*AG5zy(FR^NP`oF!1r{IBl znyY)`@@-j2oJb#yoqtwTC~haapkN#C-o-nbww}}>@BX)P}^Pe;{CKG%p{<< z+gY{26N5Bfi?v^(+xq3#qLpmj7@Av+g%EShmC~*EvZf+K-+|26&zWIc355ij=-5B3 zLpyVRFcQ4ny5UGI6Fl?B$OFCXxpd#)9Ae1EyVUYGsCxPhl1tLEtkCtQUS{ChcBK+f z<1W5rQvKxM_!PJ--FHwOB`1}HO3tnA}fEq5%<#NG{ z6W6=mB(+%tcW5YPe;FdVFX*orZfOZE1~AJ&G3*Jc(n-6_|h+%XBL0`0bL}5n=av9;DKf)zzC6k&EcV+S{24SwhoRfk%C# z&u+DE1g#UjM!&s)^Fh4LzeyR8utq}DP1JqZO@EqF6S9OR9Is2ua1sZkP9k05ISy`{AyKrJ9rEBdDTQt9JTZQde-5^ zC*P-@pOcw>{rH8(nM9&WlU)~E#Wp`05YT(Dh-wJ>qXuz0oQoDcMD83j>>iZUEmqDq zmHv~v69%{7Dr(0qvA()0f@Q{99^S^4JAUBx7qdWHd77G@_8PyibaapWJqSSEsyMw7w zBndx`!#~Sv=(kyER_bCTYpzTo0dbn*Iw1bsRfgKYL5%&;Y82{VqOV 
zZ%V*xvD6<3vz(T*90C`A$bF%zzsEoD<61}!!EZhyywge>It@{mHS?fIOGpK4b%oYP7`rqi7-ljr9PmP)W0f3jy zVsqcCe(VbptS#0xx^%pXVsu%3tqvnaiL*}UCN?kXO2)J*f-Ns9o}iS$it_Y3J)1+HJ)CE-cY6WTPdC@d!&P8}gF0 zTKjWcmWQw*Na^oS?_tXzV=#0JhdS(E(dT%|0o{xT4$onU>IOqDWAnr{w(+4Pd(AK6 zER08IqgC@+tgnsjlPd!45d>Mnwy(mZLA!8_%>yV$9d6r-NGOx1M|X>r4e`oG2{G3n zyZ)m_`vzpVaT0lqm_qOXUBk%j2g=4elOaA~W>1Q2NF34z%TTs0)$sSLgEf=0l33JH zS1UkDpoGo$3}x8x9QHTYWC~%4MB&y?agO+XOx=*EJ%2OxZp)P&aY|?BMD0CQ245>~ z6?GpXpp|fJ|MNUr^^b;eMYfm zj9AkTryOsT7f|16OjYTymdOzr_VsV@@gJzXUljE?qrLBqi9>#RKIUys#(-bxErZJ? zj~&O?Hd?yCj1h#JgRE zsD$#J%@tO{d41SmI(8j5G7PT@?^pzEj(y9i2pAQjF*nXDi8%orlrNTjy2x_i^}{XG zO=~2XgA){~te9P=jLc>Q9tPnSf+%F(_q11Gj1`GAG0WYT%r$-{w$>o&U-vVmucL_bJ%M z`VR}h#}5GBok#Lmv4327 z!RmHX%%zOk&o1xhqZ3O%pY@szpGL~P;XoN#6Pj$c7zJnDFxqdi*lo;hX4n4faGZ%^ zHO{-j>U@y#_}g#ld%eI4A$Ol&q-t~sJr>ToY#`-s#jr&?5SWystTkjDa@#jBS79^2 zCSO|TRfm^MaDrh)g)#Hr-)p>SROoj(j8XdmqeSelLMXRn%iXJU!ta!B9p`9!37u3g z*sx$4o120E&&GWotaOn&8?GZW+#_SB$#D7I6o!dd84tmJqdkc7xMAm+Ty7s2b+XN( z*U@7{=*)f5ldGfQK?C5nSP{|W^~*hJ$!@wS`!t|wfbE5vr)sioOn3hi?^Gd*1Fz?9G(~yW<-{=f`T;vqA zjcr+q=A_ncM&4l+48kXDa>XZb6Z%nM{T{~K#e)6uYZ%JgL{WG%)QgJ`5duIp8d|VM zp1aA>seXpu-UJweb}L^w}cJ>*N-v5Ioob(_g&)E+IOuu z*_znJC12oR)|zVATTsXKZWr|d$I~>hROS6|iO@ND^aSi_%(T~Y(il$dm}y}v#9Ld# zc7XKTV?2)t3C)u`v2C2dzpW9{y;rZS$+~o^{WCKZYS`W`zSX5pn9z{4k9>`!Y{0-y z*>kBM^}*{iC>ef9(~(Kgkc^&H7rU_6Xr`Y5B6o5LuB2I1B6=fBII zo$nl?(U1v!jCLvIhO;`0*E3-BPvj$W$Oy7~_Mx(JU5=nr%e9fSnBbWf(&%Ur*l&)L zu61XA3cfeVVk@iqs{+I?#iX+^%AP|Lhj>Bn2k!QXbQeNF-&=BhXeRt<7G0aw7_=-t zuazrj!u+^3G(G1Feq?P^4_R&aQYSRvxZYr4Ulm+-Kx2*9!M08!`dGkuF7l+68te& z5}(w~jz(>QgS94AR;v8+vITb>e(&LU#p|v6ykn-X{N0y-4+3)DA{NL9zivq7%tr3oDT6rLJy{Fasy5CyJG2FoGF1U*25?ZL zZKORLg>FZu(&F@XflaHh=U@u!?vxtFhkeqqZ)+`^rir6QZ1wT@CN{5{%lNgT72TtRd5=MFNJTe@Um?q4za&aL?|-mMtwz-Ite; zknfhLQii?vh;~JU$;5Q_9(_s@jPNWsRJpl-@m6Ez*awY}`eE3$u5@|R%4qB`pnI%TN5 zTUQkcG)$huNPEj7dV;(-4_x#9=alSCZaddi6g7>AGHWtlp#^(ZR>#kSk(5n+eG82e z(tOJ5>@6TS8{eTKk8uBO0_Xkg?W(JEckcx`vkg0g=Owo5jFD{njb@nE4b*(*P5VvB 
zqF%?_H!pG}Z1H{u$QOUNuQ6#k9biaEl-}##dYaiRWG->Hn=>CGdO}cmuIk#%Kb`7f zxUi|<<+;!Lf=+(7XfNMg*?%tF{&McH4Z8dwC;@k|EjPZtO}M+&k!WmH@RW69;fXN? zyB;1ezob_ZUkRe2Wi`tny^*akV@cN0HK3i^4-MbkyGpcjXbb!;aOf~UofBwF+Xa5p zrqww4EF>x6^(vNKQX-TZ+TjVD-xRwiX-3fek@)!s8(1>gv@D|x4~k#N>p{py5D_he z6V941qUx-~fX84Ni`=U+nVlO<`yY3*$^RZxy&%})KC02)E^~e_s!x?w``w2qqP|P? zrE41Y8C8b$dhBmXeKRt=o)zx%)A)Rie58{i(@`mW%EgBX_e=z{5bO%_P6@WaL9XCDWlzNpp~Djrk>9zdbQeioSJ*T_!>c(AE}Xbaq1S67|JtedRExr>L^K=b!uOyxEPmLgNd#d0~^ z4d(tTzGe=C?FMyL=0hujJ%D3xCoBxL?kQ3GXyd+^pDQ@SzZsBtKvtS`{4HfdyYug$ zU67hLVPPEq4qCiHj_H3Fjt@9&S~S~?Q)GGgYj*H2cr?~J?!INtU2K?oPs^0X3H;N8 zq;)Sjb!2{r?YAh^W&CT*WZ|OU}1J(`(Fvc9^;1^W2$e32q8XWFBe|`${^Mt zS!V2mYI=-I^m7)kEdaij>rM!GsnEfYwK$(6bG0LmKY7JU$o`}}(%DlQz|_V@XJ2Sm zz+cInd21+j7D(N{TAQHari#dOrXMBiQh8ZQu=1Lm9uV3Q*x@Jq%2N54rvQgYTeUw# zhgo+gl@dq_!N(PQ+Exl}Wr?TbrVxb+jl5~H+#G!nj|w%JAOFV)tYo;O(KO6r*FC#r z=}xO4PP>49$EtV<&6>77E8J~)cJdw?MOl%N?9MIa^-=)7Q{inSw|Mm>q#wuYd8W|y z`?djWlae=eSnxZfOTv~51?0?pXH~LY9f^mS&oEubjl%#MjEV_;bP!Hd_L7n>AFHxT zH73VEE6lp;cLd}I2bHGhmInIS=M#ZUu{oijTN(5|0yVo@YVF;G=%|Bv!u!jltbppA z+<6wv^`?F*(c1YCOB+K5NG11(dZKvKX&kGnWR#t`*t8~{dag=oV>2FlIQQGVr|yPc=pVTQ?w+7;lb`ljGO23Q{T;>33uJQsU^+xrqo9J z4NX47g+O%Z`c~W4axdeTaUPK&u|8mZ%6_2F=Azsay&T;8zI;_rGp;xot z-u>VxLj9(a0)Z0%EtnAyNzmK2XlCqS8i^}8PxI;6$Ea(}5&2eXSc+JX#Y~x3CdYr< z5|V@3`?;=g4z-N83ki}!_*%Fj3-;y4vx!B~O4}u}l8txs~+`U%}J@p&8@?u8BsoM|CqrH0t ze^yBdwM^i&KV$g=iidw4;Xn`L5s!Q2m1-Mn+I}u zKqo&ec3)f<;aAvJudr_dD~yhaMkpQ@HRSpg|Edh(ph=G50n5L(P4;8kw|=%#iWG!~ zCt$h_tI24d3%l z@Als7-jO;knV>>@H)k=5t0Ny=O$HbCp@yV!%N6RQmp@w%QR|};^&>$9zB>yPRFqQ3 z6ye7&!*L?!W(CR`{=u%(CS4nDq4C19aa7B2g}-Vfa3^2a$z4nxIhLoBonTRU#gZ+{ zDP`Kr1^OJ;2^U$ftz)eHQkv~7 zID0-d#_Vf*X~W9rDe9`?puH*k(*RLB&w&!av%vW3^6;5D6Dc92p_0d4!9>`+9C*gW z-P%UuE1&Tx@X#(Bz`8QMDk`RJMQPWj*JpbCyT`DV9aVEBJ8~ynN5`CaiMTu46TCB;q7S+n8YUA0$BM?dED_rJ)FVSX%6nR7~(?-&(U zdhmPbd_W^_>)YFjRyf}feUzzB>rs8Ph{6Z`?s~V+=Jx%3%t?HY)-UVhR(&pSo5DkC z36@{Eb594q`$#sW_wSvlPaVfIB)*3axAD>JDSK8;zk=9$ 
zFP|3h0NP~6j#KF8r)c(Cf3EA5M{OP~+OI<9Hl>yQ0dD}N99tK5w;R!25Q(|TJ5;-UaICGg_FT7%bcXIv+t)k6%^|SlP>exOg&Xe|cwAyM3 z8etIPV94Ag1yj3$?1@C`e$V;CfeLpejR`67nEDceGrWEv-lJ)Y#be>)s9s*hKDCs} zhw%=}*gIuf^YvB@^K&*_ERtV4St#CyprPCGYSWv8NEOsTiD`z^JF*og3Yi)}vWb_^ z(wwv1R%$KhLi6~;jW*V4?PqXB?4Juiqp7^l=Q25AFVWKtd>*Uo_O`8}DE63Lt&_Oy zQ>zT?%MSgmCj~1{ZlrkjF8@wo!o*|i1OLKS;sa*yb#KHf@+easN~x>vB03AN<}6s} ztHxE9-(g+_Ugq4(?7p2hF^S1Qa0{Y;=!*@~EtyXBDRo!Q+O(^hEK{TZo_(=#+v0z} zWaV+O!A@nV)b{R2U#3%=sVgs`>HvaM;}=XTs9BLGGiKdaa34nl2gv&UjO=ICl`+DB z#FhS|qI$Agq*w{PfbB@bIr==QN{g+6uF8M5AR|`n z?D?)^*TL}6+`?)3A%_0kY~-Ka+y-65an85A-$tdh9mEy@m&b9-w*G~#OkK(g(SQX; z(+~w=i)UJ&#cdxNvZ)(mn2Gf*-lsZ?a5YV?h#&2`1PYzJ#)3VVoG*#COzgEBi1iC^ zY67LW$*x+P2t~CT>Er$vHnJKiedg@0Y&J@|c4C2}84q`1;&yFpv3v>A;cH*bY$KFv zTn})KgwaA-a*6PSSH*!7)aDXl=uV}h0E2?z#`PGw8p7A>@Mfg_Vj;49bZIllIe$6- zsqCvbX}V}h${;?l4f+K3e=ZorS31oopTd2Ijcm9ue(6(G z5%PFCZ+b??v{7k14b|@-d-N3Qg~xiShp%|c+)OPzcRuSDMzc@o#maC2X=YQKij=S~ z^Yt!bwwuv&4%xt$Oje%>Acj5h=HvU?M}C<{06`|FzJ}@-L;_sw%6eNGX0rS7d!ot% zWkEzZ0NcLrKFp+WD9Ik6qCY(je^^}s1~7LenPaubcwPCzFB{T=;upTyOMCDxF2Cdq zGdI_zRl#{ql%)mTF0`GzrUAr6hfbHtRPcAdBq+trP9p^E9`|%t;2I0hj(vP< zB?@b`DV^u;?y()!FgDT9;gsJGCf8vr=cO)FkGtVvH{=$~)SVnwVm*jtHF+UV)JMnz zCQGy{f(0$#tMO6!Mbe|haX`SzJ0AfOCd=42Xk!{-1yr zK@!m>Zj3epC1SrN-C)p~FKwsaJ?zum!RhtjE%*{rX1w|{E5XZgHjT--LDD^EeD#9_ z@6UzpnicM7#+vf6LE@pZy?JtLn}A~7B+5U3*;Q4au)Ya7zYD1v|3G;a%&Ac?2w zqBwc@H+=kQc2!LcHmM^pDr-?vQADTB3K7GK8AC0zhc%<|Z12u&ZTqZW$nn*)h`+O>&~_MexxlUD`}w0cJgsw1W|=w#+IF$>wxM|IN0_N! 
zQGF-?z=+;{g>ZN%NErZF^jBQZpI1o{rh%CyFtM@)Y&tMiL4!NqlMQw|g%ohu`W=W6 z;Zr%6&#f}k?uV$cPf7lAO?Xu3;oIM`j1yD zl3@eczWb(h_}z?k1GM5-+xEwZ#_N!K+yL8(*kwY}s@JXbv4tYOyjfYM5WSl?O@902 zz>4r5p7cDuLEauLCrsllddVJp|#ArEJq;Aiue6PzP0n5|N0|^Sdo+S*UCK4;&W%m~rEak*U z9#EZR)33bxcOE}o_qlMAMe6-1&M@KVz_nxHs;bTWu80^U_NY1UcIm&Jt?a+0WVcxP zUYqF$=I9sfI}Z46_P8&ut8*RjRPxQaf@c6cgs)!YMA{dCR2&( z{qsW40}4y!PY>lfT6#PdZ?GX9s$LtUui(%q2-)Jh#`94e*=Vj`NMaB?0@O*IutCuH ztmOqA9ko;qR^j|_t$MB=hR2+c-aq8|sl*yr+0!UTe|81mWp~bxu;0Mo_rQ|Zvhn<~ z86{@#i$3aDn@MM?Rqb_l!*y%IJeWc$;LUXWLCAsc*IPrWV`WdL-C3dOh8HRfZ@^;q%%zmUl9bR4#EL( z=ZN^d0y#HzR=L`R8<%oEMT0lZ$9P(@N>Bqk=fll`yg;eT+ofZXXB!ab8Vq^D#e2)V=Bk88J>RRV%)~r=xqi_O;vN(Qh(#~%?{h!v zEuBn!$gZ^=38gwmwJ~=>&++Vu$+WB-^T|b=nZh-krrEB5VBY{}s<>`lILw_`0y1QA zzsmSG-0s2C?U@<$J64K)&HAA4)QIJ&3C!E=n!%Te=(Pte^Z3+I!(xTkHMqc!D!E3&(;rU@S zK&`Outc;s)fVbkSL8T(G#EAdRqr`%={W`Nr(=UyfK-7fDdv!G~s~)M-#YHzSW7(H} zZVw>XWo~wBaI3J5SgZh0Yfk? zxAFi0YaB}rQhTBLP>M0{E~`dJ1VDXEcjr0PS@d*W2=CH^7z5Ttgw>u#d;CSIb0rwX#vpQiJn~+Hhv|zpf{b#M7vu&zyY;9R%(m zIIU$8)vd-aaq@qbAG?xx;4(goj+6*FT8N{q;**3w((|>72}vE|)X%9!QSY>7%(h5r zAP|&bB@m-2|6@gx|K=(~C9v@^ohMeDI2G_4*Iv@5^gPfKfhPRUBlYS`WZ#MTvK~8vM?8`|u;3^mj#Z zjF)w&X=xA|tLZA@WnQYac{u^FllNDd74o6p&1Zv<8pu37RBYt2l!Vp+t3f>m`*&~W z5v`9qZLvA&4?(Hq+eWTuH0)ywS-_*R$#Ma+%IWlNJYbZ5mYTgL^oL}H5L>($1YN%h z)qPCU&a=08eKzhg&V>(iv&JSG(Ub;-3qMx{To0ZnXxhmK(iE=pn{p;v+@%!=P7sE4%sG z)5KNX-RgU58X*l15kr%tY%_&%QaoVk>e{IT^_ccs&Zliq51dJ%e_x3@h3biQBLkKh zE$$Rv!kOCnH6<|P8i; zMHq7#iQZ}-Em#cQFAjh%!LkZmFkm`0%b)`N`1mL8mWJz>CBFcEGuJJz9I&tLc^BjO zk&aDU&~1RPk*^X}ifq_xqV?1Qt|-(~j7RsE!&z|YBR}j|cULdDZ+eK!2F_8CJ8gGK z@lNsgbOW;=MHVBTE%&Y6rkxesf3g72_5O|M|Ad~cQuQ`I{FuTTEv;Ig<6+>&fba8t zt1n*At_(A;Dx~!BTer*EBaFxw=1jcl$u(Qt=j;sH%!zs}$42eEkNZV_mCpC6cyyT? 
z4|PXNQq-wOyFTYil|A%t=_6JD3>v-fmAma^VspU;mRf{d zLA*+=1rN>}!kRir*6tyhUPqZZ`FjGpJb42^5o?wqA3gI`*`4OmjG30h4AwaU_r!7k z=5nayc-G!_MVP;|V}zkZvbv+&;|<&MU*ApCZ1cC(kfk#+JaLocI?(qRd(O{#xt;3n zGKw^2O=Weg&2~maC(fye&jb(0*CqBkwm%BY?n#cPNzUdEpbB1stmQO_LpxNC2k~T4 zqbR?h~tmX$7V^- z^EiFS?Joj#WqFZx@Ay(55oY~if4;}cE7FT!gztu27ViGIJP7*NEXn_sYvs=eDlm@* zP7v+G+*oC(<%zqy?X4TKbyWBRh6N=!GPXurMyep2Rt4m7R4o6R`QJ78|MQ@ywz-_T z2DDPt04Q<80KExy1gQHubmd}!z(6>47NyEe5d>ZM9F-pDO9{au_aer{f=QhAEtK{1 z2xO7^1&S%;^_<3sPay73eVr_KLPeRn$fV3;(^p&yRx<7T;=p6h=7-ge?i|1@2|P)i zu&X%tOl6!JlmV@r>pi76B7px3FES8q8kvY{T>~5xhDb+~@#~Hof85JTwZay=y~;nv z5|S9JM09+iG*&y_rlekgUWa9Y)qY>F<7RI@J!;@J@c?Ntblq{DRkBrQ**RzqQ^jou zS5~38IE||PiLdx>e0DAect^$^366T9ik#2Ej&l;F{O$vR%X~mmthptC&CSgLye21E z7cqJ-cGALU47a_X9s9z_;^h1JNTJck7J3chhX^_CEOrJD+}*vPeszzOM+1 zVKlyBTRxT{-AVxDQpf_F%W7!R>k7I>@=roa<%qJ=DwU3!9A>@P3_uy#k}On7iKRAF z767~}rRid+oJ5)M%xY#R0(X@;y!luz>6A%T$m!u}-Lv;CT$O9RlNAD`rz;UhPQRj- zB3~*#Zd<_l%)BJA5aT-zf}g>I5e~?51AmG?dS&6yV;Jz+r`Sj3JTZlO3>dQcO@zD~ zbBd1JJIt1YYvFjy0;>ai(gqG&h-aWv$OEh)G|nGW(L#6#cB3HjU-Umvao87?eyEI1 zsxm-gTI+s@MD^GH0X+<>S0 zM|dIRw-?5c^;=;n>6C2Y~*wc`k>IFkO39@UZs-v1OIq}hJE z1_vxvx@=9OmeTF`2StAskOm}Pi39i;NRbVTz-nk)xRpo)h0SU)iE*6EKVy9qpB_l= z9|%4t(Y{!Msr;4d#5`6>q=F@|h{* zBOJf5N&k$xUIMtnHnCAWG)ua^xSu?HLI5S2$a5welj!vO(FS`-!UwSO9L%*KhlTp= zn--KW7~V4`^tI6m?XY7EUA9=)tdKYEB#)WQpVyt*)Cz>clnSRT7X~EKDF~q*L-eub z@8=hddu5p7XTsH9K`l6Ye>ESgJYlV~)WE=?;IQ|2()(5XY_v2aYezhXGU>;;pdkr! 
zW;Se{o4bYf5- zymOd2qv=E6mUaSA-E6n12dy9Kl2epxlnvcOEo>^>acN1NH?LiFvIiG0@?z%`v%Ml5 z*?NgxS6Ev-2mQ`9#wOZsc}LN{zj$k%yvk3*Z^|B-tF#eC&gvo-S4Y68TCJjL4csUW z>pm!u1{||Mhw3Sp$(y@cGioP$*&je#IY6MUR&?F3*0Exdp%1eq^^C(9%!Y6)KVnb( zyLp!y%M5M0Mqdu(-1hp;v}uDuWtpy=ZI%1asvNiCulW5SZ59?v@C=;Jwzb12r2C;Z zc8eJbR|ISot|SE`3FNE4sTl^M6EV2If|skAb0X*AmR}Zx#Ds1h1;8fgGtZI;=fnsI zALzl4_O~j>6+cacwNeFTHYh{U7~q3I+}Q1(5t|TdUYRiuM1ZBFgv>cD0wOe|0(6JN zRRv*e*O>C?>>Y0U-DOAycq)aV9-n`vWl&uYc}Cs7v*TIwIQaFva8*7}_VBmP#mXxNa@r|Jo*E_?O95{EYI6-SHh zZ>4A0h&m<-HlK^yGP}Gw`rThW=@x%63NzuR3x;A_{PSeIbPuWy*er5m(|XV&IDaX- zsdY1*LodlAGzaNie%>`=dA3p@&7Hf3u%1TGy}6R2jbkO^%W)BK&*n|kLMfnuv#d~V z_oT*OI|CfH`UdQj7k=0vZutg>I9#QY3*^TN7`%aM#>DJEc#C~z&Dm3r!Q!XEZP1PN zZkx;lB(i~t&WOL8&S=Db6gAK0X4ytbd}8Vx0Sa%+be zJzK0F-5yWMU#yp|`t{xS1`5NZ=5UeM_ZP5H*2J?C`S6TxVrD(1_G*20)f{Ou6`XaGPBqUp-^ABvn0|jfxJ8ZiB!fk z+DGRMINdGAEd~xq|GII6&k`>m135#%zA+)wzRXgaf40EGw9?Y-g8bUOc}Zz8bRr#U zgLY(bW5_zn^pV_YMM*B9qVJwZZP0Usjt!tH@|{`J^e_@SdKfiGIARnH$rzuh?K(_T zczAS*fr&9;wiie(mBJ%blm~T#9!ySAi$HfK?-+fx|8H6UzuNmJ0KW!<_>APSt1smVFyPu9uFPlYW{h#p!UDF)~AFp?#>t}g7qr+-zrZx zc1b`d3v9-a*tpTW`Hqj`KHbwXlyg zGq1GhZv@OwxGSMAE$HYd%vZPbqJ!ee8V2G!G|CZ%b7a4~HyS}5B{iU4SW3MZ2z&(9(i7pG> zxvX!2*#x7qgWh^pKYS4?#~#NlYb5&Fo7dUT%dYotWpozeb#Zw~H4dHp{6v>jMHnPN zRS`oPpt|rk;-@>4bRTQbPBnn*bvm`7Er&^jld@PHuygKIn-qw0M4?kb(d>cGVVdmM zP}SFBA=YJaP~-zgZjQV|&Y7(BD-oRHS-3K5hLc{wEjf%Z1GfX7b zXRXItep6q4^sq>gRz*)$+N*R#o3PbT?#_Ma!j+s5L3b~Yk@-&ldMSsU)UYI%>zaWB z=*awh)s~4{^WG}ICMHF5gJu(%8oHfBnv3+iAXT%EpHD75MD&8Z26vS_aP@h?1FNs` z7gz%-{i`|p&}YrP+nDRc>C31ylPX`>>NTV$*b;00>`_cruQy|TNom>7=k(K_%UTs4 z-gwr~_kmf?lmT|(wz=wmt3AeEoo=L3o~`AP`ZKi2Mw#DrNdl0X11{Zh$^Xpr<^z6$ zTjk2{!{Z&HUivBD3;$x|ckgEX`>gyKY+S;DS7IqD`2J_bl51}ztd>oJ+FEk5%Iva_ zAN+t#len1We!6Bym*v?BBV7r7?-;aWa+{@>xM>?NiWng~J*0UOt_A-EGxAxJFx&g& zSE9$slO2hy;SKR@ec>lr0lflE)mElFVF?)vM|}3kzv>vCCoA(bDI> z6tIQ3EO@)z;LIt=h5lL3IH_z#eL?X(kM0glt#Yz-9a`~4@B~81i2A|?S?Ix;=e+iu z)$vpI+pw(l;t!s(-b`&Za)|+qWcTN&)OZ`FFC#p%X^YNc3Ah`PHOrb>t4Oo}FH9uf 
z)DC!Zf=&rm0wRJm5tK38eeB*8{|Puv@ktZ}bZcEGbB?_UPsyJmr_%w~Lj7Uv-C7JpW8 zZqVq8-@Lq0dJJlN$$GB84aA$7)e(~-v1QqZ8)@uA{awvL(*%>6l>*ZEl3PUbpQ<&; zOZ=n;Cq9waZ3xqUwC;YC?kmDku=LyAo!$s`y|^RWRT?J9I#XS)_lz9~!FD5JbGW** zWV!p%ImSi}u6BC2q0-=qFk_CtwE7UiezZ_GvdADhpGsO)xq395RC=;DWrBEnTBChw z2keH7VwZHA2ZYya_b&5arY4>7bqZ{@P&vij4o2oXJGG=+^p3RQJV|om{IpNh{-@83 zi)^FUWG~2%!oxQuWK;e;13ex|(f@en&}*8QmVz2=lYO6whth32a5{J*`fCt3r7ApU zlNHwXA>R$w3C{*Uv+$KW(el`d9rbCYdfPk9ILpZXe&+R|ttPk6y<`-CLsG7bG$^!K++~4=fJEf0T7c zAf@H@D>GnAZz6uJMQ%TCb1@h#O&&0P31KFCa9zTz%*B-sJ%&^UY4F+LZAN_{*yY05qGk!T6zwnI6d5$m_U; z4Ue%r6~~|BCcA3MDKavjq>I+r=T)BE;ZxY7 zZc;RBY)OjNDvC3T;cp!S@z0;3IiHKVu^VO>nM#1(|ER!yVI$3n$Wv1dvkQLH=*ND& zheJ(hBI#oj&C3YhPZzeZ?dlVL_l(=DXxT9K)iClF5A{9ops{==C-^yE)zdr~d2!oS zjk|{SPYmxQOt;lld6mZfcOstmAIBC*55suF)~p@?a<27AakYaI2oiK#UHm$TxTx?I zv*2TwphcO(f1+N7yI<__&Lo6;R#yeHV_E3CW3oNE$ptQGz?yJ%lk~wBf4hdA04&-_ z4?BI1b6!)F$m@9FAzbzCs}j>GN3=Wi&%aNk%5^I=)>;A@S;mTqQEa_nAuFxU?=~%? zUa|@d|IT!)sD~+=(OE5M7GmuwiOMg>ECweO0CS-pXs9dl6Wct4M(8`91oEVwM2-xL|WpT?Z#A4BSS*6V^2&JV7+7ALbq)f*wOd2 z{^d(PGo4Q2Bw=4Gl>ast-eG$t5YMzTYMIVL56Jwdi;S>F6b?u_-RhInQ*BT}$~Ut` zuo+s9WH}lNaAB8d+v<);?@V|sD2~j}cM7`unnYb+?W9;%CW^vrzDGqmavCk4H>zLj z6b7FCJDXEwb{1D9n6^eul<{;!e%ve=jOMg7I_DLz?JAKje&l$HtEib%sV}O&9Lfmw zzee}vBq*&X-Yi1Y=b=?ipzsW?C@e{VvE~b?2LqJJKlDr&)M_uN?N2@i7J4T%wd{0pxV%BNCa3Jy(5pRx1NT%@85)fhw68^oToq;}t(T&_5C ze)SF|AHtL@SQfh4!vFc|y*-<&<9?hX_-YMn7<>8iJqmp(Hv17`X8y0Vq_*d`T=B_n z8WW6n7UC4!j`;fz4zoobpH!I0e6H$OtV`8l4Q&hkdaT*YV%#-c;+d~%OQT(UwmKS3 z(NXAHXTs$y@k7N;hh>dL_z3Q_Rss*WQ(ZpMo>hY$%!Y0Hjf$p-jp*|@bmUOmf)lsX zSc6LUm+R1EEG6|tZrQ^ zrjPfIGl^-M3IqIgU}}o9_rESXTY7yy#IT82EBPnouJh&DVZr_K?^^+rd#IZ$xY|%S zluNSUz8#U9LDb(>(Gseny;+gKjI*PrptWe9xOZ6|<>#J=d1LwTY$Jf`)e+psd<=Rp zP!v6iHhx6hYwOZ-55BT@6Z0f6Pe9cXt<>tjY9vW7MNGdJjX04k*Q=Nb$2vN&gZ3$q z?&u$R1|-Gx(GzNkyl)jArMFo!SL@5Jd5R&!g^K(w?icmO7q}!LcxJ( z33TwEwlt&QFt!Q>6b?^8yT!W1syR?@J`2>+ME=XY-btT+o#7i~T}!u<1$%?%me&q> zeqXc_|IuhGg`AwXjlzibbg~@1m#qWXVzpP82c=hKe#LD<5%okw#L@aBwB;F9X#q;3 zO-Y4@ 
zN?K+odYQ)>JDf69o0x+JwRrpci{=Xa7x`rT9{s-?Nh-2gduxpc-`THHfonu_z;=4J zSo;QLEmGd4C9%J7ZlUz;!5^CLHD}#N- zp!pX~#7OC=oHU(EP?khb=;}%bmIW{N?+=BH*q359qWDebTBG5@SilU_RGOt4VSpS9 zq0HM?(SS~V^^(zy6ek8xkKlte{$4x-$qdoNVtQ;H&r2j@4jfADq zjEsvzg(avLyag7c3?j3+ZnYFeeEyrE=_of3ra=KAQwD$d2P{P^hch}IL@|;EUCvaN zgSo{3dHBGOHP+AnPOcZu7M5F3>zW$o8x!ijJ~phUR% zqwWB^5B!H?YrJfn9h?IA*|&hthr&jeQ~)6niY_*rp3}@1lpykezH?o=bliVYxMTcG z6oy)xI9W1~LM;V&}gZFyBwl%kJEj#-~k}baeSX%W$Q!E-* zgI`GUw6a2{ntEQoYPI-2MN^rtw@c4Gh`=SnQe6%jMU%#oWFQ8auUw28qiEA!>C^Dn! zH?1V_y8CE~a|HHedhC-`{aSksU@b4MU_jcjo-5~@*Hc$Fisiq|_kB>w{y^?ZKhsya zkcjtni@u?zUG8Oy*m(!qr^lUk3`u=Yk=Z{xYxBNvf`nGjx|7eWRKM2PDwtBhLzYmGQ^}iB=A}n<{IHXQn>-bOQ@Qcsmlpg?3PI#~z3R)Arb7wA3B9lR z7dakv)La76FTGR??;cgkME?GbtV(!^k&(Y%CR27%+`f$Nhj+?^WgCZWi$)0d zp2aK;Y?sWFHjH6#<(EsfOI@3y_$^5dlyi1kX^Y9c z$C?4%xNs!J!&f?Dnar!1=k->E^FLJLdzi*5DJ-d}9Pp02yw)=P;27zx1z*kF8$x%y zJoL}T#i5(H4yx7nVpkH_pX{XKZH#r5Q-w>d+oOJHP?XhL6G5-vUmY~kWNUH7I{GdN zvn$olnF1jLNA-F~MJeNN$vsEmZ+e(IJhQt!3x zp6j)#HrJ-j<@DFhB#D=!&%E2@QcF|a0&BD1XzSI=UKt_Z`r8x7o0bXKqp%amtmpaZ z%lFIw=aHbta2=R3ja%dX6NAagr-_3m4}OKhuTKD^G)JS`~#Wbc@6yJy<33i`z}SC%0{+kd-ldn47UFN$uq z?wP3MG6Cq`n%qtMf@0|J8(6m|3~9?`=z>deGkBTA7L{rZzwfps)h#TYWz@Z1 zmABd1qZvS}7A22z{H-*!T9O72eg>I$i4wi}hx> SX=$(ccj(p=1sNNwE`G_-*a%#%iu)Bxw#cUy z%9PqqYi_l*lmF_*%|rI9{hkvzG?M4($oY&A6<>F&V)yDFQa%usR|_ji%S&XcO%ODh z-imxF-PHJ@l&jx&->uAR!h`Awf#ta@S8sCL$h_M~zUGH;9W``6+i>H^Y7c7kil&RN z$(1k7y{uyl{4japBg$&4sOzK~Wq(*QtrP=C_mfm!qdNA^g*hqbg5xC;L81&)NNBGNNjd_TsU_-s^yszBD^xy;THa-MHTF5jcdm^%ZNU;7Gm8zO1$@Ih7y54 zz{U7(s@Cz_<<-w7NOw6&;l|$IJqH^t8h3>@G#cT)R z!fQ&)0$uRdZbcH@1(_yC6EIXW~J=)=<=T|t>}2XR+!zW=5Lm_6kI`i!99SF3j> z;#iw9Y+f!0s-A0m2xz?Wd?PgM_zu#<^$?y0g;R#HP=U#8BL5c1KS;_iCUqbKqb`G9 zyc%?n)|)T2b%GuCNm{J_jrme|k7|@QnRmI(C2Qm1-86tlNGMZ5wW9XW9Cimg6+_C( z6tWr}Ey-FHQ4nH{K%wn*C7$#*Tv{E1uD^?1XsKKwsOP#Kv;jHtr4khb<$VF_5V6nU zV13rZK@h(bmv}e6=rg|I3@XFdpn{ui=4p2$GE@2pY~d|tVLi?-Ly=>74{1S2!`inn zI@?=TuUCBN(qe+@VyBVCaXgMKtbb~$v{YVsQ>hXSU48MLI5l`GB(KIUhLDw)-3A#@ 
z>@APWNo{+*A>fWMebIu;%mWVC@@;`F&$XGCH(y880uMDbL{OaH+ufJy+jQ%222cmw z90;Hhm7sA34S4nU`zhl0>o)5*mDY3<6CaiauhuQ`)&Y;%meRhwZTs@JNIRm1F}==| zFYXCG^U&$-R=(g?QuB$$`orVFU9jz#sUi)sYdFaX&%fWDL5XFXB!D(L{%g6>&wf7N zNxWvstmDPq;X$}^J0qX1p`Wcny+-S;a*Nha5I#e9LY(9>grE;a^emA2bacyn3;Lts z3%ru}9j0oUIruP@>q<8j$jGH+-atJMyvvDq*Y7xyb+!$!XPKbj$o7j6Xo88>KMmp}l?wHNsuCz(~9Th(>b-QHBy?vsPhP>EHz>pXMpsn(0^Xn^|YxVDV+ zVMXJzYdbP>N_LMD(pCca@#Q-W`&Uv3>1s`mhI~}pUb{1;v?zrFo*Dc({tHl90cAZ0 z;Ovr!Lu6rTj#=GXu&wy*sDbb_(a< zw9f>R>re)FqNKd4pC<+RX_IH+`GYAfq%(ukm0}PKw^qeYXwa=zt6y;N+NYC9HsI86h zJh401B1|ANIo7)4dPa$)@vC9poYejt5)n$g;y1Q&v#<%gcH5C>(6kwKTTN`RwUW?v zMWSVNzuE`X@HnazTw&^@X?0CcJ-mD^D*i5PXMgMzKnBya$L}%Mu@jMJww@-N75B3x zTRuu=R{dsElNwG<+PP9=PsVi56Frs{TUa+6zfqr%W0YS?1NVe7bHJe=8J=@ev{OY|b*>_GXNb-nWw)Q-=Z_W-Ur)bHl~ z@xpxTW$QitbqiShNa5<|O|1+->fN$?VyYykvt0kQQAUP~T*7EFVvT49r8l>Cb3?hn zw&NHtvBk5MDsAZAm6u?Lns}}&8_G9v^id2Gu9L}gg!BPAABztd}(A6q4+vBAZu9tN>JKI`* z;JN5|x(6Q}{@2pjyPRi(l~Z5$W5O>_D%YmbK?unFdl-PV-ScPdCxI^({x8pG^JlMj zeDn$<#EO91uWniyCD|3`Ig0~UQ;sb366o1#-@QYi7#zG2fpOHC^+BM0w@~^WlRSV?cwdotVen>q; zB%mcdV_a3;`06&q2L5Qz>GE+a6t=V_g`xCG*$P#zT3vIHC&n>*!&RN9ZTgtoFv%N3 zQ(i!zLXDXbO15L(OSCwSO1T9YArSg%hkbG3(U4Z|ssI7N;BzwuU5nelx%y#Vz|TTn z))5{fg`P}MjuemCC99gVb2ZJ)i9#_m?kdiaWCC8p z+Eii|L_;MSkVS+DYdhU$OCC~ACwzxmN~zr(J9Uvmz~BO#@6Hgc>GevZtgyM?ywY*j zxA3*@mF3Ul^sUmynfidN5b^ud4wi#^3t((XVPSsM8AD9-{i!S8{nnh1zhwdR_INly z)AOumrP(pjgyod*$&LqqaFOYnT)%b0La8D9r|2KuQzhVy@w-to+6=yQZSCfIMbC%D zT5GrG-uG<7^Gz190vC5%Zg=yJ);*g##m`AMp6P_dIl&$DoIAK6nE|;rPdtr>HC|mP z?rcWg{L#W0Pf(e(Yw69%g9b?)>8KKuGwyc$9SY}scEf$afyC(T&}EnUI|FTm4Z&JvAm@U`$ z*gzEN0KfKAE|6leAse8(-n(JWuK7G`WO9%7!;qk0BtmQCXMkZc$M3|Dx2oIH(OatM zUQim)1k#i(&z1(rIbt!+!U36Q3@PJgTL~e<2Zm`WOx6|P@O}*1!WbNo>Wk_aA)G#lA0Hh8l zzSs3*1a99Hp1<9^|3|}dna=qEwOD)L^qK;oEgRks)9cni{*y#o54_2@u+DRRU~`i~ zX_$>D!p}|@GBCXqyo1r~E+G4K1Rz_EZ=|qjt~PTM#|sv^)<|SDP4g>9#iT4Kh;}=k zCjgYf49PT{}#7hAFOSprRz_Pr;4Y|WPcMl*t! 
zmyKSpxmr!Ly&kl{Uy!amA3`q|sXxwA$@pgEIY|otna$kg`dj9M2O#ni8Z>TtS33t1 zKffx^s7YDlwe3-pD6~Ehg4Ts$`~CDwGLdK8&IKgOE_LilfAzHOG7kvqDvwXa+#(d_ zj)$+;0->Y$XuN7N5WH!WBHQHzN)@b`>qx;=t>y^qk=*LJA*#H?;S$Cg;%yC_xY5yZ zZPnNNtwDI3#1h0;fQevCda@=rO`TgeNdC0>$ID9esGkwSS{5k<$EaI{xo!^GYik*Z zAo2UZ!k?*)nXUo)a1w0&X%&CPrqmr=Mw5vFojp-+G%%Bmc!c=dODS@uM<2Lh^oH$D z#i0sX0$U`2q{#YXlrk|6wQ_*Fq(=}XIqnbr{+#9AFMJ7NF;|F}^WK2RmEPN1*>K^H z^l-teX&vz$-WHdy%BBQyT`MsCt-jk}YVgYDuZ5amz$_orvvqR@b-0yFQKo5RnP==r z!E4Ly%V}q&obzMm*{;N^>kCL#uVe(b>2c|t;szKZmT$>i>UiZlLQ&Ln?^)?}_lI+) zYB-VX^SW;cbvz$t+u=Q5C3`?|0(r~M^`jY^VrA^^ug!x)9^djXdQL}*^E(>PPsRBH zio_bRw)<3!2*=0!KNH*L%5`g!yXG5CZJ~i?zxoJ5Y_5lMqSpu!a`t%56y!*gDgAs$ zn_67wQAJ~q$GbFP)zsNvUtQKt`G=F#g^OLzmhDFPpju*bI5~cm(RsuC-Ve=sZL6rr zVoLBrIF9?zUj{Tvwh;ne3hI$njb+-tDM)GU$O7MPX;%9}HL991*;>7lbq891Q3+KV z-1)+}%@mzn|8Dkyaqf$tpb^0;j|H@RxoUA#@`$%89alQm*?7ZSC}O-y8SA`ox2D{R z?YDL*O&MnT_>yuZ^W&8Kx z|I?uI_nRW;^XVLI3Hk#oAfP(kvq30%P!PKLD!XJ0>%}mrZhPs#D66wTe5ANa`66B6 zXO=aH_W^R3vyRq7@$!ZB(|&80AMrzVVghC8SA(Iq(66~xa@7Gd&Ipi)saLmb`cNqf zc71L^yg)7SAJkxSJERg!*XleK^&}O$*fXzbSnWym4oRfju7W|u#w|os>U+bCM)>)Z zZk(-+-~t}D6pv0H6P88Q{)`UaBe68o2@bV27hXGNAMJA9uAGr5AQCA7p{4{(wY<%o zlT6-|CiwzTTCS=dQqxJ-25ME#bI;x#hPUu;{y<&X@=fLZoI z4;t3v3ZT}PYp)H**L8~yD_-s8BO3;g!r1g`zd!F-v8Buyi(-~IR=S)~WD=)?U18-d zYi70oZLLss;$f^D+Z-$e-FVq*CvJ;ZrL<}dGH4s*(KOQHboJ4jgmp`htn^3m)zR}8 zy(;pje5mK_LY3^MwJo`?3ltf6?0zbAeowPgmOWPwINKSlNhvtvJ<1%~W|pTyH>t~O znbBq%s%W6d8@qR1dP~UvjDnfv3*T8$6i-SEMI80m4n;7o1AEmYT{i99;Uv$EAFB^( zt1mckpYwe?Ztehn_+*uG>5Mx-JDH@V};` zm)-M)6ybGTZw4}f=ZH$)kLoF-Ha%#}jz!53l}SY~F=5ebOysqWpi>CiyytG|&)gX- z7;-zfG$YB!bnyLM{gLyLcSxEg10&uC4&j8r|2YyS^p*)@f`@zlZL5BBDV$Sor6k)C zcl&Xaf7S9sBNmNIt2bN`#V2ny5gvaP7PFJxgCV!W8>o$m`NZ%2sS+Rmokg{#z@c{R z-~cmSf1*8F_d02qA&-p1n09qAZOz^Kuxk6N5(<&QK$bcRGJlV+?jJ)-2g7zNE34!4 z4mey@a%f&3MfGo6vch7mDgM1xBwM`yLDyMD#T9kwwkX^kg1dVN7ThgpaMutlSa2)c z-QC^YB?NbO_uyJMboE1z(c||0-_EG_b87Fk<~P5?`Cy4^2F7>u=*P6-GEXPtD01r)OtIne?UU 
zpGtm$nn~clR$lz1GyQW}e#Nmu)%@);=J_bAy%GLu2u1z5@jtIx*m!}EO&5IMrOk{t=7l)Ag8@-q}h zeSUiI>3TzZ`>C2E6^6J=U%3TLyz#5GFcI7c0&NT={$Rn{E5}M8ABWI`vH&jXd0T-2 zeHmRoRQlK+Ws<72{xMLyl9r}gd)FUNcYh}W3dhCte%vtr)L*mF22EZcaCFQ*1ZR1? ztlhnLnR`Mib^T(;r5W0z+h@+7Ke62lC{#l0%Qlt~$NAF(cc8d$k7QRc<)wa`Q7YsOHV%hVtUy&K9+C11DD@8Acd&GH2q|KfO|Vy;Q;5;cm=AM5vIQS`u3Hi zPh(7TdgzTXU1%kj6Zv?hwOudSxzfv~06k}nJPEdj&~hWp1@#8OcSL6PQ<0Fv+Gi&Z zNh^4U1DN$8pZSxbG(g9-T#HXmC6p$B$wl=pyb_9+b8u(1<8rm72ke}PZ&6pqdW5a| z$=)N)8poba*G79g%tpV6o-$+ki}kP8RU&rWSiOBVHd4S*X?8rL8)|PwUiD^l7VnP@ z{^qsmtJ1f}ffO4zv*m2}8_O3Y>Z{4>U%KgeWS@KM&bT*_bmXYx5YPe#DFKI-9!J#$ zfR%0MtluV4%yU~U`Q3k?Yb(4j23n5UHjeN>PXn>gcMTc&AzNLZuI^n2(-q5p?*I+B zYGw?+uzYTEM={=}JAePO)S93ZT&8<3wMJeBhT088uPIAQ%E=HS~CepP_s;PamGjv!&EGa zlpd+bC|AGR)W_5G=8DryW-b7E3kO!VlK<4QySlmDh1q)a)q z7}_(8gCrjQfp=k$zLqBmuI;nFG&6%Wmw9S$K zHl@Zm&2v;oWt+>CBny6r1*kPiM>D&}B%sv4QdCrcgFzXp{;=?oX$oKUbAmNYu4NXAD*RRZ) zaesF6B&(mEZ+6Rb0h@YGiWnj5#UK8RycpH#aDS30al($_-#ITYNfC_Rf;%|+yGG%q zV(OF_k1W^C_Vo0Cc`}&rj;alVcyz=)A2;&g`=gu|V`65cRQL%6TdfCf578jv-x|pd zOL^N0W#!XV+u{^Tyj6maJN(tyh3_%E7JW_$A47q73$k^<=@O38iFZH3iB=D7H?4N59Y=- zCa{4X;ncDXQ1AEP1iMIfj3DT+1>~7&mZ1X zd|{#VqOn1GM#J};igcwT>f=Ry8p5W}k2ZC$i|G$CrH|1s##=K6Dr(WI$v{6MJHtG> ztBht|ZsT)>35`)_u*t}}>uvi+ke5LbZ+>F_4Awf`9Z&jmO~r5t$QK_kCBIsIC(z?+ z7p~X9{>Y{0e(UgoUTb|M?KW&ErMa#;=-7Em1Y!4SgVn8U%es=O}fY z4XpqS3amt|1h6|x7E~li4pz-2IQDx?8P=&{Tz%FiLGo82V}%OdtdIk}g)o6O6uIhj zqbQt1IT*or-B82f8kT+z_86*2Mqf<5=pw8=*7GIw{?hU#XqunmvLs82er^h`+Uu3& zf4gY0y|^GaWgV%5jsmqqf`jPNWtK(HI1Amf&x1yAS+Tv%SE-;35!+AzIN~ZMkQ{AG ziku^#WyGG^sc)elc)j(o1KMAV0UZr!qU`+}tmXsp#dcV?-BIm3C7*m0HBqVo=}+Xy z)uF^V?~Eo!b~uJ7(-5C07S5Y3a-N4u-&a*yMAJEsj@!dnZpyuPG%3D$%B49z*g%g> zWOh1ke&IN2!>U zc86+Vg*`JFzIXAyv%Q4=3WOXvo5?=Isd zjl@FTS+0SL;yk?k61MvDHOB)P&L4_tLkVbW--Q}dqO)0siU7dm8hoj&uH|(%6;*~^Otz*>1%>lG56HZLMfEX%XVxp8?!A@R@B3%ez( z9$)BZ)zCOD@c(AoPP-ekgbC;#@6?<+objwwnV)Y`SD;rvJ=SMBOwA-Wk*XO;#r@f% ze6iqr0(IsI{gjiV$qshE6U^*4XtdidMvX#S!D0orHC6tNH{xXO)tj4C_l#?I2p*-A 
zl5sac*j}pKCw-t`dqq@2hDEPb2(6&B8HAsFSRK&ZlKLZHsUOlo0atEwvzU6{<32%K zZOYg4EegTAToPC*R{KL`8}Iut*8F;U%SjvQu?0`z^px6{ojbYYP7NanF4r{m(PweC z9rvciEWT~UoMz`<3YUVXc^;Kw*i!TH#Pe^?Eu&R+8%WV}(VSOFbPmM`gXJz?DY@El z8pOm69VX1#Ve~*-O!4^#LvmT;kdV{5(1*`-%F6$+07X-}r;}(IfAO3>+|dR|@#!km zd}=-pA+q!ZPVRZ%CBg@n)#w5(e$zRI&z&F+20xT?5Qv^qI^0NGEYC2DrEZS|-o|21 z@dA4RZ>h!_8!CRRWXbC%Njq#yVvyGpKO`0*d*kzogIG!^vZ>q6L0X_e=8a`>DFWsT z{(ae-ndPRIHHI^=Aa|q{s6vX^fU*>$n0|siyV_^1ruvEQBTz4e_G>C#k00p2DqsnkWK8FH( zc<6zE4<`%g^JYlR`La<w?jqd{ZI>&}bxIBs{(zn9!+XIFo% z?V}3qza^_Un&V<*H(eK8IVRD1;e+$sCeVFOp!ce&E^x8;sWjUvpsk{tkjut2ME!fG znqowNGoOLIi}nQTi}RT1-&-qziVb0s5KHXOG6E$*WrS!B?+4!S964X!l^)TM=t<$(Vd!IULKMh5J(X6dYTh7iBH`?t7HHP5Mrom`0^9gh|=o)_{ z`yB~Buic_0_s_jyyex$rYO$mEvWl zw~~osz|%{ZtUYcCFHX9MVLM5T;scx)%jh2t+FF0k&EvMFwyR^A@bQSzQr$YG5UaU( z^+d|w=W1R#Az@OVAWn^Lg=m$|^7Wb?-#b^OOv4M_+{?ZnIExVdL_1M;d93IyeZ+uc zHS_UPh7|YS*AkTME@Mc$+XgLMaAi~5NIE@^PX2H%KGx$+kvtA^)Z>%5U>P!n;@c$+ zx?6RX{bF`Nqk-PeB^;aogbu3v>9*e+_--#|hq89bYX9q>uljpJbPm$##auFj8!o(( zeW^92Q#EjMu=b4Urd4eA8pR~F>R;w{NEWwI>H8s7!8dKemEjh<#7>gklkVSf=Jd{5 zknM6B@-``$q$zxic&gx*4R6%$__&OnaGG$0+UKkunw8c5Y$aQ{@7;QynnhbXkKJuP zPbC80V|xpQ75_79QGxIZgZ$GEO?1ZfgDN&KI{a2866j`ZH3+xW3Tdv`*v14NuiSWY>D?yd{Kkrcpg7o9}D#bhTHB5sa-uti&ugO8h-ENlLzvE= zk;pmz9hAF;rY7j8OiUSK6dp1TA*PcX!Z?duf={0y1iFf={o|67jOa z;2*9?<7$~MR>}oBghK2Sn%FNylIGU|mx9uEn@)cQlc$`kph?lmKP~mkw`S!SD2Nme zGi{2)RIY{Iw2BN<-j_hT>G}QEE{4yA9uiP8J12(ZHm$1w+tvM8I&TU?w6v**?%*_t zfWF4--ww;YR+n@*BP)D!1<|ASa$EBzEGrVD-V^4;u@PgV-LMq>z{KT&B;XKBi zW&Qe>0mDC3ius`;XNwDe%v!}5Gjt{hMd8--;y{ffMFe)#uxZoK!4xf}gkrb}L!HUE zGQ%mOA>5A3Dzqp?lZ7i~#t28j{(d}Hp2lRZ!999`Wyn^?rquav0Erc6uX^je=3QZ>;G+R|p?VJkqwii`S^S=ANneB1+n# zvZgf?{iLU1j}yT{c0cS>8j@!pKIonr=d&a_VTmf7_L>Re@W*;t#hCas#Uep^53F>Q;LBf1MycZjIzkO6sU!WJQy#Wba7 z(nmNAvMtukAH^hAs#=sI_ET~shc_WqEWd@2e=C=k_)JC%H~&=##AS9bsF z36`qoZbUNrRRPalM@USZ`|(m`1f>wC#>)ZMrvcN2q$%e6q}-WLm5Wf43xK3eN{5e{ zxbFTWiPhlvP8qVju1&Eq;@z0)llyc52B)0;VDPW2k6H=7ENx3GlSH;ny{@Xth@5RB 
zGp5Pkg$#(jqxZZBzFc(Jt>Nq&f7kEeQs>gLc281|U#s7b;tOX_=9|K0IvTjO7GNF> zBLt)ACr&c%tm76-?aorbN0=F2V}?()eQ<7k7kGJ0oIU-yg<$0v6#!MzD`eK5XAu+eakzHZuwEmQq)w6$8vTPV5BppBg( zItV%*~qc{W`Tni0h-_2&_yZw@U7<~)Wfa*j$$38i=t_q<1tIzMU&09CL%D>7> z_Bj}5&-}-aP{4> zg}cwB<{#7PIdIYKMPR)tvN^0XuAjN^pa70XD;M8~>P6^B01bLu>tG@%z9YFgB{H)I z_0{MFo#0z>`spXenLLJPxsJe)oBPT;^!9nJ-O+*r_1gB7U`V#i62m_=VxL~ z^7*#{lHBIZIk_M>!_9%*Q}Q`HG0ZGZ#t43cDnEbBC2l=AMj@64W#}-(^gjFmZ=L4V zr_&dn{+|C&`s$yNM?F8TpkK0bY+{0FwTTIaXd-IBD+bu$&@=bYJXwSY4yIrPVd@*# zmm_jiLR$v{SpW2y1Loz)^8@e-gq34p@;_kRlbxE-3*BNy+D7ee|7Ha>O$hwtX{!la zK&}wX@U0b42LD4U{GQxAQK6Md4uDl;Byj&s)lWogPAp^PEzEj;%&K(c(!oos(4v& zCKNyIm}`}9%a(?(j*ye`{Xr<`=oy5?Uo=^O8lum0Hv%9gLsk^toUlVGG*?Tg< zu&W)2xsQ2Slf6#WVCr(Z35uH7YG9UctLku&Hzz~&1>Oq8Pt2^?)T`&hUjyTkH&5KCZ}o$oy$da~_9s>i4FEqs66-QI3d3Z}|hdIowg5SD8;sdN1^dYd2kMRRleUe$+m&*j3h+2tUyeKy+5z z67QY-VdwAZ`W>qwNWWo9kiqVWf2}DMidYXxZp`gif%BFUO;eCFvd@HSJ?%?bh*Z#T z#xFCQ#n;8#ibaIoOVv6{(Nb%RvZn}oUpy}LT3(B>tdITf?5y)!7=lzFXZcLdL(81n zNRy5qO?}zRZ>c1|1dNuld#p-&!2Y=T@n?OX+ZzL$0i{9eyei{qp-EDDh5N~j!Wg{< z+j}sz;q~>um`=M&!t^ODAc4Jwn)0L+r;00- zLN&TKpv?#-i@beML9XzWJIuOyxHOb0%3WY&LgDHoG*M-pGQcA^KcuOln! 
zcm47Wj~JG>n8GNDTu}d_<>}nTwL_}>&3Kv7bBVZLKigy7`jKj-qm4D6a?Nx1mtaDW zHtC6`R>X)-+eb|xD|$pfSxli7`ShKKvA}&EXEKxilPm$_VX|!&j%VV^{$p#@`U#}1 zE%4mVu5n7haXR{Jwb&B2ZS+aZ_j7Yx^y)=Fa#QFzm zmE#{e0k?g4_5-1{iEkWRqZLf1*w}Ey0z2;fVwTu6^Nc9AoBw&0##W0Aa4gfJ8+Mu- zzQp*72wyB!X7`b-#3?ztWcY8-U$#mL=cqQyDb$c2ZxYhrQI@83P4cCWNixnWQ^c)n z+<~odDa{2eL=*yp^ zcnzXzERBuyN$m@6EMg6>$=z<{=zM{cWyKBBW~P$O27Vs9SvEJ4HJ7X$#@9AR#!N$l z+J9*bI#%OOc8>eo4_(TI{pEaiT-(pHq3IoY&CyP*)dDy~thcXa*D4_eKreSCaR$;nEQ++K{MW*>R}H9-+*I; zb~6oKHDfn-Wf9Ilf_jbSdd@D+&YVXC(E6OzS4glp&Qli+qs@eD;o1qSUui&pDP@HJ zBf3c*)tij4zn+zuHmX1MxBu>q3`*e%0AbxaU~E+`Y1Qvzvc=Bt_MDb$k&cn}E8J9% zRAC#DlVxQzpweD-xBV(DVKorH;iuN1YYN+zv)(~l>|~BR&d!HP#5Kgi1B@ZCCyWD7 zE4fqkt-L1>@X0UaB#qWyY74nlR&9_H>h9)O0zsjWBJlbjM!bl8?@C_B-^GZpCBV}I zM>P-y>*;3B1j|8w{-meZiL;*I&b%y4Q;`9({w|i$M4k?O1F#j+w82C6@DgRUU5emx zlJPLUj|Y{Tw5sEfggd`zBARK}kmeBD+TjA;zje9Z!2j35Z7ImI0GPOY!5RlHi}f7 z{oRJm&-DRXZ1Uy-j;6NB*}+8MWm6LHV}oEb$JP{V!Q$^Tj9lleDb&O^ML+>-9PAe) zBj+z0q48`zN1t-gF?!e_3KgxxOO&+e;NH+Cp&EQW}eolZr1ZL8q@bgPT; z1;)M9e8)}hPAEftJ#3FS{)|djowfQ+kxh5lcQfj|-hDCSa zVmK5zKK>8l(Eh;0Hf7Y-HB=4@Z~?wy6X(&?%`#j#*6_~j=SqPT8z19UJiOw^Ou)9G zNLZy9$^=NHR?a=!0olqg6}UxlQE23_PyldPnF>xi52BOt|0Im^mWNK5fd zl64?YOLyh&eMAx5Sb5TfUEvr$t>bDaol#0(^FFlRFflKrNRID~R?hkn7!bv2RjEHf zx=3}x+*a11ORUE zHA8w(MFLDftv$jpdS7i#l92bpZul$ovQmj``pt^*1HME}&eEX6!(^zk(>nXA2cIeB zs$Xwa4oerx^cj{dzfecV@>}i92!>QDN9>~3n?2>{k~J0UM2pl6jH8)|Pc}#s3-gv~ zlMRTZ7mKG@XJT{|25*br`Edt0;<$)8y6`Dpo=?3RScJT9bOUh#$P*6Q4|Q@bpX ziEcM}RAH&bsrFo+=5aeBg1MYEe}uZtM1dajQlLp~rlKa7$!_ZtlvWey~FI?2Jvj^KS( z1Q>)PUMLL}$J9U3uQDj4iFFvLpEp;KrD62c<{KX7%AcNc%A;34;+t4A%jS(6@UD3L znYGmI`A~g-54qCkbUj4Re3__z`4v>-sIGZ@lIf(RlpXbVL;K<^9*-d{pME6~W;xdA zZ@m%pXS3z^@AYpfP=B#(pwTG5`LWx`@5U#N(4>q_tf#G{Wo3RnQr<{%dSrZZo%G&3 zIayh`SH*%ceN$IbQ1`y}zI+jQaPM}y;B$psJV@_9Pwejv>1LIwF=4HEvT2KyBy~M} zO+Q>i+!pf*e_aroYB1oVb2We8fHkgkrR>d8Qdlm+T5eNYlH%Mdtc?G;a$bfJU{dBB ze!Hr5)Y1N|tk6(%R$Lkb-{rC+_6p|dCaBq^rNnB@aF|uP{w^HNn#KRDm$>X7<-+9o 
zok6Opr#S)PsG=(athTwA)Tw^pLz2+Kx*5Y?@MQR&9h9UVy*Q`Ne0;n3u65dgG}1~s zrh4809(69X&ZTof0qb&n<=`WAWz~-cKdXA3JjOllaw_#>)1k!{XvZvw>y`V+tbcVd z#34ZY!+W=kJ>&9-^OE}O@`#%NTS0ZQo;H`u9L2t~bF(=54=f9=%G>d3>1mz1gw~<| zgKKwf+xv5ljMPna4TId{!M`B4f3DqxAGf|A!cy?JW)D~P^9)jizU+rG)o~eWk#)2O zJ>3&yv@3diR&KToOZu1%0nygu^q?})%o~G<*gPs6xOY$AQ@a7f4|E@rl%Tv+(gC40 zM$?-@WKAxmv$+K9^#p;X@J{<~|2D;|u8X0s5f**{p_+UYb(79N-<2O4#W08X*?I^y zg&WZ4s5OZcWHh!;4Mt8cTe-8{|nwfPljk4Duc1e2EK3|o@kVz3;aoAnhBrm zu9Ve#HAC&G1SiN*WD`k#FGUK55c8)*E~elAefY?c0ze>e;HTmtL*39g-FGkjm$ksc`Y+feWRe3F69ez|Q0%%3T>KxvVa%b!U2;oEAZqybn# zAratvJ7icz@_v{Kkk@-`xk|Dvu;9)d*%G^l5(HaHjmdo3P|jA1Xv7eRx++8sn!CZi zl4_o!1z|61iT|@lzuFmvPbq-++%|ZwL!82nD=dFVpU2tsKveVH+PK zXFq*pnItPjo7Z=;kKK98DOtR%p*a-3ip~EMv$q29$^VWsd5|N>k-A! zWq*pE>au5qdk}e_*a$w2^X9f|q`)ZG60AUgQeP`Zu)v)9`Uq@Pm;S|GEhZ%xnDAxB zR+W}ZTts#S%-{3oJ2WtsVrusL%SDWm4#^TTzkKNU4-tg{=}~^aCN9Wg9mHG443o)S zP`KLQOV-aOhg@x`YrSeR3Dv9*rR@E&@zC2)0{R~oz{bwff1>@0cNO_O>oAWa_>L=j;+g_MM87s_M8uSQ&ggjt!@m) zp(UD4WVw^^F~U?acr0cBN6X=(Rt<={qAmBZ#eucf zezn>);!?S#Hn7!|J7a55S{rL?1pLnw*(!ahf!od=Zs%bu1Fcs8=Z8Zx6ymC(=n}pf z{r>Dd$uT?@E&TFn6I6nTP^gFf@7)XwsJJ_)dZrlK)D7-!7dKF2hZ3&ClprLfK{B|Uh>K=WYE%!pe-36|`z`!o}^*n}D22g1{op>MHJ#Ux0qi<{1Bf<|)k7sON!BnND z3xWZX95pL#VJ4o8)m^?^n~%?@1`kTU7yo#l`?Yaw$MqG<0<5R=xMtrvMp(u&u9tgN z)OsxndaygR7JtPBYrB503Ne4W!X4J_8u$85rOLXZ^t>Y$VRj=7w(d|rrb4H;G=!=3 zOpDMfT_;;t))-3I8#D!q*)70aCAD(<5MUBf^SuNyUFtb}WpHu`bK_%E67eU7!>8ltUbxp7?xvf$B+uTPX}KX%GQ#XAsKCA5xIhwPkYT!Q7lbZ&rIhDrV>;4UtuQCjd06 zr;8GtD-n&5pbD3pzGgaiCpgSiw?VqiY=QKds>X?n{1XrBb7l5&1FHSQZOYJ82_@>O z`!O$6u6v^kOdP19G(auE*a~^WduC0obG6)8jp2Q2t9!?^+sb0K_XF<-wRXLREYz}cr zWSU@PqB8$g?>FCg{&K?6Z7QB_ukvRulUFyMJ`*~Zx7x|yoZ!VqnfA}e?z4w&dORK` z83dm%|G5=%z4+bvcsJ(&{M>{J55+L04>P#wjOb{moxJ%MV|6G-ibv8qqw@Nm>p+Xn z?7>W<#_-wIO;~m)MP9JlPlDJN7{%26BfPlQtBcBb`s4j2I+OSB|ed zz8^i28Aogbq{Pl}VV9L)`^W_X7ffWvpL06Z4x!FXdnJA9cvvy$iG zQ<=Ij{#0O$R&3}67_@C)J%uy2KMQQR@20K$F4O^+z{v;|$E)RH`mgMgx)aeZ!5K~H{B0MEa(Mn=gm4vCiz^*m75y>?Ko8rp0pzUl6)x5HT 
z#9*$P(5oSE7vR_VDr73;k{~t!V;^@wEklKs2Z7p!fKBtbb-Nz%XH|eKY9DOsiFZeu zpHFhJ48Xb~7tH_e4^q!AKgZ|mp6ssvv@Wew)xG#Bha_+7pG@cb1=IEH*j8lHKV64H zCzkB5?R3F)|JDG7I^qoF?-dgl7OB1QRXIhO2ew7P>ff*z3QUn5dhco^I2C8%+qhaq zOZ3WFVFoghjb0N+nwc70pl{`b9S2R7I+%qtSKr7fgxAQY0rkn(h&WGc^MP4Zd$QqE z*JX8DI(ksOLKr!3F#DI=Y0G;3icW1y$;Aosn!DRG&5uv`Qv+d6`~{Z(NON%wJV8?v z_0KHaQyJv7q#>CPwB$vp1P{P3Bl@jy`n9$~;XYmzATIcC!@$o|Q_TIsFSZ_bK%tm| z`cFYPX(=0dYVwh%c!Qy8EUZ4$Zb$3wb+CyFMWFe`3mufR^^waBkVzz8Xs!8ttSKnA zfC~M%K`Mq@jR81_t%j#UO)~&DY6oIt(8W*Ld+dh3zIm9e|76UfHM2{^C zq?Oc(kGAAWN2ExS)hJ_h4b_|VV&nT~{AXB68o=-R7~fuJOA!@6T#3+l!qNUGO-7;? zZG1`QTkbjzBmeif{<6%3iK1ac{300wX+7JFQHKaM%kP((Wv+#Hip2vXhF_9~(j5zj z(tjCy-SjyRkDD_O9YLj)9J$%q_<7ddUKC>FrnOcTD5d~q3yW|)JBvf3(GVzutfLKz zcV6{)P&We&;$Q9f+ykA^~YCaZvh^KslDN_~>TAlXf)_v`TNpm~9y!U**38WU2 z?P3jlBD|&?BKIr&5lp^nwBBw(ElHNW(`>f%=lHCV7HN2iCSJ8W4G5o!5qe#c{bBRE zTJvU=Dd0`*bM*1LwX3`JHn;5XYX3^CG2MY?(x7yCZXPbg-CpY#WZ&cdF4g(8K(>hc7LBZFZN3bXJUS1Had%aT7_*Zv@)?{SfEov1NM4Lvr)X5@9j4wcoi|&^P>V zHPq|=vpNHAh5CNU$G>_AtiC^Up!IdKDU~!WY#`0!o04j7|JTYUtrz5p1j7BAXVD0m*^zcL z>C;-rTIvwefTEwYd=y70Ye=U#gEUSWJvpI9kNiR70R3RIksuamKuj@r6GdeAB0!d9O|(EQws_I z_Ts#klGP%?7UHF#{^pm+zzJrd^kI0!fU?POzi_SoeIZ+6&IB6ySHLT-*X4mra{}Yi zukVx1n)=vNo+|0CuwUaDT9IlM(Kx}!!V|FG!62OxFXvpM0mSZIxYWa20c99{5w>#V z9oe@H5+KfV)V#Rw43L3`Cv9cSFul>DgZYVC5<$zrp?)QCRv6ji_43Q>yhwA`voX2# z^JKbQB0Su~E}Fj}Tg)ZZQkZ(hcTgD~*=k5TufpX&;$px2%iZqSxzfnGb=l+KakMs4 zb7R91#8vUD4)B5P$zIjHUzY_Pgcg|z(tlEojNTZ4-SHqtjrx9v=DRa8wAa>9V)DV> zyx3UrQwni)W1!wAnRp=>=!}pOzxoHdPBw4&>&r1$++=w3W(;v|n+BKXb!P2` zi_~-+bNJc`-obJBvpjyvO$&CLV(I+j{!RL6zDfUPVYgOH%E#mBb3WT!v1k`v-{DlsF-ZL+?mvAuEuevYHags3A{|U@Q_t5HDT*00-O!aB^ zeGz`=zqWfF#Xt64`5?p(!5+65b6xXSGNyV-ikCY8qdKN%n-al5Fu}4Y+eStxpAPEX6(tGI8H-ZPATXdD z%sk+$L`I`zU)gyjEmZtNue_$dQ`p0_|9q!K*yH=&<^P?v{I6epiYX!CtT!r{D{+x8 zez2xA!pQ)*N=zsew^|6emBbhx&DAS-@24P932KEa`6hhLVy)r*b852-7b;VWZ9A$J zT2w8D9>g>Y@#U$tg6Khp6_oLA+3M)^dMrpr+INuYiaCuIf)N8x)$|0TXN5)~8rL?u zGbLtmEJUJ(Dw-ok79wr 
zz$rYNq<#g)Sw?4ZMtvCOA74Vt)O)?epIOGl(pvt(*{rppQ$`L(BseXrZ*^hS^t{P+ zzC5?&H@mS!KRooWP)VEq90GJM0dsN8NOCO~u)o(_bk_n6P!iSg$_Db=+TV#ItFnp+ zD+hx>?$AyvPKsI?wM^RG&l=dQ2OZzYmTvSxFmU&^+m5`&UfL0wIL<_YN$U_4 zi=(5K0CvNV#4k`73_+Qe^f*cxrJw}Bg7de0W{$r6KlIxwMWHQ4ZPh)vM;HP0&b~)E zAEJ8gj;KW^Cf_C}2kE`BU{%YHMUn^W$DMk(2P?Cg`YN#jEx1?ZvPLb5eSiVgTzQ6; zKz~=LqWRs-=Sryke{~p)&3LSwKjW-h$yLRyG}XjP&`6_ut~Eb&h)Fm9-Uod<;-*NT z1bv5QPHK6dN|QG(xNY)};^e}hQ|3VQ_L~NB6phEtnelid@B@%*IPtsRCoTsTP9IcQ zXDX83?Fk{0>+EcX8!x2TsM6>pjsZ# zyogIy$rSrZK}!sITCeC~gLd1QhkSQ)hs42ti(NuDzK5ig+`8W#y55Aq(JBhxh6ZjT zvLmdU?ISen#Z9+lGHr61KumMrpHZcEZz&qfn**mg1DQo>Gj?whWBUnI&H?aQ0}V@?Dt!p2%u~!dvwex($(}?^ zTw?z7?akM>uT#EPiYS*CF@Fv>Q#~xGtBpQ7B{-z`t-A3BJCf#s^#nN+IPeIi8Wh39 zcKS{~oUx?88flNjqc13F6rRKJRb(yk!acR)_x(a_+Uj4XM{*sO*evKS_3HDiC2j4M z&^S6VSHRMA)rkQpAaG2RvT|>XJLH358I5KV4?L~xV0y7t_ba7^uGKi`z>t=}Hy;|48Ls!dukJ*C$9Tpo70U7p$v9iNwd;&r4~6#URT z``QR^tvf6bg`M|4D(ztnc$*Up*&}d#xFA`snmy8zRxkED?=B+b`p84s7ob^`I=VAe ze(mfR2A|R~fGX&`1u!f7N*jN3L3I3^dDtVWhZ+p=@lkK>h})+QxUm1l*jYBk6?R#> z8<*fT5FCO#A-G%6;O=gX6SR>gKyV38&>+FxrGXIK-7UDg>o9NCe0%4q`46Y+?0fBN zU+e56alOY4`mQmIG-b6NR~`HMIz-bxiLeG+VpHC{s|O#yW8WvWlkmKhw2OhWIUkY# zvUb<%d(Xkzk*(evH~mNLD>&;-d4Bc{&kWeap=A?pOi;%_@Y1i!5{K3fg$<4*_Qp(_EQ1E=wDd2-rE?_mf zwqK3+7?~0(7_Kh3sh?klLM;ekShIn9YZ}hpmjINyLs8Zj34p_V|86n2y|b2L{2Y)E zLl(VL0UoNxR8URD?Ptjr=2XCJ=QjWu1z99`u0m2?q$)jlR#DlO?Axu`f~#0Zd`Ozlk%;J5-;{f8aaeV0q^Y8)AFf7kU}XigxP$Au@s9GeoX-^>TNYK(S9WSg}qJVR9{REm)dSk zuCfL2GWoKCA9C?vnzQeJbS^>$ey=Gs=eUR z#jttNWrRMrG>pz#BizIsiTmI31~9kZD0kpSZcfe9sE>BlfQSoH0juPV;(%dkZOXEr^z73Ia`kv1ky)z~%Ag2ZJOb-E>n2y3Lx>=h6# zSxLm3=O#m>m$;uVIr3AV_|EyNqO_Q6@VPaQJ`dQDbJlD5JK7JgN&!NBOwi)<^!#Vm zQf&bkOmM)szGp!vEi=sW;nl-`NAXCgelCR6)o37rjj)B7Zf1<4_Uk8<^;o2mbI<=R zCK(Uw95X`r=~#q(rawYbuI%Kew2Fkg&l4mm8VE>s2TKEtUl+m9W#2eLAdCK|Aim_! 
zi6xH?2pkj_bHQl9A{|q}I&C})Fq=lC)JS_K^r0j>p=<-eUg4VZkwaImm6vFZ4qE=S ztxuy+9ZbN%1bQ3DsiEaBvJW(nTRpU-ikO1A9ETMEGkFe6^Uc2`8{p+d;gCWEA9B~0k5~4tM{kU`hL`y7{?I_p_qB3``>kRqv*n-2v8j1gWy&Q z$S0J6dN~$awi2X7ZvfRj;yd<$)9DAn!-*xpr!2Zb*J7!AsXDA_>jh`76QQM zKt5u2>iS*&Wd3Q#8Y@R60|Fdgk$Px|yEWra?|(8GhqsFHf5rgUj6A?4lS#zg3E z9Ylm7Q&p2fibScF<9KcYX#wYVYOgK4wQK6nl~mzWhMG3jOa(|GXv&!aWUr&f_l;)S z#sQ!He0PMqL*2rkX81TQeEgJBkDJN~HP@lUpZrZjgM8)7i0Q@6Wu-Abd>1eny=ag@taC>V zcmSgr68n@v29b)SCBSd}vn zDPcw*5KfIqWX-QF{eA35`p$Y60Z^aU8`jR}YobglT$SA(03SEit(|Xjs&Bo;YA=i8VPS0+_9Qj4NSD&3V&68=f-2WVzq2G~7}T2}aj z@AAp+aQPqsKRq{nJVz2-6MT;E)y|5S@lBaqp3C}ppbfEMxL{?@s5Dt6d4&VR(~1Z5 z(!GsnZDd$IaP`f)rZD+lSaXBgA9SR_Lx7vw;jqud<;J+KbIOx1qTV61<44lNa)QS! zXr1K3@$vvB-?0wt82(xP%TS>ziZR@Y2RT5=k7a;<=-hNqYha zz^CQ?`Rc+oSU`%i8?^PNKJ9&aIK~5@X*Olehg$eS!D4TS9@k{MLvTwPc*~YY4BwiGm1!^AqiBJjX6 z0U?!~R^oTMn#a-B=|Q}VHC?k%Oz^S}Ik`E>=bVFN^-7(F17w{OWU!oXP+eJL4ViC zBd_#D_wR=_eGtT^vShVyc>jg$$|MBgBv&Q6DNeTErS^O_B=USE#UESL_f1Z}-SUqU z$hZ`ZaY-q~A?(E%XSXRVwWxbs*2PBnv~{=a$~yoOy}f?W^5?N-D2#K(6zPj`sDvoUF$-C=v>;%I_HTKYtH}9=2%mF`1g%i z&|~iDQNo1#RuQTr=x%MS!PDOCr`6Lwp&kTS&FL=gr`iqzQ~r*+^=3vA zSHFcn{SXtFXtrmau~}5<+6|K1`*SB!J|F6ruF5C7r~8gGg$d=@<7R&QR_~0((4u*V znUL_Z@+i;Nc8GD>Zt~_b<9xs__8{}_zv4da?n-ji6(`yzB_Z^P3pcb&^-FOj#3==r z?dRK#S;%SMFzoRt+@?XoMW4dFND`A|k(YC>?yM{bssJnzMCeC-if5{T`xFnDZOlrgIDVu3W|OrJ1|MRu zUg);oS%5;mKe(rDttbH$lIT8v_6uyTJW#eZkzUUU(`0lc|AtFVIbCS??&ImhJ@Z6c zmC_iCh81-rP}`oCA-=az6{oYOw*_j0#B{BkY!$BNkU4bIaY@P?B8Nznyy_hy%d2Su znbKh91!j~3(AeR+c&B2WPT%?$FWqy|dK!FuY1oW>F#r!>m)pVg`NYv)@#&wjzs3(F zT^8x6178Nd6uH`)4_sG$L`BzT?0R=q2Ust9U6>7-<0QDKf~sk0Z1oJtfHWpbigQ&; zjB>iR?D-wOY6knoUsKab*^II}bqnoX(#=((;ySokQva@cU70@$dv2wOEu?sKbFsNC z1WfMJ(+}y06$#{fLY5>leC!#Fo_g?f?j+oDMOJc*&w2%5M3$0FNu)O&?-Z$gp}m%fppTq;=%HsBd*d*Ozpg(GhFP}E;1^O?KBQl}>E@(9e6+`81A zTMwwgg4W>0ygWYMfPe^NU;cyat(Q z>ca9ppySf1>)gHlG2;DdfZfB}liuw1D?Ni&5wtsADD5fsWn_CKE4J}4>UFoht?tf3 zkpopkvR1FT)d4wR?_IaV4XYL@Xzqv+d7YsLRje9iZ8a;9D0lbQ5hsY|#}5aF?Ac_2 z>VC(XpAOPesJqn90W0$VSawtlxcuANrsJSBUrMfscpW 
zHGoZHDPUPJ<(V!*l*R^eEwM~Z>)X^^&=w}CFSf)afNr`-vxqVyvy{xc(91!vZ-mX*1^l@J{v9b=u& z*K=9=;1pNc(P@@p=b0ZHYVk^pJ&cUSZ}=8425KVv|q?TlPaaIhOBnewb~UM{#b>ZGt9{29~J%4H>%&`Cw88AP(DLL zbsL8_SIwvWD*v6cQzRIJ(4PL8+Di8RG9VdI={w=qL}8KDEy#GXbzpByLI>dO>oJpODo&oqc= zF+yu!uS<5`^@YaG^|kd==&{dI-p1zrgxnP4Rd=`eJipZM{egQJ=c0GU-LJk#N$T0G zjS+D1z$IB;nmS*|%MK&F6{478kL~shIK;{${Jk?iF81km!WT}oI7g1tdCJnWs8{U(c0GFPssV(FU-wG_0f*)KrWnwo9WZ z1K9iYE$1ZMYffZ{{ojhsKm48iL7Tr&6pn!e19NV^?8S*v3M`c~$`fNEA`nu3*H1p4 z7(Or~!*6@NdXR)md>4a4>A>64BW#1^lFc6u4ZVRk;8Ay>n0}_L5;4<|ZvZ;$n5aI4 z6M_2eg1!8CSHzTWBMFaY>-Ss}e;-OE;L5%4H3^;^Qr0@OJOj3F1Y1|{_dbvW@cu{E zf{Or%vi_sE*~zJb`}ygHYv}-JFzy!MvHBa%e;!^34lzemEEM8o90oS_qtyfQTLKMJ z(3CsFAbMKzr4eDcMlwB~RM3J88yQAjoK8^D4=+vN&hM1RrDV1TG}CID8u1RnJ3o$Z8_R)@=% zlYA>R9D74GuER83>7Y|aDpJb0STo+}UU=9k_)_E>lDW5396$0;lSAslGiaD*m-#-4 z%h!_+&nd-e=BMy|Vf2T?^?F71F*CIk6cGgFUG=tkZ_ju=BCkb}q^wC7OH|4~q-RMm zwv`eti68Ous&4gD)g>KL;kFh-C)9$b^{M{IS8~FC7w}zBwc|rsygK(2+A@1buPR{t z7p(>Np21m_pc5|XtFV?zGzn5CiLjR;$ zC+#%$-WJ@biYWq6Y^Nwx!supy!9A$^P$St+24=oQ3gA^iw-c(nXc=KBkK=P8R~z2% zLWhEY?}`e|^R4sxPe~6@Zy;rdLd^edef`tfXkS+lxBlWeW*jDZw<#Kf3y?I`~Ji7)3~>ECEux~oqmc2|5G$69wZaZL-7XYNpVZhfa~OGz5% zu2nVz{Ttvg&64G^uyU7Sn-#HKQ@9!;sMLhRRwL=UBqCX^t?tsX8cW1Ca_UCV0Qe zCo;6KK7vYb^To`r2^t9Sd-|EGZRpwsTX+HGDI3G#fB1a%C{rIWji)pLOWx40ICxBS z6P7=Sb-O)FHijByKkDMUlGl=%bIwB8t-ez=g;L!CkJD%LiQaxh(P02b#C&{IHaHy_ zbGXzVw}&o|Rj^8_)!_ z%%H71SSpfViXLl!j&UMuLt!r9sR*jb_T)0qU&x3kvnc959w5Q)B>V#KtkK2>K8+5Y z=CEbtq5#s5Z6o3hED}5p^qlk=S|%5nwek2d%Ul91r<@*!X5@8qHY-2!T{rH;LjX?N z0LivDcVu*EynnfdQRl2-4D%s&Cwxa`ucxj*gl5-4_g?$2cc1N*hI>^r@D?AJ<_~|E>w<0mI84qQ zV@fq83Fr0eWCoQPufV4oo#-Pt&%o<=jF-8qJw!ewVbr0T9It#rwnt>meW&h1sJP`j zF?>TF=OQdBh{x+_Iu`JljOtRJ0$W28od5WskaWuyhpX2h!GT-od z(HotN)mr+8s(=#{w&`jN;{r+<*eEz^Uu$+#x2(O;Yb3@z)ch>GFxL<35k7)?+DN=a zw=vMUnxwE#xsn&tj6BVC+|H%utR?R;W677>)kt7?W-;~gaDzThT{@t3oaM3{_0-qO z-`j{Wx}*@+`@YAg?z|~j;$~ahDB^b2|3mNT>dswVta#GMWePrpkxl0Aw0C>N=Df6k z^cB`gRBiXpzIAqFUlxQ8lU(%AYGF_^fr3`&PG8Vd**{?#eq9H#BWS*NnS|JJxIC4b 
zd#WzJR;Fr<=&L5H#xe}&b%ew2`dzTSg10TTTAMiIQi;BTq&>4 z&`xh}UI-jW_oh@wB0HfD%XssrV+mhMvyL`Z_-bS82Ombj0%XgXbiLK7NmSHn|NN+f zV8OGz#96T?)_z*co;EKtVz5vLq)F2&q8dXqh{v>n2Rt$p0V$|`(l%GDGf#nibs9!+ z7IZny@$1v0ue_Z8wfEZ1c!4OLAs{m6XBjMV13vsF>f;mycsZv6FHilaqyYyg!rHO& z)zj>TS2g=7`XG^fA7~(=@>qM>huaA?0k})hoxt2U*`8k%CcgjuWq+sz%tVOUqs>Q= z`}l#C;hLPpw^KV?iILF`pnnW^^A1X5nravFBy5`T1a*SS1}jrRr5Ev2=_x)08<@1b zVGUyTx?<=6oX*N6KrccZanbPZ9&E)h$((DQeVA}v0(#WF=d}z4tfrRdRY?7!!VlfF z$D^Xw8EpF!FHlUbu#s=_P3|Q%s!A!}msZXIr(M61z_m9P@4Ua!Y~K@G#mb90P)=V3 zO=^?4%q2^GUf}k0iA1u9^{3#{m#opr&C{-X-i=W90Kp0aub{llXn2^R%R~RkyfRh? zu}OwcPl>fX-pmc^=fe8c@2UTU;(XZyB8e5m8cm!~Yu-neiZSXEqb(=Y)MG~=eXi~5 zkfY?BTUt-^Y0qsp_{?agMx_52^b;^9);|J=?|3EsH!TRe9LPWvjIUG$rV76o}S4;*z3yIL%_^p&l zr&t?IMa9H2KH62wa3Ml%gy3oy|9W% zwrc-{(DTWiQC64QSK*tJ-+TY|M)};ENOBF5m+{Hg$M{0^Y(re=E$e4o-iA;42px}C z*MvsfRyOWBv+CK;#)=P$u~>@TT}tG9Vykd%pwxvZ=_jPV*6-h3F~H{-j6Bc8NpPv! zlytVD#wEZ(ELel4xjYFqvB9T!Xh}x~Gn)9`zdP(+o-g)kdQ`&huQDt-34jVCI}%{o za>3nC#pxj?YkI&jr*4kNO65xn^*2k~j2`wRI2|T|Fp|V;YL%xOI^uYt*=zjs;`_~~ zk^4#)-=XxY>Ja;k-dPtbAH*IfQNO`kDU$p+pOOREZgY&fTf%UquIxH(1Yi$bj<4ij z8ySSIp?2@EeNIsKYOM-r{Z_bjG+DYbq|6xcgF5n3<<%5Kzbq2KFXFrHjz7fZvUe7! 
z^IQ2~FVxfQg3hn9Sk`$0d5tM6uD~nuX!Sh>t3berQ;6iH%ISVfoCblNLrK$fNk9oG zZj0jw=R%{=rW#>Q%Xiw_WNwAaw{(26SS()>H>E*aTUC|{>cGyrWTX)_@n2h;_iTj(Ocr-L-%^UZz6jyn$aI3izgLUh z%=Tl75%CsyU<|gM`w31;aV2-z`e)s;2V{Zs5<@d7n>2jeX>Y|-Nyy%R-Ys@v_$ak0 zbGRb8xbv^=&es2tbNw|Iav*t|AVC0v)7pwCG^&Qqp4RxTZ4oyXM+0GhoxZF_mF5vr zP3$t5?8Ts0X2v$YTxF&<@yh)GfoE9|zDJ;bvM$J{7p<#d<1P-UJ^ItEj8K(KUktjLfgO!2H#kTKKEGo8DEE^cRvFb9_P!Z24qdhmy z_KzT#46-?WL`$*}{x)^Py-}W*W#-^_TKvd7ZSro_iiH%c4r~W)-p$b0tw&CUwOzc{ z&^!{vRi^07bX{Bg|TMrp&?O-ca5H$8-qWUhPE=ae?ug-6GNF)uRME&RtrX zfUF2A{Su)O|Da}(eDPb>P@mh0ktRVbk^2$OqF}m%*Kvg=rShUhk3=4m8IgKaH{_}Q zZy<^h2Kz#Z8$?f{5awouU06CC>cfiD*zU84ToAG3h+Hw--=_VeSZ>jFbSG^&%daFiqWtGB zaoObrWUQWrAjrm1;7YXuEVP>4D6d{3efHD)nbMGNX!S+YKhw>>BvIc_FU+40C#dGR z2M-vJ>TmO`KI_crAC2*!+snjn>Z~1ZvA)C1Xl>i!qexi^zb(S@oEpR;&^;8wcOX>V zM>=*Qr-K@Bb0LFcEMu&fa;SyKwQDCykv~Lm5-|ESyY5T)-i1 z-rv_&h-Rx<#V}-_UDZ{4eNV|XBS~^Df2}{uvX%xx-X8o(#`kGdYIe{e7eB(wz5rc@ z>r~%-&%MTalH3K+{q|!>@TTWUAHLKXuKbcE{tA=8SC82{(=UgeOsxUCCP%+RFELp< zmL3@A)gidY3--3Gb2QC43`c{H0`$k01t+sA%-QT)0{MgQuPWkmi{wHFygb;hhz762&NC6@d+*G&1617foIjEc}QUv_O{NTmVf7fPk20yz9VN z`)?t!&Y%QNCmaZ`2%n%f6Tk*&F*X)MG9l^gFOQtd6KTfwE05Ff)KW8bm(_d+O(cN8 zRYo2gQQ0-1fGKiL;2G+64gQ>=V>cll5nai_6$mi?_(8xTzNcJ}Dj!sXkSY!GMGRhIwpH8I4Syih=1`6;wY5+}2XDaA5pP{61McfLVQQ0)1 zPWfS$jyZF+-RT|@HYt?t8$N^shtP2~ocRhdON?<%v18c-KNzl}+}}#b9Z=>mUwwL18^)f+M_7**Ffq#KiQ?A7t9FigMzNx)Q|(Wny5ysrZ}i|-JbZR5`&L> zPWnm9uIei5tAU}tikQ=yMi`1ZHh)3)q!lo7gI$7(A67o6f+;W5Mr7$A!e^|-nVW#tn{Ebk8dq+$+Rh%(Hp&@^g5w7}zru(eX z)4luFMwVfTR*jMx>QZ@9IX#(FD?u>7cdncG%P6Fwli){}AEA*SeMS3q1w}1Z`bAng zK7v6zgfFrlwld%uW!@~J4#=b#ZM7zP?c!1WJ$(n_kYI9GCgBYav`IC`;y2kW0GoeQ8l4h9EK)1kR zt19r+BFjVltAz1bbjY62GDx|@i!Daj59K?b8Tqy`p0NJu_~YG(s5i5f)$ZnAA$jcP zScZp_+QI{qzZwOY)gdj`%f7GL%kFi7=;SvPK}|&_(}CL6*BS!Nz#~63%75)aH6i?J z8TV@3LB4>zy*`1a!;&+kGgK|7~K!-ObH4F!4Xi*Z>e_ajH*hdDS(04Nsllru!$HFFUT@eyBzz>?bH^Wd;929MCv$+}Q(R zIvB{Brh{glCaPcSZRw+swrblbRk0tc1UTuA)+l`~#^Pw!sJ7p90vYUi;`F=?r53+s zzYr=W8kj2>EljF$eD$ALiOPJi5kOID2(tr 
zsdwE@^BCzQn$*U%?xjSJU;cgiOX1Nn;XZkobY6CQ*@>rH_KkjZieE^s^Q$yRK=#-S zx29?44K-+=Mc+oYk!SYpg+^s04QI|JafrHX>-_%xdv9z{im36Jf1j6IAhAoo8Gfd1 z`V8rYuz;`qaP#y!U|84H+b2c@=S&b&Y1$X;pRmlbsZ35;(G8f+dkPURO3W#Nt?@IZ z=1!TytBT=A%VkX-OVeTRy3WON3Hh|3A^5QL3u`-akkY)H$&xm#-;V+mw(>F5ZHFZJ{&(Q==vIz^@9j%iyq}C zatDkFl^AxKzZcy}Jkp$cKi@;U*y`zi%{$uJQ#%^BlJBnhi;8$rls?{$Et&bvP5kA2 z$0*`!Vtgc2C5ox$>v=VfgQowm(6M1u^T$ZiZ^`etIfk`pdm+|Vu;AI?=4y6c-4y=R z)|~1X_UWWXma-8=Ny6JGLe*X0>bs2VPuI-dHdoPDeHn_}FaN^=Sf7 zinFWWgFAVdbEnXK$ZtjyoKYbX-%iL=^ndHO%35L_Uj z2=1#nOAg@+IYqc&KrN5JvHpqTUQ!cv~(W$Mxpl31$44~C4U~ZAB z{L2e5$M9+@5`Cj=9754MWmJUfE@suoEmqvE1!}+O6+)F#DRap_+7Jui2X%#vPq9!} zW+;C)#;9AGdA2lhb08d~?<|XGzLu*6S%B|#BsG&fmN{iD>UQ->$@lILn3Km9_~g%s zDG5ih%-!uR4u|qB0XH?ZVeqq(#pfC{=|BzW7nl=}6Y)2(S8dC%ENl#qVR25JHN@eV zt}E23n3u3%R#j9i@VnY39AHdRPf2;?BON8$l8^ao7M{z^H2r3OU}0`4PgIffy~1eZ5o#i~>f~jl zQ6B_RY4r8!XOm-b>ez7eiYzAK^k`_HDfsz9%E=|_MRadR4^F*9Z~B#I$)Y?J*s>p# zn+46VN5XFv;kVrWrhlh^e1cjrb_w+5=&i%8%q6`C)#v4-#b2g1t)UAdp!n==xqFz#BOr_D@xYnX%Kh$B@+oz>Qf@?hxP7~^Ijlp@P@etlq zOR|`ZT*_~HR(%T(ehx?LmZ(&icJ+h+lqjF_(AI~Emv&sd+vV`F8ZD?kp)tGuwQx-G z%m=((P7rX$C+D63huv{I%Oz3?ZW**+X6F560fzw4EO*5kT6cb#RmwgUB>SZ%FK<5e zGrQaGHH!&2CdUPBv?c;m0~|57jfuyWfO2G4storxNe)6)cnGd|hspqqT3I1x2Aoc< zM!t1JjG#%T{q45>Rxiq^6$Jkb+ftFosEF+uUMbee>UGV4!-zI4UOs@kjC`(Vk*d!x z_^l81tZ?!+9CW{mWqM7i|C2Hj77&x-&{sy3n8G}#RhjB79jhIq<>bOhc+KOw<{S%Q zyixg686TwiNmWvOO&%x2Fow!=R^K9BSqXd`To>H!aOyTOG)Z@{VyMISx*X1TIj}hq z^WY~Qaa7Sj)a)<1`p`?lDUP-EcT9wUK3fe)Xv+NK+MMNs;fW>15Q2Mvk1R}3JPA4g`w{@ly zXn&f)F-?awn&b+Yr9*W2XU*&N^V1oLQ}5o2YFNFI>vW4l&K#A(GP`yJA4?N$fO?CT zXvXTI^PO z-uv~B&+~~A5vX&6S=YTj0Gk*QF+uFZDrV!gAY{cQ`fd~M&F8Tx{;m$AHCSpSEiD3!%6ij6vpIaXKt^uwFF)%-+=Hy@+FgWV za;)6u>6*nfXv7G;7BJFxkJilmLE|LYJk-P5<|CkKF*kWs{8KpS*imIvf#+2%I33~U zp~UtltgBq@rfmd{xjf~7w`^|Qe8sx)2(;&|2<0L*9PSdT(qQ?S%^%1Srl|5RSQmjI<|k)Z^(b`)AU%PpoO}T zYj`VVBVWD$41ugv_!czVgFTLc+$kaw0Org^3D}X#wTQpdE=ly*S7TdKLv`yYRG*3V z%So&aif84HKkY4|FC!f&;C&-J>nzSueo6y@wLA1Jf8ua#OIEIT)!XJwQ$0!cY0MW 
z$u*F^Yb%KQTtO{LsAWhys04LrA7>p_Hj2+y49X4nNT_fjl=B#$% zlhtJi5(#JXOP^DH&}mqt&GH`C*Hn$|-u_^^*itDD5GDTIYyAHgMEpN@geU|D0J;zT zg7w=fz}>I>-m~%HfjC$WPSY5~cIs(TQ3HZHeD2$AF zQRJB}_K`VW?S}H3bbgGpP#~omDv1$CvC3*Qbc@2z3CHK)k*1WBQ4wQaBR>Ll9pO8I zNW@-wzfoXb11)RgfYO{vZTY#{3Ij*i%^(2+O0cvgzc7hs#cEVI_%^Sjc8n>UJ52N% zI&}(ngJF^O#DKuTS;lmSF(JERAy$RW`Mnrz!LUCibXT{N)e)P6dh!EfHUbCU17ki` zN9if`1$J4kDw8Yj_(9X$WGM))#_5!e<)B3+TtXJGvkHSyu^Bv>w%tE?ZuBO3R+VLj z|H{JDK!;VkAyX2_YbHP=nI{;BxP!9akF|+N@LiX4&@+_eDMaqkKoPsb0II&tXLN$e zO|uFN4HJ~&#Cx8_nLnulr`Q~EG%Go}RPM` z?YMsV^>iFICKSM;lyS7{V5Pzob@oCX00e~ipk66hjO3`Ru}Apf(trYw71U)_ek7nG ziD(#VcDz-PvD!;hDr}?Xpv7f&YU8c@WVScK7@SXf8{02e_yYd99<9Rq{Kn~i1FKco z?y9M+qNTu5;gq=Eal&uPE;r1kl$^@?UIvdoXapP*uZ5~&=d`jC;Idk>Z(~>VmLILKr z=xRWbMgD>QKljBwZHDx|_x&?CD5oS>ZEd#z6(2n`qLb2Vycf)|If9?kju+H&Uk?R9 zXA`?6;Dqj6GuF=O2pjY57E#t+!xsS6aJXyM-RRsdBfd^F>}pHc-9npGv@X_vLh`J3 zo;#N^Gc)bGmkNb>qRrcJ4k&i-+CdpboNBvL!;5+j!+;KGKa+@WUo&ap+Y_^px`JWK zkMd)p?|T3HRo!z{Oz393eEM=#S3L)*?Tpin$rZmhQ8FCJ(0G^0@QMbWMN8w=VjG7u zTa)(NjxH7bHd-9ouVI9fm1kMZAKeUrrU@M6=RV~ctUDhetvCBbG8X`%4 zEhuUqQIcEAsRPRJ**Vo$&qYI6#uO(wsJ2wJpkAZ5iu!3>$5qGsm3_(FU8+pRbmj;u z^l|p%Mp!A-k#i}Ve-U~xocu4hb$(`r2SIMT7q85Q*D}LE%T!u6I8~pp&e(b3jU-`z zYHcJ=k6pT8UTAyVVA66@un32+JI z3wySQuEshG?_2r5%^F{aOfPI9-qfG9J|VPFfoeZ$3n69DT&l?e?Q7r*vfH_r z!I`X_9Rei0Z^d+EdhC05r-m(rqIYjF^X|jSp5(xS)DKy*q|A)>q)zo|m*2yQH_1EB3YArl@)XbKX1c@I_&YeHwT}i} zNr?78vP#droXq>(h<$x`DpnswD>!HTE*`CMwrb4y?a=q;`FyXau@7BF1h0<(v$uY& z1OL?TF`5sk$`=MNq3)K+&4-SWA(QX;RfK6X-8}g+GOVq$%k+1x27aiO4fj_&H>IVz~t zp>Z=hv9@5%Y`9OYF0TU~N187%5qw;8tCio|h|6zf`znU-_7L zdriJzJ)hRk51rdCbesa0yNYb|3Sn-sd=qF*peGbtgZd-3n)2pA` zu_rJu+`Tb8nS9=*X+^Hr&<&E5+ftBfzQ!4Q;Irz0>AI3IX%Hk|nl4Zi;d>e=n~YnR zp6WRiqwk`59>1#Z!BQlyT{Z9c{fRcn+)t4Nanwz`QUNHHDG^VACLr(@J&+x(2`r=_ z2#43p7~}9WYqH`s%W0jFH}hAN_wTq{BvjUOxoMvK(_B~4t|on5_H36w_Nhn@W*(1# zw0yTISAX^T5$b9*{?`Fgey9ffrS`+k6gP>jyS7}8Ja(=ttJ|pdKp{0$fo(F@Av>Y1_rBXzvGh!h$vs*XOLrTGaWyF= zX$Ay|mJ1d0i{`fmp}2%<4(SeU!^sw$+s7s_SvE?ZTeoOD|6HEgWe8D|4xMpnYAWDs 
zNu>VGGCVvrc%Cy$f{)6H^|dtu!{>eMW#egIcfsKmT~@Sz3>X&8_fKTvc^KztbGtJK zSzEdRv$vts{?Yw$e!=5%W#!cWX_KUp=RebmxWLqjj(Zef+se*8)p#mk=x^o0?Nbh` zYa*P;3eOSK~Kr{>}#lIvU}C*9ra9bvy&FBxW*m%kj1$ z*F)?TA$AGuriPX>1*|b+OXqwqIi68a$U!{?Pikw8G|(zRdm>=+z_QJc*r_~2w#Lat zQT_Bl(#xyo7Ra+wlu=8NT`_%&h093wP*Sg}lC0_Bc4iPvk~r1OL{5u2 z0)nF-q*`l_nvr4w5ikprVg-JM<4TAt4t(1oWT?|7O`mu#1x}{G#qn1x?9D4Klch&L znxl&!DgfGlO3_yTB8KnLgINP>xp}4iWuut^`Z?{D;h6H9hU&UdA3XV0f#mPIO|rfbn+WFfa>D8mri%58O*fvNC(<)LEvW>D8g+@szA90!X6Nwo8w)|9U60 zP(+%DM!lV%OF4B)S3CVG1hP+ScV)Y`Ak$i1SlutqDRmxU-T0f9+bvVqOJJ+hX@-O6 z)9g?zNVOJ$F!~sOwyXp=#GYJyE>s7U?$Nq^19cX)xfu|#`sK zZxco}>8aVORcv|>kzRIDs*$ybE^p;_qWqGm?^A6Gi!L|?uRX=ETE|id;G^Fbnw|f_ zfBwh#Rfn>XQ@Ma+Zt-_pVSNELp{s|t34P_MoY8Z<%mu@bY%IBWzP^E3fgDrnt@g4X z`U+olxt=CR*+I6c2WrB`2tUKHbUjM$#1Wji){u_UsaW-@7_>yBN9HDYurrV0wRYJZ z20waH-3T`bW<7OnRv*q+=-678NlwS+#y~DfD*NVP;e1zj;e6NOeenP*MVm~GNazeA zN}msF)`}Zj)+&U~M{nT+b{_`g8vet_Q^h!&_bdoiN>hjDH!4XsPoG3xXP)-k%i?ad ztC_WBg;o=r!p(812Fwy#IScep!0G zZfT61_nXJ{za;5HHPU%z{U`XX&ig%4PNK(AvR}rwo{E#!khFz`pjcX3TTl^P5EmKZD{u*@R3g_3q|Po*cF<#;?g2wxYj zm5YClFHd{jp)o)gl*rxiBlKs(NNi%SH_dUrMeyp7;2dSRZME4VhKq=5M0F{a!jqME zMN`S^-Z>%uJ_~)&M#V-dGd)TrJ(YT)aZ>x6*$!6RW1f5r2etDo+(Ls#^XN3&7DF8Y zxd-V|j{2D&l_80{4aJx9T9mY><*sVup9P4VqU@?lYSntc1=V3iyn8)1rw3Zqxtlsd zp+*AF6oU(KN8Its>kA&D%Scm|LDuWPx;B9+ zcS&3jqoZo^+(ahL+Y^a{WRb2Tu5$6=F<7TFAZ+KXm`omRaCCDJIxhNyQ^Cy z5rE^zJYRUlE;;xx@u}~fKVz=`T)Q<2;8qrUGJk9gd~uW+R&|URn(d$}^QJV|#ORt! 
zabzAYIT`Y|Z^R&w+rJFGd7W6Y`rKV4mX8g$J3GdjeXx0U?Y3}tPdydf2q`l%_+#&F zVjHV6e9_RMEF1HfM11>i(^X{j;+Ktu^L{LOUclwDqpkOguD{_V|EHesaGJ=5WW_|w z%LKl(iPx^!%WQT|b!rZOcNzOW!}eX1zGib)YZ*tWHh5 zE$EjU1WtoL?IRQX z5m}DUguvGfa^pbjKvI>td_Hm>G!?q5PqC0;X~CHouxdDG!3@2a0#II?)YJZo_F(5< zDVp-ood}a+Er=gXsTdo-Opa6BTAd>0{fV_qo$M%sM5Dp}2Q5~e1Tf76gs5!bvC%rP zfi!;(<}c$&PGT)R4e5O!h^eaOc)aQoB_{Jpx|^gfo; z^ih4|Ip{I@N3pAbZnM#Xh~WdvkbA!p%v7`Z z2?40KtescZA~@06-hxr20+Ang*pM4|anobm6d{4Pmu&g#`nnlKr}H)$B0{+1BA$c@ zPVaU(;eLva`YL?v<6o~mxn#bZN=kbl%^8YKCjZGi#bn|iYQ|KnlM+5Qh%juO=Sv9^ z4Wy|0IxnwQO&g%ytj0*`{F;Ua3QKGSUrwugU<)0-o4~o@3itRijfOX?ei#ZT#lum;z&WX<^XICvF@aiv(gbz;BG+wa=VQsRNOX|n!+8mfhD-dl z@vq--LVQTa<~qB1*tHUpE53k^uFlIQe*H+fTeUplLPDlW1beyHlQrV9e|T9d{hfbq z45aH%ce{wy^i}m|lfq38;JmH`oKsy&IVmErFqS4J>#7lL@9XTJyw;1Nw;Yp~5 z0EXrr?5y$K6v6KklsayqYR#!3DezgvMgpI+3wB;fjVUj#1`JZ3p|V+~V+P$WqNN=k z&m6fXE|S%HJ5~2%aozFg8p>}|i&8vk4d#9kI;L%UD~@pbh%{kJe8v!1W3B$Ho$%n1 zxj>}Sd8=_GRF8R){MdU-E=zmA!&lSa#fOhHo*5CJ?Z<-Xar|vrJqXwJW2>u!!2PV? zM*PFO=mC$+x|I{At%6zB5cyV}g>eF*%c}m8`)_SuDq{G&J;a{ox`jddNzq{ws^=IV zGs*ORHMZtSgNM~BE4>z03ERoFPcl3s=uegUVBI&L^}QbA@U~V&R@n686T4l$-9~z_9a8@->PGGWSoDiTg<$rPib0)1*K(Ug1(hsM73@M$$tS8f(t*gF~`u4GC+oW zI({*EyU3CXAbUq6T%8OsqzL#jq|H++)zY!P!evhs5)W-6KSXLRqtD}v=#z$9A&&QP zXR7pXVbX1CE*sv&uVG^3-ETR&`}``CFGtZ06Mr4kIaz_e-2^e~h_mO&8RUNaE+=;U z&;g@nKhP^k9WqA#O#>NfEyQ#}Yqow@yn&!+r5%O(9dMvUvhC*O6v6X6UFk!-7d&KP zql?K^BW~{uHXZ!;h3`OghbbnwpjzI0h?A+ROU3syd5QbbO*iRW*k`SZ98YZw9Qh(D z@_^GHUMoF5%b{()@z-;>G^flnM|Bm+wZ{0NizAIyvBe8SvdK59iycooSiHe`>>TiK7nSAu0) zS3_JsrElt}{`g3*F@07;!(tSCH8e7)&2Q;XWA`$COBtC|FzI1(7)#*M>kQC?orDLe z59GZ7n{r#HU+KQjaw6*luyE3Akk0m*>TT-TKBO6tC)6|Zh0x?!T-$-?A6#V0S%L&S zSE@$3uQg|_o-)Y^tp8Tm79Qp~&wz(M(-YXfBs%-15Im*d@V531j~sPo1t;{K_ zEN*6TzJDC=V}H7@?h`f(AlAE{QOlvAYLDD4`zg+QdT^&_ zhB@)kWEr(-BFvxuyA&BiiQ^hsXyA>I5hvyIclH3ztcTvslfd>0+g$1!t2E)A=+B~J zHfRRYv@l#Y?8|`h-wfGCqMqh&9T+i|sJ^?}H_qzcNjGapH@gWsUMiL{Oj7<4$-ivs z3XKd?p2-^MOI}r>`;?(F3?>+uhwYyhv}car>iO2qY}{TfC|3NZ3y?=RI~Z#g)9rfo 
zLblP;rhBRteHwXVMG1kaGyFL4ktAuU>{P)EiN4cKT8Uo$O92V|FhcCf^Z&lCKPpK< z#vhFA+C+I~)|wUtuS@>S#q{7KXTG})yuxwQD*b((`!)SH-Q9trZ*CDE|i@^?Scq=UdOS$_t*i5zKn+B&j{I z&1Bv`r2+UC^zC9nKQZldrJvKxvBZl8kRkXw4C}JutP~4eNs-(zdeg0`(RnOF_?FqR z^RTU2HF*BXt!m)v16W%sQl$tBY7NTVwGywx8n;F>2+}~&u&@shovkE)vlCv26AA}$ z{G|YMWux*ZW6PE=Q}Jr^8nuv02GsR$E(fsoge{vE~5PhQ{pRvevE+L5-Xt z{8i$HRqB#^2U#Pz++Ki27t*JitVqfBsNSv~>e_s>pSy7S)zhjic`2DlRaNXxZ|Epd z`uC)piN-%Xie6u>5*|5%X(Zi|_Bv3IY21-_!bp=FC!%`rL3Up-H|EOl7>x=T-eM=h zFBJ`%GXA)W+bF0Dnk96?{3!2$=1eW!;*)2+TzLQ4pOvyHLs5)m3zvbon=-mMMcOjQsA&z5K&lT|JH`%YLEJNZ>M{*vB{uDQ zea()rE`FaYbJFF&_lUlTUXl)!Z;~%`d;=p^Czn`R0qJ0Lf$s1Chc+~6#)axslJ{j}m_NGco zzmLMZp;6c)BDEZ#*EY3D60KHK0K1jLTn(7`-{_alBh01)h~Wv}g~e?3v&nxZla(y| zKoZ=azazm(o+a!yQNdqx zZc&;f=3LM({-r-eBztb?&2{AKzW#J=&F+{(U1u|u_`?L#(OeUD=}oId z8S*_4J6YYNf~ha0{cYRQj>V$wNrhga95%VVEm76qjaS=>TWBTCC%^s#o=<}J?tY+3 z8Zn2Tb<{G{hV8q5tkJ}u=%bn{YZK;$Rc$-Q;)$Eb*cr6<(HxWF6QN&HP*H@LvR0oO{vBiYKGNDD&cB(w zoB#Ohq1z^N(E*fM{PfxtK=l>2JJMz1~y3wfk2oI4%?m zS_k<7R7n8LMpbY-)sp?_e|PWVAF?BGBxO}+LkasubzJ3&4?f_u>Ugd5ABrjf+LO#U zJH?N&m*r^|OIZ!}D*19+uY+o;SV{{P>umKFk2R_R%Zr%gx7&C-_jCiRh`IE6*St65 z+9v^nT|oi=HZRXF=GK0C8v3v8POVUv+vDY&k+%MbfvMePxXz4WYl-|})1JN9{}B^A zbiHD<xv3kT64B#gjoln%A+>JJy{0{1eGujgcRiz^R? 
zm^X_@HotiHnkB0H;K00Svzz6ZQK7h5D^4NT?4;Sd<>-NS<$?MYmEK3+%k$ZHUF5%Y zwBPL=nnuK2*2Ej`p07s7P_mpcKg0^lEE5hsh3lZqZ721lya%CMGjeIz=)HxKQArA5 zzJ;pEH`&)il9+ViYc!y2Me1X-g6d%>4Myq3r^io7FlYYOKcbfSlq`Sr68d#pXu$kv z*KP@~PLa~-KU9_kU$$*yyQnl9qcwW4=G>Eb{IlGRdL8G^#^$}9#6;plQdt`Jh?d{_ zH;k$N3rlEb`>dLnbd;o-Ir%+?KGY(bAfQN4D?IE0$2R-$mkb_%-OV2+y@xl)m5S%Ne+yjR z+9qba(daD!6O=ltql}1ad`eEjvVh-pz{_Javg`E*gEB0)v%Qmn(lYD4gV%R9YHp}cPMKq!D0dX^?VU|@7d)LRJhs*y1;qFR z9W5rfiv54e>o>WaFwc)1F59YoVE!WPnCPDyX21l?U1oj=jH)YcT2psraxH-gKN?y& z@H&r3HlCD3kN3Silf3Xd7TAUkaGL#M_-OQUl--|8ExY0PB>Q6W_!vH}Mv*EYK9D~y z-m1SFc6G-6X{YSn=nu^3?$4YZY1U6;+GkeqrPCrL`fztnqi2GBUUMB2I#bD?{rDde zLRw4ANdBeK3jK*biZ{1#RJj~^M_&E?75$}lSNTm;6S|tZE2`i4lP*X=l-D+M3qrdy zbem{fE5J+s`vue2@!otP(ExT_k+D0Hw%dv-X_7U$C!Z&!mPbwVT`JY%8!P9p-0_P* zPP29=Wa>`c~SL985)%g+I#{V7)Jq5@Y-~kE(Z4tdy?uT(P54Vb@AnDjBfi9=(on-`nJTV{7_S^pK z9+^PTRcw~Z0opGW>$k7;+IEB6^K|Dsp=#;Cbif$loBq29s2T`jf_i~sm40c_-3QDj z`y!NI$bvuvKW!jphkP(U`!_KN0)mPQ0&eLpW$*(AuJND0^acQsYoLRrF{qhwVxQPn z+9ye+nk0p>&NV8C5&6SS_gAgyfWo5PqvO(+T)~0*xCSyTLcX6$bzc`_YT8N^UteSq za%$XcnSZK?!{59+O8LbG#YpVi(0U6}J`-TglE8h&51pe3DRjnLUQx{)bw~I-+Tacn z0<#Mi)o77H;WO|qO?9z@ncIdmrLTZ@?bGc3zvF6mI47(SM~;>Y0JK`QV)~7p$U|q_ z$0FzwN)XrFtEfs1^xk5T`L*l2m-jX864iVvIkS#6hoJ%wf=&b02A=RnOGgyUNgrqo zmh*w#y*=gHov7Z`%m=f%GtnVg@`%Y*vOim=p$qZ|Qzvo?j$I$o|2Y^Clx~$0k|vpN z_f+w_T52!T?@tDBifR`*%J=)Q1UJItYGWa3K$p}yfYI-GyX)5ARuWUP&utSu)#$5m zXMiA1gNr!RcND!%j#Mb5j>DyXO@pD45sTgXH?vo9DL&MPbY>9j*^i^PLakq@r&8 z9#JPZBJ6TS?FPp2v`Dc-QenE;A(K+7?UjR{eDDcmkVD}x2yt+wrslaOh>(?dl|KAR z>i98pa7G@h-JL3-)`qhx3weBm@3noJpG9aH&XIuSB&@I7_w{<^1J{t$$KnMkN+mP{+$s2FlUUIS|xni_-w7Pk@uX*rIgz zCj3;xW5HVdwSwqR3EklKo3teH(ZKW`TQNR}9!N4n8Uf-ReXieQPOJ$OV8g2rZNo$2 z!zHod#ZhvY`oMvET}S2;Ve(26pde>x4Lww*Cs-sw8K z_6LD}Lvi9Ex{-YXv@|SekKn!aShC7g2_x5LO$SlQr-g2WvvZN86Pr#wUA-G^!%n}h zBtLg+e;a!&eHF;E%OxTtQ7P}Pfhju+;9&8aG4QbUGP0YrkArBf*{NSh8O_B2>&J+i z@KysI1#=-7Z+hX3I>j)={P=l$zZ%VotaSJ&0%y#eoV=8(=WZDRj}gE+nP9T``vi)! 
zXrCj;ccH~FG2D<4&0wui35e>kX||7no4s13GIG4(5WWMnVoFw=Bcmd3EB3YHNM&JbO?$(eyZy`atA?A`=)z!Sd`iOqK&*n4E^`t zRN8Z>ZC7_ZJCUVyyXB6_^L_1XRTfqB0dX8cIN>(r6O_>Km zMk_@KXbmCJ$KU}v;$-WDfK&COIT~>C7O3#oOOAsp0Ue2u@VK)EWdJqmd=+J?=GVF* zjnS?C4^44ZW98&$DdG~GY^)&}e>>}}-T=PY-}r<51ki2Df7fVFKP$kxb7)XdZu^^7 zyY|S)BA=;|%yvIjb|c*kYnQU|UR%yQ-x!L#7Yn12xYrpsSuik$F#$ABWg?>;!1_>n z^OY}`pu_Ud>)Ki!EtmPUoAH`&ok`^@B5jy9mu)-Fx|pm+Ds=$5^MY@tNaS1@7@OPj z+<6P@j=QzG(b4!AplRxsZ7gKaGB=si7rQE1{cFQdtPRW!E20@v&uuXa{wY)YvY#|Q3w`wkYp~Co4He0W)}zLs75Rgk?A7C>NB~OJ_V8yx`a#oO!`+p4Z37ho z|ISY?&?7u&i#NJOaKcos4ryx<5;5q<2Im?S{VhMENWZRKh93)g9@hS~+mu#}cJ0d8 z5RFvH_RSu-I4g9m9@vN0%$$7UgbZj7JY+vrzevej+MYK*Kd(g>2kw`1QX&KsRazVl z7HzH#z5fg64(G>uIoGP>#oNhS;l6o)U3=Tl*ZpsR=lIo#Tm7#FR=(JCpT;b zfPPgeZA32-<#6H(IOEZSKqkeOsNP|iwElI7j{)g>m96K2esl9oLP8Y zt9RdMHnSN<3|%JYsXQ!`(XhU`kvSz$?w=58Z_J#A0kn4lD2)KLvNqiyrKxI<+QO;q5$_`fEE|0yyAma^oW_Qk6-itrtr4rupV8cpzBRj*#Z4#5%pQlD|pngd!9 zNe|VI=QTMqMSV6q`CqP+^R49n@gS^N;*sX;&_#%LmEf!o$)WYY(>%n;OM?Y%I|Ux! z+^SM>-(`w`iqWmM>fI3*ViVp2Bkk~w=!knN-2NbiJLE|b8-RKnNe2~9Cy0uswDkBp zd;^9qpRG<0@tPqD5j2e}|Mj5MdB)c}UtTm4YD2rVu|x9VexqLP84jJX^X#G56Yha@ zu0r3a5+*s-#M)v>4^{Zg&3)}O(_#X!C0Z+DcLJ-1i-5Ff+OtyeR$YA5eN=ddrG8hD zl2+NA?!?{fBSOW!&`e{*A8d`NHF;~92i1lMdG{LZNjW^%I-_TrF)752m!{y+@~ETR zX(^?TYq4&ZnJF0=SgI^2svviGT@zvK##L2S?~neDcY2<mtfOrZA?H?uAJ(|fwOUYwQAf#?0Q&QD{N z^H{;1DmkL=Y%3;}IbuOiYSc?72Tv+49(=3QMnUOlY z+F%-z5rV)%VUEIqK#|gQxtIVmejI;$=`wdEfzoEhO`6IMYejk4o!lRGSs5`qfXY=y zPVqwhjlN#Xe@3~GP2h#tZ<`};EjDK9kQ3-GT!Tl-*xNK8>h!`0$SyuIAJq3X-Vx5P z5x_(}nWBVU-V4gy!{f^6KkQj{JpW?mxa}saAZnre6<&)AY{=!(N2F~gp3ApA~)QtCV2?e|O$=?~?QBK=UEa?-ok{UJe2u*3d<1P7(51Od!FC@-c!O6OglAFL&pl5{*!;0XH+>#*JMe#)l4z4>8bTG(yU z8_O~9Y<#3VvPu}Pr(M;TIs9&@JHom{&fP-h$T+eXbbRPDvl`IY9_=$`BUu~|Z0ofp z=;R?J7vu|0^4SK|+h`iSQoJMK4(~HRS6lN%4f-~XY}Io8_))*q%_8)=nE@Uy6P;&G zx{Sew$=qcB3^E(gM3qSAUI@R3mK4;mLoaL1j7L{?Txc?s-;@i$U@auRC1tAsYK;?tWP$INH+8lh$0swR|)*L zFzgM7aB^Xz4d`+K^M`qLo>Fy<;cN%hJn*im8{m?_=lMt>fnT 
zZdVSElKh(^r(44y^!LUcqL>_`_tr|f1m6}!3SMLJpP|R+)2HIw#nZqBW6^f~MH*Sa!sg=VZCQ4bf|Uz_XoSZv=kD%O z(}jh7@cl@fY=F)1Gv5kru;6%lV~{szwLQ^p+&8E)ln1hpc`C54u!COxBD42cf=6Ta?RKQXEu-B~GcwYlYtRTx^#y$9gU zP%?l=Hz&-n{)%t|FjJ$$#3zs^!dxuG9S~KAp%e;m$iLxL+nv+53XksT_iFo#hypyV^avB5WbEm4}vOTa>P7Yb#wp%vtuy2uT5=^VDm?BT7P z^gBxlTet-9&sB084j*&aEB0s+YD{0)p~VPlGjYj?lIku3aJI`8jxlGi2h}X4QH}a>plN(78;5>2c=Rxqb z+TAskQA(`eU5fzKlAH+ns~j!)POvD5H}(WkiXgsWV1P^J$SoGR=WScJg3kiBh1;6Y{H_#2b{SquWe{(-I-O13 z0&*{1%p|H*dTOdMnJZV1Vo>iC<*`)JCrqdVcXbRIjo?SD*=Tqn+x2F{hya7aovg>P zI&e`Hk<|peuFbp?jo@H2)Yvksm27BBcWr{de%=Z(jrlgoivl9eHK%|p_VCUN^10`J zuGy6`DJptk{*@JorC25FT21yNI_M&SsCaBGEH$#_oeeh?U-olR%&glz`j=f5MC~^o ziM$UE!fv$^f8H%sHOUe^=!u@z&x|KG{Zl;985e!u9WrRw>d+*>x!2%cchHhbDwcUA zZxId$z{Ly(kbx&hY0Nqj%>?B>h<7D)&5Heo4th{o>-Qs}5Heu1Fi)DxH?`&aZDJ$L;lLe;ADc>}Z}yT4KLdECD| zd%ILtQAqyEwS(UmzCrsU%|WJR{)eyR82Bj{7~cYdQ_R-+TH4txbmD*GM(Y2l<92*N zkkU=AJ!TNe_q@eZip|nHH1IAgA2TcJ-=qomR_voeYOGS{*2mN2tIJ@d)9_3^3y~0%q*+PlTD3Hv`YgHv4WmGYg%33kcG{Mpih4^N*=j&RM~ZLI0W)^0gOJDx-ve&3#LV{X!qcN4t} zy+Z*;l#%-2lias!zNE(h0R$-T4KEZ!(QYWSE(OZ`9etn^`D|RaOijPX?d85UMb=@Y zm_(Hww z#fE%l)4!*frHB^3)Bf=qWeK^6U-xXrCj2CJo;KKFcFL?*HTZTkUtb&rA+p^9u%m55 zKV2gbDiG$%Gw+J(l5pEN&ep$yh_o0eR*~tl<q0sM@v!xXW>`FSn@{s z%gkCt{(v&mg3^%e2>XpcL%P&)Y9V zFtIb^$NROH$d;GfkmJ7u@=5uhI0E+d=vfNU`Q?%PiFa2nT8D{W#C*>CZecGcw&%|a zu+7lo?5M!o=m2hV?+cT=ph!Zb$GMhf8qqc8jx5dW=0cL%EBqHvn9%Bw-(z7Nwk4O0 z0k!=vQsKwgZpB|RhmCCx({FpHHlxpE!BCXVO9QxmUVzK7o`6jO$paB(=b=4jmH_VP z_ETq5LFd(1`ywg0$nGlX+M!dq=euCnMGlqTnt@zw?h{R_{on`M_SN_Z*PCi(Wai*M zFr%cg(cA<@Yi~nmI^3(2sqT|-JtGSKz=H2I} zd6Mqtg%%Y|K|A{n2{rS;*dR>^q4&Q~lID%bm$T0K7GXx?V)i7w_vxMxJ0E5&e~?yE`UEnBNIR<;7yJxz~8-(GpKmR4O;i$xzSlKx{jG6cSe)OeQJ@KMB%)8!BBamZVewpsd?DzeKnR|@S}X%L z@DN-JE|`C?b9JJ^qXCAoxD0X5@D9emSw_Man0f688Vx)>`}Wm4op-uAIotG7pAV<8 zvS#iO{Hn%?{CIE80YWIoLQ926GRViE9SlXB06MuQbajygq7*n{&$rgKQ86sZ3<)VR zm_fz|fM5{<_pc+Puh3=RKVs}cIVm1Igr(2~kg-uc4Ljk_Z=d4gG9mpnrPcbKxZ^MD zdbO@|PQsU0_0C%pC__kA$3{q_Kb?~vBrYa%ur9`W(?Hm`3|9>5+(Oq>kj!ne85PFU 
z;RMK?-h9UhohQ!bA8*9x^JT`%^1cRV4g$S-Ve5em%DbGG3+Wd&4ht?DMM$+~c`YOv zQ6UC}mp?+oy-r^UR8L8*CD!vq;56N{L#5ZEfDI4$n*1TTML65;L8sRKFrY5503;u0 zfj*`g4DT|9E?APg7>0=`5oPjRFbQoc+l>$?pl}6|c z4nR%B3HBgm=wFLb%`xQi)e4=v9Hvi3602WNiTpML5x4gBPI@&)up5@Km+gYmS@}yy zji94i9CO7LPj*%ZHUvo#YW9#S}E4pD)EEokc&Gwq`=k&*bRe z2hgbn^Zhgm()(|7-WW@Zg4yU;{W&`vbyvjQuHZrk_L2U+Lp9A9$K#Xamw2w8w)@%R z27?PxJxJY4KT@JE&uSdd>br)h^jyms%C+P2lK>!u6ajsPkEkgbV+`?xEV2i!h0ew9 zMY3HrigUTEr`eAKja@lj7>geC8}*;dSB3n+^aDB=p?y(`PCyYs*jje9YRCS75Cn<;BW z<`kq7;ZC9;TPHjfs3Gg#W2zi8M6&XkK+potCV(9o{b+3t4;Fa7%6~pxx;MW-@;9!b z`|Q;Np~p4w^?YNbUZuCoH|kXyF&ApWY?@rfxNr5PiIrT?iAkAM*n|g}BTf)A?p$lf z0-^U6pRgM4uL@GEXPm*ogba_e!4P>`kDbEN7^R7MOsW=9j)hN)w%CS z7ktF(n)XfM5(}@Ut{oowoW_oJv7$|1-MeJw$Y5>FTh-~p6L7!6w+T0caST~0=Js4) z`b0ai20*VglgAc9!;yr*33-9@0|^0_%5pFeaMX+X+I`uLLn}%BvPOYgId?-I$NoN> zpz>=CF&vPP7KkK+h|m$Xve1O}KrTQE6LqKa@r0FQYF2x>_g~D?NLMh*54cYe<&&T} z>p`}j=gCzfg^7QgQJ>%Ntwj{pYi~d_h<+dpvg9P>J$(2>WEbzt8y=I&xHckc67V)O zfOcR{0C2*zApk?UE~?SM&hmo9_S=4=PQI*!TUan12vHh^U!nO-Df50}d)} zvN=`A%o*?yWppZOvnVf95n6gEj*XYY>dSSXaib<~YWOf5Ot0Jz7cE*=hvxgn!LjAr zPf?4QWRsFlTL_B2JRqoHM@l<7+=;A*5}*S^t+NMo=KcnhN!rS$lUH>gf5Z2S26PQP z?QNKn;1S2;DCkf&hnTg^?iV9<(lp{CZa%IEc4@=782Vv3*hlvXexd#qDgQox1lu++ zZ^z=$jP7rs7>7D3OxIWcqrG7&ajYmC*GAB)`lBTRRzr%G+qs*6J7jRmapr)4c52;% z4$BDW%<3BZH9Nh0|6!PK#yj0n>#>&q=*m#kT*rJ>wtMYp1wS04`3QSI}2ZaXYXBmxPB2l>3jjv=WuE-%>C1z zBc%3mn_3gJ%Dyi$dUlnmxFUFET%Nz{x)C~`EL1!{`;FPRc7NSX7I>Z9okx@7QG06Q zb|`$nhwad$`~0NNn(pyH3e2Nuzk4}1tW0zND==%IXP_?JAKuN#jIC{(X@9s%^pZNR zyYx5Oq;W)S>jAWe=LUh*blFD3G~uAFV%zt_#DQofhtyL@LzZQ|9rR| zHU;^gKEKPhe)uJJ$@dUlHSVV=5ktUB0$T_QsYH~^QIf#sYQXc#P)~Xs4>-Nk@j6tv zvQy_U;`+@k*LKmjW=Az|OlvG|tQy4gm1QOAP7#==FbpV%C40be-5=iu)equn|I0w- zw{$|YWCff)3=9Eu9LPY0D+LVwr!LqH@e9JATfQE1um$i-j#(Sf@s#dl3TVNQ7#67Q z+UXr6Z1!L$WYSphP2#R{aDSYt`QlQAp zc8}g?5g}7IISE~H9S}4V{9zG=JZR*tI8BXG%kagIHiDpFMOV{FWoBa`T5&H^Q96h5 z-Uji1ce+8froTKCn(A?MRleSVznNqJCpv8@#&|k_+E;k5(FB?jExYd{%sr+w=%Ka> z$#094lMAlV5%H383g@ogLKFbwJkdwPBy$R`DLD$bZy($cF_&~Cc5qbnVG=p677{_d 
z-jPApeagG=k2igx>PQ>&u_Cpe+S!2f&~=zSuUMe$E^fGNHwvkDuS{S7!NWCaid8I5%EuceAl62{6Cfu=8N$$p>8;Qt<8 zI=lMNu_Uvx)+xaQFO9DZqfTf%;<NA|kWL-W-1?PVI8k&~LyK zG#+!*`w?hCcZ1?U$acb%>+t}sZ$>tVrcV1QLRQrSXfNlO8oo{v?jxSE*~_cHMu|Y( z7Y$&%CRW?7{hkhs2~HUiI$f3NfYe`*?5CjF8rq=sVKr5bgnzwhar*yi6}T@0^fnWP zu44XqWhKZb;n>qLJUf z?!VIH_-;J!o`h1XkS5bOL=#rmpRcN{M!Ht}Q!Kn3pPwaXn3h@*ZcL zh9O~QM*a51ke}^juaCd5LBWM|dyTBUahmQ$K3Ys8V5Lv=|01@rwhiA(xb-Q*74ztq z(~da0#<9Us+9l`iw;rh{WdXN}xEzF=>XD`nwR_G4?ii9Hviak&4ULX~>tbiC8<~i1 zK77ZQUD!rtcBhqbOefY0p%r6d#_snSG>#*+>k)O#XRwd6t;LDp_~^49pD~~V6^?^( zC~3y03PMLzPn1eslqmk(m)S{c+r7h$sc!Vl_oY@2ly}c~msoLb90FSJyVflY-c#qG zZdA@KsiDKN{pjkh51e&f$9lO8{I2!4(eAj6Y&LtY46iM&W^Fo+Sq$%Q=rAlp{K8hK zZ%3zA>%gYaW17_l`+VuOO1n1C3J_ZB&$2%Th7eD)K3v6Nm7M?JQsR4UBc9cCagNqAW%QI=Sgq(!{Xe=+H-o~p$i_S=*5@Ee#b|? z^R4*tTVaMe0ABA8nKTaE1@VB|bWf>)n#cjc%azLJ_m{^3(;}l#*{i$m2iUWN@Of{( zLe{aCTI4MF_0f}h|McKwSKUq~T?`3uKq0rQb+@hFq}-YL_cySkyISl@>bh93trh+6 zY*uvZ*nG3t-)Pvv$ip&09E<8h!V@l&U~AO&;ZY>Pf>kMId&$4o^%gVJm#P(_XtK8K zXEOg~eZHWxg7kJs8xACI*BcZ9S(yUz)3H7;ZS^V|ME{^MU*&caPV zl&pcfp-shsYm?v6JWnFr*gRKTAHrY89e?&Ps+;3-1bOF)_rG>p1RT0=mE9DUYW&T0 zDzIwvmeTiatf7s4MvZ$txCI3qQli>Al7Sq^4$ony>OWa|32Ov*r!VO#I7?3+6)vLj9ki zljx~5G?J?0pikdDU-f(lVag>*)<&diB6aybc59+`X9RcRvaX4D)jvx_Q?K3MD5w&u zNL+pO*lUh@`?D=q{NSL&Mrn+NS-jAv(=1M@y$bl(0x?Q<9l?h~taHtjxyxYpgvg#)m!KGK(c^n9N*g$cbH zTZc-=J|=_r?S$#0Zky0TSJF4F^@$Jj8Q8i~Gy$w)knaQj^Uf!!4~$s`>$5kY(Pp@~Jz)>}9au?}&3F#c-r?X2cphl_S=q(l zgf-Diff-Op`;uX&fX(i5gr7c5_}XX?PYf!C4}XK0{QC{&pA+a!y2-x=>s1No^iW6> zTWtSu@?-qR@w+@*i#O|!N*1OrM@HxXn|R)U*6bw$z^hc$(`0$K+n1tdFnJi~{U`^V zbqE$a;=Yq+byWERlZz2u|Y0Yf)8HloTMfvnHBp4~*@alLE#i#bosan~IpuUE88#G7gz=xwY< z41=9pwWnZbYFKIplu)M5rNm`;95u^$>~9I|mUA0UOHLUAB|c^O?W}O)eimwf z$8V6N8`sPNTplI#}N%Ea$NR|^RYO}1LevXGhf@h8p<+Tye@-NTHfQo%8_0b9=RMm+{{3p3PW5PYaivvu^mT!t%kYj@gi>_eAfefFgtKfV zh}bp+7B3V^1QJoRd*$m4%tv_0-zDc(zU$=@V#v8HX?Jr z)3EQF4nRsG#!I#D@vQJsY=(O?pqCP$OHKm|L4;iMq3}SG*(zU8#*L1p!zV?}Yt8D_ zmR$JqOHKg+R#jaHmv}iIJ>f;w?a8J{vfGp9wOR8YP2tDh_*y;jJ@5hf!6h500KLp( 
z4p|u@(nJ44(}cDisHr-3b*_1K0%wnn2Y1r^#xkA-I4&klpUNVEUk2_^MfNn3R05+= zIIO>ho*#AH)Z;h75XZh>^X6`=X;CvIIAyIOxxK%5@)mXldydrMtUBy1NEY0cns1 zLFsOWX6Oc`8zhy^p$53T_paZ%Yu$If_m6m<^WEp{{n^7&1hqj>9GvAR;iwPuCbRx% z0^ezI@KVGUE@gZeg3ZTyOrUSn{w3}E8&hlD8r#;2dE#ERYoPXI5LKuy7z~WxX6#UB zuk@#1KUgXIlhX-<7XuG^KHscXKeYM&`z*od(OL}U#HA7=QMHf1x*<~S|Nanxt;g|^ z)`=^r09Dd0hg8Tt>9=dwwD3B#^Q!XZ=xGnSg-E&Jo@x_=pGe?`Y|RI=ci!nfmTmu# zMHq(_NNP#9T5Bd@{7uei$q)=U?(qG$leTH=f>4tFNk1X#X^J?nexCS`H$)b#K7CHH zMeahF>BTrX$?ned>wR3}MfcDv<$}MQc?V{~$x3iygbub|Xhd2~YntOHq zX{Buz*Mrbb{^Qv{zfnB+Gy`aub3^m%+lHW*y;)ZBowOyU+V-|*yRpsmsn0$~O2$&_ zG=(8iS2}Nrn)%^+w&)vuh3uDe_qGdvCokNQfhWI;8LH|3(Sg@hLZbHmbfMm9(!}wz ze)U>*TOQ}7EDrc3u(`)qMKgGP=Z(6b(A;)<*XDL>DDjmnEBg4)pY#vsbts4Cy_sow zkZ~r!4A9@ms?2mX1U4w#U*CK!C{UMqDCl!Uqoy9AgB_Sr)uMs#&V?$>c>S7HDzV0F zvFHaI+W<4F7CN~GJz0 zHEA@&2OZ}i?XmTpa*J=51Esu46QYH8kx0UcbrZON)Le>rG(fn3``lI*xd3BJl0jR| zr^!biJmSK_=02dgf|giQmT@)-GSKOrht7kkeJ?W8y?nIVd`*&q6#>&a(Kn9!vmjT3 z3|U44K2J@2ZmAhQ)J1F=Jz*ZHLe|_U;{?U+@8wE$JL%9lHn_(=osY<~dKumnfGxvMoNp0>w4=QN zLa^@B?zigET!7vSmoJHny~3nQ=2ePZjhdk2+UG{n{^$1clg|5kU&QZ-t>MAkXSeH| zo*|b9Rr(i7?`1&=OQenYXLjI=$?i#%4kOw5A8HG10NSvP8-AjA4}W%f!>Sj%Lx!m% zlsxE4v?R1gnS2-}fgQ&nTRM!#F68MZzD+v8r)w|E4wXPC_1@-q=B`$smP1M{_!v{0 zmHFA?PL7;ft9`oGEn`Jiui{M@U?%8{1VcGsGc!6_q;v59ju8Ff0UN|5fQhFh3P<#t zhzJuT%i3Dlp?k5Qi=ND(-Y}%IyDH_`RJzGOn5G-2g81m!9;8zk_-C{bB;$QmXb{7k zQ*J;opwEL}e`aTL}WekQE>q_{cW_ zruVJi=ooPTyh^xSl^3HhxPSeJMd$Wt!?4Sr{O1A$qKp%y_3S)YBMv@`8?gQ}z(hR^ z3quf`1PE8X_*7>lgf}4Mk$TLYJR~W+cm8r@JK}CDL+{HwmEJ%o+La>Nnx&!I=i@3a zQY@fdXTp|*jnAOp3|aK23@hH9i@wI+UJHOWWb-4H@1sxO;135|7E}R$ zLz_>cWWjqly@K}^`op4P79;DtzRVMc5-cS9CF|kjU*apK>M&TjzCZz3L&P+I@t|0nm z%}^zOGh8}}?=YPsK7=2VIoa@5)|q-zD+wwv`3iNMUn!}cjY{r33lSWF7U-gCNVNzR zAt_?i+Vf(BOSlTsJnR(zbnuisG-n}=+-aNo@wP|>{`yAih;nRhda1IH1lU+rL_Yf* zq&gTH+k003bYl?Hf$%P|kUm`D&1)@d<%gskuTNE}5RTj(LJSp=xJVmh^>xl^HoTy4 z!sJq6(KW@%LEPEaaOfEJ;Q z9`90$8Xd~YUKlx*Mrc$iMYB*u*3j9Z9n|A9b>Ya*|%tKjiLA2578Uz?~S} z|66(~=vAJ^j;u@*+dt=_D0v#~)M>gOs=-(p{=2dYVhg#RLrv6~$kxT#V-J<$Lv)DZ 
zr)}|z`jCD9E@Cb3eXOmVmau8Ii!}(nX5Vnctr&!dP!S2gt@3o5cu)L>?lBphwjOd6 ztCyagqDm37ijFssF-Q{~L92P*Th*cRnhV+R;VUiUAI&VS*jM9B40N40wh~`Yx-K=0 ze&5*QiBz8e9mXAJ5O8*`|6PWl8{(5wCC9IzAPFHMAhdQS9PoLDY>?Cp@FZ_Vhy>!7NUD9M#A^hCS zS{kQY-8Pz&jDxU-7TawxCLv$i{@&wJr_&R6lVE7Uk07NxeL`~`?RlTz!=`;xQd?|Rf)_K0_Xs&tun_P{@tf}0=Ru-=Z!9oleRZB;U$ZNS7MSZ?GK6hP(N zv!KU~1J1tl7Rr%U+8967coOr4&;s%&g8@Zf5F{VRqtpT^S-#;3^g$w5W5w-9Z)c63d%y?AKpC{{#+R-b!RTiARpW z9c&Hz$YzlU$~0uG`E?AG7i=6tUP)OwqUBr34&cpf zS7b{#h&i;+t^OkgMPb#JgMaxQ3!#;^tBswV%Eon!WBX5o!LnlhB^Jt% zPL|SpUSgii6i}sb6R4B=FQ>Y3a&J|%5Y1sq0#M_1rdln^s$&R_So7G;1i(zD@Lb>Uc1ex#iMMehpQ4f9LcbVK&kYS4yNBjr7d zTax!Kv@s3mp2;_#3Rjcqi-djzf7Cr1tD?4;&HaYB;O3r|O~1&LMtb09 z4*a)`w2-aPlYjs-$;3^25wUZso`1N~&d9c?A;_E3*w6U5330)&nagVmy|)<6CPLY4 zIpbNoo$ha+4sH}HBrX4Au&1oXfn48dQ|l!pg|4!_LUVObkmtB5K=G_1 zKjK=D>b9c4E(V#6=Q?o_>so5?G^AA2HxO2>4?l(-29cy_TRUn^8FjRkWWCWU43>%Y7(t7dlj^?$h7r0 zsQ4)PA1AOz%%F%~-jhAv z2tc!t2}S!i`3iq{1~3kAskCzrQ5?$=@apPn1<)+I-IYZz6)TMW(^zJ~GVe0{7_~rH z#5wf{XINnivO=p_%DcsJwUrEO;b1(dnCsMC4`H8J7bF#sXs1XEc7Iis-IjUKH0OS& z>D!uw?zuijR3&R#>LH?s?B`lKHR;T9k!)^-{$*V`H}cbA3`zV4fcvOeCq;0sNVwMN zT+#UMS7%3lZAZ1pvC_)dzeHqDhZ%woV;LW(SF$uU=CXkeZDKmOcaJ4V=XHf1H$?l| z4{@Hd(87gpDvD5|vXdaJ4mxUVKHH>3f-FBT`#RH7SV3k3K}%n_$Cq02f`;qnBoU;N zx_DlGi{XDb#o|AEkYIcqt-_-77Bef&OGkaP+i?Bia9vH~v@`C3NL+T!;Bmw&P@YaR zS0x%X@Pqj7W?}SF*SJjXLWc3uL9I;cH`?lAO5wrmV+XmULA;MRYwQ@?ezn---G^9< zsywvK-i)k2bY4jebrMME7Nqe~c27*^y7A=dpZetnAOFad{Gs^m*WdFR;DbJqXTA+a z02;XL`|x`%{}wfpqY3U_J)l9`Vw;d34zoCV-R;oN=bnSmU6?*%#rui2{kpVc;7dV^ z07|D&=}4B-z$|}9VW(~O{JpG&esE>)1;@2=?sHV}i{-)j$-M8MJpi`2WOM*Mru`T< z@cJNZo*h9sojWF+yNo+|&n_{Wjq-P3FgxI2)%TCcdeppWu(Gx0E|_4|<=4Z;?=82@ z?*WaR5@gayeE|%gW8b0fzne}9?5yfoM3ool@;I&lwq!d50rvW>xmOpMRp>HyI+QCYm$%Xm!J;LU%dSS2{;jN+$zC3j}%kN$2`3e@U^w@BaGFEdKBF7qwn;`9xwB{<%xDb8fF=yY#0N5zGLcGg0N`5WU@w2vW-)xaEy=Bsh zND*vHEijMx@X}Y^QCHsPcy*aFUd&rgBkCa)-6Rp)f(fp+YHI(UkSdhVqK6739R}6z zB!T2j6HFl`{S$C}fvjY)*yH}m&r(MT4TV>Wp3c#jC$8QCti(C!RBK-K6~iOIQQ-|i 
ztxOS0tHNtxeKP=-bgyM-#w+5RKR=KCk`xw28nll+C6u=Xh+hSUGM5aa(J3WlZ``$q z6&-NDU1Iq1UiDCmx*8c>*0S2(uN!`JESXv-Cxi71ovRRC;H9gHE?t&E*U%TDH!hm4 zf!gO~XhA%}dU|Q7fJ(42Tmr&T$_|G0px+AbCPLBx=B)&OD>Nk5gkD=>=1Q}gZpsWC#&4EY{<*Mo)Y|0E$NR2o z$tQP2W7oOv=FFL<0*{_jhG)1x`5~W~v{6QtndlHdgnOvo#)ZwOT0jIu{1RrAB8I`# zBPme+&0CkNWQ<@J$C4nx<`+ksKr#VOo-L#-W^$y&8=xeSjV1JFofgP4?@jHa9o+r6 zcrN_}=F&MJc6kU4%RZJ5FG7$FoCj*Dz4T6@hYi2y2M%3>;cxl#pMpZr5=H{3Hd+(e z(_2=IFz7^ngAXh;HHEG7G2+^#4s%VXIb#8j;py!{ zBmpu$eR8tZfEmL%GX`)9)eQcbLjgm|O@I25AsWwfR8VDY^F(4c=BHxkcmFc5muIyIMa23@LL^mpX8R!QV&j;goY?@#ID!ux6{ROi5i$tSrqvhUKFCIsKfJBghH>Q%`O|)rDhb+8YXE$uJ z?MqT`kIdn}=d=_&=WB0o8F!f%R)**3PWh`GgEk(P3M3ci%46lPmr{5jJ1?c84d$3$*uNq5cU&^WD8i7%`+f0E}Ps8!ZD1pByd`% z`U3z|bm=zGf&dfzk^s#p7g7%6NW-$6^0M}9jnm2FZCAlrb4E$1*+Z5L-wvr@1|o7vA^X89TUI&P zMD09}V*&Gy=sEVV`1u*!`iKq}1}S7Na#&gy`A2(t3^QTe4_S_4oO>n<$tx=T-}{2c z_%i$w!2$*Q)7?Y5!)~>BCHHxrdQEHrut_L*R0m4`0NknAHa6Us|r0ip28KGg2 z#@*S-#`=DB6O|50L)~Lx#<}d?0o|*K%$o@}CBs5Pxpe~sh-dUrJ|p*rGqC?iQFn8e zG=wSyKEhUp6*PWtdd@DK2&bh;>g(E{Ja8dAkfWjNe%ZCMx=wj5c4LMZseZJl5)TBg z`5heH(1#9_ii^@hUE&lm!Za9L1!Bu5CTLDpm-#r*v^{(Wy>EvCR0kexkK^eF zRdQ-pm2Mf}mJF|myP(R>1=AN_7(`Meb@cB$WZQ!) 
z$}j5-L$V?I5#L-&Ma`qtovM+u+qdV>*NA|L#y3IUtos`mN2GuYIYoQXi7t@_at6ui zTBToGctQB>Vk9y>_uXOXLS>_kvVxN1ube*JZqwtY#^Tn4rY9p+3gBkax@b4=|1-54 zAg^V*Zhe>QpISeu)}iX7H}`r$4BdR59{zz|gn*dRVbH)qY#y#AKnreph{xxMK_L} zt3f*LHbW@-ipvZ?9iWU!MAD)J7+ukk^{%Vd(bUYXPyL`Q-CuA*olst;5f|hTO*_c& zjCySOSV+~SIh0*5uCLJsQ6r}w7k**(^?>Semq$-y>M8XH&sO^*5{cYgB{oi!-mq~U z=ZVZCUov#c)JPxs7!}r}vKwc)5!FXwzl0sW$xCIh#>gg>$pCF4xr+eK^7hU8xFHJLr`r)n10HR|x*Aj}e$UrY5Q;q=C}vSazHZL(nA1s1e8;6kd$2uZ#4L^0?i z%u+HI8~LCeg<4xyTe4;_23%I!*g6ozjY~B)X5d6VgtUzWRAKAp7q9~hj_T;Sgugu5 zO+s+D7{kj?w=r*w!TW)dS^Lf7{SGc&f6QjZicjLH^F=HT2&0D*_EQQ4TNaL>;` z$}mGMU1gKAKcJ;UZYJqgSx^QDNhd?d1gebU!vo;`zkAVfmMTf+p3h{?}o6#s_j0};P9`; zK}p+d)wk{%9wE7R1{>)_45>Rlp44}q@_CGqDa^WKVO{854RVY3Jh0mYy!hzSBt%P$ zU!;tB$(m=Jnc*!={{uw~Bdj>X;LjybQ_wxaBY;qDY;uS1f)`PZGT{U}T+WjF<;sQ5(P=}0N=aY>S;Q9R@%Oes zA?FfY*LgM5n8Q_;8hNu%g~?d!)6PW@zXoK96Mq2vJ}7KxL%kz9G2+0yX(+gQOQ!De9Es?*ee){T6&&>diD zTg@sm0lx3dJf5 z|1{us^D|kSpYerfx4d!1(GAonoLMc`=xxk3YsLIfO6bwU)9aPmxp1+SyZhg6<5dD4gP z_zNLB`?JkI{9#xI&;W*jv6gXSc=Ji5p@kD12e-fDdYu*>kM`@o48JQr41BOWc)Ce5 z#=78{59)qSv;Sf(5uTRQN45PO5_x#5z;rfcv9 zzQ$+jQHOQ^t^-=uQX-md1t-~99e#hvKpt`E;oh{j!g^1$i?PwnuzzG_bV-+NJmMjK z;MI38ik3&M9VhvH$!+It#dM-X$CrzaJh0S_>n=tBD&({dH^@;@O{KwaGUKkZst!ktC&Q+v9!|R0mf9-ON7a9|HF`(pH8!2^8mRzR8pa#h-dDYm<_+ScxN(8G_98=xSVE zR1kcepyP7`NlRzFk$~_aHOYk&oHF<-;4A00VF4VWL_}yWgh4&&QlW1x>q1VR>10bK ze^4Eyow&;vBfqK?z)o3#8p#>`S^sQeKq;rTP$ZeVS~lWsR)oKdCkCo!>0WWbF>$$# z*O|(tH^*QQ({=wAUXCmE#%v)u)G14k9xWZ?5&zB1gW%si!K`D=XOtI^{>_Xhw2yF^ z$t){W615i?!+5c8ggBZ9rI?d`ldNO-E`=9p0_w4VJC=N*QkrHMrkaTUEn3?8$!Dg@ zZ`S^OSR9;#RWc6y=gvzESCBY9qF1MtbpAk7vZ+n(vo}V}VOqu65Zt`B*x$nJBm3Q5 zQ@>=F@074iWT~quhE@+52}GW5bt@009ea*^aKivoX8teHZksP0s?#NCPK3I*)f5gWYuDoueQqRpl6fk}0+UR9JhKZ)4rrP&la2?fXi0wggCR&Ooagc3Q9EqxpNfm^F!W>-}* zuxZWx)%4*Ri&Xp=i1;D9`Kng6B=?0wB~k=Al&$-u=a@fhoT9?Iv*BkO1DXP3;+4jpOj-mt+Z43gb3?$5r&V| zs&tB7z{bvS%gyk7Zam()DBJoPbwGr7-~WZUiK&o<9#F2 z1UMZ;mM3Jqht=7qRfFWd@#tH&C7L=tN^TVHreFBI%4Ah|eqC!UR 
z(TMm1VlTP{mASHNORVT`4F|3oDd)GSV3V?GeM%(FvWDaSpF$be(Jg0`=G$oz{12bO zmkDsrw(G;#-a*weod+pUZxC97T~;V#ILo4b=}7_|~GbcEP&Y58{D!3`(JyN@wqmh z8)8D0A03P>$=e)1iggD6b3e>HisPguMQ%QMk2qZKB1!9fcAhj;4$y#LTqJ2Zf{7jf zYA5uj`BD=Y`2nSI&B9cnS}?=6whF5-{}^nGxe?`ufjtfkS`yiIW%YB#h?ja9K zfInJba@skML}c3{3<(9`vVW^t#^4`dWVSw@WdT~}xhM6r!}XeiaELOPx!fPGx}DG1 z9T_*EDdJ{4n2TVDf=OOevYSq{s=iYq#UNCH<*QWBVFg-g`$uqvvF^c-6kK>5Kjo0{ zG8|8ST}{Pdy}0YF%`;Slok)B{BY6w=ISQsn@0TCeEktZ6(J)+0=N8KE!Fm#%@PShj zJ}B-nKfPJ4@Ks}TvSOeDT?HW@>a!Gvci&unI>fux?r2S>dPg;!vD|>S_RF`KTbv}h z{?Mnmz`wx1{%(2yP3vU$AF1Jt6P=z2EqfUm%gN)8s$v|uvEXV|vvWw2A&GXIPvB-; z5|gdR1T!gN1HwklHD^%v%@3juXDTVmmPqaAH$mVaUYYyo}omqiw z@!`Cxz5CtAehfOp0fP=EnHP=0i_R3k(pL8hl(*eltDgS>O|}=G1ddQH!8H z(##l0v!DeY+gK`Q89X=gBA0_85-wAGCw%`M-THrKG|W^6>u=Uo!T!_N&&d)#pGm9M zGVh;X^pCxjE?f;?4$=ABmsYZr)$$CZp@v3zWD@AuE{Y&^mU%0dCX3sp+q{6mj(ttr>3R~AuK0k8QO}XM8WoT{$S={ZAS4kS<9y|C|81g! zE+F!P&U=M|@2a3pR&5byi_4Dk&40WAgP|V=@DY(3qolAo#}p(JZrrAkC!vjRkdHl- zG4x0Z#DEG{XnKmnYJpeP4;rH9r}=0)pka1&V?rWFMRb%F=dfL)Udh@=FV^&CX8b@&P>)Z)%;9a(ARI2|Ho262!Bh97Cj6Th6sk>?#{vha9E;&UyctdU(kZ?8{;D zRJzX-w;CBF;@wY#Cr-f>N9C2U%EvMKshWRukFP}cP^~-0|B&%(YN+_(b`yaS+8bqQ zrsquk?h;#ym%(C`-E~{FNh{}IP)pIGAkB$V56|&ICB-)Q)G(gYjBC(O$+|Amq57^7 zGZXX;-2aBWiySfWDcXGyEvNL4G15OSow-vYB(V3yhwRMA#a<-tJ_tBHrv2>BLV=6} zYbAZ!NPkXn0P5MI10}P(!H0nuoXqH}4nM`a#|!z7ik=3bsMQ~_XJxwghj3{S-HcIG zWu2{FCbg#l1ZKrSrb2URr|7_AZS-RiZS?rK(Y)SHrBcD~#h1i`CksrJ70h*W_5xB~ z-;sYdqt!^Swx@DnqA~J8mg`CiK|*)IAKV+XSaOyVP%aO(0)3A)VpuYY0*aGlykjZe zS_is0^%AZ~gtX!E2w?qEUueP5-R{s~Ko<}?GK^3Pv$^#b9>k`y-4N}zzQZTcv8J?L zJ~x24c2HmiC9ykeVnCps_q{iZiq%W&R4Z+q!u^k{>8UGc6@g!pJ_&;$uUPHIhvXA2 ziH#T*=hD~kGw%D6CR0+5en35sJPXC$G!b*K(&9ajpo1rS}S^hr9ee_ zzFzgmS!iDr9od2yzY`K(kwT3WhDRe#ah)SXB&+&8^3|;igmwf}llDhPb8W^>sZ$(7 zeN{vHmCwJ3lLzLt_8XZuWoZA#eX0iABm$X=28$bLJVky-!W-BU$22;GP227Y(7URq z$SaT+tH%zg22C~>B|aK?e?#G^s?Xd0DHJ;Gaff(gsldk@pR>d%v-!Q*{j9h78I^kc zG!lL`q)Qu(EB~GoGHv2x=BYq5~nsve#T%O9|$~iKg?DHZd}*T ze7Ag1fRJnAP(C(|TQYw9veE=`Y&PDw9KC`o)jbHPy`-b>>tU>F?YDN2S;1C)VB{+> 
zGOPwd1RQfdr>~QKG5o>Ksy(*m!I6BUF^3# z9%Z1b#}?7Qmie6Y5P>%+Rhb56v2iEB2rsP*I|uC$`_|aLFIwU=ywdB1Uig$EXI}+P z?`04hA&Tp*Zzjbxo_Kd!V*J8;^oy6!$v#=)^m6aK?r#=QGkVB{t@Dr9X1_-zs{)KN zgstyLCll`(Pdp4d>jT?&=-plm=K(P5KPUp8D&vArN zk2Glk();+FAHttEw?;Gr>qlSuUhYLVw6g{*pg8LWNf7lNuFHR)?##Y=Ve`a)l#{+Z znrX|n5Z!Gp__ZkZSomK8yRc$AX-1%+uWB&IS z`3BHVm7bzCdiB?)m2vE$YMk>_8&(oQUv3kOpN44>UT#Y?iaSedCt7W%n>r#POX(#l z3qJKi{D;xEj?NVAY0rnX;MK`1v6CDRPUUxTlbXlRFbfqyX7R4%pu?Yz$R*WoZEv_9 zQuiM@R7wis;Ht>(zSQ8a%-uh>#vT`*zCjs+np{X4?vfnqCO9qmp(CnVb zk_>q23_Ibk)G6tAH7E}bzedJbLr8Yr!eF>PEE^eokp5FS`&H+^!$Yeen?`fGZlMBCI&9o6oFdlJ5OM3D)&rZ~V_J-86cr{s4FD_)6C^JSfc^ z-1dM5`#M%2l~m!f|1s8%wZXJnUnSA}2VGDxfkgNRz76DXUyqrWe{uJDd3piC8ck`! ziSjK>6D;7%MAPr`_{gC4B8Z^-1m+OITX$w`a+U6PL+`TYcg+Z{5}1lCy(UMDTl zg(gxE?qD?{wu}6F#Umd1D^?8p5__sw7`d|~&+ByB)Vc{X2955TG)P$pVxR4-gY>&D zESPa3(ipy?Ia#7|?mX;1>W#GB2H+>ovn#P6^(iX)t4h^zaR0o!dmHbWTW|5bGvqkj z_kLW!(!f3K9)1`#Js3Zj_z+>C*LZ}@l(~o)ma{-jno75u#MB@jP5bvPx6#O6{_fP5 zwR&}ywlIJHs_8EA?s`tRnSAh#`6@{vlhhjaP3pb3WCNA%+68k-w>$J3;9vIxDDkM^ z%Jx9ms-=+Dbr4%AkkklOGBl5;TO%;^{%SA>pLja1h>$DFzU2U0S4El6H@@ma{$-;N z8$e8D=i!H5aLuBSG#!u;HK5=((-a_ILtbL2xm5ON<#U*phnm+nV#;MnJz2yjr6;{G zm0{)Bo$>X|1o4=xo^x7T-Y?12;qfvw(a2DXZ`(+$$^ zaY3@U@*Kuri6}1QeoQJW4Z_~ZW-ofM(ul*=E?G(k6!5(p3cFUSTV;LT{}b}LUcJW~ zg}JGK3j00@djL(8r4rKF3lcwW$&kP3BtnT1pS9UO8dbr~85A zm}^QyY9!XW&ZOEAyLkQYJAZ1|&RlOIIJ!??2j2G>safFhj7=h@yQAZ5 zyIPD$mPXJl`w$AKG}@f&|3( zA9Ot=H{aywTD&)LsV;zl-mw8=VvM|mB+n22q{6wYlHpO45y{Lseo4J!T7IXi2s@D^ zoKw?AVm%Ii{+;zM#DfgMi%QObO|}FJsSFXVw2f6N35$>gA1B-E1 zPd4=Wr1n0}cHT|U8?g`@ho3R?0I*;m8Xr<8T0LLQ#KwPV(3`17^fuZUx+WKmV+6Oa z`~-71ZP7{jk#A|Fx+48m%%7+3%wdL8FL4vU({~@{ zbzm(3{=Rx(inVcDqLc9@a*FyBdF%rst@_Asi}SK&SNeY_CiyRQyYVzh@(X(M^l!My z_G!;Q-Y3YX2-kH`<30yP`8;9L5+EvvJjiW~bR!21p8!VMdB;gsd7KhYTMPX3zut$Y zG7MWfOCwRBxh4)NbG-YKjLnblSvk)jdMx>&?v_g7H`b7757k6w2=2eEQk$qfc8NZw zY-sg53KhmK0a$xG?nS;;l(~1o+uTJ=Or&>uqg$wJf|9rZw#div!v+0Da~m6u)FLJ= zN_Q2d>c;eTKiN=g@H<&l!jiSBuxy7>&EL!1w9gBw&7eV_{;^ATZsR_RPb{`TK7jp! 
z1djx(f`GRh^MX`Ck!Ke61&gw3CTsMsMjZ+jcsn!*8z8z+u)!Vx9}lU9$zv3O6?vAO zJw}@uy9f#S4KpQKAGEE6gYfgnK;48!ukD?-A)WXDrY|z*GOVKAtL8l{VJI(@R>q<^HZ^8~-Vk}rb?2Hpp(N6~gYW)2vn*`t1c zhSLQb)a4DnbrxYiYiHf%aBpOwu!B6S=Ho?6b-Hj9%GDMzaeS^bS>g38+D>BRkqSbqItEiTco&g?pyEqOzPrTFTx%_5}mGFVvOOULQ#Llu716i5TrZ_KK)J} zH5gA7Wr-gDtRlZc@!}&>I5w#HuWUguYo!^xn)_algL zI>%eLQ{MX@XguYdwHmfUZ$gMX$4FO2a98H&id^!p+KD%m`UX9~Vsdp|Mh^J7Kh9KP zzvo>TN;VXmO`?L+90bk&-Ri)QH;5TtHDCa7!HW;S3V@9aJt5}%7m;+O=E|1RpL9ll zarVB{2&r)B^o*RHSMHEo=oQ9gS>?y4Tz6IoVWcx&(nTu$=OPf>xJcR>3~F>LC0tmq zXiifZy^+ny3T6o(^#S$lV0NlB#%8H8Vud>7pi90~;oJwIaU-Arkz|4@D#RS2Bd-au zwD;F}{yUYSq?U#-(>N(&=2X_4j*PP4uKPI^luiKW{}w~WkU~Up5Tj4j9-~<#=Bn~_|k+n z2w-kEXTq|7%L3{5WK>AweVK4)TQb%-bF~GwA1BD?GSlE6hPI19_7>e`$96J&i^cPO zuV*v1!*C3)Jj-dy2Y{=kHtPfI6a60OCQcF}TU9q}MyBb-p!K6PE*f^QWnZL>f0fopB4pWQ* zj;JxcCiW6+FTFzJb41DG#o+Hn!T7%Ky+YMZ^=1va+ZhQWJzoDA3AV}7gCQe9PR5i5 z@YWIP?LKFfWK{p6uR{$Ls#>kCMWQe; zI6z36q%>-Zl-S&Or~n&_lfMaGbzuVfTzM-5~&{H_g7@ZktdjJyDSd25y9tRM9yd1w^VD)W`O>VwBUe4H!|>={vo zzMTAE)JEtXmxEt!eCS2kx|C7^aStcY2Va~nEA6o3v5DkSByyqX;rjWRU=r%3Dl|*GaE4Zo^dOGu= zj>*yKL}rus)5uoQajo1t-!}@3ggY;pk+}VxYdoMO?kkShYy@`^=^;@Jcr6aP2FN?yR8kGgP`KFpxbD46X8r9t)>Elvnr0rgXC3qi;_`mzq#-bV7`QQ~z_VwqXxF z_2Pw9v##%E-C^PPifB>lt4TWR37pNm{LVI=*wkW69GCg7yaJUJy9&RWb?fd`Y2tsc z(q|5p)@kxb+M#ozc_kwH)Y=a|PbNeD>$D3szHt7ZPrDWdW)2VQcYJqMsYadLK~n3B z@sFZFI^J3Tx@nbzYwu)#Ao+f*p+$@lJ|KF%#wRa}`^jPp69s=`1eU{)O^m1lxIjKg zmNnHp9O{yy=x_~Oyu6MN&;_=M*p|1=ta{h3xXFX~I3wfg?02vw=;M(@A8`;cy zw4z#Z!Kff495RJgvM&k8s;pL3EKn}KT?*})KqDeO0g>7_Ei&?GJ~$Y31VpwrZ)EOM zw@L?b`-kc2x&7|T^g_dX7%|Ei^(+K3RUNehiuP|a1_OlJW|XwA8`2+1$jT$?{doabSY$L{YE2nbVmI_ zh*-jhAm3oR*4c1nbf?xcdA=vQlYSGl!i`b zu0Idg5?s?*bQ^zUX`{A^Rr<}=6TnqhOuAc2Cu)BtZ{%>b-o)KlY-tYsfIULaKq+^8 zxzQQqCWl>=_-M}b9+>meq$SA9E!~Bf;V`E{3JY@^P^8FGlV!0L1*x8T;SDG!jHL`cIppOF3u4w*nWs zs%rU(gv`8Xi-`}p06FL-^^kHE^C>o#9$zL7;2|*fkh$<13F7f}^`TeHYJ;j}1@rmS zQ@gG8n^+pajC7}wL`G&UY!k3-b)%_~LUqC_ zK%(K`gMdVCpr}C8+e-Z)7LV3(d2)rHiR&icvJd+URRw+ 
z9|4qKeG2$s*ejuX@ym~|w`+QTzDUR?Vo_MNw&=y^HqHhLG!VXchi?^kYu*`K!) z=%m8vK!kL){Bvv99S-X}^<@b5JsTUoZU!07?BBW6iz&C9*Im~u0h9lWuCt1YGm5fw zRpG%UxD-&h6)wRcKmoy$010jZ0>NDi2=4CgA-KD{1_aCaDbt?5}kJ=3rM^S$@{ z`<#9D*Lvq^I#^2$^ELJsR3`b_>=J)}Hs(EY)?0Sa*+L1z<@qCq#~+~=vHSA4FK{=r z$m#3&<7PTyZ*v0~)PQaQL|bFW>j0FJ>{NX!aoPK>vq@UeM$0*o^KmJW9-jKBI3|MM zF;9BU(eIf2j#OAkMS88ORM490>-|a;by)Y9+RQZK@C2tlbRap90a{hzM<6kCaqb10nGa3UP4^O-}*jEI| z9HX)eb%k(Ebp^xSp>;AiYAK%Y)!svh#e9i+tN%$kgH|43JnbsOPiS{kVmn5yZZ76rLuoMXjadi_nzKu)ATtsGdS22{}eAH5*U@{&T!Miwct&G zOk$rNq&C@KOXit~Xv2V}KTxEr7oq@$JM;H##5#+-b$7$+aempl6FAF10k?CbuC*?% zp)CfBP{O#}EK(%n?(qA*Q1l^OHqm__9i zlrm#mk^uV;tkECmGSe{xU)hJPIo$=b;BnWV%NY_)SOI&x&(V|}v(eZ|jakWkIT5Z4 z;T|gw*HqGNTjt@aZckhb^88XCHp$WS@tQ)MUJWTE4%v1@Q%Ne+J|A4{$P>t?-!yfJ z(tcqTG4U+L`NK+E?mV}5NigP_o(6RlrR_2c-ZCEGh5xY3d@+~JbFV2d*BN#$%l&Fz zJbqeWpy2vPAiP|1B^kIV=;w$O3Ii}VHy;%~FLc&~!fsi4x zZ{e#|VxvzbMUJGzDk7@=_UYoQc4^h=x32n|wTI1k`9+WP(S6_ACn|zpPKA$Ed6m^N zQ){m8SG7B=V>J}iTWaW#N-1Lecg34`tVyOdBQ*s+#L{?=RenU(KY0q_RC0}Y;Y-Y4 z;;``7*m{$2a*0rdYqH3wPFmGbVS*q-5(cgcK*gwM*Ndc+o+SPW90q@W*}!nvG_*V> z4jb&p?iI}=A?^?^FBkYbLFif(YTxOec{c4;L<9E@ga4X}M`T9(gjWPnfn~*r)A`7Q z{`14Bvpndw_L2GgMYLwS{_KdY#l>;o8)^PBaQhbf0zeon6RFpjgf|U_nL_iuEurd! 
zgqvITtA0eMH@G2AP4!UOQgW{+yTpJ@PtCv1F8aK*qU0w>u<12$GFF4)Jrx1(B`D zrKfi@lK#Yd*4Uedl=dQ2o?W)&->{NpliNY^V$vi%5083@d>Gsc=W=@*P%kan058Q+k|O#jzQOA;$@tiuP@(82aSt&b6juqvCz2mK z#K)^S06foAxkm< z7Dax4ah0*EUo4pw61RlF!++?;%2|JmZ4TTA-?1p;X4?{%o1p{Ov0MVn!FfA_Q>aY$ z=fh{m?hJ;w*|dcP_5H2bOH?;YS3blPDF6wX`^+lHL_eeRDXJ9e9`DauEyOj)qrNMb zyHSnJ%l+5hCe}~$6=(2@!1ZfBzk>{Avn&=>ut@WD@7_SeK$N44f=wBYgZ3A#@XzqM zdctow@|QVs`l6P0c3R}3E&@)LsZ}?`DTLPFk7u#~6AeL+T2P8-*) z@^71J=yvX^W9JnS6Ff!|sOut1A?8C(Q7J*w*D(c67%hLECio<05p_J2iH?96W`O6H zjt>=Yu;!aN@pWDjIO%Tj(`dHiK{(R^c2+2YP>bBg8N=nFPSNAlC4EuZw^$%eTb(;& zpYHv64nX@AGyGR%T-$*5?(fW&VUO}Bz2!uttt*ewYvx}*;om}R{f9^0Q7tc+-YZX{ z$Jj?B3tsAu)NzNgT{ZXNuz#ET*kdyxreuMqf(7&>11i3<3%F?h-jR!jBSJ;s7O44c zIHv5GkyE>4vM8t-(K?F;kZBYyh00G;1VJ%UqW|dyP^X4}05F({~%(au;}DH`OI>SqKD10$9W$O#ouDT94l`$a)_~U-FCFK zRyKz5`Q(`vd6kt~OdRlrIpSsUzUS-|ea*?gJB}XK+Kk%A+G`E64f1GmAh3pafY+U0 zE8|9@_XR%=)_oCaHK=?Mc|dKqiFI?r_yvlA^c}U*TP;sCLi{?!QQwp+HJa2fVPk<-V349$-# zNM(b+>u$IdvsJtA>qDHP%`{x}!+6KET&I~wCGa}TB6pa!I*#640HzRJ%(7&+I7FIJ z#F1kW34tS?WWk?stIccbdl85cPw}>W_@rdH8K4i>JELTreihXv0|ujVOv~VI=aUum z$YU5EQUBiGgQYH=u!hcdh(~B42bv{r#~9kknaX99uvcRw+Yh`iX%?ST>pATC7Zv?~ z4Lop17mP5CIC9*Wswpg>@61HFqm#aadGivT%(QU~ij)q1r4v{!WpP#PRdrT4(quSm zu>5?x6TcvJzb*K!wb)TjZ|IOM|9$~)-uIw69H({W!r6Bg)L&}Wx*#{C2D$MkLCbR0 zDb{!Z%4hewS)1oNwhY&~V$Oaz;kK;#o{L+nMsSg0mwiJNR@HH}7<18}1bY1qa7m+%K?nXW3|gxX`xt6mqP2Rt732^`_Nvu<)fh{rK>({`GOY`7poxR19SDA-Tkfo?<-!O7d(Pu%vyb9ttCVqkp-~ zJo${%qPrK}$qaP6v;@OANVqN;Mc%M+)FrO^*Th06iL(L|(4=;xdAX8SOc66&*@1#S zM#8e?+8~2R22eDF3SgoYmxm*lgVOqqH^RZlfa6uiU7bo4!1>lK1mOBJGH>p*JmRbp?(ydW98yNR zwEP7}m)~kR2*Dk@mm%|UJ}Dz3WHzFeu)OWePW!!?{0<~7Wr;Etkb`9Nk<^T;bx8f} z&_BO7DfRt;a9Z+Go5=Oi3jVwLA1DUG^e}!L`N*v`@GjLN7SH07W`QeNI256ncMIW{K?zV}d-vZRAcwOJ2ZPKBJbi&ioN#q33=0-z0JJh!|Kr|D;s*d)yWVBhNQ6Hlww zk|EOS1o*;S!w0E#%w!OH(GgRIA{QWBeU@%u3T6CglV{q?{ zGaiP^`$&j=59jN->#)OmI^Jwm`r3wT`;TCbtIR(#bTjTtU-=42Rd3+`KGeOSCWT6aUhG|2r*=$5^w^j@Z|`;J8(q`9~V9D<2AcoD98 zSAE93qFOv{8(;GjuP3K$z4o;opj2lTsOv9>w!d)|{zT&Jr6cUI2sa1nR*_!}&@GQB 
zmnM9a)16`@E2!A>cdkN}2`KIp`UChgbZ2+#N2QWB2-`0H7%i#5jgtG_yq1~w% z6URPIUGZb2!?(4tWiMkN@4@R5;}%Dg@?`0H0~=HtK-|m?5WY&30G!OdD*TiFP5l!e zm*bab8S;k%>plT?feLq&9qNzNQF01uVJu>^Mx?6LHSaC}+=pdTG&ao=o`xr@F&yeb zN{B+&@n*~q_CW?U5*v}Pd+>g*N?2*F?b)j|a++jh$`Wr*&^@#gyo(&(NGNW@;fGk} z^Qa^3W&Fk}xKC5F+4P(Kfb2O{09LaBq~eF&8zVor=sq7$8sBCpa+5i#grr7Zwbato zgGR9wm%QW|MjeAa3qyslr#W19^XFwJGlsZ-q4`Gvz+Sxh9hq)c)FZoir-W7;-cb zlus`?OMWvF&C2+3rZKlwh@QX{nHxXKbLuBm*B-5{NbnV9YeX9ri#4}Zx46* zY13?y3;qC==T*WsKSk4>J0so%v=j_p?PNrYtJZwL`>czBlKA|rElW6-rW(Qji<{8iT&$4xIf6(HOWCo5DhXpaAL)$cEAauvv z$O7Kk{O*ZnQJhq^Hd==+|0uU&Gu6uU3l>hT^NizHt;yp!FZplXU3ba6{hEJi{SW&0 zKdNzuayjCu!Xv)2gYZ@OHEG>boHa$8i^)Dui#XQ3Tw@**!d-(%65`TciG-Q%d-!kX zA~7qFvHL~IYQ(~UDbTE|8%dDBCz|Did>KdRj!;`7=S_ww4auaIM3&!htZ5M=yWc#b z84~j;BaqKG9wv$(G`}G+5usi|?E36mBkt0UwS#a#CtwOn0#o>W0!d;tu|fmk5Fd2m zGUN#tq-?gtXk{zR60*%J_&?dt=&*2s+BEA=7+4WPK)5iP-j0_@N)uYI+Km}JyDzmz6UejLE+k>mE5Vx#H zN8sd|1ldr|I!sExz|Db9+DZ$L!W4dNuEVa8UIEu@RQ3b*YoPQCoYvinN$M4 zq&-p9m#zFJhUSvX6pm}8qaNr%u5JDw{f@61zj7$4*%#~=!Uy@x<=wpgCGCK$Er1)s zR?1$Hw!0`wqljKvfug@t5nPbFynUa(o=DGqexze-sa!#Snx=Bx(mMtS1b1AaI=i#{ zqe=^}ednHwtn){o7y-|8Z@f1CD{HPb8Tj6PEq?~xP}1=aPanCmFSsPjYg)Ulv{hK; z9BLJvk7P8E0Ol*KDH@!VJ=%TKVcq)o4envpXLjXySo)K!b-Uz9g%=I~Edd@L4YQ&# z7rF3wbj`lRVwQx7GVK!!hrO-|-q%)I-=5lSTT;$xD923Gnhy$g4?fXY-~2Gq8m|a( z?ji$(NY|)>KUk|yN{CzJX+E-;Tl%eh2EBy5izOS(1O1YJkZCBYNr^T)Oe6yLV+3N# z2Y|a=N>nz61bNJ(OyZ5yc;xFdc}x+MJf*Rb&mlt8ulq=eI(j_E&l|2?G;r32>RE|O z^TTlz5sHQt;IC2Sh(C?g!m-&B)n58#_xnIrLDA-6$s{PckEcsct>*8q1dX%;fB77Y z^Rcv;gAv<^{Kz~Msm|*dfo{%QuRgq)EYcPo7@&N5rlM>)Bzw?D;C?XIcCGW>#$^7% z**%%HZ2z9h(nU7P=0xjF7lNa>!5M#L#cTrZ8w%;?={;%TikmS39$%b!D4>MX8`QE+ zf~RAl#i#irKO<*vbf7`-2(+%@KfZ0*C+2TDEBA+!IZhNO8?tNl8us>b7Wty%aWijf zgkaoVSILQk&6k(WK+-xwc026o=a%kj(Fu{-NIpB|_hI9`Fhj}9J(n^gUrF<~V%K4h=Ti_ccM^RD?Pd7>s2U;n$! 
zo{`|&1C2KF(Bb+V?>3gQ76-Z&w|xdD&b&82qt?NON`fy<$KS3E~4xN2eiH5+sdAFx?Hb%2!8d7bl24{O7Kq;C^7vw&XFmb)OEsQ$2 z4Fzqq?k}{=)O0rrn>eF`yBc+_g7N|DDDvo?lVQ1xFR2@rQ4{{Aj|}kg@+~YqI|KI2 zT*KdQ;r1Z(8lTL%ov@vnxeZjzNz(1=73h;;q5BcsFC_K;f;ik+5k%yfFG5+aoX#K_ ztLm_DT}QX@UZE9+I$A94TSLO0v}K?>r=7oeGsyPde6r3%BQIe(%bfA!^`nVobCR8u zMf%2{()B$ZoB7Sq@fk|do5y1C@Jgp(&f;*j)$jt(Ff5_PHNxb+(HxflKFUP3pk>5U zT}{g+dgQHoqAOd4^Y6@)+Ae$5`!u(r`Qs(29if>TotPZ{yTv#^tnI65WhUC_kwbg+ z_w9aa1#Vt=1Eb@;HX~Ik>6!&r*UsKh&V$geRTir!OiL}-o!@`gSX5&%M{E-aep4;t z9XvlL3lB(JFgp0chcUANA9#}rRnhjtrET?Yl;I+>5!;Sjl})!#%;5}4yTGQ1(1~2a z&4uz9wyk~mXN7xE&xSgG@lx!~ejZsk=$=K}Dhh|CP){YrHD|gIB55gLTZ~Y>~4K3*O=^ zumeEm+A>55)(GJ4!NjPcub_Y{VgsS;7_eJ39%_R1(^60#@poKf*38c>Y+u1xmB|o+ zR2-KQI#Bk$U<~3Zy|}Wg9nPUK5|?78=aDA1AC3p~qKm!scOQ+gkj89+LqwdK@(oxx zjc=o`*P;?IUSu2P><$C0ReUE9vZc0maFA!40Waz;lLmzEn@D0-?N`^(yIqZoe_NQr~hf}+t>j; z-267c@2r`f3@Kq3nxsGe*46d5Dv<#;1Gx?Vc<1gm$RusxE8jcTed5d)B2<^t2GfmKP+rdL291pYxk`3}Q+ zY;2(9hNjE1%;#29S4*{FS4brO$c-g(?JPKWuF_LhG2%mS*7G&cd61t1R?R9On? zB~&LgbXq4w)h8z6!Qmpy+Yrj=R#nbQb$w0Y4G@>a_}AP*18i`Pwwx7)nirNuWqoj) zQ}~HYh)gOHBrv_Om2Nk=q*m*h?!xdg8)rg7eHiEwW~`H;Wt`ditF5BY8{Ok_L_R9U zGs>ttev@g`QahUR17%UM*-3L(WzIGM(@C9&18oC<2>i%vo{T}Nf)iM+O?LCf^u)pOQC z74*+$c{`@dlOO{_%W{#ZP5Xd;09Y9m4?(#ojeY#P ziUIpzMB)ybx>!Y~hvX5&XyidJaWLMJp6Y7*{Vu28Ivhkd-C&hWzKH>>@MRwmh`?1r z(j(vA9WoGe>hf7goB(Ncypm=G;*@Du%- zt;}fez~E7=tCDjyh(E)fDdS~miW3`q$nzAQeoPg~{?kV{Gw_CE=$~PcL4fA&;&9%1 z-FFe#^0yPch%eZrby61J+oXkxQippSGPO!KG8dLov|>IcSGTbAlQ{Y>r0Xby8n8e~ z)W;-P33pvgWQd#*$Z|?;L>h^k3!?{<@y=eia!VJL?nVgOi>Vx~erdlHDqjqKdU+lC zal*+Pc=~cQz8RrOuO8Zkbxa%#taRJXUh3j(`pL1k&`;KeLKwre>KwxU1}&B|Wsnx! 
z^)CeN@Xx#HeVRumAc3fuoV`4kbO~Zxme&N{^CeWhme7t%-!yuzD0jo zmO}(Nn_SC-_j_YI901M>v1uNk!5EG*=K9=1%xsQlkHHNd>k!FP4*22D{iHZz5dZpJ z^E=U}=FAA?r`A>V%*m-5pl1iKf1KFOTbfTh8kI+TVcGlHn)`a894(i#XAAop6cX-2v-%v8KOzTDQQpox!|(dix#Qd+7Sd(5y(Mv9NZlr!M5FI>v2}+Q zE5=$bmYogpKX`A{X9mf7Jxg&`^Mo#Sa2Ku0C`KyMa1Bx}%?76RXl$gQ>hE!0p3n== z@r!Sj)Bfgp2yri6STfZ1-j!5zjO=5G^rFFXrVO%vOy%gc7>@!Rhe=JH0%U?t9Jk6$ zX9etPF|*8!RZ#9|xil?GXYt#LXiZckDZ1ONV;;i>#6cdk_Y!$nxgU(60yw}h@(~2^ zPj~8f|FrQtNJujSJRr&+muD-zxIQCHaX^jPF?GI!2OI0ASnhU4Q*G~J(9GAy*{1)B6^Z>>E6GVzk`>pPaR2SE2_(kODntuCIP*~C z>qGVDq{VUBJg{Y4;$M|_kIJL!dh^7+5Z>WKRX*Y|ZdeF11y681G`kpDEqiLR^x3aF?E&4l8AtV{I9;B{RvHE4``O3)=4uytk zilXXo-D`n@D%G`M9G`cG4&XOy=Oczo4K|{L|X*n(qkF^5bMlEHzSiehm{~6=0_gaA43b`@R&Q_l*bdu-ZAGk`wSg*`f}| zu!%ux?q4#idcSr(o>v)8EAlSJR;~LBS7xdYhaK=*=~T~yK{FoiaQ1!s3X*(;X~3Zm zT!f6oO<}by0X$qifUd5)1b8i!_NX_Cpd=Z@Q`N*9{8p#_=+#75*JPsF`d6SlCL@aU zhR}}BfEAb38?x|bz}H#v0p8Q&HG6`4{s$du{Qb{9O;G~{g~f8f>(cE7NVG6->@D^D zLBz-MIHfqCpZko#nCIAor_<+B zN9@}x);kB>6u{6^2L7yKC zFbm%O3{h%pazLl-DBS3}VnI#CG(@)CmMjq34b`}s1%5? 
zH4FxL0?%4++Mdn^%0+RX-XU*(h{_;R(vA?3Jq~#%c5Y+2*go|~KxN$vXLFdJFvr52 zqUXd%z-@0OX^#qFF7ghPUU2L~{5<{ zEk^ocpj&K>*PS~@U*=1DW-yECST9bP^*!-Fil>gKaY#1Vt);aXXFntU59Ik-eqp?K zIDOvZ&)tK@d-&9eHlG|cEHnPGc5s!!0$w>9NA;aIeisPZyy|R4?zB=jdbWM|{hSJb>K28UhHI zeJ;IAazA+!Mozym_>d9eS*vq)NaZQGa=&xI2~S$bl&7DWV?+>R2)9j0Wl12Andoob7%3j9gs8c^*Sj5cr?^un-9& zM*qG-cE*+J3`All7{I7~hkO@mCyq!|$RSSo4X2w3*MDuMOVNS0{`C4NsoMzubSofD z8o=Gc)7l0DFrcMXCHh3Rz+(YBwKT32Kv^asfi{xyS|tT4paE(C*rb4$zD z**2_D#1Yk2qv`MCuh6w_*YBQUGr;Q;;=gA7p8EO;y*TAc_0B50D!XQ1%-eiF zq481I!_XFnwDk-iP$t|;=fF#M8tLoBa^Ik=&;ej{rz2>&?Pv8%#z5yHtp&R8M1{N;GtK&Y<|r3(grW|W}v{eb}tZ*u(Oj@ZM| z`d3JN&0%dGaYh*oXf zkUl9sDvGPykugV7cI4+GMe&{R-hSL-l%xYP8Cf7-Sy9oj0fPSyKE@-$ZRn1kNp?OV z8oCM8At+)Uq`@_T{dmGOOP{;A56-%hhzqK5>dF+D#-)qU_z}LLDdy*P5P}3G*zi*+ zNg&75qPUq>2gq=iD<+dNJCgG#*FdJib8;C0|6TS$4S)Z*gE~2#Z9k)ziS7t$Mq3K8 z*p3_x4FrHNnzSfNG4nRC^M1l8DJcaeB7(Ucdnz{(D&Ev3F~vO}l!q$O@__(CdSIns z9h|2bi(wZr8|UpCZWx2_KyaJ-v^Or60#IrZsO_Jz3vVxAP`GvlGBdfmvNFI~I_yIU zW6D$eD!WAKv(k9v)P4#QO-i+C>k0cQcu7x9((STmKtRMVTR=;Y3&N`va)8eDN0(OR z3OC-cgFnI-9w-k{t8=%-F%S@cx@SFLvEbQNQ2>F@w*5%EykihB_L+<# zAvSl7JheC-y`k~Ox}g9o5>}P5v`#({qnZT?q9*%$hPC|-d>EnYQf1_}1H-oOxE89u z+l~SI#HI&2Fs+XnVtuCVTE9O32Ka#%nZh~fW3(!S*Cj(45ToaQy6mXuANrj}G~;39cKg(iaIm=12)a47e&0 z37)4Tfam2!_ot12pbNIgs?!Zo&@8!su@`W<@&dPDdn`Sza-f;;6^|pgxIU%~W%XrB z&Jec=QDp&*SirS_vOX16Xapf@IbM~7iSCA|8}^i(k1Ct`9x%eO0tIoA`En#)ArZIf zSzYlsBc$K!XmIY#XW8~~`b@y-@F;1;9UAdGg3sCJ9f+whhjppo*+_VGpio)u{iYZ@ zqR+tPW%6qv_0P9C#N+g6i!?Q?n`+kX4vC`QGtOKiFlytCfc8NcbzAFg|0HgAb#Yv( zFRITT>oY^kANO}RceYv1E9gktL-sIE>F`yhXxWGKdJDf1UF?uv(7wjRhAAyma-t zJiN1}r8<=ivy?LPCe9xm2&|@l7;W-*M?HAEyLTLsT}e$+JL&^(cl*cOa+t8$-G2Ay zcJ9Q3^`60fb1A##NwoM3}W&S8DrNDJD@lP9P3GlxgIm2Fuuef z2DHprjr2|ilT})KID6cd^&4%>mxJLwK6)wA_ztm0ZBvvid0z>I0Q3* z_k@-}`RtE15xCO%t?)&rv!#$8$amzk6)tjM&XPNXLCQHpX6lhwVei35u-DXraS_vN*@ z;XNPRThpR=ULi01kvt;ejVB7}iz)M3VqZ#@oiMiFKADW~?7W%xpzo*o*hBE7QO0rK zvKJIXDoLtBWBVl{Lp9^LwYtys-;jD|vA0XhhuMU8T36$D*GAbT5sAm8noe&bFl 
zB|jQ-j$g^5Y^!I@pzT$023O19vKRc>P`g%%JfnD$nJnhJpe!2lI z?!hk0Xea0wy{VlOepe1{U;!AATuq5{pL5= zF+wZX9)@itN;saZ&14BjujC?VhWp7;QmYH!@p3x@W&6u9F~TB0Tv!7EWWCOmOK#nx zdWwjL1w%z2R}cCe%TH%)@7H13?3~#l9)N`emGtEdrj9W|YEWCA|K_8YATb#GMclC47?))VCT-V8(X|HKOVeZNW zNrHg#=5atD`~mbnd#7Vg@3;<4>{cKJsPQ1c8*+O_Q~V|5BmyiA#EM1gNsKN?NAU>_ z+#C$hPesOf4O8~M{Eq)eA_o=nHsbF#??_e}*k6Kh;(-a>*h4tF!NKffkjpr&q7MYPtaC8;)S0EFKhQzLE8a4=(xE zbQVW|rcXwvd7RH7?GWQ)#vsC*SDZT;`n$AogUdBw6C$DI_!do{)M?KL!`}j1WS{Gi zC0h};rIteDSR6SQ0P%I_>?3*plE+bIhvlRjyPokjBN>?&V*J=hc4djz0bb%@TMFAW zRqBcNCyP2GMqRUocVD|Pm*d^5{*Yab(oK-nAuZT*dB%78;F`qrM~`%RE(qM8{&SGy z5W5^9gM;|P7gG|l(~4I!lkIedU$P8L1r*Utkr1 zV`5+8C#LRiSx+ou>Ml&|tYZw*VrbhBL@11?gzCUjmiH9Vd@L|+cE+@{@snt>epJ)d zp(*jjqooqqk$;$DN_n?+#>#NnM+|a4l{-k>ePH(TR{F33`P{+Mjg|6wUUqW~dztLj zI2^8_=PAn@Fy>cT7Yw~cU7qKLNBvRJuO=$%CpN?qYj`nRYCp*!?)o~A z)m+tf1lUy|k?YNnDw|xJgVQ#nuV<10g%abqqx<-oAqQp2*Qb16+60xLW*oXeZdqd%;nc0oh6Jau`#c290BS?hUjZ(`)>rEcR zz5XZ3n6FPvXeq@mH61ecgLa>vW;doS%KM!TGo1{u%FAs4*9K`qH-4!4rK!`LkmbfU zTZ-%z&TU2!n#q}icDsmS7R_*(fn(mi3gnhycSG{LlUsqEl?l|Kg=}I(;8J7}>4=f# zdECDh-FkmOfoKK_umu1Lxd?1Z*_SOkX~;BMR_Bx{AM0B5^it)bASHcI0bMNa=0>bR z`$~FQinnCXB2C3zb&utTZQ$EAxRK;5KI4gm@9>rR$7p0F9GhG}v| z+B7ODki5W(lh@dgJxwSopAF|J{kZU8>5QJ8ceHj5I6ZtW)ijkr6V4{)rhGx={M44Z zx^Kk~N|1EIn(GqlhqOG02i4U4_*6SK>nUJt$Eqg}6KSEvz*KI0$^_yn6qV`bMxL@SyIE!d}y zU&$JBeg)}Xl#&2y@&#ald|UJudM#okA~8R$u{%Bn_yj0m%jpB0&k^bO67RMaTEJm- zU~h6jC@&zuV!(U=>O+GHG%H7Nh$o_HMYUaCm^4iD#HSnwa8NZI*eo^MyN8uiXEJV0 zpdlKk_Xw~^cceCQ#X9V8`uZn3e%zuVdj&nSfEwW5FaY`^E?ROHQKvF45?TUJFcg0b zi8Ti0XGb|-RFT6E;FJT$;!BrQZPn-4TteakBWT`CU;uYq;jQ=nK1Y~r$Gb1AoqLZeq@Iq(YXLJ zGn+~Kl{145O6m>G>@YGs@hgL)7Qb7&)rK4zvGAbDmCVI9!MBlrPCd-h${>s{4DEV| zrCKl3$|u=Lx(c!H3=%Z8?xb{A2{O-9N|y!9azA!A-y5P*=K6v8W$&Z=$=02ey-rG9 z&RE9#it!mFz^{+R{yy;ux%kR#{+_N*q^Q5|PRpp^cq7Zik_^2z|IlTHmwNnp?12@2 zoFVHb=iU&Xxz6}U|DcQmiiPa5=E<_reoc?4p2(q zo%>0w?B?WOh0|Z2Kvy64jqdidP}{9&l=nHDiOfHJO*>26zlnaYX0biFjCOfE^{ccu zCS4a56q_N)AUSlMeuskzbc--5Y%H)$zV>fmDv5Jb-JEvFeo&^)B4gqnf=8@FFe|+{ 
z%2@jrt*JseJg)6+k996lW=sOsbsISGS+p6%X9Sy2`$XOkoVH=?V?>U8!1Xv~M9LNr z5(0vp5La4ilbRhx5)w^08D?$D!}I5oEnfyd7H;fGVfRrz4twK93ZGZdfPQyyHw3P< z@gN!od5;M0km?1t;8qa3gHSOu8W1+6Y`^WRdF8evBzc5A5LJ%n8vGr`5u{kywuY)1sZhw??_bXj9)G;P&3`#3+8kWHlVhDG z+B_e*($tiF)^b}Z@6tp%_qN=9vYowtw~1`0(QbR6uG!Nbj?4P~31XRzB7SJ0%{;1i==Avj$bgFSLmB)4i*Hso2F=^?tn6=RK`$sb_8;D{=c1{~Pgg|VNMV$)7=};^M z=g7I!kev>KgDm(jFN6me+6xa8w(REu{?nxej}Z=PF{XR}o_%mj=Q~05 zBJ_QKEn)AeVU3@qsQRRLV6bRkR8f zBfY!hBlLqP<{*2XS*GPOzLu8~>1H#rMI`|LP1!f4)I?s#hBwzMYUS6hOb4YrASPN* zX9@*&trF)G&*2dL+{~G3?Qj3Uo|l4pdb*|lk}OQS2FH%Zv9zrj7Gc-)WeURx0%)H| z?rLK@6WB00mr#mVV*z!l`a@uTUH>S@{z{I4gRJg z+8moQF_gt`Cv!C^HtureJqg}0yKtMJT`Y0e4i0^#IC;D0|5>`f^Mvx4p~1a-r6JLt zqK|@X0*0wLc!gkOyA~`}X|d{*Z6{OPF?Wu=efL870>=Xpx&+V{?mWo6#YNy z@}*G$KzxzSAe13+;bk{>A9`QMGNzOMLi_mb#Q{BR{*51Mn4C(%(~Kk()r_8mYYx9q z*PB7fV}G&I^<2g>bLB0n0BRACQV_+X2|t&ciLrN$6A1%aVpx7n>QrxasXOT1`S?O$%z6 zdbC`&v(81e%UyjndEG~+Z(POlA%RIncKaUogQ^>dXg za%Mf$0S0}0K0Pk)i{_0kt~r1#p^gSE#+=W*-e7&-Jo!N~d0XXDax{h&Xuk?6vzx0G z_88uy6@O0)&}eocNVMtOSW*+Eb1-1a-8RqI(u(Q4+)!)%tQTwkDmOfOVP$2*+@PvJ zhP137wV{Fx9{>%@r#hd>NI)s8u&0JH$DC~%el(|bVJTQFbc6Q2l6^~Iq5nPYcK9y( zu!+``!iaV$eOM5obK{`s%&cZ9Ogv^{rw`SzPP94okow=<(ZQmKSE}Uoky~;4`%fUg zi=XI&y0rZ9$HUugP8DjiCQ0(yBZRFfKp&g3;0}egnLqNF8w(e`nVu`IK2OAtdXnPh z*!q^xRgQ>jcoHc1Z={90xc;;dvSB51xumeU*{q=hd3xkuzf*$E>N^a#`i=)@v>s5m zH+Bbicx163S0Mx49mqf4{hn=;WX(GP+R_g$WX5$(sw=g@5>VUf3u*An@cjg!YC8_L#1+9GwWa#!FvQ!?TMy zafFNj!6M`2t({F0LZc2*+ZuA{wdELL!ihgkEg19j7R8N7P|V5m`~MeTXB8A@^lka> z#@zxm?vmia-CYwBAh>IAcXx;29^Bonf#B}$?(R0+n)=UFP2D@M=jnSm_3g9wUhB8A zVH5YY?LGGFwNU3V4-AK3p9;}z%bz!}xsdN z9@g5irdbf{$EL446QH;v^|V(tbR`zi{ZO1AKU^)xfaf(3e zgyY8S*9SRXZt6UwG`$z2`Tsi)qbl>IByZ-UXBzQ6(f)?Ilk0vK^}6l;7p*yt0KR?X zVoI}hZWAPb^cu3}ve*Q=jbEsh>pxWIr0z0=Sx6&oa_g*Gw{;E4|j+I^j*qFX+`KM7#9SQN=s@rqhhvV zx#do(sVJypBr;oNlP#nm;civa5Jw9>;CiEzH0@_5r)6MGF2P0b3!tyF5$iY3D^>Db zH*e$l4}3VMG&`aEC;8_Htk+yP-(K=NBamzv@6J;qI*wN_$5f*z8gG8x5OhoW1I_}g z@!LI{IgflXK!d$TV6dhj+W748waw8=L@O~Lq2lXPyzRQ=netFbnmwn16-*v=$nk6> 
zc0PKO`;jhR+x^>tKSqx`r(*wLsR=d;%n;L&DFsSWO!aggg*}BR&M~PS4pTSk#g?7N z!5G|jjuuJ8Vv_MzsrOYvlkL+dzO`0^Wq3?&W)xTlQK&0@w(zg0*yZ9DE^hdCmLv7k zJd+YWZZtsp5A_aeznPYOV!-)M!zH-CRw4$@1-$J9-^VFR>LYJHWiWThKD5;L2kCk^ zTpmp~$Wn#YM02=4#Wk*ZY<6R?BZP&Yvk`0&a`u&rD?-o5l#$%a?9;)-)X+9#dm?ey zQt4kg*Ow)i7;6G5w~Eg%tnVd})RD~s7alB5-=cH{DDG_uPdy}}`>6J-X#9}9r)a~1 ztO*~62;(fWM48=*HWv1l)czuV@*Hw+duve|&0U#yQ80I1y)vKYy;8falNA|j?fjZx z-|DFWbd8=XUsj|#WVk(WtBIu|%u|`VK3=x@26E`C%iYtFkd-Qfy z>r1k{HK*{Q`rE)Slh$B9H_j8nGu!ubN{?LCwN1+ecWT~pM&MVFn$i1Ahcj&KQd^ea z;8&zWIhDfWPc62k2v`K-z{NzTZ2}@EwRvrkFpV@#F$N*Xi13Q55OsK^~{RI*_iq zr(URlw)1nMOoQG5Q%+sip_z~jQa|Y-P_Zf08QAJOd`KwR)uEzC))0pK4q(>}lnG@7 zTESNUvn8{OfiX?d16)Eewf%~{SCMJ(p`k0T&l`BK*@;5**EXPYs466Di9pf3dt_g3 zH)HMy-KWZ5C6py2j~6)~i?}SYdSM2Vnu)ZGp|8?Pjj^pbl9-0wrvVLC!morXN#nJW zFzx0Dga`&(tW+jfF$GjhdfY=o+5*1>4dh+~`J=GJu^fdV((sCI!jyHYS7$PJ2mrh? z{cktaGTGmKa(s<^H9$=>wEn92f_J3XdfxXY#IiN2{XbjIl(|fAx!($K%c5wQ^KB-s zw_&CE-G93OZb-Np{Hf)B=3(mE>NU&t2Cl#$-}g>zT7nXb&G8}*qbgA$WD zN$QR|_6NLfTz6bb0W7}~V9fd^H~p;$dvI*_KUx5&5tFz%MwfxxY)tqq)NXU;#7(^g z;Fh88xQGLP)2GsBj!j|sU)Ojs+h0TknYv2LNGQcf+5{HgU~j<)_~;y~q%rU}*VTI) zUrPYe0O5aTMKLYIcGfgL@^PkwRbvwj{5Mjv%}Y>R(t%%{!DK>|Fj+=hQ$Kz`3nKa! 
zAaLK|p<)9piJ+Nx*Tu)hgTDMkDGrZYU~u)iY*f{)X!m^Ns0O~ugOd5vDCsDO>O_Qk zHekM%_?zmiXol9ZCT(lE0M`BXS3igdo3@7&v11s^lptx-A~`G))Tr@qCe}}7`8%>T zEuJjboWnw!>Fi!gsLuiOiMB@KNiEX`bLKM#-ef+~D{;_gFf43-HeK88{ul5P54tPk z7yTc>;DIi?yiDEOHXH>T#ZAu7QD7s+Ls#fueFT?rdZZu<;I7mj^lcmFY(95}XYZ7C{$-FdZ-3A`^Fh~w#v3B6y#pD0sk;Hg z(ZwjyF(F>y+R>Rn0P!BNk+BoUo-XtiQs-6prQ-YHdB2m(`q+#4nuE(p#ri-=SWRnMTFb=!Wzd0N;4!CCWIxha>WDx_8XK=RzNz}8Ttv8`Wrz08V z{cTE`lyG}MdT%LdSgbqN>0+~`B%i~^8(1=AyHs|5`{ty%=~tS6g-9!lDjsxZF(UrL zLX2jfFoE=tRoJ;>>&o*MBJz#q1UweRzZh%+&g zc{hfr7sBTRGtJ{3s$f>cV54|9NitacxdzpuQ2IZn?8^b@tgF6zB)b?99V`|Zu*D2^ zSw~x~9Y4}B8JtMV_ql7TTf2_9GeGudj5w%kw5M-ppe*gfu7`P;r1>zqGTl?IxNjMy zAsjt%>HhKt6_h3Q!^7N53fW;Eig8Q(-1o2Pb7%kTHTq|?$L|Z^Y+A|Ze{xtkylUK3 z?w8zqEQKewdn!lourA$^Yl>gl3KX{#U~37;8gU%QJpV0_F+Zf9)p<%3%fD~;x_n!y zH;X=*bAL}T`{k0?jC=bSSKvQ?oDr*9$0`3<E~i{S}gQg77@0)(7Q?#AbA z$(F}wU>~6;v%;ZY?8(+hq#E8?RP_gMFC&^n#l^Y8M2<%3=oE2X06cWG-_oe^N&;pZ zU3>UOQgPz8P191*QuIGAGQS|au*m{C+XQb-`?s^+^&DTSJ5$FsGX*2p7vZo3(7BYf zsYjP|%D14uqgTSH36WMi=LssbT^YAsC#^@Ev2q$X=u7^-0r^ObX@aI+0V?wsDQbua zXdqpxAK4daH$fC@q|tF6zkv-0WcwGH02Ee}lI**(@4Wn^kWfQ7p=~R*jClC9f6n`W zyV~1%skRUV40|7GRe*LG0~BRKKa#!%XrpMFE(V$P4d$6M+B)pGcLrk6-bHEHIzmX+7L~XhF?~Mf4Y_ zZ0eWL`e}?zHzR_kmITP(rBeEmFHiA~&(zoR2;U9HWQ5Pr+iZoonBik3-8m$usiIX&6YOg+Na zMmX++IXoFp?x(v^e}H!6tX9?jM!O3me?N}!7iPYb3y!^TXlYQkvK@smfL~eo2K&m!FABZVn1XXzKrTNe5#+_CquHW} zM(6()vXNdG3M?3K7V66eE6*D?xnt$f(}ik}{<`P8MeK*xf{{i>A3ulPTnuy!(T+|s zHr6NBPFr@8?}y9s;A&W}#@abAc6n@N>c^jJ4fcLCxZLQ^iU7vkunxqlLoLdn^`Y#T zX=Z6fVYp1K*Ph#~G+PVWo%sQvWBW|v4~8~hkbpC-OhYSW+QHZ-nl8-6>z*yHQJM*!~TeOl4T-9wDsO! ziXbevMHQ{Pcm63=x8Kdf= z7~SKmI#&8q#!5|H@tk?a^rXPLXy@fGhSk!}b7{|`ta1dHA}L>h);I(Z6+aA@A1!CB zcLooFQOruxBtF8I8(c^B_8RGz$MeXb9QFgOP4IdesHvq z5-+1seYho}t@M}iHS;Y~REsvT;BK#b{fvxaT_)XMTwI)AHRemom#)nX3qDiN*!5&6 zwe;2`bkEA3RyzCD(Tg?F)8xXh`cR7u&{fDqngw_6y-m#X2{SF`R*SVq_c)c|%To*I3 z3(f8#eOd%zpzm&V1KI7?OVGK~*uWM1(AmQO%jWw(6W9L@>w6ei1reFV99-oD^9Zm! 
z_ZZqdS00lzo!#vAD)!*NL}Q{_l4141sCUiItE*B6;e2 zN(_`fzD2)NNl0swaY>R;NjQFC(=7%nzbR0|4&8 zF$$47U#kY-y0|7~2+f5=W|SqwRagpv6k*!j)$5>TDxMrY!ihcF*px?@R(!7;Nef{* z5w;{Qh&e|5D_LlsA2kAayP9vj|LRQyfLpRgjC2=1GtBH1KRXJm`- zO8@3a-_2bN1Q_qi!kesieWk4{3q~;Z50b|_O5PMAVxaxo@l+NFBAVc|-8N!wrAA4X#8R|uB)1X3gJ{EXm+IIZhX#*6)giH(IZPD4viBicH3 z9Nv78O~V0eOVllN|F?4^VLi01&hrM95*UGAZgB%R`xUwdHIXK{x!Xh}LHwKkW)ZP% z5XTq;A0HaBFw?a6i%9!DG4KPL6C5fhU&-_}gxpS+SYs~j>xP5u#A*&6k%*@2Ef<_p z5#J8Dg+tgWe&|JY&;D=InMXh!yRCwsT;_B7pfvLWFMLb*6T-HyThhPaR9H__(1MZj z!_c+txHhQy<|Y3gf82mDcE)i{M-%Sca5mNuK9`9WjXIS5=np+wn-Y|rT0q(OY73z? zk+&rD1w0?PpBf#*k9!jVAJ>{8_KP$?7EP34sx>(fF`yBy-Hz`X?{|k(ugIPH_8>ep z9^g=wY+f7-XZ7GRLo=)|y+5a)1YtvDDGqW9eMD4Qh0J|FeO1QSp3Q^5; z6;SB7q~>EzG9W;Pc8ahJ&q0~Nvygym5?|q zgS%gHOAG~fK8Iav&i~5ChyNaDYu$cp#rJU0b2Wyn(oJJ7??C&A1 z#xu{_UeT;Ae5bB#S_kzwkqnqeMkUK*ZF6E`(2lBXf4X%!T#=pRT*SU7Zs82#?bA2X zV`t22B~%^HsRY*~bF;y@8&{#LSq*kZCPZ9GYk(Haz^`fql`eiINxV(h>W`vFNJ-}AhslFhZMP>pDp zxOa@SMFiC?hwo*24?W0y+Cj`U zfqY)d1Y9VHw;7IB!{CYjtAM?fMB-85_?$cc1|WL0M&U!|T>Nvv^&smOZirg=y%GKHtkQ~{5)?$mKu zprDQ$W)3rDXzVnEb2$;_79&*q^(4kl2E)c9K}(%G@mXBwgHs3b8eVe2YZ~Sv||wmFWZQBCRtNwY9Pk4dXw~6*q^|fNE+VW zoAH_;D5U?*SE72Dna@lJ&9YlaoUTG7m$$P1>Rn{qL4>oCsb|B~k<1O%I+~xPndWZ2 zUDkq(WO=2vz(a$s`FQFii|vX2b;3)l*50cdEdlOAk+$fl=6|l5#R&kLzk(iR+o9gK zxnGjeA<*z~z%LbyKo~O!K@>EzkYrF(ng7RK zXol@c&yi1q$5^daKuaA>?sOy46Hx?)oQ1ld@ql@SU4GM#QUsWqAWvK-Z@@1}0-|?n zyXOjMssQZ)t2?`7y0L11g^4zKyCvlQq#V=kRGW%qSOoD?35ju2PpV4k;C_RM_$v&3 zm-UfBlle$}6T7c|&YkHYIk^>5qzPsGR!wa)@cbiDnBmr(pt|&2I=dwdftbMaR%zk` z7HMDBOo$dg*L-s&&m68uH=nT5$ryc~>O-e!&(GzA5=>1JvyU?Kd55*#$M(k~W1tAW zrphDexwhkEozf3G;6pZ>qw2P6_Ae}(eSs8u&pK&Ml2pV z&5)7lvmeV+0mt#LGr?PNVDIc2Aq64kQ5FfM&u~V*;u7MpwzYQkE))WLM*}+Kr6P3b z(i~L0d#P_1=S+K_bM@8jH_n$|+^x1N#2mjPBWa-vXwcic`0_3WPS(KPgajcx15@%l zG^Z#3(tDxYIfMWP3YET9M<*JvQrrZ`_dVO*pj%+Fbd8m-tAH>%^UF1SH*ReXdmIdU z!Id*B$fr%8KS`DiilahYT>UO}8RiD(mm!*NT+dfOsf>wHo4lcA(`eiypgRD!QWvXN zUwHQ-;`mcsdZUX%xW~4JLA)o*7M$*fYC%TEm44bC+$f#E{8c!}i5idWZ(tsCmrjft 
zxxYAI8vH?#g}&Ua|3G_8j{xbvBW&0Cx-s;br+2urtns}$jgXd|Ekq51i3t3=ko_lG zh@HY@r$qfhyKyf@$KnU-hg@~E?RXYc z#Z3hlC7v9xUe}(yGiR zXUEmfk$by zb@qTn0naDRm)G~B^Zxe-ruSbbybh!l>AZy(zFp|`4sgiwrs5OXvHo`6 zynnd&rI?1!5(>>E+w5P(JZ@J_w#tvs|Di^t5Jkj+$QVga?yg`XqiWe`soF_Ynj;&|wNmZfQO87CLA>O`ldykBM_c*aUiL`#z8Si&~ zze-6yu=wZX!W@5~Whh9|Xam=;j<1>U2}35Blwq4avQmFzr&)GUIoHVA-f9m#DJ8@^ zFHOIB^Ql|td_@2+9&4KgHhM!3e11z#$Q`O4yi^FvBLhr1`Xr}{7oA~)PcPf-YI%PM zsdRfU-7vdk=tEQ?)}os&84_Y%{2NlbMMj{)%CS?Y7etGYW{OP;3B6z8RN zCgwF|#P4qQ%ZXDvwKwf2m5x#F2(lW>90|MU`9$TBjnUYkCNcNVD@}8ud;Sc(r?$3L zC-OgW$3$l%`!!(WpM(>U>+kBX-e$LQ$Jxu4hbI&rcg6}rG0uMm#BISuf8|YiPqz?7 znfy^jJ=cKNcN2UT@+x`X!)(NCUOG~^eJcB_G41sCP)l7#Qk~}*uaySTGS7*mcNzRf zmfDZm!2clL|DQIpi1I?27WncfGcavDhh+ElSZ7(V>-Z+2d0kR!(t8y|>M@N*;RD0Q z9xCAHe&=x*s5*%V*>~=ImC<}z8lG&#WZ`22VS=NEhFnwXRHkcvt6+r#ac1Oob5XDK zkbrV89HN}dP+VqYe%%Pk2)GmrS($ZdH#IQo4G6kzSbEUz7b>)-ZWWu4RA|+}PatYi zISzpC1e6P2l~pd>V;R5&)U*ts12my$Lg2j-5_PjH1tBm!r6>@Riqfv*gS}b%>IN>T zuey|HOSN;gX(q=Qq*|onb{6l{;Fi z-6^RCf*_5qTn*KwIQnP;DHqlGz;6{YVQ5n-472l9N*{6cqd#xH)6I>vk&Z>Tb1T-s2SQ0dq0n!q28<2*-3Tl+kC07I+6QlYA<0qyPG?2i6FT z=8kO~5fxm9VHRZrFZr;k6Joey<&jbpm2WW5Rjv+#MynNcI+8Kn341xX=In(+*BuFX zQq?(ySmX>GOK?_)W{KoF@fF8Z(~|m?b719C<7geRfsX|_UPbPTQYto8KpSHR%vJi) z9}%mEo>6?*Lg3bPDUtSC0I*4CUoxvclLWs~#{2n+18OU|3u?}1 zI@!M4xHty-7#~QW(|veNe*7l$NqPQ548g;}zoofc>@m1`8TA?Y@r}QPf)z?sfwD== zBRQS5T8J9+;$!>Bsk%K9iA3tN9;#6<0*LNbqDI&lfjLqdWx!PX;~bRzDV$COs>Zm` zZY5ST2U^Tat%2AiP~=AL!Yu|$MRnuu#Q&>P?Oa8xl+}~f_U|$Fu;hO(f_zzULI5=V zkqBf`nhVmtPkiu9M1lm?UfcOxcU!|OaH3h3(^%C8=3l5c*jJ)wfVX%%39vxfDGhT1 z5$N9N>ci^E9Cms*gwFjox?APb;_U~VN4-^Q*ya(LvCl^yF8FqRl{s0s7=P`7+AWR) z=5FsF06SKasMC*l%tI`iEdm`+YU4fQ?TOs`-k#1I!xNqw!9=_vENI+=5H+1%d^klJ zd@jAqNZy8XNvmgfeK%9gW}nzJoM;n)#oJ7`vaW=g9lu%Z#J&$>J3*16lajLcC(BJI zsz*lazA5(^)iD#xd}H%L?P+3)h&IUQZtkWC{=UYJx8ECoWL`>DH4_TLz z_l(>8R(^QpKUHmS>OCB*-Zg%AeSJ4gPv^(J$fnR&(OeULx1f(vBr<2;IHr{45y@{M z{VHqb54&W9DDQpmTBugFv#E@Hi_10sT9QF^bwm+d^1aKnvJ248bc9|1{9ui%1&up^ 
z=6*QY*Fv(q6o+$uAEV@T)n)V0!~iT|+4(xC6J>dgb-DX~eaLm$J1==h0u=TaT|3?eZ{B{C{(8BaD%+j!hwqQH5! zm*JA1zQcqBVvm*VQHaWr(ujK<2e^!Sy%0LAkP)uS;! z{;$gAh1^%oS}pO~{Y*i+6iqV?t!3oGI*(7^8_Nbg3XZm66Z*MVr3ip!wQKfLaQlIy zN1G5g7V@09$z-c#LF47#!WxqACU%#1%2(=R6cxL3MW6jOd+UOKQ;gaR7WZG8V`pxe zdk5i%Y@6K_$~IYxdETBxNa%@*SbE$hVPF(FOTT?Tr|$xr5@9I0pyf6lt3r8YIYO5D z-HBiDH#4y?Sj_2=m<0|)FD23X-hxs48a|gjBR|WD4o>B3ymh=!s&uCQ8%ucglzp&; zt5o6k@lFpZx&HrDO?uy~f6t9Cc5l_AT%K;mZMBsa1%Z@1iWfMTwYMhjpf7=>1ufnr0*RKUPP}FVU+o7*D;XC@A zxW}}f9dvoZmo{PW!>=wNRn;g$GasS)*g=i2=~n21mZ4R^9ISe%ddgR4QfO#|xP8i+ zX7I2jCMf#l!@ZP)yPzjSYdrN^jZ6|4(;b$T`E0&dAp{1g7l>TB8IV%j_v2aT|Ww0=-sCt{~zv3lJ(q= z#iuhP(8vN9Xg)k{IsC1nB^fLGH@L6uafOe@CW5%7sV0g=rb5(^BGkao$!^H+1;!@U z!=BfMozYIs1Y;b~IP9O6SNBB*luG&W)^Jy;R{G`=7n`Nb=Yzy`&@yDe5@^MOEtI(` zCXDllM?2F=+2+~&+y&MkPd6HiH~b+;)pfQtTkv&V^RPh`3yotkfnIw$B_G|H>V>=C z1^5-Ft<=Hq0(zFef*n(8YVtXWFPFIAW`f!q25i!ab$_}BZPX|Y0u!fQj957Iu;%Ad zgPI6(+~#>VfXEWL?jE081OT9i0lPAYee8#IGlsxU&qfE{@Gi7)5|)K(4rWqM8dQqvb=cm-|v=n^gqt-%Lw(4 z!oLOgLoEq|)bRhloTE`40Xu11jVd%UIphQl=QIYA#f^S&E8y_%eH_n;W}9doBi1yI1Sw{4>nF0wu$7CHe$% zXa={>F80Ye>Ug^ArZ|9=uVNmdkk8B&CXN1%k0n$BSU;c}sk&C$nJG=>%-l(*1=#{O{V_I^@uAAMmz+0$Mbc7EhxVr$;WTl=zFsBUMS z24}b-`b$>zaScb+3}xl5hCk$(hN6-IhNk)yd)o6adCkfS_k+u4-f>%6oY%@Bqs7c_ zv-*v3uivFseAKzzYwMa8%BobCbpFEZW7t-y2LnDdj_Ni2W6Nifv_E~%YGXg5uh-Jx zj#ITvJfBxs6`eD!$G|YP8zujgY#}Kn;IK~tLa~THS-hzdN&t^g z-=S%toFK;VU_IHIKy2wmBaJ!3(eks7)y7uq`_qnTyt9;GzRvw$&%FB$>MF8LLO)%{ zPp9{v*UpLVwB1kZ?;A63%XaOEg1tabdmXMK<+m%jaFDC&%F6!zaFGlx3#rrQRlz$# zMDm(H^<%LTnVoa?AW%K(iK#Y5NOi7mQzS1zji~;c%zBZDz*!ONKA&zOdV7J9{)1IT zd7ZpbP_p-pN9#TBADF|A>6&kdL2>*}PPopMoOQ#G**!W$zen7#e2F6twaDG%tziKV z?4f2{W-$;_Uty3xG%Nx#CYX|0O7}kp9WYecl~av@;Eq$?{*LRz%K0``m+u{ShpH77 zZ}QDSZ{3@$9Z#~Z0_+wA?ld-1gBSpxzVSbVn|X3?Lna*ynGcO5mXZ~WgE`J6r^8(> z3Jg5Slui0T?^X&i1=-fR?an47z@9MIk^57hSZR+}02OY(E1Vb#yB!pFrzw<~`~8kb zvXXrJVQ(OZ^s6>^(S0d(hDG^i;sk3^LL&t%3bHN~M23ntzb=G}2|5?PECEYr1pm8~ zb&0}86DCvC`DjtK{?UOzs|$w;F_n>xwQiT@ATNPlM3;;ZW)uQRgp0L9Q`do;jgDX#$!s 
zBF){M?!iWbyLf)A}{}Dr%PK|xe*hMJgN|(6z?{Jpm5ofmE-LSSC>n*3DGVGfK;=?Z( zn{@P2EFcK;HxhJ#e4Y_|E0R(UKv!tSTtYafmIA#Y7t6IbwJSj4BO(*_9+Me)dbKrzXL^q+wLB^A3Ui5a!cyJ zIKZ8-KEUHRul2(mAg|Qzupm%M;r>|rmrz6Zj@;(pX=Y#$F$ zq}o8?^mN152a;d_u{Yf-`o62pQ{Ga&zZ`n4A9%Ddt(s|_^*Hjuwbf6|^t|})zI^Vg z>V819zSft&JZzzFaVWs5uX-jcnb@nprlh`-TN3>WnM688!6h*_iO#^dmOXLKzfF(x zioP;dw|P#8?v>Mqmmp}exoo(eSz*-E>f zu^pzuVO?V(4yk>7L(@W?;PcvDprVIJ&8m4JPSh8pi?$kj_=&_eX_)r>x8^sVYq(*( zlSq~^E>@aq^isqZw@WIYm&Lo_j`Qc`tT%h_g7&xAU_-3L-Y?pA7x>rK#wy440Nq{* z!`Ht$3dck-(`COwOTU#0p-bSQ$pAvxlgxOOTPWq0^TW4?8(%y6<;(q@87P+?) zd-r<&u-5R@oPRg{eymvETz=~$^ZUG*VhK%JvhmNYn94+b8;z9C`Ok_uo8^WwvJrLw z#rY>hfN|YoDYw56HKQb-YWrNSvVhJ#cKK7$&DB(M?de~y=x`W58bC5PuDvj6ILOsd z(7K3`6~HXa9VM<_pc$ zC?%=?Gk&3l5(k)_C<|%9^?*;P7#u(;4{at@daE`K;i3Q=MYvV}HvnPAm(EP$0-M3P zuY3TX+O~hN#3z@C04D$nyKgPTSqLv>8RUM|M|(f-d>XAErwxS6;tQts{7M8b%4s~dW$q_3qdaTpkbwD* zvtbsajOwn#$9}}9o+F#)S?kE7(tk2UF&mHB>8*)@K z{^J6H!EWcGk-y1_`Dtp7o}ZJ>sy}Pg7n8OuN{-jNue*BlI#>Gbr*~!FOE;1o6`bps z=b`3#SKW_Pt0Nx=009E@#hbVPL&1baczG8GU7^2}J4bom>v%A{@}C$zGK{~Q^pu-%1w~DT$eBKr_#Opg$#UnyznlI|C^i8L+&FM}^}*6UJQa5%jPy&J zo&h|NyeFsuUtsw3ulaAmj=}gUGOiziF>!gi8Q(3r3bS^y%=b_M2 zEljh(jf9ET`{Ud>-Lh`+eW4}=aH5`^Jy+M}34IPPiPmwxGn#p^fV1K=+hs*!UR-Rl zT}xQ{)MTQr0u-P&bwwLLjlBXqgt?k8rJzHF<1lf*A#u z-*@qD&&V)z`#UYML@VI29exclsBU4gn{YG^?n*$NA}4_=0&0wuu|XvtH#3a~?f&u% z9)$BBw3DxdTKZ$<3}}RCG%n&!!#tq}*;M#zsg@y=h`!Uquk#DZaL>C}GMvOU6e``$ z`eNQ;MoE+o7ekGtTS__)uA)w#xm38$Y9(jh<7G|`H~~_hZK8F@ihnKvd5@C@rFKah zvf@%h2`hR3C{7FcLScOk&JbAQQSW~_0YHj-1|(@ek2#H6vv^?J<=9ix%9qsTQ#KJP zqX(VQ#XvC|;G$oj$PjoRt1r8&$uZYE%c72B&!Y8KyMNkS3>Ba4#NwjQMyT>>NrP^T z_|C41E*#&0QKUjX%!+6%`;SuYD$|3w)LJ)B++3Fh#jOozL{Q(cS%0V@qMe zkr59T%W^aUh}K055k}HDay#1`sGw}vekFr!`Ihf>fv_l6S$%3BD`90#$3LO!kjQ5N z+D8Eue@2{odH`pnu$E*nb-$NNHf}z4iVGb#JKLxEv&*nP(HUIDx_EoCCn|t-aN^t2 z1cKABSvH>aVT_qxufm;4HMXWXWbTyiqIw;RcX5;=qmI~V^;!eUXQi~!*93H6!vsnx zlRqE>G&zWW$$Odvz0G(F>B<1ola<44fNmDfDu03=itipwurF?knc(5Qz6s65I!Sa1 z(nJy6I|2r0VAe3X8Vft~CUL%=XU9-P;6AC@2Qe-i+lC1_1rjfEleAB%b@3#eh!2|E 
zUn8CV>$N|2J9?zR81&(pv!s`c^<;;r2hz_cOO1~9=p3ce<#jxo-+blt;N=A zZ(KJype_py2JyRX5xztgE^Ez0PZ>K>M?LmRM^94sftsC*?gJ#;ww2Ch(N zUhbakZTK>GRp@n#b($-9FMDd=`S=;ex*R?`%gla=r~5Gtq+&)B6pes55ls58Tdy@-GniqIM7OoXal z{#8X&H>OBSbkng^bQ&GSq68ZMDMFqyk}oO=4Wg4BI(5o@-P-r6_Iw%c+&qCo3$6Fu z3UDx1Sw#B8Rh9o|W#H=IgAj@n9qP-dt8vv{-?CD%)-;#MVb#cBL@kW-{4ae#pe*#U z&C_1?IWqpV8ji#wI>Wn;D2=b+M+J!MmY13v@pOjET(cf7l~17bnZ7>St z{p|E~SEqUDt#|!J@^s|Ig_mVaJ?|RfBEG^}&pi)!Tf273yRx1`M2`Lr3(vli%k#9l zU6KZnt^bVBVuNCQ?72r}u^D9^sZjj8Kd{zh$rG2ULAg+&J_Rq~0v#|i&gCSvG1c(# zq_&v<1A0pI9mUHxrO^nts51~OcV3m)ePoN$tsa97BI=;nUBEo3SE%yo18b3K_@%U( zujlnGKAIjt`SR4{RC!4PnD3g^I)KIqVqajZ`j)b7fd5c;$ZX&>_V`l+^;-AWtttVb z2-G1zqf8m*iD{>Sx={wW-{}Opyg6uc_xtXQTwe4dBj>+3mz0^-Q{BcrBj1N(08Sj& zNwv${>q|pEj|((sCO*rsl*8S+Mv9!OuVnK9jo`Z9&`py-+d+Gs}+l?eg*|t<>TO170O^jC$$* z$Vjs2P=OSti`D(BT3!=%%e8{jn%Wfbyim+nK0@6Yj~L`R{9+2M4o{j=WV*^?%!Z z{#Q5pppxX%G23B$$K=WVg7SELce;qH@@B#<=go(jTWMR|{^?SxeF={csw`RMsG~5e zWkulKp2XZ);4#1)A=63{Cwj=6_D+J^&Zh4a0X}qCQZiCxv0qIg+yDGg8i=1|2M6Ku z%Uv5xAI505Fow1C=axqN!5zQ*&u5miwaFA6F8G)+iurYyeVk^=Q&piCg*zc*44*ng zh=i5kq=tKkYfIgoe*4E_f~bvb=Q5Y%uHpJmv6rD)+~v$S?N0~d)J_HT~JG2d-eKHO_`Vv{Pn93YPQCT;v-2yUVO@xy} z^IQwuahivG0;~}E>dmy~3UslSLauvi=^Vp`yD|=wT5_F!s8lL_)}9Y=PBNgB;j}lF z_;!ukSF61cYAMMRb$DO|=V$lWby*PvOR-wcWye1U8?)u$$f_A$J!KoGPPAX1@yA|9 zuXN@#Q&PYD(lQ*D8uQMO1H~2w9pd&Tn`OO0(SVh-@fWwn6~ptWnqVMrUwu z>c+}LL7b5h^^vF(dLgJDyb{HD_yq=lU>h9SFBs3JXK}>oHzPnF>Cq8{ZF8gsOeKny z>H(~YNg@WDv`@XpQMb02Gm_EC1P7|4G;ma(CXns5qE@+dTqT5HSbb0^^Y)rj_-=o{ zfgBWZ0dnqKB!J&jfT}UlUWRfcea(CFD#=3%7^(6wt%sG-q~&d$$FPOK`Q?}9+|!3e zYkBc4T-LX>?%-tVHz9Ia@j@h1x$jVXBV}DP`O3)A;}5 z>#V<`jK6*V%naQM5|WC7NGc&61_)9P-9vXbLkyv)v~;7=T|>tp-Oa#|O2+^S4AO`D z!&&!y?^@^n56^n`{=D`(2ugbficAJ69%#g$9{)s`Z`6Qb4$h|Zi1aBii?!X(L0AfE zwP~~#F=-Corqzn^l$V_na*)fc~-<#KN`v1K*kh{63NLVLaZXiA~J32hd^~qM%Hae!R=!YLp z5fr*P`tKBXZg=t5S$awOw0{&yXYAjhJQu!n`4+y}++HGei0jqYu-nShn*N%2S^q^# z#coqLPon)t>AfSRii^Gg%Gwu8N9SHjC|o(>C74j&J`1A?18{hUk*<|pjdb({f~s#x 
zW`&^FV5&dTK7pvv>Q@BfM0M}I3ljCy>zqjaQEekcmGU#`;3ta9l4Yo;(5KW?vS;Rd zWg2|DRg?O?*1`%ex>Bwt3fg7^w*zj<19qs}my(j5M59h>!FS09pxlOKdHY4XCw8p` z8~Yzoq_Z6YvM;7K;4J<*S18J>I9#X932-Nnl1fJh3;4$Uq(Y;FVhPC!(pHXEE-ML)0=OS;ME2aV1v5u!$p0J z%_i+g-3yuHeB5C-8kt%p7TUu?xxos0pv2h9T^TPj#FS|}$T*07A$_69eFhOcMc%G6 zZWkz-YaLAelauFMdBl<8tVdnXj#|FR|UC#wV3%( zEvFco^`X~xxBIn1nQEa!pVX=0gl>ig)^X0W3Q=_-aL07AyllyqUkRWruvGBA=PkA1K$C<7|DV@74)f{yYtYW27q~EJ|_%!D`Ve6FpUG^^J z2wvnBz*KrUxFnY2T|2qj*=HGqK1~P+nnj;twr9*KX9P`pQ_j`17+g+aK|47whMa;D zI_CUJ?|q*SUNFaV+LczgJ5DF{iRy2YDl@X)1T*R>r5~w|?Dc9pZC2fXA31#uRa=&M z-XB_>wc!}#rJqoyiOFz>=Yy3_&vBlMzm9fygVE>j6?}SZD9MD#NTqkxD_R!>rG0$J z0e04Wo&8R4x}+ELS(v;=J&SM@fsXlkR@`Q|RH>ToE3xpa;JuaTJxuv?ojoiFOpV9I zxGnV(r#ow%_0u~epg)!VuHCij_BmFf9@BFzpC0(nuzAzS+2&NzEx2pU7|vTU5!JjF zz_!$Ed?IzV$f}SQ@X6TMcG=nTZPS{&_RU^5Y4*i^H6{wm|D?D7fpeXWi2Fv5Ru7mi zM%u4$Ga*?b&|cy^D^;m~)cWt7qU;;Zx^l!lp7)AhfLZsb-IRjX1KJmG{L2o>e2e_n zzcoVz%zMN2t%2DQQ@H4WD}4^Xbh3X{j38;dI_R|kAIK1dH*n^x@Iex$7)Ps4Qo!%r z+YQqbt$V`Rlp@qLM`4VgSIrbpz1&4>C;aqkw0Cj=lvm((ii{%XDH*+x=u|QN3A*4ZM~7-&%nA(i90n zH>UlS_f>Svm5s8*Of$Hr+x=ck-Dlc{>h)~dnA1oDiG3A`A5HP2bX~SOBcuECKYhdZ zPS<@{#ZKI+g2hKdQ$)bdzS?Ix;sSIsFhbkOBt-gSR>SA(9y?CO`$B>SNP1MpT5aFr zw}MRq|9wib$r)viUYn7r8l>$xo=5HhL+u_pvDf=7Fz04$&Ls%~}h+zs_J^fS%XTcv5EaaLlRZ_%vgOC8GqDRXUi ze|j7wkQD`@K+IjePmi!*x(*$q`z4aZrm7a?K$`P=?rsbO*tb0dbQ zGfq6D>RGlii>KnCkEf(l+KUbwPE~HrzKX;B)udVq)2sLVIbm&~5&}7P?!OsPP7naT zUac-{oVz{^4`Bn?@xHPj_3LprIdSEf@W0Sprbp3mNUpS}x*Bs2(cv(!j20QncIUsY zCbu8@TpagGj|U!6XWQ!Vnb20YF64W*z9Jza8ef(G)1g80bMA#kf@Y-I%`Q~|^XbbA zZJVcoYVg7NB?mUwH#wrXH2um;;A`w-kdz`y9&nDwPIJ_3iOr;r?eV`n92tmU{!QY+ z(0Y`cWpfl!gWbW>%@fMpPUsSoYoB!KXN!D`1wu=l(a0Yfr1#Us6rBvYnsnwJEjig3 zJECnWAwL&MSBT}!mxf(?L>EXZEu$Tdz zNaBCr@;7UtvK3Bcwfsic)!>PQ^78@Xi-q<}i;JJ(fvra3e4n4jqOo8B9x?ib4E>vrZujO77mv^Km1qS z1n!*_8S5B}VKLTur+n*TV_}SGnL-E7lbJmRAHH}axws6w#EmH6>DO1DM+^XT`I>Hzo-nU|l4l`&2#^o6ezY(oK~PN&n} z#E~cOf(;;3j{n~FLLAP#Twgw_S`oODM0j9-041!B-^(ltw5CqMO}WW^xo+D}+F`NN 
z7^Tn{^#pphdan0Fqd8r;cX32M8&XHe4SlekZZDM^7BVeHn4mS6x#J6_*ro38H&kasX7FPd~mdJ1%pwwFG=2=BQ?_WP3J;g~q zb6PJ# zeugjrQB$$vcM9~?bV%TL6h1K0R{8`kxJWGo)HQc7Z@E?w&e{+ntdD@m~5T>duKITtQ%K1~ z00%|)m)^(gjm(i7keyaBM;LN?)Vz?Ks7ukV;9>i^3(|bX+`vgAJC{k2{TQin*B`F(UWjX|AnZEMa%X3{#K2nu*>!_7MB>WRzE1nU#9}3 z54YB}Rp$e7b6z{&C+J6%ktfq9UJA;BvtrxJCLgyOIveyOz33n`w(4%S7l z`>wL@g2^PnIaJKiGl~;Qn7RAmji5dXAe36bfIJMs7ZxycV8ijabMudQih@;1=f10w zaie2g)jR_nn@UlVg0|}mlO3M=_u+(=>J3zD=l95cJ|M-OMhf!8S~eMXPf+jj)gUgK zX<&4b3i9s{r_m1&+@6_)9|+JT*93>9WB?qM+>7_6vf=u@+wT(53ur{6HuvZP0fH~R zbm`$c2TFf55`HcOD_~TjyJi+V&ZR%Rv4;uPD(V90L*&Td@5 zY98TysP#zw8xY0-3^g5q_Rc(Gt$UF=2)ZcsW8mc0i0-x$aEVK|4{}o}drS;7<7E>F zL+W0wN~Y9It$uXnt6n2md9-P(T%X$66w`^<@P01giMXJWFTY2Memq_XDh&To!0#iI zJ4Bc1*dlJVI>u2WPjV}}4L+`ulN4E;Al-p2r&_jy0)Cf?=K0=>7S)wgMi$%T{r0u9 zs#=fv(i^V%WJrGFDHTB_H{vHu`*^y8dP}eV24mvUum!ZDXaAqaXfTi3*-bXU=ew`WucsE3HCL*6ed28qm+z|QUJXw{#4z%S`o zoLMjWDUs{eS&qr7tXeCPvgX6qUri1Mq{lM%bSP%fQ-@etox^7vrIyrDMRa?U(3>X6 zukts>vaaX{x@*lCI#-|fmJ)o%En_3u63ev5e%dRNb+)fO=S+rfrP&fol36U(s=HK^$UpNLl2v^?|e`{0-8EO}@vewJ0m2U&_| zetN{jE&$bxKF4ZUaBaXhM?9hiky_BoQ#t5GGsVu|)UxlP7Ir$jdGTTinl0lUy0w)N z_SbVLt>((~q}C-mAOk5LLkTsKOA^jEM*3HXueg?-B27k{_CIu?mxa43L|I(we@;2j z3EW|d9NxvTzvQ}#V`#>&#wDGkG1k~PWMDLa_SFkE0evT+K-ksTF|vpRUP!a8>Uwr zg&YaXL&76w*{Q8I)z2*Wm2&jT+-tj%@BWkP!3UOeEpfhJ1Sn9?#U{eB`Z!FPb1B@X z!ZehYT}LD^neBdF_wv%bP5dc~{pQk{dh^ZjX?=Y0bZgm!SSj@nS4PkLgAWwP>oB~M z!EnNpd#w7Q6qF0xPVT2i`)xh|!h~!4*R3+~f1$SiCGS=kEpH~WE3Tq0>bJ0yI>~&y z7ylp?Z`djv^Va!3ONmB0iqT*vE8E$PsUQ|Y z`Xk|p5QLS#CM-qz-LSGQRP@ak;?L~wv*%JPOrUmuAp&?8!J07uSYawluL`lLB||08 znyT$86pwLHK=dbNbWJ67f}RuCsH?903tcmNGLv#OD(wLriYDf0>>}b=fMzZe_K5=_?LbS1o)@6D7q|C^y#sxw;-&>&H)zPwsZd zqpNnJw-r1vgB6+0d0Q>(aB&0duWM#Q)gPU8_zqZ#X1{|A8aBJ*7g%vvNRGX0%bBE8 z0eqg$kxDq5Gr*&~zTBm}11$<_o{X{=2h6No(UuoSQ#vG2sB0=tTO9K;gtV}nW+>Bh ztIpfntcU$4_G-Aah$1p;Q%v)dm$x4+U+Nv~LvQ#Gg>txIz&05*g^l*>HX{=ojbO># z0%Xf2!^q3RG>{yKssQ9fUK;mX_vjuiyKEWLmVh@WK1LLqCzn*|mx-AKmLtnm>~w1! 
z6p@BG{C-SUl^aUdzf<4f#U~W;&P@-G}&nTu<0`gOroU)r1+dOuZ z;BFgs^LRAIbDD>}RtA!RwV7?FrJL8q8~bS!4XX~BcYTD!Za-Z(5aJpO770^HXXtt` z$I0}~lSe<{xRC&186)cYlLB2lXrSs(Uj@WnTU!UMzffP*F#jr+7E46Pa2Vm|L(&yE zVGPy>6zB�ReRS9s89WG>8I_+@8XKtQ6O60Q&2^o(@lh=|lUy;aOg`+ycW7f^@?$S2CEK*_w;$dp`E412ig(f4n7@PXupdhJ zYd5p}@)w)_exIP>e@AIana^N?)aoGoV&swye1~~GBhYyHW?OJ&b?4*GI5Z2U73~dO zA`vnc4XQTsU%SSg_u|0bV+$L45NeTN4o2t#tNIB}Qh7G(Zu5+5h|TLKX-1Ec zIN1el^G|k;H-9*Wp8*0jZW<>hnc8zck{})?%JlRq;oBgp!}BpmSjeM|ZsRN?T`G2u zA5z@F(RKlUy>*s4)lcft`oZj=mPLJIr<;E7){@rs%maslgT_J!Hd1Wl>=Y-GE@hnM zL+#k0(nU*K9W>I(;U3pGbyqE^kO~xJQ3FsbbKxH*lQUdG!~!hN%!`(<2Lg!^`5iW-EqiFBc8oh4&sy|kH}b~ zx_$LSo-R=NO3JQXkYwmdvm%T}yqcHIOaDB`m@9{$7*E>k(!yX$_d!Hr-s9<;fTFTb z_%W(OPXUN_n`5=0peBh;$fu!bEbb|8lC5a2qQB_3bfNG3L=`W{d@A$+iSh3C@~De$ z3ZIKBHBjZvdVZh%aEAG@52TXLfCQ3);@}?sbjGN}ze-Y~<5+Th**ar4A*V^>$apqiZJ=8X;3h{ z*`fKP1+Ow=83D3lC)I;V)KaJkXXqIrS(2vvV)r67@Syqr{>4$|H(c7j_J)!1bfEXt zYPI#XfZMiA(XC~v&?J%wwJ$S_g--zGS6p-1)3bU7fAwbji6Nv!#`Rnmsch|-(V4c6 zqhvJxb(52`qkf|rKUc>v7^tZql05dA7$XV8aL|{gV3*JuW~gzl*$WHdoZOc?8of&% z%(d6A%aHZZ!&o~f{-Vg?t~_(4i_4x{mU7^%a8I|nkH^bKZac#n0;#^oDc3 zU~+cr34E>We8s*MS0V?KWCF>;Ca?qW!%OPqdFRXH(fZFN+}M8^0O*-`7-3h30R<^j zfmdC_bhq4b*?>8e^|3fJ+bfFj5!jTq0BO;Ym{RD+c-7uIm*T-+Z4h%CC+)qo_+orX z`l_VWqzr(?>Oj2SP=)wLetd&;~cEWzUv zZVMG$mJdeaT^DbLeuv1eXkZ&K0u&O1wj&#zmX2+-kvO*n@oU}eNk@0udSEeA3wDZ zY_4c@R?W$+N(X`xSB0bi1ms`Rpj`s7K+f~Cd!^>=}pr6a zYRh(uIpoUaB&aPm1+_(q2fZS&vJRtG5%tFekL0K|rsipD$n)DYT%nwvAutk9>r|ZZ zkNS*AFY%wpt32Md$Kf7M>ZiF@sl=~-iSj}Hb^;@$G3W`@Zy_^01LoRCeJU! 
zkUy|pwtCp>=-&x$YR(#y*ln*erZeACruOJ&wfF}9_qXjk)*L4zVfm_efUq|NRVz`H z)>+Q$mAp6%pg>HMKf@$r^#e%(T^Q3aXRKwH6Yd0LMb6?*hp16XEHFp^D(Z6($XDZHf((|@2B4faO#xBq00M5)oYu=tT#**f9tOZF z$ncl8w4n0bLLbF5cUWby<+O29(=zw`R^bK4&p<>)=35s;;SaFhm#08ib^m?V>vN{r z0I*+y0f{{$#CUwOI7 zxzVuGP&xcrBi`OF^7{RKr+W!rKAfd*!$@M-R3>a?t6>|WnC)ozrW$%thga&$aXBy;qEWS3Tqdw@n^ALCzLtmkLILK7DuhYP) z9MQ@V+f6x$+N!$Z2`U}qz`6ijBR_JmKFgL{p=&Bj85Q@-oUppkQAtmrQEW+-m*f_b zl4GmFS+tDopb_><&UT3}19L+kFSZ;fqT;s_p!p>#S(|=K8%t@rzw2A`N1S=;C~rC` z>totC*|CKi162=%X#5=QHr>6p(wy1*s```Y2U#ixa z*2({vTRAE{JV*W1GGJy}DR?L=L|ERDz0W>f>}`|b)rWwnKv71b4!a7#k;VG$5~~l~A2{On`B|rWFgK{36C+6OV`Sde&`{?n3(oo}gwxlVAOVIN zL0;yQ+$#@-mg8WpG3#QI)Dl|(=J@x=y+^fh>9gar1DE`#FxEJ{C1rCQRB!WVm~_hv zrePm`{bwLW_Jvb@G0c31Y=*@}KhmSiAnBE^*-haHy;K)&T%Df21m)5qgIasg16JvD(ep z_hJq(W2}6IsF!0Q=<$Uk z#A5YuIl(&}_S!ck|ekjL~#~ zAmE!vTdETC7-XRmzK$?m^P>M0;rjim~g?N7j&pl7EuGVNb%B zRH3m+_an!bL#(rR1;WrXcoR>DIdTBcHwE2Kt27>Ea&sEf^D6Q##wc`E&X7Ibl2gpi zu!g)=r}i|6ER})|3@Emj9B208rx;0kHf7#r_f?{Rf8XuJhejN%FSENHDsRyt=zE8< z&YQS5hokl03n+k{($%afp_l47;d?MlW~k5%#ppk88y}`D(WQV=cr$&3oBFgE88=`P z+ciQ(AWQya$Mlb=meSCxkynGjOmb$|wCI(h(FRJw6H;^E_Ct8Jv?v+4H{es23k@np zWI7;(rr@zxej?ePA$RXXWr>1O$U=9?bVZ@NLe#kXm%RB&es7ooU#)1cC!gE4Z3X$% zB)`S)gfY>*CKIZwTt0mkdTnO1m%8mtY`i7}qYji3BjWSy0bW55QKBXavTjwPc5O5t zd}*jq4R7cgIYzgOs><-gCyaU(GS8i@JfEug5;^8_&G@UU7$R<)^+rZe9vO-HU+s-{uaJ$m$0Gg>H8fO3Dh9d+)dxjm(LD_PG=d|`skx^R4^8hakcRd-Dt{^=91kl zN5u`d+EyuhlCzbpsgT&9t7$JRm3an@Kn}ZKv1t?E_*mgZMnNlQUPl=dJ3I2bgN&-2 z@ZSqXb%mHBL_x_rZwES`ty9vb!8 z5V#BI@Saj2eh?lr9^UKW4Mhc=MD}sS<8j3B%=h^^AELLj@nOJqU-_R5;%5xxe7j2< zmT;cuCzoPum4HxaM|JqrGqeQZlxSixh#W9u7UmXpP$opt!#bR{Qq$;nl^8C*+7%l6 zUe9^0@3=c!G1F%+opHixNM)n04${E=HFp zStM^2^D(rK-%YpB&Bvi4ZOaOBiIx`V)cXX>22(>*Qx|0kBnyfCPW3ec{RlZ-xB^z} zHt;&=2^RC>#l;IwD5csxkE{&t9(fses9TR@i2QqZ-+1tg-{J?IhCs1KKS$09LOluYsIq&4Ec_1H5r z(#jAOv_9Y5t@9$&fw1}+h7)U_)>0}ore@T>kma}iC1tSQV(@E6nBR48`$zf>0@S4At`pl zdC7tGT!GbQH1MzMv(I_W7G*it6&o38MPHeBAbRQ!$vEWy$=R(!H!qU=Qey_4K 
z+4DHmhA8VgfA^Q!U3q*=ZsHU<%e4bc8eG0*iUnZY@eWTnh8bt1c5c9c#-u1nezvlD zoZZTCPR4;8pa-!a%L!7n)R|X*pQPgP<+eq)ng)udhirU=++j-jrJD#Idm7~rLJ>4F z6RD%zgX5X|JY|jfqOm0~d53SC9#!j^f%VXJLN*F@rk+wxoy>@}@9?Y|kpg1@yGZbq z-)L_(ID*=2Z7y|`UgF&wkxIMKiyAO#>(ws8nLN;BR0^G1z_10GM)i1`Z-0E#Lm2!J z`4Oftm#P*2L*Ld}2U%`^Z9qu9@X=9D8!ud7^RFqevSrUZ=&5(pl3rKt@3}z9jQw=Q z)6NKLUc7eaITuJH$`G@2A=6Q|q2Y!(_e*^8!>ZMAVoMEWvVrG>gRdz;?(wM~uYQP~ ziDeTClpC7&I9te4eS$DBH;gfAogbi{I7AC|J{J9MvSjUdgw?)~#(QL1(`zAk`XULl z|0mUJCo`rty!|2y9JGmKL_c0acXJx=jh3gw;zShyg zpy?tAa4C!@&BXFQ?Ues9FP+7Rw+yf{{^;3*OaAuMkIZgG{^rRhZT`FeW~XKgjx(== zD?Hb?1_KnRJz1sSPvg8+&9muNUzZk{bFQc8NWpvs>OhMF zCovE{SrakpbNklbZ=i@S7`5t$eV*}Wto5OSBkqt_qG7J`D30H4f2CEpGJg++K;Q2s z*N)`|8Qw7<0_YXURl9A)a+$}SWT4gz3HG_Vv^Xq&UZshvfg}5yBk2(jF&~)VT*oJk zurzMN|C*P7fnGx3k17dg7VY zTw78XVqXySkiR}}eUm4nSdtosbS;dcXER*wETU(?yDL0O!Nae>WfWhp-WJ9oP{hd{MZx69l1bvT!hmQ3aoW`EQtA|=D1^ew2^u!! z?`sOBSjWDSRO8}<4U(0${h1+?9Y3n)w{#ADQmRz7gqTJfKly(#G`GAVjWzLlEF z;#~uJSN;JT0hx!Hi1|zHv>p&rNDlU@n)H)JE}gK&L87&z#1I7=AA+A}WFnZS8rLd- zhT%mnG}G^HQ-gU-B`?TWqjid$Iwh7+INUV_Om;FgWy{NEi&D40R}s->fi?9XV@1Y$l&X$D zZ3f0@P*Q-)Z2q`CdJZ3`6U>IQ5DY>te{ci$8iTS+z&wTrY7}O=9!-Htut37&ly1c% zdi5`~ou$GqCOohn-@5{{WGf_iE+jTl@Mt`f$`>Zyj~uIwcZ$HXFXp475^)Qp{IvyH zw;HCHgF+7MvwqFGq!pr3gX3?42-~o4HFrcE5ntF5j^eBck5~4+m|?ByNM`%q%@|}2 zsWs+L(GToUl^}NSJ9I~lHSsFJ2u-gd5fFqD6QpqMq9#K5!D#C|URD1(*_iceZVYTT zYQ28h+~{A!%c^Jp!dnO5qJk0eD73p4Sk%h{UaZ6f+@;!XHmAmhD!BRrWD5O^v@HOd zi@to~LJVJ#=ZVzu&?6MD6uvSg%5gA61tl^U{c|Zn%H=+KNDt7giOsxvaAfuhSpJA; zQ;{{$HNH+sJ5e$(@o3-06XLB<+<*@>RE_OZ>-uc3&3UO7W!ui@A9woM^h0uB1@}}1 zveIJpUXHCwt6kOI{+ul!kCC&wbn4+l!BZ&*0`bz5O_9=W?Y4Qr1=-oxUDlZPv$@k+ z&Mcqrt-BISbAg93*KuCUp{IUM66?=n9^ebw$<7cN9rPB~)WxT0EqX&Erx}ikbH3Vl zr+xSnpdfy9E5WWG57fv^nrq1sKz*4EmlfifC5SRk$-4%MV zJB6n2ik)|^9&&FBY+uGuS@aw|Zgzy)ac`MAn(SFK%LMG1UF^Z7r>_^)TIzXC+Au6F zn_=gCSM^s~=Pvy|%?MQFpT&8ly+4J9vH`Sk&2y+Y&2EzL$c|}Fg770e_#m{7l$O6> zzu0+*DkO@}sAYB}Q)*RsrD^AeFHYr6T?%G?$Qyt`1tmK17)V>n69X)H#k;x|31MF6 
zX2?!dLdm7ykYu}^+UGBhPyazg1pX@P+XUVggAl;2wIVHpdXSgGkQ7g$gGKES;*c4;2JXTEuNFdGE>S*Gm;MF_Dpf&@#bqMM z{|6$+*nBpd#k(hdNjq2oXLpD*^7u{lTBZpLbU(yk3UzG z->%rEsx|rFyRQ$?PiFZ$VoFEx-iX?idklJ&Dy=zhPF6 zjo$e=@0K>HVCS?|AwQXEE=0$f$C)AmQ4F{S$Q>p*iLz9xz7naes?m?4|KlU^r+ci)BQb zX+rl4z8AKU4wD@C>j!qDw|y!)X1H$qM{nYWOOG-sG%?Q4m6X;1)c1!OOhCXR6iZY> zy7{18v=Y1fxyJ819W5Ur4f?ks*n)_jhqIRH!=%~!VUojCs!9j~k9}`s=L<7bu##b2 zvvzfuOo~N)=~0Wk)v*VmeglH4NGUL#!8HUG_i#fY+(}nfeYuWo;yR8{?9+8eP|L zu-+AcC&s(^>$?7?qMYTKpC^y<+}C=U`ud>?u*``$OO|NPWg&#aC7kd);J42Y5D2ka zyMuB*TD zr7r$%027;fWb{s4W`$xwiS+Db`c!MGHX_)Rzq^QWg~Hm?nws&^Uel&xWKF!&4@cMw=I zsIkRP(jeRAVSJjTtzmtF@b8fcyLssFBykn*CZ^VI#@K<J6-kJ-Sn(}M)7Pe@8u^TTqYwPJ^CnlP}_#2;3!S8{3! z`ohkB>t*zN(V8}Bo??Tb}6d!PojF{$> z@Ur@qg{#Uw51J1T`rGRG&bI%c+2duMPAa8t#3QSrK{fY}Ek8bNZ&?3hFc))AC&ox;-KI+npOK!E$l=a}q#6-fCqwOn zf~8^rmf<3RQ2evwXQw5|7s-?HNc2mm)f%VMufOPRDhiFShF^s}(6YR|RDz@ml!#!a zS;YJ1;{Jwm@mNNsKMine#g&gK`VmKFwY+tc;G7Lb%TEx)5&E5Ke3y^wW4q+k9We(l zl#AjfBSq%CwH%VZrZU&$kt;GWgSBLmj3ZSA#vFKBC7Ksv+`^LM>t9Q{rFqdl?CsRT zmXa9LrM&-3a`b4Grocxm97{9gKGgM%yqiMCW#ZQ%~n7#Wxj{ME779 zzb|XpS%BF+s*J`~Y%W1q9v!YGn^J)A?dJ8fUGP(usXKeBW=MS>-JVB?M<|sYA23wi znQ>Fh!FcCaK0#5;?sr}gO@|OvXXJ#t+YSPB{37wOQ>Ul;(K91kg_v$-FJ9&q8OGZ- z#FsZyAQ}MhG9D$D>bG(L=8TBVTp;X4#QXDNb))?nx{<=HKOP3HKaLB{ zwmZ-7b23+^1Pf%}iJxmHpc}9qoqF`9STD0XY+Fh1Xq@XS{s2_ttL7LG1fUT3MS6OG zORz%lqk;ZqsilSaPcPRPTDTE!D%SVd=n!)KL;-|M&n~Pj*kSG7b-KU`*NB{sPksj) z0?^OzG$jK(Q!{y5&6hF&x9Z-M)IFf+nf`LTWWCNj(f~cbUXX2D|3YVesQ-gVg*eh& zz?wI^xRuKYjQ1nk0}M;E))@6xLAs$I-6%2?#tZghQs?5azpIY0EYI}Rn59g;%I59Z z3kM_g4&;Md#0Q@SB=QZ?lWuAWkSjkCYa8~bY!0AYk%#dJOS%56AgwgkU|viw_Vn>Q zv%E64=Pz5fLF?cVICIIq-LAEcdHTsrzT?$PpN3_cqVLGSIk^gHrIRmF1S5uI&gV~= z=C+Zk;Q~9v1F%b}WY9D?HvN5TfjF{>&Y_%{-fSvhs$I72Jc5O~FXzvaX4LWtd9a3f zvKP;jd*?-S)C2+inUZ;a*4TM1#@2)IdPpmKhtr+Z4l+cyJEynxRu z$aQecCG_GjTxN=;&BOU(ob@tJB*@}h84+L=CH?Tw+d*lag=p;w!IL8b_#QwRVr4Db5R4K;sq)7JprgdkBvRcI>lcbnQs4W z@PIEMC&zUrzLuDPn;0l@PiNJW@2;Hn8JXKRKV{sWdmKw7AQMc5mtpaKNh==$RXb2q 
zO1GM(J#i0&R|TxG`sW1vm11==-{r>cZTrrdPUqs%K58q`32&MzntL)AOn4_n)}#NB zgzW_IU!ao{^q*YWzObgia7%oZO+aKK=I)g6hH_2{Ihd5$Gno$qDcSF3Lqnq~P17dv znhsgu1LDb;O(4?pwwCl+eZvc{!U}j@&G;ek8EL{M0Jc>+6X0(i7z&B>m1bi&pFj%> z%0;a|&z?-?LSv(r{vs&E^{uO962*d%EfADq85PiAJtYb_K$7xHUrubsNSpgFz6V!* z4`&~nb+2BohxFkga@(s`L918hXGiav4+szn>BAHNI&j#oP;p7GQRFd0D_)w%N|VRq z^dGMP0l-R1MlN@HW8vZOKnG;ig87F#vf<=Gsjq0$*ww*lpTB3i|7hq3OvO$X2toZR z_QrBl66)pAwc;3Jz94iUSVFyTjZ5*zoTde;ZIf@b2d@nL{;iUS(7_LcvLM(X`^t5n zbNy6h=|4_wXJ^v3*=fb#nvHtDvs088Cu}LSc)w@$S-V|r*tqJVM&c}k$+(;SaCboC zxdHN$bS}{q487VeKmE5hFDZms`PU{HO5v|DdP;Q+v81^BwQ_FN**fdB zo--dS_!CfBTrSc$3!#K*pZ*+NNpsKD?Wsp42W13`-T`S(e5&oVofsH@%jiLte?OXP zM_c97b_6@c=2==smZ&QTK?Yp(xMqOryL zYg8bMl8z#aK-V@DJyIEhd1fI2SS;E?RcrU()vP8T4eg-hxM9RVFB}nQY%pxyau1D$v*IVn&ab)<1mdneiZXRQE%y4GkqO!F8A+@B`uFN|CT(p-lBwY-&*(H zmp>DXbCpY^1KNmAL01ShhQ!(6it9k z+%^?C709U0E8E#Jvt?Ffj%!_%zoP>lIb=@>tueHLP>pl_sW*t~q2Wp&5K$qLJSAN} z-(YI>o;wKv`*Tn!{_HP9=)Hr#Vh&o+qv9w7L@V$>m<6ZerQX`5^3*}ry(Sj)$0k1O z8JG5a!e2sV)R$hBr&M_1qSY*P|K!CdJsHAzZ|iZC_mb)d=FYGNj(Wgdu4%O_sp*jn zI)G@NARtAwiJ;BDw0 zSh~tco5aXQ-Ze5M$f09sYcPr&nBTsSUwt&A>^)%yX?kaJatFxkxLIM#tH9SZpu^Kl zdCZsj$Gy{{D+#Q9AGIdM;|g%qD#QG8ATM~Zp~1zQ%OGG$-w~bo{bXRe1lv;#MY3et zkrs3Tsn=VmG{WjeAN(BIH-UGGIhBl|&}WK8K!rnJX$DE1_o#i0Nn$WD>i&~KjK z9p@}qD?{D4h>9X`piav6%bT_y`6kVCq78Z=rJdNuzQw9)kk*qy&M_BTnH*M__k_5^5|iDS5}s< z^54eIscx@>&o$V+enEYi4dIWu#;g69ykYG99%Efi8U^^?VbiG zz;^_b=cm5n)EuahE8JaikG3KvBFE#d%r<$V=aklL8Z#MiS#-r&uRP`u}SIQ zv`9_e7fgkfo=;A{3B~?7>u|p9f%KZbKG^oGeAYPR?;`e{_f#tyH2(G1W^D!}slFNX z;2blpH5T-Q2fZ>Frqbxf3cHO}3Gkly*+Yrb4nPS3k?yS5vksXc48HT<=YVw-0G+Qk zTe~vr9tXKUrcgk;RV+&y8BZiLJ4Z5w6FWoGiGT`}^>3(|;2@nDt~1j-dQTF_$8S@H zf=cs8fhGbAeggkz=qjuR=Uu`jFO}Ur@nj?V%i|2c4;k^N_0@{0T@1-L{+fn&=$kg& z;#9^^=if!kI9?bT?UUeY?;O=D37h3rIaJp7t;0OQ|WpJ`ds5( zRky5a)L@(GPSbs84<;CQ`R8+%P#bSsnpeHmPaE;Bz5H|0pDLbnYZddYZfHN#JS;qx z>BTerL}OIeO*U(dM4Y#h0-NxoRhNQrK3+FDW$t0sAbxw)zfvHB&AKKj<7Vo{{jLC+P_O zLs)&r-y87X5L^FKBv)m_u?KSX9=0^Y>Pz#2B1qdcI=9sm2#n0$ks`u=_Yh-QL_Lga 
zcXJbol`qM*iZZSRlu82atSY6?7VZt%A(zRO(q08jEO~CpCyI+)tREODaWp(0VsZ66 z+9UZ&aKii=$zT{ufpZRZ?C$wimJ$y4gzHEQIu!Vr&%c9``A#R2P|D%iwK1++Y`kBl zu_GT#0tgF?4EaRbhWj_WKZ?8SiNEHDB_xkQpQ*b=h)B%E1zA5X!QV>Np8aT8=TTC* zV&>{}#gt4G$GN;kS2QT&75wGdP-1UDa$p>uy>C(yILVxr7vv`}EzRzHt>eo(U~bc; z=df&X52j!RlwZl~h^Np>HhR!(5_bjfA63X~={S^NunkI-dx4h>8KZr!-Y8UyEnq+%>^g$>Ezd2A7LBGNu z{fl|I0K&_r3BW#Fmu9X7oAKZ<8iA;PaE#pyF5v-9g@Ld00(UGC)B~ykRKxuCih>hS zfQUn}Pqc5L88oD%Z~nc5ZHZ`G)Gc+*{P>X%5DD2C$S|Vcx`IJCdVTlQHCdxPpJHTk zNtKsl;mI#L7+09~sxC8Y3EC4{*F5Y<2)->p!=W58kfQ)h<6I^%^j+>%nl^xavdMu`-_-Z*&1mNQ& zFLiS$5fip+hB6*B`hcDrw^`fClVC(YBw}VenrgeH;~TW?`cf$Ywc_xZnVF_wr=QSg zpoi1Mwo@I-Rj&6cBH@^cUh@ePH0(ZOk$AWwfAQb@>fnv(rP z`L^VMzvnzM$;Xqc3Zp|Lm2R@q&lfrcF@TZ#4ZG#di!q{Rj&rUn_U%&`7kM7&Br`(` zk7l?q2x`lsNsiN~V~rWpGY$=C&B+m8VTRo_HwN{uyb~t)rm`AanLS|s>}{6r30;kG z+dSQIsc9)S{~%GBWmL_LMGYnoP{y+V)d#GRvj%A8^R~Tm52HJ?CFG!eAN94X`5u5~ zmzL|4RxLZ1+jeBq-Wkw)X8U9#Da2PEzI)B~q0B<tMb|yO4{l1r)2JILb2VE;*2t!f4jR%;u z&=%>tKB_qshp9Bc2V%FTX2eE@5n5|wOHvtBQ$jyLCB56W^H9hwE5w|6yn zto?3o$XR6g^ib|*(9J&SSQ*Y#*-DgKgx*$Tm)IMz7oZ%qTI^Xc^z0%N%=OlpT*xQK!?MgE3 zxdj!<;JPP=ehjl!r?x6{>I4b>I0-oPsV(bjHF3CH5^mp1CIbtcgOcV3w6wMlG`MTl zPqRtJ8+P-8Ydg;GF4A1KxyKul2PvU!njwneo^Jvu-Q83I*5A+3#n&4e=OyJ_D|5h@QSUx8aY+S7 zrywR9b^S;j@L~4D$n(9T?;;$JBR56q#R-doJpX>Oo=t9sjIz-; z=+49Ck@G;4yt?Q4Q75+?P)Tz@INLa{c^SsxgG|ri}Ha*|s(81M!eo8ZAK=g-#F>(zyRT11aL%uAGE z?fnfSb$GFD-x+4jC-ES3ZIY(z&n7X={{t}pPdWTwMDsgM@{9BP9qHobPA9_cE6unL z*VY^nkS5p>Y9U$zvIA8KF}iu$S63n(G(wIEQ9ISaRiC8Wr#A-swjS*Iydk+`GClAf z6)Qh0LUKZfL2%N1!IwnSprOxJByJDYKREC|w!;>C2P+~hyR{BB^H&)D9qIK#u|*R2 z$mDZHTvNz$OE0JT%hY@~#63Pc$vgZp%fXO^LYc5+^J!@IGf?%h6jq5?F+jc^j5C4I zqaXJvk})+mYmB&yo=;G;D?u2Fo5k%O@ZYGf9a3VA#L@f59Ptn0+bWvaT@~kv{n^V_ zRVDs*m4hI9`N=!|{^lPOW~uUOE{1KI zCFJ1Ptb;rIAQ@zxP{aHYz9(qwdn?GiZ0=Qum zN2_}$0|bZ2XmiJ(2)RCLGF;hBx@r5@E?vF~SG?~E*R9qVrYk>DC%6~t*lRsK0Pbe2 z0X5V}pvngz5l6mTxGgBwFJYHh1qr^b=D+P|$Fo#;iJ;x-YDrborJqUDn>tt4Og4%b z5louN<<+^se=ovlIL)1NzGfD>X>w@Aq-)uA+0d1@^IFR|Sw@W|&x?4O8T%D0 
zWcyhnGG#}rSH-l|qur(B`^wm8j;mWhZ*#NiCaG*6q`2?T3gp(jX z))-csSijF4jl^^KgLc>M47$gg?Kij9RuiohFNi>qJXYN{8+@gHpb|{Byxkc;XS%-5 z+C|$W+T1vx)wDI>^~fqWiao5y%5po7g^EyD*ktqGLoODg!iF)st*(L{PEs$aa>F^nN=db06(_vx@JCk7Z%EmO@bh6_Lf} z-X^P$*IvvL&m!Vsdvl8#H^{p}(+(fqk-8wnC#Plvdc6p3cZZRk3t82;)@+BJtvA)z zS#14{)?%RmdP2Hcxx=T(@$!EnfGH%}#Mj=!b!>I0I`n!CWfyXl*C_gZI}xoIx`kOR zva&18uCc3M`N_81iLdbDN%~i>`g+uo_zXakekXc6AZvfOXMXLBSw`N(q4jSk%@g9r zn$|y4o!c1&s>!`Wu~3W}zr(_$NKhJjNuh?Azpw~y)ErbKig>gs?73RBM*g}Hmr&=! zmQb_B@>X~;KYKvEZO%SrSY(wu`MM^#*&}_G_ zfK2TGM>kgneN82PrX{?l_It^#-|Qjw3L_2!@xQbi5d~Z5=k5fZmnCx|>kvl)^r9J7 zZF?PRQg%Zmzh+Kt9uxZ-6`!rn!cn)#l%GlbF5+>(4?Lh+I%9oU9CRiShac$K;Xud% ztukJ5AsShCe&lHUr(JP}L}$pz;ftSpTG$@^)YQoU?hs#fPGON87ry-6tiS1KitcEk z|LD46yx=DG?2AZDNH7{3|L=$?lR-+1Wr)AwofqSysq(m!D1J#zap%`nl`hsI^w;`u zfw|+QEe@|TT=qi9Z##qzW_Sh9ukOdxydA@-n(;gXv$f&&L+5SoI=@4L%|OXtehdP@ zq3L^J%@c!rXnfJGi)n@;nzC=B`keuy3l2C|NPltAWpC2O zQl0z+iP}Uh9bj8LI8>kx$BwC`K7qY(%A19((`({{7d3@i7Bhv;^`bft{9Bl0JGY1w zR!WRrw{bCZy-x2W18A>NYZt$)q6Q8aLKaxW+xVIW>C<`||H$jRn$S7|p}ogU&;LuS z$A1+?R`QAS!>+Tj+JoDeE+$gRy(gFFZAks1@Qq!x+fmU9Yc}Y6nT?UK0F0PK1`zyh zsDH5JkS2Rm2IsuE3VOtb6zo5u<@_H4#1t@50bq7V4aBQaIFafy|l4 zC5jpBT!(Ri1ARv6iEVXtG#M&l#5889*#D&vK9PmyBS0mr8UW8Lpdo^y8#A*6Yq`+IhdQ z@4+q&9Hmxsl!h}2#z9#?aTx(`D?WhU&i{DF$_eplJ@bYNk zGMgY~qF&jQ2yUT~D<6b)?!ywH^Zj51b-eteX=&gsNxt3T%Nr(KU@g!QC@@z z!kVO?VQ+)@T~>X`uZ5c}wjA`>64MSsk6 zV1UFUbMcMac7r+y;D87@J7nKXRdF+lSZFi`(^fRAJ&H`M{P^bWIn&{!xDSYIRDDp~ zxkRLiwKQ@R7|3w#P+GRzEtP+x^?5+1j2-ey$&7Drf`8101OTx7d+l+t6&8Pu1(47O z(>nR3yf{uxS8LYA^?y@s{{1ouzRF(2mup`So)2tW1G+6;hSfC)0kihTr6zMT!ISev z=8%dU$)Fw&V9|*+s&2Ee+a_-7E$|wrzyacH-9_L}tl1ar^-Q-U0N9%fqPPRoOR^WA zf%$}4o5LvKE;p(wd+_JdCm<0bbp4+vpG48YrcR^Bh)VSFu_|f@F^pQ6p;`QVZ|*tX z_uTs=CrD-3D9Fr|N#?#qUk3r$*}2qx!k-B33O#8S5vK7$n)!?7$N&^3AsP9X1xN!IaMc9rm1s?d{XgHHf3*$}E%}@}wSxgl$E* zScMia$$Hh9c6nC9#{FRTi1E;!F8g?y)5xaN?>hj}P?u{e_F)jPX^{_0kA5?&Ht=GB zdHTR`CfZa=1ex1@tcEGMyRbu?jLRZ#yU$^6m|WypmB@1ue$LSzON$!NWSqu1v}kst 
zaZ@bxHXHSiH~JDY7jo@3&vE;?>)LKH^u+wmzWr!235$&U9|cS>8k_YmCvwh$=ssCG z-&(;QIq5{Qn43jIzl~61W#(Z`^L(^9-3W7;Y?8atU?tYA|H%moM78x-#9qZplZAGy z&}Hug@jlA3ZvQM0S;tj1|dYb9y2G6<8c#$xeC4mo6Sr}znPa{u2wQ1T$o*L1h1;4p zN&spF%P|E1c42;zujiM)Np5zJbH7zf@9}?{X81isXIa7tH zt7>P&X@F9Bsn5915|b--%QQx{9x&6N84-Qq($c^Y(^Smj#4OwN)w>1~2Sk>QVUQ?Y_5O=69f8 zy8rLav;{jPNK5wIHjQSZxrC-LJaz3)#AOJJ-P+uhhs#q*4H5v_+3K~|eIUfqH0^9` zWayYCd-<9_tZM>oZjcmV(TV!#(;P8HKk(@7fmp+?g@@h0l{lp|3b8ZAQf_)QMVt=2 zolgAPcKuaPVmJejb07GN!l%JrT8_j}t~W)dx)31w@AQ~FmZt}~Z}mH-?*5I{2j{EhQTDt3d}uUIyVnW7*ybUJ;?9}Z?%od>W0D}9%T$`;uf?5dLI z;EUo22y>d%==pIi-|dySZwNn^Z|qS8(E2+}%uWK>%e3NS4VDI!HYqP%o<#in#N=_Q z+SMdM`(cl(yL&*q`4)CUwb4(hMv!fbazS3AATNl(K4JGVS+d{DELmHQEwZa)-^7NcTU+tfEno=^sJ%D3}r2tkAt`V zj!X0sUO|j#XSe$T8^QG({&pbUtaOdQ`C{vjW}g;o=?@9r@#xeIWU&#-w@&mirZZYD zY5NV!&0IPAFX_K*xB430|HqR9WMP~PPT=S>wFulyyZhDfwJ=HG7cKE@E4BewgRWp$C zQiZS~icD#-R=9R;t2TlWO7^)o@3d5V1X6253}6Lfg(oX8vQ7hbk)l)D=lTo>-y;8v zep+v;(3$v~(Cnltx_D+h$Ml`QS&ZS5Zap7nF>*kVz3A`Im_u)Fk3@GrMosny73S?s zmBBr0sV$~h-ZJUT*NAo5Vbk{hdX7cG!MDk$uOvtWm?(@ai4Kc4Er!W@*5F3R_>oo2{`yJ=J209$t9T6Vg3CLF-*mw(|X6YY*ZCvH*1Nwc}5XxjP+4c1jA{7)9R zOB62?%F}1f{`nqUV{dxI_*B|1?C}{`LbND^i(x<+)KS6_-@Hbl2)QCClnQLh$&}=z zQ%W@(OZky4{v2l^PENAE#X3EHi-uuOXlr_6M)09bd-LNgM%_1|87A{N@U&_ZgjE=3 zeEC3t|7=&hYV!p>JEQ^8>k&mLXGIk~O5a=_InFcEqyamcLgn3qI2z0`7uAh|G~*5| z?(NT*GaT-=S#GURig)$0zXwFD>nWq<<)Vu}%&LL@DP@J!ZxEV4rfPT-7Barc<*HzMSgK~2{xV`VUG?nn11?f)vs?Xx?M5(IZo1Kc=!2BDVo4O|$^`QF! 
zbo#aji77#ymoc~J@Eb#azdoSw&PGB-;|35#%nfL4o+^r|%ouRBDO-ea!qpeo zp`P5Q>0XuFTRi8f%gD)WMEJ8{%dgLJWoGoME6Q&VFbKlD3#}}g2eP8&LYM7+M5mU# z_@|S<+7FpvyHEEY6yYMUw(o&Z%%U$B zr$%9sJ0mO_^s zn`dXjd6U|^+Gm4dk}EWQBrS8T9vda{rmE%M{GMwmreW1Mp8ON34#?__BhHww6fwk0 zT|w@-)hAbP>$qP2v%3s>K?;pFd2API$d^8R`|f6E_R6vS=D5-KWFRKhSnoC zfjG^n>SU7ckRb7S0%3>(%tr zg9=ySFEC`PfJv;Wk$p**>K;;5dY)S6))Gc+l@*X|v}xK@3fVh|(EDY?;CL;?!F*ET zBu&evLSgKGvAOR@M;aOhtvYSh#Qef`o;h!o8Pj_3t@n?`D3k{Y~cujepPitv_fDujWKHulFil=hp~( zEh_h(e0Z%GT94{#u#Fn&%x01FOs(kEX-Iy$eK(TVkFT&}XOi%fmI0d7RyZwl-vo`> zl`o9Q)w^1DsI7rwGjtccj=G_8BppeeMilya_!f~^XN%;l&*rqUnkbhH2Kd9;D7Z-; zN`5IbwO!YGLQ5o13Zx5EYYqAlldpMexTqkDwB0}Z`Z4gSY(JSxP1AYly+AZ5F8-CK zzf7`>{=>=6+;;#3uxoX;y~w(9O~oIL!&Ml2yued9$@113lw}XV@E9MQ2ir$Cck@BbPmHT~+uY7>R$x~q z?(P6Ns~jBQeI&p9bhzOadx(+=XWCyWaSbh9t|GXbA5An|*U4F(f zN!lo<*vAyUIePS#?R9tyQ#PhU19?E##lL}A#>K`sdd*MYHDrSKfW zYlOeJ>g5Ask>}$i)}9vS9OR5RtB>!gt#dpY=^{&&poygV6DkT-l^Q;)5w$mLY4`-Y z$m{|5?ibzss<~UX38H-L{I!sB)~9QI1CLlMtbozc4S@;ye7^Ia8e$^&0}?Xp{OS(` zgo_fZvZsX;%Dg4dhV1!Y+4vT(3*(njys`~u_@}uD1c3ZX>$%bU%*P`Hj}Gup7e4D; zB+8ra9PZRI)SPR=r_+ISD@cw-?hih-AMYfeI`ix)Xy=EuoHMmfcX-M1!w_i$&GjN2o{w4OT-JWMdPmB}a;XWOtcneBTeE2QQ5@m;8OdAqJY(k7EC>Zs zlhj+DJi(sk)~!{hJfnI@~WMU&h&LK=iBmCwrpIv5Fy-KCJJmnQfRd0k1I+6%i(f?dzbYD~;Hw^>8O|xE zuJha)ld&#d+wX_4lRXbY<=t+$H8SYC`G-`?8aS~8-S(rAx1U7suK)a}7vQcKw%1S7 zA4vTmys+VqISOl9VM93SOtB7Yvr(r^ZYiTb16{-$QhLL3w|KX(ztXyJyMer3M+@d% ze-J4-PptM^cDd4Gy^eJnAkZNLf-7qM5B6`+Znt?zm%Brj+bQ$_gXm%?S~bu0Ztd>0 zd9g(HrquUr)JYE;>PvrNr=hvFE99Qix~l&1sE%m;f^ zK)FfE+bcNAY*BAeU;yMJ%7M(ob364 zGr+5edtHg=U|Y6N8~nDCiQubWBsk zS_?Q#zm_EdCil%9bUo z5uSxkQ*7ULQS?FS%bYm}9czy_&JdPs<>R$47p%BG!({ShYn&;I*a$05+AvpLd;v$* zYm|*@jiyfRsbi| z!I2YGtXLp1IqrY_C2YtG%^sxa?cqI$EA(1H@;fKp8;F`OW#O_8e6zbp3@m^>29+Qy zBrRau)B8LA%7`)-zWx%9wLusL21sC*dLW;j>(K`WY_#^2nFhMnZdt(U1mc1haHq{w z&}JM_$u3>trgMlg<>K4QS0wd(AohsyOIJMJ1M&2&!&mc!{j;xUN9!iBy~rcD0))XD zSElg3T=0B5fNvZC+~T9Op{Fg+Kse(uWKxp6_$cLw-V=g7HF3gQxv&Shy(8YzFTiF> 
zxZIjbpSp&+$?yn=R8k-o$`M5SL*REzMnBgkCVOj;JA?e2*bGy|rwNeoDP}Cq;G|U# zHtx&eFEHG?ns%A!XWxh&6O=nxBh$X>CbG;TYf?!+u0!1k#dWbNb}YJe(*v=Wp)Nn1 zHOYIt!@5bxaXpXTlsuE(N(Y|EPAjVNaEG#R<#*v8$~8Y;*ve;?Ur^$FB&*Ci3QblQ zv{Cr`A-hmwl`Ro|tCQlWSTQ*Cit8z9kUo5%llCh>#pe%t7*FXhxiZrc9b8-8xw1$x zOzK-*$o_0p4Ze^xGEbpbFN5_G(Vf2$ZIEsS=d&%HAnhQXw8Lkeyu@dYV#w{;e<{^@*&I}8m zUOm_vlPouO*80Uz|*lcJEF5@R+d?ns=C>M20_H|s{*qZzZP`+xw&7PT}|zo z(=PQBov+%^C!1j9TjXiUb&jj1DF{t>lI4ROoPal`0FvlXjtLskJ;EUP*~!(xj8b3Z zW(W)U4Srs1;C9~3+#swhcYxm)_)X$K)OE>mH*44B?J=TW^~3_Xdkf^AGuulc7hE;6 zyQXvRR-+IZMS?Dh&ZCdnpR@jveT;9tGbli=yip_Mb*bwtZ~1y>BXS#cNgDlTk;wAU zY}5l$Iez8eO)A*%i}0Q=EFO;ku|qp=;cJ8MhmJfKsl_2REA!IyDHDn%^|@CZD;2t` z8(-1x(<0dZRjPlzRGXkHgYh*3toq!uX|u(zFAojjVED6H3D|5%gTZ^8(+dB$(9E04 z+bE1Beos81eK?5d5ON|~Dt**=Z7sdq{h=f14fPd2D#+TED%~%0{Zg7@wp*Dz^v5F+ z(FUoI0KMxnifpz8p7w+b9}S|Tao9exG4wL)&aIs{WBsi1Or$^nmnGoOd$hjni9RBR zR|=_@k`lJ}z4mQbGaC_;vy-MhXI4&b{W2}m9P^8%IrO^gHW`hZccT@w->-_QzHQWK z*62K~_5HIXUJY7eg#P{eH`v&c(a~hde9;jJI~zp@bzyK(=b@XVh=Bn;%=@n6demG9 zs>>);>qn<}pU}c_3j=^2kWq&yIdbFVP=K%<3#)MyPz0sW5as>Z7 zAI1ExT~c4$&7_9}j%bx|LO`m%_5%t-*~0UwABeAO?IgH0(udEjscfxYSmM>=7D-zWdgWe3jxjemw@E$+_<{zW zqnzBQ_J&7_)Lx)f3`^3#%tyCX>e+5_F^yR8)~6V0=6y5&JM5(K<4W#hPSu+iEXr2e zDcA0k1lrTH&6Nf(P0q6#%{S}6f?zSq59UVQh$%5W|CBf$!u#vd?U!O-b}1iZ z(v7mOeMh#6%Wq~6S@q0dYm!O3#sq;9g(-e`k@ji!-jC_w+?jUWCy#4b{#Idson_AO zs_p#BBFkx9S0XbZvB^WYTu(r^MK5KoczJ}+{`6C6*Y|PsgP)hv$?8IX4+`+vH}QvH z7pt!A9_w{%8`ZntF)pt^D#e@-TC@(QTB+dB=NV1RLjZ;Qeye4)9g26pRQE6FHdb1^ zdL8qiH}T+E?0fjMK{ht~0Kqz*-alOZ(`5^-xs}hJ{pedghM*0TpuaRSOU(K%Xu31P zwJG8E+s{ZR5sNM6S4N3PSslNW&;8eJ49@~n?_W4Up$n_+MoE>|iB|-x{C|=^k2f4l z!rVSNkQN2mbxz=rv)FTYR|ju_J_t3MVd~kkZqBM@1oR?h|62w9-;!!x2synuchYt_ zNRtsqtCEJKoFCbmpv7hLiG*2nRvN$bq&{jC%W3zpMjk z*Ktl$fT9oEpdB__bZG&wFVp>8IE2cd&QJQ~3j_q<25-A3J$51pq$9!yoSomw6RHya zpnkEh=0gFB0C|d$9tUIlb0lmsD$7ww5zI?Qn(!vYV^iZr>a5E2bi+BM=PiPNAJA;6 z8uaxv_jz(Ry^XE-o}OUPHXP<{ch4TH=z^qtli|+q$pubKLCWobav^QLt9qJ@hEBNNLGH6eIN^ph zPwfeznlo0q`LrA>*sP`>VG?{<^Az$6mPK#Khe*B=dsVRVH_NVH;t^3y5zbRJ=>t#T 
zxPtB%%OFee7-&Sxs=7OEeLJ8CZAt8BBrLfZ%4OqxJPFLI3B?c3G>IJ^}CO@gb?MM$My>| z=#ojAa^S-IJm3bIF={%<=~c^_qWae|@ww7G7B>EWK{CNtQqp4AL#>wuxOgW6LnUG# z9{Zv6BocO+kAvBNVsVtQzxUZ%DPzl!?OgUX)4Ah`ZT8ZTTG7*qp^R~nrWE60)slK% zUu#m&nDKUiCcj`kG)8=*TPogU^A!o1dCwB55+N25HJ>wmT#oN2mD*q)jld*$0^F~A zVW$}WpeZ2uRk|N&Kar~ORN9KPMthJxy@}0{wmI?L6S6M`C<)x7kd6HHyhIZS5nqA| z1t@+-0kB*-fGdYRqdN*CP#fW2#LI-2o0_lp6|;0!}tQ`QG>1)XJnGek|DF zE;|gceUSERLj2n>2txlPZ7NwgPjQjgnn;(3PE}V4^nKB3ORh=TP2C03)CO%EU;%GN zlyX6yd*kHJ1B!fkf+>$w)r}?W8CvJ)H9jA%(*Hnj&QwD<{8xfx`kER1En&dKzZ5n!d)!6Jsxi&l{s$!Ez+ULik~!S$dSF+ z^aQ`Z^40%IZ+^EUib#{an?a9OcTT#RwpWWcE#?N^24h;guC-C@weaygNru;Wu#J)9 zuQ0@Jb;s|{i+djI( zQ!@RiBM(wLdeEL{mfrkj)rNZ*qnd+31P+nLhSyF3t`z<^^3}4m2|mdNYg1(<%V%iX z`csgc%IGFnrt1S{_-x(bHP_TPnn&nuj}t7_F+e)iyu`~myp2| zRV!)Mm$h)IqExz*d)x&(wlrEOjZ}A#Zsx&$Fg76hfg8hj4(kdv`V$KW0;WT3o&CC~ zINZrUNu?b=<5C#l=~xN2;nC%E*0dS0fd8xpm`orlNG|5KtvT@s%! z{rx*86@iO~Ybb`N#p6j!kRpC=g8>}nht>pYEebpf%zW>UmSaD$#fT{n<|YKq+|)8z zM!|2kHLqLul&Bo`{Y+r^fQhRRl>NhFSKb|5T3&(q+`k?b$~Y%iIu6XaqzH;%8k;5L zLuH{;v&$6&^JZ3IH{Uwk!cxX^cb|XDD{^B-f`zW6Jh(DcVe^-t_B_05d9uVSMK{cv z{c#=~IK8cIA;M!^u$z9B#7YOG-)c^NtV`6?!v`wR2^T`e^b3WSaSkj5Km|Eu?W!}0 zoaJU*k zQ#jO!xxq{dx}5zj>(u^+ZX4h|DH_-35x2Y(NON8|wi!~vDk~=>F{Hs<{Scqlh)gu( zK6<8AYLBVNb0j7*`;0d+ajt0_mu@UmvrhUSl< zdG8IjP~N*^0GQLE&;1|uag}!oEQ@ICD#y_dQ`q>%aZ{s@CA`;IA@dV_)e!83h~*n}7mueQM~bag!!U zL)DPn`ycJ?18DoW98Te5mIpx+9 zIWpx{Q+^bh9apMq&dxi>aDi-l*bpLL+)t4E`q7kgCj8CN7*~o6!9THt4LJ*I{L4SS zScM>z9$%W>{<%M5;hYH&*&h=p;);A|>D~!{fk*(7#;O6K4jfkBmX5_jDNf}v&U&OR z_6XK~rc3EPE<>vtn~iWNN2`X|VfG#`Qr^Q8`>LJDxkQ#STFCf`f(&In25QFos{#BC zJIYj4ysnBa1|Q5o{*t8m-(!3gEFHtzWY*8S*O1sJtV;PGGi!mS*lMiTbHL>mg`6=t z8Gt&cvX-1U$z6%U$5v9qovH@9ZSAQ{5f625^<<56XF%R^QjabZW#gv>@~gX{&IC+V zfUhDFuaQCyEj83~6k)|R(=Epy+G<`_9WL?CV*MzSk6V@*3s$G!)w#TMcLH=DpckeN z&XE{tQ23@T5+9(RkGmF_9C`sENj+cOh{QD zOD#b@xT|yC9mR@w9b_#^F^%dym5~f#aRtaJ{4^sQ#NQIv(}g`gdNL{9LS0*BGuuJp zM+}v-0FPzbJ#K=9FReM4stl#H4XVCPnn8afLjXkl_r;~<3ueueMh*a}R 
z{N1^l;u1IrN;|8|vOJj4qcPD2{*ALD(VZknch$%%c9|tRdb@kKl@>xG=~T7Z4?3W2 zN`|ShH4vM4x%KybJiA2VCyWP+R_n8p;zV}_~XCi>zD4K`Y@Ns`u(-p2wc?g-O(^= z*!-dweczJUzO46%HthOIzYgpUcLkaIfVG_q=Q5j;X$zOOW8^a?Ndva7Vdk*78-%h8 z-6&bKqG5IlJ;dC7x!Dapa$9VI|2v7#a~YSp)4_DtYc$2s@NPwNAbQWDP-COUsMPe1 zT}y!XIZtEW^*>dV>|J@+yv*eX(ja2Xec9lautd`rKit!k;F{NC)1bqTcc+@$wnaCe zTPz;`^-|0+HO5}TsvF~DUE}&SH?UXm91Q$6I4r|%ihgs32)hXxXqk&OQf=We$gfp8 zh%Zy;T`fovBGDNSGVMF(nD{ zAB>AxC=o&LSx)@20Py^MY>lyYRkyQ#y+I6!Hf_y^U8xCRyg~r(=J5o3Gf3O%`mvy# z;gx4TI$;ml>8j2*ZLT5KCyl((Tw!tn<$>7n9elb04xjH7uz9OGs)U<)3U8dRAm~OB zOeM~1QVm51PjXSazI<961$&be^0&JVIn>E}3l$2Q7X;f+(Fn5;q*fHhvF*>>BJ$cA z-Y0iGqClO|>f+W%)=KhNFw>K=asKrs-tr6*o^z5K1rEWq!yFaQ^b~ta_^)gPpY+c= z!iV4=a~_tZCA5kHjI0az?y0^CAuWi%g<_d`VdL~EY%iv*ll;L?iN6uF*w2tx!lJHn zLgeEl@l`HfciX^i>9t0#>I&wh(eg#UP<=b&)KvL8Vw>_MMf7h*ovY2qB?dvX=h}up zSb3eOPXyal_0>iE)s8Jo*H)XaVd02kT)Axf!Cb0)VPi z9WKjF+$8RRhiCU59zHY^G*C1U{4O}rp>@*1sNDI`BO5&#bwW>ShiLV(Ll{=}f1N6O ze(`d_r-X@@usih1+4yR!sOf6jaOQQ|VOy6#zl_MI0mr7ax74&V<~S+Tk79@ff8hxv zqt8oWr~^~_LF=yxt6L@z(4Q*818Z{Wi&(Vv%3#oVt|3Y?@4>vxmkyqF5!$qh8T~g7`CyuOr z0UHMKSwV)faYq2vPj5v>aJCIjx!!a_lMJYw2M+ zNLA|TRjkuK755k1c>I6*no~U@xE>T0pG;dsSJ%LBrujuTa?h6GN9^g;VDj4cdT znGY-bX(q_LF_+&aA72mJ*N6oq9|8vNq@K*FCZyNUn&}GR zW!k%2Q9o|9Two?VI6ZM?-9Pu@y%)!3aCgbUqq-#VAaa@?d3D6G;XOJLTqOww1IxF2 zA;zJB9WJ-;XkFvEfuL z0})t5v9NuOHKdhh2^Xgh7JXAp$EVv@&t)1z9dKD|C|-T*DqTy6*`ip^Z*{ze4+k`q4*H`?3FOqK?GjuSn>#&ay)FoDPEr zOZsOYH=ANTmD3ynE!pQb&K>Ro*(uh=D07o9k8ee1<38j`BSKzAAO{(&6{+Q8W6=BpRenZgOF485sk*EEYd~My9)*scix>!&}A0V zVXk_`m6VNa)WrDhvw!_Fub=SqiRXefnkV>Q(!qzHksXr|t=C>5hlu3`?=3 z)NIN|t5@oY%ALQ|7G##IP*a-r`xAl(^zMkqJ$K^r?~<@oTELoSl=W_o$7(@meqGCE z7@E5(X>QK_qW87FYx|YxGWd~C$`fqs+F82qzwJm8;W95eI|1k{PcS`({EnnsXO{-G z%dw5YS^lKH%g^Xe&&p}l@E?4g>Mp~p>Q9x3Z?(UWxVkiOWQ970G z&LIX#C8ZmryL*Vu`Ed5yXRovOUoaouS?hV8`@VkHuR#gjz!aDD2gd;ye%o<26HD1Y zf!vkXr-Nm^z{=gLXyZ+qgGMe$-6z&I-f?Ge)K40jmr6tSQXzo|YwyQza#d1&h6D={ zEdpSHI1ytOp0xU1caXu9yVrHr5h!u;pHa~Ea_x*pyfAb;H-5czLc$h#NZex#sn6=k3vVS>}*?cOR)Rz7dx? 
z`dpalOES^t#2xJ!u})Sr`y?`W`_;#EJSwOA8Zeyv?0qG~*vIhMgRPauRv=*8zslhU zGWJi5uUz{oVWclrS=;2_cB1{(@ZpDWQI#6AXa!qpM$>5v3;Y6FKretPo z4IF>1mGkd_Nh!s~^z{XI`Vzoqk|7 z?3dA;P2$f9y`3G~3?JCC?Hif5iuknVHOTIGKo%&md2z)1%3APme|^~T38x%hmohal zfrK!!n7WcFM4~C|3O<;t41NS5l(5^?`zAdM_lya~(>An|J}%XxbC z?dnFRRQcbyM@OMP|EQof-)93*)#F2_1hv0QN*C@CMf@0Ez1=S zkbzUy#O_zI^!e3~kWY6?CUXw9;I1T2GH3xYFy)qx0gmvznwngOIKDe=wB8!2JBuoH zwDeUpA$9=JVo5MHxa1L)U@Povx)=QrE;50aE8ixjLNC@@;INUfw~8wf3`F&@j#E{r z+`s3|Lr^O}kA&C!Fmtimrz4Wngia2MTcmRfVnmc8N_UUIsKBKvdy!~+u_t@g_Z-Jb zsaKgH_&M~%C#oyrphGE{Uc<(g2Ob7tw>FlIJ`T*E>HqZ0JO|!kGZ5=IdGaQ)%xG~j zWN}|$Gbkfyj>!vf>j6<=**6!K1VU`eDafl#K7zpjMRoF>Hx-M0Y*%Mf*?tvsC;qH= z@SvvU5$)D_YqgQ1fUE@~Kqfe75(OP9FyDoZh5hFdjgKf=leRrP4hMJ$1xKru3T>NH zg)8*D&}k_an}>s$4w5mg(mNHSVS0#D;(nQQ$t*0VVhapG6kKlx3fbuLYRR$QdGH&o zZ4_*)yMh1@J78;t=Z)`6=!>d@hVvXx@JcveFBb^U29eS!3_AzaiU544{jU~*aLd7u z)Nage*f^9qN?VQVEkWaa+l+)%m$fWfAX$?kpCmNY+STA;jN&KVzW#i|=Z^J1UY}ZT z6((*BZ-O9RQ*%QD!kQhQJZ5;gM4<4-hQ;2g*!eRx$tU~3wN%h4>`Zg@>t{9;z@2sx zp3em0@OwJ@R$P+jS&MM1%tz4~z)3WYLFHQ}QvURp6*q(U8Pq*d>mGP%*l=MPu1IJ)+64MI>yVdbpAADzl#h$Sr6E4S3ejaIzsXR4lRzpJxLpC_*arvyek_k z-MfdiA5@&^JF&(q1H5@hX)yxwfOl;E@z|QbElteOt*&wX_weE=^AF$oi696!7#q-L zwl{k<_wEMs`|@jQmW&>fjU6Gzf%X2|{3+1+gFl(+2Y>F7j=8(dX+wR!&s}b0@`*iJ z{FTaw%I?vCT;I+RkJ8E@qmLzMiJwBln%dvS1s6U&7x>e-{aKbjMr{ia7M%|YO-v=RXx%TZ~ ztOl&F72gb!Iy7?noH=qt>)g_7J_~AR7KVj3G{S$#rE|W|`>+~1EXEe^p>@^5<&oGo zl4ZY@gm$c3kSZ8_iZUl{YZN_@l;hH8l>uWZtag^hiN^wHlB)VD4q$DXT0$@_SWfsZ z7T2t8OsaXYtb?TAsOL02g@i{We#lUozJM<#J8kxNzn5D)z>@oXY|Ots6=%=_uIHVV zQSF>!j0obIeI#!gj|c)-(Rk#{SQiisxVC)ZV&MMDaGPmS_#{HSS0MPrso-|JpaH)W zK5V?eXslP2jE@tssdaxZlai>yR_~mm_tz_hRFbzzg|5kL^^|i_$=ER>sDr!sV&2Qh zmSDPK=lgr~acrF2%ur4k_0aw#;V)hdbQ9w^c69!!83QH!G~X^Q227bUQ?puiMoW!` zo-CswML-?4cRJ>#uVdhF7RBi(+mUPHmeRC;-BBH zw7rTBHJp-zY)RUk*wGIzWqC~btx**szq9g-OYDl7Pw+Kxf0GerC8ZO%^-!50DX1&V z&S^!@k4as~dTqz_+t6;4xgq3$2l*GH#Lv7w&r<)7{Uc9!<&$SlJGFA94f#-zP@D~%GCu1<-{BeCu{Zv!5#)Qs~nhE$XSFbK)?{b@}Yv5@D)h% 
z@$ic#f|@n-FaZh+p}2E@;*|98S)iEeyE0}E!<6v|T)qvcS+d|IqyYaD95aQ@TY&_> zU|9{BRsryY{N=+ zPCFT97IJ;(qme7t^c_?}sv;ohc1={uVV1d}bmLP1zT5fqNY6IJBf0 zAJLb!zUBGp%tc$4D;W0Grhd&a3#*&7_RE#Y6oZ=Ai(coc3f;A!x&+g#v9=FUjlWLY zkgBWal%IQ)u@K)nDG(xKVWcyxIhzP?aH^uJf@61d^ZDQBATP8{Q%A}iTH3;u^xH{* zPXKC9BH{o|2|#CN;E-MGDO;n9z>r%pwx$+u$aKwkMma@f@;UkF(EWN5F-w2m6mYj= zJ@WhL?97vB^#0fj6}s~!wz#Mq`C5oVVB+VJM91uubo*>*=qy~T)-@K6l)?Z*k1ssQ9F>kVp+>PX)J`92HG1uUmN^%O*y=G zeT9&=fu{~kzv9Pq3qqyx-GD({Arqo^rIR0U&>NPk*O@^&LA$WPrBC{3N_GA_aQfXg zl}U}Z%end|yhPM1;780z@x#^qkWl_@2fhD8?!m9rzIAd`Z}y$wrzdSl6PXD{59crPbv_E>UOHrj2x8LVQZZ^x5&eLJ39+|4m-@t#y99+7?*+HX#b1K0iepf98T|>yZ*l6{UIo zEY(9NMnb3dP)f&Mo5$E*xFSlj{Zf5UJje(Lg$ja2c_eQvko{33jayk7F)OpWY}$wv z3srvWC!}@WX}OhyY&F&*t3K97@?R|u&e_4>AC2_%bitIWA9~C1_ep7gMajS8_Byt- zf~Q|Lk;5;VDG1YU!cTlk#{zE&S9M~a&;DAgc9#0Hn+U{LzgzYmHz8bTnpCv*!K>kF z^$+%)-5dLT{hoG-VGwG)AW&ukOKbhod7C%_+W!&V+ufWmB;GN%bbVKPFy|cDR$g(` zV9m=_e-Q&YsMoK#=xu>j0+^a!6Z>Z6EZ@}#a~g9)^Cr-ysLFAE7Oc!wpaEW^Nv>(t zz14Lbdqx_UDY5?17kOy0L$2sYe|Xr;zY`=%v@Q2|aVUoRr{8sF+)~@R+%6)?{E`ny z)3MdQ!raxCjwKGI(2cc#D$wfSEc4ZhCN{vsz*+C?zq&vDqBD0|yRoBux@=Vg*{+jE zme3xD!dy^}{uN~x{&=i?TE1^v&1;DRKq*V14ijdZlo|A_e0$hPIPJ|6UQm?1y>@+{ zuKVlkrk{=m=D_7SvGs6cROwcRH|j^KV=q0=H$E6Lgv^KP(39%?8jz*p=do(^GccI` z+e)`Lt_F4Gj=u=5^Ci{yauXC*v=~9fvM5cS+sfPU``iSnAsoAPyR4HGIU!0iw@)^> zbCCV`(Ug~D9R457PhJ80c_@bZSCda_!P*(;i?$o~*&ILEG%hR4yo!2nmAWF7F^B=H znEjK@U2Wks|Fu%@NCGTP=twDXMF>Fp2X&mV)Vf=1T5YsWod}p!dIA*cdQx(BWNUMG zH5dlTzdG9>UUT?pxcw9E@eqr+RU9+yw?W7;Uw>fow@6{xkH8mJ%da1J`XD(W2-dmMLh?lA#$1Fk23JXz-S zPZ!kr>qRJa=XcB%q8)zSlx5r$xrRYM(4z{Tmc<#qrnS>M*?&I2pTF=9rlIcS!QeF= zfu+zT1)UwL366es!*-%g*VP6QV?3Nc5B8HB?NcEM>0YTi`g(=cdu$JCzh{0(hbn{N zAYd%N*9EJDG1-9f)>M3(;_W9c0xTO>hJ~=KyzOziNgB3S|iPN%w!)5Armi{T}CjU*o>B;FkWe?KKg~=Z1zt1}ad|Vu-6H zl3qbiogp(A8IDcYxiE(zGj_z63?T5%HJr7teux;PkMYyt#Oe`&LN%$?dqDrd)27c^ zyfd9#9o5ia@b4zzInh3>Bv_7T@`qSO+C$Id%4z9X8Ufj5DfZH(!p%1bMB;yP;@hIyTw(C3WH6Sh?m{K+V)DfOM@J6+!zEFUr+ ze8l;@En>XV?wlq2ev_WYs$Abo--Un?g8DePF3vVQ3Z4znrUUnR$1`(%$Hx$`FYjP| 
zeyuervmxI#{EejEY&4>d(xOH74Jv9iUR6O-gRV7x7MQG)B%cH1q%qI;1|NMCAJ;^u zf=Xi?yM5S(4rJhh&lY(+qR2ywZdzrMfgR1}>B!tub^-(85q?X1VnzXqq?nK^Swk>0 zKkKoQ2imWH#bK0IEC+;7KOLxu$xrwbN0p;VqFly9?h}fP=etbg&=uOecFi0N8qma< zP9)hY9kEfH@8i4or`&VvVNu9kizwCoqoeVK_LKZ~u21Ux*`&0=g@n}W*AvzI-~v*> z>QK*Z-mMHIgf~Yxi18UMht;PCs)8U1u+csu&>79@r9pu@=zASH7syh4=lAkuSW&Os znX|vX-A{DrLdB^g=9`m=4;fkPRYdnK&A9!|S9$W#|2~FaAl=S$YJD4Uu_$}WkWQ^C z-NEQok6jsMGbcKxLA23j{F=EO!uHwZ(o>EicKrm$O8c&J;MD04Etqf8J!WK5_1TN{ z5KN`i6hs0msNZIZnyT7$auqL4vs^r5!hD9t1z>pOe$RVwh&WTUX=lbokE2@f%{|cD zKHei>(FZ!T>c?Rod6Uj;70DkGD#Qg73T)(YHsj`D zfa~d$zy9f2dxi~gF~BZ*>3BW4ruD9jfhxy0M8;RfrkYxuc-v+;C=pENZ!;MWOZ}KU zk7+Cq%$aSb8K_5W0$=XEBtI<%I@vizit*_3{2Bh!w_##m8hB7y)iXP-Ngo+N$BT#V4DPJ0izYEvGLdr-L zx6RHidfxvjbzB>cX%G=#(n?Oo{XZmGROP+;g?YqxpTRB)) zuC@8IjPm1`f7U?m0Ent}ews5j2p_~YE=aHQ{PI~&?cCr+vF^TL_eErVV-NoAqGDlL zV4Y@t;Pjz^ePcz(--aI4tZ6Av*OO=&t)N|B<&g@zA;YP(!n8K%YFXg;!db;-ss2Hb zo#XNt4czstL}zXq&8!0Fd#3XMC;9roaEbKhE6-%h%G%q9=M1`#Ee@87!N_LuTq)`| zbGm!0u|y>eugDv}uXwXBr!a?9B?+!>bg`ekE+!$tEy~qj=42o!O*#yMVW&c91kex$ z2j(M$y^`i{tQjf4-%B9_K-YZVner>H>I>{`u1%B6!qJLm;l6RU^33UMl#w4?Q6oy* zZl*3j%+Jla|H4OJOCHRdlIj1Df58BGk8XK1{LmY)9e+|1svd~3o5i<`Y~^h%e9n*E z>q4<~E{)~daqRWL-tKly`QPIo|7Ex;ZH2@B^O#y%;Bv?K(beEGq3RUk->JRicbtB^ zdOu)L`1<>i?{7ZaubJpT8Snj7hkNtl!?dv-fie8)mM!4;CJ5IQ1;3Y&L`y^?C=B*{ zf|QGU24sQ(A>!H%T9mVF<_M1Od4*L%g^qJtTxIoX8DBlfS2qf~X)sb#&V=7%zyx!h z1`A(aqZtDQ%i)6I`}`r4euQ-R)o1}Z7n>mziV_ma19dw~{Xo*8!+eLwDP(H?i^`T9 z^r4X&DXLB(RL2sCMdXg#Nerk0raEHa*9oGm@a$W+e#^rs!`FOE4$o(VQ3v0vinv8D zm}RL1xX#Dnp^7Wmk^y6jpl;lyqVm=b_W2?92ww)>k)d#VM%T&n}UargIo@;2+xVOdL*LPzGYUQYmkcN#6_e#8NR+SuZzTv6icN)l0ae3)@In*-D5nbNl}a*9Lrzc| zEt#<3hL0??iyL|V*cfG;|MDE%o}h=`!UuB@s6u<_I@ws+e^p@*?o=Tg-b7~H)p?3K zhZlN0nr=XzLmoM5E2Am!?;Og#c{>jFtf*jV^68zDbUn13~J!(i|MmYci z6m?e77iG$u1PnqQ+YD_RXPW80;<*`wGFUFp=$!{p-^5Bb8KSJ#le1G_bk1$$BA5D? 
z(jX`cEU7^q&1jdh1v@M^m|;=;>RM^6((X@p*?OfLHkZ%4`@{!R*_hq8SAk=j&Smw5 zBN>hZW_yOsH&&31;S0}Y<$t=1*@sacx?8j45w*GY`;bh~HGYacFw=l5Lq}!>YPaN_ zDk(BW{SrllvgdI5>;{X%VcB?XNzqod2Gji-0cjP}8u66OAW!;9+%&_&AIqE{^{VdD z$$jn(aK{UPzg4XUya?2uzFgqGAXbu{o~gTxwDkKRD}$Z7e6mHOzQQ&_H@Kjj37<i^MibRIZ$1J9U@&HsQXr=HoHGy8bFb{rKS~{1EwLiXPb5?h`C{RLBN?Dn9k09rG$b_!5ATa>H1}2Z&`86b12l zC2v@IGkiT8?co%#VcFqWaYn!a#0RMpsS-9PU!p`q3dpUJQs~vvA=P}eMO!pIN>8*b zvUg@S@(nP{?9_~34VaKPl?(=@j+pg;*Qwr5H3JcSpR{id%Lfi_H?+ThJW+@J0oy0@ z76lyM2=S5~N9X1AY5rMP7t+)V9j*+;q6~Q4{$ZE8?5JJY{&%U>PxlXc;~=(?k&!Jq zkAa<<(XEWWC^By%yzVGdgo*61xdEv*u`9!(h1AA-*`@7a$_?4HpF8ZTY|&XU163KK64(Y zBvSPiUin9u&h`y--D9YlZ}aTF>!G$)VOePQaejlQKKl`FT;>&C_`f~#)@q^cNe?JH zUiY_beE~b2acd>g-XvEeKH8OrKF7Lf2U6p2AkLD81TJW0Vs3bY;{YFYsl3u3zKG0} zwo9Krqz0ByfvIUfVC0PeF_ur5recA=-2ltz(Ol#mC-g;U{2wB(_~%C;m>8k5Nn|=p zL&*x-K4ppm5T;Nt#Dr50;rsX+v=JH)PKwT>qD75fa9Z_5)jiky9nT`B7XKy3aYl+( z!gEFIF_>V&8k301ro^Qsq#p})^Wm(;9(P<7BbavYl)u*nF9IbV(eyqit$_+aC!C_) z2ZOsXg)*CF3Zcbc#}GvxE?D|{o^6f;~L*8x@4 z`FdTo2dk+92mV{Ubp)6|fPv7vBp3Y=WQm%P`=)_@z}-p1c}m`SH)4sb)YRXxQnIOW zYUcEJw#g6eqTfO4#$B8-vfvv?W)`_tIJ2BVc%*@ft( zDBYyqw39;hL&ZTwCUnmoc=Up7vhE5yn`HRuzM<84XOIH>cDAisb8UR%U*Xmba_iat z`y8xQYnweq5#8<=;V6ga@)X&AQZFM>=iO8`k$9&Y@QpS)a^bT8&rnP1$o@ z+(7iErp`9x@c?r63nMUs0<|FHbq!YcfCao3=Hcg?dl0B_f0~yk_oRt)vTU{+CUuVm z*gOB3<%?Arylv@`$ z(=P)giTCLFmlbwkt-pLwfIYfU4gHqIx(vTzK{WTI!ANir4pDWt=;5qoJhpg-& zdA5IbrlAP03R;DYbk4iIVBmB~z%LbvZ47CatUR^5dDrj+3|QNu8)mS-TZ zO}?kR{aed)XJ}!y7jlV=;K5Us7)W(^Q|XX-oKm>8L;$wnrWPS6jn_0DoMLAY@p(@o^_rM z)K^rgMM(I-KGjsXP~gCO6A!;t4@cso@jVw9;O7ge=X(0io?Fkb(ZT39G+5;3W=N+a zt~%|2EdtIDJ!$`!1<*$hQy(baWc87mSP6j;({rU>&JB;tE61}Nj{$o&OiIe(L5cY+7eVp2E%!Ze| zDo&z=`JGQD4vb{8a`Jurbh2_~fU(O_XVivdsM$;w`tUrJ6|Q*MOl2&od=+OT#?GbOo-x#!+bQ0s{L0m7w?>o z+3e+Z*LwfGJ>!J+$AN=|Kd<W9A2gFu=H;8tLBd3POAvk7`+Jy*eD!o?m9lK_~$x zRwZ-3Ij+&%tzC!i5m%p8d)cNvR3HvEfJnzT8+o%&81p+QDT9a9rBg)H|db&%cCz4L`W7KeEGv=z7kRT|*kU%MFMi+PFr)OVma%*Qd6wJ!^P3 zl2G^qg|Fy~)SMbig^iq(D_fsm?tP)?7&sVPQ>#}}lbGbm=$O$;I=>0iRDqLTRJPL{ 
z4oqn<{Zhr4VC392u*Kk&`8ze}RdYS|^eO>aY6u2s;Y4r#V` zY5b%@@ly~#v#9@y)7h;)3#X1n9^F%PnIN{#S#*w7QE8=o>ROWX%0~GQ7R@f4_~lTa zSqA-nsZ-6AqqfoCIhC>grrsZr5h=#yIPmq9ukzOXk!G+8nNB13FmI{iKctj)yZVh% z;?hn)>t-JL=9JWhm$b7u-v7*t^69MfS%Um&@8>?vBlGFWua)2dO$vwEU5u4&o{!PY z(x4z?U-)=5U;@s^c6kd z(~+@Vb8Wg)2o=OM>4_@X;@NAGk`%d1B_m+|71v`i3V0Q4R%H%aYw8dU z>X<;!kky*(d<%|31&F`1<|P1yOuP+Z40UyHS1$lbJI$Vd3B zUuTSGe4wY`>vOgE5J|-7*}ve3w&3y3|CcE~qG>3x%^9k5q)4G2)ZH&f|7`tve1Y{Y zn%=vKlUvmHpa2wAA_m43e1?+nTNEq8nieE1Z6R@DF~LE&c;$?u9%EW~UM*m|>wwpP zm2LhbId216rnT)ygJ+7)biXE=uf4^uf~@CO!)jZyyFBlcbb_@H>+9k3j%!OT!K!98 zMMJeL&cek2ql5jNWje~ONNE1b3-+cU{}l?f&BN?L8V}b#Pb~?| z4@SLuh6_dO$d?(%o}fPElvygX$KX-p&!JfJ`@>}v*6%ZN$#nBvR`n{o^ZYh_n)X`7 z>~(}X8U<1c#Npef&HNFkNjQfFpyZ=SP#r`66;T{e2DIf~8&uCFEUQuNcY{-m76CNO zx4rdWng{|(e%`wCLm~97IM?Tx_DyR0y9Hk6fcR`MGurq%$)l$Fsf-N29C8#oIjN!Q!5yF_Z^&mpT=vFzf>OOdt=(64;K5#y&{8wE7AR)PV)$5N+Ygr z8c;QKq|iF$7ASjETgLL1>{dv-hUUZ zrJT6dTkG=8Y+m&)(~(7ongSnd)AiDB7x%Nl(2M8O;Hfr-7&{=PCBLB~xZSF#*ppt6 zvst)$Q}W?J7!|r_L|2rwmp;wYh%}Q^`CXZn7`hprw=fV@&U8mtK zKo41rqoO{~w4Ala8+939*nQx1JmohM8|Z^bpbpkg@7`(f=8OTQvvUEO*HmU z?Wwl-OgL{oO}0qSXl1(RrEgGQ*0bpu7?O0k)VJNr6U-%!Jlz%ok8>oM80n1{%!7$qgcL4oZ6Wt zlh_5l0$A3*z1jLm|vRU6+5q`K-mou`41IZ1jCEHBht8*fUIJKF1{RSRV z)>9RNL&*OWB0)L4GU}HGS_gY<^bqBS_)q*;;P=e6t(8Hq5xPMg(4ST2LwBT@ioc?P zF-3nXjY|RjEUP4vS7Q?xHC=7t1W$3yulLB)#`(1MB5l+=sep%Qs7d0bx^AMPP7lmG zg``Q8Ws=+??nCYGCXw07w^;dcK2rpr)m2&F851iOEhOWaYuT;|&x;w5xKnYOvmOiz=J(T=NGcM+gyJ(y~ikQ7~lx~_jO^tFq86<|; zkfG?nhQ@1g#Foo$w{jd6LFSHoMsQAQ19JF7OI~hjcruUVQWkP0>cF2|dL}7p7%s%? 
zn6q@2u+L~#HutkVII-NKA8QtuW1IhvoX%{>KaSC?xvB34A3~~$pgbB&%;k)G63=sH z9E>9N^y(GY`ShnP-B+{KL14t<;@^e252@&q&e$t>AGHbIi*`+Iz(K$H!G7i6Q5lYo zfE&6E3G7Q$gF*8)B36aZCX18yNx8kkHS{$P{>w^McUKnJM1NMdPuGtif^h>Zz1mY= z>Fr7D;xe0~JK1V@xGVDe{CE8SuW#nR3>=Tj(awj?`|uyee$u)7IqP2~)uMoL-d?Mq zVMv|b^U$m$ucUz~u%(_Cx40ge!cS(U9rlcUnjIxb#GT&>n)R_DOZze09I)v2Ic*3L z16)dNOZFZfd_&V{IWI;B{{4fIyeisR7G-AOFJRm@U5NrPY9hg!8Ui9*641oi*;JuQ z21cTg@d;HXlkPrK@@Knza2_J7-at-+gnpxz^@N=EwIC4sf?+`eA%cljhr#Ksf~iQ{B$~Me)Wt47B5iRVC?JsfUGdE~MYNORdTgO}#GK ztX8&PNqNA^tWb69=wajmvbP>y64$WQ07Q74eFXp5B!ys-U|^Tv!SDKCH<_|+i2w;^@s1!ve>9RIdg(JZU2`b4J@?#XeI>Uo=kh2Qd%tp zb3Kf+j3yl8#s@ok`1XDG@dow77XcgAK;YW}I`(g+zJ_z=i zc)-vKzPYmYWcDDZ#Xv%^kj;;?&Ov^Z85v7(VTFf1q(C~uVpYajhOGr^5k3)wOS}M7 z4Ub4R7cl)bW*b<6lPm};XtuLZ$jMM_ewC$K|CsO|?RqkIrhNj)hTSoajiR~^MX%92 z^1!Oq3L?APDDK|c(OH7fikndc5`73;$jXmT6Q^0w?%%z^M!KSPOFtP6=XS?R3MhcM zL+w34lJ6!2vJ!RY(GN`G)bU?R!+wN~wFOAP8PqAD>5ZPv)U1OP8tlef5FB0rlgiczN529%R)UZb;Phid!(oagsUk z;IiD)+`86L0iO(gtw{)-*bJzN%(?50jRuX9@qMXhQ&d)vK>-S1VJKOC(!>SUAPTIV z&J>lZ+(y$Yp1Lvb^ZwOKsb>s+IN%0SGk9c1BnO5ycA@wBcki`bcHHyhYMDzc4cASc zN)*Tsn5}QWo+j36xr!K~h#=AL+P<0Ya}D$WgD{ehW&f^ydf#(A`}A4{ zVRCQk$%pK`E{{D1GC`%G|VJPjT-u5H(d3PTqp1Bm^WF*uK<-40z?cV6@{Z05zuHJky7q(uH7~u#? 
zHGH@kjkaQuvRlT1w&W6FonJ*iyvqtyvfuEZWO$NnzK27QU)Jv$KW%xaco+#=tnn+- zW;>F{XC)1NvtattaOpYTqPmO5FK^46Hb59jqE^Iqfmw^cO^dv{ngI$x?TTb(qhJ5RU5|X>w|^Sv zhJ-sxA>_4pgXWG-J5R5@QpbRsCr`J|s|a{LYXAI|vr;Tl51y;n(WrS95MW!~cKj=R z?#U4ppa0aev*qCoo<%!n>}uuQyl%PH+t@l9fu3F2#2iO@-?oY9O~Ja9aisy`~&JgoRGbb0C1FO2EBqz#*Ve&JQ$ zEJdj9aPfRzlxS1e{>DkJhR41tyl(%OL4^C^a*wLk3%|FU%2Tgtzvo-lg`Zz#jLYyG zWg1Zr2u}$oBv8lv(s-=B+^w%XTCTV~yxe*i)^6{+$LIb3n9=_8q`G=)MUuG7`kMJL zEv>rg?A!;JPg{;6n-(>0Zn?Z76}(RLYC-K30|!Ykb-v>=IhZT=JZS8gXlVD*o!tgo zqc$9h5(=R0+duUMr&*%7L6NTM!(d&Lmq-1bGp#`VBiP8EsS5I<8av(tzBWTuhI7&uT-sTWQG?PtwhJxC3i|$5#dGMP*0B+-eLNSan<>2On?_%Bf6V!DbIyzVuvEYtY!O|uP4qL1+>!Og` z%wXXd^6)F?6EI8ee^c1z5%!G1ZSGyFFq_a?BURVKvEu0AVFI~-(Zn5`Iy{|uJQWU;ICHD3JNI6iWUrri;Pe_Ww+})A>|TATOi~)$;=!|qN?_X z4w3EmnJO3b4EdeQCA0P>a^_OXR1R6~UA*VF<8;;0@+#nz4&-1P_%+wOlRBm10M7@# zg~9-}cN8Ml7+kJgAmxPMut8Ld77j)?dbsqOiy6?KGvDH=OAa_KoZDTU^(<-$LXQEU)s4FLEo4|C zGc9%{X+aJq`Ow9~70FG-UmSdW$9=_~R3=3MtF(AN!--TKrjKt`#eSyHB2Jg_waz{Q zRe=K@U>CEGKzG_Zw8I;G-?7M{thBF%TY6$Qf3tbgsZHt_w@y^s-X6Kd1{hQ;EpJj4 z*QaMUO`KvC%8WO{8@+sownXaJD7gKZ1cS`%ruh?QM?7e+8x4>98fqMV>m(;~f(CNh zi2>jl%BU;2RHm=z%f+5l#bt6fMio?WE06w&>HJITj>7lVmEB)lLg=`n^2)2eX%aGw zoUUA)CgsFdd}Im~);m{p2Wxhp+Z;Ci>l@jMPZIuKS3idZkAJI`7}QM`RBNK&=#dp6 zQ6O(#0T6ohJ*)GJNk?Oa`M>`bB)xhUS1`XQz4)|}IZV7<|K|(mYo8ppp%;*DW;}N4 z@IQM3njI&d7E?d`q?P%xd7Niq@Rs5g&|2=E3W`=UiwU~W-%HJIBRG3uExjd)CY2OQ zVsUse%TfN8=R2tA9iA>Cf82dcKQqgdp*qG|Iamg&3-jFbQBzdDp0jUsYIp)h z8!uvFn3=RR;yMKh3O?r4hLh|qCD;LRjW|&_2fu1`R=~1sxMgpi%k#f>Z2Tnjal5g{fV3?3Ytwhyxq@Bj*wY^-mzDCf0}V zFHQSv+Hc*!X~HK!)vamqFscV~k?7oj zF}+NfZh*VUveC?wWsk#Ytx2E*o2%t8cKa9^r|pD|Nlxu+K(t=q6%0%-*cRfX~b+J}gEL1)t?S8#K(qrp)xBy0rfxDkM!ajRG!0~g>?v+9J0DGZW zeT4+fAVlqoP6BD4o#^`E>hUaaE3Sn5e-(Cm^K$cD>W@2KX+JU|i~~snc+)SfMXxxM zrysI{Q3LnN`^0i8R#euHPyx<(RCdqD3*q^g<%h0@i(L=P?znoDw<1@l=yaSexjJ7$ zK4HH_2UxfJk^{jlx4X_XaSFd|dzDp+fAMWD`QBQITJ`!G<2 za)v_Ym2p{Q)WZ~L=g*GNY+6+K-e8}eBcY+T&i!i5>cPxrs-igx3=y@vdKuXaB|Hi) zlP?2Ny%N5n0Pi;AfVz>eAra~QH?y1Sr+>`&Xb_A<={Bl;4-*0-+b0GPWl%TE@4lfu 
z_wkzPT@0zw;f^B?L-STc$QvS`F~XQ zMXuzLF&uXSR4iPbIzoXP-_Z-Owz#^ zK$SV6)5|dETt3)ZP|YknbnrF~Whnu-1eIDXXw`co{} zTA_bb8l`9fW{;0vR3DxY@-yHl_7h-IAgUO4QyStW7+B3BGn8;{-?!N}N3Ff&uzbJf z+-N(AcQ?Z(wL~ZWn6g*hl8yQ+ET1zSYHKg{OG?1JDtMxJ(q7hX1WV00tyh|!r0PKq zTMC1s$y1J=ntVlHM%BW$IC(Z%<<4r(ejkKGhDqrn;7Myqb&)Tl8ITe?2UpGGUC?L$IO?gxt;CUDZqs?O;g1XqPqms?^85X4Fa(K|yYalQSz_la6wkpC zFg7m!SmTbdKuKF3iH_4UC0snNP+i1sKQ@WVfNQI$f0BSla-p}93`Bn_reh*St}t<> zD9xD^tz|O7&|KV1L7_-n_6CF;*~;6+4+ny{E<3M~VoutB=z)IVLACYgN6_dbI?;dv zb4p@I#$P&Oh7?q)T{IM!s{NqyodljmM48(m7)RBs&Psn_zn#lJ~$K)LsKfW8`= zs&h0>B`27knm1hRn(J+_fBlW5N7;;9Wa4#wTkZ3Aw@KdstsXIB-Hb%sdLnkvqQ^&h zpuRQRc^E<)S*H>%bD+6prAWK3kojAu)7!?m+j3zRYwj=S zV;bqYKPIhj^gI#HX9r}z0=7dUN?hwko_@aBA2fgmZjO&k#fAdS z#UnFuP_Ci3t+)8*y$3th7RDjumNZG9cn~D;I+W2@r>3LOaY)h?u^woorEXXb_Qf)MrWMR&o;T#cE0wAzAo5QeH z)2_tX0wq0;Onfzq6(vZy=Ml74#}aBQlvpT6$PYALb^e^8wM$rk&4u@DV*u&qa_@7`bI*D1J@?=J?0@(7?X}js-e2=;9gFWFRn+F5aW55(qMk18 z_;0}NBqOu@KPg&DkOGQz?Cyk+2ALEMRbQHcB!0O5Ar*lC$5^Tsa*S3;>Y`mn10-S4 zdm*jc2v(vVZR(?vl_fsut6)EItzCQ-^d%p{Qog~3Yf37Is;zOX#(tI^t!H&wpr!{B z&`RW@w_js+F78SI2}J3Y(fn`#SPDCA)?r?xaZw;sTT+}$_fK!ad{-KgdI5P6P%{_I z@%-<7ivJtM-q>O2+9~9Qm%z zP1ieH1ztic$;s>7z5pEOQj*=Mt65P#mY8sM#d;`Ec~BVd=!x;5b*;vKumBhO1WAey z5Piju!|h*FwSLliWE{x9`nU_=VA6g!0N}znD_Cujw+6rp69&Cwv(jj$8#SnRQu9r< zvFw^;TN!v(_Lo=-Z(-J*rSFN#vh@YmvOPJM#B|1W8-^yA%aqHstc#YMgs5GSLrQch zXrE>Zw>z_tYcPSVa{2p|F$D?&>*8Xmhr&bw2O0nMUs35E໮pq)%TK1Vvn9j2Ap2~WS0{Dp{RJ5xF`AgyU3lI z-HPuHK>(vr;I-I-AIEq8(c2P~C+|$_*6@<0p!0RnB4>`m6hH8RI8{2+e)FRxD!^U=<4og_3_nKrE>N2FNcHS zm#<3&&wq>UNFDS>m>zIUhpzrf>Sw*aLm+i8L_RqAp4_d%TZS8VQb7<$i@4or?u3EN znPU2luT^_LXHO?wecFIcFOZBZ7=hmfEIk35Jf6Rsdp~4T=(zB3{n&xJNt{ncq0TQC zltuZf_l~i9?G4A98y}JS+IgCwucDK%seYDz<{-5qrY3QId;s>=T6MErz0fNJv>yvi zR;hZX-Vx(n<2bgp<#wP;#GPgxwC}PRep47ntpA&;Y)PQdocJKT8EC;jN*TZAD2`gS14O5?$!h?3vwf`gVI3iW3GS) zyc3v#l0mqk`f(!`Z=aWqo&84Df(&^u9#`U~PfOKSKWLy>Wz3A@dH}e>Yh2W%c)2Ai ze*GcxdZ)cJ{8z1StN4Qc#ZmHj^I65CvtW)7f&eA)O3$h^*n)kcexry*mJx<}Pg2hi 
z>k-;y%%X@QN6>m4Q8^Ru+M64v07;j{zf3p>yVTrKbR}3T-C^9J)pCDNbRh*e2o>Yz zd1OGr-D-^%VbDlJ&sSjU{W~hIT`zep#lB@Za#_3Y(n-}k)g@ZRiDum8gZXb)ofH+&HqWp zdBIa;UQtNOY>s)mIDV(ZUiChx;{|v2UCDXZuc5mxK9wq*k#xt1Epk-i%mH0N$w?4! zFD)slk8wG5?UUtN#vfW}|7G)geAm9~v>SDhzpbW7lh&+u58K8KS!u3-W30Sav(8LW zrgF*Gi>STdKUL0a8n!+h`ISO5QNiDjR`76d*5yC#++;FIFngCjW3>Fbb@I=}VZ8r% zm5SvR8S~GKwrJnoo2H`tcvrK))SFHAg$}NJ0;6p%+>!rQ=k8 zFJm$WH)C}7$Eg>RIi*LLkAc!`fLJbkC}Tay@%GWbcYo-wbXG&6sZMVy z8oYmE$qc5~_;^8NEe{Ov z&wjokBJrs4|2Xo*c!4x2c#?r=jAVlO{BIk*`X}sPL)b?@dnk;#&!(s`->R2l$kMg_ z*NQ4ImG1z5EoVspJF+qg!7^JS7)uf)tJ1YB7ntg-sce}(X98U&As@zxRcg1mno5V* z467KRSiBXFV5v3T-A<-MZ^QsSZ%qm}JB(=b)O|tK z9J$55=$h}?c&k)i8)Lbo1BPc8{n_Q}pqDmVSWG5)N z71l62SdGE9vM(k1YSX8f&s!sh8$mncT$1Gjp6^zFy`;NxSU$@E^rpMpQ7B#yzw$Ix z>G&k6tyn>;8u7**u#AFJf>&ipS5`3fk@mz1-{_oa@mDNbx(1`(%pdzKl`N|~oNcG( zBzx{gKS^5Pa@p07y7eJi9S~2ucQ!csKEu1N(#BO803up&lrhBRZa-f9fc!``f@vM7 z7W8nT5TI6l*+U{1ssGZQxG+LTjJ&dqgc?IGgpAtWx>A2|>FlL)+44Un$&T@*#D!hZ zaa?43UKo2D8!nTpn=k?U^JRnT?si{bkJ5Xg_2WhRIcSjE`a_Y}0>~X*nC{|rP}dXg z8T*Z!zbrd@eI|P6#CfzufpiS>hfZaCQUp{~l;8A7pcn8{qciGN;hK{Ze z_lw@bLY;jM@HyJf)Nu`jO^sjS?bydu(@2A>=~UFCL5^=kBHyPikyYZ?s&fwU8< zI2SO%Nf@W{kfj2}O_qe&U+tca0kDMVxPiD2C6r^?sM+7=xohC0|9u7>WMSD#_o7+E zJn$oDhJ->opS9{p{2x%$&stEl%FQgI`?gDIl>AESesHX0%EGzgnjtqnhVQr-0+MWZGzis2xrQEczPmuH2q2fwkjCPo6K*#B z>JOVp=Cs?a;F4`)lXY*&?qN<|dhpcSLFr;DzSzeICB~ZV#FFZMZHL+mtmO}s%eFYN zJ{hXx$b{)-DgxS?JzIi#NSoRXjJaBPuLxGtP@oEZ#jZ`7Y{PJPbi$jFSb z5q$oEzZMi|g+tim>2YO6WIMQBdj*zAF8ffb6c>n%?WjVS`75wFHvEEpsWdAD{*rSS zbFR{E@1nTHGlr`o1qX|-fW>PlL~SS|(p=z5Lp&<>aUHKJT>6ENdcQ2a7q>cvWRc`>(RW!lh3KJ8ojLW{m#IrP{a8>+IZHo$q6FK#u$8It_V zJyH)WBx5LkAIHzSuQzEThrN)OQPe|m_mWNK@N&H=7F|6RCY2Z>Ka=linwo*Ge^`iz z;XALctGXK`HRFx?UQbXW{FsY_jysP$p2G}dTh#DxQ5pI$Q^{O!Vzh^)sI9Y1BIi@SMtV{R2;%rZn|2*-&J?fxH z@-}(b_Ld;<6Wzu$qyOcnbbcsmY02#OeDJ&4y1DP#ud3J=oUi5z{Jr?FAUHqVV>qwz zn0@?8|09xiTbEl09s7mIDCC}0*WDQIU&T6bN$BkfsG7`C{`%15aj zh_@V$h}0`?oO8K%j3fOOXJ65Pe!G(sKgrmrs%vOe*$!1z9vJ 
zWFoTdzB?prO~a}_Q_VELDA?8v<^%!xP+G*BwmdWR0iY7_XW!XeQ8;KkzWsd$H$Z13 zCEUN1L$@t-_cJXHSyQ0}k=k&*;{G9E@}RCjP^iiy+O+}J~<_WTsys1^sE`YAi_h^;a|@ ztgxB7cN|N^d5~7B#|;9Go@zL ztq-h@?$0X4;JNoe)%DD2d&SkzlfHx@z=YVJPD671T@)>O)~zPf@VU?Fq33>wr|?1g z2t?Hdd=xbti0xi(pq#DdAOO!XoF`***z-3n4}9a{D+8K;%goqjX{x-Pfw?5>AtaSJ z#}60+pAz5#O(e!i%R~4U_$TchE=Lqv_^RA#9+dm~>hh(pUk08{ z7%lt;H3D_bTz~2j(`oQu^$y_M*`Giw?0Id`v*54ivNiJeq*9hfcJ%cFqM0v$G!y_E z-}xn+WUm-5Ov)Qrp zQF-z2Q^W5`VexT8b7@qg&xJ!H2g_ET#rNOo7R1ls|3)ovmthM%oQ2{`cjchrQ~lkn zJpwic`czz}Und>IB#4dcvV`G_4B1Nx3qHBJ(flE(<-K=_3;yO0!QDsJ%oO()IO z^pHKO`e%hpfk0QSnbF6Zd`oA$FpkEN5wYF1i97ARoBsO*Fuj8JEW^N3WLP9u8N znxiG!M+S>X5eTzp0mQNF`yF>%x5CKu0T+0tAcad>5Mxiax>tCE-R$L2$zaO~V z^~Tl*)yF zO6G4&oR+80e#xd^_-6RT#8cY`s_#v|`}C%mS{GM#Kr2$EQC5MjpKb%+>m+VI9_^HT zd50xz2IKz~NcX?73xY?|9o;{xudDR_bT&U=FI=SE_bB_m`chozjKK5HjA7#g$UpNz zsoTwPcknqFAU0jFeiSrc1=i1`Q4F zc*6s@lNPwh{Q!sH!>^o)l3Yd!>8smHX&JO!e8IG&o!l__wzq+)x~728h$a7}W0F^2 zWxE^zggt4>&q|RJNB1z!0j>$FlGKgy%ZRmbM>Me8?gRIm=c0=H-8aaE)6)1Hm$Dtc z{rvUj%TLy7Io+BD^qfZKQ@YV(Bjf8KM-B*TwVRvrC?eCmo!fYp@Oft#ofv#KvBsor zTL9sq_nlvrh*d4_>x+tC4pTJ8n__8$r`N^RYoc3Al$vP~-G>Vz?!#AMmkb_n_CGJ! zmNsK?)zz>-e!wuEpVT=b?#d#>Alln$G`F-I+95G)r)XSRzHe!K5*%0~!E&rYO5tkC#l$7Z!Tb znh^JAQh@k(zDl|9pU;ArF9n(}{F~{rZXh5yYE-Rn+;{SJN92N~BT_`K$r_{f966_A z%NzJ1Y=vg%Ezc)A!uVBhHQrBW*n5_susFLiAlA~M-nB)mePN6{96a=u_#P>Ny^q;_+!YaQaR$@OTw+)!S0|BC?49uVx#CesK*6+F5tNB1GC7D6? 
z=I9W?t1d2*A1ANRtbS&i6p$-K2)n-sPY!pKsLE*aR=6U{Y|c3*_0hn!+O1B zPxO4YmePg1PjGG>P?ZabjO5NqyoLy1#0jojjuyP`n(huM{YH$jk#KNw@+;u|fvQBJ z+kmHalRG@BiisY#SA+IC?}L6sMh}=58#l@J7c7cony9qCuQ9m1AandTG#BL1Ks8o$ z2eONJ)-(J2!|6xu<5g3Qt)DzWV3FS3muIek!+_HTWcqCn^IpgHM8LG$^ccK} zTY%z&;TBs*FYwe2n4QXY+H^{r9*=UTyDYMCS1HwPp5_a)J7i3gO>1sPGzp1$rJq0i z`ZrnIqo4A9>gL1K%zCdE=Lb{uyNm|lH#HR<{%Vj3?i;FWTTZp+{q78J z{)OgvDBKj19@k?VH$+P#v)y`Z2}tg*R#f->sLgrFsnNmFBPHa9cd?mEPq)t&*24?V zszeRZ{9H&Nc}4AFNngvj(8+j1uh#22%RDiXLcVxdj)UQ-qSXK;Z=le>MUQE^R<~gP zWsa>6N#xHeOd)&gTZWxiMS*zxJzs`?b2juV8k2q{j&Q}c%Ip=+HF(dtft;r>Mw1F- zPCHAMh@w(FC1@1f3evu-YUYs|)X!erwmjS`V*3#hyH>a4uefuYTrf+H%uK!;a5bpi ze-a%*69&9~4?TW4k#dX+^nNK2%{LdaXqo-?t){Z{q~)m%8av&3R?Sm|*J^ zI(K*;>Q)fE+*WBs!#rWfQhhrhjFV8Ao^q)~yZmJ)-8GlAPDqq+@7!5bq@mP`Mu=me zIQ?5qA@@is@p+Dmkf!jkO{x1gOxl?y(5Ovt)aot2v(DZfBg#<)e`+iLUQEcR=$Jq} zKyXH2{$QBz&zWy+vBQg^lVRo5g{Cq5F8P*=ZK?X?so2LVV)DRdsQzJh*W%(T4U;@A(4o`IA62fN$Fr zJ1hW3!U+k%y9C-yc1fF=Ec&nol3499lglgY%6PZ+>_&J$QKE&BKFy&FdlgO=@-AmB zL|R-trnmK#2}P|P9Bpz<6dD!sF}bjC{ER*P?1oMcW<#U_K)_Ol5zL+ zQorbPum?NzsQc_)<72tSAiXs3$ML*yvC)+Bi??$-6B9 zO8a6x6RG^!PKqD*6U@X!F*pfSG7|kqqUBHYnS+ViR+zD%o%41h0nn>zQ9t@1@DZ7& z)oP|$O)Z*Q8(aw=p%n!gTm=-5c0yeu#v+_&;Eh4LP|`KjV3;jcslNeW!3W3V_5@(b z8@`!TC+uy>?IWc4gyZwvs!|McxC1nPex-AFV<0#>wH!g3d}QzM$6D#bvIJYRtL%x| zQ>q~;20BXui?2(@NyqpbeAitv4`72v5G|x8hOn(dF0NdHS%niQWBnY`jB89q&v@9h z{al**KE?TadH1by-30szv~)5m0=ApW1{iu#*s&X!_{+Tleyq=(n-LL>8-Hmj+Fg1~ zyWHdcak@ZC@z(@GEJ5t?^lG@@f9JN>G9|l}wD6TOC%hiv=teeJg(#DdX_&cU?R^LO5=E?iq=fQVrPn-C5Y z;&3Aw`SnFPf`oAiZOn~Z=yr0h-ewb&%eLzo9peEDfm?Z#=F z7+Ntx0Yqxx!MVoyX5Y6twHck;fW8wi`9gUwRQDIX8N4}*IKoRX6%oS<2I3(T_6NPR zEl6LM(s>shlP0joo*?}Z*3{4!{3Z1u~3s!!;OOaz>{hpch_~ zz_l$^@_81*X;wS#lXW;}RF=q>Dw-u>gYI=-CXjUkD$~`bKKQV&`?f0RUn2tsM3nsp zE|7y><5T?G-<2Qg-$55m0oyzBAgl|ajCA#D#L?#u%Ho;j&$|WLJNCaC$?|T-os!KATMPMar(vA z!x4VpjrrTjAC}EMP9`Piu)?u5Qe+BXSLQ=@~!2Ez6sK9HFiTgh!$VHmSU` z;(-@t_G>*T5BjygYr%Zw+~#jcK5dpoy9;~=FJx?! 
z*;COazGVc_qPfYfV>UV#{;ynF7=G05IJ)iH$9HsWSn%aphhMk?*xsD zR385`5sZqz6f;n?uX%e`wxI_`Q~p%E9->>hNCiU$n8t8WQALQ&x`rKVa9g*2b!EgU z=&Vzf02a;rCBXO_ySSmqLc<%8kfW(19R7QP3j$=ejz(GFp_ps_VqG~KJfbOvw}o63 zxkh3IlOlBEe6XgIaS3jvp_hm0uG_Bh%m{fprBPkSVdC&;e7b+9v%~F*m*rMgD78kh z{-NZR>)qiq_i#8#BLDL~i`{SyWl&x6mulXnieUELNpHgiIe|E;go6PxlD^W`4S5x z+fsYC1D=u3c3<U_ z-l$Mz<*}!U>R?n0v|$^*mi;2-?c%iWRIR<|6Pebh+*+9Qvh^m*zSVy`W3eudF=6w^ z!Q3>p-V;XkowxIP;DFYvm4JEANC%y&G0&1x&b$FewY+4zeD^Dq)yZN?4F4;`3#qjequzdP> zSD~UD0sT4GmFtch3(?y6HQPzQn%XHlq#bw81sFBMwCt0^ld$_3aD<<4tLz%cy&lJS= zcQ9o-4=>!`GTpMc$eIqDz(2d%`Im&m%#D28G5^z~M>%e>Y$`SiaWcLp{%0vMK;(VW zj#93m+T_=2J^Quk^mCmZy!&;o`=Vy=z}Cfr&az}D(A?(WwBA(4|nw~c?B?Fj4F1k;U>@NVVvg8)8w6GT|}65&b{Xo$gdjg z?!Ips!^_-*V`(U_Jxm}`gii859rXWKMQ|f06{z&k^HKESBG6+s@mvD&DMi;s5>c&@ z-iO0}c*BJq;{CXiareIQqV?gohl`3G3B_AZb1yLcdnU2q*7e<&m{t%0Xr8d-rNesL z>?4Vbu*9d+QN*N@<-Rw;3GTNF)(`!gs=LBis7eps-^tXi6Coqgw7Oxsfc03mAwwD! zbc|lC9*yvpQJlt<|Eb?NZHaC_(gcnQU-N@MU8a?YN13CqI`n}#>iXe%l`0L-;TXF_x)k7vHa-R0kNyC6*ixJLBRowxzx*U!&X%R1R z=D<|kZa8luuN(FpgF(vf(^1);Ci^5ZUc+Xlp2JFoM_pkf;_nCBeA7QXtry>2Au2A&*Hf#!SPK~h9&l((MO0!t>4s_XGs|D2a6>S>ukT3|__~FL{OwdCsC`f0YKS~9#-DTv)Gzg&;{?7UG zw`vxsKA3)uEBVn@%9kN(?ne8zpTn;TO;H?UC|KaMrzEJRu?2 zdu9Z)YSxw~gLX4;wh|X*Un9zDAI(*>32GeCC4NJmqIkI_N@I)(2_(bGO@+IiCu;*W z7sf`B>>@4ICH`IVzPC37$W0@<>MhcCV^0Gg<;V|3PxXX6tXC!8>pD`?RXKhI`Qbj- z*_V|iata1ZQvv6*nW$Qy5FEi?DL)?;mm!#vwXakY{y7vF7IkF+xP2do)JY)R&$hBr zrWbnQZv!dyIgRg#>XT+%HRp8@359u z#E!(bxpwf-eI)QKh`tC1`$tt<%}bK7>5Lghs-ThKz*u&1>V5h%21S&IO$LfKwFx@+ z%en15KOX+tOX#8wo0k{w&q46z%$Jx!{i@0J6F5_|4u^ngWXPb7q|d$Q!CP5vbFA}V zvCI$2{}h)0E^L(z?n}PJ$P-#+#m0#wkb8aH2?}>8)M(=v{w~HvfBs45 zpN2X?v=y9i$0b^afuK|(QCskTveXQpo;wQIlH;d+{X51LPr-1knw0)H+4ftI4p zfM{*&|2pLV%^~Sh#&%EJdeX1FUYyYr;xUfGX}2jhS=+uAG2!VmzT4QBRq}6$+snzPp~W zfP4^yMcUi3npL{4nc#3Zbr&9STT^x6KL7SlAPh+acyE}efdTxrA227=p(pC2>{g)H z>y)E*Jk#^KgY zkP%&GBdW0j(29w3eJT*u>2jWkI=))e}uXse=RY;x87ql2@(=h=haCUBZE$#S>lLpmFiqbU?0#lzj28E1q6Gwlj&osmTc zWSG{;C3{tsm51UxJwOP14h^kp~jfs$cK$(G7&qNpWA}6r}fI18C 
zY791krQn1J3H1GS-9ekJ!s%Pa$t~pW&*A}-VCu!a@y@bOAnHM(R1xWV3)9DTx2O2S zo0Z-|DgBIaLmhXtV8qkPekL4@viDc(D`GR%W$>7#`eTS*soY5V^NP0ir534`DUX}E z#*)s^whb(NZa9c}*;u$gb1%rQX^>F8@B+L#Sn@U!z>60hKLKjo7&u39JpS@HA#zkU z9P_T@6m+y53Uj+~=T~(UQ9HbzAEut#_TZkS23-{iYSGZFRdTXNNRDKW>>I0|Owx)? zwfrGiR9juQE8cQDTOScN;1`T(L|kVVl#QcjNgJwet%SY^Gm!}C%##HvDYTlWcMHD| z8BrKaV>sttXGQ3V(um_3jc)eMm~QGKMoKF54G*yLD>;krfj2KFmXh4Ac^*~4`+MB} z3N)53JKpBaS{Qm?y6}3YsrTT{81Uc989C^EmX|{ZRIdZ`3lkhW5+|Y&Bsw?hS2lJH zKHC_O((0O|DSelSPQsHkJrYWPbU?@A9YGpxM0`UaQ?IZ=DfN$u8*Gc)Twhe^nvJi_ z1!20U{I<2fu3Bo}j{h&=i?DGaKWSn3NK$VKT?WoL1k_l}^7p3+2JgAv&QJUVU$L`0 zjWdNBy?rH2bS{70eFMG#8hIh%Qi>BSkT}H4#&Lvh*xwns!us{)52SYyX%QG&oL)%n|Du_j}=#)~?#sy+)n^)U` z#-@3a2Y^>Dj`H+dP~k_nVcp8FkZ0uf9&62qe1-!@Pc(@)01e@6L)u&!cm`Xo7j zUj-CesC~ZYvF#+OC1G~>gp~_5fJ~9&Ni3Q>Fwkk=*Nw+W(qxalxGHopVWT@*UClW^ zYTrZs%Mn~>cSbrK^zr=b(b`ZZ^C})4Q38%gO~k8~iZJ60=G7&ZtL<}rbS?}j0py+m zO8ehRUB-bPmr7|@jia8uiF@VUhTwfe^YynG{TZunV9Bs~K4G{9so-g3{m`=|F>UDO zP=2?g{rHNr5pewL^&7{S81O?+>U8v{Jo6Q(F0jIvzOmjCuI ze`P)zeqVE|S{bmFI*HO*mf>rCsJ%UZ_;-`(dy;v#Y$PVx5OAd##**Y6#&q^2d!M>9 z6qV0YMdjtrZ#MM#Nv;$DF8^((Qv~WWcLG)V`ou*}3zDI*b3)lSAh8j{md*2G? 
zG;W4A;hsFR!$o#Bn*_7>Q8qJK9z~k_>a~qT!YFDj;4K`$WzVse@BK6mucqUk<%S07 z#1E-vHZWcq_`eAk7-lGV+<#r%r?qu1yw-y2rK!1WkybaMO)Q~#FT{8#NdkD%2 z)V=mWw%M+nF9Iw0*SiwIwOFC^6m1leFiQ(Rfukjmu}(XMG|&lew|tSzLpGtCNyN<& zp7^aac!~?acsSAAt+dAPa>^HDU1-lc5FZFC*M^RJ;)aME z8D4-DmH87)d<=|OLv|}oHy;qNE3N@so zBT>N`ngEjgCq@oeV6gkBd)7$&2QL2-Bkp(gUwrX|C~yn++!t^=OyjaIbnFL3*a(FO z5CJh^BpG;FuW{VNf%%=M>B#*m8IXW5e z%Bb>Lq^_m41)& z-C&kIjN_Mt3g14mKSOg3_3e2w1emV>^{XH)?#iBbFD>tv5IF0<>t~EqMy$2R_T++$ z&qd;7hutJ=dz3J;ZMn$v;gkLNn5roheCxxxHBcgN>$KNlWjp9EEsx)oV!vsMP3t3W zWUATw2k7s)PBFGqmLMP1WB!9N=Ij|F2 zjRT(sEUQN14=fs##XTy+>(npTa6;iNs=iL(Biiu_X)REYiCZU{Av5?JpwPh>{sa*@ znGq)*v6c97ShF-)bgFLLW6HQ#FsUI!ggyeNhb6|Y&+&C&dFjk7zDAnZq3&8*suzCQ zMe{oQw9=qtzHtnM`AMDb$gq<IGU+|r+ znT|j4yz`jXZ&-9FX&sd~`NM`X<8r83%aKKWtEH_Th%ffwpHpA7aqWohjFe%?Fy&9A zrlrlb-ilBxhi{(^%*6NfO@0eFX-#Ii==+PzXV~_8%>62|uOwd!@H(sCcK)6+l3x7Y z*yEuQ)E1n9LXB8v-!(sM*JgShT$=s2Ltq)KTP32=%IQyBaSk;4z0grB8>>HmKq-6Ol| z`)^IA>)Ht^)`+ib)1-kr@5Fx-cF&|*d3ihGipf0YP$`8sXyJDrnn9)zDtd6&keH{$ zDmE*gVc+1_VT40j$4bE&x@2Q;5VAz%Q9E(#@O*?ZEFeIXoExCeB3+Fdvk^)x`0N@~ zjnOL$gHciWAM&BntXQk2vqqVqs{OSH|94pgjDRBbidKHle9Ov z<3h{WblQ#DG2aP^6sJjsZ^2AE5^uiLC89UES6(`6hW)3tr`Km}0`vKyO_K*G3oaqu zRtg+3_8zVMe0QvU<^^LfFAfnj*(Z-KoEgWdbMKWi>(DkfK&>;v$NxNf28XS`fyFj$ zZvHk0w5>5yS*iqjvXk?o5E+J&EUH*B)txAU6wMY`w1s6mxDtyf;pL2*O5s6De06wR zc(*vp)!Ar1J?D8luPT^E;4^n7%wFs-Y`j!v`ET-7-V?`}50>HQS|1e$0U~6~bv+k* z<}WV#h*sNlch6juZmN3|b?qZk21dd?{*;$0(OJ923?~)oHc0A4gSn}-kk)RX9&ZU| z3;%$M5!|+ViR4c9<}uPIm<^wFe9Y%m>P-d6eFp=yzLj3D!r)Tvz1`>QPx8wq@uqN)0T zdGBubZ;*5vCe3GjUdX48r;bs zJ9up9#p$?6iX!Ep^xJ}<9R)P7Hy&C|-hwi(*p?9I| zZc*up`?$|-7^!;18Hdc$YcPRn+DNe;idKpdxxqoB3?3yC0Y2@YD9Xi?30AzXmh@5NaXxyJhWX#tW=-9vN+rRTl> zV%v<;n4Mxx2^Mp(+&vyXof{5()z3Da|JN-eTEFDr`y=$>KJi}X=H$DF?;|_JN|rTjecv*}7op3$=9 zDpxWM?!4Xmn;)h7L?(amWdXBzW9{@Wh!+03l3ULomW2s5B zFNkdpE^~*S;DQsMA_49&@o<$XVPM7di%>pQk0ce?gtf~$$+Pwa2FzVYIXt{)`)_J2 zTT&?}lQ6$4RmQbg9qtcUg^%~*8w1)yD14ARnZgbXXi$fJL~N~hk=}fZ9s$exwD3ck z@v%58zl4l|R@N9Ph|#0du+bzmU^}d@a%ritn 
z*$Dx5IJ!YaQ;Wmq_h!z?5f+Od(sm|8Lfg<0+i>&Hn9u*35ver@SDL%;dwAEFO!&kw zh@jTWb`spJohj_zDvgA2zg#D~H~9wZinZr4%UvoM-iz*(-HCYlcE(^}G%~V6jdVH@ zu?0J?6_`P8Lg6v5{p2r-`=nY~Aa`G3rt6B;88Kp`MzY{6e0+i;ZJr;Ugb~SxsMQZf zrH5bwP8v6OIs2N}sv4GLv0%0wapX%li#Hj;Il*AbQcL+nO|2(2KIK5oTz+ixKU>Hq zSrL`>V>sUlEpmCJRucDmUJSYIj)o0bh$jgzykg*#Tj?*MgG9*OVU*x;tM5twvNzaC zocjn5S79f?x2Gmc78S@H#W#{nEgh?)kmY+nH<0%TkwJNGD7z;>CN^4RJt?v7nX?~t zJFi77`vl#S+X3m%_b+n*i?XbjY#hG`XVIThNgY#~oiaqBOvDNY#6WgCtj%#@VGn&& zJ4tWo64!DZcx_e0b~3YN4Rx&obli3n zM=)SH0b||#__$ed!I9}CyrsWW7Y8R3dD6=N%KD`DQJZJ6yyOM685Ec(D&DSNsT>#u z=%-_@ixAa$Jw3nsiY)z1HM2|-{EusR{(LFBt^Ii?R<|=UthLB zP9y*|Aak~qiuOMlYl2S$#s3FiXW`!=NhxU*Q5tEf0VGtqn?Xt% zlxAq@?uG%RaR{YbItGw#mf!B#vuF4GzWXn{&vTx0zt??TpKD3r)h|1hJJ^{jd@;y| z=(F^H$v^nwLQeaUW=on-Q@g~$;#(8B7D0xxbh<}U&T(#Y`aL9O7{K>?O39#HdbIvR z>|&DYuuPfMcAMh8XlD7D!(Pv;5XqGCuK~?jbe)4SemRAYz8y1ri%@mFaLe&s5aZU_ zmUGGMOUA}O(pe)~_f(z3KMB6&N+D_Y&IyV2r=T7)5#6Y=H+*!6?@h&*d^r3ACk*@} zdX?%)Xa?)H)q#7PGzxmQIBOpTo(Q{Yl=NX6DF1-Dr7Q)LyZH|3gjc5LRyWi?|| zFeLRmH>R!0rDHQyDuvV#2X3dJG~Kz^p;X{mnjIl%D#3b9_piU38;f80wy2++-` z=++Ab7?wes_1uIJb7FlegKdHQ3@j$4{2*D+ z_DGj2FH69qm@We^yZI@Ygc&_-MqKmCG)wJ6qy%N|TH!PjKo(eHWo{k+v@s2J#)`)) z)`HkYz5$7J`EyvRndwiKKqmu1?3P&>&d(l?+DBFZNC6nhjy599?Pbht+`P_DhnYwC ze?w496o8LzlfOMD_F)|~TXQ|ZfY{oGC(O27EhSuv382d#-dj9i18mN0MGmLjrI(2SpVTf|3%YZ%P4>`=IT zI!^-l$!)e$f|EU+vSq_as2yN&v3rA9J~xAo;@72K+gQPd&$pPUr)* z+on7S8>$A~_8E6)sIV+Ibt&+6QLAzY6tO6O@qrE56MKxpY9wGd1v?C*MHw*PSgMN7 zyHgJo#ZtSGm(N-*(BwkQt`Sqoou-z2z~@%uFj=gOwx%jh%Rf2>x`aQ4whPGX?6rZz z;x0pso%g|E@?SyzCVNY5fs$YlZL<~mhE~8!d^3w@?A)!#1o((iv>jLE>rGKzQER|@ z1OO=8RuN9mdJ)J++afNL8c-0n2~ogXH!~8we81i0oPZIG5W4*-z)Y+OQ{Pb^ zGvL%SqHV~)7UM(l=@Z|qO+Y~F&li2aO@N+fWCTZa8Z=X$0jnZ^MUCGw^+<|NTmpSE zuR?vs&oLSpez7PO4I!0maN_k<|D06!>Ae*bzL>|Kb!+TA@%}Jq#)oYR03LmQTDJij zR0r0XL&sZ9MQZY0i6{D1C*Mjx8*wG@Y3&uY;wVX#H1K($9OngRr3UsRet#d*l6i$X zlL9QA?Uk`E3 zYF#k-?RC@Z*)pU0(@f%zPhcA-QXP4a+RCdFze;Z8R^)>u!V2=ey)&>SViS-g#-=)Z z3UQ2+{$=&cDZ}%{JLjdyEgF_OX#wIiQyZDd<^tqpkOxmM&lKrw(w!^HIR_K-=Q@J| 
zWx#d>59XgIx(-#B^!u|Y-(}~S5ew;;buWmAt&VDF;jcxOCHhZaCjYG+63@;cZS9TP zrL$*fKL!dcUApf63Y zmwARw0N5nS(d471FKtyUa-ss5K;=G9*7sk@0SYH1t7Tnm-u58TXY>TQDUON^Zb#i< z3^lq3ZM<KHxW1%T zlrlHLGDQH4p6R6KRnfs!E{ zneNDs9c>`TFswFwG1XkUDY{foOP^8 zw~~t)nyk5A;!(BppBLW$QFdA62nb#>{AhK*Kf-e}zDQK!GWhDM`~B^oYyA6*j$8S4 z$D1$8alBynnMB2*huos*7ZT#c%1)gcEv{NdH&R*vALzCI8aMd*RU}S&%HwXuD(Q!? zDbsYVWOESk4q@B(T`B7++tO)QJ5H+I5(1^AZ|D&%lg=gsknq_eAUsbkk`~MJ+pl58n4aa zg@4=izd!^_2-IStlQWO**Zd-cP?7Z1)VLGwb##D-!lt|SWFoiUkQu8z$H_|cQRw1K z^F&75rQzI6GYf>k)ELyD14c+Dgbt6~X90&WR4R4shV1fzHjZ_ThY}BNr=c?M6N_tK zXkn#?ZF22AxR$h7j5+6$!Lm|1Vm-|n)DD49_r-tL34sCK%p;ZhT_AFT5ZYJa%zuz( z9`4!&Yz6lXgMkklj8Zgl#1$$!r#ha4ANO^Wt;bYws=6S1kzlM9=D} zNWlaHu$Z0=G)A`@7$#&Jt=b+e%XktoQtM4q5ZC38j$yU+)}v!Pwx;^0Li}kDhd8=W zq-&!R?ytb_-=Nm;LCk$@eGsO^lX>k4^Ox)0lTKe33BM9G7lU8YzggA?i` z?aW3&0n_bHcg1&Ve<9ST(UBT^0*|iBgOTBWm%JCSt-Sh;h|u=?Q+PogS<69k%*W_m z&EqE%=|tF)CH_|hU>s0^uVirzeu0chuQ|f|Q#!xs`}5|oZRHq4g&>?a-F%Mmp(ua{ z7ICbDHUAWIew)!HbMv)4F8jrcs~!l>hXW4^{&;f4%$_k(Y+P zs(X5PUc5kV?)q|gX-Iy(Vxn%(wSTa!*t6w{+uD~&bWwDh>qVpQOW?2_;HN^OzMwhY z+q@qN=H37_pTDp9vZI9_Puwdgt=pm$^y9*;ITszU&Di4FW8N|;e`^feyu{iQ!il^A zzKU1Q1kA(J`v*AxOv&Frw24ixnM1=62+wx4%2sI~H-UdAo3=zTrb&cP)#PE9n z=CfK(Ai?Wtk3Ma5Azn?n;^pEbSg9*QJ@~ku!ZUg*9WpLyVuaiT(cgpJWv^>;Yf8>8 zY--xtIwt+@hfM6x8PV5j0cnGjx5K? 
zb5deQ3~<hSK5`iN4cS#vjdSxo)8Ha_=|Xm5x#e`;uH@P-;T-&I^Wj;Z z@|TB%+-u4FDkoqdG%w&J=xz52ePDGV32V(-1%c&VvnQC=RYiG`A#hec^G6g-krDA( z)zEmiTze~wq)x$J>K)VHbx}gXi{`aCJeEp5aCtGyu9;6Ct-CgV4)DR9or*`{81_j$ z)p`k4BpY~Qej@<<7jdUGG)*>p^CwP5xV(V2XeWM^qH4SW%Klaggt>v3Ce$^PG#o%i z{2P)9(fAp*@YPBicmb78MUI0i=jlZ~0CwGt0&U%fZ%n z$!ONP;Q`xhWiLHdFZ_Ty@2B!&HlKSnEMrYhPnO20vIGI>y2rg!5R0Cuz0iX4%=`4% zFIZT^zMEpGw)gj4KXxIPVrNjmA)p)N5y*$cIc~E&!0=+_HpPa2Hy0bW-30S6O)qD_ z-{8=rk zUW@=CJ~B6%VT#BMQC1yPM9+;`^_1Qt6x&}ms>4mzCCR#7^|SDIFdUZZKhplizmW$~ zF|wc0LCiO3$ZM)vpVBl3eulY#-((edOi4H7A{DrtjPU7JP1lK&8NNc^(Q{6IiJ3uM ziH1I1c|coCnt+rIV{+gEXd5yf_-9V_FF%90Qg`R@z$zI(2|(qvsV$_`L%JUfP2^m0 zUg_x>*%b(U0pCgaT)f${77NpOg-K z4PjQ~p>r2;k>T0}!#v<^m2NEkNYZXW+dAww8BuM89y?mg-!N@`rGbHc!wULx6NMVX z+!KY2B)pA}Snu5|rJV$lzge{FWZgujGDdIu++7Z6PZ%mPZbMy5)0`KH&W3yhRfu~( zC(ieyl-u9ydWn9BcDHoFhL>eRdH7E-)(C=2;(xGFrJ7NPb{aW&YkMHWiWln!Hy4dR zKy-J)<_+aD#$3H!%|Vk3o;gEvPBp3%Rw`Ud9pa}h(4KXy{<=a0w)SdDWetGruS0eX z>k{4Wyx$6oE6Wod8FprAbND$B;Q6;M_*C{-Q-pyrlQXi?C1!FZ44HQ}`EIVgVcL$7 z1EX)EDBqusV=oJ%v3ORwkWeN=?{>3zX*Y(k&R(ivb%XWOUjy$IL zfx1nqee{yu^4w@cRjE@vOiTy*F0dCQ)aEPLsNXzvj_mqbRFT{s8Q$J^ zR2NnrD@iG1-xcT%;j=*zK62;#bvyw$B$sULc&ndDlH9Vx#(+vW{*u-Qq_ux6IF?)* zgCjjf4DnCS&QcB_`rXhA6$`|zl6+>>E{I-10dSE2sZ^jX_aUVl--{S$&7;FlQ0}5i zT;_|@Hx@=jaN}h$q^PYRI~QD}^dn%}HgG@W7bVfw159FZViG4yI_Pyy#7%;Orp_6D zUPi8C)>R;SW;HR!?b)%9nsfL>Zv@xH&-s=Gc4g6dL;t>Ue7l+0TH7lUthU@+Qr5L< zQ^iXdH;;TV2`^{#O`m#(G%sUG2N`;W9*#X+B{;55l-IDm6P*G+LWaQdg*wsXn}s1N z=SF8W;})@t-{1elaYnKiMBTM8qh#6R<%2ivrep4z(No+w`!tZqM@yFeY!ki7OR9R0!hH{YQv+4g{--|DeTRBLXWw z?!KfC)P}|jfVNYWsEf<-v+CM@pS#p}1>g&7_01!0oD&2~9=slMP=IfWhN|o09b`~X z)>imQ{8Bi74}J?4rNiN<3Wc|=82VqpL`lP zGJy4hgYc_v^a zL-69FlaUyRHs!1EtUOzIy(2Eue?SI2rL)9dZkAn4R?BQ-+3gyB#)`XOoSv8v7~9RT z%3_;mouFgX_PpH2+nig5k?~nj2?A)NYLccFo#$N3o@B4z}~W1?|>Y-?82p1x?B zqmW5Yu!wdr;56Zms(XRGm3ZeSYp002K+3c5DQnP~D3)7==(^)+u0;X^0Ob2EXk=9o zK|;MRgLo?(KQ-p=z^J$UJ#{8AlLoGs)$NKUs&&_55}c?0CL)*C7wS)BoR23ydY>t0 zkMqX3^25^;w}XSs8dj6&gkaCB5A+RY$r{~f+Os{ce<_@&VYuJ`mN=ptW#w&7cDkXr 
zXQ>4cTDBh3yqK!~@#CTEEI1m(R--FmefAs}zNxZ%q=)-epXu?kPrcn*or<0Qrog%2 zbT`%sL4?SNa=AU-A6N@Tv*GDB6ehmf2`?jV*V2;B*K~Jbc@QVEl~$7}3rJ#k(b_yc ze(9g=yB%Hb2J-+K=KPo`vREnI{2W8LQq#Lo z&9fv^txf?9NNI!X!zEVhrJ1K7NwOQQ`FQ|PY++o&?NdC#KTpZJ+}B9j@qnH3*1w%^h^?VQlO!TC5( z*y`aCG9t75ULe^8?|jek_msLN;$GCzR&bxYKzV_#?0o*8-<|;kE}2mVe2&_+FyOGN zBywf98rRphboG*k@ag_~hQk8f@L0P7 znl!#|ziaK(3O)KR*|jp)6^=ti{E|6@gSHG!I-p;0> z#}!o8e5!>!sDjpS3UIU4rbikJFc@>%EI(q##--hwcdA@}L~}I{@U3X7**@P8eck5l zDp0v+Q?Ue#yl$w2KJ!6>aO?SuMb0^K)r0TKNv8C^Fj^hkT`t^b4aCEVW-)> z;iETL_Hi>%c;RQXJw`I%MF-ysOw8jC!!SKscruYY-R@c1@##apBI zLyhFnI8D|Q-=MJsDPb84dXly;s<`;VCrHs4pbHC#_Dd}?61SQT$U(LVphEcffeijh z8saFzC-F`N*!t+k*2r~+U;zcQwYtC%gi<{5A2b+PTv#Cg?q5@;D6Q(^=PJ8r03zB{ z(hT-rw6|}+)Ddr!uobaG(*VYg*sFt#NwaD(uPK`j&jiOGXc2Bc9KF;|a93QJGV@K0 z$$}tRc?CyR zc_dT|>dj^IC}S<9MsRJ|x~iCxTj0BQg7OHaQB_85WbvmN0t>(UnaPzTk0s2%XfG`f z2XmhWuH*a;m}yUReZo1sJg?h$4g}S_c9W(tp}?YqILw?HMPnrrDe3kJpfG^*Y-+gR zxfT7k2=?UR81ejkJu|n9WX`#gWY-k-9zoWuk~5~GbS=0DM4*{}D}qN^6)TZ6jtz?Q zYG0avcl_qmj0cLrtKTAdT8hYz6rD|;-jdbKQkmd$wNS=p-#0H&uv#vEQylTNQ>MBMIf;nrnFfmL@a)qR^)KMvBGb135MQZ`kleYlqw$}sd}N# z=^$8ew~6*EOuFTAAlx%L=P^|>=GT+{7*+jm`|l}84Z^vaCs!3c`bARlAx+GLAUj_Rt_+_QGBL9m2A zZoewZB($miM(8sxei;shD0hkl0sEfoFewi#*X1UKY9oemzfK{BK_o}c#=328aq(@2 z=~aK}r`|9GmdA5|Pi!#)Jv2Y?JOzYI5utBG{Fd$S{a%K0B~G+uiB8?3M|zyoqqH4g z^|vaj&I3rzsM|(+5?1;Bm&`M|EmWA0h_(%49Rr~9UG8+J9RSb8Gt^C3=uVi^$ToZb z!@|tkAFc_B}eh0QfGXrZ{vrrvv>H z`QGsJ?@)RGRs8@Z*S%Lc?W04o#{c_IjzHXM=&Zez4yxtym){d61ry96wTzK>-6! 
zL=K@Dwe)r`dB*i)r)f|c0_faTIwMV}1IzoLvGHBJUp{!m{gU5%1ar@reebx*X-K69 z=PCC|u*@<(zamfnvr;EhOj6@RIYzIpnB?AMWeTPqJ~~g&jzq-={g1Qq)rdO$0p@I9}8!2jUV>ck^LeJ8v29 z{16Os-vcW3H4{Gbz_5m2JKa2DsNR2Yr-!dvY>5E3N+z=Z+Y2y!iAT@P5M7Q9JhhtN zn}H|T3EtyLL|$^866n1;%|zTCJYZgvC7NmF?Ibgm%X&#Ka~Y72V}DuEyLy^3Y2rjkrnvAYup^p9p|n~={TgGZNwu4D3YJy7*j_9AZfwYI@- z#=0$RG+rg5UvNA`C#GA^mt|YddnLh2EhCF=IM#A~7`7N$5a6}H4vx)^(4T;-9sad} zQvb;nuW}d(_08%YAJ*3~XVce${-{w~Cofrrha#J4JxZ>q(fohbVUF{#u$2jF^FgFD zy^`7m!))dY)<3q%1$<1Wfkh$Ni<`d-;EJY;iQhF8dZ3F|`!nilC{u$e-!zbXXnOt8 zkSj{k2AUN(d>W78uwdW2WD~6>;+4AIFjf1U$#u;;*!a}>xC4YF82z@pQ(yVQD71@w z=l96ayLBhS5e(s$=fwgO5Mb?-fp+%@koD_O*+@^id!{t>=pv$`{r<#D}Ojr&HQKW<&^7Zj50_tVHERH%H!(Q|J{{b`m5;M;P;O9a`A) z0jJ<&UM5+=#io1hb@r0rqfs{gJC6QJ*KtOSQ)sL1$N-e9kGQ?5+V4Kkd)S-?dg@x= zupUj(kfB})B_KLF7T~=nTZgx#b@rZ@@@Mo4o@+4P(g$pgoC!Xp1q73*5k%|9rCcf+p5s@GF z4Q^UIo{?Y2&>; z$(1T-7AZKRD>PDzt?EPYt~&lY&vhB`rM6XZ*UOiEJ*2%7s#NiZ4uS1;-MTlViiWU& zhnQ^-R84OeXLSDS`06bKQVjwi+QxZj)tfO%SL)O%SfML&7rjeG*FBEXEwTIxrT{CBm2rs&RFGsDRcb z02uKAv8NxhtY7>k>=$LtkA6J)BEAMGBUK=vWBiRRd31F1Dyr&XP>10{mumz;$SdEc z#L26bihu?Rm$6I@`cz4Hu~W2=FN$wB-kdKx2RFCSA#hEKd^cKsluH1%5dX#v>)SI4 zFDA&LUkgXz&at@rY>kMCjW|ZYz^%6@vly{3Md0%zFTBE`ti9JoDPgw*ovGP3^dO}0 zPbrRj5CFl+#akfOgB!@GM~0Dqn}IeOhIWp*j+4|VLJeKjWil%{eC_3$$8|wgn#Rdy z01_1N3$K@ITYZd9b6>M9)UeYslN{_(Xcaj`VhRM_(ejz!on|a=w^#=+_MSKUTV^2! 
zYLcXh+J~iUhR$x|)g9g1B_&Tq0OWzLb!4uQQLh8KsmRl_+4B#>{hI6pKVaajoJ#65 zP2V^hY2-nI$$T^uL?dD)ymKyO4PMQ!OiL|vAshV*?oVGE{T=5jz#pPQFc{yP>{=be zXAJ$MQJ;?MK?P5YE0J-+X;p6g1P>4Xafr zZjQEu2_u%j_bj#Uu*hj zPK-tHje6%VU%1U-QB`X5-X*io0G_Yk1sYr8_r%>IDSk?N?iW?BYPHo)Wto`F4~0B1}Zd z2hGI2DyJ6jhNfoYfaR!(2WaG19{7^dAilp!5IsgGj-JU_qk$17PX4M6765j8y)@g@ zk5T~Hb_++L!V;6`Xx)JduHJGkNggnuK=2dm35v4+L1&DP+skYf-bwN4mW~;n?U%fn zt*otSsA&EDLSgCD@B{)cJI2sTGLR8_{;FjBpC~AOfa9TUXRhM*RQ9^;{-6Y{G7wI@ zqS>R_(RBImtU>D1et_i2PG%#|IrOnT-VhJbmvZ*`#G@h3BA1m{I!5nqKV14h;0{?6 zWJ#{*9;z01y31m%2FMi8K#`1pKBu~!Z$Cx;(F?(;SS2Eb}A$X-v_hy{~Lh^K;(mY{At z3Q_3lt0hoO)wfRNs<{3=S!hTKt$jf#N{s?F8sa7 zrB-F-$?SXuyPO z1H4LvFqj3H0LysVIuhgpu~x98rw;#j@R9#vhuyMUPbJp^dXzYUnEjyoEWQ4FOQ`L= zU@8~yP_m!qo;ZR@2MeQH5iE)`PPGZK8HP7Z7fcjnC0UC7^Wf=$t9}5{AH)z?W4#&L zD^wrL?`(3~zt7f*pLjOUfQ`$%!yhow0`0S;E&giZbR-Z%-~$SRf!UtBASflyjF=j7 zo`B$anm8e5*GSN=WB{_f`BFuLe*8&%)YQGr|gxeze@}3d}H@D=)lhXic;VBt-tH*^qxH$GAcwd7597Me)az7sZfhxx^;P9C*3L{ zU-yQW*yE_2?|J#5Sw>8bZHysbvaE9zTzNNv9RPkx6kAX9>s_SZYltn}Vy<2A1w?GN ziOCf=UW08n&hTx9<>Ra~#TMuv3JcT%=>pYqzO)4daF(!ruD;WZ`o&mRwltLnQeXe3 z$P>pZvufxo=|A5uQKO#AN@TzPFhiY}#tHxNc{pGuva-5nidc}9oT#BV;Ra&wT?_>6MrJM(+RIL;sGeJjOb z<7Lj-Rl~et;KD}dZLBoU`hQk3_c1S85Ffjlo3r8F8yMTsyKK_#3v;^XF-f;hkesTQ zPYZlvsA_ByrK|T9=#+4I+*arA1A`nIczdH2qq!+3`9De%l9s^BQn@O>7ty}xmJxS# zQPVZyS7^>Cw)-MUmv8r&h3e$gSg~W^cGsbK=H-eLgbBA1dORiL7|4(WUWf_Y>dtX}9N<_0PF=Tu{gF(6S9ukFL+r6}J}R-!&O8m+Wnp zVxx86Kgz>s)baQ@S;@KpzGzWQwiT=(Uy&D3JH)@uA4%!M%AeI3M==^H$-6uJyw3NV zKmQ|fJ^>04ymxY^L%W1B=JP!$K<$_*Y(GShysBilKTs{x4-^k>U^;_MqtlZ5dS)y? 
zu$?nhJ(TwxVQIu#Q8`|Z;Hj6sJ-$ZYUoP-AC}4-$`dS%WHz(A$oGMJY0C#l{_UvMG)%hP^HHmuEsW}+j`1)l=T+_+({u{gN$#>e_EDtaHz>47Af65ER zq&}pDSZ$W1V+ocs@7O^dZJC+XoHy34_mqC85iLgz_ctyd|1shcd(7M}tr2-Nq zpwl7DV-I=WgoHkn4sE24FkuMD$7pIZW02P7>DwyoG9bNcyFOUDL*K)po-)+^CX z0I6W+*3YhhV+}%TQqTUB)+iLCdT@~kX%GUDrXHLC6&#aqK!)`4?n5Jj^TTssAW!Du zY1iZ4!~;K6*u&&r?W&-bl>#49_(4fQr@AYu56BA=EAxWL2l+u4>jux7`T0ren!BcQ z-7n4HH(Kn|7F9ZS1MHXMWt(#gX}n*Js=1yXT;)5xeIjB^6b3=fox1&EUZ+BSvyoT6E9&E4f!78@+4 z7n_*jy8R$!*v{crXZKk?90Nfq>|Q^cpF>d^#)^F?x6p+okNp5~=zW4t3w_%S-w2|e z{r7HYG-NB4l4=u#o6G_i)I=E5wKF{RLw?Q>*YGgtLOj|?j6Hp7M-#*gTYP9k8^MDg zJayWcB$E7Q0+IY0cFWKrbDz#e5cTUTuYL*-=wIYMckHaCH(D+&;2$Zoa#tgZ z$|LM|LzQ3C!ck7>#Pj$u|93ySh0p)8GzKrou4z|mJru@KPO^eZ_Ti_)`_;Dm+=)l;`1pDvv=i(-Tl2hJ$mWU%- zB;L{kt@8q&dWEwT|?2|u5u_+%UZlvGP{CxC42QaSkBeHLxjIpbo zbAlL5(o|*f^(YN?OOAO#`6&A%s`~|1McuHgPYn&p`{sAVzvs^P=&_I`1@eo zo%FCc$E|07XMny)Z*uKm)wKi?)O*0UYg!ac_wCD z9@b6GinQMppq=g=y>(1)?cdy2*xJTLa3B;8->%F_aS)-{-ARf&#ml!%tpLloC93!4|VKQ|siL2Gzy072zhU%=pS$w2O%vg`DI7$i?+)&V z?$MNNToL!`(D;#|H}!MfG}r(SIB|yMQ3^HJnhHMgJZL!x-pl#3ifUHOvc_tiY=0$N zd}~)CByfexXw>#*A>=n`nUsGA27m$k13Rwy#p0-ZwrUdkp=p==QKzX&?uTV70MdFc z{s-XWhEX8}*t?DSsY>}m;7BXHTkiUUk5&Eyq&`o?;1gHHm{QJSmQcVQ(9n67;vtz6Ehs%CxbeZ*YL$v5scTC`*ohfi8e+TQj5P)^1 z-Hl=(aRbGOh`~Do#>0D^@EHEFEOLlRmluTi?V<^=jNw7>T5I0~fL)CO{7c|jqXFwJ z;?XM?*hjq-ayFrLOg)^AU|D%>v#$((50f(Xr03eNNDkrbl$2#ttU-09jNYCax07L0 z?~}J;8b0ra2Ga?uF4l0eJq6-4gJx7AlmZawht5$Kksc1D7r^x6YQXb@iYB+JR=~sd z9phTwe(Qo>!hd7O$_vmW!wSflDqBh;DHJCXr1{9|jr({IO zHRd-wf_*e{#kBPu+fY`rd4?MKqc*`k(}#mnbJW@PtOHjjlMG33Q@Q*p?$WK0saqpc zhONoWDX;Iq9!JF>2y`e!$<%A$D!cahB5=vN239F}N&xRFRw*_~n}437zJS2D9Uz)% zf7<6vz*pst(vXAgTJOjn0nuXORy5f`tO!y&8H^~PttfU@R@D%wxn@o)(5) zIW|@3^|$#EY8tNAo2>4ZHqDnmHn{Q=G{LTlFFl=GShjrk{-C3_X9Sh!m)A;NGUGKF z{b-^eT4q|*^Q$lmmZY)(B!ir;omXMRhkeNRw-LipP+gNF4eo0*XUp<62JpZM*!FZ@ zt>d(=3*cKe*|rVkbE`I$i{31vEXEXr4Y>ZAZvW#gJ5|cLxzj8s{E|kR!rY& 
zP9*Fc50#T*1hIWdd$kQu&eL{ku>DVy&_f-^6@Sj!&c5?X%t&c?%4y;*}CnRD-^q{EfUxK|d7xMOlly~&fBb|_bQ06=$xs61cRPBy=NCuDhgxx{Dq^e4v(9>HEJC{mUS3hUGEaaeQav%!d_VUNaIoLpIM zuYZxqDyfYSF~-FIM)D51%MVl@0Tr>}Wa3eQ8{@(LLSP7u1OB%z0YK9*NBNg?sNe7r zSiE|2bF3B55K1a`@OOr52H+CvSPi_uFK=+)nF8F~ET0GcV(>&p~ccz3+p&cnQDk;X#euC?*z_1ML zGCqYkSb0qYdLh@$1)u1Zz8WdF^6#XrNAOoAhY||CVLWl}xgY=l;89?i^Bu+K(GJ}E zJ@=z4sZJn~pYgCk5H1eP6Ch2GJP)Bv@0_@jg5v{(F_4VvY`{~j(sLQL($N5VH^_?B4&fpIjGq?*g#DtUMlakb|t z7vH;tg5y*v{?xRcSJKYBnE*09YE}rUiwW>ZRm%X|LZBSe`+P%VNvqVLHpLJ584ID{pP5u_H9{lCVUnlMg>(j)Yf? z#q}2z$IV!#<5oUe_2>*3i?bYLUGkKv=6l#=+7QF%)3o_Av=66V@T7F49v&_`jG zmhjaj2JX^janGo-7*KVXT4^U+?uP;0SGj6P$#I$_`xIvt6YR&^sTt&umgJ4Lk?Xc1 zT;^0G>)LU;T*{62v!p3cXk@N>-JPvHMfRkN|q-+yMCssS3xk;CxO<0AU~pPXeh4R2sQmyas#wH&)8;fq(c2k zdaClZXoooA%TXy;9nPQ3*&})8D9po*%yeoG+tm<@Sy9w=;3(2#v+<0-#R<9EB7JLx`Lfq|kn1*&O695SQ{!Pq< z97a;|VRyYe0J%lCS;8t5dVz%F^=`XSp9TZ}Y76jBIIOrB$ih6Z5*4PhK ztzAvbEm22K{-l;*kOAozjI)NuklmSZs~!F2!MQM`0K0v3Zh2N}-%#XopR(r2%ydqt zKDFGIBVT9_f~%43oa>?ND}DZvrxj$T za`^k46+TBp8Pw!P=MvBaD+fAeb5(R$Uj`p#l;XHhmbhy$@n%V0>&d`o_p9_!bfm#{ z#vfvsk(bcPf7ySB*|{(1>yGueQK1!g^4H_`w-Iia7pB)mx*3}9V}wYuA|{e}0DwyI z?JHR=@*{mFQSXId*6j3>TLj6H>=v9lwR2nqhK% z8~Xb;=7}n6om8`~E*9Dybh2&XY#)Z(a7866uyhC2z*@;yamQ?WX~Z{WbpV3|Bq8H+Oah z9alG9N2k*9b(@s@NjzQFm2nS$LsUrq4`P`q)Vvn+U0*S~2|gpBuKb1?kn^9Mn)RjB zD=<#r*;>Q$+iK!_- zYJAgX9}A*N@Vv$2n8^ay@}bdlUR&XjLHK6i@&ODlca`1>aYlQ0w+v0Acmws)_4TkW zh;gS|oXfA&6jCIgV&%!^t({ei>uyUr_~I5=w)@!-=P!lL2WJYY$9Vj0j`x4m_1*te z|Nr~X;~3%Cdls^iy|-*BBeK(xRoNoK$uY8>$coI6afBBgE90OXd+&Kq-lhXw$}StS$zm* zAkYz{e0mKuv)Xuty{(aXb`z_s+9$ z>09qYsgBBNIkksrs8b-&xHkzVS>(%~xq6vq^l1QmDrgaQ7|GN8xyXD#H50LSp^2l* z&8qif1ky53*s49f^s^eHT_2h2bxbN77aNn{*IxG#17RYH2*_!LpzTd<6`->eKll-? 
zETW#|Q{@5JaK1TH>48IHhMiR)FS>fN!I~C{sY^afk=d2|$&)%k^Bm)ja)QGJ;BZ_2 zV=lJn>Wnen7NwVuGP#4FPR=)fd(GASUkKiI-Ed=TQOBjxOzqH@@AW(fR5U*#$o72< zeUe;K#ma=PzLxUqhYg>6nhX}wp=>q?5HINE$wE70lvWIZB6thOXtxpNlSt-<*NIJQX3c$&}U zcA7c4ZOHX}avt6I+6<^ppZmttsH$cWxR~NPCs=9kKGa9&zIIS5DjY|aTx1D7+|0!0 zBF@?y%u}i&cNqq!|2@WFuYzg#*4MXaxKV9Emxo7I{{juRFBt~-HNL*c; z%#Crnq<8AafG;L9ubKB;Qz6)33%sY+0v^p1L?F(wH&aMzS=6 zKf@Un@U>1jJW+Gon7D&z`%k{!?i)@oDM9G*Cw2DANB9-W=xkkAsrMv%Nc)nK(FII7 z+Whdt@}q69osoQuxM7CT;L%*&bi1HeXl=`pYv_38@uGWPpl3--b)o7;xu!0Xni?s} zCN}+_mljuXGlA1~5*#?E1*JA2l$o){7F(`^prp)$6x>gq5rXW}NI%aRW>9cwYUq(k zOiR1g);}7A)8c3_RDB-Pq4bKv(R~ zpPzTxpNEDXyX_2J{mJe6qkaF9Qqg1nbzaEnl6zjr29B{6_H5+-5##lR8Zj3Gx2u+4b=`WoRX5uB?4~m%mGdQd^}I9el;kVS!1O1 zH|QT2N+3XH&hDv_*434uBoc6(6l4a-ylAtODt8QxT6V-~q@J9wReWaE1uk1)S3p6Z*R z6Jya?{9$*eQ#S&9rJLMjSg5*!iP-8WKL3J9-BHY@qDze3kGxPSWn38XQPgNwv*`Q9 zG3gT4Kx&S>5Sf^B2y~dF6JXT6j}X-gWxCU=%Cxqak3c6zVzgs{*%w8GM+2;QDywc< zqat{0uk04H1khrUMxt+@PREUnHkl(ebXY4OYamVkzQzZd4onQo2`GP=@AKZiqtuVp zoE42nNrdP89{<-7H@(KoAkFBYrhF{~n8StQySH`Dl$#cy;iiK`eBO2Wgv%=U7*Xq) zims*vGb|bh%?|HqhTM^?9cE$OjPt#h6n#zDu+4d{__F2oiOKzoG7?z(_)ylLY&7uj zjkA?}VD~@)DCdu@PSmAs1p3)r-sP&DoZJekKcanztJj}_Nb@|C)ZXxFSJ|%+jPK&< zcnsnsE`6BX)83a^S@jhI*-f{X9;b*gQ0B7z39#pSV12v3Nz*2}t7LZ&Q)35U$$E{t z=7CodKgnL@D=%a;4xR=Vkk0xPqIsqy2333Z5hmcHv$7in#APbGAY0+0T7U&Qubvip zYJ=d%}_nGJ@D=va@2{`tlrt6^5PS)8s&`$ZXp-Z24P$d+ z^l3Y9L7cw3f6Yq^O><6vo3F#H3y4F=tf!M(yq#>~g!W}uRX!6!p2BdSY6_>HK-peW z6uw)~3-AwEQ1;(k&T3C!qH9YIQhgl;R?@1=1#3Vx1X3iIlrt%+zT2i+SmM~XvR9c{ z0Z$adXYePKiZV66vh2C`=o8w{6%fFl)96RyQ69Bl-pVJ+e!aT|Lk4{bd^x;cEJ3-7 z{BKM{pE_Eb_dJmK1ME;24cAIzGP!+DmgfAC4*QF2}DRa^tLJlVPw-5eo zg`JgfbV@H&<+wFB9DgCl$7z+*gjB{j9cmdk98{>~BEJWUAK4y+#wMFYvm;PU~ z0W@$bltK@$yE+6()RqxJkz{>Jbrpx%s>`fx{r0A28nyzH>ho?j+begEbPW90d<+1qmy!A{!(zL=@p8`CY2DtQI>1H{Bvizj+*>p z<1%#Dv%jz96xtv-x533`0`ENTf92jy4RG>^IGoggn#tf9U2!t^N*rbcfttI3nldlR zfizndb9ud@SfR&6CIg_B@!zI7sW7)$iRSP8sAbb5rA^F>j`BNC%6MWwy@7+G?z73{ 
z(A#`$6?L<*!#9uhh7logPK#^MXbBuu-FxkH~JX!3fwe+u#C?V*=0O|bt<))%CZvz+|!U7ffYIzTOYvtQkp-a(H9}yP!)mbEa zi+o$>@0CD~kL(|uPSBb&;Qis?kn=j2Sb}sxqV#-dPpaJRg?kS9lYTal?-@KAf&Usf zug)5>n~{&bJ~9|tV_vn#n+>lJN{%IUMROX`^4yZ zQVMMxno?rM+wjshP0WgqKC87>JRfrnFw@C0Cugy4Z^?J7CJ<1hJ_#VYgpFxhufl6U z+fPtD=ZWeW%|*^Y>I53~!|fp55;=ssew_W1gZ!aYHzRTDsC*3yz52c^1oJu%UZ)orP2-&I?7N3y#?Rdhj|uPLEwA| z!_kSU&4$16twpi(62)JX-ieJRPm8yZUbGTnwTox4w9i6Vl5w6?XG)lIq<;<|vk3k* z!*B6p|Emh{-(f*fa8 zmNfZkd)Dsw-y!@IufH5~wIiiQYm;dtU-0qrx%_(tibffffq8Z`G-RDY5{Lfz26jjO zwr{b>&#wIK((O(Jr$N%FsQ3jlgNLv*cHkESoVnV?CtdBB?}ykyG$Ympc2FRmQnKQ&YAg+TLVrUYPsqpKfq|47Sp^H==ogGrqb43p=#A=1Y#*^3Q~72L3Uw6oRdw>1 zNSkjHv$e$M5gbf~-`e#0O?WbxjxMt@2FYVx{crte7L##GKWmZ3ktKET3T}UvVW{#E zclc+81ahR*?P%?VAFt%wQA8j{Os&R(+DsZ#+Ac5S|F&Nm_h!)K35%YTa`t5YO_#zp z9}g1x=H!c?9*wdO7axULll7=Wj0S!6PTu*%W_Q%P3QrXGE7L>t|+n z1}r)%OXgEIZAo*u)Jadw6bSonW|}i&AN6?R+Sc;k1D#@4z)XZz%Tl{yUmhF@r3Py& zu_J^EV?u(t0TC)F5tLV_n*IqTE1;5--2=Y`sNKK5`I8a)TNNv8K*|wQ#3zUkTnv!V zZE}-x*@X(g{{FhjyV4!64|s30+uT^<7p6`J2zYE;xyaX*ba6!)h3ZnPG6F6 z_ns)TE7}r}AmlX3-LGw53>s+(8M1tPuS`Om_rq45+P45q63uk{DHD9?*L_4{m<*o= z75qC*hv}~gStfp0p%nNUJwVOju3@L)F)7q#!6oc1>*Y)=ILZR_-yS57($;wTb?L4; zVrgRGF<=+`bZbgUX>0^8%dYr95%UP>-(R?0RiF_*VZe8Gdt(AOPQ0#IDAp+%E}L9^ z=L62~Qu~1!pH*_YwilTK#EN!M8T22>Ups>|J;CpH1|U(=oh0Swn4oztRf$to0#8t> ze-RRL(FCjaB7%1AybOiU{=f?oQ#mm_(>A|cx$ht$sAr#U^XDc`=pFM89@Sn_2y=+6 z3cWGCy|d%-oH{s=g;t&h0>z#cMB%(cm8<1-0o4IaPXlUR3ewn1iY@p{tyvZJZZ*Y} zE&{@&agtg}8lh!^YBWo~$@yeGH(je*_-`Q02i!AF>K%H7A}IEo)68%P-xJi;@22?Z z!xjhn^V7CnRbR_tBt{36Reo6Bc*k4Lf@^=_`(X{z?Y_zRiPfM@$(FOs^}9y@0*rf$ zJJM(i04fg94KB1T1_xErv9u(RmtX+-Od(qFk3G&t@O6q?!Y+SqnMdC4HBu$y!fK8@ z@HN%AkgLm;{+U}2-U0;Obg$Y}8ai9QB09Bw_|&r4Ok4Zti1aB3)=9Id)C~X76~bRt zkXuH!n>w%^Ax(-JFV+H;I}ru^LKX(U!}=BctE7BdXhFhm8M8Y920}mp^LEY#_ex09 zJu*e!kRB6W){69U%^G0un^Y-;1kUFlqJ2&j2`b_QTUA{8U4KnB1ry7aDJQsl!yW9M zBLBwa#a2v?OgN27DL=G6)}I$il=l<$?p=?s`7Sfa=TToyAY-?cx?Az}P3eGP&8@SP zDWcu@8wo=xnPKsfQOFpB;(5+1$SOKO;3T+`10Bn42GgqnleoC405hN1E{AbWp=Gam 
zU7K*MSZT+-mDzTiahw9p$ulMX2AdzV(w4IXRD5ks?OXf0UNfEN1WG{t2Fl&H^->v| zSMa$_Ry1azjdbw@pxDw)V$9_;PUOvN+u{zrrUItip4**TrN97AY6UKQKdLK;0@u+j zmoFPqme}q1Tzt9+ch~W98GL$g6=sbD!L2I`vRQ*%6QR`Sb+Ukj8w9vm#^hxj6GH7; zXVpIm>8?3R3D|)H~2ZQ;kPyhe` literal 0 HcmV?d00001 From 12b19e31f9f2bd548401d9411ebb4ad840c54acc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 7 Feb 2024 04:40:50 +1100 Subject: [PATCH 0135/1088] Torch 2.2 (#157) * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update save.py * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update save.py * Update save.py * Torch 2.2.0 * Update save.py * mistral swa * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Fix SWA inference * Fix llm_int8_skip_modules * SWA inference * Update save.py * Update save.py * Update pyproject.toml * __version__ * __version__ * Update save.py * Update save.py * Update mistral.py --- pyproject.toml | 51 +++++++++++++++++++++++++++++++++++++++- unsloth/__init__.py | 1 - unsloth/models/llama.py | 23 ++++++++++++++---- 
unsloth/models/loader.py | 2 +- unsloth/save.py | 18 ++++++++++---- 5 files changed, 82 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c7368b276a..5bbcc3560d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,7 @@ classifiers = [ ] [tool.setuptools.dynamic] -version = {attr = "unsloth.__version__"} +version = {attr = "unsloth.models._utils.__version__"} [tool.setuptools] include-package-data = false @@ -62,6 +62,16 @@ cu121onlytorch211 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] +cu118onlytorch220 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu121onlytorch220 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] cu118 = [ "unsloth[huggingface]", "bitsandbytes", @@ -82,6 +92,16 @@ cu121_torch211 = [ "bitsandbytes", "unsloth[cu121onlytorch211]", ] +cu118_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118onlytorch220]", +] +cu121_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch220]", +] kaggle = [ "unsloth[huggingface]", ] @@ 
-110,6 +130,19 @@ colab_ampere_torch211 = [ "ninja", "flash-attn", ] +colab_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch220]", +] +colab_ampere_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch220]", + "packaging", + "ninja", + "flash-attn", +] cu118_ampere = [ "unsloth[huggingface]", "bitsandbytes", @@ -142,6 +175,22 @@ cu121_ampere_torch211 = [ "ninja", "flash-attn", ] +cu118_ampere_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118onlytorch220]", + "packaging", + "ninja", + "flash-attn", +] +cu121_ampere_torch220 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch220]", + "packaging", + "ninja", + "flash-attn", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c3baac21d4..66c10e28e7 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.1" import os import warnings import importlib diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 40e5e56e19..266d9f2f89 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -171,15 +171,28 @@ def LlamaAttention_fast_forward_inference( Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + # Handle sliding windows + sliding_window = getattr(self.config, "sliding_window", None) + if sliding_window is not None and kv_seq_len > sliding_window: + # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193 + slicing_tokens = 1 - sliding_window + Knn = Kn[:, :, slicing_tokens:, :]#.contiguous() + Vnn = Vn[:, :, slicing_tokens:, :]#.contiguous() + else: + Knn, Vnn = Kn, Vn + pass + # Grouped query attention if n_groups != 1: - _, _, cached_len, _ = Kn.shape - Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Vnn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + _, _, cached_len, _ = Knn.shape + Knn = Knn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vnn = Vnn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) - else: - Knn, Vnn = Kn, Vn + pass + # else: + # Knn, Vnn = Knn, Vnn + # pass # Attention A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:kv_seq_len]) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 07396313b3..674a5efa6f 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -128,7 +128,7 @@ def from_pretrained( "bnb_4bit_use_double_quant" : True, "llm_int8_enable_fp32_cpu_offload" : False, "llm_int8_has_fp16_weight" : False, - "llm_int8_skip_modules" : "null", + "llm_int8_skip_modules" : None, 
"llm_int8_threshold" : 6.0, "load_in_4bit" : True, "load_in_8bit" : False, diff --git a/unsloth/save.py b/unsloth/save.py index ae0f97a87e..b45ae99305 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -72,6 +72,7 @@ def print_quantization_methods(): pass + def _merge_lora(layer, name): if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)): @@ -85,9 +86,12 @@ def _merge_lora(layer, name): W = W.to(torch.float32).t() if A is not None: - sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) - W += sAB - if not torch.isfinite(W).all(): + # sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) + # W += sAB + W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) + # if not torch.isfinite(W).all(): + maximum_element = torch.max(W.min().abs(), W.max()) + if not torch.isfinite(maximum_element).item(): raise ValueError(f"Unsloth: Merge failed.\n{name} has some elements = infinity.") pass W = W.t().to(dtype) @@ -373,7 +377,7 @@ def unsloth_save_model( # elif (max_ram - W.nbytes) > 0: # # Save to CPU memory # logger.warning_once(f"We will save to RAM and not VRAM now.") - # state_dict[name] = W.to("cpu", non_blocking = True) + # state_dict[name] = W.to("cpu", non_blocking = True, copy = True) # max_ram = max(max_ram - W.nbytes, 0) else: # Save to Disk @@ -579,9 +583,11 @@ def save_to_gguf( f"--outfile {final_location} "\ f"--outtype {first_conversion} --concurrency {n_cpus}" - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: print(line.decode("utf-8"), flush = True, end = "") + if sp.returncode is not None and sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, sp.args) pass # Check if quantization succeeded! 
@@ -609,6 +615,8 @@ def save_to_gguf( with subprocess.Popen(command, shell = True, stderr = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stderr: print(line.decode("utf-8"), flush = True, end = "") + if sp.returncode is not None and sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, sp.args) pass # Check if quantization succeeded! From 5bc38697511fd06af5c53f9172ac4e25c9916e5a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 8 Feb 2024 03:40:28 +1100 Subject: [PATCH 0136/1088] Nightly (#161) * Update fast_lora.py * Update utils.py * Update llama.py * Update fast_lora.py * Update swiglu.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. * Update llama.py * Works? * Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update 
utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * 
Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update save.py * Update save.py * Torch 2.2.0 * Update save.py * mistral swa * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Fix SWA inference * Fix llm_int8_skip_modules * SWA inference * Update save.py * Update save.py * Update pyproject.toml * __version__ * __version__ * Update save.py * Update save.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 266d9f2f89..d35a35c695 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -183,8 +183,8 @@ def LlamaAttention_fast_forward_inference( pass # Grouped query attention + _, _, cached_len, _ = Knn.shape if n_groups != 1: - _, _, cached_len, _ = Knn.shape Knn = Knn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Vnn = Vnn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) @@ -195,7 +195,7 @@ def LlamaAttention_fast_forward_inference( # pass # Attention - A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:kv_seq_len]) + A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) A *= self.scalar A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch.matmul(A, Vnn, out = Qn) From 
7e7f5f355f1031a4dd9a6ad06016670038290d2d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 8 Feb 2024 13:11:54 +1100 Subject: [PATCH 0137/1088] Update README.md (#162) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3e56ec1307..a285df620d 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ All notebooks are **beginner friendly**! Colab provides a free GPU. Kaggle provi | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | -| **Mistral 7b** 2xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster | 60% less | +| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster | 60% less | - This [conversational notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for ShareGPT ChatML datatsets. - Our [raw text notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for text completion. 
From ba3fa461098d6461d66493cfe4d206eb1f1dfa6b Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 9 Feb 2024 03:51:59 +1100 Subject: [PATCH 0138/1088] Update mapper.py --- unsloth/models/mapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index c8c73dce10..1c90a459b8 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -28,7 +28,7 @@ "meta-llama/Llama-2-7b-hf", ), "unsloth/llama-2-13b-bnb-4bit" : ( - "unsloth/llama-13-7b", + "unsloth/llama-2-13b", "meta-llama/Llama-2-13b-hf", ), "unsloth/codellama-34b-bnb-4bit" : ( From 4a95362cc3ca61e42882361a3b8a1b2ef88f87e5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 9 Feb 2024 15:49:09 +1100 Subject: [PATCH 0139/1088] Update README.md (#164) --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a285df620d..69fb87f110 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,8 @@ ## ✨ Finetune for Free -All notebooks are **beginner friendly**! Colab provides a free GPU. Kaggle provides 30 hours for free per week. +All notebooks are **beginner friendly**! Easily add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF / vLLM / uploaded to HuggingFace. There's optional data prep, training adjustments and more! + | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| | **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | @@ -26,10 +27,12 @@ All notebooks are **beginner friendly**! Colab provides a free GPU. 
Kaggle provi | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | -| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster | 60% less | +| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 60% less | -- This [conversational notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for ShareGPT ChatML datatsets. -- Our [raw text notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for text completion. +- This [conversational notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for ShareGPT ChatML datatsets. +- Our [raw text notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for text completion. +- Colab provides a free GPU sometimes. Kaggle has 30 hrs free per week on a 12 hr running cap. +- \* Kaggle has 2x T4s, but we use 1. Due to 2x GPU overhead, 1 T4 is 5x faster than 2x T4. Apples to apples, Unsloth is 2x faster. ## 🦥 Unsloth.ai News - 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO. 
From e091bca34ad5df406a693685ea576366d79636f8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 9 Feb 2024 15:59:17 +1100 Subject: [PATCH 0140/1088] Update README.md (#165) --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 69fb87f110..45b7525554 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ ## ✨ Finetune for Free -All notebooks are **beginner friendly**! Easily add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF / vLLM / uploaded to HuggingFace. There's optional data prep, training adjustments and more! +All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| @@ -27,17 +27,17 @@ All notebooks are **beginner friendly**! 
Easily add your dataset, click "Run All | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | -| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 60% less | +| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | - This [conversational notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for ShareGPT ChatML datatsets. - Our [raw text notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for text completion. - Colab provides a free GPU sometimes. Kaggle has 30 hrs free per week on a 12 hr running cap. -- \* Kaggle has 2x T4s, but we use 1. Due to 2x GPU overhead, 1 T4 is 5x faster than 2x T4. Apples to apples, Unsloth is 2x faster. +- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. Use Colab as Kaggle takes 10 mins to install. ## 🦥 Unsloth.ai News - 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO. - 📣 [TinyLlama 1.1b](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) on 3T tokens now works. -- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face! We're in 🤗Hugging Face's official docs! 
Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth). +- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face, and we're in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth). - 📣 Now supports **Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek** and their derived models (**Open Hermes** etc). Llama 7, 13, 70b; CodeLlama 7, 13, 34, 70b; Yi 6, 34b are all supported! - 📣 **Download models 4x faster** from 🤗Hugging Face! Eg: `unsloth/mistral-7b-bnb-4bit` See our [HF collection](https://huggingface.co/collections/unsloth/load-4bit-models-4x-faster-659042e3a41c3cbad582e734) for more! From 7c4620977d968533c65ba3e7a717e2a0f9445d4f Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 13 Feb 2024 08:28:42 +0100 Subject: [PATCH 0141/1088] add HF tagging in unsloth (#170) --- unsloth/models/loader.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 674a5efa6f..f11a34a1b1 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -118,6 +118,10 @@ def from_pretrained( *args, **kwargs, ) + # in case the model supports tagging, add the unsloth tag. + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth"]) + if load_in_4bit: # Fix up bitsandbytes config quantization_config = \ From a030e802030d4619ba247d45c5819fb59e9addb3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 15 Feb 2024 00:07:42 +1100 Subject: [PATCH 0142/1088] Prelim Feb release (#173) * Works? 
* Update pyproject.toml * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Swiglu * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update swiglu.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * attention_mask * Update llama.py * Update llama.py * labels * Update mistral.py * Update llama.py * attention mask * Update save.py * Update save.py * Update mistral.py * attention mask * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update dpo.py * Patch saving * Update save.py * Update save.py * patch_saving_functions * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * print * Mistral patch * Update mistral.py * Update save.py * saving * Update llama.py * Update llama.py * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update save.py * Update save.py * Torch 2.2.0 * Update save.py * mistral swa * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Fix SWA inference * Fix llm_int8_skip_modules * SWA inference * Update save.py * Update save.py * Update pyproject.toml * __version__ * __version__ * Update save.py * Update save.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * 
Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Chat Templates * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer * Update chat_templates.py * Saving, LlamaRotaryEmbedding issues * Update llama.py * Update mistral.py --- README.md | 2 +- pyproject.toml | 1 + unsloth/__init__.py | 1 + unsloth/chat_templates.py | 384 ++++++++++++++++++++++++++++++++++++++ unsloth/models/_utils.py | 10 +- unsloth/models/llama.py | 78 +++++++- unsloth/models/mistral.py | 8 + unsloth/save.py | 16 +- 8 files changed, 494 insertions(+), 6 deletions(-) create mode 100644 unsloth/chat_templates.py diff --git a/README.md b/README.md index 45b7525554..8a6c970fb0 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | | **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | -- This [conversational notebook](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) is useful for ShareGPT ChatML datatsets. +- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. - Our [raw text notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for text completion. - Colab provides a free GPU sometimes. Kaggle has 30 hrs free per week on a 12 hr running cap. - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. Use Colab as Kaggle takes 10 mins to install. 
diff --git a/pyproject.toml b/pyproject.toml index 5bbcc3560d..2fa2cb103b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ huggingface = [ "peft>=0.7.1", "tqdm", "psutil", + "wheel>=0.42.0", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 66c10e28e7..d052b333f2 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -82,3 +82,4 @@ from .models import * from .save import * +from .chat_templates import * diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py new file mode 100644 index 0000000000..eb61056320 --- /dev/null +++ b/unsloth/chat_templates.py @@ -0,0 +1,384 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + "get_chat_template", + "test_chat_templates", +] + +from transformers import StoppingCriteria, StoppingCriteriaList +from torch import LongTensor, FloatTensor +from transformers.models.llama.modeling_llama import logger +from .models._utils import patch_tokenizer + +CHAT_TEMPLATES = {} + +# Unsloth efficient template leverages from Zephyr +unsloth_template = \ + "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{{ messages[0]['content'] + '\n' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% else %}"\ + "{{ 'You are a helpful assistant to the user\n' }}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '>>> User: ' + message['content'] + '\n' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ '>>> Assistant: ' + message['content'] + eos_token + '\n' }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '>>> Assistant: ' }}"\ + "{% endif %}" +unsloth_eos_token = "eos_token" +CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token,) + + +# Zephyr has no BOS! +zephyr_template = \ + "{% for message in messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '<|user|>\n' + message['content'] + eos_token + '\n' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ '<|assistant|>\n' + message['content'] + eos_token + '\n' }}"\ + "{% else %}"\ + "{{ '<|system|>\n' + message['content'] + eos_token + '\n' }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '<|assistant|>\n' }}"\ + "{% endif %}" +zephyr_eos_token = "eos_token" +CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token,) + + +# ChatML has no BOS and not EOS! Rather <|im_start|> and <|im_end|> acts as BOS / EOS. 
+chatml_template = \ + "{% for message in messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}"\ + "{% else %}"\ + "{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '<|im_start|>assistant\n' }}"\ + "{% endif %}" +chatml_eos_token = "<|im_end|>" +CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token,) + + +# Mistral Instruct doesn't allow system prompts, so we append it to the user message. +mistral_template = \ + "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{% if messages[1]['role'] == 'user' %}"\ + "{{ '[INST] ' + messages[0]['content'] + ' ' + messages[1]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[2:] %}"\ + "{% else %}"\ + "{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% endif %}"\ + "{% else %}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '[INST] ' + message['content'] + ' [/INST]' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ message['content'] + eos_token }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}" +mistral_eos_token = "eos_token" +CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token,) + + +# Adds BOS to every convo! And weird <> system messages. 
+llama_template = \ + "{% if messages[0]['role'] == 'system' %}"\ + "{% if messages[1]['role'] == 'user' %}"\ + "{{ bos_token + '[INST] <>\n' + messages[0]['content'] + '\n<>\n\n' + messages[1]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[2:] %}"\ + "{% else %}"\ + "{{ bos_token + '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% endif %}"\ + "{% else %}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ ' ' + message['content'].strip() + ' ' + eos_token }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}" +llama_eos_token = "eos_token" +CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token,) + + +# https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template +vicuna_template = \ + "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{{ messages[0]['content'] + ' ' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% else %}"\ + "{{ 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' 
+ ' ' }}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ 'USER: ' + message['content'] + ' ' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ 'ASSISTANT: ' + message['content'] + eos_token }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ 'ASSISTANT:' }}"\ + "{% endif %}" +vicuna_eos_token = "eos_token" +CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token,) + + +# https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template +vicuna_old_template = \ + "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{{ messages[0]['content'] + '\n' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% else %}"\ + "{{ 'A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\\'s questions.' 
+ '\n' }}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '### Human: ' + message['content'] + '\n' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ '### Assistant: ' + message['content'] + eos_token + '\n' }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '### Assistant:' }}"\ + "{% endif %}" +vicuna_old_eos_token = "eos_token" +CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token,) + + +# https://github.com/tatsu-lab/stanford_alpaca Changed for multi-turn convos +alpaca_template = \ + "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{{ messages[0]['content'] + '\n\n' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% else %}"\ + "{{ 'Below are some instructions that describes some tasks. Write responses that appropriately completes each request.\n\n' }}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '### Instruction:\n' + message['content'] + '\n\n' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ '### Response:\n' + message['content'] + eos_token + '\n\n' }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '### Response:\n' }}"\ + "{% endif %}" +alpaca_eos_token = "eos_token" +CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token,) + + +def get_chat_template( + tokenizer, + chat_template = "chatml", + mapping = {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant"}, + map_eos_token = True, +): + if map_eos_token is False: + assert("Unsloth: Can only map new tokens to EOS for now. 
Adding new tokens is not yet supported.") + pass + + old_padding_side = tokenizer.padding_side + + if type(chat_template) in (list, tuple): + chat_template, stop_word = chat_template + assert(type(chat_template) is str) + assert(type(stop_word) is str) + + elif type(chat_template) is str: + + chat_template, stop_word = CHAT_TEMPLATES[chat_template] + + if stop_word != "eos_token": + logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") + + # Replaces the old EOS token with a new one. + # Useful for ChatML <|im_end|> for example. + # Usually we train 2 more tokens <|im_start|> and <|im_end|> + # But training the lm_head and embeddings are slow! + # This is a HACK! + # Idea from https://huggingface.co/cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser + string_vocab = tokenizer._tokenizer.to_str() + string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) + new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) + tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + pass + else: + raise TypeError( + f"Unsloth: `chat_template` must be a tuple of (your_template, eos_token,) or one of\n"\ + f"{CHAT_TEMPLATES.keys()}" + ) + pass + + # For ShareGPT role -> from and content -> value + chat_template = chat_template\ + .replace("'role'", "'" + mapping["role"] + "'")\ + .replace("'content'", "'" + mapping["content"] + "'")\ + .replace("'user'", "'" + mapping["user"] + "'")\ + .replace("'assistant'", "'" + mapping["assistant"] + "'") + + _, tokenizer = patch_tokenizer(model = None, tokenizer = tokenizer) + tokenizer.padding_side = old_padding_side + tokenizer.chat_template = chat_template + + #stopping_criteria = create_stopping_criteria(tokenizer, stop_word) + + return tokenizer#, stopping_criteria +pass + + +def create_stopping_criteria(tokenizer, stop_word = "eos_token"): + class StoppingCriteriaSub(StoppingCriteria): + __slots__ = "stop_token", "single_match", "length", + + def 
__init__(self, stops = "eos_token", device = "cuda", encounters = 1): + super().__init__() + if stops == "eos_token": + self.stop_token = torch.tensor(tokenizer.eos_token_id, device = "cuda") + self.length = 1 + else: + self.stop_token = tokenizer(["\n" + stops], add_special_tokens = False, return_tensors = "pt") + self.stop_token = self.stop_token.input_ids.ravel()[1:].to("cuda") + self.length = self.stop_token.shape[0] + pass + self.single_match = self.length == 1 + pass + + def __call__(self, input_ids: LongTensor, scores: FloatTensor) -> bool: + input_ids = input_ids.ravel() + last_token = input_ids[-1] + if self.single_match and (last_token == self.stop_token): return True + + if input_ids.shape[0] >= self.length and \ + (input_ids[-self.length:] == self.stop_token).all(): return True + return False + pass + pass + stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops = stop_word)]) + return stopping_criteria +pass + + +def test_chat_templates(): + messages = [ + {"role": "system","content": " You are a friendly chatbot.",}, + {"role": "user", "content": "What is 2+2?"}, + {"role": "assistant", "content": "It's 4."}, + {"role": "user", "content": " But 2+2 is equal to 5. "}, + {"role": "assistant", "content": "No I'm sure its 4."}, + {"role": "user", "content": " No it's 100% 5! 
"}, + ] + + from transformers import AutoTokenizer + template = zephyr_template + correct_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") + correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) + + template = chatml_template + correct_tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B") + correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) + + template = mistral_template + correct_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") + correct_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) + + template = llama_template + correct_tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-2-7b-chat") + correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) + + try: + from fastchat.conversation import get_conv_template + except: + os.system("pip -qqq install git+https://github.com/lm-sys/FastChat.git") + from fastchat.conversation import get_conv_template + correct_prompt = 
get_conv_template("vicuna_v1.1") + for j in range(len(messages)-1): + correct_prompt.append_message(correct_prompt.roles[j%2==1], messages[j+1]["content"]) + correct_prompt.append_message(correct_prompt.roles[1], "") + correct_prompt = tokenizer.bos_token + correct_prompt.get_prompt() + + template = vicuna_template + correct_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5") + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) + + try: + from fastchat.conversation import get_conv_template + except: + os.system("pip -qqq install git+https://github.com/lm-sys/FastChat.git") + from fastchat.conversation import get_conv_template + correct_prompt = get_conv_template("zero_shot") + for j in range(len(messages)-1): + correct_prompt.append_message(correct_prompt.roles[j%2==1], messages[j+1]["content"]) + correct_prompt.append_message(correct_prompt.roles[1], "") + correct_prompt = tokenizer.bos_token + correct_prompt.get_prompt() + + template = vicuna_old_template + correct_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5") + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + # We add ourselves + assert(correct_prompt == our_prompt.replace("", "")) +pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 617b8509d9..c21da4e5a4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -16,6 +16,7 @@ from typing import Union, Optional, List, Any, Callable import warnings warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger from transformers import 
AutoTokenizer @@ -116,21 +117,24 @@ def make_inputs_require_grad(module, input, output): def patch_tokenizer(model, tokenizer): - model.config.update({"unsloth_version" : __version__}) + if model is not None: + model.config.update({"unsloth_version" : __version__}) if not hasattr(tokenizer, "pad_token") or tokenizer.pad_token is None: # Fixes https://github.com/unslothai/unsloth/issues/5 if hasattr(tokenizer, "unk_token"): tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) tokenizer.pad_token = tokenizer.unk_token else: + name = model.config._name_or_path if model is not None else "Model" logger.warning_one( - f"{model.config._name_or_path} does not have a padding or unknown token!\n"\ + f"{name} does not have a padding or unknown token!\n"\ f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." ) assert(hasattr(tokenizer, "eos_token")) tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) tokenizer.pad_token = tokenizer.eos_token - config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) + if model is not None: + config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) pass return model, tokenizer pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d35a35c695..8a37be1b8e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -540,7 +540,7 @@ def LlamaModel_fast_forward( hidden_states = inputs_embeds - if past_key_values is None and self.gradient_checkpointing and self.training: + if past_key_values is None and self.training: use_cache = False # if use_cache: # logger.warning_once( @@ -776,6 +776,73 @@ def PeftModelForCausalLM_fast_forward( pass +# Solves https://github.com/unslothai/unsloth/issues/168 +# Static KV Cache was introduced in 4.38.0, causing training to be much slower. +# Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. 
+# https://github.com/huggingface/transformers/pull/27931 +# https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py +class LlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + pass + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + pass + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + pass +pass + + +class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with linear scaling. 
Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + pass + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + pass +pass + + class FastLlamaModel: @staticmethod @@ -787,6 +854,15 @@ def pre_patch(): LlamaModel .forward = LlamaModel_fast_forward LlamaForCausalLM .forward = LlamaForCausalLM_fast_forward PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + + # Solves https://github.com/unslothai/unsloth/issues/168 + # Static KV Cache was introduced in 4.38.0, causing training to be much slower. + # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. 
+ # https://github.com/huggingface/transformers/pull/27931 + # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = LlamaRotaryEmbedding + transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding = LlamaLinearScalingRotaryEmbedding return pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index bc00e7a982..615e43643d 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -271,6 +271,14 @@ def pre_patch(): MistralModel .forward = LlamaModel_fast_forward MistralForCausalLM .forward = MistralForCausalLM_fast_forward PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward + + # Solves https://github.com/unslothai/unsloth/issues/168 + # Static KV Cache was introduced in 4.38.0, causing training to be much slower. + # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. + # https://github.com/huggingface/transformers/pull/27931 + # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py + import transformers.models.mistral.modeling_mistral + transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding = LlamaRotaryEmbedding return pass diff --git a/unsloth/save.py b/unsloth/save.py index b45ae99305..dc83f27eca 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -41,6 +41,7 @@ "input_layernorm", "post_attention_layernorm", ) +# https://github.com/ggerganov/llama.cpp/blob/master/examples/quantize/quantize.cpp#L19 # From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html ALLOWED_QUANTS = \ { @@ -59,10 +60,16 @@ "q4_0" : "Original quant method, 4-bit.", "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. 
However has quicker inference than q5 models.", "q4_k_s" : "Uses Q4_K for all tensors", + "q4_k" : "alias for q4_k_m", + "q5_k" : "alias for q5_k_m", "q5_0" : "Higher accuracy, higher resource usage and slower inference.", "q5_1" : "Even higher accuracy, resource usage and slower inference.", "q5_k_s" : "Uses Q5_K for all tensors", "q6_k" : "Uses Q8_K for all tensors", + "iq2_xxs" : "2.06 bpw quantization", + "iq2_xs" : "2.31 bpw quantization", + "iq3_xxs" : "3.06 bpw quantization", + "q3_k_xs" : "3-bit extra small quantization", } def print_quantization_methods(): @@ -246,7 +253,8 @@ def unsloth_save_model( # If push_to_hub, we must remove the .../ part of a repo if push_to_hub and "/" in save_directory: - new_save_directory = save_directory[save_directory.find("/"):] + # +1 solves absolute path issues + new_save_directory = save_directory[save_directory.find("/")+1:] logger.warning_once( f"Unsloth: You are pushing to hub, but you passed your HF username.\n"\ @@ -861,10 +869,16 @@ def unsloth_save_pretrained_gguf( "q4_0" : "Original quant method, 4-bit.", "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. 
However has quicker inference than q5 models.", "q4_k_s" : "Uses Q4_K for all tensors", + "q4_k" : "alias for q4_k_m", + "q5_k" : "alias for q5_k_m", "q5_0" : "Higher accuracy, higher resource usage and slower inference.", "q5_1" : "Even higher accuracy, resource usage and slower inference.", "q5_k_s" : "Uses Q5_K for all tensors", "q6_k" : "Uses Q8_K for all tensors", + "iq2_xxs" : "2.06 bpw quantization", + "iq2_xs" : "2.31 bpw quantization", + "iq3_xxs" : "3.06 bpw quantization", + "q3_k_xs" : "3-bit extra small quantization", """ if tokenizer is None: raise ValueError("Unsloth: Saving to GGUF must have a tokenizer.") From 3e4c5a323c16bbda2c92212b790073c4e99c2a55 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 21 Feb 2024 03:58:59 +1100 Subject: [PATCH 0143/1088] Feb 2024 Release (#187) * Fast inference repatch * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update mistral.py * Update __init__.py * Fix inference * Update mistral.py * fast lm_head * Remove fast path * Update rope_embedding.py * Update loader.py * LlamaAttention_fast_forward_inference * if past_key_value is not None and q_len == 1: * revert inference * Update loader.py * past_key_value * Update llama.py * Update llama.py * Fix SDPA * Update llama.py * padding * Inference * Update llama.py * Revert * Update mistral.py * faster inference * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * inference * Update llama.py * Update utils.py * faster inference * Update llama.py * revert * lm_head * Update llama.py * inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * faster inference * Update llama.py * fast inference * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * 
Update llama.py * Update llama.py * Update llama.py * torch compile * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * fast inference + saving config.json * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * fast inference again * more temp matrices * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * fast inference * Update mistral.py * Update llama.py * SDPA * attention_mask * New version * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update save.py * Update save.py * Torch 2.2.0 * Update save.py * mistral swa * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Fix SWA inference * Fix llm_int8_skip_modules * SWA inference * Update save.py * Update save.py * Update pyproject.toml * __version__ * __version__ * Update save.py * Update save.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Chat Templates * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer * Update 
chat_templates.py * Saving, LlamaRotaryEmbedding issues * Update llama.py * Update mistral.py * Update mapper.py * Fix RoPE precision issues * Bugs * saving bugs * Update llama.py * readme * spaces * spaces * globals * slash * slashes * spaces * apache * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * trainer * Update save.py * Update pyproject.toml * install * Update save.py * Update save.py * Update save.py * Update save.py * PeftModel token + saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * linking * llama.cpp bugs * Update save.py * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original --- README.md | 21 ++- pyproject.toml | 31 ++-- unsloth/__init__.py | 33 +++- unsloth/models/_utils.py | 1 + unsloth/models/llama.py | 182 ++++++++++++++++++-- unsloth/models/loader.py | 10 +- unsloth/models/mapper.py | 4 + unsloth/models/mistral.py | 134 +++++++++++++++ unsloth/save.py | 343 ++++++++++++++++++++++++++------------ 9 files changed, 611 insertions(+), 148 deletions(-) diff --git a/README.md b/README.md index 8a6c970fb0..98f83e09c7 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | - This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. 
-- Our [raw text notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is useful for text completion. +- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. - Colab provides a free GPU sometimes. Kaggle has 30 hrs free per week on a 12 hr running cap. - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. Use Colab as Kaggle takes 10 mins to install. @@ -86,9 +86,12 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ### Conda Installation Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs. ```bash -conda install pytorch torchvision torchaudio pytorch-cuda=<12.1/11.8> -c pytorch -c nvidia +conda create --name unsloth_env python=3.10 +conda activate unsloth_env -conda install xformers -c xformers -y +conda install pytorch cudatoolkit torchvision torchaudio pytorch-cuda=<12.1/11.8> -c pytorch -c nvidia + +conda install xformers -c xformers pip install bitsandbytes @@ -141,6 +144,7 @@ pip install --upgrade pip ``` ## 📜 Documentation +- Go to our [Wiki page](https://github.com/unslothai/unsloth/wiki) for saving to GGUF, checkpointing, evaluation and more! - We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! - We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! 
@@ -162,7 +166,8 @@ fourbit_models = [ "unsloth/llama-2-13b-bnb-4bit", "unsloth/codellama-34b-bnb-4bit", "unsloth/tinyllama-bnb-4bit", -] +] # Go to https://huggingface.co/unsloth for more 4-bit models! + # Load Llama model model, tokenizer = FastLanguageModel.from_pretrained( model_name = "unsloth/mistral-7b-bnb-4bit", # Supports Llama, Mistral - replace this! @@ -183,6 +188,8 @@ model = FastLanguageModel.get_peft_model( use_gradient_checkpointing = True, random_state = 3407, max_seq_length = max_seq_length, + use_rslora = False, # We support rank stabilized LoRA + loftq_config = None, # And LoftQ ) trainer = SFTTrainer( @@ -205,6 +212,12 @@ trainer = SFTTrainer( ), ) trainer.train() + +# Go to https://github.com/unslothai/unsloth/wiki for advanced tips like +# (1) Saving to GGUF / merging to 16bit for vLLM +# (2) Continued training from a saved LoRA adapter +# (3) Adding an evaluation loop / OOMs +# (4) Cutomized chat templates ``` diff --git a/pyproject.toml b/pyproject.toml index 2fa2cb103b..049711276d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ huggingface = [ "tqdm", "psutil", "wheel>=0.42.0", + "numpy", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -83,22 +84,22 @@ cu121 = [ "bitsandbytes", "unsloth[cu121only]", ] -cu118_torch211 = [ +cu118-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118onlytorch211]", ] -cu121_torch211 = [ +cu121-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch211]", ] -cu118_torch220 = [ +cu118-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118onlytorch220]", ] -cu121_torch220 = [ +cu121-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch220]", @@ -112,18 +113,18 @@ conda = [ colab = [ "unsloth[cu121]", ] -colab_ampere = [ +colab-ampere = [ "unsloth[cu121]", "packaging", "ninja", "flash-attn", ] -colab_torch211 = [ 
+colab-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch211]", ] -colab_ampere_torch211 = [ +colab-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch211]", @@ -131,12 +132,12 @@ colab_ampere_torch211 = [ "ninja", "flash-attn", ] -colab_torch220 = [ +colab-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch220]", ] -colab_ampere_torch220 = [ +colab-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch220]", @@ -144,7 +145,7 @@ colab_ampere_torch220 = [ "ninja", "flash-attn", ] -cu118_ampere = [ +cu118-ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118only]", @@ -152,7 +153,7 @@ cu118_ampere = [ "ninja", "flash-attn", ] -cu121_ampere = [ +cu121-ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121only]", @@ -160,7 +161,7 @@ cu121_ampere = [ "ninja", "flash-attn", ] -cu118_ampere_torch211 = [ +cu118-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118onlytorch211]", @@ -168,7 +169,7 @@ cu118_ampere_torch211 = [ "ninja", "flash-attn", ] -cu121_ampere_torch211 = [ +cu121-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch211]", @@ -176,7 +177,7 @@ cu121_ampere_torch211 = [ "ninja", "flash-attn", ] -cu118_ampere_torch220 = [ +cu118-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118onlytorch220]", @@ -184,7 +185,7 @@ cu118_ampere_torch220 = [ "ninja", "flash-attn", ] -cu121_ampere_torch220 = [ +cu121-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121onlytorch220]", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d052b333f2..9dce29cd83 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -59,14 +59,38 @@ import bitsandbytes as bnb import triton from triton.common.build import libcuda_dirs +import os +import re +import numpy as np +import subprocess + try: cdequantize_blockwise_fp32 = 
bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: warnings.warn( - "Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ + "Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ ) - os.system("ldconfig /usr/lib64-nvidia") + + if os.path.exists("/usr/lib64-nvidia"): + os.system("ldconfig /usr/lib64-nvidia") + elif os.path.exists("/usr/local"): + # Sometimes bitsandbytes cannot be linked properly in Runpod for example + possible_cudas = subprocess.check_output(["ls", "-al", "/usr/local"]).decode("utf-8").split("\n") + find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$") + possible_cudas = [find_cuda.search(x) for x in possible_cudas] + possible_cudas = [x.group(1) for x in possible_cudas if x is not None] + + # Try linking cuda folder, or everything in local + if len(possible_cudas) == 0: + os.system(f"ldconfig /usr/local/") + else: + find_number = re.compile(r"([\d\.]{2,})") + latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] + latest_cuda = possible_cudas[latest_cuda] + os.system(f"ldconfig /usr/local/{latest_cuda}") + pass + importlib.reload(bnb) importlib.reload(triton) try: @@ -75,9 +99,10 @@ cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: - raise ImportError("CUDA is not linked properly.\n"\ + raise ImportError("Unsloth: CUDA is not linked properly.\n"\ "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ - "You need to run in your terminal `ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.") + "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ + "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.") pass from .models import * diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c21da4e5a4..c4de7198b9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -17,6 +17,7 @@ import warnings 
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8a37be1b8e..3ca6291fd5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -55,6 +55,7 @@ from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit from peft.tuners.lora import Linear4bit as Peft_Linear4bit from ..save import patch_saving_functions +import re, os, inspect, math, sys def original_apply_qkv(self, X): @@ -782,30 +783,33 @@ def PeftModelForCausalLM_fast_forward( # https://github.com/huggingface/transformers/pull/27931 # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py class LlamaRotaryEmbedding(torch.nn.Module): + # Fixes https://github.com/huggingface/transformers/pull/28837 + # https://github.com/microsoft/DeepSpeed/issues/4932 + # The precision of RoPE buffers is not correct, so we cast to int64. def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() - self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base - inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) - self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. 
- self._set_cos_sin_cache( - seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() - ) + self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): + # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and + # in FP32. They are applied (multiplied) in FP32 as well. self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) + ) + t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() - freqs = torch.outer(t, self.inv_freq) + freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + self.register_buffer("cos_cached", emb.cos().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) pass def forward(self, x, seq_len=None): @@ -823,7 +827,9 @@ def forward(self, x, seq_len=None): class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" - + # Fixes https://github.com/huggingface/transformers/pull/28837 + # https://github.com/microsoft/DeepSpeed/issues/4932 + # The precision of RoPE buffers is not correct, so we cast to int64. 
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) @@ -831,14 +837,17 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) + ) + t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() t = t / self.scaling_factor - freqs = torch.outer(t, self.inv_freq) + freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + self.register_buffer("cos_cached", emb.cos().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) pass pass @@ -954,6 +963,125 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass + # Patch Trainer + from transformers.trainer import Trainer + try: + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + inner_training_loop = inspect.getsource(Trainer._inner_training_loop) + Trainer._original_training_loop = inner_training_loop + else: + inner_training_loop = Trainer._original_training_loop + except: + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person 
team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + ) + pass + + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in inner_training_loop: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) + + start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] + end = inner_training_loop.find("\n\n", start) + original_debug = inner_training_loop[start:end] + spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] + front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + + debug_info = """debug_info = \\ + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' + logger.warning_once(debug_info)""" + + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace(original_debug, debug_info) + + debug_info = """n_total_devices = total_train_batch_size // \\ + args.gradient_accumulation_steps // self._train_batch_size + if n_total_devices > 2: + logger.warning_once( + "Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "The OSS Apache 2 license only supports four GPUs - 
please obtain a commercial license from our website.\\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + ) + debug_info =""" + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) + + front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) + inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = tpu_spmd_dataloader(train_dataloader)", + "raise RuntimeError('Unsloth: TPUs are not yet supported!')" + ) + inner_training_loop = inner_training_loop.replace( + "self.accelerator.free_memory()", + "self.accelerator.free_memory()\n" + \ + front_spaces + "if self.is_deepspeed_enabled:"\ + "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, + ) + + check_batches = """train_dataloader = self.get_train_dataloader() + ga = args.gradient_accumulation_steps + bsz = self._train_batch_size + total_batches = bsz * ga * args.world_size + n_total_devices = total_batches // ga // bsz + if n_total_devices > 2: + logger.warning_once( + "Please consider a commercial license - Unsloth was designed for the GPU Poor.\\n" + "The OSS currently works on 4 GPUs - we're a 2 person team, so please help fund\\n" + "our development costs by supporting us through Ko-fi or buying a license! 
Thanks!", + ) + divisor = n_total_devices / 2 + bsz = self._train_batch_size = max(int(bsz / divisor), 1) + if total_batches // ga // bsz > 2: + divisor = n_total_devices / 2 + ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" + check_batches = check_batches.split('\n') + check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = self.get_train_dataloader()", + check_batches, 1, + ) + inner_training_loop = inner_training_loop.replace( + "_inner_training_loop", + "_fast_inner_training_loop", 1, + ) + exec(inner_training_loop, globals()) + + Trainer._inner_training_loop = _fast_inner_training_loop + inner_training_loop = inner_training_loop.replace( + "is_torch_tpu_available()", + "False", + ) + if "n_total_devices >" not in inner_training_loop: + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", + ) + pass + inner_training_loop = inner_training_loop.replace( + "is_sagemaker_mp_enabled()", + "False", + ) + Trainer._inner_training_loop = _fast_inner_training_loop + # Save max_seq_length model.max_seq_length = max_position_embeddings internal_model = model @@ -1073,7 +1201,7 @@ def get_peft_model( signature = str(inspect.signature(LoraConfig)) SUPPORTS_LOFTQ = "loftq_config" in signature SUPPORTS_RSLORA = "use_rslora" in signature - + assert(max_seq_length <= model.max_seq_length) if lora_dropout != 0: @@ -1200,6 +1328,28 @@ def patch_peft_model( model.peft_config[active_adapter].revision = f"unsloth" pass + from transformers.trainer import Trainer + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + ) + pass + + # Fix loftq issues + # loftq_config must not = None, but rather {} + all_configs = model.peft_config + for key, current_config in all_configs.items(): + if hasattr(current_config, "loftq_config") and current_config.loftq_config is None: + new_args = current_config.__dict__ + new_args["loftq_config"] = {} + current_config = current_config.__class__(**new_args) + all_configs[key] = current_config + pass + pass + # Do patching n_mlp = 0 n_qkv = 0 diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f11a34a1b1..e4b3561deb 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -118,9 +118,13 @@ def from_pretrained( *args, **kwargs, ) - # in case the model supports tagging, add the unsloth tag. + # In case the model supports tagging, add the unsloth tag. 
if hasattr(model, "add_model_tags"): - model.add_model_tags(["unsloth"]) + model.add_model_tags(["unsloth",]) + pass + if hasattr(tokenizer, "add_model_tags"): + tokenizer.add_model_tags(["unsloth",]) + pass if load_in_4bit: # Fix up bitsandbytes config @@ -143,7 +147,7 @@ def from_pretrained( if is_peft: # Now add PEFT adapters - model = PeftModel.from_pretrained(model, old_model_name) + model = PeftModel.from_pretrained(model, old_model_name, token = token) # Patch it as well! model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 1c90a459b8..323358fff1 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -42,6 +42,10 @@ "unsloth/tinyllama", "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", ), + "unsloth/tinyllama-chat-bnb-4bit" : ( + "unsloth/tinyllama-chat", + "TinyLlama/TinyLlama-1.1B-Chat-v1.0", + ), "unsloth/mistral-7b-instruct-v0.1-bnb-4bit" : ( "mistralai/Mistral-7B-Instruct-v0.1", ), diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 615e43643d..0e36023255 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -368,6 +368,140 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass + # Patch Trainer + from transformers.trainer import Trainer + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + try: + inner_training_loop = inspect.getsource(Trainer._inner_training_loop) + except: + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", + ) + pass + pass + + # Patch Trainer + from transformers.trainer import Trainer + try: + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + inner_training_loop = inspect.getsource(Trainer._inner_training_loop) + Trainer._original_training_loop = inner_training_loop + else: + inner_training_loop = Trainer._original_training_loop + except: + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + ) + pass + + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in inner_training_loop: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) + + start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] + end = inner_training_loop.find("\n\n", start) + original_debug = inner_training_loop[start:end] + spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] + front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + + debug_info = """debug_info = \\ + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f' "-____-" Number of trainable parameters 
= {get_model_param_count(model, trainable_only=True):,}' + logger.warning_once(debug_info)""" + + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace(original_debug, debug_info) + + debug_info = """n_total_devices = total_train_batch_size // \\ + args.gradient_accumulation_steps // self._train_batch_size + if n_total_devices > 2: + logger.warning_once( + "Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + ) + debug_info =""" + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) + + front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) + inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = tpu_spmd_dataloader(train_dataloader)", + "raise RuntimeError('Unsloth: TPUs are not yet supported!')" + ) + inner_training_loop = inner_training_loop.replace( + "self.accelerator.free_memory()", + "self.accelerator.free_memory()\n" + \ + front_spaces + "if self.is_deepspeed_enabled:"\ + "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, + ) + + check_batches = """train_dataloader = self.get_train_dataloader() + ga = args.gradient_accumulation_steps + bsz = self._train_batch_size + total_batches = bsz * ga * args.world_size + n_total_devices = total_batches // ga // bsz + if n_total_devices > 2: + logger.warning_once( + "Please consider a 
commercial license - Unsloth was designed for the GPU Poor.\\n" + "The OSS currently works on 4 GPUs - we're a 2 person team, so please help fund\\n" + "our development costs by supporting us through Ko-fi or buying a license! Thanks!", + ) + divisor = n_total_devices / 2 + bsz = self._train_batch_size = max(int(bsz / divisor), 1) + if total_batches // ga // bsz > 2: + divisor = n_total_devices / 2 + ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" + check_batches = check_batches.split('\n') + check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = self.get_train_dataloader()", + check_batches, 1, + ) + inner_training_loop = inner_training_loop.replace( + "_inner_training_loop", + "_fast_inner_training_loop", 1, + ) + exec(inner_training_loop, globals()) + + Trainer._inner_training_loop = _fast_inner_training_loop + inner_training_loop = inner_training_loop.replace( + "is_torch_tpu_available()", + "False", + ) + if "n_total_devices >" not in inner_training_loop: + raise RuntimeError( + "Our OSS was designed for people with few GPU resources to level the playing field.\n" + "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\n" + "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", + ) + pass + inner_training_loop = inner_training_loop.replace( + "is_sagemaker_mp_enabled()", + "False", + ) + Trainer._inner_training_loop = _fast_inner_training_loop + # Save max_seq_length max_position_embeddings = max(max_seq_length, model.config.max_position_embeddings) model.max_seq_length = max_position_embeddings diff --git a/unsloth/save.py b/unsloth/save.py index dc83f27eca..83e13bd51c 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -140,17 +140,28 @@ def unsloth_save_model( # Push to hub use_temp_dir : Optional[bool] = None, - commit_message : Optional[str] = None, + commit_message : Optional[str] = "Trained with Unsloth", private : Optional[bool] = None, create_pr : bool = False, revision : str = None, - commit_description : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", tags : List[str] = None, # Our functions temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): + if commit_message is None: commit_message = "" + if "Unsloth" not in commit_message: + commit_message += " (Trained with Unsloth)" + commit_message = commit_message.lstrip() + + if commit_description is None: + commit_description = "Upload model trained with Unsloth 2x faster" + elif "Unsloth 2x faster" not in commit_description: + commit_description += " (Trained with Unsloth 2x faster)" + pass + if save_method == "merged_4bit": raise RuntimeError( "Unsloth: Merging into 4bit will cause your model to lose accuracy if you plan\n"\ @@ -202,7 +213,7 @@ def unsloth_save_model( pass save_pretrained_settings["tags"] = tags - if (save_method == "lora") and push_to_hub: + if ((save_method == "lora") or (save_method == "merged_4bit")) and push_to_hub: if token is None: raise RuntimeError( "Unsloth: Pushing to HF requires a token. 
Pass `token = 'hf_....'`\n"\ @@ -210,7 +221,20 @@ def unsloth_save_model( ) pass - model.push_to_hub( + if save_method == "lora": + print("Unsloth: Saving LoRA adapters. Please wait...") + elif save_method == "merged_4bit": + print("Unsloth: Saving 4bit Bitsandbytes model. Please wait...") + pass + + # Update model tag + _ = upload_to_huggingface( + model, save_directory, token, + "finetuned", "trl", file_location = None, + old_username = None, private = private, + ) + + model.original_push_to_hub( repo_id = save_directory, use_temp_dir = use_temp_dir, commit_message = commit_message, @@ -224,7 +248,7 @@ def unsloth_save_model( tags = tags, ) if tokenizer is not None: - tokenizer.push_to_hub( + tokenizer.original_push_to_hub( repo_id = save_directory, use_temp_dir = use_temp_dir, commit_message = commit_message, @@ -238,31 +262,11 @@ def unsloth_save_model( tags = tags, ) pass - return save_directory - pass - - # Update model tag - username = "" - if push_to_hub: - username = upload_to_huggingface( - model, save_directory, token, - "finetuned", "trl", file_location = None, - ) - pass - - # If push_to_hub, we must remove the .../ part of a repo - if push_to_hub and "/" in save_directory: - - # +1 solves absolute path issues - new_save_directory = save_directory[save_directory.find("/")+1:] - - logger.warning_once( - f"Unsloth: You are pushing to hub, but you passed your HF username.\n"\ - f"We shall truncate {save_directory} to {new_save_directory}" - ) - save_pretrained_settings["save_directory"] = new_save_directory - save_directory = new_save_directory + if hasattr(model, "config"): + print(f"Saved {save_method} model to https://huggingface.co/" + save_directory) + pass + return save_directory pass # Tokenizer has different saving arguments @@ -292,13 +296,25 @@ def unsloth_save_model( # Do general saving # Edit save_pretrained_settings # [TODO] _create_repo has errors due to **kwargs getting accepted - for deletion in \ - ("use_temp_dir", "commit_message", 
"create_pr", "revision", "commit_description", "tags",): + # commit_description does not seem to work? + what_to_delete = ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",) \ + if save_pretrained_settings["push_to_hub"] is False else \ + ("use_temp_dir", "create_pr", "revision", "tags", "commit_description",) + for deletion in what_to_delete: del save_pretrained_settings[deletion] pass if hasattr(model, "add_model_tags"): model.add_model_tags(["unsloth",]) + # Update model tag + if push_to_hub: + _ = upload_to_huggingface( + model, save_pretrained_settings["save_directory"], token, + "finetuned", "trl", file_location = None, + old_username = None, private = private, + ) + pass + if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") tokenizer.save_pretrained(**tokenizer_save_settings) @@ -310,10 +326,33 @@ def unsloth_save_model( if save_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") model.save_pretrained(**save_pretrained_settings) + + if push_to_hub and hasattr(model, "config"): + print("Saved to https://huggingface.co/" + save_pretrained_settings["save_directory"]) + pass + print(" Done.") return save_directory pass + # If push_to_hub, we must remove the .../ part of a repo + username = None + if push_to_hub and "/" in save_directory: + + # +1 solves absolute path issues + username = save_directory[:save_directory.find("/")] + new_save_directory = save_directory[save_directory.find("/")+1:] + + logger.warning_once( + f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"\ + f"We shall truncate {save_directory} to {new_save_directory}" + ) + + save_pretrained_settings["save_directory"] = new_save_directory + tokenizer_save_settings ["save_directory"] = new_save_directory + save_directory = new_save_directory + pass + print("Unsloth: Merging 4bit and LoRA weights to 16bit...") # Determine max RAM usage minus sharding @@ -339,7 +378,7 @@ def 
unsloth_save_model( logger.warning_once( f"Unsloth: You have {n_cpus} CPUs. Using `safe_serialization` is 10x slower.\n"\ f"We shall switch to Pytorch saving, which will take 3 minutes and not 30 minutes.\n"\ - f"To force `safe_serialization`, set it to None instead.", + f"To force `safe_serialization`, set it to `None` instead.", ) safe_serialization = False save_function = fast_save_pickle @@ -413,13 +452,26 @@ def unsloth_save_model( # Edit save_pretrained_settings # [TODO] _create_repo has errors due to **kwargs getting accepted save_pretrained_settings["state_dict"] = state_dict - for deletion in \ - ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",): + + # commit_description does not seem to work? + what_to_delete = ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",) \ + if not push_to_hub else \ + ("use_temp_dir", "create_pr", "revision", "tags", "commit_description",) + for deletion in what_to_delete: del save_pretrained_settings[deletion] pass if hasattr(model, "add_model_tags"): model.add_model_tags(["unsloth",]) + # Update model tag + if push_to_hub: + _ = upload_to_huggingface( + model, save_pretrained_settings["save_directory"], token, + "finetuned", "trl", file_location = None, + old_username = username, private = private, + ) + pass + if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") tokenizer.save_pretrained(**tokenizer_save_settings) @@ -452,9 +504,8 @@ def unsloth_save_model( model.config = old_config print("Done.") - # Print location - if push_to_hub: - print(f"Saved to https://huggingface.co/{username}/{save_directory.lstrip('/')}") + if push_to_hub and hasattr(model, "config"): + print(f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/')}") pass save_pretrained_settings["state_dict"] = None @@ -478,7 +529,7 @@ def unsloth_save_model( for _ in range(3): torch.cuda.empty_cache() gc.collect() - return 
save_directory + return save_directory, username pass @@ -494,7 +545,7 @@ def install_llama_cpp_make_non_blocking(): n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean os.system("make clean -C llama.cpp") - full_command = ["make", "all", "-j", str(n_jobs), "-C", "llama.cpp"] + full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) return run_installer pass @@ -507,10 +558,44 @@ def install_python_non_blocking(packages = []): pass +def install_llama_cpp_old(version = -10): + # Download the 10th latest release since the latest might be broken! + # FALLBACK mechanism + releases = subprocess.check_output(["git", "ls-remote", "--tags", "https://github.com/ggerganov/llama.cpp.git"]) + releases = releases.decode("utf-8").replace("\t", " ").split("\n") + for i, x in enumerate(releases): + if "refs/tags/b" not in x: break + releases = releases[:i] + latest = releases[-1] + version = releases[version].split(" ")[0] + + # Clone a specific commit + commands = [ + "git clone https://github.com/ggerganov/llama.cpp", + f"cd llama.cpp && git reset --hard {version} && git clean -df && "\ + f"make clean && LLAMA_CUBLAS=1 make all -j{psutil.cpu_count()*2}", + "pip install gguf protobuf", + ] + for command in commands: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + for line in sp.stdout: + print(line.decode("utf-8"), flush = True, end = "") + pass + pass + # Check if successful + if not os.path.exists("llama.cpp/quantize"): + raise RuntimeError( + "Unsloth: llama.cpp GGUF seems to be too buggy to install.\n"\ + "File a report to llama.cpp's main repo since this is not an Unsloth issue." 
+ ) + pass +pass + + def install_llama_cpp_blocking(): commands = [ "git clone https://github.com/ggerganov/llama.cpp", - f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j {psutil.cpu_count()*2}", + f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j{psutil.cpu_count()*2}", "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return @@ -563,10 +648,13 @@ def save_to_gguf( print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") if _run_installer is not None: - _run_installer.wait() + error = _run_installer.wait() else: + error = 0 install_llama_cpp_blocking() pass + # Check if successful. If not install 10th latest release + if error != 0 or not os.path.exists("llama.cpp/quantize"): install_llama_cpp_old(-10) if quantization_method == "f32": first_conversion = "f32" elif quantization_method == "f16": first_conversion = "f16" @@ -580,15 +668,18 @@ def save_to_gguf( first_conversion = "f16" pass pass - print(f"Unsloth: [1] Converting HF into {first_conversion} GGUF format. This will take 3 minutes...") n_cpus = psutil.cpu_count()*2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model final_location = f"./{model_directory}-unsloth.{first_conversion.upper()}.gguf" + print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ + f"The output location will be {final_location}\n"\ + "This will take 3 minutes...") + command = f"python llama.cpp/convert.py {model_directory} "\ - f"--outfile {final_location} "\ + f"--outfile {final_location} --vocab-type hfft "\ f"--outtype {first_conversion} --concurrency {n_cpus}" with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1) as sp: @@ -601,7 +692,8 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): raise RuntimeError( - "Unsloth: Quantization failed! 
You might have to compile llama.cpp yourself, then run this again.\n"\ + f"Unsloth: Quantization failed for {final_location}\n"\ + "You might have to compile llama.cpp yourself, then run this again.\n"\ "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ "git clone https://github.com/ggerganov/llama.cpp\n"\ @@ -662,7 +754,7 @@ def unsloth_save_pretrained_merged( save_peft_format : bool = True, tags : List[str] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", - maximum_memory_usage : float = 0.85, + maximum_memory_usage : float = 0.85, ): """ Same as .save_pretrained(...) except 4bit weights are auto @@ -695,14 +787,14 @@ def unsloth_push_to_hub_merged( tokenizer = None, save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] use_temp_dir : Optional[bool] = None, - commit_message : Optional[str] = None, + commit_message : Optional[str] = "Trained with Unsloth", private : Optional[bool] = None, token : Union[bool, str, None] = None, max_shard_size : Union[int, str, None] = "5GB", create_pr : bool = False, safe_serialization : bool = True, revision : str = None, - commit_description : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", tags : Optional[List[str]] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.85, @@ -760,15 +852,27 @@ def unsloth_push_to_hub_merged( [](https://github.com/unslothai/unsloth) """ -def upload_to_huggingface(model, save_directory, token, method, extra = "", file_location = None): +def upload_to_huggingface( + model, + save_directory, + token, + method, + extra = "", + file_location = None, + old_username = None, + private = None, +): # Check for username username = "" save_directory = save_directory.lstrip("./") if "/" not in save_directory: from huggingface_hub import whoami try: - username 
= whoami()['name'] - save_directory = f"{save_directory}/{username}" + username = whoami(token = token)["name"] + if type(old_username) is str and username != old_username: + username = old_username + pass + save_directory = f"{username}/{save_directory}" except: raise RuntimeError(f"Unsloth: {save_directory} is not a Huggingface directory.") else: @@ -776,24 +880,28 @@ def upload_to_huggingface(model, save_directory, token, method, extra = "", file pass from huggingface_hub import create_repo - create_repo( - repo_id = save_directory, - token = token, - repo_type = "model", - exist_ok = True, - ) - - # Create model card - from huggingface_hub import ModelCard - content = MODEL_CARD.format( - username = username, - base_model = model.config._name_or_path, - model_type = model.config.model_type, - method = "", - extra = extra, - ) - card = ModelCard(content) - card.push_to_hub(save_directory, token = token) + try: + create_repo( + repo_id = save_directory, + token = token, + repo_type = "model", + exist_ok = False, + private = private, + ) + + # Create model card + from huggingface_hub import ModelCard + content = MODEL_CARD.format( + username = username, + base_model = model.config._name_or_path, + model_type = model.config.model_type, + method = "", + extra = extra, + ) + card = ModelCard(content) + card.push_to_hub(save_directory, token = token) + except: + pass if file_location is not None: # Now upload file @@ -811,6 +919,7 @@ def upload_to_huggingface(model, save_directory, token, method, extra = "", file path_in_repo = uploaded_location, repo_id = save_directory, repo_type = "model", + commit_message = "(Trained with Unsloth)", ) # We also upload a config.json file @@ -823,6 +932,7 @@ def upload_to_huggingface(model, save_directory, token, method, extra = "", file path_in_repo = "config.json", repo_id = save_directory, repo_type = "model", + commit_message = "(Trained with Unsloth)", ) os.remove("_temporary_unsloth_config.json") pass @@ -838,6 +948,7 @@ def 
unsloth_save_pretrained_gguf( first_conversion : str = "f16", push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, + private : Optional[bool] = None, is_main_process : bool = True, state_dict : Optional[dict] = None, save_function : Callable = torch.save, @@ -847,7 +958,7 @@ def unsloth_save_pretrained_gguf( save_peft_format : bool = True, tags : List[str] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", - maximum_memory_usage : float = 0.85, + maximum_memory_usage : float = 0.85, ): """ Same as .save_pretrained(...) except 4bit weights are auto @@ -898,11 +1009,11 @@ def unsloth_save_pretrained_gguf( python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() makefile = install_llama_cpp_make_non_blocking() - new_save_directory = unsloth_save_model(**arguments) + new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() else: try: - new_save_directory = unsloth_save_model(**arguments) + new_save_directory, old_username = unsloth_save_model(**arguments) makefile = None except: # Retry by recloning llama.cpp @@ -910,7 +1021,7 @@ def unsloth_save_pretrained_gguf( python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() makefile = install_llama_cpp_make_non_blocking() - new_save_directory = unsloth_save_model(**arguments) + new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass pass @@ -924,12 +1035,12 @@ def unsloth_save_pretrained_gguf( print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( self, save_directory, token, - "GGUF converted", "gguf", file_location, + "GGUF converted", "gguf", file_location, old_username, private, ) link = f"{username}/{new_save_directory.lstrip('/.')}" \ if username not in new_save_directory else \ new_save_directory.lstrip('/.') - print(f"Saved to https://huggingface.co/{link}") + print(f"Saved GGUF to https://huggingface.co/{link}") pass pass 
@@ -941,14 +1052,14 @@ def unsloth_push_to_hub_gguf( quantization_method : str = "fast_quantized", first_conversion : str = "f16", use_temp_dir : Optional[bool] = None, - commit_message : Optional[str] = None, + commit_message : Optional[str] = "Trained with Unsloth", private : Optional[bool] = None, token : Union[bool, str, None] = None, max_shard_size : Union[int, str, None] = "5GB", create_pr : bool = False, safe_serialization : bool = True, revision : str = None, - commit_description : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", tags : Optional[List[str]] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.85, @@ -998,19 +1109,19 @@ def unsloth_push_to_hub_gguf( python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() makefile = install_llama_cpp_make_non_blocking() - new_save_directory = unsloth_save_model(**arguments) + new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() else: try: - new_save_directory = unsloth_save_model(**arguments) + new_save_directory, old_username = unsloth_save_model(**arguments) makefile = None except: # Retry by recloning llama.cpp git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() - new_save_directory = unsloth_save_model(**arguments) + makefile = install_llama_cpp_make_non_blocking() + new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass pass @@ -1023,12 +1134,12 @@ def unsloth_push_to_hub_gguf( print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( self, repo_id, token, - "GGUF converted", "gguf", file_location, + "GGUF converted", "gguf", file_location, old_username, private, ) link = f"{username}/{new_save_directory.lstrip('/.')}" \ if username not in 
new_save_directory else \ new_save_directory.lstrip('/.') - print(f"Saved to https://huggingface.co/{link}") + print(f"Saved GGUF to https://huggingface.co/{link}") pass @@ -1038,31 +1149,17 @@ def patch_saving_functions(model): import types from typing import Callable, Optional, Union, List - if hasattr(model, "_original_push_to_hub"): return - - # First check if this has already been called, and revert it - original_model = model - while True: - if hasattr(original_model, "_original_push_to_hub"): - original_model.push_to_hub = original_model._original_push_to_hub - del original_model._original_push_to_hub - if hasattr(original_model, "push_to_hub_merged"): del original_model.push_to_hub_merged - if hasattr(original_model, "save_pretrained_merged"): del original_model.save_pretrained_merged - if hasattr(original_model, "push_to_hub_gguf"): del original_model.push_to_hub_gguf - if hasattr(original_model, "save_pretrained_gguf"): del original_model.save_pretrained_gguf - pass - - if hasattr(original_model, "model"): original_model = original_model.model - else: break + # And now re add our saving methods! + if model.push_to_hub.__name__ == "unsloth_push_to_hub": + original_push_to_hub = model.original_push_to_hub + else: + original_push_to_hub = model.push_to_hub pass - # And now re add our saving methods! 
- original_push_to_hub = model.push_to_hub signature = str(inspect.signature(original_push_to_hub)).replace("NoneType", "None") signature = signature[1:] signature = re.sub("", "torch.save", signature) docs = original_push_to_hub.__doc__.encode("utf-8").decode("utf-8") - model._original_push_to_hub = original_push_to_hub push_to_hub_text = f'''def unsloth_push_to_hub(self, {signature}: """ @@ -1077,11 +1174,45 @@ def patch_saving_functions(model): arguments["tags"] = ["unsloth",] elif hasattr(self, "add_model_tags"): self.add_model_tags(["unsloth",]) + + if "commit_message" in arguments: + commit_message = arguments["commit_message"] + if commit_message is not None: + if not commit_message.endswith(" "): commit_message += " " + if "Unsloth" not in commit_message: + commit_message += "(Trained with Unsloth)" + else: + commit_message = "Upload model trained with Unsloth" + arguments["commit_message"] = commit_message + + if "commit_description" in arguments: + commit_description = arguments["commit_description"] + if commit_description is not None: + if not commit_description.endswith(" "): commit_description += " " + if "Unsloth" not in commit_description: + commit_description += "(Trained with Unsloth 2x faster)" + else: + commit_description = "Upload model trained with Unsloth 2x faster" + arguments["commit_description"] = commit_description + + # Update model tag + if hasattr(self, "config"): + _ = upload_to_huggingface( + self, arguments["repo_id"], arguments["token"], + "finetuned", "trl", file_location = None, + old_username = None, private = arguments["private"], + ) + pass + try: - return self._original_push_to_hub(**arguments) + self.original_push_to_hub(**arguments) except: del arguments["tags"] - return self._original_push_to_hub(**arguments) + self.original_push_to_hub(**arguments) + pass + + if hasattr(self, "config"): + print("Saved model to https://huggingface.co/" + arguments["repo_id"]) pass ''' exec(push_to_hub_text, globals()) @@ -1089,12 +1220,12 
@@ def patch_saving_functions(model): original_model = model while True: - if not hasattr(original_model, "_original_push_to_hub"): - original_model._original_push_to_hub = original_model.push_to_hub + if original_model.push_to_hub.__name__ != "unsloth_push_to_hub": + original_model.original_push_to_hub = original_model.push_to_hub original_model.push_to_hub = types.MethodType(unsloth_push_to_hub, original_model) - if hasattr(original_model, "add_model_tags"): original_model.add_model_tags(["unsloth",]) + pass pass if hasattr(original_model, "model"): original_model = original_model.model From f946bed7b3b2f1fbee77838f96e59b9a94494790 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 27 Feb 2024 01:42:10 +1100 Subject: [PATCH 0144/1088] 2.4x faster Gemma (#197) * Update save.py * Update save.py * linking * llama.cpp bugs * Update save.py * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * 
Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md --- README.md | 40 ++-- pyproject.toml | 2 +- unsloth/chat_templates.py | 77 ++++++- unsloth/kernels/__init__.py | 4 +- unsloth/kernels/cross_entropy_loss.py | 251 ++++++++++++++++------- unsloth/kernels/fast_lora.py | 51 ++--- unsloth/kernels/geglu.py | 104 ++++++++++ unsloth/kernels/rms_layernorm.py | 2 +- unsloth/models/gemma.py | 282 ++++++++++++++++++++++++++ unsloth/models/llama.py | 51 ++++- unsloth/models/loader.py | 13 ++ unsloth/models/mapper.py | 16 
++ unsloth/models/mistral.py | 10 +- unsloth/save.py | 5 +- 14 files changed, 767 insertions(+), 141 deletions(-) create mode 100644 unsloth/kernels/geglu.py create mode 100644 unsloth/models/gemma.py diff --git a/README.md b/README.md index 98f83e09c7..759057f2a4 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Mistral, Llama 2-5x faster with 70% less memory! +### Finetune Mistral, Gemma, Llama 2-5x faster with 70% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -22,28 +22,30 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| +| **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 58% less | | **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | | **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 43% less | -| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | | **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | +| **DPO - Zephyr** | [▶️ Start on 
Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | - This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. -- Colab provides a free GPU sometimes. Kaggle has 30 hrs free per week on a 12 hr running cap. -- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. Use Colab as Kaggle takes 10 mins to install. +- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. ## 🦥 Unsloth.ai News -- 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO. -- 📣 [TinyLlama 1.1b](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) on 3T tokens now works. -- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face, and we're in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth). -- 📣 Now supports **Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek** and their derived models (**Open Hermes** etc). Llama 7, 13, 70b; CodeLlama 7, 13, 34, 70b; Yi 6, 34b are all supported! -- 📣 **Download models 4x faster** from 🤗Hugging Face! Eg: `unsloth/mistral-7b-bnb-4bit` See our [HF collection](https://huggingface.co/collections/unsloth/load-4bit-models-4x-faster-659042e3a41c3cbad582e734) for more! 
+- 📣 [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) on 6T tokens now works. And [Gemma 2b notebook](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) +- 📣 Added [conversational notebooks](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) and [raw text notebooks](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) +- 📣 [2x faster inference](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) added for all our models +- 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO +- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face and are in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth) +- 📣 [Download models 4x faster](https://huggingface.co/collections/unsloth/) from 🤗Hugging Face. 
Eg: `unsloth/mistral-7b-bnb-4bit` ## 🔗 Links and Resources | Type | Links | | ------------------------------- | --------------------------------------- | +| 📚 **Wiki & FAQ** | [Read Our Wiki](https://github.com/unslothai/unsloth/wiki) | | 📜 **Documentation** | [Read The Doc](https://github.com/unslothai/unsloth/tree/main#-documentation) | | 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| @@ -113,8 +115,8 @@ pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ ```bash pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118_ampere] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121_ampere] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-ampere] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-ampere] @ git+https://github.com/unslothai/unsloth.git" ``` 3. For Pytorch 2.1.1: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. 
```bash @@ -122,10 +124,10 @@ pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.1 triton \ --index-url https://download.pytorch.org/whl/cu121 ``` ```bash -pip install "unsloth[cu118_torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121_torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-ampere-torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-ampere-torch211] @ git+https://github.com/unslothai/unsloth.git" ``` 4. For Pytorch 2.2.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. ```bash @@ -133,10 +135,10 @@ pip install --upgrade --force-reinstall --no-cache-dir torch==2.2.0 triton \ --index-url https://download.pytorch.org/whl/cu121 ``` ```bash -pip install "unsloth[cu118_torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121_torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118_ampere_torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121_ampere_torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-ampere-torch220] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-ampere-torch220] @ git+https://github.com/unslothai/unsloth.git" ``` 5. 
If you get errors, try the below first, then go back to step 1: ```bash diff --git a/pyproject.toml b/pyproject.toml index 049711276d..7e8956c712 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "transformers>=4.37.0", + "transformers>=4.38.0", "datasets", "sentencepiece", "accelerate>=0.26.1", diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index eb61056320..6f0237c27b 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -217,6 +217,35 @@ CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token,) +# https://huggingface.co/google/gemma-7b-it +# Notice we must use |trim for lstrip and rstrip. maps to 106. +# maps to 107. user and model are normal 1 word tokens. +gemma_template = \ + "{% for message in messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{'user\n' + message['content'] | trim + '\n'}}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{'model\n' + message['content'] | trim + '\n' }}"\ + "{% else %}"\ + "{{ 'system\n' + message['content'] | trim + '\n' }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ 'model\n' }}"\ + "{% endif %}" +gemma_eos_token = "" +CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token,) + + +# Gemma with ChatML instead +gemma_chatml_template = chatml_template +gemma_chatml_eos_token = ( + {"" : "<|im_start|>", "" : "<|im_end|>"}, + "<|im_end|>", +) +CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token,) + + def get_chat_template( tokenizer, chat_template = "chatml", @@ -229,7 +258,7 @@ def get_chat_template( old_padding_side = tokenizer.padding_side - if type(chat_template) in (list, tuple): + if type(chat_template) in (list, tuple,): chat_template, stop_word = chat_template assert(type(chat_template) is str) assert(type(stop_word) is str) @@ -238,7 +267,38 @@ def get_chat_template( chat_template, stop_word = 
CHAT_TEMPLATES[chat_template] - if stop_word != "eos_token": + if type(stop_word) in (list, tuple,): + token_mapping, stop_word = stop_word + assert(type(token_mapping) is dict) + else: + token_mapping = None + + assert(type(stop_word) is str) + + # token_mapping = {"" : "<|im_start|>", "" : "<|im_end|>"} + # For Gemma :) + if token_mapping is not None: + + string_vocab = tokenizer._tokenizer.to_str() + + for old_token, new_token in token_mapping.items(): + old_count = string_vocab.count(f'"{old_token}"') + new_count = string_vocab.count(f'"{new_token}"') + if new_count != 0: + print(f"{new_token} is already a token. Skipping.") + elif old_count == 0: + raise RuntimeError(f"{old_token} was not part of the tokenizer!") + else: + string_vocab = string_vocab.replace(f'"{old_token}"', f'"{new_token}"') + pass + pass + + logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") + string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) + new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) + tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + + elif stop_word != "eos_token": logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") # Replaces the old EOS token with a new one. @@ -252,6 +312,7 @@ def get_chat_template( new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) pass + else: raise TypeError( f"Unsloth: `chat_template` must be a tuple of (your_template, eos_token,) or one of\n"\ @@ -318,6 +379,7 @@ def test_chat_templates(): {"role": "user", "content": " No it's 100% 5! 
"}, ] + # Zephyr from transformers import AutoTokenizer template = zephyr_template correct_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") @@ -326,6 +388,7 @@ def test_chat_templates(): our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) + # Chatml template = chatml_template correct_tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B") correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) @@ -333,6 +396,7 @@ def test_chat_templates(): our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) + # Mistral template = mistral_template correct_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") correct_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) @@ -340,6 +404,7 @@ def test_chat_templates(): our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) + # Llama template = llama_template correct_tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-2-7b-chat") correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) @@ -347,6 +412,7 @@ def test_chat_templates(): our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) + # Vicuna try: from fastchat.conversation import get_conv_template except: @@ -381,4 +447,11 @@ def test_chat_templates(): our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) # We add ourselves assert(correct_prompt == our_prompt.replace("", "")) + + # Gemma + correct_tokenizer = 
AutoTokenizer.from_pretrained("unsloth/gemma-7b-it") + correct_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = gemma_template + our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) + assert(our_prompt == correct_prompt) pass diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index f5db8fa890..9c231e6ce1 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -16,9 +16,11 @@ from .rms_layernorm import fast_rms_layernorm from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel +from .geglu import geglu_forward_kernel, geglu_backward_kernel from .fast_lora import ( get_lora_parameters, - apply_lora_mlp, + apply_lora_mlp_swiglu, + apply_lora_mlp_geglu, apply_lora_qkv, apply_lora_o, ) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 0a73a393ec..260577912f 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -20,12 +20,14 @@ @triton.jit -def _cross_entropy_forward(logits_ptr, logits_row_stride, - loss_ptr, - lse_ptr, - labels_ptr, - n_cols, - BLOCK_SIZE: tl.constexpr,): +def _cross_entropy_forward( + logits_ptr, logits_row_stride, + loss_ptr, + logsumexp_ptr, + labels_ptr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, +): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] Pi = exp(xi) / sum(exp(xi)) @@ -34,40 +36,114 @@ def _cross_entropy_forward(logits_ptr, logits_row_stride, = y * (log[sum(exp(x))] - x) If y == 0: CE_i = 0 If y == 1: CE_i = logsumexp - x + + logsumexp is also stable + Take y = log[sum(exp(x))] + exp(y) = sum(exp(x)) + exp(y) = sum(exp(x - c)*exp(c)) Since e^(x-c)*e^c = e^x + exp(y) = exp(c)*sum(exp(x - c)) + y = log(exp(c)*sum(exp(x - c))) + y = c + log[sum(exp(x - c))] + This 
means we can set c = max(x) to make sure + exp(x - c) always is exp(x - max(x)). + This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride - loss_ptr += row_idx - lse_ptr += row_idx - labels_ptr += row_idx + logits_ptr += row_idx * logits_row_stride.to(tl.int64) + loss_ptr += row_idx + logsumexp_ptr += row_idx + labels_ptr += row_idx col_offsets = tl.arange(0, BLOCK_SIZE) - mask = col_offsets < n_cols + mask = col_offsets < VOCAB_SIZE - # TODO: Fixup int32 locations to int64 label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) - max_logits = tl.max(logits, 0) - # Maximum stops overflow - lse = tl.log(tl.sum(tl.exp(logits - max_logits), 0)) + max_logits - tl.store(lse_ptr, lse) + c = tl.max(logits, 0) + logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) if label_idx != -100: - logits_label = tl.load(logits_ptr + label_idx).to(tl.float32) - loss = lse - logits_label + x = tl.load(logits_ptr + label_idx).to(tl.float32) + loss = logsumexp - x else: loss = 0.0 + tl.store(logsumexp_ptr, logsumexp) tl.store(loss_ptr, loss) pass @triton.jit -def _cross_entropy_backward(logits_ptr, logits_row_stride, - dloss_ptr, dloss_row_stride, - lse_ptr, - labels_ptr, - n_cols, - BLOCK_SIZE: tl.constexpr,): +def _chunked_cross_entropy_forward( + logits_ptr, logits_row_stride, + loss_ptr, + logsumexp_ptr, + labels_ptr, + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, +): + """ + 256K vocab divided in 4 chunks + + |-65536-| |-65536-| |-65536-| |-65536-| + |-------| |-------| |-------| |-------| + |-------| |-------| |-------| |-------| + + If y == 0: CE_i = 0 + If y == 1: CE_i = logsumexp - x + + Notice we can do logsumexp for each chunk and then + logsumexp[chunk_sum(logsumexp)] == logsumexp + + chunk_sum = log[chunk_sum(logsumexp)] + = log[exp(logsumexp(a)) + ... 
+ exp(logsumexp(z))] + = log[exp(log[sum(exp(a))]) + ... + exp(log[sum(exp(z))])] + = log[sum(exp(a)) + ... + sum(exp(z))] + = logsumexp(x) + + This means we can perform a logsumexp for each chunk, then do a + final logsumexp reduction! + + Ie do: logsumexp(chunked_logsumexp) - x + """ + row_idx = tl.program_id(0) + chunk_idx = tl.program_id(1) + logits_ptr += row_idx * logits_row_stride.to(tl.int64) + loss_ptr += row_idx + logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx + labels_ptr += row_idx + + col_offsets = chunk_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = col_offsets < VOCAB_SIZE + + label_idx = tl.load(labels_ptr).to(tl.int32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + c = tl.max(logits, 0) + logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) + + if chunk_idx == 0: + # logsumexp(chunked_logsumexp) - x + # Do the -x separately + if label_idx != -100: + x = tl.load(logits_ptr + label_idx).to(tl.float32) + loss = -1.0 * x + else: + loss = 0.0 + tl.store(loss_ptr, loss) + pass + tl.store(logsumexp_ptr, logsumexp) +pass + + +@triton.jit +def _cross_entropy_backward( + logits_ptr, logits_row_stride, + dloss_ptr, dloss_row_stride, + logsumexp_ptr, + labels_ptr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, +): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) dC/dx = d/dx (y * log[sum(exp(x))] - x * y) @@ -83,47 +159,80 @@ def _cross_entropy_backward(logits_ptr, logits_row_stride, If y == 1 and x == label: dC/dlabel = exp[x - logsumexp] - 1 If y == 1 and x != label: dC/dx = exp[x - logsumexp] """ - row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride + row_idx = tl.program_id(0) + block_idx = tl.program_id(1) + + logits_ptr += row_idx * logits_row_stride.to(tl.int64) dloss_ptr += row_idx * dloss_row_stride - col_offsets = tl.arange(0, BLOCK_SIZE) - mask = col_offsets < n_cols - # TODO: Fixup int32 locations to int64 + col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, 
BLOCK_SIZE) + mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr + row_idx).to(tl.int32) if label_idx != -100: dloss = tl.load(dloss_ptr) else: dloss = 0.0 - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = 0).to(tl.float32) - lse = tl.load(lse_ptr + row_idx) - probs = tl.exp(logits - lse) - probs = tl.where(col_offsets == label_idx, probs - 1.0, probs) - tl.store(logits_ptr + col_offsets, dloss * probs, mask = mask) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + logsumexp = tl.load(logsumexp_ptr + row_idx) + y = tl.exp(x - logsumexp) + y = tl.where( + col_offsets == label_idx, + y - 1.0, # exp(x - logsumexp) - 1 + y, # exp(x - logsumexp) + ) + + # If y == 0: dC/dx = 0 ==> we already masked it to be = 0, so dloss = 0. + tl.store(logits_ptr + col_offsets, dloss * y, mask = mask) pass +MAX_FUSED_SIZE = 65536 # 2**16 + class Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, labels): - n_rows, n_cols = logits.shape - BLOCK_SIZE, num_warps = calculate_settings(n_cols) - losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda") - logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda") - - _cross_entropy_forward[(n_rows,)]( - logits, logits.stride(0), - losses, - logsumexp, - labels, - n_cols, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + n_rows, vocab_size = logits.shape + + div, mod = divmod(vocab_size, MAX_FUSED_SIZE) + n_chunks = div + (mod != 0) + losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + + if n_chunks == 1: + # For small vocabs <= 65336 like Llama, Mistral + BLOCK_SIZE, num_warps = calculate_settings(vocab_size) + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + + _cross_entropy_forward[(n_rows,)]( + logits, logits.stride(0), + losses, + logsumexp, + labels, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + else: + # For large 
vocabs > 65336 like Gemma 256K + logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda") + + _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( + logits, logits.stride(0), + losses, + logsumexp, + labels, + VOCAB_SIZE = vocab_size, + N_CHUNKS = n_chunks, + BLOCK_SIZE = MAX_FUSED_SIZE, + num_warps = 32, + ) + # logsumexp(chunked_logsumexp) - x + # Do the -x separately + logsumexp = torch.logsumexp(logsumexp, dim = 1) # Row sum + losses += logsumexp + losses.masked_fill_(labels == -100, 0) # Don't forget to mask padding out! + pass - ctx.BLOCK_SIZE = BLOCK_SIZE - ctx.num_warps = num_warps ctx.save_for_backward(logits, logsumexp, labels) return losses pass @@ -131,23 +240,26 @@ def forward(ctx, logits, labels): @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors - n_rows, n_cols = logits.shape + n_rows, vocab_size = logits.shape - _cross_entropy_backward[(n_rows,)]( + BLOCK_SIZE = 4096 + div, mod = divmod(vocab_size, BLOCK_SIZE) + n_blocks = div + (mod != 0) + + _cross_entropy_backward[(n_rows, n_blocks,)]( logits, logits.stride(0), dlosses, dlosses.stride(0), logsumexp, labels, - n_cols, - BLOCK_SIZE = ctx.BLOCK_SIZE, - num_warps = ctx.num_warps, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = 8, ) return logits, None, None, pass pass -slow_cross_entropy_loss = torch.nn.functional.cross_entropy def fast_cross_entropy_loss(logits, labels): """ Arguments: @@ -159,25 +271,10 @@ def fast_cross_entropy_loss(logits, labels): batch, seq_len, d = logits.shape assert(labels.shape == (batch, seq_len)) - # Prelim support Qwen, Deepseek other large vocab sizes > 2^16 - if d > MAX_FUSED_SIZE: - logger.warning_once( - f"Unsloth: Vocab size of {d} exceeds the max CUDA blocksize of {MAX_FUSED_SIZE}.\n"\ - "For now, Unsloth will use Pytorch's CrossEntropyLoss, which will entail a\n"\ - "25% increase in memory usage and be slower. 
Make an issue on \n"\ - "Unsloth's Github page if you want a faster and more memory efficient kernel!" - ) - loss = slow_cross_entropy_loss( - logits.float().view(batch*seq_len, d), # Must cast to float32 for numerical stability - labels.view(-1), - ) - return loss - else: - loss = Fast_CrossEntropyLoss.apply( - logits.view(batch*seq_len, d), - labels.view(-1), - ) - n_items = torch.count_nonzero(labels != -100) - return loss.sum() / n_items - pass + loss = Fast_CrossEntropyLoss.apply( + logits.view(batch*seq_len, d), + labels.view(-1), + ) + n_items = torch.count_nonzero(labels != -100) + return loss.sum() / n_items pass diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index b3a1098355..3ed0d3c914 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -14,7 +14,6 @@ import torch from .utils import fast_dequantize, QUANT_STATE, get_lora_parameters -from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel def matmul_lora(X, W, W_quant, A, B, s, out = None): @@ -85,20 +84,20 @@ class LoRA_MLP(torch.autograd.Function): def forward(ctx, X : torch.Tensor, gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, - downW, downW_quant, downA, downB, downS): + downW, downW_quant, downA, downB, downS, + _forward_function, _backward_function,): dtype = X.dtype e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) g = matmul_lora(X, upW, upW_quant, upA, upB, upS) - # f = torch.nn.functional.silu(e) - # h = f * g - h = swiglu_fg_kernel(e, g) + h = _forward_function(e, g) i = matmul_lora(h, downW, downW_quant, downA, downB, downS) ctx.custom_saved_tensors = ( gateW, gateW_quant, gateS, upW, upW_quant, upS, downW, downW_quant, downS, + _backward_function, ) ctx.save_for_backward(gateA, gateB, upA, upB, downA, downB, X, e, g) @@ -109,8 +108,8 @@ def forward(ctx, X : torch.Tensor, @staticmethod @torch.cuda.amp.custom_bwd def backward(ctx, dY : torch.Tensor): - gateW, gateW_quant, gateS, upW, upW_quant, upS, 
downW, downW_quant, downS, = \ - ctx.custom_saved_tensors + gateW, gateW_quant, gateS, upW, upW_quant, upS, downW, downW_quant, downS, \ + _backward_function = ctx.custom_saved_tensors gateA, gateB, upA, upB, downA, downB, \ X, e, g = ctx.saved_tensors @@ -125,14 +124,7 @@ def backward(ctx, dY : torch.Tensor): dtype = X.dtype DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) - # e = e.float() - # se = 1.0 / (1.0 + torch.exp(-e)) - # f = (se * e).to(dtype) - # h = f * g - # df = DW * f - # dg = DW * g - # de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype) - DW, e, g = swiglu_DWf_DW_dfg_kernel(DW, e, g) + DW, e, g = _backward_function(DW, e, g) h, df, de = DW, e, g # Down projection LoRA weights @@ -155,7 +147,6 @@ def backward(ctx, dY : torch.Tensor): # dX = matmul_lora(df, upW.t(), upW_quant, upB, upA, upS) # dX += matmul_lora(de, gateW.t(), gateW_quant, gateB, gateA, gateS) - upW = fast_dequantize(upW.t(), upW_quant) dX = torch.matmul(df, upW.t(), out = X) del upW @@ -172,24 +163,36 @@ def backward(ctx, dY : torch.Tensor): return dX.view(batch, seq_len, hd), \ None, None, d_gateA.t(), d_gateB.t(), None, \ None, None, d_upA.t(), d_upB.t(), None, \ - None, None, d_downA.t(), d_downB.t(), None, + None, None, d_downA.t(), d_downB.t(), None, \ + None, None, # _backward and _forward pass pass -def apply_lora_mlp(self, X): - # gate = self.gate_proj(X) - # up = self. up_proj(X) - # h = torch.nn.functional.silu(gate) * up - # down = self.down_proj(h) - # return down +from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel +def apply_lora_mlp_swiglu(self, X): + gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) + upW, upW_quant, upA, upB, upS = get_lora_parameters(self. 
up_proj) + downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) + out = LoRA_MLP.apply(X, + gateW, gateW_quant, gateA, gateB, gateS, + upW, upW_quant, upA, upB, upS, + downW, downW_quant, downA, downB, downS, + swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel,) + return out +pass + + +from .geglu import geglu_forward_kernel, geglu_backward_kernel +def apply_lora_mlp_geglu(self, X): gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) upW, upW_quant, upA, upB, upS = get_lora_parameters(self. up_proj) downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) out = LoRA_MLP.apply(X, gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, - downW, downW_quant, downA, downB, downS) + downW, downW_quant, downA, downB, downS, + geglu_forward_kernel, geglu_backward_kernel,) return out pass diff --git a/unsloth/kernels/geglu.py b/unsloth/kernels/geglu.py new file mode 100644 index 0000000000..7001b8ff0a --- /dev/null +++ b/unsloth/kernels/geglu.py @@ -0,0 +1,104 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.jit +def _forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + # f = 1/2 * e * (1 + erf(1/sqrt(2) * e)) + # h = f * up + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) + + f_row = 0.5 * e_row * (tl.math.erf(tl.math.rsqrt(2.0) * e_row) + 1.0) + f_row = f_row.to(g_row.dtype) # Exact copy from HF + h_row = f_row * g_row + + # Store h + tl.store(h + offsets, h_row, mask = mask) +pass + + +def geglu_forward_kernel(gate, up): + batch, seq_len, hd = gate.shape + n_elements = gate.numel() + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda") + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) + return out +pass + + +@triton.jit +def _backward_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): + """ + f = 1/2 * e * (1 + erf(1/sqrt(2) * e)) + h = f * up + + df/de (with help of Wolfram :) + df/de = 1/2 * (1 + erf(1/sqrt(2) * e)) + 1/sqrt(2*pi) * e * exp(-1/2 * e^2) + + Reuse via + f = 1/2 * (1 + erf(1/sqrt(2) * e)) * e + """ + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32) + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) + + # Break e_row away for re-use + # f = 1/2 * e * (1 + erf(1/sqrt(2) * e)) + f_partial_row = 0.5 * (tl.math.erf(tl.math.rsqrt(2.0) * e_row) + 1.0) + f_row = f_partial_row * e_row + + f_row = f_row.to(DW_row.dtype) + # h = f * g + h_row = f_row * g_row + # df = DW * f + 
df_row = DW_row * f_row + # dg = DW * g + dg_row = DW_row * g_row + + # df/de = 1/2 * (1 + erf(1/sqrt(2) * e)) + 1/sqrt(2*pi) * e * exp(-1/2 * e^2) + t = 0.3989422804014327 # 1/sqrt(2*pi) + df_de = f_partial_row + t * e_row * tl.exp(-0.5 * e_row * e_row) + + de_row = dg_row.to(tl.float32) * df_de + de_row = de_row.to(DW_row.dtype) + + # Store derivatives in buffers + tl.store(DW + offsets, h_row, mask = mask) # h = f * g + tl.store(e + offsets, df_row, mask = mask) # df = DW * f + tl.store(g + offsets, de_row, mask = mask) # de +pass + + +def geglu_backward_kernel(DW, e, g): + batch_seq_len, hd = e.shape + n_elements = e.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + return DW, e, g +pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index ec34880a2c..ccd9f89948 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -44,7 +44,7 @@ def _rms_layernorm_forward( W_row = tl.load(W + col_offsets, mask = mask, other = 0)#.to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols - inv_var = 1.0 / tl.sqrt(row_var + eps) + inv_var = tl.math.rsqrt(row_var + eps) tl.store(r, inv_var) normed = X_row * inv_var normed = normed.to(W_row.dtype) # Exact copy from HF diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py new file mode 100644 index 0000000000..4aa634a4bd --- /dev/null +++ b/unsloth/models/gemma.py @@ -0,0 +1,282 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .llama import * +from ._utils import __version__ + +from transformers.models.gemma.modeling_gemma import ( + GemmaAttention, + GemmaDecoderLayer, + GemmaModel, + GemmaForCausalLM, + GemmaRotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, +) +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) +# For Pytorch 2.1.1 +try: + from transformers.models.gemma.modeling_gemma import ( + GemmaSdpaAttention, + GemmaFlashAttention2, + ) +except: + GemmaSdpaAttention = GemmaAttention + GemmaFlashAttention2 = GemmaAttention +pass + + +def fast_geglu_inference(self, X): + # gate = self.gate_proj(X) + # up = self.up_proj(X) + bsz, _, hd = X.shape + mlp_size = self.config.intermediate_size + temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + + gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) + up = fast_linear_forward(self. 
up_proj, X, out = temp[1]) + gate = torch.nn.functional.gelu(gate) + gate *= up + + # X = self.down_proj(gate) + down = fast_linear_forward(self.down_proj, gate, out = up[:,:,:hd]) + return down +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 +def GemmaDecoderLayer_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +): + if False:#past_key_value is not None: + do_prefill = not hasattr(self.self_attn, "paged_attention") + + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + self.self_attn, + hidden_states, + past_key_value, + position_ids, + do_prefill = do_prefill, + ) + hidden_states += residual + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_geglu_inference(self.mlp, hidden_states) + hidden_states += residual + else: + residual = hidden_states + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + # hidden_states = self.input_layernorm(hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states = residual + hidden_states + + # Fully 
Connected + residual = hidden_states + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + # hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + pass + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs +pass + + +from math import sqrt as math_sqrt + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +@torch.inference_mode +def GemmaModel_fast_forward_inference( + self, + input_ids, + past_key_values, +): + # Fix out of bounds tokenization + input_ids = input_ids[:,:self.max_seq_length] + + hidden_states = self.embed_tokens(input_ids) + hidden_states *= math_sqrt(self.config.hidden_size) + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.layers): + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states, + past_key_values[idx], + None, + ) + hidden_states += residual + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) + hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states) + hidden_states += residual + + next_decoder_cache.append(present_key_value) + pass + hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + + +class FastGemmaModel(FastLlamaModel): + + @staticmethod + def pre_patch(): + GemmaAttention .forward = LlamaAttention_fast_forward + GemmaSdpaAttention 
.forward = LlamaAttention_fast_forward + GemmaFlashAttention2.forward = LlamaAttention_fast_forward + GemmaDecoderLayer .forward = GemmaDecoderLayer_fast_forward + GemmaModel .forward = LlamaModel_fast_forward + GemmaForCausalLM .forward = LlamaForCausalLM_fast_forward + PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + # Solves https://github.com/unslothai/unsloth/issues/168 + # Static KV Cache was introduced in 4.38.0, causing training to be much slower. + # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. + # https://github.com/huggingface/transformers/pull/27931 + # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py + import transformers.models.gemma.modeling_gemma + transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding = LlamaRotaryEmbedding + return + pass + + + @staticmethod + def post_patch(model): + # Patch model for Gemma + layers = model.model.layers + + # Torch.compile fails on embedding matrix?? + # Workaround randomnly fixes it for torch versions < 2.2 + model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + model.config.update({"unsloth_version" : __version__}) + + # We also do this for the lm_head + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.lm_head.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + + # Gemma has tied weights! This means lm_head == embed_tokens + if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.model.embed_tokens.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + pass + + # Also patch all dtypes - BnB seems to not allocate the correct type? 
+ # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! + module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + # Downcast RoPE embedding to correct data type + if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ + and (module.cos_cached.dtype != correct_dtype): + + module.cos_cached = module.cos_cached.to(correct_dtype) + module.sin_cached = module.sin_cached.to(correct_dtype) + pass + pass + pass + + # Add 1 to weight + # return output * (1 + self.weight) + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gemma/modeling_gemma.py#L89 + from transformers.models.gemma.modeling_gemma import GemmaRMSNorm + + # Freeze all parameters except LoRA + # We do this first since += 1 seems to not be liked by requires_grad = True + for name, param in model.named_parameters(): + if ".lora_A." in name or ".lora_B." 
in name: + param.requires_grad_(True) + else: + param.requires_grad_(False) + pass + + # Patch RMS Layernorm + for name, module in model.named_modules(): + if isinstance(module, GemmaRMSNorm): + module.weight += 1.0 # return output * (1 + self.weight) + if not hasattr(module, "variance_epsilon"): + module.variance_epsilon = module.eps # Gemma doesn't use variance_epsilon + pass + + # Clear deleted GPU items + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3ca6291fd5..359761c06a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -119,6 +119,7 @@ def LlamaAttention_fast_forward_inference( n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads head_dim = self.head_dim + attention_size = n_heads*head_dim # assert(n_kv_heads * n_groups == n_heads) seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 @@ -131,7 +132,7 @@ def LlamaAttention_fast_forward_inference( self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, hd), dtype = dtype, device = "cuda") + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda") self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda") self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda") @@ -201,13 +202,13 @@ def LlamaAttention_fast_forward_inference( A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch.matmul(A, Vnn, out = Qn) A = A.transpose(1, 2) - A = A.reshape(bsz, 1, self.hidden_size) - A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1]) + A = A.reshape(bsz, 1, attention_size) 
+ A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) return A, (Kn, Vn) pass -def fast_mlp_inference(self, X): +def fast_swiglu_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) bsz, _, hd = X.shape @@ -339,7 +340,7 @@ def LlamaAttention_fast_forward( # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass - attn_output = A.reshape(bsz, q_len, self.hidden_size) + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) attn_output = self.apply_o(self, attn_output) attn_weights = None return attn_output, attn_weights, past_key_value @@ -390,7 +391,7 @@ def LlamaDecoderLayer_fast_forward( # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) - hidden_states = fast_mlp_inference(self.mlp, hidden_states) + hidden_states = fast_swiglu_inference(self.mlp, hidden_states) hidden_states += residual else: residual = hidden_states @@ -507,6 +508,14 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + # Mormalized from Gemma + if self.config.model_type == "gemma": + inputs_requires_grad = inputs_embeds.requires_grad + if inputs_requires_grad: inputs_embeds.requires_grad_(False) + inputs_embeds *= math_sqrt(self.config.hidden_size) + if inputs_requires_grad: inputs_embeds.requires_grad_(True) + pass + # Fix up attention mask by setting elements to 0 # Specifically for DPO if self._has_no_labels and (attention_mask is not None) and (past_key_values is None): @@ -646,7 +655,7 @@ def LlamaModel_fast_forward_inference( # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) - hidden_states = fast_mlp_inference(decoder_layer.mlp, hidden_states) + hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) hidden_states += residual 
next_decoder_cache.append(present_key_value) @@ -812,7 +821,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) pass - def forward(self, x, seq_len=None): + def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) @@ -886,20 +895,22 @@ def from_pretrained( device_map = "sequential", rope_scaling = None, fix_tokenizer = True, + model_patcher = None, **kwargs, ): + if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth: Fast Llama patching release {__version__}\n"\ + f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release {__version__}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. 
FA = {HAS_FLASH_ATTENTION}.\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) - FastLlamaModel.pre_patch() + model_patcher.pre_patch() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 @@ -955,7 +966,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model = FastLlamaModel.post_patch(model) + model = model_patcher.post_patch(model) # Patch up QKV / O and MLP for idx, layer in enumerate(model.model.layers): @@ -1159,6 +1170,14 @@ def post_patch(model): quant_state.dtype = correct_dtype pass pass + # Downcast RoPE embedding to correct data type + if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ + and (module.cos_cached.dtype != correct_dtype): + + module.cos_cached = module.cos_cached.to(correct_dtype) + module.sin_cached = module.sin_cached.to(correct_dtype) + pass + pass pass # Clear deleted GPU items @@ -1309,6 +1328,16 @@ def patch_peft_model( ) pass + # Get activation function + model_type = model.config.model_type + + if model_type == "llama": apply_lora_mlp = apply_lora_mlp_swiglu + elif model_type == "mistral": apply_lora_mlp = apply_lora_mlp_swiglu + elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu + else: + raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") + pass + model = prepare_model_for_kbit_training( model, use_gradient_checkpointing = use_gradient_checkpointing, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index e4b3561deb..67a59c850c 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -24,6 +24,9 @@ major, minor = transformers_version.split(".")[:2] major, minor = int(major), int(minor) SUPPORTS_FOURBIT = (major > 4) or (major == 4 and minor >= 37) +SUPPORTS_GEMMA = (major > 4) or (major == 4 and minor >= 38) +if SUPPORTS_GEMMA: + from .gemma import FastGemmaModel del major, minor @@ -99,6 +102,15 @@ def from_pretrained( if model_type == 
"llama": dispatch_model = FastLlamaModel elif model_type == "mistral": dispatch_model = FastMistralModel + elif model_type == "gemma": + if not SUPPORTS_GEMMA: + raise RuntimeError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"\ + f"The minimum required version is 4.38.\n"\ + f'Try `pip install --upgrade "transformers>=4.38"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastGemmaModel else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ @@ -115,6 +127,7 @@ def from_pretrained( device_map = device_map, rope_scaling = rope_scaling, fix_tokenizer = fix_tokenizer, + model_patcher = dispatch_model, *args, **kwargs, ) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 323358fff1..afcbdb75f5 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -74,6 +74,22 @@ "unsloth/solar-10.7b-bnb-4bit" : ( "upstage/SOLAR-10.7B-v1.0", ), + "unsloth/gemma-7b-bnb-4bit" : ( + "unsloth/gemma-7b", + "google/gemma-7b", + ), + "unsloth/gemma-2b-bnb-4bit" : ( + "unsloth/gemma-2b", + "google/gemma-2b", + ), + "unsloth/gemma-7b-it-bnb-4bit" : ( + "unsloth/gemma-7b-it", + "google/gemma-7b-it", + ), + "unsloth/gemma-2b-bnb-4bit" : ( + "unsloth/gemma-2b-it", + "google/gemma-2b-it", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 0e36023255..6c9d9ecc5c 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -293,8 +293,10 @@ def from_pretrained( device_map = "sequential", rope_scaling = None, # Mistral does not support RoPE scaling fix_tokenizer = True, + model_patcher = None, **kwargs, ): + if model_patcher is None: model_patcher = FastMistralModel # Mistral does NOT support RoPE Scaling! 
if rope_scaling is not None: logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") @@ -305,13 +307,13 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth: Fast Mistral patching release {__version__}\n"\ + f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release {__version__}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ - f' "-____-" Apache 2 free license: http://github.com/unslothai/unsloth' + f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) - FastMistralModel.pre_patch() + model_patcher.pre_patch() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 @@ -360,7 +362,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model = FastMistralModel.post_patch(model) + model = model_patcher.post_patch(model) # Patch up QKV / O and MLP for idx, layer in enumerate(model.model.layers): diff --git a/unsloth/save.py b/unsloth/save.py index 83e13bd51c..51ddeb30e7 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -369,6 +369,7 @@ def unsloth_save_model( # Switch to our fast saving modules if it's a slow PC! 
n_cpus = psutil.cpu_count(logical = False) + if n_cpus is None: n_cpus = 1 if safe_serialization is None: safe_serialization = True @@ -669,7 +670,9 @@ def save_to_gguf( pass pass - n_cpus = psutil.cpu_count()*2 + n_cpus = psutil.cpu_count() + if n_cpus is None: n_cpus = 1 + n_cpus *= 2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model final_location = f"./{model_directory}-unsloth.{first_conversion.upper()}.gguf" From 1fe11bab5860054f36ab02315007eb013bf628e5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 29 Feb 2024 00:15:22 +1100 Subject: [PATCH 0145/1088] Hotfix - fix DoRA, Gemma prompt template (#202) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update 
gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py --- unsloth/chat_templates.py | 3 +- unsloth/models/_utils.py | 81 ++++++++++++--------------------------- 2 files changed, 27 insertions(+), 57 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 6f0237c27b..5ad34d6882 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -221,6 +221,7 @@ # Notice we must use |trim for lstrip and rstrip. maps to 106. # maps to 107. user and model are normal 1 word tokens. 
gemma_template = \ + "{{ bos_token }}"\ "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ "{{'user\n' + message['content'] | trim + '\n'}}"\ @@ -238,7 +239,7 @@ # Gemma with ChatML instead -gemma_chatml_template = chatml_template +gemma_chatml_template = "{{ bos_token }}" + chatml_template gemma_chatml_eos_token = ( {"" : "<|im_start|>", "" : "<|im_end|>"}, "<|im_end|>", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c4de7198b9..e92be5e993 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -240,61 +240,30 @@ def check_tokenizer( # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. -def LoraLayer_update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, - use_rslora = False): - # This code works for linear layers, override for other layer types - if r <= 0: - raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") - - self.r[adapter_name] = r - self.lora_alpha[adapter_name] = lora_alpha - if lora_dropout > 0.0: - lora_dropout_layer = torch.nn.Dropout(p=lora_dropout) - else: - lora_dropout_layer = torch.nn.Identity() - - self.lora_dropout.update(torch.nn.ModuleDict({adapter_name: lora_dropout_layer})) - # Actual trainable parameters - self.lora_A[adapter_name] = torch.nn.Linear(self.in_features, r, bias=False) - self.lora_B[adapter_name] = torch.nn.Linear(r, self.out_features, bias=False) - if use_rslora: - self.scaling[adapter_name] = lora_alpha / math.sqrt(r) - else: - self.scaling[adapter_name] = lora_alpha / r - - if init_lora_weights == "loftq": - # We manually check for PEFT - if not hasattr(self, "loftq_init"): - import peft - raise RuntimeError( - f"Unsloth: Your PEFT version of {peft.__version__} does not support LoftQ init.\n"\ - "Please install PEFT 0.7.2 or higher.\n"\ - "You can also install from source: `pip install 
git+https://github.com/huggingface/peft.git" - ) - pass - self.loftq_init(adapter_name) - - elif init_lora_weights: - self.reset_lora_parameters(adapter_name, init_lora_weights) +from peft.tuners.lora.layer import LoraLayer +import inspect, re +try: + source = inspect.getsource(LoraLayer.update_layer) + text = "if weight is not None:\n" + start = source.find(text) + len(text) + end = source.find("self.to(weight.device)", start) + spaces = re.findall(r"^([ ]{1,})break", source, flags = re.MULTILINE)[0] + source = source.replace(source[start : end], spaces) + spaces = len(re.match(r"[\s]{1,}", source).group(0)) + lines = source.split("\n") + source = "\n".join(x[spaces:] for x in lines) + source = re.sub("([^\.])nn\.", r"\1torch.nn.", source) + source = source.replace("def update_layer", "def LoraLayer_update_layer") + exec(source, globals()) - # check weight and qweight (for GPTQ) - for weight_name in ("weight", "qweight"): - weight = getattr(self.get_base_layer(), weight_name, None) - if weight is not None: - # [INCORRECT code] - # - # the layer is already completely initialized, this is an update - # if weight.dtype.is_floating_point or weight.dtype.is_complex: - # self.to(weight.device, dtype=weight.dtype) - # else: - # self.to(weight.device) - self.to(weight.device, non_blocking = True) - break - self.set_adapter(self.active_adapters) + # Fix up incorrect downcasting of LoRA weights + from peft.tuners.lora.layer import LoraLayer + LoraLayer.update_layer = LoraLayer_update_layer + from peft.tuners.lora import LoraLayer + LoraLayer.update_layer = LoraLayer_update_layer +except: + logger.warning_once( + "Unsloth unsuccessfully patched LoraLayer.update_layer. Please file a bug report.\n"\ + "Luckily, your training run will still work in the meantime!" 
+ ) pass - -# Fix up incorrect downcasting of LoRA weights -from peft.tuners.lora.layer import LoraLayer -LoraLayer.update_layer = LoraLayer_update_layer -from peft.tuners.lora import LoraLayer -LoraLayer.update_layer = LoraLayer_update_layer From dbba69b085b9d6049b57b48b882af7e9f29df5b2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 29 Feb 2024 00:18:38 +1100 Subject: [PATCH 0146/1088] Nightly (#204) * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update 
llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update 
fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py --- unsloth/models/llama.py | 2 ++ 1 file changed, 2 insertions(+) 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 359761c06a..54e016d1af 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1216,6 +1216,8 @@ def get_peft_model( ) pass + if loftq_config is None: loftq_config = {} + import inspect signature = str(inspect.signature(LoraConfig)) SUPPORTS_LOFTQ = "loftq_config" in signature From 528d5fb0bd1adf50a792dda9fb63d5a2a3df4600 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Mar 2024 18:21:44 +1100 Subject: [PATCH 0147/1088] Fix Gemma activation function (#214) * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * 
Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py --- pyproject.toml | 14 ++++- unsloth/kernels/__init__.py | 10 +++- unsloth/kernels/fast_lora.py | 20 ++++++- unsloth/kernels/geglu.py | 111 +++++++++++++++++++++++++++++++++-- unsloth/models/_utils.py | 2 +- unsloth/models/dpo.py | 31 ++++++---- unsloth/models/gemma.py | 7 ++- unsloth/models/llama.py | 16 ++++- 8 files changed, 180 insertions(+), 31 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7e8956c712..385366a391 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,8 +33,8 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "transformers>=4.38.0", - "datasets", + "transformers>=4.38.2", + "datasets>=2.16.0", "sentencepiece", "accelerate>=0.26.1", "trl>=0.7.9", @@ -64,6 +64,16 @@ cu121onlytorch211 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] +cu118onlytorch212 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu121onlytorch212 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11'", +] cu118onlytorch220 = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 9c231e6ce1..8ff255e4a4 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -16,11 +16,17 @@ from .rms_layernorm import fast_rms_layernorm from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel -from .geglu import geglu_forward_kernel, geglu_backward_kernel +from .geglu import ( + geglu_exact_forward_kernel, + geglu_exact_backward_kernel, + geglu_approx_forward_kernel, + geglu_approx_backward_kernel, +) from .fast_lora import ( get_lora_parameters, apply_lora_mlp_swiglu, - apply_lora_mlp_geglu, + apply_lora_mlp_geglu_exact, + apply_lora_mlp_geglu_approx, apply_lora_qkv, apply_lora_o, ) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 3ed0d3c914..6568bba681 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -183,8 +183,8 @@ def apply_lora_mlp_swiglu(self, X): pass -from .geglu import geglu_forward_kernel, geglu_backward_kernel -def apply_lora_mlp_geglu(self, X): +from .geglu import geglu_exact_forward_kernel, geglu_exact_backward_kernel +def apply_lora_mlp_geglu_exact(self, X): gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) upW, upW_quant, upA, upB, upS = get_lora_parameters(self. 
up_proj) downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) @@ -192,7 +192,21 @@ def apply_lora_mlp_geglu(self, X): gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, downW, downW_quant, downA, downB, downS, - geglu_forward_kernel, geglu_backward_kernel,) + geglu_exact_forward_kernel, geglu_exact_backward_kernel,) + return out +pass + + +from .geglu import geglu_approx_forward_kernel, geglu_approx_backward_kernel +def apply_lora_mlp_geglu_approx(self, X): + gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) + upW, upW_quant, upA, upB, upS = get_lora_parameters(self. up_proj) + downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) + out = LoRA_MLP.apply(X, + gateW, gateW_quant, gateA, gateB, gateS, + upW, upW_quant, upA, upB, upS, + downW, downW_quant, downA, downB, downS, + geglu_approx_forward_kernel, geglu_approx_backward_kernel,) return out pass diff --git a/unsloth/kernels/geglu.py b/unsloth/kernels/geglu.py index 7001b8ff0a..df80fcb79b 100644 --- a/unsloth/kernels/geglu.py +++ b/unsloth/kernels/geglu.py @@ -19,7 +19,7 @@ @triton.jit -def _forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): +def _exact_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): block_idx = tl.program_id(0) offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements @@ -38,18 +38,18 @@ def _forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): pass -def geglu_forward_kernel(gate, up): +def geglu_exact_forward_kernel(gate, up): batch, seq_len, hd = gate.shape n_elements = gate.numel() out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda") grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) + _exact_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) return out pass @triton.jit -def _backward_kernel(DW, e, 
g, n_elements, BLOCK_SIZE : tl.constexpr,): +def _exact_backward_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): """ f = 1/2 * e * (1 + erf(1/sqrt(2) * e)) h = f * up @@ -95,10 +95,109 @@ def _backward_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): pass -def geglu_backward_kernel(DW, e, g): +def geglu_exact_backward_kernel(DW, e, g): batch_seq_len, hd = e.shape n_elements = e.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + _exact_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + return DW, e, g +pass + + +@triton.jit +def _approx_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + # f = 1/2 * e * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3 ) )) + # f = 1/2 * e * (1 + tanh( sqrt(2/pi) * x * (1 + 0.044715 * x^2 ) )) + # h = f * up + s = 0.7978845608028654 # math.sqrt(2 / math.pi) + + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) + + f_row = 0.5 * e_row * ( + tl.math.tanh(s * e_row * (1.0 + 0.044715 * e_row * e_row)) \ + + 1.0 + ) + f_row = f_row.to(g_row.dtype) # Exact copy from HF + h_row = f_row * g_row + + # Store h + tl.store(h + offsets, h_row, mask = mask) +pass + + +def geglu_approx_forward_kernel(gate, up): + batch, seq_len, hd = gate.shape + n_elements = gate.numel() + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda") + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _approx_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) + return out +pass + + +@triton.jit +def _approx_backward_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): + """ + f = 1/2 * e * (1 + tanh( sqrt(2/pi) * x * (1 + 0.044715 * x^2 ) )) + h = f * up + + df/de 
(with help from https://arxiv.org/pdf/2305.12073.pdf :)) + df/de = 1/2 * [1 + tanh( sqrt(2/pi) * x * (1 + 0.044715 * x^2 ) )] + + 1/2 * sech^2 [ sqrt(2/pi) * x * (1 + 0.044715 * x^2 ) ] * \ + ( sqrt(2/pi) * x * (1 + 0.044715 * x^2 * 3 ) ) + + Notice sech^2(x) = 1 - tanh^2(x) + So reuse tanh( sqrt(2/pi) * x * (1 + 0.044715 * x^2 ) ) + + See https://www.desmos.com/calculator/nqprfoni6x + """ + block_idx = tl.program_id(0) + offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + + DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32) + e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32) + g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) + + # See https://www.desmos.com/calculator/nqprfoni6x + s = 0.7978845608028654 # math.sqrt(2 / math.pi) + a = s * e_row # a = sqrt(2 / pi) * x + b = a * 0.044715 * e_row * e_row # b = a * 0.044715 * x^2 + T = 1.0 + tl.math.tanh(a + b) + T2 = 0.5 * T + # Q = 0.5 * -T * (T - 2.0) * (a + 3.0 * b) + Q2 = -T2 * (T - 2.0) * (a + 3.0 * b) + df_de = T2 + Q2 # 1/2 * (T + Q) + + # f = 1/2 * e * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3 ) )) + f_row = T2 * e_row + f_row = f_row.to(DW_row.dtype) + # h = f * g + h_row = f_row * g_row + # df = DW * f + df_row = DW_row * f_row + # dg = DW * g + dg_row = DW_row * g_row + + de_row = dg_row.to(tl.float32) * df_de + de_row = de_row.to(DW_row.dtype) + + # Store derivatives in buffers + tl.store(DW + offsets, h_row, mask = mask) # h = f * g + tl.store(e + offsets, df_row, mask = mask) # df = DW * f + tl.store(g + offsets, de_row, mask = mask) # de +pass + + +def geglu_approx_backward_kernel(DW, e, g): + batch_seq_len, hd = e.shape + n_elements = e.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _approx_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) return DW, e, g pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e92be5e993..3e3b8ff22c 100644 
--- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -25,7 +25,7 @@ platform_system = platform_system() import math -__version__ = "2024.2" +__version__ = "2024.3" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 3ae4d636f4..b7c7305bb3 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -12,11 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from transformers.utils.notebook import ( - IntervalStrategy, - NotebookTrainingTracker, - NotebookProgressCallback, -) +try: + from transformers.utils.notebook import ( + IntervalStrategy, + NotebookTrainingTracker, + NotebookProgressCallback, + ) + HAS_NOTEBOOK = True +except: + HAS_NOTEBOOK = False +pass DPOTrainer_metrics = [ "rewards/chosen", @@ -101,13 +106,15 @@ def NotebookTrainingTracker_write_line(self, values): def PatchDPOTrainer(): - from transformers.trainer import is_in_notebook - if is_in_notebook(): - # Patch DPO notebook printing - NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line - from transformers.trainer import DEFAULT_PROGRESS_CALLBACK - DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin - DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log + if HAS_NOTEBOOK: + from transformers.trainer import is_in_notebook + if is_in_notebook(): + # Patch DPO notebook printing + NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line + from transformers.trainer import DEFAULT_PROGRESS_CALLBACK + DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin + DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log + pass pass pass diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 4aa634a4bd..97da833c12 100644 --- a/unsloth/models/gemma.py +++ 
b/unsloth/models/gemma.py @@ -48,7 +48,7 @@ def fast_geglu_inference(self, X): gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) up = fast_linear_forward(self. up_proj, X, out = temp[1]) - gate = torch.nn.functional.gelu(gate) + gate = torch.nn.functional.gelu(gate, approximate = "tanh") gate *= up # X = self.down_proj(gate) @@ -70,7 +70,7 @@ def GemmaDecoderLayer_fast_forward( padding_mask: Optional[torch.LongTensor] = None, *args, **kwargs, ): - if False:#past_key_value is not None: + if past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") # Self Attention @@ -267,6 +267,9 @@ def post_patch(model): # Patch RMS Layernorm for name, module in model.named_modules(): if isinstance(module, GemmaRMSNorm): + # Must be in float32 + # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36 + module = module.to(torch.float32) module.weight += 1.0 # return output * (1 + self.weight) if not hasattr(module, "variance_epsilon"): module.variance_epsilon = module.eps # Gemma doesn't use variance_epsilon diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 54e016d1af..20552644d9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -511,7 +511,12 @@ def LlamaModel_fast_forward( # Mormalized from Gemma if self.config.model_type == "gemma": inputs_requires_grad = inputs_embeds.requires_grad - if inputs_requires_grad: inputs_embeds.requires_grad_(False) + if not inputs_embeds.is_leaf: + inputs_embeds = inputs_embeds.detach() + inputs_requires_grad = True + elif inputs_requires_grad: + inputs_embeds.requires_grad_(False) + pass inputs_embeds *= math_sqrt(self.config.hidden_size) if inputs_requires_grad: inputs_embeds.requires_grad_(True) pass @@ -522,7 +527,12 @@ def LlamaModel_fast_forward( # Careful for inference the attention_mask is size (1, kv_seq_len) # Whilst the input_embeds is size (1, 1, 4096) inputs_requires_grad = inputs_embeds.requires_grad - if 
inputs_requires_grad: inputs_embeds.requires_grad_(False) + if not inputs_embeds.is_leaf: + inputs_embeds = inputs_embeds.detach() + inputs_requires_grad = True + elif inputs_requires_grad: + inputs_embeds.requires_grad_(False) + pass inputs_embeds *= attention_mask.unsqueeze(0).transpose(0, 1).transpose(1, 2) if inputs_requires_grad: inputs_embeds.requires_grad_(True) pass @@ -1335,7 +1345,7 @@ def patch_peft_model( if model_type == "llama": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "mistral": apply_lora_mlp = apply_lora_mlp_swiglu - elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu + elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu_approx else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") pass From b4fe3cd67d171d899e3a90b0d9157940b6aaba3c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Mar 2024 19:36:06 +1100 Subject: [PATCH 0148/1088] Fix Gemma fast inference (#215) * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * 
Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update 
llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py --- unsloth/models/gemma.py | 90 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 97da833c12..e9560cd172 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -173,6 +173,94 @@ def GemmaModel_fast_forward_inference( pass +def GemmaForCausalLM_fast_forward( + self, + input_ids: torch.LongTensor = None, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + *args, **kwargs, +) -> Union[Tuple, CausalLMOutputWithPast]: + + if causal_mask is None and past_key_values is None: + causal_mask = xformers.attn_bias.LowerTriangularMask() + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + 
self.model._has_no_labels = labels is None + + if past_key_values is not None and \ + hasattr(self.model.layers[0].self_attn, "paged_attention"): + outputs = GemmaModel_fast_forward_inference( + self.model, + input_ids, + past_key_values, + ) + else: + outputs = self.model( + input_ids=input_ids, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pass + + hidden_states = outputs[0] + bsz, q_len, hd = hidden_states.shape + if bsz == 1 and q_len == 1: + logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + logits = logits.unsqueeze(0).unsqueeze(0) + else: + logits = self.lm_head(hidden_states) + pass + + loss = None + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + ) + pass + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) +pass + + class FastGemmaModel(FastLlamaModel): @staticmethod @@ -182,7 +270,7 @@ def pre_patch(): GemmaFlashAttention2.forward = LlamaAttention_fast_forward GemmaDecoderLayer .forward = GemmaDecoderLayer_fast_forward GemmaModel .forward = LlamaModel_fast_forward - GemmaForCausalLM .forward = LlamaForCausalLM_fast_forward + GemmaForCausalLM .forward = 
GemmaForCausalLM_fast_forward PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. From d862d053ed7deb2275e47d2aec2f7cd587decf0e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Mar 2024 16:19:55 +1100 Subject: [PATCH 0149/1088] Fix Gemma norm float32 (#217) * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py --- unsloth/models/gemma.py 
| 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index e9560cd172..99bd9e7865 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -357,7 +357,8 @@ def post_patch(model): if isinstance(module, GemmaRMSNorm): # Must be in float32 # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36 - module = module.to(torch.float32) + # module = module.to(torch.float32) + # Don't convert to float32 since error analysis shows it makes it worse!! module.weight += 1.0 # return output * (1 + self.weight) if not hasattr(module, "variance_epsilon"): module.variance_epsilon = module.eps # Gemma doesn't use variance_epsilon From a0cc0d163843a403a23e5cd94d20121690bd6830 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 7 Mar 2024 04:34:06 +1100 Subject: [PATCH 0150/1088] Fix Gemma (#223) * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py 
* Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py --- pyproject.toml | 11 ++ unsloth/__init__.py | 12 +- unsloth/chat_templates.py | 4 + unsloth/kernels/rms_layernorm.py | 52 ++++++++- unsloth/kernels/rope_embedding.py | 16 ++- unsloth/models/gemma.py | 175 +++++++++++++----------------- unsloth/models/llama.py | 174 +++++++++++++++-------------- unsloth/save.py | 1 + 8 files changed, 250 insertions(+), 195 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 385366a391..05c4191974 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,7 @@ huggingface = [ "psutil", "wheel>=0.42.0", "numpy", + "triton", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -104,6 +105,16 @@ cu121-torch211 = [ "bitsandbytes", "unsloth[cu121onlytorch211]", ] +cu118-torch212 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118onlytorch212]", +] +cu121-torch212 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch212]", +] cu118-torch220 = [ "unsloth[huggingface]", "bitsandbytes", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 9dce29cd83..7080c92894 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -99,10 +99,14 @@ cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: - raise ImportError("Unsloth: CUDA is not linked properly.\n"\ - "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ - "You need 
to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ - "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.") + warnings.warn( + "Unsloth: CUDA is not linked properly.\n"\ + "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"\ + "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ + "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ + "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\ + "Unsloth will still run for now, but maybe it might crash - let's hope it works!" + ) pass from .models import * diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 5ad34d6882..9675b10fe4 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -257,6 +257,10 @@ def get_chat_template( assert("Unsloth: Can only map new tokens to EOS for now. Adding new tokens is not yet supported.") pass + if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": + chat_template = "gemma_chatml" + pass + old_padding_side = tokenizer.padding_side if type(chat_template) in (list, tuple,): diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index ccd9f89948..4db89b7816 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -53,6 +53,7 @@ def _rms_layernorm_forward( pass +@triton.heuristics({"GEMMA": lambda args: args["GEMMA"],}) @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, @@ -61,6 +62,7 @@ def _rms_layernorm_backward( r, r_row_stride, dW, dW_row_stride, n_cols, eps, + GEMMA : tl.constexpr, BLOCK_SIZE : tl.constexpr, ): """ @@ -84,16 +86,51 @@ def _rms_layernorm_backward( inv_var = tl.load(r).to(tl.float32) normed = X_row * inv_var - dY_W = dY_row * W_row + if GEMMA: dY_W = dY_row * (W_row + 1.0) + else: dY_W = dY_row * W_row + rowsum_dY_normed = tl.sum(dY_W * normed, 
axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) tl.store(dY + col_offsets, output, mask = mask) pass +@triton.jit +def _gemma_rms_layernorm_forward( + Y, Y_row_stride, + X, X_row_stride, + W, W_row_stride, + r, r_row_stride, + n_cols, eps, + BLOCK_SIZE : tl.constexpr, +): + # Copies https://github.com/google-deepmind/gemma/blob/main/gemma/layers.py#L31 + # and https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L33 + # exactly. Essentially all in float32! + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + Y += row_idx * Y_row_stride + X += row_idx * X_row_stride + r += row_idx * r_row_stride + + X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + + row_var = tl.sum(X_row * X_row, axis = 0) / n_cols + inv_var = 1.0 / tl.sqrt(row_var + eps) # Must be 1/sqrt to match Deepmind's impl + tl.store(r, inv_var) + normed = X_row * inv_var + output = normed * (W_row + 1.0) + + tl.store(Y + col_offsets, output, mask = mask) +pass + + class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps): + def forward(ctx, X, W, eps, gemma = False): shape = X.shape dim = shape[-1] X = X.view(-1, dim) @@ -103,7 +140,8 @@ def forward(ctx, X, W, eps): Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") - _rms_layernorm_forward[(n_rows,)]( + fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward + fx[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), W, W.stride(0), @@ -115,6 +153,7 @@ def forward(ctx, X, W, eps): ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps + ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) return Y.view(*shape) pass @@ -135,18 +174,19 @@ def backward(ctx, dY): r, r .stride(0), dW, dW.stride(0), n_cols, ctx.eps, 
+ GEMMA = ctx.GEMMA, BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) dX = dY.view(*shape) - return dX, None, None + return dX, None, None, None pass pass -def fast_rms_layernorm(layernorm, X): +def fast_rms_layernorm(layernorm, X, gemma = False): W = layernorm.weight eps = layernorm.variance_epsilon - out = Fast_RMS_Layernorm.apply(X, W, eps) + out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index a9527520ab..c1167393fb 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -39,24 +39,28 @@ def _rope_embedding( half_head_dim = head_dim // 2 mask = col_offsets < half_head_dim - Q1 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*0 + col_offsets, mask = mask, other = 0) - Q2 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*1 + col_offsets, mask = mask, other = 0) sin1 = tl.load(sin + (row_position % seqlen)*sin_row_stride + \ half_head_dim*0 + col_offsets, mask = mask, other = 0) cos1 = tl.load(cos + (row_position % seqlen)*cos_row_stride + \ half_head_dim*0 + col_offsets, mask = mask, other = 0) + # For Gemma - sometimes RoPE must be done in float32 and not bfloat16 + Q1 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*0 + col_offsets, mask = mask, other = 0).to(sin1.dtype) + Q2 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*1 + col_offsets, mask = mask, other = 0).to(sin1.dtype) + if BACKWARD_PASS: # See our blog post for more info. 
sin1 = -sin1 pass tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*0 + col_offsets, Q1*cos1 - Q2*sin1, mask = mask) + half_head_dim*0 + col_offsets, + Q1*cos1 - Q2*sin1, mask = mask) tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*1 + col_offsets, Q2*cos1 + Q1*sin1, mask = mask) + half_head_dim*1 + col_offsets, + Q2*cos1 + Q1*sin1, mask = mask) pass diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 99bd9e7865..bcd0e1abd9 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -39,6 +39,7 @@ pass +torch_nn_functional_gelu = torch.nn.functional.gelu def fast_geglu_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) @@ -48,7 +49,7 @@ def fast_geglu_inference(self, X): gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) up = fast_linear_forward(self. up_proj, X, out = temp[1]) - gate = torch.nn.functional.gelu(gate, approximate = "tanh") + gate = torch_nn_functional_gelu(gate, approximate = "tanh") gate *= up # X = self.down_proj(gate) @@ -57,6 +58,18 @@ def fast_geglu_inference(self, X): pass +def fast_rms_layernorm_inference_gemma(self, X, out_weight): + XX = X.to(torch.float32) + variance = XX.square().mean(-1, keepdim = True) + variance += self.variance_epsilon + XX *= variance.rsqrt_() + out_weight[:] = self.weight + out_weight += 1.0 + XX *= out_weight + return XX.to(X.dtype) +pass + + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 def GemmaDecoderLayer_fast_forward( self, @@ -72,10 +85,11 @@ def GemmaDecoderLayer_fast_forward( ): if past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") + out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") # Self Attention residual = hidden_states - hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states = 
fast_rms_layernorm_inference_gemma(self.input_layernorm, hidden_states, out_weight) hidden_states, present_key_value = LlamaAttention_fast_forward_inference( self.self_attn, hidden_states, @@ -87,12 +101,12 @@ def GemmaDecoderLayer_fast_forward( # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(self.post_attention_layernorm, hidden_states, out_weight) hidden_states = fast_geglu_inference(self.mlp, hidden_states) hidden_states += residual else: residual = hidden_states - hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states, gemma = True) # hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, @@ -108,7 +122,7 @@ def GemmaDecoderLayer_fast_forward( # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states, gemma = True) # hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states @@ -137,15 +151,18 @@ def GemmaModel_fast_forward_inference( ): # Fix out of bounds tokenization input_ids = input_ids[:,:self.max_seq_length] + out_weight = torch.empty_like(self.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") hidden_states = self.embed_tokens(input_ids) - hidden_states *= math_sqrt(self.config.hidden_size) + # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 + # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 + hidden_states *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype) next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): 
# Self Attention residual = hidden_states - hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.input_layernorm, hidden_states, out_weight) hidden_states, present_key_value = LlamaAttention_fast_forward_inference( decoder_layer.self_attn, hidden_states, @@ -156,13 +173,13 @@ def GemmaModel_fast_forward_inference( # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.post_attention_layernorm, hidden_states, out_weight) hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states) hidden_states += residual next_decoder_cache.append(present_key_value) pass - hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(self.norm, hidden_states, out_weight) return BaseModelOutputWithPast( last_hidden_state = hidden_states, @@ -173,91 +190,54 @@ def GemmaModel_fast_forward_inference( pass -def GemmaForCausalLM_fast_forward( - self, - input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - *args, **kwargs, -) -> Union[Tuple, CausalLMOutputWithPast]: - - if causal_mask is None and past_key_values is None: - causal_mask = xformers.attn_bias.LowerTriangularMask() - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - 
output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict +# Follows line by line https://github.com/google-deepmind/gemma/blob/main/gemma/positional_embeddings.py#L45 +# Formulates cos and sin differently from Llama! +class GemmaFixedRotaryEmbedding(torch.nn.Module): + # Fixes https://github.com/huggingface/transformers/pull/28837 + # https://github.com/microsoft/DeepSpeed/issues/4932 + # The precision of RoPE buffers is not correct, so we cast to int64. + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) + pass - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - self.model._has_no_labels = labels is None + def _set_cos_sin_cache(self, seq_len, device, dtype): + # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and + # in FP32. They are applied (multiplied) in FP32 as well. + self.max_seq_len_cached = seq_len - if past_key_values is not None and \ - hasattr(self.model.layers[0].self_attn, "paged_attention"): - outputs = GemmaModel_fast_forward_inference( - self.model, - input_ids, - past_key_values, - ) - else: - outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + # The difference is we do division explicity instead of t * (1/x) ie we do t/x. 
+ freq_exponents = (2.0 / self.dim) * ( + torch.arange(self.dim // 2, dtype = torch.int64, device = "cpu").float() ) + timescale = self.base**freq_exponents + positions = torch.arange(self.max_seq_len_cached, device = "cpu", dtype = torch.int64).float() + radians_new = positions[..., None] / timescale[None, None, :] + radians_new = radians_new.squeeze(0) + + emb = torch.cat((radians_new, radians_new), dim = -1) + # We must do RoPE in float32! + cos = emb.cos().to(device = device, non_blocking = True)#, dtype = dtype) + sin = emb.sin().to(device = device, non_blocking = True)#, dtype = dtype) + self.register_buffer("cos_cached", cos, persistent = False) + self.register_buffer("sin_cached", sin, persistent = False) pass - hidden_states = outputs[0] - bsz, q_len, hd = hidden_states.shape - if bsz == 1 and q_len == 1: - logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) - logits = logits.unsqueeze(0).unsqueeze(0) - else: - logits = self.lm_head(hidden_states) - pass + def forward(self, x, position_ids=None, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) - loss = None - if labels is not None: - shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") - pass - - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), ) pass - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - 
hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) pass @@ -270,7 +250,7 @@ def pre_patch(): GemmaFlashAttention2.forward = LlamaAttention_fast_forward GemmaDecoderLayer .forward = GemmaDecoderLayer_fast_forward GemmaModel .forward = LlamaModel_fast_forward - GemmaForCausalLM .forward = GemmaForCausalLM_fast_forward + GemmaForCausalLM .forward = CausalLM_fast_forward(GemmaModel_fast_forward_inference) PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. @@ -278,7 +258,7 @@ def pre_patch(): # https://github.com/huggingface/transformers/pull/27931 # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py import transformers.models.gemma.modeling_gemma - transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding = LlamaRotaryEmbedding + transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding = GemmaFixedRotaryEmbedding return pass @@ -329,13 +309,14 @@ def post_patch(model): pass pass # Downcast RoPE embedding to correct data type - if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - and (module.cos_cached.dtype != correct_dtype): - - module.cos_cached = module.cos_cached.to(correct_dtype) - module.sin_cached = module.sin_cached.to(correct_dtype) - pass - pass + # RoPE must be done in float32 for Gemma + # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ + # and (module.cos_cached.dtype != correct_dtype): + + # module.cos_cached = module.cos_cached.to(correct_dtype) + # module.sin_cached = module.sin_cached.to(correct_dtype) + # pass + # pass pass # Add 1 to weight @@ -358,8 +339,8 @@ def post_patch(model): # Must be in float32 # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36 # module = module.to(torch.float32) - # Don't convert to float32 since error 
analysis shows it makes it worse!! - module.weight += 1.0 # return output * (1 + self.weight) + # Leave + 1 to Triton kernel itself + # module.weight += 1.0 # return output * (1 + self.weight) if not hasattr(module, "variance_epsilon"): module.variance_epsilon = module.eps # Gemma doesn't use variance_epsilon pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 20552644d9..3f281a09c8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -208,6 +208,7 @@ def LlamaAttention_fast_forward_inference( pass +torch_nn_functional_silu = torch.nn.functional.silu def fast_swiglu_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) @@ -217,7 +218,7 @@ def fast_swiglu_inference(self, X): gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) up = fast_linear_forward(self. up_proj, X, out = temp[1]) - gate = torch.nn.functional.silu(gate, inplace = True) + gate = torch_nn_functional_silu(gate, inplace = True) gate *= up # X = self.down_proj(gate) @@ -509,7 +510,8 @@ def LlamaModel_fast_forward( inputs_embeds = self.embed_tokens(input_ids) # Mormalized from Gemma - if self.config.model_type == "gemma": + IS_GEMMA = self.config.model_type == "gemma" + if IS_GEMMA: inputs_requires_grad = inputs_embeds.requires_grad if not inputs_embeds.is_leaf: inputs_embeds = inputs_embeds.detach() @@ -517,7 +519,12 @@ def LlamaModel_fast_forward( elif inputs_requires_grad: inputs_embeds.requires_grad_(False) pass - inputs_embeds *= math_sqrt(self.config.hidden_size) + # Match Gemma exactly by casting to bfloat16 / float16 + # inputs_embeds *= math_sqrt(self.config.hidden_size) + # Ie 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 + # & 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 + inputs_embeds *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = inputs_embeds.dtype) + # inputs_embeds *= math_sqrt(self.config.hidden_size) if inputs_requires_grad: inputs_embeds.requires_grad_(True) pass @@ -619,7 +626,7 
@@ def custom_forward(*inputs): all_self_attns += (layer_outputs[1],) pass - hidden_states = fast_rms_layernorm(self.norm, hidden_states) + hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) # add hidden states from the last decoder layer if output_hidden_states: @@ -681,91 +688,94 @@ def LlamaModel_fast_forward_inference( pass -def LlamaForCausalLM_fast_forward( - self, - input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - *args, **kwargs, -) -> Union[Tuple, CausalLMOutputWithPast]: +def CausalLM_fast_forward(fast_forward_inference): + def _CausalLM_fast_forward( + self, + input_ids: torch.LongTensor = None, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + *args, **kwargs, + ) -> Union[Tuple, CausalLMOutputWithPast]: + + if causal_mask is None and past_key_values is None: + causal_mask = xformers.attn_bias.LowerTriangularMask() + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not 
None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if causal_mask is None and past_key_values is None: - causal_mask = xformers.attn_bias.LowerTriangularMask() + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + self.model._has_no_labels = labels is None - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if past_key_values is not None and \ + hasattr(self.model.layers[0].self_attn, "paged_attention"): + outputs = fast_forward_inference( + self.model, + input_ids, + past_key_values, + ) + else: + outputs = self.model( + input_ids=input_ids, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pass - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - self.model._has_no_labels = labels is None + hidden_states = outputs[0] + bsz, q_len, hd = hidden_states.shape + if bsz == 1 and q_len == 1: + logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + logits = logits.unsqueeze(0).unsqueeze(0) + else: + logits = self.lm_head(hidden_states) + pass - if past_key_values is not None and \ - hasattr(self.model.layers[0].self_attn, "paged_attention"): - outputs = LlamaModel_fast_forward_inference( - self.model, - input_ids, - past_key_values, - ) - else: - outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - 
past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - pass + loss = None + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + ) + pass - hidden_states = outputs[0] - bsz, q_len, hd = hidden_states.shape - if bsz == 1 and q_len == 1: - logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) - logits = logits.unsqueeze(0).unsqueeze(0) - else: - logits = self.lm_head(hidden_states) - pass + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output - loss = None - if labels is not None: - shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") - pass - - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, ) pass - - if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) + return 
_CausalLM_fast_forward pass @@ -880,7 +890,7 @@ def pre_patch(): LlamaFlashAttention2.forward = LlamaAttention_fast_forward LlamaDecoderLayer .forward = LlamaDecoderLayer_fast_forward LlamaModel .forward = LlamaModel_fast_forward - LlamaForCausalLM .forward = LlamaForCausalLM_fast_forward + LlamaForCausalLM .forward = CausalLM_fast_forward(LlamaModel_fast_forward_inference) PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward # Solves https://github.com/unslothai/unsloth/issues/168 diff --git a/unsloth/save.py b/unsloth/save.py index 51ddeb30e7..5c1bceb38f 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -369,6 +369,7 @@ def unsloth_save_model( # Switch to our fast saving modules if it's a slow PC! n_cpus = psutil.cpu_count(logical = False) + if n_cpus is None: n_cpus = psutil.cpu_count() if n_cpus is None: n_cpus = 1 if safe_serialization is None: From a2215fdd1467fec434bec20c9a2c153452e4c6fa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 10 Mar 2024 04:54:23 +1100 Subject: [PATCH 0151/1088] Fix bugs (#230) * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update 
rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten --- unsloth/chat_templates.py | 14 +++++++++----- unsloth/models/llama.py | 8 ++++++-- unsloth/models/loader.py | 13 ++++++++++++- unsloth/models/mistral.py | 6 +++++- unsloth/save.py | 2 ++ 5 files changed, 34 insertions(+), 9 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 9675b10fe4..520c998c5c 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -257,9 +257,9 @@ def get_chat_template( assert("Unsloth: Can only map new tokens to EOS for now. Adding new tokens is not yet supported.") pass - if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": - chat_template = "gemma_chatml" - pass + # if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": + # chat_template = "gemma_chatml" + # pass old_padding_side = tokenizer.padding_side @@ -298,8 +298,12 @@ def get_chat_template( pass pass - logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") - string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) + if not stop_word in token_mapping.values(): + # Do not map 107 = <|im_end|> and 1 = <|im_end|>. 
This will reduce the vocab size by 1 + logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") + string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) + pass + new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3f281a09c8..6ed52a7acb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -916,6 +916,7 @@ def from_pretrained( rope_scaling = None, fix_tokenizer = True, model_patcher = None, + tokenizer_name = None, **kwargs, ): if model_patcher is None: model_patcher = FastLlamaModel @@ -978,13 +979,16 @@ def from_pretrained( max_position_embeddings = max_position_embeddings, **kwargs, ) + + # Counteract saved tokenizers + tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoTokenizer.from_pretrained( - model_name, + tokenizer_name, model_max_length = max_position_embeddings, padding_side = "right", token = token, ) - + model, tokenizer = patch_tokenizer(model, tokenizer) model = model_patcher.post_patch(model) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 67a59c850c..47b568ae2a 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -18,7 +18,7 @@ from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER - +import os # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! 
major, minor = transformers_version.split(".")[:2] @@ -118,6 +118,16 @@ def from_pretrained( ) pass + # Check if this is local model since the tokenizer gets overwritten + if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ + os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ + os.path.exists(os.path.join(old_model_name, "special_tokens_map.json")): + + tokenizer_name = old_model_name + else: + tokenizer_name = None + pass + model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, @@ -128,6 +138,7 @@ def from_pretrained( rope_scaling = rope_scaling, fix_tokenizer = fix_tokenizer, model_patcher = dispatch_model, + tokenizer_name = tokenizer_name, *args, **kwargs, ) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 6c9d9ecc5c..c1e39e4a2e 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -294,6 +294,7 @@ def from_pretrained( rope_scaling = None, # Mistral does not support RoPE scaling fix_tokenizer = True, model_patcher = None, + tokenizer_name = None, **kwargs, ): if model_patcher is None: model_patcher = FastMistralModel @@ -354,8 +355,11 @@ def from_pretrained( # rope_scaling = rope_scaling, **kwargs, ) + + # Counteract saved tokenizers + tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoTokenizer.from_pretrained( - model_name, + tokenizer_name, model_max_length = max_position_embeddings, padding_side = "right", token = token, diff --git a/unsloth/save.py b/unsloth/save.py index 5c1bceb38f..5971d76e6e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -91,11 +91,13 @@ def _merge_lora(layer, name): else: dtype = W.dtype W = W.to(torch.float32).t() + # W = W.t() if A is not None: # sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) # W += sAB W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) + # W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = 
s) # if not torch.isfinite(W).all(): maximum_element = torch.max(W.min().abs(), W.max()) if not torch.isfinite(maximum_element).item(): From e375c5813e81670d65f2aba850564333feb5fe5a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 10 Mar 2024 20:09:34 +1100 Subject: [PATCH 0152/1088] Saving fixes (#231) * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * 
Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py --- unsloth/models/llama.py | 28 +++++++++++++--------------- unsloth/save.py | 8 ++++---- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 
6ed52a7acb..30348b69c2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -705,26 +705,24 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if causal_mask is None and past_key_values is None: - causal_mask = xformers.attn_bias.LowerTriangularMask() - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) - self.model._has_no_labels = labels is None - - if past_key_values is not None and \ - hasattr(self.model.layers[0].self_attn, "paged_attention"): + if past_key_values is not None and hasattr(self.model.layers[0].self_attn, "paged_attention"): outputs = fast_forward_inference( self.model, input_ids, past_key_values, ) else: + causal_mask = xformers.attn_bias.LowerTriangularMask() + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + self.model._has_no_labels = labels is None + outputs = self.model( input_ids=input_ids, causal_mask=causal_mask, @@ -988,7 +986,7 @@ def from_pretrained( padding_side = "right", token = token, ) - + model, tokenizer = patch_tokenizer(model, tokenizer) model = model_patcher.post_patch(model) diff --git a/unsloth/save.py b/unsloth/save.py index 5971d76e6e..42d326e128 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -90,14 +90,14 @@ def _merge_lora(layer, name): W = 
fast_dequantize(W, quant_state) else: dtype = W.dtype - W = W.to(torch.float32).t() - # W = W.t() + # W = W.to(torch.float32).t() + W = W.t() if A is not None: # sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) # W += sAB - W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) - # W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = s) + # W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) + W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = s) # if not torch.isfinite(W).all(): maximum_element = torch.max(W.min().abs(), W.max()) if not torch.isfinite(maximum_element).item(): From dd72d9f8069a5fd0384a460c044a48f0946b2013 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 11 Mar 2024 04:31:03 +1100 Subject: [PATCH 0153/1088] Fix more bugs (#232) * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * 
Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py 
* Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * 
Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py --- unsloth/chat_templates.py | 19 ++++++-- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 97 +++++++++++++++++++++++++++++++-------- unsloth/save.py | 21 ++++++++- 4 files changed, 114 insertions(+), 25 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 520c998c5c..5487e9dcab 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -253,13 +253,15 @@ def get_chat_template( mapping = {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant"}, map_eos_token = True, ): + old_tokenizer = tokenizer + if map_eos_token is False: assert("Unsloth: Can only map new tokens to EOS for now. Adding new tokens is not yet supported.") pass - # if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": - # chat_template = "gemma_chatml" - # pass + if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": + chat_template = "gemma_chatml" + pass old_padding_side = tokenizer.padding_side @@ -340,6 +342,17 @@ def get_chat_template( tokenizer.padding_side = old_padding_side tokenizer.chat_template = chat_template + # Also fix up other tokens + old_pad_token = getattr(old_tokenizer, "pad_token", None) + old_bos_token = getattr(old_tokenizer, "bos_token", None) + old_unk_token = getattr(old_tokenizer, "unk_token", None) + new_pad_token = getattr(tokenizer, "pad_token", None) + new_bos_token = getattr(tokenizer, "bos_token", None) + new_unk_token = getattr(tokenizer, "unk_token", None) + if old_pad_token != new_pad_token: tokenizer.pad_token = old_pad_token + if old_bos_token != new_bos_token: tokenizer.bos_token = old_bos_token + if old_unk_token != new_unk_token: 
tokenizer.unk_token = old_unk_token + #stopping_criteria = create_stopping_criteria(tokenizer, stop_word) return tokenizer#, stopping_criteria diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3e3b8ff22c..95a032b698 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -95,7 +95,7 @@ def prepare_model_for_kbit_training( # Freeze all parameters except LoRA for name, param in model.named_parameters(): - if ".lora_A." in name or ".lora_B." in name: + if ".lora_A." in name or ".lora_B." in name or ".lora_magnitude_vector" in name: param.requires_grad_(True) else: param.requires_grad_(False) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 30348b69c2..e9493376be 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -511,26 +511,36 @@ def LlamaModel_fast_forward( # Mormalized from Gemma IS_GEMMA = self.config.model_type == "gemma" + train_embed_tokens = self.embed_tokens.weight.requires_grad + if IS_GEMMA: - inputs_requires_grad = inputs_embeds.requires_grad - if not inputs_embeds.is_leaf: - inputs_embeds = inputs_embeds.detach() - inputs_requires_grad = True - elif inputs_requires_grad: - inputs_embeds.requires_grad_(False) - pass # Match Gemma exactly by casting to bfloat16 / float16 # inputs_embeds *= math_sqrt(self.config.hidden_size) # Ie 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 # & 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 - inputs_embeds *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = inputs_embeds.dtype) - # inputs_embeds *= math_sqrt(self.config.hidden_size) - if inputs_requires_grad: inputs_embeds.requires_grad_(True) + normalizer = torch.tensor(math_sqrt(self.config.hidden_size), dtype = inputs_embeds.dtype) + + if train_embed_tokens: + # Careful we must not do an inplace op! 
+ inputs_embeds = inputs_embeds * normalizer + else: + inputs_requires_grad = inputs_embeds.requires_grad + if not inputs_embeds.is_leaf: + inputs_embeds = inputs_embeds.detach() + inputs_requires_grad = True + elif inputs_requires_grad: + inputs_embeds.requires_grad_(False) + pass + inputs_embeds *= normalizer + # inputs_embeds *= math_sqrt(self.config.hidden_size) + if inputs_requires_grad: inputs_embeds.requires_grad_(True) + pass pass # Fix up attention mask by setting elements to 0 # Specifically for DPO - if self._has_no_labels and (attention_mask is not None) and (past_key_values is None): + if self._has_no_labels and (attention_mask is not None) and (past_key_values is None) and \ + (not train_embed_tokens): # Careful for inference the attention_mask is size (1, kv_seq_len) # Whilst the input_embeds is size (1, 1, 4096) inputs_requires_grad = inputs_embeds.requires_grad @@ -1226,6 +1236,7 @@ def get_peft_model( random_state = 3407, max_seq_length = 2048, # not used anymore use_rslora = False, + modules_to_save = None, init_lora_weights = True, loftq_config = {}, **kwargs, @@ -1312,15 +1323,45 @@ def get_peft_model( accepted_modules = frozenset(("q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",),) model.config.update({"unsloth_version" : __version__}) + + train_lm_head = False + train_embed_tokens = False + final_modules = [] for module in target_modules: - assert(module in accepted_modules) + if module == "lm_head": + logger.warning_once( + "Unsloth: `lm_head` should be placed in `modules_to_save` and not `target_modules`."\ + "We shall do it for you!" + ) + train_lm_head = True + + elif module == "embed_tokens": + logger.warning_once( + "Unsloth: `embed_tokens` should be placed in `modules_to_save` and not `target_modules`."\ + "We shall do it for you!" 
+ ) + train_embed_tokens = True + + else: + assert(module in accepted_modules) + final_modules.append(module) + pass + + # Check modules_to_save + if modules_to_save is not None: + for module in modules_to_save: + if module == "lm_head": + train_lm_head = True + elif module == "embed_tokens": + train_embed_tokens = True + pass pass # Get LoRA arguments = dict( r = r, lora_alpha = lora_alpha, - target_modules = target_modules, + target_modules = final_modules, lora_dropout = lora_dropout, bias = bias, task_type = TaskType.CAUSAL_LM, @@ -1328,6 +1369,7 @@ def get_peft_model( init_lora_weights = init_lora_weights, loftq_config = loftq_config, use_rslora = use_rslora, + modules_to_save = modules_to_save, **kwargs, ) if not SUPPORTS_LOFTQ: del arguments["loftq_config"] @@ -1337,6 +1379,14 @@ def get_peft_model( model = _get_peft_model(model, lora_config) model = FastLlamaModel.patch_peft_model(model, use_gradient_checkpointing) + + # Now patch lm_head and embed_tokens + if train_embed_tokens: + model.model.model.embed_tokens.requires_grad_(True) + if train_lm_head: + model.model.lm_head.requires_grad_(True) + pass + return model pass @@ -1427,9 +1477,12 @@ def patch_peft_model( if hasattr(gate_proj, "lora_A") and \ hasattr( up_proj, "lora_A") and \ hasattr(down_proj, "lora_A") and \ - (gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None and \ - ( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None and \ - (down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None: + ((gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None) and \ + (( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None) and \ + ((down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None) and \ + ((gate_proj.lora_magnitude_vector if hasattr(gate_proj, "lora_magnitude_vector") else None) is None) and \ + (( 
up_proj.lora_magnitude_vector if hasattr( up_proj, "lora_magnitude_vector") else None) is None) and \ + ((down_proj.lora_magnitude_vector if hasattr(down_proj, "lora_magnitude_vector") else None) is None): # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) @@ -1448,9 +1501,12 @@ def patch_peft_model( if hasattr(q_proj, "lora_A") and \ hasattr(k_proj, "lora_A") and \ hasattr(v_proj, "lora_A") and \ - (q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None and \ - (k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None and \ - (v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None: + ((q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None) and \ + ((k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None) and \ + ((v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None) and \ + ((q_proj.lora_magnitude_vector if hasattr(q_proj, "lora_magnitude_vector") else None) is None) and \ + ((k_proj.lora_magnitude_vector if hasattr(k_proj, "lora_magnitude_vector") else None) is None) and \ + ((v_proj.lora_magnitude_vector if hasattr(v_proj, "lora_magnitude_vector") else None) is None): layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 @@ -1464,7 +1520,8 @@ def patch_peft_model( # O attention patching o_proj = layer.self_attn.o_proj if hasattr(o_proj, "lora_A") and \ - (o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None: + ((o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None) and \ + ((o_proj.lora_magnitude_vector if hasattr(o_proj, "lora_magnitude_vector") else None) is None): layer.self_attn.apply_o = apply_lora_o n_o += 1 diff --git a/unsloth/save.py b/unsloth/save.py index 42d326e128..7543eef10e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -203,7 +203,11 @@ def 
unsloth_save_model( print("Unsloth: Merging 4bit and LoRA weights to 4bit...") print("This might take 5 minutes...") - model = model.merge_and_unload() + + # Counteract no LoRA adapters! + if hasattr(model, "merge_and_unload"): + model = model.merge_and_unload() + pass print("Done.") pass @@ -573,6 +577,21 @@ def install_llama_cpp_old(version = -10): latest = releases[-1] version = releases[version].split(" ")[0] + # Check if the llama.cpp exists + if os.path.exists("llama.cpp"): + print( + "**[WARNING]** You have a llama.cpp old directory which is broken.\n"\ + "Unsloth will DELETE the broken directory and install a new one.\n"\ + "Press CTRL + C / cancel this if this is wrong. We shall wait 10 seconds.\n" + ) + import time + for i in range(10): + print(f"**[WARNING]** Deleting llama.cpp directory... {10-i} seconds left.") + time.sleep(1) + import shutil + shutil.rmtree("llama.cpp") + pass + # Clone a specific commit commands = [ "git clone https://github.com/ggerganov/llama.cpp", From 42076f6580e71522ed1c122043edfba595be64e4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 14 Mar 2024 20:32:04 +1100 Subject: [PATCH 0154/1088] Fix Gemma GGUF (#234) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * 
Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py 
* Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * 
gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update 
save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml --- README.md | 2 +- pyproject.toml | 44 +++++++++++++++++++++++++++------------ unsloth/chat_templates.py | 12 +++++++++-- unsloth/models/gemma.py | 1 + unsloth/models/llama.py | 36 +++++++++++++++++++------------- unsloth/save.py | 33 ++++++++++++++++++++++++----- 6 files changed, 92 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 759057f2a4..412e3ffda2 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - No change of hardware. Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. - Works on **Linux** and **Windows** via WSL. - Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -- Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! +- Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for up to **30x faster training**! - If you trained a model with 🦥Unsloth, you can use this cool sticker!   
diff --git a/pyproject.toml b/pyproject.toml index 05c4191974..3ea50ca00e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,17 +33,17 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ + "tyro", "transformers>=4.38.2", "datasets>=2.16.0", "sentencepiece", - "accelerate>=0.26.1", - "trl>=0.7.9", - "peft>=0.7.1", "tqdm", "psutil", "wheel>=0.42.0", "numpy", - "triton", + "accelerate>=0.26.1", + "trl>=0.7.9", + "peft>=0.7.1", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -128,17 +128,12 @@ cu121-torch220 = [ kaggle = [ "unsloth[huggingface]", ] -conda = [ +kaggle-new = [ "unsloth[huggingface]", + "bitsandbytes", ] -colab = [ - "unsloth[cu121]", -] -colab-ampere = [ - "unsloth[cu121]", - "packaging", - "ninja", - "flash-attn", +conda = [ + "unsloth[huggingface]", ] colab-torch211 = [ "unsloth[huggingface]", @@ -166,6 +161,29 @@ colab-ampere-torch220 = [ "ninja", "flash-attn", ] +colab-new = [ + "tyro", + "transformers>=4.38.2", + "datasets>=2.16.0", + "sentencepiece", + "tqdm", + "psutil", + "wheel>=0.42.0", + "numpy", +] +colab-no-deps = [ + "accelerate>=0.26.1", + "trl>=0.7.9", + "peft>=0.7.1", + "xformers", + "bitsandbytes", +] +colab-ampere = [ + "unsloth[colab-ampere-torch220]", + "packaging", + "ninja", + "flash-attn", +] cu118-ampere = [ "unsloth[huggingface]", "bitsandbytes", diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 5487e9dcab..7d205ebc5f 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -259,8 +259,10 @@ def get_chat_template( assert("Unsloth: Can only map new tokens to EOS for now. 
Adding new tokens is not yet supported.") pass - if tokenizer.__class__.__name__.startswith("Gemma") and chat_template == "chatml": - chat_template = "gemma_chatml" + IS_GEMMA = False + if tokenizer.__class__.__name__.startswith("Gemma"): + if chat_template == "chatml": chat_template = "gemma_chatml" + IS_GEMMA = True pass old_padding_side = tokenizer.padding_side @@ -338,6 +340,12 @@ def get_chat_template( .replace("'user'", "'" + mapping["user"] + "'")\ .replace("'assistant'", "'" + mapping["assistant"] + "'") + # Careful on Gemma + # bos_token is a must or else losses become too high + if IS_GEMMA and not chat_template.startswith("{{ bos_token }}"): + chat_template = "{{ bos_token }}" + chat_template + pass + _, tokenizer = patch_tokenizer(model = None, tokenizer = tokenizer) tokenizer.padding_side = old_padding_side tokenizer.chat_template = chat_template diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index bcd0e1abd9..7bfec43e51 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -154,6 +154,7 @@ def GemmaModel_fast_forward_inference( out_weight = torch.empty_like(self.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") hidden_states = self.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 hidden_states *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e9493376be..d83d9b76f2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -509,7 +509,10 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - # Mormalized from Gemma + # Downcast to the correct dtype ie float32 to float16 + inputs_embeds = inputs_embeds.to(self.config.torch_dtype) + + # Normalized from Gemma IS_GEMMA = 
self.config.model_type == "gemma" train_embed_tokens = self.embed_tokens.weight.requires_grad @@ -665,6 +668,7 @@ def LlamaModel_fast_forward_inference( input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): @@ -1334,6 +1338,7 @@ def get_peft_model( "We shall do it for you!" ) train_lm_head = True + model.model.embed_tokens.to(torch.float32, non_blocking = True) elif module == "embed_tokens": logger.warning_once( @@ -1341,6 +1346,7 @@ def get_peft_model( "We shall do it for you!" ) train_embed_tokens = True + model.lm_head.to(torch.float32, non_blocking = True) else: assert(module in accepted_modules) @@ -1477,12 +1483,12 @@ def patch_peft_model( if hasattr(gate_proj, "lora_A") and \ hasattr( up_proj, "lora_A") and \ hasattr(down_proj, "lora_A") and \ - ((gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None) and \ - (( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None) and \ - ((down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None) and \ - ((gate_proj.lora_magnitude_vector if hasattr(gate_proj, "lora_magnitude_vector") else None) is None) and \ - (( up_proj.lora_magnitude_vector if hasattr( up_proj, "lora_magnitude_vector") else None) is None) and \ - ((down_proj.lora_magnitude_vector if hasattr(down_proj, "lora_magnitude_vector") else None) is None): + (getattr(gate_proj, "base_layer", gate_proj).bias is None) and \ + (getattr( up_proj, "base_layer", up_proj).bias is None) and \ + (getattr(down_proj, "base_layer", down_proj).bias is None) and \ + (getattr(gate_proj, "lora_magnitude_vector", None) is None) and \ + (getattr( up_proj, "lora_magnitude_vector", None) is None) and \ + (getattr(down_proj, "lora_magnitude_vector", None) is None): # 
https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) @@ -1501,12 +1507,12 @@ def patch_peft_model( if hasattr(q_proj, "lora_A") and \ hasattr(k_proj, "lora_A") and \ hasattr(v_proj, "lora_A") and \ - ((q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None) and \ - ((k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None) and \ - ((v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None) and \ - ((q_proj.lora_magnitude_vector if hasattr(q_proj, "lora_magnitude_vector") else None) is None) and \ - ((k_proj.lora_magnitude_vector if hasattr(k_proj, "lora_magnitude_vector") else None) is None) and \ - ((v_proj.lora_magnitude_vector if hasattr(v_proj, "lora_magnitude_vector") else None) is None): + (getattr(q_proj, "base_layer", q_proj).bias is None) and \ + (getattr(q_proj, "base_layer", k_proj).bias is None) and \ + (getattr(q_proj, "base_layer", v_proj).bias is None) and \ + (getattr(q_proj, "lora_magnitude_vector", None) is None) and \ + (getattr(k_proj, "lora_magnitude_vector", None) is None) and \ + (getattr(v_proj, "lora_magnitude_vector", None) is None): layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 @@ -1520,8 +1526,8 @@ def patch_peft_model( # O attention patching o_proj = layer.self_attn.o_proj if hasattr(o_proj, "lora_A") and \ - ((o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None) and \ - ((o_proj.lora_magnitude_vector if hasattr(o_proj, "lora_magnitude_vector") else None) is None): + (getattr(o_proj, "base_layer", o_proj).bias is None) and \ + (getattr(o_proj, "lora_magnitude_vector", None) is None): layer.self_attn.apply_o = apply_lora_o n_o += 1 diff --git a/unsloth/save.py b/unsloth/save.py index 7543eef10e..dd0dc26e66 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -632,6 +632,7 @@ def install_llama_cpp_blocking(): def save_to_gguf( + 
model_type : str, model_directory : str = "unsloth_finetuned_model", quantization_method : str = "fast_quantized", first_conversion : str = "f16", @@ -639,10 +640,18 @@ def save_to_gguf( ): from transformers.models.llama.modeling_llama import logger + # Careful convert.py is only for Llama / Mistral based archs + use_fast_convert = False + if model_type == "llama": use_fast_convert = True + elif model_type == "mistral": use_fast_convert = True + pass + logger.warning_once(f"Unsloth: Converting {model_type} model. Can use fast conversion = {use_fast_convert}.") + if quantization_method == "not_quantized": quantization_method = "f16" elif quantization_method == "fast_quantized": quantization_method = "q8_0" elif quantization_method == "quantized": quantization_method = "q4_k_m" elif quantization_method is None: quantization_method = "q8_0" + pass if quantization_method not in ALLOWED_QUANTS.keys(): error = f"Unsloth: Quant method = [{quantization_method}] not supported. Choose from below:\n" @@ -692,6 +701,12 @@ def save_to_gguf( pass pass + # Non llama/mistral needs can only use f32 or f16 + if not use_fast_convert and (first_conversion != "f16" or first_conversion != "f32"): + logger.warning_once("Unsloth: We must use f16 for non Llama and Mistral models.") + first_conversion = "f16" + pass + n_cpus = psutil.cpu_count() if n_cpus is None: n_cpus = 1 n_cpus *= 2 @@ -703,9 +718,15 @@ def save_to_gguf( f"The output location will be {final_location}\n"\ "This will take 3 minutes...") - command = f"python llama.cpp/convert.py {model_directory} "\ - f"--outfile {final_location} --vocab-type hfft "\ - f"--outtype {first_conversion} --concurrency {n_cpus}" + if use_fast_convert: + command = f"python llama.cpp/convert.py {model_directory} "\ + f"--outfile {final_location} --vocab-type hfft "\ + f"--outtype {first_conversion} --concurrency {n_cpus}" + else: + command = f"python llama.cpp/convert-hf-to-gguf.py {model_directory} "\ + f"--outfile {final_location} "\ + 
f"--outtype {first_conversion}" + pass with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: @@ -1054,7 +1075,8 @@ def unsloth_save_pretrained_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization_method, first_conversion, makefile) + model_type = self.config.model_type + file_location = save_to_gguf(model_type, new_save_directory, quantization_method, first_conversion, makefile) if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") @@ -1154,7 +1176,8 @@ def unsloth_push_to_hub_gguf( for _ in range(3): gc.collect() - file_location = save_to_gguf(new_save_directory, quantization_method, first_conversion, makefile) + model_type = self.config.model_type + file_location = save_to_gguf(model_type, new_save_directory, quantization_method, first_conversion, makefile) print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( From 9ac4ed66540170085bc6a649e4e7724f49bd90ef Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 15 Mar 2024 05:09:45 +1100 Subject: [PATCH 0155/1088] Gemma GGUF chat templates work! 
(#246) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * 
Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py 
* Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py --- unsloth/chat_templates.py | 78 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 7d205ebc5f..da46972750 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -15,12 +15,15 @@ __all__ = [ "get_chat_template", "test_chat_templates", + "fix_sentencepiece_tokenizer", ] from transformers import StoppingCriteria, StoppingCriteriaList from torch import 
LongTensor, FloatTensor from transformers.models.llama.modeling_llama import logger from .models._utils import patch_tokenizer +import os +import shutil CHAT_TEMPLATES = {} @@ -239,14 +242,75 @@ # Gemma with ChatML instead +# We find using is still more appropriate! gemma_chatml_template = "{{ bos_token }}" + chatml_template gemma_chatml_eos_token = ( - {"" : "<|im_start|>", "" : "<|im_end|>"}, + {"" : "<|im_start|>", "" : "<|im_end|>"}, "<|im_end|>", ) CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token,) +def fix_sentencepiece_tokenizer( + old_tokenizer, + new_tokenizer, + token_mapping, + temporary_location = "_unsloth_sentencepiece_temp", +): + # From https://github.com/google/sentencepiece/issues/121 + # We need to manually edit the sentencepiece tokenizer! + try: + import sentencepiece.sentencepiece_model_pb2 as sentencepiece_model_pb2 + except: + if not os.path.exists(temporary_location): + os.system("git clone https://github.com/google/sentencepiece.git unsloth_sentencepiece_temp") + os.system(f"cd {temporary_location}/src && protoc --python_out=. sentencepiece_model.proto") + shutil.rmtree(temporary_location) + pass + import sentencepiece.sentencepiece_model_pb2 as sentencepiece_model_pb2 + pass + + if not os.path.exists(temporary_location): + os.makedirs(temporary_location) + pass + + # First save the old tokenizer + old_tokenizer.save_pretrained(temporary_location) + + from sentencepiece import SentencePieceProcessor + tokenizer_file = sentencepiece_model_pb2.ModelProto() + tokenizer_file.ParseFromString(open(f"{temporary_location}/tokenizer.model", "rb").read()) + + # Now save the new tokenizer + new_tokenizer.save_pretrained(temporary_location) + + # Now correct the old tokenizer's .model file + for old_token, new_token in token_mapping.items(): + ids = old_tokenizer([old_token], add_special_tokens = False).input_ids + ids = ids[0] + if (len(ids) != 1): + # Skip this token! 
+ print(f"Skip mapping {old_token} to {new_token} since {new_token} is already in the tokenizer!") + continue + pass + ids = ids[0] + tokenizer_piece = tokenizer_file.pieces[ids] + assert(tokenizer_piece.piece == old_token) + tokenizer_piece.piece = new_token + pass + + # And now write it + with open(f"{temporary_location}/tokenizer.model", "wb") as file: + file.write(tokenizer_file.SerializeToString()) + pass + + # And load it! + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(temporary_location, eos_token = new_tokenizer.eos_token) + return tokenizer +pass + + def get_chat_template( tokenizer, chat_template = "chatml", @@ -290,11 +354,13 @@ def get_chat_template( string_vocab = tokenizer._tokenizer.to_str() + skipped = 0 for old_token, new_token in token_mapping.items(): old_count = string_vocab.count(f'"{old_token}"') new_count = string_vocab.count(f'"{new_token}"') if new_count != 0: print(f"{new_token} is already a token. Skipping.") + skipped += 1 elif old_count == 0: raise RuntimeError(f"{old_token} was not part of the tokenizer!") else: @@ -308,8 +374,14 @@ def get_chat_template( string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) pass - new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) - tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + if skipped != len(token_mapping): + new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) + new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + + # Must fix the sentence piece tokenizer since there's no tokenizer.model file! 
+ tokenizer = fix_sentencepiece_tokenizer(tokenizer, new_tokenizer, token_mapping,) + else: + pass elif stop_word != "eos_token": logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") From 809bdbef9394ed3cf20fce3a29deb455c109c00b Mon Sep 17 00:00:00 2001 From: HuyNguyen-hust <64361543+HuyNguyen-hust@users.noreply.github.com> Date: Fri, 15 Mar 2024 20:09:59 +0700 Subject: [PATCH 0156/1088] 10% faster RoPE embedding from HuyNguyen-hust (#238) --- unsloth/kernels/rope_embedding.py | 48 +++++++++++++++++++------------ 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index c1167393fb..99a99dc320 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -24,7 +24,7 @@ def _rope_embedding( Q, Q_row_stride, cos, cos_row_stride, sin, sin_row_stride, - seqlen, head_dim, + seqlen, head_dim, group_size, n_heads, BACKWARD_PASS: tl.constexpr, BLOCK_SIZE : tl.constexpr, ): @@ -34,7 +34,7 @@ def _rope_embedding( See our blog post for more info """ row_position = tl.program_id(0) - head_position = tl.program_id(1) + group_head_position = tl.program_id(1) col_offsets = tl.arange(0, BLOCK_SIZE) half_head_dim = head_dim // 2 mask = col_offsets < half_head_dim @@ -44,23 +44,25 @@ def _rope_embedding( cos1 = tl.load(cos + (row_position % seqlen)*cos_row_stride + \ half_head_dim*0 + col_offsets, mask = mask, other = 0) - # For Gemma - sometimes RoPE must be done in float32 and not bfloat16 - Q1 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*0 + col_offsets, mask = mask, other = 0).to(sin1.dtype) - Q2 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*1 + col_offsets, mask = mask, other = 0).to(sin1.dtype) - if BACKWARD_PASS: # See our blog post for more info. 
sin1 = -sin1 pass - tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*0 + col_offsets, - Q1*cos1 - Q2*sin1, mask = mask) - tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ - half_head_dim*1 + col_offsets, - Q2*cos1 + Q1*sin1, mask = mask) + head_start = group_head_position * group_size + head_end = tl.math.min((head_start + group_size), n_heads) + + for i in range(head_start, head_end): + offs_q1 = row_position * Q_row_stride + i * head_dim + col_offsets + offs_q2 = row_position * Q_row_stride + i * head_dim + col_offsets + half_head_dim + + # For Gemma - sometimes RoPE must be done in float32 and not bfloat16 + Q1 = tl.load(Q + offs_q1, mask = mask, other = 0).to(sin1.dtype) + Q2 = tl.load(Q + offs_q2, mask = mask, other = 0).to(sin1.dtype) + + tl.store(Q + offs_q1, Q1*cos1 - Q2*sin1, mask = mask) + tl.store(Q + offs_q2, Q2*cos1 + Q1*sin1, mask = mask) + pass pass @@ -75,12 +77,16 @@ def forward(ctx, Q, cos, sin): # [TODO] Changing blocksize to head_dim//2 seems to have # some concurrency / un-deterministic issues. - BLOCK_SIZE, num_warps = calculate_settings(head_dim) # (head_dim//2) - _rope_embedding[(n_rows, n_heads,)]( + BLOCK_SIZE, num_warps = calculate_settings(head_dim//2) # (head_dim//2) + group_size = 4 # 4 or 8, too large group_size can hurt performance. + n_groups = triton.cdiv(n_heads, group_size) + + grid = (n_rows, n_groups, ) + _rope_embedding[grid]( Q, Q.stride(0), cos, cos.stride(0), sin, sin.stride(0), - seq_len, head_dim, + seq_len, head_dim, group_size, n_heads, BACKWARD_PASS = False, BLOCK_SIZE = BLOCK_SIZE, num_warps = num_warps, @@ -102,11 +108,15 @@ def backward(ctx, dY): cos = ctx.cos sin = ctx.sin - _rope_embedding[(n_rows, n_heads,)]( + group_size = 4 # 4 or 8, too large group_size can hurt performance. 
+ n_groups = triton.cdiv(n_heads, group_size) + + grid = (n_rows, n_groups, ) + _rope_embedding[grid]( dY, dY .stride(0), cos, cos.stride(0), sin, sin.stride(0), - seq_len, head_dim, + seq_len, head_dim, group_size, n_heads, BACKWARD_PASS = True, BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, From 1e61cdbcb2a6c0c399d9e3e58a157ee1144ebf69 Mon Sep 17 00:00:00 2001 From: Qubitium <417764+Qubitium@users.noreply.github.com> Date: Fri, 15 Mar 2024 21:12:16 +0800 Subject: [PATCH 0157/1088] Fix single gpu limit code overriding the wrong cuda gpu id via env (#228) --- unsloth/__init__.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 7080c92894..0cc76cd6f2 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -17,22 +17,16 @@ # Currently only supports 1 GPU, or else seg faults will occur. if "CUDA_VISIBLE_DEVICES" in os.environ: - device = os.environ["CUDA_VISIBLE_DEVICES"] - if not device.isdigit(): + devices = os.environ["CUDA_VISIBLE_DEVICES"] + # check if there are multiple cuda devices set in env + if not devices.isdigit(): + first_id = devices.split(',')[0] warnings.warn( - f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {device} "\ - "but we require 'CUDA_VISIBLE_DEVICES=0'\n"\ - "We shall set it ourselves." + f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {devices} \n"\ + "Multiple CUDA devices detected but we require a single device.\n"\ + f"We will override CUDA_VISIBLE_DEVICES to first device: {first_id}." ) - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - elif "CUDA_DEVICE_ORDER" not in os.environ: - warnings.warn( - f"Unsloth: 'CUDA_DEVICE_ORDER' is not set "\ - "but we require 'CUDA_DEVICE_ORDER=PCI_BUS_ID'\n"\ - "We shall set it ourselves." - ) - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = str(first_id) else: # warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" From cb121797721dd39e3e8b842cb3ec557066b8c7e4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Mar 2024 02:47:05 +1100 Subject: [PATCH 0158/1088] Bug fixes (#249) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update 
rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md --- README.md | 24 +++++++++--- unsloth/__init__.py | 19 +++++++--- unsloth/kernels/rope_embedding.py | 46 ++++++++++++----------- unsloth/models/_utils.py | 2 + unsloth/save.py | 62 ++++++++++++++++++++++++++++--- 5 files changed, 116 
insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 412e3ffda2..fc282604b1 100644 --- a/README.md +++ b/README.md @@ -91,13 +91,11 @@ Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA conda create --name unsloth_env python=3.10 conda activate unsloth_env -conda install pytorch cudatoolkit torchvision torchaudio pytorch-cuda=<12.1/11.8> -c pytorch -c nvidia +conda install pytorch-cuda=<12.1/11.8> pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers -conda install xformers -c xformers +pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install bitsandbytes - -pip install "unsloth[conda] @ git+https://github.com/unslothai/unsloth.git" +pip install --no-deps trl peft accelerate bitsandbytes ``` ### Pip Installation @@ -144,6 +142,22 @@ pip install "unsloth[cu121-ampere-torch220] @ git+https://github.com/unslothai/u ```bash pip install --upgrade pip ``` +6. For Pytorch 2.2.1: +```bash +# RTX 3090, 4090 Ampere GPUs: +pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" +pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes + +# Pre Ampere RTX 2080, T4, GTX 1080 GPUs: +pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" +pip install --no-deps xformers trl peft accelerate bitsandbytes +``` +7. To troubleshoot installs try the below (all must succeed). Xformers should mostly all be available. +```bash +nvcc +python -m xformers.info +python -m bitsandbytes +``` ## 📜 Documentation - Go to our [Wiki page](https://github.com/unslothai/unsloth/wiki) for saving to GGUF, checkpointing, evaluation and more! diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 0cc76cd6f2..40da487e7f 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -18,9 +18,9 @@ # Currently only supports 1 GPU, or else seg faults will occur. 
if "CUDA_VISIBLE_DEVICES" in os.environ: devices = os.environ["CUDA_VISIBLE_DEVICES"] - # check if there are multiple cuda devices set in env + # Check if there are multiple cuda devices set in env if not devices.isdigit(): - first_id = devices.split(',')[0] + first_id = devices.split(",")[0] warnings.warn( f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {devices} \n"\ "Multiple CUDA devices detected but we require a single device.\n"\ @@ -33,20 +33,29 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "0" pass +# Reduce VRAM usage by reducing fragmentation +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" + try: import torch except: raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ "We have some installation instructions on our Github page.") -# We support torch 2.1 and 2.1.1 +# We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") major_torch, minor_torch = torch_version[0], torch_version[1] major_torch, minor_torch = int(major_torch), int(minor_torch) -if (major_torch != 2):# or (major_torch == 2 and minor_torch < 1): - raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ +if (major_torch < 2): + raise ImportError("Unsloth only supports Pytorch 2 for now. Please update your Pytorch to 2.1.\n"\ "We have some installation instructions on our Github page.") +elif (major_torch == 2) and (minor_torch < 2): + # Disable expandable_segments + del os.environ["PYTORCH_CUDA_ALLOC_CONF"] + # Must reimport Pytorch! 
+ importlib.reload(torch) +pass # Try loading bitsandbytes and triton diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 99a99dc320..b32d75ebf3 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -17,6 +17,7 @@ import torch from .utils import calculate_settings +ROPE_GROUP_SIZE = 4 @triton.heuristics({"BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],}) @triton.jit @@ -24,9 +25,11 @@ def _rope_embedding( Q, Q_row_stride, cos, cos_row_stride, sin, sin_row_stride, - seqlen, head_dim, group_size, n_heads, - BACKWARD_PASS: tl.constexpr, - BLOCK_SIZE : tl.constexpr, + seqlen, + head_dim : tl.constexpr, + n_heads : tl.constexpr, + BACKWARD_PASS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, ): """ Calculates the RoPE Embedding quickly @@ -49,16 +52,18 @@ def _rope_embedding( sin1 = -sin1 pass - head_start = group_head_position * group_size - head_end = tl.math.min((head_start + group_size), n_heads) + # [TODO] Autotune ROPE_GROUP_SIZE to be 1, 2, 4, 8 + head_start = group_head_position * ROPE_GROUP_SIZE + head_end = min((head_start + ROPE_GROUP_SIZE), n_heads) - for i in range(head_start, head_end): - offs_q1 = row_position * Q_row_stride + i * head_dim + col_offsets - offs_q2 = row_position * Q_row_stride + i * head_dim + col_offsets + half_head_dim + # 10% Faster kernel from [HuyNguyen-hust](https://github.com/unslothai/unsloth/pull/238) + for k in range(head_start, head_end): + offs_q1 = row_position * Q_row_stride + k * head_dim + col_offsets + offs_q2 = row_position * Q_row_stride + k * head_dim + col_offsets + half_head_dim # For Gemma - sometimes RoPE must be done in float32 and not bfloat16 - Q1 = tl.load(Q + offs_q1, mask = mask, other = 0).to(sin1.dtype) - Q2 = tl.load(Q + offs_q2, mask = mask, other = 0).to(sin1.dtype) + Q1 = tl.load(Q + offs_q1, mask = mask, other = 0).to(sin1.dtype) + Q2 = tl.load(Q + offs_q2, mask = mask, other = 0).to(sin1.dtype) tl.store(Q + offs_q1, Q1*cos1 - 
Q2*sin1, mask = mask) tl.store(Q + offs_q2, Q2*cos1 + Q1*sin1, mask = mask) @@ -78,21 +83,24 @@ def forward(ctx, Q, cos, sin): # [TODO] Changing blocksize to head_dim//2 seems to have # some concurrency / un-deterministic issues. BLOCK_SIZE, num_warps = calculate_settings(head_dim//2) # (head_dim//2) - group_size = 4 # 4 or 8, too large group_size can hurt performance. - n_groups = triton.cdiv(n_heads, group_size) + + # group_size = 4 # 4 or 8, too large group_size can hurt performance. + div, mod = divmod(n_heads, ROPE_GROUP_SIZE) + n_groups = div + (mod != 0) - grid = (n_rows, n_groups, ) - _rope_embedding[grid]( + _rope_embedding[(n_rows, n_groups, )]( Q, Q.stride(0), cos, cos.stride(0), sin, sin.stride(0), - seq_len, head_dim, group_size, n_heads, + seq_len, + head_dim, n_heads, BACKWARD_PASS = False, BLOCK_SIZE = BLOCK_SIZE, num_warps = num_warps, ) ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps + ctx.n_groups = n_groups ctx.cos = cos ctx.sin = sin return Q.view(batch, seq_len, n_heads, head_dim) @@ -108,15 +116,11 @@ def backward(ctx, dY): cos = ctx.cos sin = ctx.sin - group_size = 4 # 4 or 8, too large group_size can hurt performance. 
- n_groups = triton.cdiv(n_heads, group_size) - - grid = (n_rows, n_groups, ) - _rope_embedding[grid]( + _rope_embedding[(n_rows, ctx.n_groups, )]( dY, dY .stride(0), cos, cos.stride(0), sin, sin.stride(0), - seq_len, head_dim, group_size, n_heads, + seq_len, head_dim, n_heads, BACKWARD_PASS = True, BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 95a032b698..02eea4ba0c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -18,6 +18,8 @@ warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer diff --git a/unsloth/save.py b/unsloth/save.py index dd0dc26e66..05ff748db2 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -593,16 +593,17 @@ def install_llama_cpp_old(version = -10): pass # Clone a specific commit + # Also don't use the GPU! 
commands = [ "git clone https://github.com/ggerganov/llama.cpp", f"cd llama.cpp && git reset --hard {version} && git clean -df && "\ - f"make clean && LLAMA_CUBLAS=1 make all -j{psutil.cpu_count()*2}", + f"make clean make all -j{psutil.cpu_count()*2}", "pip install gguf protobuf", ] for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: - print(line.decode("utf-8"), flush = True, end = "") + print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass pass # Check if successful @@ -625,12 +626,55 @@ def install_llama_cpp_blocking(): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: - print(line.decode("utf-8"), flush = True, end = "") + print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass pass pass +def _fix_gemma_gguf(): + # Fixes Gemma saving to GGUF to float32 instead of float16! + with open("llama.cpp/convert-hf-to-gguf.py", "rb") as file: + text = file.read() + pass + + gemma_start = text.find(b"class GemmaModel(Model):") + if gemma_start == -1: return + + gemma_end = text.find(b"self.gguf_writer.add_tensor(new_name, data)", gemma_start) + if gemma_end == -1: return + + gemma_text = text[gemma_start : gemma_end] + bad_text = \ +b""" data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16)""" + good_text = \ +b""" # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16)""" + find_bad = gemma_text.find(bad_text) + if find_bad == -1: return + + gemma_text = gemma_text[:find_bad] + good_text + gemma_text[find_bad + len(bad_text):] + text = text[:gemma_start] + gemma_text + text[gemma_end:] + + with open("llama.cpp/convert-hf-to-gguf.py", "w+b") as file: + file.write(text) + pass +pass + + def save_to_gguf( model_type : str, model_directory : str = "unsloth_finetuned_model", @@ -686,7 +730,10 @@ def save_to_gguf( install_llama_cpp_blocking() pass # Check if successful. If not install 10th latest release - if error != 0 or not os.path.exists("llama.cpp/quantize"): install_llama_cpp_old(-10) + if error != 0 or not os.path.exists("llama.cpp/quantize"): + print(f"Unsloth: llama.cpp error code = {error}.") + install_llama_cpp_old(-10) + pass if quantization_method == "f32": first_conversion = "f32" elif quantization_method == "f16": first_conversion = "f16" @@ -723,6 +770,9 @@ def save_to_gguf( f"--outfile {final_location} --vocab-type hfft "\ f"--outtype {first_conversion} --concurrency {n_cpus}" else: + # Need to fix convert-hf-to-gguf.py for some models! 
+ _fix_gemma_gguf() + command = f"python llama.cpp/convert-hf-to-gguf.py {model_directory} "\ f"--outfile {final_location} "\ f"--outtype {first_conversion}" @@ -730,7 +780,7 @@ def save_to_gguf( with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: - print(line.decode("utf-8"), flush = True, end = "") + print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) pass @@ -760,7 +810,7 @@ def save_to_gguf( # quantize uses stderr with subprocess.Popen(command, shell = True, stderr = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stderr: - print(line.decode("utf-8"), flush = True, end = "") + print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) pass From 13a7cbf3b76b0d0a00272c0153878f15e02ff606 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Mar 2024 22:09:50 +1100 Subject: [PATCH 0159/1088] Bug fixes (#257) * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * 
Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * 
Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update 
pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py --- pyproject.toml | 3 +++ unsloth/save.py | 10 ++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3ea50ca00e..324e01157b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -178,6 +178,9 @@ colab-no-deps = [ "xformers", "bitsandbytes", ] +colab = [ + "unsloth[cu121]", +] colab-ampere = [ "unsloth[colab-ampere-torch220]", "packaging", diff --git a/unsloth/save.py b/unsloth/save.py index 05ff748db2..5970d74d11 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -684,6 +684,9 @@ def save_to_gguf( ): from transformers.models.llama.modeling_llama import logger + if quantization_method.startswith("iq2"): + raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") + # Careful convert.py is only for Llama / Mistral based archs use_fast_convert = False if model_type == "llama": use_fast_convert = True @@ -743,8 +746,11 @@ def save_to_gguf( if first_conversion == "f32" : pass elif first_conversion == "f16" : pass elif first_conversion == "q8_0": - logger.warning_once("Unsloth: We must use f16 for quantization first.") - first_conversion = "f16" + logger.warning_once( + "Unsloth: Using q8_0 for the `first_conversion` will lose a bit of accuracy, "\ + "but saves disk space!" 
+ ) + # first_conversion = "f16" pass pass From cd7c3e4a85456ca2c5255d135a2cc0fa239c07b6 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 17 Mar 2024 22:21:36 +1100 Subject: [PATCH 0160/1088] Squashed commit of the following: commit 61d45c60db23eac0b9e4977eb33bea3c37c39ae4 Merge: f024ce2 13a7cbf Author: Daniel Han-Chen Date: Sun Mar 17 22:12:44 2024 +1100 Merge branch 'main' into nightly commit f024ce282131a1dd70c6448852265e079c335135 Author: Daniel Han-Chen Date: Sun Mar 17 22:12:18 2024 +1100 Update __init__.py commit d38cf5387c09c32ae42667200700077fd1f31033 Author: Daniel Han-Chen Date: Sun Mar 17 22:09:23 2024 +1100 Update fast_lora.py commit 9c35a2c4b0a861ed119731389277855fed6f42ed Author: Daniel Han-Chen Date: Sun Mar 17 22:08:33 2024 +1100 Update pyproject.toml commit 6edc35f686b3dd0869ec74e811d8de6ae8370e60 Author: Daniel Han-Chen Date: Sun Mar 17 20:18:00 2024 +1100 Update fast_lora.py commit 2a9d4fb947c2d7b7dcc31fbeff6c93e0be72c571 Author: Daniel Han-Chen Date: Sun Mar 17 20:10:30 2024 +1100 Bugs commit 14717be070f57c1efa36de2bd5e7d7e146afd5e1 Merge: 5c24a3b cb12179 Author: Daniel Han-Chen Date: Sun Mar 17 20:10:26 2024 +1100 Merge branch 'main' into nightly commit 5c24a3bc2e13e4ee678b6a4298e5fd3598495971 Author: Daniel Han-Chen Date: Sun Mar 17 02:46:35 2024 +1100 Update README.md commit fd729a7131a8ff7cd9d6c8780577f1b51c05fd93 Author: Daniel Han-Chen Date: Sun Mar 17 02:44:58 2024 +1100 Update README.md commit 7e9f092e9fbf3e027e9458a179275b36c00c48a7 Author: Daniel Han-Chen Date: Sat Mar 16 23:17:35 2024 +1100 Update save.py commit e3efca87781b55e663c396ffa418c47b2fbb079d Author: Daniel Han-Chen Date: Sat Mar 16 22:36:57 2024 +1100 Update save.py commit d58fa31e0ccd2e6c68f7a8ebaf81811dcaaf3ac4 Author: Daniel Han-Chen Date: Sat Mar 16 22:31:02 2024 +1100 Update save.py commit 64d954bd13918ee7398b988b77b52045d4c17bae Author: Daniel Han-Chen Date: Sat Mar 16 20:32:12 2024 +1100 Update save.py commit 815202f8329872205fecbb622ca0ed7fe9fb4a15 Author: 
Daniel Han-Chen Date: Sat Mar 16 20:03:51 2024 +1100 GGUF commit 338b2c928b7f824e68663b875794a8858d8bda57 Author: Daniel Han-Chen Date: Sat Mar 16 04:40:21 2024 +1100 Update README.md commit f342425c1ed157e3f765b47520b9403dae8775a9 Author: Daniel Han-Chen Date: Sat Mar 16 04:34:51 2024 +1100 Update README.md commit cef733a4204ea1ab60e3ecdfdd099ae0c088b481 Author: Daniel Han-Chen Date: Sat Mar 16 04:10:54 2024 +1100 Update fast_lora.py commit e5bcab2a74b43a8e1683baba323447d297581085 Author: Daniel Han-Chen Date: Sat Mar 16 03:43:16 2024 +1100 Update fast_lora.py commit 80cfe132f6c198481e75f83f8803a94f6d3c2836 Author: Daniel Han-Chen Date: Sat Mar 16 03:38:13 2024 +1100 Fix bugs commit d8e98be90da5dda503d9626573754269c64be2c0 Merge: 51c2484 1e61cdb Author: Daniel Han-Chen Date: Sat Mar 16 00:14:02 2024 +1100 Merge branch 'main' into nightly commit 51c2484ffdbec0919bea406601ace30ff2a3ba0a Author: Daniel Han-Chen Date: Fri Mar 15 22:35:44 2024 +1100 Update rope_embedding.py commit 3e93a78794feb10914e9074a204e41fb901a3e42 Author: Daniel Han-Chen Date: Fri Mar 15 20:14:56 2024 +1100 Update rope_embedding.py commit 82d80e9e9896b5ba9c3a167eea1d19e42c0048c6 Merge: 718a6a1 9ac4ed6 Author: Daniel Han-Chen Date: Fri Mar 15 19:50:20 2024 +1100 Merge branch 'main' into nightly commit 718a6a1c8435cb92b9fc731149774384e46c4ab1 Author: Daniel Han-Chen Date: Fri Mar 15 19:50:03 2024 +1100 Update pyproject.toml commit 385c6d44a80697dce7296b786550e841da2543e7 Author: Daniel Han-Chen Date: Fri Mar 15 19:49:09 2024 +1100 Update pyproject.toml commit 9c9ede468089da2e9e0e358be9275c5068691560 Author: Daniel Han-Chen Date: Fri Mar 15 19:48:39 2024 +1100 Update pyproject.toml commit 9d6c9c9ebcb291682485859524c48c696c9caf2d Merge: a12e4ea 42076f6 Author: Daniel Han-Chen Date: Fri Mar 15 05:07:09 2024 +1100 Merge branch 'main' into nightly commit a12e4ea3e003a1e01c1f27a042e14afaf740ab4c Author: Daniel Han-Chen Date: Fri Mar 15 04:44:54 2024 +1100 Update chat_templates.py commit 
c3e0e518d95564e42b191e53e599107136a58128 Author: Daniel Han-Chen Date: Fri Mar 15 04:30:03 2024 +1100 Update chat_templates.py commit fe0e2d7baaf0f37a6f964c096b9b97aa40e1c949 Author: Daniel Han-Chen Date: Fri Mar 15 03:50:32 2024 +1100 Update chat_templates.py commit 5dfe582c96fe3d242533238410dd13302d3d13e2 Author: Daniel Han-Chen Date: Fri Mar 15 03:37:37 2024 +1100 Update chat_templates.py commit ec73a776ecaf23f5cae3c645308db22284e8ef23 Author: Daniel Han-Chen Date: Fri Mar 15 03:34:32 2024 +1100 Update chat_templates.py commit 5c4241a240756885bbcacbc03e093aeb2ad512c7 Author: Daniel Han-Chen Date: Thu Mar 14 20:27:20 2024 +1100 Update pyproject.toml commit 6841a303d90737e5a8c20979736477db4f75b0af Author: Daniel Han-Chen Date: Thu Mar 14 20:20:16 2024 +1100 Update pyproject.toml commit 341b5f46e61d25a9e243eaba1c851c97fd8d05bb Author: Daniel Han-Chen Date: Thu Mar 14 20:12:54 2024 +1100 Update pyproject.toml commit 6809846464223cae0341cb037f28af7ed3a01aac Author: Daniel Han-Chen Date: Thu Mar 14 20:12:02 2024 +1100 Update pyproject.toml commit 77edfb10b730046d62dd351ed93e610d857eed6d Author: Daniel Han-Chen Date: Thu Mar 14 20:11:24 2024 +1100 Update pyproject.toml commit 0ed19ec1704f3f48542d8b5209938034bc536324 Author: Daniel Han-Chen Date: Thu Mar 14 20:10:10 2024 +1100 Update pyproject.toml commit 1f4c625f2a4d5fa0c0d6ecf0ced7e6e0b3a427fe Author: Daniel Han-Chen Date: Thu Mar 14 20:06:05 2024 +1100 Update pyproject.toml commit 8fa0aab61ce663754da3adf03851ca93824792ce Author: Daniel Han-Chen Date: Thu Mar 14 20:04:01 2024 +1100 Update pyproject.toml commit eb61377632e3df3f75616dfd5d3e1a267942afef Author: Daniel Han-Chen Date: Thu Mar 14 19:02:29 2024 +1100 Fix Colab commit d6ac9b56c125a7e0a80b7ab006ca9929aa778b3a Author: Daniel Han-Chen Date: Tue Mar 12 20:12:55 2024 +1100 upcasting commit b7c3190e978a3ed3311f9765f7360bb4122ae18a Author: Daniel Han-Chen Date: Tue Mar 12 00:46:57 2024 +1100 Update pyproject.toml commit 4d77c32425152cf801c349a850087928cc17b662 
Author: Daniel Han-Chen Date: Mon Mar 11 23:28:03 2024 +1100 Update pyproject.toml commit 98573e8618208ccc8d2345b7db589cf2f37163ff Author: Daniel Han-Chen Date: Mon Mar 11 23:21:27 2024 +1100 kaggle new commit 4a794dd880bd390418690abffeb00bfc42950997 Author: Daniel Han-Chen Date: Mon Mar 11 20:28:09 2024 +1100 Update pyproject.toml commit a0c18c9880222b80dcd03f1245fdf5054c55fecb Author: Daniel Han-Chen Date: Mon Mar 11 20:05:06 2024 +1100 Update save.py commit 684eaae9ea354dc58ff983c6e801ee8c34641581 Author: Daniel Han-Chen Date: Mon Mar 11 19:52:18 2024 +1100 GGUF incorrect commit 029d5886031630e62ef08bedf213a277c38844bc Author: Daniel Han-Chen Date: Mon Mar 11 19:13:42 2024 +1100 Update save.py commit 252a38a2df81ad54b3fa307467ca7c1d6e25d7f7 Merge: 63b1f58 dd72d9f Author: Daniel Han-Chen Date: Mon Mar 11 19:13:29 2024 +1100 Merge branch 'main' into nightly commit 63b1f5879e3b8b0b651bf3fa87a55d0086c1b3be Author: Daniel Han-Chen Date: Mon Mar 11 04:05:43 2024 +1100 Update llama.py commit cb0d9374698a7058c8f55e0417dea8d5313b86f6 Author: Daniel Han-Chen Date: Mon Mar 11 03:53:23 2024 +1100 Account for DoRA commit 2e87755a5cfc3ba4594c860b35faf88c16b0943b Author: Daniel Han-Chen Date: Mon Mar 11 03:19:20 2024 +1100 Update llama.py commit 93d88ad68de3aa39aec8e0caaa04d1e2d33b8ae2 Merge: aba595d e375c58 Author: Daniel Han-Chen Date: Mon Mar 11 03:15:30 2024 +1100 Merge branch 'main' into nightly commit aba595de9cbd274a3db2d192136de4275c9605b3 Author: Daniel Han-Chen Date: Mon Mar 11 03:07:44 2024 +1100 Update llama.py commit 133097d995e7cb7046c79a6c4d5b7b7a76a4d2c4 Author: Daniel Han-Chen Date: Mon Mar 11 02:25:10 2024 +1100 Update save.py commit b5d3d63df5553ae92b07322a493af7d22a71c75d Author: Daniel Han-Chen Date: Mon Mar 11 02:15:33 2024 +1100 Update save.py commit 8be91b050febab108306580317553261461f4a0f Author: Daniel Han-Chen Date: Mon Mar 11 02:10:51 2024 +1100 Update chat_templates.py commit 23b7a5764cc1b4b431f3cdf1dbb39bc8f144fb6b Author: Daniel Han-Chen Date: 
Sun Mar 10 19:32:02 2024 +1100 Update fast_lora.py commit c1728a9904cdf83fb2e7ad6d0b0c50e0a6357689 Author: Daniel Han-Chen Date: Sun Mar 10 19:11:00 2024 +1100 Update fast_lora.py commit c192ce3ed433d3d20eca9051ff6bd281762e5d28 Author: Daniel Han-Chen Date: Sun Mar 10 19:09:10 2024 +1100 Update fast_lora.py commit 7e3abd19ba27bb396262c28fd622c1839931d9c7 Author: Daniel Han-Chen Date: Sun Mar 10 18:36:35 2024 +1100 Update fast_lora.py commit c1f3e703946df542e930f7aff683b186fb08c725 Author: Daniel Han-Chen Date: Sun Mar 10 18:33:52 2024 +1100 Update fast_lora.py commit 08da057f04f1c3e9a87c1c06d745293a6d817f2c Author: Daniel Han-Chen Date: Sun Mar 10 18:09:58 2024 +1100 Update save.py commit 1e8922af2b9943fbba6e5592baf188936bd723bc Author: Daniel Han-Chen Date: Sun Mar 10 17:42:41 2024 +1100 Revert commit 74fc5caa60c712757165294134cd1b5b49e7f722 Author: Daniel Han-Chen Date: Sun Mar 10 16:20:20 2024 +1100 Accuracy commit 35c6d776c4192870c0e0ce443af48d8c52f2f1bf Author: Daniel Han-Chen Date: Sun Mar 10 14:13:53 2024 +1100 Update save.py commit 6d2bc97117d9d12d6fe254ffc0664cfb10cd04cb Author: Daniel Han-Chen Date: Sun Mar 10 14:08:38 2024 +1100 Update llama.py commit baf8e4c0a8b198214c795cf05ea60a2cbadb2145 Author: Daniel Han-Chen Date: Sun Mar 10 13:11:42 2024 +1100 Update llama.py commit c0d95162556757d4e0b688574bb36aeebe07cf18 Author: Daniel Han-Chen Date: Sun Mar 10 13:10:03 2024 +1100 Update llama.py commit 91877f550684791d53d5ff988a60825da5f6920d Merge: f887080 a2215fd Author: Daniel Han-Chen Date: Sun Mar 10 13:08:23 2024 +1100 Merge branch 'main' into nightly commit f887080a6db016d61b8ea9eda7a112b99b48add9 Author: Daniel Han-Chen Date: Sun Mar 10 04:31:40 2024 +1100 Tokenizer overwritten commit 1c1461ae0990c2594fe33bfcb196ee33fabc7d41 Author: Daniel Han-Chen Date: Sun Mar 10 04:08:02 2024 +1100 Update loader.py commit 14f063819acf40392c6678143048b3904e610dd6 Author: Daniel Han-Chen Date: Sun Mar 10 03:54:18 2024 +1100 model_name commit 
daba749ee1f60c1108df263f8f0bea36b9e2881f Author: Daniel Han-Chen Date: Sun Mar 10 03:48:05 2024 +1100 Update llama.py commit 457b7ba6d69c14fe5357f84d2164a0cc7166cded Author: Daniel Han-Chen Date: Sun Mar 10 02:57:41 2024 +1100 Update chat_templates.py commit 5c9629f5fe6df20de16facdf907fbd0666bfb21d Author: Daniel Han-Chen Date: Sun Mar 10 02:55:07 2024 +1100 Update save.py commit e6c3cdfc2d2dd206225a3879445dec00025235bb Author: Daniel Han-Chen Date: Sat Mar 9 23:22:14 2024 +1100 Update save.py commit f0a3c05b0794e44983dc4fcc6f7ce68faaab8123 Author: Daniel Han-Chen Date: Sat Mar 9 23:19:02 2024 +1100 Update save.py commit 58d1f1e03c65e7dccf42b3d52826a453e4349043 Author: Daniel Han-Chen Date: Sat Mar 9 20:05:52 2024 +1100 Update chat_templates.py commit 9b3dd3e9f890a87bea787cf0e33e8337cf33139a Merge: b321ada a0cc0d1 Author: Daniel Han-Chen Date: Fri Mar 8 19:40:25 2024 +1100 Merge branch 'main' into nightly commit b321adac2c8e980123ea52a94b894c0355a46bed Merge: 03232df d862d05 Author: Daniel Han-Chen Date: Thu Mar 7 04:32:51 2024 +1100 Merge branch 'main' into nightly commit 03232dff4e6fe5a9bc1219573f68ba19c084f89a Author: Daniel Han-Chen Date: Thu Mar 7 02:48:15 2024 +1100 Update chat_templates.py commit e67c6b4ec958d3e02f202f6a6db36f8d44ecadd8 Author: Daniel Han-Chen Date: Thu Mar 7 02:05:38 2024 +1100 Fix warning commit 5a7f52819e307993fedc9be2161396c93d543add Author: Daniel Han-Chen Date: Wed Mar 6 19:21:44 2024 +1100 Update rms_layernorm.py commit 837ba610cf45945f69caa894b1cbdc43c3daf6b9 Author: Daniel Han-Chen Date: Wed Mar 6 18:18:07 2024 +1100 RoPE and Gemma precision commit 1e41fa0c8c6ee1c46f2530d99d5164140d35d29e Author: Daniel Han-Chen Date: Wed Mar 6 05:07:37 2024 +1100 Update save.py commit 333d3d9a51c013860193703c581ced889a5ec2aa Author: Daniel Han-Chen Date: Wed Mar 6 04:52:59 2024 +1100 Update gemma.py commit 85c052d8fbd7636a673023289a41c8d22322bf33 Author: Daniel Han-Chen Date: Wed Mar 6 04:48:26 2024 +1100 sqrt commit 
ed1aa0007a734dd8fda6a2d724f1c0bf6a8229a1 Author: Daniel Han-Chen Date: Wed Mar 6 04:24:10 2024 +1100 Update gemma.py commit be81c07d43200b0417612e835de347dd729a93fe Author: Daniel Han-Chen Date: Wed Mar 6 04:14:11 2024 +1100 Gemma precision commit 5a693a107df69ad7fa81e5ff12056bbc6c098d82 Author: Daniel Han-Chen Date: Tue Mar 5 23:58:00 2024 +1100 Layernorms commit e0a24637381676a42a0c7c7d93d72f4d21932962 Author: Daniel Han-Chen Date: Tue Mar 5 20:07:44 2024 +1100 Update pyproject.toml commit 160320b3d9d9401a92a48798f8182368b61ad751 Author: Daniel Han-Chen Date: Tue Mar 5 18:55:50 2024 +1100 Update gemma.py commit 9f7f205f67f35ec0ea90caf856e9cb2774544f14 Author: Daniel Han-Chen Date: Tue Mar 5 18:27:11 2024 +1100 Update rms_layernorm.py commit 43e710b0640c365eba027bb21fb7a200fd123705 Author: Daniel Han-Chen Date: Tue Mar 5 18:21:59 2024 +1100 Fix Gemma merging commit 3137392ccf048a7316bbf1b2204f49ace393c540 Author: Daniel Han-Chen Date: Tue Mar 5 02:41:56 2024 +1100 Update gemma.py commit fe35b127fc9b0cd8f3052108e040a0c716da9e72 Author: Daniel Han-Chen Date: Tue Mar 5 02:40:20 2024 +1100 Update gemma.py commit 5fae81b628f5871f946f34a803bf150ace924bc3 Author: Daniel Han-Chen Date: Tue Mar 5 02:22:31 2024 +1100 Update gemma.py commit 961813b35fbc3375353dcbf5972e30878c5fca9a Author: Daniel Han-Chen Date: Tue Mar 5 01:45:08 2024 +1100 Update gemma.py commit c027dacb180e72f10af547c3f275064da1432d80 Merge: cb193f7 b4fe3cd Author: Daniel Han-Chen Date: Mon Mar 4 16:19:13 2024 +1100 Merge branch 'main' into nightly commit cb193f7e0a8a1f0709e2eb433281b0d2f04d81c1 Author: Daniel Han-Chen Date: Mon Mar 4 16:14:50 2024 +1100 Update gemma.py commit 440c29273f55c604f1cc60fccea741d0cd6703cc Author: Daniel Han-Chen Date: Mon Mar 4 16:11:12 2024 +1100 Update rms_layernorm.py commit c31b27b7dc4e4f1d5c438125442c86e00e44d780 Author: Daniel Han-Chen Date: Mon Mar 4 16:04:29 2024 +1100 Update rms_layernorm.py commit 6fa081ae77eed48e051134852e9068889afd8e24 Author: Daniel Han-Chen Date: 
Mon Mar 4 16:00:35 2024 +1100 Update rms_layernorm.py commit aa2fb63048a3d1c35e1aa12321b85c5737bcd211 Author: Daniel Han-Chen Date: Sun Mar 3 19:32:18 2024 +1100 Update gemma.py commit ac23e4b6125421069db3ce6c66a71a052f29a730 Merge: 1fea4ff 528d5fb Author: Daniel Han-Chen Date: Sun Mar 3 19:29:41 2024 +1100 Merge branch 'main' into nightly commit 1fea4ffcf24b92f9be39f41145e69158c91226a5 Author: Daniel Han-Chen Date: Sun Mar 3 18:07:55 2024 +1100 Update geglu.py commit 245fe4716c15003f3565fe8e6c3f6fd18d34479d Author: Daniel Han-Chen Date: Sun Mar 3 03:39:36 2024 +1100 Update _utils.py commit e27523cd5ec15e8ee1167f9887901a1fa23248ee Author: Daniel Han-Chen Date: Sun Mar 3 03:39:10 2024 +1100 Update __init__.py commit 65fbde6cd9a931d6510dff5a4d2d14ff0b1284cf Author: Daniel Han-Chen Date: Sun Mar 3 03:37:43 2024 +1100 Update __init__.py commit 9a2e791e6c831137f5017d4bf7fa1741a3b06b0e Author: Daniel Han-Chen Date: Sun Mar 3 03:35:21 2024 +1100 Update llama.py commit 786885c38cbfade2ab7fbb109f6431209d0d0624 Author: Daniel Han-Chen Date: Sun Mar 3 03:17:59 2024 +1100 Approx gelu commit 1c7f0d21ee71bb1da548a32ab142956fce9ee2c5 Author: Daniel Han-Chen Date: Sun Mar 3 02:24:39 2024 +1100 Update geglu.py commit 393e53b0168c6eecffea34d0048377090b9b5239 Merge: c88ab10 dbba69b Author: Daniel Han-Chen Date: Sat Mar 2 18:29:02 2024 +1100 Merge branch 'main' into nightly commit c88ab10a5ce27aa6e3d3b1372f4bdd67f9e03cf6 Author: Daniel Han-Chen Date: Sat Mar 2 18:28:39 2024 +1100 Approx gelu commit e032445694e994d941faf2db4a7433fa540e90a5 Author: Daniel Han-Chen Date: Sat Mar 2 02:39:11 2024 +1100 Update pyproject.toml commit c970a2b3bedc90569f88642785421e044d008bb9 Author: Daniel Han-Chen Date: Sat Mar 2 02:38:40 2024 +1100 Small fixes commit db87262625b2789802f209889dcde7e79903173b Author: Daniel Han-Chen Date: Fri Mar 1 04:16:25 2024 +1100 Update pyproject.toml commit d44bbf5f2e5be72d0bd8ac5252069c9403d6e433 Merge: 0866020 d0c15bb Author: Daniel Han-Chen Date: Thu Feb 29 00:17:47 
2024 +1100 Merge branch 'nightly' of https://github.com/unslothai/unsloth into nightly commit 0866020037cc9dd98c64decb0d5146e469c6dee7 Merge: 54b26c0 1fe11ba Author: Daniel Han-Chen Date: Thu Feb 29 00:17:12 2024 +1100 Merge branch 'main' into nightly commit d0c15bb508c161c501c83474dfb485bf7a2f077e Author: Daniel Han Date: Thu Feb 29 00:17:03 2024 +1100 Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py commit 54b26c0466d37603fb5aa247d8e6e9d6e9c93429 Author: Daniel Han-Chen Date: Thu Feb 29 00:16:19 2024 +1100 Update llama.py commit 074aa737ce9c298d40a7fe34221bb755ba02dd0d Merge: b1892b5 f946bed Author: Daniel Han-Chen Date: Thu Feb 29 00:14:42 2024 +1100 Merge branch 'main' into nightly commit b1892b55112d8d8ddeead4b7f09632f2ba9a3d84 Author: Daniel Han-Chen Date: Thu Feb 29 00:09:00 2024 +1100 Update chat_templates.py commit 072dc0c447d1ae7ec5ea6a7b44716de28ab7b2c7 Author: Daniel Han-Chen Date: Thu Feb 29 00:02:40 2024 +1100 Update _utils.py commit d37f284eaa913a6cfcaf5727e7a00860f472e6c1 Author: Daniel Han-Chen Date: Wed Feb 28 23:59:49 2024 +1100 DoRA commit 967fed83c7ab39e0e69f1b2f0237596688ec2c84 Author: Daniel Han-Chen Date: Wed Feb 28 22:02:18 2024 +1100 Update llama.py commit 
a4cd4a6e411ce9d41a72fa3b14161229a15005ec Author: Daniel Han-Chen Date: Tue Feb 27 01:40:45 2024 +1100 Update README.md commit 90a5c2c121a3fdc06f0e4b484671235c3e596203 Author: Daniel Han-Chen Date: Tue Feb 27 01:39:52 2024 +1100 Update README.md commit 4b7df80abdbd5c0352c5d952b28460c8585d8030 Author: Daniel Han-Chen Date: Tue Feb 27 00:16:57 2024 +1100 Chat Templates commit 54ff6eb169d44106a7bb2330ae83c59f17d33fec Author: Daniel Han-Chen Date: Mon Feb 26 17:27:42 2024 +1100 Update cross_entropy_loss.py commit 7cf69dd9f2cd848985a9715dd12ccfc05032249d Author: Daniel Han-Chen Date: Mon Feb 26 16:19:39 2024 +1100 Update cross_entropy_loss.py commit 1f450cd6ee9d01725f2fed3e60dfd1747fa0abbf Author: Daniel Han-Chen Date: Mon Feb 26 14:44:22 2024 +1100 Update gemma.py commit 7a5db6758c87c232b2ccefe982b829ac587f05ed Author: Daniel Han-Chen Date: Mon Feb 26 14:39:43 2024 +1100 correct_dtype commit cf16811ab92f26ae77acce8fc5b35a9280a749e6 Author: Daniel Han-Chen Date: Mon Feb 26 14:38:27 2024 +1100 Update gemma.py commit 637d01643ce321a62d616be012fc65e2076141b8 Author: Daniel Han-Chen Date: Mon Feb 26 14:37:48 2024 +1100 Update llama.py commit 3d73560df33a244567fda5dbe9313ed80b5e39ab Author: Daniel Han-Chen Date: Mon Feb 26 14:37:09 2024 +1100 Update llama.py commit 0d24acf0f5e3e8f931f14616fd7c99f35a97a8a4 Author: Daniel Han-Chen Date: Mon Feb 26 14:36:39 2024 +1100 Update llama.py commit 1aeca6f00cd35d4f5f7b06f478debdb543748acb Author: Daniel Han-Chen Date: Mon Feb 26 14:35:44 2024 +1100 RoPE commit 4ec1b216e01bdaa01a470b094bd36589f5bcb583 Author: Daniel Han-Chen Date: Mon Feb 26 14:15:37 2024 +1100 Update save.py commit 11c9ea4d1b4c92404a769c740ba903dcc2dee79a Author: Daniel Han-Chen Date: Mon Feb 26 05:06:04 2024 +1100 Update gemma.py commit 24e9d0af283d5765da360e4b32789b85692bd671 Author: Daniel Han-Chen Date: Mon Feb 26 05:05:41 2024 +1100 Update gemma.py commit 0b693df64689baa5bcc66d2fabe53fe492d7a463 Author: Daniel Han-Chen Date: Mon Feb 26 04:37:12 2024 +1100 Update 
gemma.py commit ae336e96631b467504034974686fad0fe385bdcb Author: Daniel Han-Chen Date: Mon Feb 26 04:36:10 2024 +1100 Update gemma.py commit 1d53b366a07f41fb9eb26c2f49de627c22bc9307 Author: Daniel Han-Chen Date: Mon Feb 26 04:32:49 2024 +1100 Update gemma.py commit b1864710c54ad4a0ea9b8eb51dcf31ac5e02be2e Author: Daniel Han-Chen Date: Mon Feb 26 04:30:29 2024 +1100 Update gemma.py commit 686d9fa4c4085c3d4d2eae188bc9ba9eeb686eeb Author: Daniel Han-Chen Date: Mon Feb 26 04:27:55 2024 +1100 Update gemma.py commit 40169f9fd3cff63cfbee1fcfe9841505d60f3833 Author: Daniel Han-Chen Date: Mon Feb 26 04:23:38 2024 +1100 Update gemma.py commit b0b38f770bc6f2596c1942a1cc76d70f8f8b6d82 Author: Daniel Han-Chen Date: Mon Feb 26 04:20:19 2024 +1100 Update gemma.py commit 13e06a062b19f2b1cf97d4cb849d8752f5f436e4 Author: Daniel Han-Chen Date: Mon Feb 26 03:56:34 2024 +1100 Update gemma.py commit fd5389a485eddab7a55581734b451d4db91c60a6 Author: Daniel Han-Chen Date: Mon Feb 26 03:55:15 2024 +1100 Update cross_entropy_loss.py commit a5abe39ded5d7a221379da075d6f95d88cd76289 Author: Daniel Han-Chen Date: Mon Feb 26 03:53:49 2024 +1100 Update cross_entropy_loss.py commit 78c96ef83cac925b3eeaec434523395116c8201a Author: Daniel Han-Chen Date: Mon Feb 26 03:50:34 2024 +1100 Update cross_entropy_loss.py commit 88cfe5eeb22691ce6d4d5052850f5dd37a3d7457 Author: Daniel Han-Chen Date: Mon Feb 26 03:48:16 2024 +1100 Update cross_entropy_loss.py commit dba4c0355daa24a44d12888f92021e199145145e Author: Daniel Han-Chen Date: Mon Feb 26 03:46:18 2024 +1100 Update cross_entropy_loss.py commit 199460d5ef6c83bfa1a2bc474e07460467f720ae Author: Daniel Han-Chen Date: Mon Feb 26 03:43:22 2024 +1100 Update cross_entropy_loss.py commit 13718b9847d7f3599289983a9c71825c24dd6a37 Author: Daniel Han-Chen Date: Mon Feb 26 03:36:38 2024 +1100 Update cross_entropy_loss.py commit 48736a075563e1f00a0da87fd3bcf1a661dc5ff3 Author: Daniel Han-Chen Date: Mon Feb 26 03:35:45 2024 +1100 Update cross_entropy_loss.py commit 
c1dbf6710e79b626d5f29c8608b0ea4d01203084 Author: Daniel Han-Chen Date: Mon Feb 26 03:29:19 2024 +1100 Update cross_entropy_loss.py commit b55d7ad4b2aa97ad04a382fe0cf35b61ce7f537f Author: Daniel Han-Chen Date: Mon Feb 26 03:29:11 2024 +1100 Update cross_entropy_loss.py commit bc3ff0f3a643a3fc0f090ba981ac1c32532366bb Author: Daniel Han-Chen Date: Mon Feb 26 03:28:22 2024 +1100 Update cross_entropy_loss.py commit 7bbce701c884407db1b7b966f378b50d992934f1 Author: Daniel Han-Chen Date: Mon Feb 26 03:26:42 2024 +1100 Update cross_entropy_loss.py commit 9c0b3cd431f63b2233009697d9b220a1a50e9ec3 Author: Daniel Han-Chen Date: Mon Feb 26 03:19:26 2024 +1100 Update cross_entropy_loss.py commit e696487e844b55bd6cca735f552a0df60c953feb Author: Daniel Han-Chen Date: Mon Feb 26 03:18:01 2024 +1100 Update cross_entropy_loss.py commit c67522037320f1cac13beb289c056291db9ba61e Author: Daniel Han-Chen Date: Mon Feb 26 03:16:31 2024 +1100 Update cross_entropy_loss.py commit bb6b6f238e337c786f2ae8bdaa548e1fb8949fcf Author: Daniel Han-Chen Date: Mon Feb 26 03:15:19 2024 +1100 Update cross_entropy_loss.py commit 096544eff426ad3373e54b5016f1477809e49b19 Author: Daniel Han-Chen Date: Mon Feb 26 03:14:04 2024 +1100 Update cross_entropy_loss.py commit b5eff3bfbc839a6450a2ae1167dd89b74fd29218 Author: Daniel Han-Chen Date: Mon Feb 26 03:12:23 2024 +1100 Update cross_entropy_loss.py commit fdaa9493be2ac7dd885f7d92bf4855ba54b6bac1 Author: Daniel Han-Chen Date: Mon Feb 26 03:09:37 2024 +1100 Update cross_entropy_loss.py commit 8259b16a5d9b4916e78a146cf970cabec9563591 Author: Daniel Han-Chen Date: Mon Feb 26 02:49:02 2024 +1100 gemma commit df8034d185255c2b6286a4fb44bcb8e8388ad302 Author: Daniel Han-Chen Date: Sun Feb 25 04:48:34 2024 +1100 Update llama.py commit db2b387b222eb8bd5b9b44b870a5f7bb613d00c0 Author: Daniel Han-Chen Date: Sun Feb 25 04:47:45 2024 +1100 llama commit 36d29078cd42da4c2199a696e3790dc2232e1ba5 Author: Daniel Han-Chen Date: Sun Feb 25 04:35:39 2024 +1100 Update gemma.py commit 
4cc14b207498e3b6a397998b01eca472ea4f7d75 Author: Daniel Han-Chen Date: Sun Feb 25 04:30:59 2024 +1100 Update gemma.py commit bc428f4e7b8f1399b613e545e302d5a1b877e944 Author: Daniel Han-Chen Date: Sun Feb 25 04:29:01 2024 +1100 Update gemma.py commit 7c958ab9e7dc79bd9b72f4d1fc8044fcaca26301 Author: Daniel Han-Chen Date: Sun Feb 25 04:25:57 2024 +1100 Update gemma.py commit d530f95135dc9036ea7c6e36fee3b61e1560e0ee Author: Daniel Han-Chen Date: Sun Feb 25 04:25:15 2024 +1100 Update gemma.py commit 6ad44835d29879f019580a4383442338fa620367 Author: Daniel Han-Chen Date: Sun Feb 25 04:22:35 2024 +1100 Update gemma.py commit 46723124f7bd69a9a01eb1e001758094603ebc6f Author: Daniel Han-Chen Date: Sun Feb 25 04:21:03 2024 +1100 Update gemma.py commit ad1ce483dbfcba735b33419d938050f0cb1e75eb Author: Daniel Han-Chen Date: Sun Feb 25 04:20:54 2024 +1100 Update gemma.py commit 47d4a33f41150dd3aab10fe6e9abddaeb645ee00 Author: Daniel Han-Chen Date: Sun Feb 25 04:20:09 2024 +1100 Update gemma.py commit 6b531eff93d50f183c1ab7de70e0b8ce828a60f1 Author: Daniel Han-Chen Date: Sun Feb 25 04:18:23 2024 +1100 Update gemma.py commit 147d1297722d80bf8de3842cfba1863e041d23f4 Author: Daniel Han-Chen Date: Sun Feb 25 04:17:16 2024 +1100 Update gemma.py commit 884fadc7441d09eadedde1d0e3d8df36fb505b0d Author: Daniel Han-Chen Date: Sun Feb 25 04:14:57 2024 +1100 Update gemma.py commit 9d648cbccfb6cfdca81ca0d701c009b6e4468299 Author: Daniel Han-Chen Date: Sun Feb 25 04:14:38 2024 +1100 Update gemma.py commit 6238f166253d813f1e76b4e687dcb8cd87d7ad0e Author: Daniel Han-Chen Date: Sun Feb 25 04:08:04 2024 +1100 Update gemma.py commit 9a20dff5d2afec668f18f98ec3f335616ba1aeb9 Author: Daniel Han-Chen Date: Sun Feb 25 04:07:47 2024 +1100 Update gemma.py commit 94833602da432b73fd3054609788809937eba63b Author: Daniel Han-Chen Date: Sun Feb 25 03:55:44 2024 +1100 Update gemma.py commit 40c244a7bc13202a56aff2ffbf3a429170d24880 Author: Daniel Han-Chen Date: Sun Feb 25 03:53:40 2024 +1100 Update gemma.py commit 
0e1578dedb7d94484dea80b8b21b47ba52f7bef1 Author: Daniel Han-Chen Date: Sun Feb 25 03:51:32 2024 +1100 Update gemma.py commit 33a72ba122ed8f68b64ee34d8e8986a356281e37 Author: Daniel Han-Chen Date: Sun Feb 25 03:50:59 2024 +1100 Update gemma.py commit 890d73e6d04558854205fd26c7a94e3c35531710 Author: Daniel Han-Chen Date: Sun Feb 25 03:49:55 2024 +1100 Update gemma.py commit 33eeb7add2b75aee9dbd382c91c4c751cfb51db5 Author: Daniel Han-Chen Date: Sun Feb 25 03:44:22 2024 +1100 Update gemma.py commit 096a4192fb3da237620a4e9d7e11ed681e8dbaa6 Author: Daniel Han-Chen Date: Sun Feb 25 03:39:40 2024 +1100 Update gemma.py commit f270f377abb2240767bcc4ffcdabd912751e1ffa Author: Daniel Han-Chen Date: Sun Feb 25 03:39:31 2024 +1100 Update gemma.py commit 765e54fe5550fca9cb289ca655b1c85e1e4b0087 Author: Daniel Han-Chen Date: Sun Feb 25 03:38:18 2024 +1100 Update gemma.py commit 29721a86d1ad128f9c7c073baf70f81ec11a3f16 Author: Daniel Han-Chen Date: Sun Feb 25 03:37:30 2024 +1100 Update gemma.py commit 208e2c11897058ddc457dcc594c5f996831e74e8 Author: Daniel Han-Chen Date: Sun Feb 25 03:32:23 2024 +1100 Update gemma.py commit aff1db7a4c91028907f821af0785258d575433a5 Author: Daniel Han-Chen Date: Sun Feb 25 03:22:24 2024 +1100 Update gemma.py commit 98903e72c2a43384fb6f0d6b9916817a9f4500f5 Author: Daniel Han-Chen Date: Sun Feb 25 03:21:37 2024 +1100 Update gemma.py commit 20da5547f58cc08bab2adb6fa1d9c86a0578b84a Author: Daniel Han-Chen Date: Sun Feb 25 03:19:34 2024 +1100 Update gemma.py commit 20e9ca2fac88a6f24ac02ff1cf08e87d16e9a4b5 Author: Daniel Han-Chen Date: Sun Feb 25 03:16:55 2024 +1100 Update gemma.py commit 842b31076702963dd3e3c9984550af9818f16a8e Author: Daniel Han-Chen Date: Sun Feb 25 03:16:34 2024 +1100 Update gemma.py commit 11067cf849c8aeda31307a8cf15a31b14736dd6d Author: Daniel Han-Chen Date: Sun Feb 25 03:14:02 2024 +1100 Update gemma.py commit 6725dc93f11e7f6c7bb053ec8e350819d3030970 Author: Daniel Han-Chen Date: Sun Feb 25 03:13:18 2024 +1100 Update gemma.py commit 
baf11e0e3436053a5fa2ad642f85c30c28547127 Author: Daniel Han-Chen Date: Sun Feb 25 03:12:07 2024 +1100 Update gemma.py commit e0ada344acdfc4eb58644f2900fb5d2dbbccfbc6 Author: Daniel Han-Chen Date: Sun Feb 25 03:11:20 2024 +1100 Update gemma.py commit 39a58f7d2e0a9de44656169f49206b21bd1c4abe Author: Daniel Han-Chen Date: Sun Feb 25 03:07:20 2024 +1100 Update gemma.py commit c96e8c28f85d452b07e9f6d20f30bd179abccffd Author: Daniel Han-Chen Date: Sun Feb 25 03:03:31 2024 +1100 Update gemma.py commit 10d03d8315b1f309f557d0bb8adcbd6501f4e7ee Author: Daniel Han-Chen Date: Sun Feb 25 03:03:23 2024 +1100 Update gemma.py commit 45a33bae1cb95c5880cf04ea3679c965ce305d92 Author: Daniel Han-Chen Date: Sun Feb 25 03:01:56 2024 +1100 Update gemma.py commit 77fea80a1bd808749cb4b3bfd367a25fb3eb01db Author: Daniel Han-Chen Date: Sun Feb 25 03:01:30 2024 +1100 Update gemma.py commit e3517f02483f9b5122a2e37a2e224b0ad1b4fea5 Author: Daniel Han-Chen Date: Sun Feb 25 03:00:22 2024 +1100 Update gemma.py commit 03c7f211a61025ede6da81445e980fd783c80932 Author: Daniel Han-Chen Date: Sun Feb 25 02:49:14 2024 +1100 Update gemma.py commit 105a0325f19513e7b9e1ef24da31158246ffab87 Author: Daniel Han-Chen Date: Sun Feb 25 02:42:56 2024 +1100 Update gemma.py commit 9d9e38a74af2eb36724ad3bd8700e96ca8430369 Author: Daniel Han-Chen Date: Sun Feb 25 02:41:24 2024 +1100 Update gemma.py commit 49a124243e20232cf0dcbc49551b420c83d9228d Author: Daniel Han-Chen Date: Sun Feb 25 02:37:48 2024 +1100 Update gemma.py commit 5c31fd1e5b17697df30087a4436f1d0234ffda1a Author: Daniel Han-Chen Date: Sun Feb 25 02:34:18 2024 +1100 Update gemma.py commit c0f6b6c0cccc0b77abf7f34aaa7992b0c17a866b Author: Daniel Han-Chen Date: Sun Feb 25 02:31:22 2024 +1100 Update gemma.py commit 911745377aba2ded64eaf96e09e9ee3495d54763 Author: Daniel Han-Chen Date: Sun Feb 25 02:30:06 2024 +1100 Update gemma.py commit f73698859e1d1d519eb41b1238bd3bea73b78008 Author: Daniel Han-Chen Date: Sun Feb 25 02:28:01 2024 +1100 Update gemma.py commit 
e56eb3f4069a1d048f18ec40b0f6518fc7f6006c Author: Daniel Han-Chen Date: Sun Feb 25 02:26:57 2024 +1100 Update gemma.py commit 045bce2fa56ed8ad4d495553f61bc4708f00d37b Author: Daniel Han-Chen Date: Sun Feb 25 02:25:39 2024 +1100 Update gemma.py commit a847719499a505318a7af3169141c3d316e2b63b Author: Daniel Han-Chen Date: Sun Feb 25 02:18:43 2024 +1100 Update gemma.py commit c058d1aa96b46dd9ff4248967d4190e012ac8e09 Author: Daniel Han-Chen Date: Sun Feb 25 02:17:45 2024 +1100 Update gemma.py commit dfb2e250f6f332a3d8aa49da2391d928c535bdd9 Author: Daniel Han-Chen Date: Sun Feb 25 02:17:12 2024 +1100 Update gemma.py commit f2fca2d6c1f474f9531a30af85397becc2b3af79 Author: Daniel Han-Chen Date: Sun Feb 25 02:10:35 2024 +1100 Update gemma.py commit ae473e0f2be99bf8db68172e3c2f96b3b8220997 Author: Daniel Han-Chen Date: Sun Feb 25 02:09:08 2024 +1100 rope commit ce2d74732d289c2ea804ae9887ab09d849268f1c Author: Daniel Han-Chen Date: Sun Feb 25 02:07:29 2024 +1100 Update llama.py commit ed5042aa07c16cb88fc3cfac47acce24cd2ef3e5 Author: Daniel Han-Chen Date: Sun Feb 25 02:04:11 2024 +1100 Update llama.py commit 42840647d0ab2f878713af3159797a63634c8856 Author: Daniel Han-Chen Date: Sun Feb 25 02:02:59 2024 +1100 Update llama.py commit ecd21b375b2ebbe6358cfcd4f2570e2e49866297 Author: Daniel Han-Chen Date: Sun Feb 25 02:02:02 2024 +1100 Update llama.py commit 5bf8cacebd67fa40b7a4d150414722d262bb0fb1 Author: Daniel Han-Chen Date: Sun Feb 25 01:58:33 2024 +1100 Update gemma.py commit 66a4380d084f67e8c207b596ae9caffcf15e540e Author: Daniel Han-Chen Date: Sun Feb 25 01:50:42 2024 +1100 Update gemma.py commit b85b45e7a083154a845fe5f3dcb276858d9f6a0f Author: Daniel Han-Chen Date: Sun Feb 25 01:49:39 2024 +1100 Update gemma.py commit 72870a1ed33e5c8ddc4c3787a09fd0221e0362c3 Author: Daniel Han-Chen Date: Sun Feb 25 01:48:02 2024 +1100 Update gemma.py commit 7b903368abf5fe0e7078e2298967e0b010871329 Author: Daniel Han-Chen Date: Sun Feb 25 01:45:44 2024 +1100 Update gemma.py commit 
0c2d7e503bd391c9cee6d09f890f6f714a4e4a87 Author: Daniel Han-Chen Date: Sun Feb 25 01:39:25 2024 +1100 Update cross_entropy_loss.py commit 9ae5abc83fd24f9abd402e219238a54c6aa26bb1 Author: Daniel Han-Chen Date: Sun Feb 25 01:12:44 2024 +1100 Update gemma.py commit 3907ea90030c37a9447c2f17694aaa13298ba322 Author: Daniel Han-Chen Date: Sun Feb 25 01:10:12 2024 +1100 Update gemma.py commit c4c8558a9075441b1ef8f4602b75649341f410c9 Author: Daniel Han-Chen Date: Sun Feb 25 01:04:09 2024 +1100 Update gemma.py commit e0e96ef5ce1b666551f06f7e8fb8d695c9f4a31d Author: Daniel Han-Chen Date: Sun Feb 25 01:03:04 2024 +1100 Update gemma.py commit df22d0cb3afcaf000dafc827b1d282eed49fa27f Author: Daniel Han-Chen Date: Sun Feb 25 00:46:58 2024 +1100 Update gemma.py commit b9d9aa1b07ff8e4a38a26482c075115a91793dc9 Author: Daniel Han-Chen Date: Sun Feb 25 00:44:44 2024 +1100 Update gemma.py commit 785760ba0dedcc65869644c28168f5203d245e60 Author: Daniel Han-Chen Date: Sat Feb 24 22:43:00 2024 +1100 Update gemma.py commit 103b1cdbed56a05f1bfa1affad931686686dcd7a Author: Daniel Han-Chen Date: Sat Feb 24 22:42:09 2024 +1100 Update gemma.py commit b482ce13e9e38f8ad1106f3d1df119afecf9f7f3 Author: Daniel Han-Chen Date: Sat Feb 24 22:40:46 2024 +1100 Update gemma.py commit 7be57334cb820238d51bb555727c9a36fc0c7704 Author: Daniel Han-Chen Date: Sat Feb 24 22:39:28 2024 +1100 Update gemma.py commit 2242bdf0a32e49d6692e8a5e70677f110d81e61d Author: Daniel Han-Chen Date: Sat Feb 24 22:30:14 2024 +1100 Update gemma.py commit a066c348f2f33710958d09084f97bebdf009520b Author: Daniel Han-Chen Date: Sat Feb 24 22:24:43 2024 +1100 Update llama.py commit b3d7e6160840b23e1c4334195abe0adf52f0ccb9 Author: Daniel Han-Chen Date: Sat Feb 24 20:20:37 2024 +1100 Update gemma.py commit 5b5652de364716da0fb6f9fc36a91a4f42bcb9ca Author: Daniel Han-Chen Date: Sat Feb 24 20:17:24 2024 +1100 Update gemma.py commit 0f5cc839f39311866ae425a4dd6ecd96d9c6d197 Author: Daniel Han-Chen Date: Sat Feb 24 20:15:25 2024 +1100 Update 
gemma.py commit 6011a490c66dabb17314368883d5153f6516440e Author: Daniel Han-Chen Date: Sat Feb 24 20:13:24 2024 +1100 Update gemma.py commit 6d94cf88a7fb5f993815af945dac7f3ec07061fc Author: Daniel Han-Chen Date: Sat Feb 24 20:11:23 2024 +1100 Update gemma.py commit c706447fd4bbace502e6ab2de214d3de38e9961d Author: Daniel Han-Chen Date: Sat Feb 24 20:04:31 2024 +1100 Update gemma.py commit ed3f139a9cc0e5c6f6963bed809dd5947d6b9d78 Author: Daniel Han-Chen Date: Sat Feb 24 19:55:22 2024 +1100 Update gemma.py commit b7ba95857f1d46504a75c3c6596929ec91552dea Author: Daniel Han-Chen Date: Sat Feb 24 19:47:10 2024 +1100 Update gemma.py commit f0b21f9b7b829304349fa193f6171e6b521066c4 Author: Daniel Han-Chen Date: Sat Feb 24 19:45:20 2024 +1100 Update gemma.py commit 762790d6a92f5975b16175313dc341b27a218830 Author: Daniel Han-Chen Date: Sat Feb 24 19:35:03 2024 +1100 Update gemma.py commit b174e54507166987f44a59cb5d1e11a4ce8bf770 Author: Daniel Han-Chen Date: Sat Feb 24 19:33:04 2024 +1100 Update gemma.py commit 47c6feaf899f7224466bc2bf5c688cabd673ede6 Author: Daniel Han-Chen Date: Sat Feb 24 19:12:52 2024 +1100 Update gemma.py commit b2b658cbee9428e3ff9d848842b627212218baff Author: Daniel Han-Chen Date: Sat Feb 24 19:10:36 2024 +1100 Update gemma.py commit 407205dd87fd63a5ec5fbea6f6185fc2e7a6134e Author: Daniel Han-Chen Date: Sat Feb 24 18:58:31 2024 +1100 Update gemma.py commit 82e45c02a51594378880d2d807e1872e01ba5e76 Author: Daniel Han-Chen Date: Sat Feb 24 18:25:25 2024 +1100 revert commit 94201755db41740e02d7931f6249ccd4c06c6549 Author: Daniel Han-Chen Date: Sat Feb 24 18:17:38 2024 +1100 revert commit d5e625b674fb3317f6d1ff1fd1fb7ea028ea0523 Author: Daniel Han-Chen Date: Sat Feb 24 18:04:40 2024 +1100 Update cross_entropy_loss.py commit ba19344fb9400a7f0c0d2887977a74f81d3dead8 Author: Daniel Han-Chen Date: Sat Feb 24 18:03:45 2024 +1100 Update cross_entropy_loss.py commit 7916872408cb567151822ac2906a4faa0302bfe5 Author: Daniel Han-Chen Date: Sat Feb 24 17:48:19 2024 
+1100 Update llama.py commit 49129bc66ca11f4e3330f9f84d2da9fe85e0bad5 Author: Daniel Han-Chen Date: Sat Feb 24 17:43:19 2024 +1100 Update gemma.py commit ebbd4756d42a9766f9e9d786056f5933796a407d Author: Daniel Han-Chen Date: Sat Feb 24 17:41:53 2024 +1100 Update gemma.py commit 68e280405c4842c88a9020582257cd291d15225b Author: Daniel Han-Chen Date: Sat Feb 24 17:40:21 2024 +1100 Update gemma.py commit 9af090f6ea28d5d2e2907381d5b58ed673de66b8 Author: Daniel Han-Chen Date: Sat Feb 24 17:32:04 2024 +1100 Update gemma.py commit 61add47f37572d9f5eca0f464cede384b39dff7f Author: Daniel Han-Chen Date: Sat Feb 24 17:31:10 2024 +1100 Update gemma.py commit de96285442c5fc65c12f80a09f7345a12b703d0a Author: Daniel Han-Chen Date: Sat Feb 24 17:29:51 2024 +1100 Update gemma.py commit 0abbcdc15a036ec1f2244d7285547354010c3f1b Author: Daniel Han-Chen Date: Sat Feb 24 17:29:09 2024 +1100 Update gemma.py commit 17036a6e54de4fb078768a91fff432b61b555a4d Author: Daniel Han-Chen Date: Sat Feb 24 17:27:49 2024 +1100 Update llama.py commit 8b7de591c133ff2528e5b4aeb2274595359a20e6 Author: Daniel Han-Chen Date: Sat Feb 24 17:26:19 2024 +1100 Update gemma.py commit 041aa7d909c040028b24f76ad4c911ff7f8163b3 Author: Daniel Han-Chen Date: Sat Feb 24 17:18:09 2024 +1100 Update gemma.py commit 71483e6c8d577f78fb0e130f9859e715a4786e50 Author: Daniel Han-Chen Date: Sat Feb 24 17:17:26 2024 +1100 Update gemma.py commit 8cb12078f9536b7936817e9b068a6ddd7b4686dd Author: Daniel Han-Chen Date: Sat Feb 24 17:15:44 2024 +1100 Update gemma.py commit 23e6ebf14dc7b2d9bcba737b29abf6f1922682fc Author: Daniel Han-Chen Date: Sat Feb 24 17:13:40 2024 +1100 Update gemma.py commit a6752f3f16e147024fe9079d881bf8bc0854000b Author: Daniel Han-Chen Date: Sat Feb 24 17:12:00 2024 +1100 Update gemma.py commit 9c565580fbcc6bf95ca9d775e83f0c863eab9f1e Author: Daniel Han-Chen Date: Sat Feb 24 17:09:03 2024 +1100 Update gemma.py commit cd479b4374821b2b694a0467063a52c73c546481 Author: Daniel Han-Chen Date: Sat Feb 24 17:05:59 2024 
+1100 Update cross_entropy_loss.py commit 0080357a8832f89054158b3d341241af923f08f1 Author: Daniel Han-Chen Date: Sat Feb 24 17:03:54 2024 +1100 Update gemma.py commit e8de606be97ced3a0770da87a0aea6e9a0b217c2 Author: Daniel Han-Chen Date: Sat Feb 24 17:02:54 2024 +1100 Update gemma.py commit 35adcbf83ca7db437826ad578c4bbdd1c49c0808 Author: Daniel Han-Chen Date: Sat Feb 24 17:02:14 2024 +1100 Update gemma.py commit 7e33aa25205a783896877c50eb666d19d7937721 Author: Daniel Han-Chen Date: Sat Feb 24 16:59:24 2024 +1100 Update gemma.py commit 4c7d21e41b99b09c421e668af18200e2e207b51d Author: Daniel Han-Chen Date: Sat Feb 24 16:57:16 2024 +1100 Update gemma.py commit 3feae56451db28a142f28c5859c3df46c853f49c Author: Daniel Han-Chen Date: Sat Feb 24 16:55:21 2024 +1100 Update gemma.py commit c060bad7ce50ab1532327389337f205e96303461 Author: Daniel Han-Chen Date: Sat Feb 24 16:47:39 2024 +1100 Update gemma.py commit 8ffaf5f1098c1dc35cda64332506c1f12f35058f Author: Daniel Han-Chen Date: Sat Feb 24 15:44:10 2024 +1100 Update gemma.py commit be098122f14aba8304f1a5b1963ebd16fe42deaa Author: Daniel Han-Chen Date: Sat Feb 24 15:42:12 2024 +1100 Update llama.py commit 8a618c60ed6c24dc78067e5c541c02c291b07f5c Author: Daniel Han-Chen Date: Sat Feb 24 14:29:15 2024 +1100 pos commit ef235c35089b1a46b4fe10789f330addada6b09d Author: Daniel Han-Chen Date: Sat Feb 24 14:27:39 2024 +1100 Update gemma.py commit a48adb0a83635d7013647bc92092d778a1f55f54 Author: Daniel Han-Chen Date: Sat Feb 24 14:27:31 2024 +1100 Update gemma.py commit 8920cda9a7b2cf8477c6a00d84e25c7e7969ff75 Author: Daniel Han-Chen Date: Sat Feb 24 14:13:31 2024 +1100 position_ids commit a1eab801fa855dea0f6737c0e9e576676577fbec Author: Daniel Han-Chen Date: Sat Feb 24 14:06:48 2024 +1100 Update gemma.py commit c07aff5188229052456673eae679d122e2ea69bf Author: Daniel Han-Chen Date: Sat Feb 24 14:05:18 2024 +1100 Update gemma.py commit 4c6e122caa3a9a3a9dafa7af295ecd9f91373022 Author: Daniel Han-Chen Date: Sat Feb 24 14:02:26 2024 
+1100 norm commit 5c5bb5324167b6a13afc33b143bb7092457548a0 Author: Daniel Han-Chen Date: Sat Feb 24 13:59:30 2024 +1100 Update llama.py commit 0e1826dd8c8370d28401721d2d30ab062644e56a Author: Daniel Han-Chen Date: Sat Feb 24 13:55:37 2024 +1100 Update llama.py commit 893b5dfe2b0a84eee68be1471c8b5f9f44770fe0 Author: Daniel Han-Chen Date: Sat Feb 24 13:54:00 2024 +1100 revert commit 9ab87f3648f714590595939ed644d31fadb238f0 Author: Daniel Han-Chen Date: Sat Feb 24 13:33:05 2024 +1100 Update cross_entropy_loss.py commit d2dc658077ebdef1d884edd768ff853a459d15e4 Author: Daniel Han-Chen Date: Sat Feb 24 13:22:17 2024 +1100 Update geglu.py commit 1fcbd61f76f1e58eaddb51bf38cda96689dc8e89 Author: Daniel Han-Chen Date: Sat Feb 24 03:40:56 2024 +1100 Update cross_entropy_loss.py commit fadcb311c3769619e7cd7120658d885684f73525 Author: Daniel Han-Chen Date: Sat Feb 24 03:40:43 2024 +1100 Update llama.py commit 1a2a10d0280d1f6d93d4135c60d09418f902080e Author: Daniel Han-Chen Date: Sat Feb 24 03:38:25 2024 +1100 Update llama.py commit 17a1a855e028cb501474e0e1e822f28e20b9f8ef Author: Daniel Han-Chen Date: Sat Feb 24 03:33:46 2024 +1100 CE commit 097629108f45fb53fd000baab195e8bb90a0096e Author: Daniel Han-Chen Date: Sat Feb 24 03:30:43 2024 +1100 Update llama.py commit 30cc4ffd67abd9ea227a982d1eeb87fabfb2b79f Author: Daniel Han-Chen Date: Sat Feb 24 03:27:21 2024 +1100 Update llama.py commit 20227ba5c774dea025eacee4c922606465957636 Author: Daniel Han-Chen Date: Sat Feb 24 03:14:29 2024 +1100 Update llama.py commit 73a8616f99720e52ac5cadee4aaef624d60172b9 Author: Daniel Han-Chen Date: Sat Feb 24 03:03:00 2024 +1100 Update llama.py commit bd4fd22f34dd6c673f994de6af62da6ca7cf18fb Author: Daniel Han-Chen Date: Sat Feb 24 02:58:05 2024 +1100 Update cross_entropy_loss.py commit 2814f02087c7a77587925095c297afa88cda24f1 Author: Daniel Han-Chen Date: Sat Feb 24 02:33:28 2024 +1100 Update cross_entropy_loss.py commit dd321363f2a496c05950b5394e29a30f8efa2c21 Author: Daniel Han-Chen Date: Sat 
Feb 24 02:30:49 2024 +1100 Update cross_entropy_loss.py commit 574b2f787f19898ae00d7bfcbc0e282c32c838a2 Author: Daniel Han-Chen Date: Sat Feb 24 02:29:33 2024 +1100 Update cross_entropy_loss.py commit 6184f770e7c72303c1cd6b6de78c3aaf26d1d9bc Author: Daniel Han-Chen Date: Sat Feb 24 02:10:38 2024 +1100 Update cross_entropy_loss.py commit f5f3d6794ecf70e8ece5443b6c986168d8d0c418 Author: Daniel Han-Chen Date: Sat Feb 24 02:08:26 2024 +1100 Update cross_entropy_loss.py commit 88bf684c615f2d1b0e5d06ead94b6e7b5d14117b Author: Daniel Han-Chen Date: Sat Feb 24 02:06:11 2024 +1100 Update cross_entropy_loss.py commit 603c71c7f0b8d29d3b562d6bf03987f083dea185 Author: Daniel Han-Chen Date: Sat Feb 24 02:02:11 2024 +1100 Fast CE Loss commit 25339b71f762c30c798bdf234929bbcc1dd72eba Author: Daniel Han-Chen Date: Fri Feb 23 18:12:15 2024 +1100 Update fast_lora.py commit ebfc8f8a55769c616a18327677275d6c4686bc96 Author: Daniel Han-Chen Date: Fri Feb 23 17:57:42 2024 +1100 Update fast_lora.py commit 6ab5eb75f4ac629ecc214c7bf0762861236fc326 Author: Daniel Han-Chen Date: Fri Feb 23 17:55:58 2024 +1100 Update llama.py commit f999bcd52ddd5db12bcbdd12b238b2a294df3094 Author: Daniel Han-Chen Date: Fri Feb 23 17:46:33 2024 +1100 Update llama.py commit 13d0cee4f6a62ea17150e63c6baf9adb6e9decc4 Author: Daniel Han-Chen Date: Fri Feb 23 17:37:17 2024 +1100 Update llama.py commit b66f6dbfd3b34d25917c27b984d8a08fb9341b0b Author: Daniel Han-Chen Date: Fri Feb 23 17:28:29 2024 +1100 Update llama.py commit 5866b08f71daaf25cf0fdb09ba77abb8aba59624 Author: Daniel Han-Chen Date: Fri Feb 23 04:11:08 2024 +1100 gemma commit dd478be2fadebf664ebff8d7bfa812bd52757496 Author: Daniel Han-Chen Date: Fri Feb 23 03:59:09 2024 +1100 Update llama.py commit 2ff33fb2707735831676a5c53ccf07baff5c9a6f Author: Daniel Han-Chen Date: Fri Feb 23 03:52:24 2024 +1100 Update llama.py commit 4c9f366688a3be27e69bf27c72c62a396567026d Author: Daniel Han-Chen Date: Fri Feb 23 03:27:42 2024 +1100 Update cross_entropy_loss.py commit 
cfebc8d9794d97fcc7350fcc177ee843b8041682 Author: Daniel Han-Chen Date: Fri Feb 23 03:27:03 2024 +1100 Update llama.py commit 4b009aad336ecc2ac26b02ec782dab7699007347 Author: Daniel Han-Chen Date: Fri Feb 23 03:16:50 2024 +1100 Update llama.py commit 6f340f54178e58fe4609fdb3c06343f0e3cec821 Author: Daniel Han-Chen Date: Fri Feb 23 02:38:16 2024 +1100 Update fast_lora.py commit ff27a824fac89dbebddf2f68089b229f6551809d Author: Daniel Han-Chen Date: Fri Feb 23 02:33:37 2024 +1100 Update llama.py commit 5d728270d3706b38e98383f8457d5fa5f39f774f Author: Daniel Han-Chen Date: Fri Feb 23 02:28:05 2024 +1100 Update llama.py commit e743e58cb62b5a075325773de1c016367dea19b6 Author: Daniel Han-Chen Date: Fri Feb 23 02:23:25 2024 +1100 Update gemma.py commit 879bdd2efb52468108cf7dfa432501bcfcd11f33 Author: Daniel Han-Chen Date: Fri Feb 23 02:20:26 2024 +1100 Update gemma.py commit 9052a85f92378c892ccbd7595af49ca999e34bd0 Author: Daniel Han-Chen Date: Fri Feb 23 02:19:06 2024 +1100 Update gemma.py commit bad295a41338daf07b7ee22eee114413298adff9 Author: Daniel Han-Chen Date: Fri Feb 23 02:15:12 2024 +1100 Update llama.py commit b30d2502ccb8f97e5c0ec252a36ec6dc04f20f4b Author: Daniel Han-Chen Date: Fri Feb 23 02:12:02 2024 +1100 Update llama.py commit 11233cb3d40077a4f92a0f57b0dc4ac9a9dcdcd3 Author: Daniel Han-Chen Date: Fri Feb 23 02:10:11 2024 +1100 model_type commit 76de9c1fe3fa94342b78d43877077761c0fd628f Author: Daniel Han-Chen Date: Fri Feb 23 02:05:31 2024 +1100 FastGemmaModel commit c0d32de795c562057bdd36c4b36024a41dca4f97 Author: Daniel Han-Chen Date: Fri Feb 23 02:02:39 2024 +1100 Update fast_lora.py commit bd2fa264c9027367cb5b5458d03cb6d623f12e9e Author: Daniel Han-Chen Date: Fri Feb 23 01:57:34 2024 +1100 Update mapper.py commit 9a1b28d6912a117a2fe8ca3128424d0c1882a7ee Author: Daniel Han-Chen Date: Fri Feb 23 01:55:25 2024 +1100 Update pyproject.toml commit 0beaf18908a2f275ebd553b5784a984db7b7b094 Author: Daniel Han-Chen Date: Fri Feb 23 01:50:51 2024 +1100 Gemma commit 
0659d90d95ac0aa5e8f5b6994290415923539e3a Merge: 7372768 3e4c5a3 Author: Daniel Han-Chen Date: Wed Feb 21 17:49:57 2024 +1100 Merge branch 'main' into nightly commit 7372768d14b564386e47a83492875870b82d9110 Author: Daniel Han-Chen Date: Wed Feb 21 03:29:49 2024 +1100 original commit 10d9d56434b98aabf961685429631662f3f350a4 Author: Daniel Han-Chen Date: Wed Feb 21 03:25:46 2024 +1100 spaces commit edd03f66fbd0698076352630aa3bcb10e6b46f00 Author: Daniel Han-Chen Date: Wed Feb 21 03:14:27 2024 +1100 trainer commit 028ee5ca06f09cd01567ec2ac90db54f1e957fcd Author: Daniel Han-Chen Date: Wed Feb 21 02:05:33 2024 +1100 save commit 917f791ab78a0fb73f752d9736fbef782242ee19 Author: Daniel Han-Chen Date: Wed Feb 21 00:53:17 2024 +1100 Update save.py commit bf3e10b26eece02d1aad11e2f8a83f019d1bfa92 Author: Daniel Han-Chen Date: Wed Feb 21 00:48:18 2024 +1100 Update save.py commit d266332141e29025649eef49cc98540494e64206 Author: Daniel Han-Chen Date: Tue Feb 20 23:28:37 2024 +1100 Update save.py commit 4d1e575047d883144ad4825d73fd865829ea2543 Author: Daniel Han-Chen Date: Tue Feb 20 20:01:33 2024 +1100 Update __init__.py commit 6aac6c4be8219f8fb0ba19d8a77b404ebbd9c2f6 Author: Daniel Han-Chen Date: Tue Feb 20 19:50:29 2024 +1100 Update save.py commit 83d906a2c0ec54c072c18dcf539803713b84704e Author: Daniel Han-Chen Date: Tue Feb 20 18:35:11 2024 +1100 Update save.py commit 5d88ffefd5480acbe3d97050ee9b93df0f56d67c Author: Daniel Han-Chen Date: Tue Feb 20 18:00:51 2024 +1100 Update save.py commit 4c9be6d0572dc120a8044516b51ce7c1d9e14ae2 Author: Daniel Han-Chen Date: Tue Feb 20 17:12:18 2024 +1100 Update save.py commit d3eac595a139bb8d0ef2c15a9317bab3c7221f94 Author: Daniel Han-Chen Date: Tue Feb 20 17:11:33 2024 +1100 Update save.py commit 632705b9b0e8be7426fe89259612837bf32c3601 Author: Daniel Han-Chen Date: Tue Feb 20 17:00:19 2024 +1100 Update save.py commit 8f60bf57be9401a783e535b77564d461bcfa8ed9 Author: Daniel Han-Chen Date: Tue Feb 20 15:59:38 2024 +1100 Update save.py commit 
164fa807fe6a2dd3b9b83e112d0d3e7676c96a57 Author: Daniel Han-Chen Date: Tue Feb 20 04:44:17 2024 +1100 Update save.py commit 11e04a70570352b8cddcce88b0cee8d42eb3ed22 Author: Daniel Han-Chen Date: Tue Feb 20 04:42:47 2024 +1100 Update save.py commit d2e76580aec520662c015633ceadd8a2669164ce Author: Daniel Han-Chen Date: Tue Feb 20 04:41:40 2024 +1100 Update save.py commit 3e3dc37640fd375895872a78a9fe3b76698b3257 Author: Daniel Han-Chen Date: Tue Feb 20 04:41:34 2024 +1100 Update save.py commit d9751e655314fec4fe69c5de34eb600b4c600976 Author: Daniel Han-Chen Date: Tue Feb 20 04:35:07 2024 +1100 Update save.py commit 44659a05a304aed408b0bf9e1195614885ff8a79 Author: Daniel Han-Chen Date: Tue Feb 20 04:13:58 2024 +1100 Update save.py commit 34998914acdff890b313c7c236a5bbe27dd9aa49 Author: Daniel Han-Chen Date: Tue Feb 20 03:35:19 2024 +1100 saving commit 3728b8e876e9f22d509f3b968348bc14be39c4a3 Author: Daniel Han-Chen Date: Tue Feb 20 02:19:36 2024 +1100 Update save.py commit c20bb30710babd52ddc7f78886f06ac570b4369d Author: Daniel Han-Chen Date: Tue Feb 20 02:07:01 2024 +1100 Update save.py commit 2c207b4989a4be827b19e0b213ac1821b059207a Author: Daniel Han-Chen Date: Tue Feb 20 00:10:04 2024 +1100 llama.cpp bugs commit 0ffd7b46f3531ba356674a40852b8a4cf4ea1808 Author: Daniel Han-Chen Date: Mon Feb 19 19:39:47 2024 +1100 linking commit ad0c2bcd878bc6b0395ad9d83d3ef25d5ac5927c Author: Daniel Han-Chen Date: Mon Feb 19 13:08:03 2024 +1100 Update save.py commit 0c1b71ac63c977c38fe8c53a7de58b53627fde18 Author: Daniel Han-Chen Date: Mon Feb 19 12:51:25 2024 +1100 Update save.py commit 14db89f4fc14774307e036c3fe2d5b00b125353b Author: Daniel Han-Chen Date: Mon Feb 19 04:13:40 2024 +1100 Update save.py commit e31071f3683ff1739e269a7a02f4c0e1d0a6e387 Author: Daniel Han-Chen Date: Mon Feb 19 04:01:27 2024 +1100 Update save.py commit aa45208f5ae77a72291e5f9042282042c66eb6a0 Author: Daniel Han-Chen Date: Mon Feb 19 03:39:09 2024 +1100 Update save.py commit 
28de27dd8d4d19c749ddf0ac9a8122f169ca4df8 Author: Daniel Han-Chen Date: Mon Feb 19 03:22:21 2024 +1100 PeftModel token + saving commit 99cdf0bd5a767c0dec4a701b53730165db01bbab Author: Daniel Han-Chen Date: Mon Feb 19 02:45:54 2024 +1100 Update save.py commit 71555c7678b6e3ffae9856413688902a8f527a98 Author: Daniel Han-Chen Date: Mon Feb 19 02:37:43 2024 +1100 Update save.py commit 89d2418cd785d2fbae2f07fb1ec00554e4022a9f Author: Daniel Han-Chen Date: Sun Feb 18 23:35:18 2024 +1100 Update save.py commit 3e89d4e7ec63782604023ce5d922ba460ca7fa29 Author: Daniel Han-Chen Date: Sun Feb 18 23:31:24 2024 +1100 Update save.py commit 457c0444737eb0a136ca7231cd119ec18ff9eb82 Author: Daniel Han-Chen Date: Sun Feb 18 23:25:36 2024 +1100 install commit 22378e95cc026cd63138772db0f7d40b40c75150 Author: Daniel Han-Chen Date: Sun Feb 18 23:12:37 2024 +1100 Update pyproject.toml commit 3d789cb09ee9f3e600c7bae1da040a930762d626 Author: Daniel Han-Chen Date: Sun Feb 18 22:59:37 2024 +1100 Update save.py commit 31a62c52d3af16300ba1da7b875afe8077bde3bb Author: Daniel Han-Chen Date: Sun Feb 18 22:54:38 2024 +1100 trainer commit 870edf35343c0f91633359d07126f6d038694194 Author: Daniel Han-Chen Date: Sun Feb 18 22:52:51 2024 +1100 Update save.py commit b6a6e90b7aa0398f446eb51a23b0cd96c3f2cbd9 Author: Daniel Han-Chen Date: Sun Feb 18 22:03:22 2024 +1100 Update save.py commit 02b7b7f3f292165d77da65df005be62bc5648543 Author: Daniel Han-Chen Date: Sun Feb 18 20:28:56 2024 +1100 Update save.py commit 53f6a07b92220cd0e8ee16b9ef32f5cc235d49da Author: Daniel Han-Chen Date: Sun Feb 18 20:22:52 2024 +1100 Update save.py commit 7ee8243e8bc542087891c39b83d1fc0c82c4fec3 Author: Daniel Han-Chen Date: Sun Feb 18 20:19:28 2024 +1100 Update save.py commit 94b5c58c2e3a4bc0097e2320fc37172501fd0c68 Author: Daniel Han-Chen Date: Sun Feb 18 20:11:51 2024 +1100 Update save.py commit c6ad5f97a3128197cd9f8f33e3ab2611fe11a8ef Author: Daniel Han-Chen Date: Sun Feb 18 20:09:11 2024 +1100 Update loader.py commit 
cc2764cdddf2c5202780374b46702b5102f35a21 Author: Daniel Han-Chen Date: Sun Feb 18 19:54:34 2024 +1100 Update save.py commit 0af3a1b14357f1cd4712eaf00122f40e3d2d89c7 Author: Daniel Han-Chen Date: Sun Feb 18 19:44:59 2024 +1100 Update save.py commit ef8abf4c6e637aa4b658c5ec2d83ff09bc306f59 Author: Daniel Han-Chen Date: Sun Feb 18 19:25:56 2024 +1100 apache commit 164b950cadf1dbbacd07bfbc33bed5ec1a13b534 Author: Daniel Han-Chen Date: Sun Feb 18 19:24:35 2024 +1100 spaces commit 7aedc92637b373b805f7c632176d627b177ebd3b Author: Daniel Han-Chen Date: Sun Feb 18 19:21:02 2024 +1100 slashes commit 7573ee2c2284b371dcadb8b28e604de5c02a4fb4 Author: Daniel Han-Chen Date: Sun Feb 18 19:15:41 2024 +1100 slash commit e51a381305cfb55339eed3d20cf9d4c31e014129 Author: Daniel Han-Chen Date: Sun Feb 18 19:08:04 2024 +1100 globals commit c660879cdc2e60cc4d56b0f0e861c439f0938b73 Author: Daniel Han-Chen Date: Sun Feb 18 18:30:03 2024 +1100 spaces commit dcbbab3c22cd0fbf1d919cf5c375119f6e2592a9 Author: Daniel Han-Chen Date: Sun Feb 18 18:25:19 2024 +1100 spaces commit 5edae1cd797dab26f80646d79d1895b3410a57de Author: Daniel Han-Chen Date: Sun Feb 18 18:21:54 2024 +1100 readme commit 22f1f52513f20b2791e397943d93548cbba2f30b Author: Daniel Han-Chen Date: Sun Feb 18 18:18:12 2024 +1100 Update llama.py commit 2e65e636784403d95e5f80586a8e914e354c84f4 Author: Daniel Han-Chen Date: Sun Feb 18 18:09:38 2024 +1100 saving bugs commit c9a524a99a57ac83e89c0b69b810319df6d754cd Author: Daniel Han-Chen Date: Sun Feb 18 04:15:20 2024 +1100 Bugs commit 6d3a3b42864e6e0bcffa934b47d15182dfbe4340 Author: Daniel Han-Chen Date: Thu Feb 15 19:24:07 2024 +1100 Fix RoPE precision issues commit 84b37f8548912764345fbc3db7ede9099b92d916 Author: Daniel Han-Chen Date: Thu Feb 15 03:53:58 2024 +1100 Update mapper.py commit bd4a701e7aedde25f961987029906b0072defc9c Merge: 629c39d a030e80 Author: Daniel Han-Chen Date: Thu Feb 15 03:35:47 2024 +1100 Merge branch 'main' into nightly commit 
629c39d2e5fc74f74fd048d8c761169f0fa6d04a Author: Daniel Han-Chen Date: Wed Feb 14 23:17:38 2024 +1100 Update mistral.py commit 227c26ca44b1c966b7c8cb81e224963431e1ace0 Author: Daniel Han-Chen Date: Wed Feb 14 23:15:45 2024 +1100 Update llama.py commit b66013238071cd734e3afbb2e28bdf82833cd650 Author: Daniel Han-Chen Date: Wed Feb 14 23:11:06 2024 +1100 Saving, LlamaRotaryEmbedding issues commit 91a3c434680129642026cc2bbcb80b416767d327 Author: Daniel Han-Chen Date: Wed Feb 14 17:58:41 2024 +1100 Update chat_templates.py commit efbb1e6049bab0fc7b9f21dca60a31d2f911695a Author: Daniel Han-Chen Date: Wed Feb 14 17:56:14 2024 +1100 patch tokenizer commit 5f5910ffee33ee7bb96117cf17aa7a0b01b28f98 Author: Daniel Han-Chen Date: Wed Feb 14 17:45:02 2024 +1100 Update chat_templates.py commit d40a12852e179acacf05b7bd96e024c045afcde8 Author: Daniel Han-Chen Date: Wed Feb 14 17:31:33 2024 +1100 Update chat_templates.py commit 7c713cb58cabeab227e8f51ddba32bb005f43348 Author: Daniel Han-Chen Date: Wed Feb 14 17:27:03 2024 +1100 Update chat_templates.py commit b28a383d1cc57e28c9fd44629d0edc44e2a5e29c Author: Daniel Han-Chen Date: Wed Feb 14 04:20:41 2024 +1100 Update chat_templates.py commit 4f20e20e28e3544a3402a335e36a5fffc180921b Merge: 2cdf43d 7c46209 Author: Daniel Han-Chen Date: Wed Feb 14 03:25:40 2024 +1100 Merge branch 'main' into nightly commit 2cdf43d8b7e6e8e87ae9d6518a562002d8671079 Author: Daniel Han-Chen Date: Mon Feb 12 04:28:41 2024 +1100 Chat Templates commit acd635aa0d120118ef23c193bf48d03d40468650 Merge: b7c5296 e091bca Author: Daniel Han-Chen Date: Sun Feb 11 16:42:37 2024 +1100 Merge branch 'main' into nightly commit b7c52963ada5c8ee5debb77cca5646beb0b4a0ea Merge: 868fb27 ba3fa46 Author: Daniel Han-Chen Date: Fri Feb 9 03:52:36 2024 +1100 Merge branch 'main' into nightly commit 868fb27e1179992ea545ee4b32c1cdd7625e0b89 Merge: 601dc9e 7e7f5f3 Author: Daniel Han-Chen Date: Thu Feb 8 16:50:19 2024 +1100 Merge branch 'main' into nightly commit 
601dc9ec4bc3b590153fb53f886c13b63c0993a4 Author: Daniel Han-Chen Date: Thu Feb 8 03:39:45 2024 +1100 Update llama.py commit 31de486f1c2411bc54ca91504185d242ad1c09ff Merge: 81128a4 12b19e3 Author: Daniel Han-Chen Date: Thu Feb 8 03:39:19 2024 +1100 revert commit 81128a45040ed4ac67ae3f1195352cd7cfc765bc Author: Daniel Han-Chen Date: Thu Feb 8 03:00:53 2024 +1100 Update llama.py commit 998097394a9807ce502764f08331ff62b2507777 Author: Daniel Han-Chen Date: Thu Feb 8 02:57:09 2024 +1100 Update llama.py commit 277ca9eecf3a0e9861a691843d075e270b641c18 Author: Daniel Han-Chen Date: Thu Feb 8 02:52:43 2024 +1100 Update llama.py commit d6ab9c92d7bd7a71eae2420091da7e43c8f5cb80 Author: Daniel Han-Chen Date: Thu Feb 8 02:49:26 2024 +1100 Update llama.py commit e094914b0ea2a13ed1cfbcd5f5dd7b940e383dce Author: Daniel Han-Chen Date: Thu Feb 8 02:30:49 2024 +1100 Update llama.py commit 9a54a6f05eef1a74724739f24701019068934bf8 Author: Daniel Han-Chen Date: Wed Feb 7 20:15:19 2024 +1100 Update llama.py commit e94647c2dc2f0cf3de3c617983ea67359be5c06f Author: Daniel Han-Chen Date: Wed Feb 7 19:25:46 2024 +1100 Update llama.py commit e8593b710311c54f7c17de9e4438c58005875290 Author: Daniel Han-Chen Date: Wed Feb 7 18:25:08 2024 +1100 Update llama.py commit 1065936ecbb8d990dde225d1813d1c8f2ab48603 Author: Daniel Han-Chen Date: Wed Feb 7 18:21:36 2024 +1100 Update llama.py commit 60b47f6130d1dd1416a357fd5a0585ce7b4ab3bc Author: Daniel Han-Chen Date: Wed Feb 7 18:18:43 2024 +1100 Update llama.py commit e43e819a5706fb16d36d71f5c80197fd01241480 Author: Daniel Han-Chen Date: Wed Feb 7 18:02:58 2024 +1100 Update llama.py commit 1e77ab25b6932fac39b375e667182f13e79233fb Author: Daniel Han-Chen Date: Wed Feb 7 17:43:51 2024 +1100 Update llama.py commit b36e0bfc8902c93b76dab0d094348a833857ab0b Author: Daniel Han-Chen Date: Wed Feb 7 17:34:59 2024 +1100 Update llama.py commit ab0a3c9976c212278c5aea52ae0e35b3f1857a0b Author: Daniel Han-Chen Date: Wed Feb 7 04:40:19 2024 +1100 Update mistral.py commit 
2b346dc498bab3a2d262ec93cbecdf8206b5e2ca Author: Daniel Han-Chen Date: Wed Feb 7 03:45:07 2024 +1100 Update save.py commit 17a6b12ee339ed87206a3cc71819359fcd1c6455 Author: Daniel Han-Chen Date: Wed Feb 7 03:41:32 2024 +1100 Update save.py commit 7da7afcc78b2367ea2771bf41af1ab36bdd094c7 Author: Daniel Han-Chen Date: Wed Feb 7 03:20:55 2024 +1100 __version__ commit 9e2b00e1678476d6aa71d27c640a93670b2b7b5e Author: Daniel Han-Chen Date: Wed Feb 7 03:19:33 2024 +1100 __version__ commit 9c3849fc1dec9cedba319fae159652a1c363f4ed Author: Daniel Han-Chen Date: Wed Feb 7 03:18:02 2024 +1100 Update pyproject.toml commit bfb3ea717917e39cc7de6e2e9c49e2a952d1b623 Author: Daniel Han-Chen Date: Wed Feb 7 02:57:54 2024 +1100 Update save.py commit 213cfee903f57a185e718ba93038dced052ff993 Author: Daniel Han-Chen Date: Wed Feb 7 02:57:24 2024 +1100 Update save.py commit 8b52dc027e9601ac92721f325d18fa7917ab7ce3 Merge: 8e9d9c3 1d393f7 Author: Daniel Han-Chen Date: Wed Feb 7 02:50:35 2024 +1100 Merge branch 'main' into nightly commit 8e9d9c38b1c4dae4dbd4608dfebb4aa250bbed13 Author: Daniel Han-Chen Date: Wed Feb 7 01:48:31 2024 +1100 SWA inference commit d0b1144423e00575733f0957aa768ab5bcbccbc8 Author: Daniel Han-Chen Date: Tue Feb 6 18:53:32 2024 +1100 Fix llm_int8_skip_modules commit 39a2a7c57d6f00918a3c65c56640d16a8b37f14c Author: Daniel Han-Chen Date: Tue Feb 6 18:50:48 2024 +1100 Fix SWA inference commit aa0427f8982dce16411e826f2ce0bcd0da2520a5 Author: Daniel Han-Chen Date: Tue Feb 6 02:11:06 2024 +1100 Update save.py commit 262289d5a54f1408805dda9ae908f6015843e0a4 Author: Daniel Han-Chen Date: Tue Feb 6 01:54:22 2024 +1100 Update save.py commit 53ca91f74439b01169d77e449557044d5114c068 Author: Daniel Han-Chen Date: Tue Feb 6 01:30:48 2024 +1100 Update save.py commit e487abd94ab82f1acd8a25aba06be6241884c841 Author: Daniel Han-Chen Date: Mon Feb 5 19:07:00 2024 +1100 Update save.py commit 797a87a3e37f23eee3f8e655c5a7089852cd38da Author: Daniel Han-Chen Date: Mon Feb 5 18:53:27 2024 
+1100 Update save.py commit e9031ceabe3b067f5fce89ca91a4ef2d7a804cf9 Author: Daniel Han-Chen Date: Mon Feb 5 18:48:40 2024 +1100 mistral swa commit 33192d6dc8096135afa9ffc6d03ea0dfaba44f33 Author: Daniel Han-Chen Date: Mon Feb 5 18:05:25 2024 +1100 Update save.py commit 03ed3a83d586ac62274185e0c8f4d9b62100647b Author: Daniel Han-Chen Date: Mon Feb 5 17:38:01 2024 +1100 Torch 2.2.0 commit a50daa19d8f9682ad0c1c0b80b79a4f68027dd71 Author: Daniel Han-Chen Date: Mon Feb 5 17:32:43 2024 +1100 Update save.py commit d69cef9f23948eb41c5becd7c4d45ad52b1c41fe Author: Daniel Han-Chen Date: Mon Feb 5 17:16:20 2024 +1100 Update save.py commit 1750b13a6339d61e9269a2d1f58382acde5c3a8a Merge: 63ed23a 35f2ab4 Author: Daniel Han-Chen Date: Mon Feb 5 02:29:56 2024 +1100 Merge branch 'main' into nightly commit 63ed23ae9852e0a29fca4aae63620bc3b5458455 Author: Daniel Han-Chen Date: Sun Feb 4 14:02:26 2024 +1100 Update utils.py commit 990068b9778191a00eb1d9edfa333bf30235d6c2 Author: Daniel Han-Chen Date: Sun Feb 4 13:50:21 2024 +1100 Update utils.py commit 6ab4019e4a42b28e31be510e637d9140b7959f9b Author: Daniel Han-Chen Date: Sun Feb 4 04:07:47 2024 +1100 Update llama.py commit 7ab34266080070b87f62955e23de5d0f447d2ad1 Author: Daniel Han-Chen Date: Sun Feb 4 03:57:29 2024 +1100 Update llama.py commit 201d90c3ba9393dfafdfb79191feca240971a95b Author: Daniel Han-Chen Date: Sun Feb 4 03:54:12 2024 +1100 Update llama.py commit 00242d5f8923af6bcea5d99336417788a18a3cbc Author: Daniel Han-Chen Date: Sun Feb 4 03:52:08 2024 +1100 Update llama.py commit 3bfb0ebf9dbf857dba9098b04c25de29a7834612 Author: Daniel Han-Chen Date: Sun Feb 4 03:50:31 2024 +1100 Update llama.py commit 71899d7060bed8002769a77e9b88510bb5dc900a Author: Daniel Han-Chen Date: Sun Feb 4 03:48:18 2024 +1100 Update llama.py commit 9cd251794989f05f96311b64180646d805118a65 Author: Daniel Han-Chen Date: Sun Feb 4 03:46:30 2024 +1100 Update llama.py commit 24431b4347d733d12a672672457235955d934f28 Author: Daniel Han-Chen Date: Sun Feb 4 
03:45:25 2024 +1100 Update llama.py commit 63816fc11916aa5097ca4d6bc9944ce92b8af891 Author: Daniel Han-Chen Date: Sun Feb 4 03:42:41 2024 +1100 Update llama.py commit 7166b11599468e23eacf896491fd2a085441bdbd Author: Daniel Han-Chen Date: Sun Feb 4 03:41:15 2024 +1100 Update llama.py commit d867b9bbdfd155a368584929f8bea286b84cc521 Author: Daniel Han-Chen Date: Sun Feb 4 03:27:15 2024 +1100 Update llama.py commit 9a5ebef14849371216c68e03e53730c93b8bb353 Author: Daniel Han-Chen Date: Sun Feb 4 03:23:26 2024 +1100 Update llama.py commit 65270cec60455430ea1c25b939b97dfa26971369 Author: Daniel Han-Chen Date: Sun Feb 4 03:13:42 2024 +1100 Update llama.py commit 80fa8e93c9dab4565ad5f08ed1300e89f2d6e46c Author: Daniel Han-Chen Date: Sun Feb 4 03:12:07 2024 +1100 Update llama.py commit 36b400e59895dd56e028c161890378bd8874e503 Author: Daniel Han-Chen Date: Sun Feb 4 03:08:49 2024 +1100 Update llama.py commit 4a5d3b1de78ddc562b76f5cea6b448c7349c23d9 Author: Daniel Han-Chen Date: Sun Feb 4 03:05:49 2024 +1100 Update llama.py commit 88a695da3a406825e5789fb5bd6cbe37fa35a479 Author: Daniel Han-Chen Date: Sun Feb 4 02:49:51 2024 +1100 Update llama.py commit ac9bc79251a6af5015bca0f752479aa25db09a28 Author: Daniel Han-Chen Date: Sun Feb 4 02:42:30 2024 +1100 Update llama.py commit 8c8685eeef82a326a7e376a9cb24deee1bd4f2de Author: Daniel Han-Chen Date: Sun Feb 4 02:40:30 2024 +1100 Update llama.py commit 607dfa1d0e2fa2b6af76e2b045a18d21a8e8c63b Author: Daniel Han-Chen Date: Sun Feb 4 02:35:17 2024 +1100 Update llama.py commit 54802ecbb9fa4777e6922468264de8612655f33b Author: Daniel Han-Chen Date: Sun Feb 4 02:32:21 2024 +1100 New version commit 711e5c09229af3781ce1e9c7b55d8f00a66aed09 Author: Daniel Han-Chen Date: Sun Feb 4 02:19:42 2024 +1100 attention_mask commit 74d7fc65c6539dc575af691485db43df2116f362 Author: Daniel Han-Chen Date: Sun Feb 4 02:03:23 2024 +1100 SDPA commit fcb884643bce8a83733ded21dc31031d1df3e120 Author: Daniel Han-Chen Date: Sun Feb 4 02:02:51 2024 +1100 Update 
llama.py commit aa032fc80da5bfd38a3e01e722511b60f4816eaa Author: Daniel Han-Chen Date: Sun Feb 4 01:46:04 2024 +1100 Update mistral.py commit 31578f20101e910cc9350e19f067975880a829e9 Author: Daniel Han-Chen Date: Sun Feb 4 01:42:46 2024 +1100 fast inference commit 257cd7d531b4fd35186116a4f3843c96a0dd6511 Author: Daniel Han-Chen Date: Sat Feb 3 23:22:21 2024 +1100 Update llama.py commit e500b785f76306800dbef5d367a84b064a4c0d2e Author: Daniel Han-Chen Date: Sat Feb 3 23:12:16 2024 +1100 Update llama.py commit cf0fae9a55e97148ac1ac27f251f4fc4e2a0c968 Author: Daniel Han-Chen Date: Sat Feb 3 23:01:56 2024 +1100 Update llama.py commit 381b991c456fe66182e85a1b360f55fc3241ec06 Author: Daniel Han-Chen Date: Sat Feb 3 22:42:11 2024 +1100 Update llama.py commit 665908eb70783b2cf8e558f0eb0913f66a5ca186 Author: Daniel Han-Chen Date: Sat Feb 3 22:28:20 2024 +1100 Update llama.py commit 5534f8a8f188ba29c93832d88f3748438e89d9a6 Author: Daniel Han-Chen Date: Sat Feb 3 20:22:42 2024 +1100 more temp matrices commit 68db1c7af7138d2eb6d7cb49fc01556a92e7d15e Author: Daniel Han-Chen Date: Sat Feb 3 19:44:36 2024 +1100 fast inference again commit d76f583349d1b3cffec5b81fbba7ddf38b6156c0 Author: Daniel Han-Chen Date: Sat Feb 3 19:30:22 2024 +1100 Update mistral.py commit 9225dd6708e1e158068007c04a97b95deeccc4de Author: Daniel Han-Chen Date: Sat Feb 3 19:19:13 2024 +1100 Update llama.py commit 82708212693114b0c89376fbc86ec57b48144799 Author: Daniel Han-Chen Date: Sat Feb 3 19:08:42 2024 +1100 Update llama.py commit 1c6e1f18b4741a8f99111d5081b7b6e936ed0bc0 Author: Daniel Han-Chen Date: Sat Feb 3 18:32:18 2024 +1100 Update llama.py commit dd03abedd6efb210342717b6db6c23c0e5a7a9ba Author: Daniel Han-Chen Date: Sat Feb 3 18:20:06 2024 +1100 Update llama.py commit 522f6dbfd29734576f0e20ffe10c5c482365f217 Author: Daniel Han-Chen Date: Sat Feb 3 18:07:55 2024 +1100 Update llama.py commit ad357dec89dde48f72459e8b41a8f99f1bd1f216 Author: Daniel Han-Chen Date: Sat Feb 3 03:52:25 2024 +1100 fast 
inference + saving config.json commit a78d6fba7ed371403f2dc7ebe6cf46a171a3f130 Author: Daniel Han-Chen Date: Sat Feb 3 03:34:02 2024 +1100 Update llama.py commit fa3d23406effd39409477cb23ace64eaf40d2178 Author: Daniel Han-Chen Date: Sat Feb 3 03:10:39 2024 +1100 Update utils.py commit 8e3f0296be2520e0d948d51a91dda00c7452e305 Author: Daniel Han-Chen Date: Sat Feb 3 02:55:22 2024 +1100 Update utils.py commit 404e177adbb6151cb2e01041394a69e9d729401b Author: Daniel Han-Chen Date: Sat Feb 3 02:30:47 2024 +1100 Update utils.py commit a81d19383047d3bfaae4e79efe171f22dd83439d Author: Daniel Han-Chen Date: Sat Feb 3 02:20:00 2024 +1100 Update utils.py commit 4436a1099db55ece184741ac4f2a0dd0ed4f7cb3 Author: Daniel Han-Chen Date: Sat Feb 3 01:50:48 2024 +1100 Update llama.py commit a15ffc95e03358ff0cca67520462262c9067261c Author: Daniel Han-Chen Date: Sat Feb 3 01:48:03 2024 +1100 Update llama.py commit 0be46406d88808116976861b0ac9fc2e04f76467 Author: Daniel Han-Chen Date: Sat Feb 3 01:39:39 2024 +1100 Update llama.py commit e9110471880fee872f9e08b8025f6382c0c674cf Author: Daniel Han-Chen Date: Sat Feb 3 01:35:45 2024 +1100 Update llama.py commit c934c16e21f04f242dd51390e7db278d006b529b Author: Daniel Han-Chen Date: Sat Feb 3 01:29:50 2024 +1100 Update llama.py commit 705bbba5c5f4a932ee6a382610e104ff0a3d1a16 Author: Daniel Han-Chen Date: Sat Feb 3 01:22:53 2024 +1100 Update llama.py commit 9c2bed35b966e622390da3363cc27e637e2d30b1 Author: Daniel Han-Chen Date: Fri Feb 2 23:45:02 2024 +1100 Update llama.py commit ea9b4eea0cb07bf02339f226d285e76e3b4d94e0 Author: Daniel Han-Chen Date: Fri Feb 2 23:40:52 2024 +1100 Update llama.py commit c6ad936f887387019eb5499a4811c7cc8ce6700e Author: Daniel Han-Chen Date: Fri Feb 2 23:32:58 2024 +1100 Update llama.py commit 03df29111013a652e2c37965688366bbd416dcc9 Author: Daniel Han-Chen Date: Fri Feb 2 23:32:36 2024 +1100 Update llama.py commit dc2740416ba3940fadb588732fc25ab2f35bdcc6 Author: Daniel Han-Chen Date: Fri Feb 2 22:26:47 2024 +1100 
Update llama.py commit b33c92d1bd5c1da325ee7b19254352cce8fd31b6 Author: Daniel Han-Chen Date: Fri Feb 2 22:22:42 2024 +1100 Update llama.py commit 923c6bad145c5ec085aa80c61055b16486720815 Author: Daniel Han-Chen Date: Fri Feb 2 22:20:45 2024 +1100 Update llama.py commit b3703926ca42eef6d0f975623f7c0b8f4634a87d Author: Daniel Han-Chen Date: Fri Feb 2 22:17:02 2024 +1100 Update llama.py commit 1b274212e127812ea6c463b6b8b1b6bdb4088eb5 Author: Daniel Han-Chen Date: Fri Feb 2 20:35:00 2024 +1100 Update llama.py commit 9df19aeb9fec1e985d4e9f8d2b273f25baa1a0d8 Author: Daniel Han-Chen Date: Fri Feb 2 20:32:21 2024 +1100 past_key_values commit 82eea75cde8bf4288ebee9fd014b32b612943fff Author: Daniel Han-Chen Date: Fri Feb 2 20:29:35 2024 +1100 torch compile commit e74807809571eea89e18c9932eb661cf28b6a27e Author: Daniel Han-Chen Date: Fri Feb 2 20:24:44 2024 +1100 Update llama.py commit 9146fa443dca4768e428ae06fefe7c1ccc7e86b8 Author: Daniel Han-Chen Date: Fri Feb 2 20:24:28 2024 +1100 Update llama.py commit a5a123db08745ed3a179fede894c03c82f36ae54 Author: Daniel Han-Chen Date: Fri Feb 2 20:20:14 2024 +1100 Update llama.py commit 82ead808d59958e767fdc22e245916f529e2b4cd Author: Daniel Han-Chen Date: Fri Feb 2 20:19:15 2024 +1100 Update llama.py commit 1497521e5c4a26463acb792615cda11bf112b081 Author: Daniel Han-Chen Date: Fri Feb 2 20:06:52 2024 +1100 Update mistral.py commit c3c454a7299b9ad58252bcd29479573328f436df Author: Daniel Han-Chen Date: Fri Feb 2 20:06:31 2024 +1100 Update llama.py commit f299c9cde10b673f0938910dd60d889d31fe8ba1 Author: Daniel Han-Chen Date: Fri Feb 2 19:44:52 2024 +1100 Update llama.py commit 0e1d67d4e1c59f9f215a4eb10b2ae2949e703844 Author: Daniel Han-Chen Date: Fri Feb 2 19:07:41 2024 +1100 fast inference commit 168ded977ecae0bf22308f6ee6ec79b6c63076c8 Author: Daniel Han-Chen Date: Fri Feb 2 18:28:05 2024 +1100 Update llama.py commit bf441055caa7865c52c99059030b8b09ee85d136 Author: Daniel Han-Chen Date: Fri Feb 2 18:14:31 2024 +1100 faster inference 
commit 7c2b04254cbae8e58dedf8ea57375f721053233e Author: Daniel Han-Chen Date: Fri Feb 2 16:59:02 2024 +1100 Update llama.py commit ca99d7c19477193891ba36c71c61fe17d1589579 Author: Daniel Han-Chen Date: Fri Feb 2 16:46:30 2024 +1100 Update mistral.py commit 40e8848c4e7d4bba7490ce9e1a02f43e5dea1509 Author: Daniel Han-Chen Date: Fri Feb 2 15:56:56 2024 +1100 Update llama.py commit 6cc98350213795dbc30a735670153cb486f3fc61 Author: Daniel Han-Chen Date: Fri Feb 2 13:52:12 2024 +1100 Update llama.py commit 7ad1a1fa62cb2717640678bf1a296f0a71e3d37a Author: Daniel Han-Chen Date: Fri Feb 2 04:18:34 2024 +1100 Update llama.py commit 0b661a23b9993e735f74b8be4e973566d8c8df4a Author: Daniel Han-Chen Date: Fri Feb 2 03:55:36 2024 +1100 Update llama.py commit da64d3403cdf6476767213975d43aae9377973ee Author: Daniel Han-Chen Date: Thu Feb 1 23:54:01 2024 +1100 Update llama.py commit abc47836dcac59f90d818b5905a30e62937eb84d Author: Daniel Han-Chen Date: Thu Feb 1 23:41:32 2024 +1100 Update llama.py commit f231c4f395be0735a6b1947fe084a2d81f4190fd Author: Daniel Han-Chen Date: Thu Feb 1 23:41:23 2024 +1100 Update llama.py commit 5f3c51b394e1765a1b4bca45232df0ea6a3e1151 Author: Daniel Han-Chen Date: Thu Feb 1 23:41:11 2024 +1100 Update llama.py commit e0ea2382568a851b46df571132162e0b30cc0198 Author: Daniel Han-Chen Date: Thu Feb 1 23:40:36 2024 +1100 Update llama.py commit 73f63d6884a83b6c61114feabcbe8121a3f0625f Author: Daniel Han-Chen Date: Thu Feb 1 23:38:07 2024 +1100 Update llama.py commit e4b5e38800b365c5d0c026adc3a82b523ed10384 Author: Daniel Han-Chen Date: Thu Feb 1 23:17:53 2024 +1100 inference commit 334c5ed1f05d6a2dbed3a389d565800dc4792153 Author: Daniel Han-Chen Date: Thu Feb 1 22:48:54 2024 +1100 Update llama.py commit cd39f6108febda2d3b491f899f82a87f59f23a2d Author: Daniel Han-Chen Date: Thu Feb 1 22:34:33 2024 +1100 lm_head commit 24c4c37b7cd88aa36d61292cd2ab097b36236a09 Author: Daniel Han-Chen Date: Thu Feb 1 22:21:35 2024 +1100 revert commit 
e791db9b06f4aa848741e09d1f1e7b69abca9288 Author: Daniel Han-Chen Date: Thu Feb 1 20:12:59 2024 +1100 Update llama.py commit 1793a16c8f2c3e68cfda57665bb85a7ac9279a02 Author: Daniel Han-Chen Date: Thu Feb 1 20:03:54 2024 +1100 faster inference commit 19fb50e24498086cbae0fcc942616630779face4 Author: Daniel Han-Chen Date: Thu Feb 1 19:56:54 2024 +1100 Update utils.py commit b83cea7cb46e748d179159a058712754d4a2f4b5 Author: Daniel Han-Chen Date: Thu Feb 1 19:56:06 2024 +1100 Update llama.py commit 8920cafbe7ce552437521084b4756a0810f075cb Author: Daniel Han-Chen Date: Thu Feb 1 19:46:38 2024 +1100 inference commit 38b59825b1b6fe6a96afd42c5b4d0ab8c9e11cd5 Author: Daniel Han-Chen Date: Thu Feb 1 19:36:36 2024 +1100 Update llama.py commit e8ec80a4c25832ebe88c70850adc2fef310d3abf Author: Daniel Han-Chen Date: Thu Feb 1 19:19:44 2024 +1100 Update llama.py commit a2cb7a113b3f03de5a45108cd2e2e866aabf8b1c Author: Daniel Han-Chen Date: Thu Feb 1 19:07:30 2024 +1100 Update llama.py commit a5ee70b63ff70ffd7b99c49906adb4a8369cd4d5 Author: Daniel Han-Chen Date: Thu Feb 1 18:34:43 2024 +1100 Update llama.py commit e2f0dd8683adafaf9ad854b808065b4d4766df47 Author: Daniel Han-Chen Date: Thu Feb 1 18:16:46 2024 +1100 Update llama.py commit 3f0ddf0c7c3500191bbb6fd254dc814a017123c0 Author: Daniel Han-Chen Date: Thu Feb 1 18:16:15 2024 +1100 Update llama.py commit e90b3bf192b3e1256d99ab281c29edddf42ec532 Author: Daniel Han-Chen Date: Thu Feb 1 17:59:21 2024 +1100 Update llama.py commit 57044509ad692c3b72a4da5874cf905c9c44d8ce Author: Daniel Han-Chen Date: Thu Feb 1 17:44:22 2024 +1100 Update llama.py commit cf4b58eeb6689616b6f5509cf2bacf68234dbc24 Author: Daniel Han-Chen Date: Thu Feb 1 17:20:02 2024 +1100 inference commit 648c79ec1b7c8920e6cd0f265dc9f01eda5c6655 Author: Daniel Han-Chen Date: Thu Feb 1 03:47:51 2024 +1100 faster inference commit 20f19391e69c8e6f01cad6883f716b2519f1214d Author: Daniel Han-Chen Date: Thu Feb 1 03:05:02 2024 +1100 Update mistral.py commit 
e2f72fe52f3a4fce6578a9641cadcd503d20d99b Author: Daniel Han-Chen Date: Thu Feb 1 03:04:46 2024 +1100 Revert commit 329f80ac4c88a5693086f42a9188fb5f6ab59765 Author: Daniel Han-Chen Date: Thu Feb 1 02:47:57 2024 +1100 Update llama.py commit 713a95ca0e3f50a330db7072855f56458503472b Author: Daniel Han-Chen Date: Thu Feb 1 02:46:26 2024 +1100 Inference commit acbdef7ff5eef0be43e2028f847a1e5dccb75976 Author: Daniel Han-Chen Date: Wed Jan 31 20:16:08 2024 +1100 padding commit 0dc26ed98a871659e6db94f63e07bcda36ce6616 Author: Daniel Han-Chen Date: Wed Jan 31 20:06:28 2024 +1100 Update llama.py commit 9f312545398fb823f6ae34bace8c8f71c9c21688 Author: Daniel Han-Chen Date: Wed Jan 31 20:02:21 2024 +1100 Fix SDPA commit 7227de48cfb8bbe00d03e41897ab5881376c0c99 Author: Daniel Han-Chen Date: Wed Jan 31 19:48:33 2024 +1100 Update llama.py commit 5edad55b5d8d17e4ef493d06984a55b337139c1a Author: Daniel Han-Chen Date: Wed Jan 31 19:44:19 2024 +1100 Update llama.py commit c928c57aa9f31ab799098bacb4c402cd818226a0 Merge: 5da0555 051a73b Author: Daniel Han-Chen Date: Wed Jan 31 19:38:27 2024 +1100 Merge branch 'main' into nightly commit 5da05558a036334a893e06e6c1c7e0a9e7e66ce6 Author: Daniel Han-Chen Date: Wed Jan 31 03:50:47 2024 +1100 past_key_value commit d347db0944650df0dcc8aebcc6140424c4a7d44a Author: Daniel Han-Chen Date: Wed Jan 31 03:46:11 2024 +1100 Update loader.py commit c0edaa46db141441bf0a0702998f2e6c4e53986e Author: Daniel Han-Chen Date: Wed Jan 31 03:40:55 2024 +1100 revert inference commit 55fe6052ca2edeba9612b093e32061fd3cc0a983 Author: Daniel Han-Chen Date: Wed Jan 31 03:36:40 2024 +1100 if past_key_value is not None and q_len == 1: commit 248887b51c2fa946110e71be89a02f3146d560bc Author: Daniel Han-Chen Date: Wed Jan 31 03:35:24 2024 +1100 LlamaAttention_fast_forward_inference commit c68a3bc9ecea534fa296a06ddf76ac00dace163d Author: Daniel Han-Chen Date: Wed Jan 31 03:28:30 2024 +1100 Update loader.py commit 44168377f330e26f5721d8510196487b0de6afe4 Author: Daniel 
Han-Chen Date: Wed Jan 31 01:32:56 2024 +1100 Update rope_embedding.py commit 270df81b60a46ad2bbb35d2c9d6c8dce243565ab Author: Daniel Han-Chen Date: Tue Jan 30 17:33:08 2024 +1100 Remove fast path commit b8f665bf227ef00c5865840ca27fa75a02b51ea4 Author: Daniel Han-Chen Date: Tue Jan 30 17:26:45 2024 +1100 fast lm_head commit ed5a653ecfd81c4d74ce71bc2fc44863b7dcf7c2 Author: Daniel Han-Chen Date: Tue Jan 30 04:16:39 2024 +1100 Update mistral.py commit e0bad0eec5ecb7aeb3916fd51ce4a6b5b5db14fa Author: Daniel Han-Chen Date: Tue Jan 30 04:10:14 2024 +1100 Fix inference commit 71725aeea568b7841282736855971ad71efbc0e3 Author: Daniel Han-Chen Date: Tue Jan 30 03:23:42 2024 +1100 Update __init__.py commit 3ddda6f49299e607541f2cc2ae17e505a51c2f45 Author: Daniel Han-Chen Date: Tue Jan 30 03:08:46 2024 +1100 Update mistral.py commit 6f74c98fbc00fa8e2ea80d7ff3b76b41aa83d427 Author: Daniel Han-Chen Date: Mon Jan 29 23:19:43 2024 +1100 Update utils.py commit a7bfeec9193eeef53756cc6e8dbe5e931abf0a56 Author: Daniel Han-Chen Date: Mon Jan 29 23:14:51 2024 +1100 Update utils.py commit 7c87d60bc1f230b501eff30344cc36e585edab0f Author: Daniel Han-Chen Date: Mon Jan 29 23:07:40 2024 +1100 Update utils.py commit bb364204cc23f697a349589aeaf8885ce1d2ddb1 Author: Daniel Han-Chen Date: Mon Jan 29 20:09:59 2024 +1100 Update llama.py commit 4700d51916c58c043724cc8dc1ee363fb1927e0e Author: Daniel Han-Chen Date: Mon Jan 29 19:55:40 2024 +1100 Fast inference repatch commit 58cabcbc06a59b2ecb08e88bf6148de5fb780753 Merge: 01b8162 0562464 Author: Daniel Han-Chen Date: Mon Jan 29 17:59:34 2024 +1100 Merge branch 'main' into nightly commit 01b8162244a74dab1d37061248f088441896ef83 Merge: 5cfea20 206a9b6 Author: Daniel Han-Chen Date: Mon Jan 29 17:49:57 2024 +1100 Merge branch 'main' into nightly commit 5cfea201294d66646ec65ca46fd1277f2b042530 Author: Daniel Han-Chen Date: Mon Jan 29 17:35:19 2024 +1100 Update llama.py commit 25a88ea003056aa3028267f60dcc1d0e30154ec1 Author: Daniel Han-Chen Date: Mon Jan 29 
16:52:04 2024 +1100 Update llama.py commit 03ca52dc942930090859041483a855f72bf686f0 Author: Daniel Han-Chen Date: Mon Jan 29 03:43:39 2024 +1100 saving commit 01e5c305f997e70f9c116225e005c1231fb1e298 Merge: e10e488 8faf469 Author: Daniel Han-Chen Date: Mon Jan 29 03:43:19 2024 +1100 Merge branch 'main' into nightly commit e10e48893bf6e93887c55ef7b966e99e7e76bb2f Author: Daniel Han-Chen Date: Mon Jan 29 03:43:17 2024 +1100 Update save.py commit 5bd916b91b0b943dc3080116984f8f5fb81c096f Author: Daniel Han-Chen Date: Mon Jan 29 03:43:05 2024 +1100 Update mistral.py commit 11ba2c520b91ea41bb978c48c589b810966465ad Author: Daniel Han-Chen Date: Mon Jan 29 03:34:27 2024 +1100 Mistral patch commit 498dfb8acc07993ecb1290b65738248d73c7914e Author: Daniel Han-Chen Date: Mon Jan 29 02:42:08 2024 +1100 print commit a3d2b9b778f682691e9196d38a58be756dc2708d Author: Daniel Han-Chen Date: Mon Jan 29 02:15:52 2024 +1100 Update save.py commit 9e00cc287ad31a45d1ffdd2a0fdfcc59008d2616 Author: Daniel Han-Chen Date: Mon Jan 29 00:59:29 2024 +1100 Update save.py commit 460de24ea2f2c2e03c120453469e54b388ddcc44 Author: Daniel Han-Chen Date: Mon Jan 29 00:57:11 2024 +1100 Update save.py commit b060d7b62120dc2e419d052108f93e68cee2b4f4 Author: Daniel Han-Chen Date: Mon Jan 29 00:56:44 2024 +1100 Update save.py commit 74b69775c22b39baef13241fb3f2b52c9c757e86 Author: Daniel Han-Chen Date: Mon Jan 29 00:51:57 2024 +1100 Update save.py commit fef0589d6e32c9b80a87eabb46e1a11753df39f9 Author: Daniel Han-Chen Date: Mon Jan 29 00:51:45 2024 +1100 Update save.py commit 9dec4b3f0863291fb402207db9ea1b9d517a2383 Author: Daniel Han-Chen Date: Mon Jan 29 00:49:31 2024 +1100 Update save.py commit ee0bf6fe6681c18850d7f9f8f5cab3b643f15187 Author: Daniel Han-Chen Date: Sun Jan 28 20:07:17 2024 +1100 Update save.py commit c69c166b822c26dc1b9680544935414596b00361 Author: Daniel Han-Chen Date: Sun Jan 28 18:12:11 2024 +1100 patch_saving_functions commit ac02ba6c3849711e6ab42719cacf75f1109c72fc Author: Daniel 
Han-Chen Date: Sun Jan 28 18:08:52 2024 +1100 Update save.py commit 20e524a5967ae311ca7db183a0b800ff56ab316f Author: Daniel Han-Chen Date: Sun Jan 28 18:04:35 2024 +1100 Update save.py commit 788e69518091d1f5ca221cf0cfaf2501eb298e3f Author: Daniel Han-Chen Date: Sun Jan 28 17:48:34 2024 +1100 Patch saving commit 31222ced74d977c7ae04f6b1e2824b6a6e5729fb Merge: 893aab0 1ecc018 Author: Daniel Han-Chen Date: Sun Jan 28 17:48:28 2024 +1100 Merge branch 'main' into nightly commit 893aab0e573b6f77c7c9f1a75fa6f1264310ca81 Author: Daniel Han-Chen Date: Sun Jan 28 17:20:49 2024 +1100 Update dpo.py commit d5c852e7115f7a5ee20628361f95d1d4f905a098 Author: Daniel Han-Chen Date: Sun Jan 28 16:47:33 2024 +1100 Update llama.py commit ddb48efd33fd316fe30c2a946ee5e25858992aa7 Author: Daniel Han-Chen Date: Sun Jan 28 16:43:10 2024 +1100 Update llama.py commit 5fe166d32e18b2bd8622826348c682accbc5ff35 Author: Daniel Han-Chen Date: Sun Jan 28 16:40:59 2024 +1100 Update llama.py commit 2d64e0a904c12ca01f2336419294bc69df906b51 Author: Daniel Han-Chen Date: Sun Jan 28 16:37:21 2024 +1100 Update mistral.py commit 2f73cb4049a6328b354ebbc61f906e7e5a2e5043 Author: Daniel Han-Chen Date: Sun Jan 28 16:31:01 2024 +1100 Update llama.py commit 6aa46ffff69b59482c7abec0faa2c357c4435159 Author: Daniel Han-Chen Date: Sun Jan 28 15:13:54 2024 +1100 Update llama.py commit ee6f5096edc33acbcbdd513829bfcdc48993f464 Author: Daniel Han-Chen Date: Sun Jan 28 13:53:06 2024 +1100 attention mask commit f1b0fd084817449e16cce938c5a20cd9474b08c9 Author: Daniel Han-Chen Date: Sun Jan 28 13:37:38 2024 +1100 Update mistral.py commit 7663a32753743b068f6c68ec97f6ad2f86a9e4b9 Author: Daniel Han-Chen Date: Sun Jan 28 04:29:52 2024 +1100 Update save.py commit a158003625c502f7d5e0fa5d8f6cfc2ec35f9498 Merge: 36829b7 cd32ba7 Author: Daniel Han-Chen Date: Sun Jan 28 04:29:35 2024 +1100 Merge branch 'nightly' of https://github.com/unslothai/unsloth into nightly commit 36829b7ec44d381cd34f026bfa7e6f5ed3026e95 Author: Daniel 
Han-Chen Date: Sun Jan 28 04:27:10 2024 +1100 Update save.py commit 917ce1586115b478db581b4f4535a5ca52d0ca13 Merge: 166f8c8 89daa0e Author: Daniel Han-Chen Date: Sun Jan 28 04:20:15 2024 +1100 Merge branch 'main' into nightly commit 166f8c812ecab9e60fa90794a1c666a98c534d95 Author: Daniel Han-Chen Date: Sun Jan 28 04:18:19 2024 +1100 attention mask commit 6c7f0dbcb4eee100e19cca98e6d728f4beafbdd2 Author: Daniel Han-Chen Date: Sun Jan 28 04:08:19 2024 +1100 Update llama.py commit c836ed7f3d06750983c7020462260b550a70e188 Author: Daniel Han-Chen Date: Sun Jan 28 04:08:03 2024 +1100 Update mistral.py commit 12d57e5308948d9b7f9b523039d32e6746a3bcb8 Author: Daniel Han-Chen Date: Sun Jan 28 03:59:23 2024 +1100 labels commit 4c5ebcc960393131841cc3685b9155722ceca2bd Author: Daniel Han-Chen Date: Sun Jan 28 03:56:54 2024 +1100 Update llama.py commit 6a027a8292f83480fb2cdb7ee8e135733bd77b70 Author: Daniel Han-Chen Date: Sun Jan 28 03:55:23 2024 +1100 Update llama.py commit 9f9739cbace7f8d730c905319c96f9a268634aad Author: Daniel Han-Chen Date: Sun Jan 28 03:49:54 2024 +1100 attention_mask commit 2bd77e7277b56e6179f470e64ede948512b710e9 Author: Daniel Han-Chen Date: Sun Jan 28 02:34:03 2024 +1100 Update fast_lora.py commit a1e5aca7cca5144e0625175c6c85481ec1adda09 Author: Daniel Han-Chen Date: Sun Jan 28 01:27:46 2024 +1100 Update fast_lora.py commit a2f705d65b9d80518577cc97126d3faacab6b33f Author: Daniel Han-Chen Date: Sun Jan 28 01:17:27 2024 +1100 Update fast_lora.py commit d01ba458df8009e490ffb203e29532c09c1d211b Author: Daniel Han-Chen Date: Sun Jan 28 00:45:55 2024 +1100 Update fast_lora.py commit 6fa0635971e086b954f9302eeb875942c3bba6a5 Author: Daniel Han-Chen Date: Sat Jan 27 23:20:41 2024 +1100 Update fast_lora.py commit e094af81a12d053ca718832c552bae43fe206ed0 Author: Daniel Han-Chen Date: Sat Jan 27 20:39:19 2024 +1100 Update fast_lora.py commit c74e1af85c0807160edab4006686bbfce00b35d4 Author: Daniel Han-Chen Date: Sat Jan 27 20:37:43 2024 +1100 Update fast_lora.py 
commit 363ffba1c241ebffa2a255162c13d681a352e7bc Author: Daniel Han-Chen Date: Sat Jan 27 20:17:08 2024 +1100 Update fast_lora.py commit 510c85f41263e1a1250d95351e3d06c8b89b8acf Author: Daniel Han-Chen Date: Sat Jan 27 20:04:55 2024 +1100 Update swiglu.py commit 35daafdd6e3b233508a569835feb40a2acc0cb61 Author: Daniel Han-Chen Date: Sat Jan 27 19:53:57 2024 +1100 Update fast_lora.py commit 6201f7681f9f3b359175c9574c05549bb80214c2 Author: Daniel Han-Chen Date: Sat Jan 27 19:38:28 2024 +1100 Update fast_lora.py commit 86a1c9788b3b60a2e2dc6fe8f4037444e65da81a Author: Daniel Han-Chen Date: Sat Jan 27 19:29:53 2024 +1100 Update fast_lora.py commit 8e0e4ccdc69de918b79edec65ce62c40bba8ba58 Author: Daniel Han-Chen Date: Sat Jan 27 19:18:59 2024 +1100 Update fast_lora.py commit 2e4c59deaf8c53d6632bb1364c33a9d7e0aeb63e Author: Daniel Han-Chen Date: Sat Jan 27 19:05:02 2024 +1100 Update swiglu.py commit 85e87d9ba2849c09ecc342ac1bce33a91a6638f1 Author: Daniel Han-Chen Date: Sat Jan 27 19:03:04 2024 +1100 Swiglu commit 83b6937285c83c1123c78d38ba255610d3e37242 Author: Daniel Han-Chen Date: Sat Jan 27 18:21:29 2024 +1100 Update fast_lora.py commit 3d3e7f5b42e279751975ee7878ba1c1618532103 Author: Daniel Han-Chen Date: Sat Jan 27 18:15:34 2024 +1100 Update fast_lora.py commit d3f3b6fc494ab1409f09387d8d039bd11bc53595 Author: Daniel Han-Chen Date: Sat Jan 27 18:00:41 2024 +1100 Update fast_lora.py commit e77d7c069f04f56c065df610ba81409bcb408434 Author: Daniel Han-Chen Date: Sat Jan 27 18:00:22 2024 +1100 Update fast_lora.py commit f7d11d10f84fa667065c3d2e238ce937d2b68684 Author: Daniel Han-Chen Date: Sat Jan 27 17:47:54 2024 +1100 Update fast_lora.py commit 8ed03f5f45cc31cc72a85df57c42112c88da5bcd Author: Daniel Han-Chen Date: Sat Jan 27 17:38:30 2024 +1100 Update pyproject.toml commit af65cb0d3da5368a4062c074f2ecb80965baf4a7 Author: Daniel Han-Chen Date: Sat Jan 27 04:50:47 2024 +1100 Works? 
commit fb5333726a90946085fa6dae1123246e706801b4 Author: Daniel Han-Chen Date: Sat Jan 27 04:49:36 2024 +1100 Update llama.py commit a59ec7903cc5968bd1cb48af6c118af176c816d8 Merge: 704e36a 87a7ef1 Author: Daniel Han-Chen Date: Sat Jan 27 04:48:40 2024 +1100 Merge branch 'main' into nightly commit 704e36a64e8e290bce5bc6474a5c97d37184f7c8 Author: Daniel Han-Chen Date: Sat Jan 27 04:48:34 2024 +1100 Revert "Update llama.py" This reverts commit a208ec46e012cf470ecefe6268a66358215df7b6. commit a208ec46e012cf470ecefe6268a66358215df7b6 Author: Daniel Han-Chen Date: Sat Jan 27 04:48:03 2024 +1100 Update llama.py commit bd2ff90817fb1fdbe057b88f61e781b9d68b1c84 Author: Daniel Han-Chen Date: Sat Jan 27 04:47:42 2024 +1100 Update llama.py commit a3d892a15b0cb828164c67596cee6dc0c64a6818 Author: Daniel Han-Chen Date: Sat Jan 27 04:47:01 2024 +1100 Update llama.py commit b89599a3f728235d3714df736730dad8bbb5eb07 Author: Daniel Han-Chen Date: Sat Jan 27 04:46:47 2024 +1100 Update llama.py commit baeea64917bc27d47627d8c797e3b270a300dcdd Author: Daniel Han-Chen Date: Sat Jan 27 04:20:58 2024 +1100 Update save.py commit ecdbb28dcb949467b59e21cafc40a94cc4eebba8 Author: Daniel Han-Chen Date: Sat Jan 27 04:19:59 2024 +1100 Update save.py commit 393341f21174904fdda983454de4782e786e0471 Author: Daniel Han-Chen Date: Sat Jan 27 04:18:25 2024 +1100 Update swiglu.py commit 47babc780af9a6c724e43de422bec6652af4e986 Author: Daniel Han-Chen Date: Sat Jan 27 04:16:38 2024 +1100 Update fast_lora.py commit 9edc309f595a27175e24601df3cfe3b50bef7bd3 Author: Daniel Han-Chen Date: Sat Jan 27 04:15:37 2024 +1100 Update llama.py commit a27ac6175eaaf19e1a535e2dc8452cf62b3c8d90 Author: Daniel Han-Chen Date: Sat Jan 27 04:03:47 2024 +1100 Update utils.py commit 7fb64e0e31d871ada3c1d3ac8114304f8f62ccea Author: Daniel Han-Chen Date: Sat Jan 27 03:58:21 2024 +1100 Update fast_lora.py commit e3bd0bb74f8dfe70146c400ec9c6535cf191e119 Author: Daniel Han-Chen Date: Sat Jan 27 03:51:53 2024 +1100 Update save.py commit 
ba847f5411b55b948791f251c30b8221ec9727ec Author: Daniel Han-Chen Date: Sat Jan 27 03:38:33 2024 +1100 Update fast_lora.py commit c57495df6fd7b61e6fe2b8ce463f0c8ae06fb1d6 Author: Daniel Han-Chen Date: Sat Jan 27 03:38:19 2024 +1100 Update fast_lora.py commit d379bb8cf570fab2883a415c971bd731247a8943 Author: Daniel Han-Chen Date: Sat Jan 27 03:36:27 2024 +1100 Update fast_lora.py commit 421ed33b42ccc040badbd7193bc8da4aa383448d Author: Daniel Han-Chen Date: Sat Jan 27 03:23:50 2024 +1100 Update fast_lora.py commit 83ceb11cc9b76796826821ecc93e4ab6fe320802 Author: Daniel Han-Chen Date: Sat Jan 27 03:12:53 2024 +1100 Update fast_lora.py commit f3da0d2e4a661e799fda429f391454ab8d84e99d Author: Daniel Han-Chen Date: Sat Jan 27 02:52:54 2024 +1100 Update fast_lora.py commit 4ae3ad3bceaeb4cfe97b2e592e92920e2f7583de Author: Daniel Han-Chen Date: Sat Jan 27 02:34:21 2024 +1100 Update fast_lora.py commit c74d3dd3b9f1fb56289b61b7b66755dbd3495c33 Author: Daniel Han-Chen Date: Sat Jan 27 02:22:15 2024 +1100 Update fast_lora.py commit 0a1aa98d3c6851ece9a48de5ff8c0ee804e6fb0a Author: Daniel Han-Chen Date: Sat Jan 27 02:19:00 2024 +1100 Update fast_lora.py commit f7760719f0a48de63030ca61c6b754a5c6571d70 Author: Daniel Han-Chen Date: Sat Jan 27 01:28:03 2024 +1100 Update fast_lora.py commit 38bff800b64c40abd60707fcb54f772d18657a57 Author: Daniel Han-Chen Date: Sat Jan 27 01:16:31 2024 +1100 Update fast_lora.py commit 485d54fd70211c6ad94667e595fb12650cd95874 Author: Daniel Han-Chen Date: Fri Jan 26 23:48:17 2024 +1100 Update fast_lora.py commit ce08a15ceef7c4209feb09c3a241adf588604928 Author: Daniel Han-Chen Date: Fri Jan 26 23:33:17 2024 +1100 Update fast_lora.py commit b4492eb29c088487307a8fecb0951cdfba301962 Author: Daniel Han-Chen Date: Fri Jan 26 23:14:44 2024 +1100 Update fast_lora.py commit 3865c8cd68219257f55ebfd5c4de432c828d4ddd Author: Daniel Han-Chen Date: Fri Jan 26 23:06:06 2024 +1100 Update fast_lora.py commit e9e34f1d4d5c29e7330dcf09d76774af3714b168 Author: Daniel Han-Chen 
Date: Fri Jan 26 22:29:38 2024 +1100 Update fast_lora.py commit d96f665510569b85d6671dbf04a5245de36c4bdb Author: Daniel Han-Chen Date: Fri Jan 26 22:21:54 2024 +1100 Update swiglu.py commit f8aa20d2db39879a1e82b2cc26f075939488a753 Author: Daniel Han-Chen Date: Fri Jan 26 22:09:32 2024 +1100 Update fast_lora.py commit ae0d9380c1561a77cda9d1d0163f079a44c2a384 Author: Daniel Han-Chen Date: Fri Jan 26 22:07:10 2024 +1100 Update swiglu.py commit 694da2dba3c48ab268497772277a2d801351b8f3 Author: Daniel Han-Chen Date: Fri Jan 26 20:06:52 2024 +1100 Update fast_lora.py commit 4f573494da67fde0036619a12f81314d7edea5fa Author: Daniel Han-Chen Date: Fri Jan 26 20:04:51 2024 +1100 Update fast_lora.py commit cb06ce849bec9bb6b1fc923f979a103aefb5d741 Author: Daniel Han-Chen Date: Fri Jan 26 19:41:48 2024 +1100 Update fast_lora.py commit e0a36b356b0db8d6b4bd57b752b639419faf3d45 Author: Daniel Han-Chen Date: Fri Jan 26 19:30:18 2024 +1100 Update fast_lora.py commit c4f0de58a96043dc84776972e8d1419503d3b973 Author: Daniel Han-Chen Date: Fri Jan 26 19:21:00 2024 +1100 Update fast_lora.py commit a0a409d5afa2290d4a96357b1bc7ae278dcec353 Author: Daniel Han-Chen Date: Fri Jan 26 19:20:53 2024 +1100 Update fast_lora.py commit 97658f9a74aa1259ae6c0a879c5fa1e7ca418a24 Author: Daniel Han-Chen Date: Fri Jan 26 19:12:08 2024 +1100 Update fast_lora.py commit 979de5222011f93d1bec74464f4d4151f56f3489 Author: Daniel Han-Chen Date: Fri Jan 26 19:00:22 2024 +1100 Update fast_lora.py commit 39d251757fe8ff6edaeaa63f18edab86aa379c2c Author: Daniel Han-Chen Date: Fri Jan 26 18:19:36 2024 +1100 Update fast_lora.py commit 77365cc740d7aecdf1a127d666238716b6f9d1f8 Author: Daniel Han-Chen Date: Fri Jan 26 18:11:43 2024 +1100 Update llama.py commit 4bfbccec4b399f5795d06b01101d471dd69ac20a Author: Daniel Han-Chen Date: Fri Jan 26 18:04:59 2024 +1100 Update fast_lora.py commit 56dffca23f93cf9ed492c3e1ceb4b467be0b7e92 Author: Daniel Han-Chen Date: Fri Jan 26 17:44:10 2024 +1100 Update llama.py commit 
259097bc61dc684557b66d7100155dc55d6628e9 Author: Daniel Han-Chen Date: Fri Jan 26 17:36:40 2024 +1100 Update fast_lora.py commit b8de6b6a9b4078d6fe1bfe4f7f5709851e32dc43 Author: Daniel Han-Chen Date: Fri Jan 26 17:25:36 2024 +1100 Update fast_lora.py commit 796aa4d0efbd3d996cc418dc4f0f02cad1a7a8f8 Author: Daniel Han-Chen Date: Fri Jan 26 17:13:09 2024 +1100 Update fast_lora.py commit 9ecb3bc869c79162ff0ea25d8fe62dd8370bcd4b Author: Daniel Han-Chen Date: Fri Jan 26 17:04:33 2024 +1100 Update fast_lora.py commit 7bed11a8ba1836d71415c3c87fcde5ae1cab2865 Author: Daniel Han-Chen Date: Fri Jan 26 16:43:50 2024 +1100 Update fast_lora.py commit 317bbc57fee180b6881480e460bdefacbdf6590a Author: Daniel Han-Chen Date: Fri Jan 26 16:16:32 2024 +1100 Update fast_lora.py commit c4ac728d4c87b10823499433cd0465d0766b5068 Author: Daniel Han-Chen Date: Fri Jan 26 16:08:10 2024 +1100 Update fast_lora.py commit ab449453365177ac7afa41bce24469de27222914 Author: Daniel Han-Chen Date: Fri Jan 26 14:06:25 2024 +1100 Update fast_lora.py commit 84772de40fec66aa99726f11a8f3bf4e992c02cb Author: Daniel Han-Chen Date: Fri Jan 26 13:59:57 2024 +1100 Update fast_lora.py commit 653f1036aeeab6b7c53092daebb81e0bff8f1166 Author: Daniel Han-Chen Date: Fri Jan 26 03:49:48 2024 +1100 Update fast_lora.py commit 46ec8bbc3da51e0d9cb54004dbbf89d27db80f19 Author: Daniel Han-Chen Date: Fri Jan 26 03:15:23 2024 +1100 Repatch commit 99eeebf72a85744ac3c5b35eab0418cfe12d697e Author: Daniel Han-Chen Date: Fri Jan 26 02:41:15 2024 +1100 Update swiglu.py commit ae156d9154c89eec3c5c52900fa63530638d0c70 Author: Daniel Han-Chen Date: Fri Jan 26 02:07:35 2024 +1100 Update llama.py commit 29945bd8c1ba951fe995462ae58a32124708c54d Author: Daniel Han-Chen Date: Fri Jan 26 01:33:23 2024 +1100 Update llama.py commit 194093e375b6545d62464aa8a18e001f1baf178e Author: Daniel Han-Chen Date: Thu Jan 25 23:35:56 2024 +1100 remove patching commit 4f60055f9eef9a8044adf9f1bf02ecb713341af6 Author: Daniel Han-Chen Date: Thu Jan 25 23:25:17 
2024 +1100 Update fast_lora.py commit 2d25facc7b3fab2b5cd703b5f5e1eb6f796ee520 Author: Daniel Han-Chen Date: Thu Jan 25 23:14:37 2024 +1100 Update fast_lora.py commit 380f2fd6e4065c67f400125641c52aa0daee9f91 Author: Daniel Han-Chen Date: Thu Jan 25 19:05:43 2024 +1100 Fix saving and bnb-4bit commit 0474451a6a952feee084b7f11460d64be7a26b02 Author: Daniel Han-Chen Date: Thu Jan 25 03:55:26 2024 +1100 Update pyproject.toml commit 5a5d34b8aef1ec7901891f2fdd75afb0756aff61 Author: Daniel Han-Chen Date: Thu Jan 25 03:47:23 2024 +1100 Update mapper.py commit bbf5ef65af9e3f18a20a27bef8f52fa6255b6a38 Author: Daniel Han-Chen Date: Thu Jan 25 03:46:23 2024 +1100 Graceful FA2 error + torch 2.1.1 commit 5ea888c5785098b2471db77d53cf02c000126988 Author: Daniel Han-Chen Date: Thu Jan 25 02:29:37 2024 +1100 Update to transformers 4.37 commit d2f1521a49f5aa013c5cafe853c7525843204f90 Author: Daniel Han-Chen Date: Tue Jan 23 23:23:05 2024 +1100 incorrect inference commit f186fe9bc9fe711c37416692298d54b6e843428a Author: Daniel Han-Chen Date: Tue Jan 23 19:44:45 2024 +1100 Update mistral.py commit e184b06c4b44d4635d6745603a6538f3a8384b65 Author: Daniel Han-Chen Date: Tue Jan 23 19:28:17 2024 +1100 Update mistral.py commit b31450099396648be37d0a0af57402788b754ba6 Author: Daniel Han-Chen Date: Tue Jan 23 19:23:58 2024 +1100 q_len issue commit d6e85d77310cf77d33e672592975aa90c2bd7afe Author: Daniel Han-Chen Date: Tue Jan 23 19:17:42 2024 +1100 q_len == 1 commit 5d9e68181eeef30d0c0758a68ff0ff3f5bfb4cdf Author: Daniel Han-Chen Date: Tue Jan 23 19:11:17 2024 +1100 hidden_states commit f311a8efccc4b4abc4cdd58eb736f5191f40eeee Author: Daniel Han-Chen Date: Tue Jan 23 18:04:20 2024 +1100 Update llama.py commit eeee6333ec5cb3c3d12cc392e184f0295d40b971 Merge: 20d8f22 a833f40 Author: Daniel Han-Chen Date: Tue Jan 23 18:04:10 2024 +1100 Merge branch 'main' into nightly commit 20d8f223d7a346289eb6fc684481a17ea509b7aa Author: Daniel Han-Chen Date: Tue Jan 23 03:47:28 2024 +1100 Fast LoRA saving commit 
1bb1c3c2b9ea27c0a630155a99526cc805f99ca7 Author: Daniel Han-Chen Date: Tue Jan 23 03:36:50 2024 +1100 LoRA commit 24c7a67556f5c3013e2b2ec0500afad1b19b38ce Author: Daniel Han-Chen Date: Tue Jan 23 02:44:52 2024 +1100 Update llama.py commit d87ef86991ed82c77bbcffd82d0313e64e555be2 Author: Daniel Han-Chen Date: Tue Jan 23 02:22:37 2024 +1100 Update llama.py commit a77c448939287e63c8380efb0bda0c6b031a1cb3 Author: Daniel Han-Chen Date: Tue Jan 23 02:18:37 2024 +1100 Update llama.py commit 5260ec2a0a554236c35a01a911bbb1a1cdc80890 Author: Daniel Han-Chen Date: Tue Jan 23 02:13:01 2024 +1100 Update llama.py commit 716e03fe1b013b1334071465232a75f07d9d3c43 Author: Daniel Han-Chen Date: Tue Jan 23 02:09:35 2024 +1100 Update llama.py commit 00f50876f7b308ea12e2f498e40d24bf2576d516 Author: Daniel Han-Chen Date: Tue Jan 23 02:03:25 2024 +1100 Update llama.py commit e5b53331376c9730cb606a4ba41a8cb2b7263005 Author: Daniel Han-Chen Date: Tue Jan 23 02:00:10 2024 +1100 Update llama.py commit 9a5062e6c4a4dc05bf930288113e131126377f68 Author: Daniel Han-Chen Date: Tue Jan 23 01:44:10 2024 +1100 Update llama.py commit 1289ae825b31ff7c88130fe197efa174b23c4338 Author: Daniel Han-Chen Date: Tue Jan 23 01:41:20 2024 +1100 RoPE commit 7e4140ebfca93fde875558b5303a367e7b0c1781 Author: Daniel Han-Chen Date: Tue Jan 23 01:37:36 2024 +1100 Update llama.py commit 1ba28d8e26ee3d46e4b2f47e59eb65a216f604f9 Author: Daniel Han-Chen Date: Tue Jan 23 01:33:51 2024 +1100 Update llama.py commit 3e1b244d5eef14713cb2f748c439b19c4c006a31 Author: Daniel Han-Chen Date: Tue Jan 23 01:29:03 2024 +1100 Fast inference RoPE commit 8647f0e86e5085e7b9fd5c8da8e8c9d23ba1e215 Author: Daniel Han-Chen Date: Tue Jan 23 01:06:34 2024 +1100 Update llama.py commit 085a8e944adb019b167d4372260125ced48e85ac Author: Daniel Han-Chen Date: Tue Jan 23 00:32:57 2024 +1100 inference commit f41a437540ee60e00d27147c49a0b51a9de37c71 Author: Daniel Han-Chen Date: Mon Jan 22 22:34:33 2024 +1100 Update utils.py commit 
31f0c9c08d5b4e076f88697a24c902000e5e6f22 Author: Daniel Han-Chen Date: Mon Jan 22 22:31:38 2024 +1100 Update utils.py commit 4220f6a6dcb7a39b886ec156b1ccc9a7703d42ae Author: Daniel Han-Chen Date: Mon Jan 22 22:26:55 2024 +1100 No print commit e1cbc9e4236c972f65d05002d207a4900d2162f0 Author: Daniel Han-Chen Date: Mon Jan 22 22:21:29 2024 +1100 Update utils.py commit 050c61bc0b77414bf9e26828786c2c81f841ff62 Author: Daniel Han-Chen Date: Mon Jan 22 20:04:27 2024 +1100 Update utils.py commit 7c3b647ba7e0f72d1bca9e14f36b00d2a1554013 Author: Daniel Han-Chen Date: Mon Jan 22 20:01:59 2024 +1100 fast_linear_forward commit 8c613e851fb7b003de082f4ba046feb277c6d96c Author: Daniel Han-Chen Date: Mon Jan 22 19:36:17 2024 +1100 Apache 2 commit a18f982e67206885180b8804cc589ca67dac2426 Author: Daniel Han-Chen Date: Mon Jan 22 19:31:34 2024 +1100 Max sequence lengths commit e38485feaa2b4e63b67ad55a0b5901152d10561f Author: Daniel Han-Chen Date: Mon Jan 22 18:18:23 2024 +1100 Mistral correct RoPE scaling commit 278de9f375f348e9bbe05ab9324a25f4316bb33e Author: Daniel Han-Chen Date: Mon Jan 22 04:33:44 2024 +1100 Update llama.py commit 0666589acec7aeff93da8aa7684adf4ab2c75e2c Author: Daniel Han-Chen Date: Mon Jan 22 04:28:24 2024 +1100 Update save.py commit e34393f975de67f1c08fed85198b0f06ffec6090 Author: Daniel Han-Chen Date: Mon Jan 22 04:25:37 2024 +1100 Update llama.py commit 2a3d4f3a8d7685f1334529a85c9d2ed1ffe97b1d Author: Daniel Han-Chen Date: Mon Jan 22 04:18:42 2024 +1100 fast inference commit a5ab4dc21a420c338c788f2d901ccd3683c2c027 Merge: 8828ece b370c9c Author: Daniel Han-Chen Date: Mon Jan 22 01:58:00 2024 +1100 Merge branch 'main' into nightly commit 8828eceb5ba206a26e5a114a616f388610b05eb7 Author: Daniel Han-Chen Date: Sun Jan 21 22:18:53 2024 +1100 Update llama.py commit fcde58859bf319f9edf4d09b871443fe195331a7 Author: Daniel Han-Chen Date: Sun Jan 21 20:06:14 2024 +1100 Update llama.py commit 2f80890578cb93bc6bd5e3a992c4842dd7a94645 Author: Daniel Han-Chen Date: Sun Jan 
21 19:21:56 2024 +1100 Update llama.py commit 7be801ff8d9359115179d57adaa0c050ee4238fa Author: Daniel Han-Chen Date: Sun Jan 21 19:10:48 2024 +1100 Update llama.py commit 92512e670ec5eeb7292e7955540a5ffcdfd552e9 Author: Daniel Han-Chen Date: Sun Jan 21 19:01:45 2024 +1100 Update llama.py commit 5c927e4930e2d52661b440c4ca1cc999a6cdb2a3 Author: Daniel Han-Chen Date: Sun Jan 21 19:01:28 2024 +1100 Update mistral.py commit fe2bc30987cd728a09f5144e2c863013ddb5b969 Author: Daniel Han-Chen Date: Sun Jan 21 18:52:25 2024 +1100 Update llama.py commit ac99a47a45bc80b424f48ce9e50de59b209af992 Author: Daniel Han-Chen Date: Sun Jan 21 18:52:15 2024 +1100 Update llama.py commit 5bf108ebdaab86688538f8b2a770ea33ef4341a7 Author: Daniel Han-Chen Date: Sun Jan 21 18:15:55 2024 +1100 Update llama.py commit b591f33a3710e2a7036f8908a1725fc39117563b Author: Daniel Han-Chen Date: Sun Jan 21 18:06:44 2024 +1100 Update llama.py commit da7b4f59ee0a4e2dcb8d5177f8603800fd281b2c Author: Daniel Han-Chen Date: Sun Jan 21 17:32:37 2024 +1100 Update save.py commit 196ab974d6848ca63c310b7c753e79ebc95e3230 Author: Daniel Han-Chen Date: Sun Jan 21 16:53:58 2024 +1100 Update llama.py commit cbc1c69e294bf6a491f77bde13b30ac2fa9dd454 Author: Daniel Han-Chen Date: Sun Jan 21 16:22:28 2024 +1100 faster saving & inference --- unsloth/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 40da487e7f..7d271edf4e 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -53,8 +53,6 @@ elif (major_torch == 2) and (minor_torch < 2): # Disable expandable_segments del os.environ["PYTORCH_CUDA_ALLOC_CONF"] - # Must reimport Pytorch! 
- importlib.reload(torch) pass From 223657f3257f0799ddce9ccddc36b34a8deb67aa Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 17 Mar 2024 22:46:17 +1100 Subject: [PATCH 0161/1088] Update fast_lora.py --- unsloth/kernels/fast_lora.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 6568bba681..f1524653a5 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -87,6 +87,7 @@ def forward(ctx, X : torch.Tensor, downW, downW_quant, downA, downB, downS, _forward_function, _backward_function,): dtype = X.dtype + print("Forward ", dtype) e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) g = matmul_lora(X, upW, upW_quant, upA, upB, upS) @@ -122,6 +123,7 @@ def backward(ctx, dY : torch.Tensor): e = e .view(-1, e .shape[-1]) g = g .view(-1, g .shape[-1]) dtype = X.dtype + print("Backward ", dtype) DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) DW, e, g = _backward_function(DW, e, g) From e91cc6580d652379fc316d9e4a9c291d88fb9c40 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 17 Mar 2024 22:46:44 +1100 Subject: [PATCH 0162/1088] Update fast_lora.py --- unsloth/kernels/fast_lora.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index f1524653a5..6568bba681 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -87,7 +87,6 @@ def forward(ctx, X : torch.Tensor, downW, downW_quant, downA, downB, downS, _forward_function, _backward_function,): dtype = X.dtype - print("Forward ", dtype) e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) g = matmul_lora(X, upW, upW_quant, upA, upB, upS) @@ -123,7 +122,6 @@ def backward(ctx, dY : torch.Tensor): e = e .view(-1, e .shape[-1]) g = g .view(-1, g .shape[-1]) dtype = X.dtype - print("Backward ", dtype) DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) DW, e, g = _backward_function(DW, e, g) 
From 264a8eeaf88f7c0dbe5c886d2531de532142c550 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 18 Mar 2024 04:18:15 +1100 Subject: [PATCH 0163/1088] Fix lm_head, embed_tokens (#258) * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update 
gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py 
* Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * 
Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code --- unsloth/models/llama.py | 27 ++++++++++++++++++--------- unsloth/models/loader.py | 2 ++ unsloth/models/mistral.py | 10 +++++++--- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d83d9b76f2..8b86feb35e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -505,11 +505,10 @@ def 
LlamaModel_fast_forward( position_ids = position_ids.repeat((batch_size, 1)) pass - # embed positions + # Embed positions if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - # Downcast to the correct dtype ie float32 to float16 inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma @@ -759,6 +758,7 @@ def _CausalLM_fast_forward( else: logits = self.lm_head(hidden_states) pass + logits = logits.to(self.config.torch_dtype) loss = None if labels is not None: @@ -929,6 +929,7 @@ def from_pretrained( fix_tokenizer = True, model_patcher = None, tokenizer_name = None, + trust_remote_code = False, **kwargs, ): if model_patcher is None: model_patcher = FastLlamaModel @@ -989,6 +990,7 @@ def from_pretrained( token = token, rope_scaling = rope_scaling, max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, **kwargs, ) @@ -996,9 +998,10 @@ def from_pretrained( tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, - model_max_length = max_position_embeddings, - padding_side = "right", - token = token, + model_max_length = max_position_embeddings, + padding_side = "right", + token = token, + trust_remote_code = trust_remote_code, ) model, tokenizer = patch_tokenizer(model, tokenizer) @@ -1338,7 +1341,6 @@ def get_peft_model( "We shall do it for you!" ) train_lm_head = True - model.model.embed_tokens.to(torch.float32, non_blocking = True) elif module == "embed_tokens": logger.warning_once( @@ -1346,7 +1348,6 @@ def get_peft_model( "We shall do it for you!" 
) train_embed_tokens = True - model.lm_head.to(torch.float32, non_blocking = True) else: assert(module in accepted_modules) @@ -1388,9 +1389,17 @@ def get_peft_model( # Now patch lm_head and embed_tokens if train_embed_tokens: - model.model.model.embed_tokens.requires_grad_(True) + print("Unsloth: Casting embed_tokens to float32") + assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) + model.model.model.embed_tokens.modules_to_save.default.to(torch.float32) + model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) + pass + if train_lm_head: - model.model.lm_head.requires_grad_(True) + print("Unsloth: Casting lm_head to float32") + assert(hasattr(model.model.lm_head, "modules_to_save")) + model.model.lm_head.modules_to_save.default.to(torch.float32) + model.model.lm_head.modules_to_save.default.requires_grad_(True) pass return model diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 47b568ae2a..29d25f3c20 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -74,6 +74,7 @@ def from_pretrained( device_map = "sequential", rope_scaling = None, fix_tokenizer = True, + trust_remote_code = False, use_gradient_checkpointing = True, *args, **kwargs, ): @@ -139,6 +140,7 @@ def from_pretrained( fix_tokenizer = fix_tokenizer, model_patcher = dispatch_model, tokenizer_name = tokenizer_name, + trust_remote_code = trust_remote_code, *args, **kwargs, ) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index c1e39e4a2e..2db71f0288 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -230,6 +230,7 @@ def MistralForCausalLM_fast_forward( else: logits = self.lm_head(hidden_states) pass + logits = logits.to(self.config.torch_dtype) loss = None if labels is not None: @@ -295,6 +296,7 @@ def from_pretrained( fix_tokenizer = True, model_patcher = None, tokenizer_name = None, + trust_remote_code = False, **kwargs, ): if model_patcher is None: model_patcher = FastMistralModel @@ 
-353,6 +355,7 @@ def from_pretrained( quantization_config = bnb_config, token = token, # rope_scaling = rope_scaling, + trust_remote_code = trust_remote_code, **kwargs, ) @@ -360,9 +363,10 @@ def from_pretrained( tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, - model_max_length = max_position_embeddings, - padding_side = "right", - token = token, + model_max_length = max_position_embeddings, + padding_side = "right", + token = token, + trust_remote_code = trust_remote_code, ) model, tokenizer = patch_tokenizer(model, tokenizer) From 159e7d1a7a60eea618aec2b53c67e995104edef7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Mar 2024 04:32:28 +1100 Subject: [PATCH 0164/1088] Fix GGUF and saving (#261) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update 
rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * 
save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py --- unsloth/models/gemma.py | 2 +- unsloth/models/llama.py | 33 +++++- unsloth/models/mistral.py | 3 +- unsloth/save.py | 220 +++++++++++++++++++++++++++++++------- 4 files changed, 216 insertions(+), 42 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 7bfec43e51..6bd8a6f345 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -143,7 +143,7 @@ def GemmaDecoderLayer_fast_forward( from math import sqrt as math_sqrt # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 -@torch.inference_mode +# @torch.inference_mode def GemmaModel_fast_forward_inference( self, input_ids, diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8b86feb35e..574385fba0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -657,7 +657,7 @@ def custom_forward(*inputs): # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 -@torch.inference_mode +# @torch.inference_mode def LlamaModel_fast_forward_inference( self, input_ids, @@ -753,7 +753,8 @@ def _CausalLM_fast_forward( hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape if bsz == 1 and q_len == 1: - logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + lm_head = self.lm_head.weight + logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) else: logits = self.lm_head(hidden_states) @@ -893,6 +894,16 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass +def _wrap_fast_inference(generate, device_type, dtype): + # Wraps inference with bfloat16 / float16 + @torch.inference_mode + def _fast_generate(*args, **kwargs): + with torch.autocast(device_type 
= device_type, dtype = dtype): + return generate(*args, **kwargs) + return _fast_generate +pass + + class FastLlamaModel: @staticmethod @@ -1581,6 +1592,15 @@ def for_inference(model): internal_model.gradient_checkpointing = False internal_model.training = False pass + + # Also check if lm_head / embeddings are trained + lm_head = getattr(model, "model", model).lm_head.weight + device_type = lm_head.device.type + dtype = model.config.torch_dtype + + # Wrap model.generate + model._unwrapped_old_generate = model.generate + model.generate = _wrap_fast_inference(model.generate, device_type, dtype) pass @@ -1601,5 +1621,14 @@ def for_training(model, use_gradient_checkpointing = True): internal_model.gradient_checkpointing = use_gradient_checkpointing internal_model.training = True pass + + # Also revert model.generate + if hasattr(model, "_unwrapped_old_generate"): + model.generate = model._unwrapped_old_generate + del model._unwrapped_old_generate + pass pass pass + + + diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 2db71f0288..f650d8f12e 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -225,7 +225,8 @@ def MistralForCausalLM_fast_forward( hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape if bsz == 1 and q_len == 1: - logits = torch.mv(self.lm_head.weight, hidden_states.ravel()) + lm_head = self.lm_head.weight + logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) else: logits = self.lm_head(hidden_states) diff --git a/unsloth/save.py b/unsloth/save.py index 5970d74d11..c8b4053a10 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -32,6 +32,8 @@ "patch_saving_functions", ] +# Check Kaggle +IS_A_KAGGLE_ENVIRONMENT = "KAGGLE_CONTAINER_NAME" in os.environ LLAMA_WEIGHTS = ( "self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj", @@ -66,9 +68,9 @@ "q5_1" : "Even higher accuracy, resource usage and slower inference.", "q5_k_s" : 
"Uses Q5_K for all tensors", "q6_k" : "Uses Q8_K for all tensors", - "iq2_xxs" : "2.06 bpw quantization", - "iq2_xs" : "2.31 bpw quantization", - "iq3_xxs" : "3.06 bpw quantization", + # "iq2_xxs" : "2.06 bpw quantization", # Not supported sadly + # "iq2_xs" : "2.31 bpw quantization", + # "iq3_xxs" : "3.06 bpw quantization", "q3_k_xs" : "3-bit extra small quantization", } @@ -79,6 +81,27 @@ def print_quantization_methods(): pass +def _free_cached_model(model): + from huggingface_hub import scan_cache_dir + cached_repos = list(scan_cache_dir().repos) + + # Go through every cached repo, and delete the one that matches the model we want to save. + # Can save 4GB of disk space - useful for Kaggle systems. + for cached_repo in cached_repos: + if cached_repo.repo_id == model.config._name_or_path: + remove_cache_commit = list(cached_repo.revisions)[0].commit_hash + delete_strategy = scan_cache_dir().delete_revisions(remove_cache_commit,) + + logger.warning_once( + "Unsloth: Will remove a cached repo with size " + \ + delete_strategy.expected_freed_size_str, + ) + + delete_strategy.execute() + pass + pass +pass + def _merge_lora(layer, name): @@ -153,6 +176,19 @@ def unsloth_save_model( temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): + # First check for a token! + if push_to_hub: + from huggingface_hub import whoami + try: + username = whoami(token = token)["name"] + except: + raise RuntimeError( + "Unsloth: Please supply a token!\n"\ + "Go to https://huggingface.co/settings/tokens" + ) + pass + pass + if commit_message is None: commit_message = "" if "Unsloth" not in commit_message: commit_message += " (Trained with Unsloth)" @@ -411,6 +447,16 @@ def unsloth_save_model( os.makedirs(temporary_location) pass + # Check if Kaggle, since only 20GB of Disk space allowed. + if IS_A_KAGGLE_ENVIRONMENT: + # We free up 4GB of space + logger.warning_once( + "Unsloth: Kaggle only allows 20GB of disk space. 
We need to delete the downloaded\n"\ + "model which will save 4GB of disk space, allowing you to save on Kaggle." + ) + _free_cached_model(internal_model) + pass + # HF also uses a OrderedDict from collections import OrderedDict state_dict = OrderedDict() @@ -480,12 +526,35 @@ def unsloth_save_model( ) pass + # First check if we're pushing to an organization! + save_directory = save_pretrained_settings["save_directory"] + + if save_pretrained_settings["push_to_hub"]: + new_save_directory, new_username = _determine_username(save_directory, username, token) + + if token is not None: + from huggingface_hub import whoami + actual_username = whoami(token = token)["name"] + else: + actual_username = username + pass + + # Check if pushing to an organization + if save_pretrained_settings["push_to_hub"] and (username != actual_username): + print(f"Unsloth: Saving to organization with address {new_save_directory}") + # We upload everything at the end! + tokenizer_save_settings["push_to_hub"] = False + tokenizer_save_settings["save_directory"] = new_save_directory + pass + + # Save tokenizer if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") tokenizer.save_pretrained(**tokenizer_save_settings) print(" Done.") else: print() + pass print("Unsloth: Saving model... This might take 5 minutes for Llama-7b...") @@ -502,7 +571,35 @@ def unsloth_save_model( model.config = new_config # Save! - internal_model.save_pretrained(**save_pretrained_settings) + + # Check if pushing to an organization + if save_pretrained_settings["push_to_hub"] and (username != actual_username): + print(f"Unsloth: Saving to organization with address {new_save_directory}") + # Pushing to organization! + # Sadly .save_pretrained doesn't work :( + # We first save it via .save_pretrained, then upload manually! 
+ save_pretrained_settings["save_directory"] = new_save_directory + save_pretrained_settings["push_to_hub"] = False + internal_model.save_pretrained(**save_pretrained_settings) + + # Now manually go through each file and upload them manually! + filenames = os.listdir(new_save_directory) + + from huggingface_hub import HfApi + hf_api = HfApi(token = save_pretrained_settings["token"]) + + print("Unsloth: Uploading all files... Please wait!") + hf_api.upload_folder( + folder_path = new_save_directory, + path_in_repo = ".", + repo_id = new_save_directory, + repo_type = "model", + commit_message = "(Trained with Unsloth)", + ignore_patterns = "*.md", + ) + else: + internal_model.save_pretrained(**save_pretrained_settings) + pass # Revert config back original_model = model @@ -616,13 +713,16 @@ def install_llama_cpp_old(version = -10): pass -def install_llama_cpp_blocking(): +def install_llama_cpp_blocking(use_cuda = True): + use_cuda = "LLAMA_CUBLAS=1" if use_cuda else "" + commands = [ "git clone https://github.com/ggerganov/llama.cpp", - f"cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j{psutil.cpu_count()*2}", + f"cd llama.cpp && make clean && {use_cuda} make all -j{psutil.cpu_count()*2}", "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return + for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: for line in sp.stdout: @@ -954,17 +1054,8 @@ def unsloth_push_to_hub_merged( [](https://github.com/unslothai/unsloth) """ -def upload_to_huggingface( - model, - save_directory, - token, - method, - extra = "", - file_location = None, - old_username = None, - private = None, -): - # Check for username + +def _determine_username(save_directory, old_username, token): username = "" save_directory = save_directory.lstrip("./") if "/" not in save_directory: @@ -980,6 +1071,21 @@ def upload_to_huggingface( else: username = save_directory.split("/")[0] pass + return save_directory, username +pass 
+ + +def upload_to_huggingface( + model, + save_directory, + token, + method, + extra = "", + file_location = None, + old_username = None, + private = None, +): + save_directory, username = _determine_username(save_directory, old_username, token) from huggingface_hub import create_repo try: @@ -1107,18 +1213,15 @@ def unsloth_save_pretrained_gguf( # Non blocking install GGUF first if not os.path.exists("llama.cpp"): - git_clone = install_llama_cpp_clone_non_blocking() - python_install = install_python_non_blocking(["gguf", "protobuf"]) - git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() - new_save_directory, old_username = unsloth_save_model(**arguments) - python_install.wait() - else: - try: + + if IS_A_KAGGLE_ENVIRONMENT: + # Kaggle is weird - no blocking installs, and no CUDA? + python_install = install_python_non_blocking(["gguf", "protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda = False) new_save_directory, old_username = unsloth_save_model(**arguments) makefile = None - except: - # Retry by recloning llama.cpp + else: git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() @@ -1126,6 +1229,28 @@ def unsloth_save_pretrained_gguf( new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass + else: + try: + new_save_directory, old_username = unsloth_save_model(**arguments) + makefile = None + except: + # Retry by recloning llama.cpp + if IS_A_KAGGLE_ENVIRONMENT: + # Kaggle is weird - no blocking installs, and no CUDA? 
+ python_install = install_python_non_blocking(["gguf", "protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda = False) + new_save_directory, old_username = unsloth_save_model(**arguments) + makefile = None + else: + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory, old_username = unsloth_save_model(**arguments) + python_install.wait() + pass + pass pass for _ in range(3): @@ -1208,25 +1333,44 @@ def unsloth_push_to_hub_gguf( # Non blocking install GGUF first if not os.path.exists("llama.cpp"): - git_clone = install_llama_cpp_clone_non_blocking() - python_install = install_python_non_blocking(["gguf", "protobuf"]) - git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() - new_save_directory, old_username = unsloth_save_model(**arguments) - python_install.wait() - else: - try: + + if IS_A_KAGGLE_ENVIRONMENT: + # Kaggle is weird - no blocking installs, and no CUDA? + python_install = install_python_non_blocking(["gguf", "protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda = False) new_save_directory, old_username = unsloth_save_model(**arguments) makefile = None - except: - # Retry by recloning llama.cpp + else: git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass + else: + try: + new_save_directory, old_username = unsloth_save_model(**arguments) + makefile = None + except: + # Retry by recloning llama.cpp + if IS_A_KAGGLE_ENVIRONMENT: + # Kaggle is weird - no blocking installs, and no CUDA? 
+ python_install = install_python_non_blocking(["gguf", "protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda = False) + new_save_directory, old_username = unsloth_save_model(**arguments) + makefile = None + else: + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["gguf", "protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + new_save_directory, old_username = unsloth_save_model(**arguments) + python_install.wait() + pass + pass pass for _ in range(3): From b5c24c79d4f13a2b43a4d3f9f992a4fcb934714b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Mar 2024 19:55:02 +1100 Subject: [PATCH 0165/1088] Fix Saving (#264) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * 
Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update 
cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms 
* Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * 
Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py --- unsloth/models/_utils.py | 86 ++++++++++++++++++++++++++++++++++++++++ unsloth/models/llama.py | 7 ++++ unsloth/save.py | 30 +++++++------- 3 files changed, 108 insertions(+), 15 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 02eea4ba0c..6f7da0f32a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -26,6 +26,7 @@ from platform import system as platform_system platform_system = platform_system() import math +import numpy as np __version__ = "2024.3" @@ -269,3 +270,88 @@ def check_tokenizer( "Luckily, your training run will still work in the meantime!" ) pass + + +def _calculate_n_gradient_checkpoints( + n_layers : int, + method : Optional[Union[str, int]] = "sqrt", +) -> List[int]: + assert(type(n_layers) is int and n_layers > 0) + + if method is None: method = "sqrt" + + if method == "sqrt": + n_checkpoints = int(n_layers**0.5) + elif type(method) is int and method > 0: + n_checkpoints = int(np.ceil(n_layers / method)) + else: + raise ValueError("method must be 'sqrt' or an int >0 and <= n_layers.") + + size = n_layers // n_checkpoints + sizes = np.full(n_checkpoints, size, dtype = int) + leftovers = n_layers % n_checkpoints + # We append leftovers from the right + for k in range(leftovers): + sizes[n_checkpoints-1-k] += 1 + boundaries = np.hstack((0, np.cumsum(sizes))) + boundaries = boundaries.tolist() + return boundaries +pass + + +def calculate_n_gradient_checkpoints( + n_layers : int, + layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", +) -> List[int]: + assert(type(n_layers) is int and n_layers > 0) + + if layers_per_checkpoint is None or layers_per_checkpoint == 1: + return None + + boundaries = _calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) + + assert(boundaries[0] == 0 and boundaries[-1] == n_layers) + assert(min(boundaries) == 0 and max(boundaries) == n_layers) + 
assert(np.diff(boundaries).min() >= 0) + return boundaries +pass + + +def prepare_n_gradient_checkpoints( + model : Any, + layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", + use_reentrant : Optional[bool] = True, +) -> None: + """ + Calculates where to place the gradient checkpoints given n_layers. + + Args: + model: Any LlamaModel with layers. + layers_per_checkpoint (`Union[str, int]`, *optional*): + Can either be `sqrt` or an integer for how many layers per checkpoint you want. + The more, the less memory usage, but can be slower. Default is `sqrt`. + Choose 1 for Pytorch gradient checkpointing. 2 to wrap 2 layers in 1 module etc. + use_reentrant (`bool`, *optional*): + https://github.com/pytorch/pytorch/blob/main/torch/utils/checkpoint.py#L354 + Optimal gradient checkpointing algorithm `use_reentrant=False` which will + be the default in future Pytorch versions doesn't seem to work?? + """ + _model = None + if hasattr(model, "layers"): + _model = model + elif hasattr(model, "model"): + if hasattr(model.model, "layers"): + _model = model.model + if _model is None: + raise TypeError("`model` or `model.model` does not have attribute `layers`. 
Are you sure this is a model?") + pass + + if use_reentrant is False: + use_reentrant = True + pass + + n_layers = len(_model.layers) + boundaries = calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) + _model._gradient_checkpointing_boundaries = boundaries + _model._gradient_checkpointing_use_reentrant = use_reentrant +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 574385fba0..b7aae06ee5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -593,6 +593,13 @@ def LlamaModel_fast_forward( all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None + # Gradient checkpointing methods (ie sqrt) + if hasattr(self, "_gradient_checkpointing_boundaries"): + boundaries = self._gradient_checkpointing_boundaries + else: + boundaries = None + pass + for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) diff --git a/unsloth/save.py b/unsloth/save.py index c8b4053a10..a161394057 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -24,6 +24,7 @@ from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters import subprocess import psutil +import re __all__ = [ "print_quantization_methods", @@ -176,19 +177,6 @@ def unsloth_save_model( temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): - # First check for a token! 
- if push_to_hub: - from huggingface_hub import whoami - try: - username = whoami(token = token)["name"] - except: - raise RuntimeError( - "Unsloth: Please supply a token!\n"\ - "Go to https://huggingface.co/settings/tokens" - ) - pass - pass - if commit_message is None: commit_message = "" if "Unsloth" not in commit_message: commit_message += " (Trained with Unsloth)" @@ -215,7 +203,19 @@ def unsloth_save_model( for deletion in ("model", "tokenizer", "save_method", "temporary_location", "maximum_memory_usage"): del save_pretrained_settings[deletion] pass - import re + + # First check for a token! + if push_to_hub: + from huggingface_hub import whoami + try: + username = whoami(token = token)["name"] + except: + raise RuntimeError( + "Unsloth: Please supply a token!\n"\ + "Go to https://huggingface.co/settings/tokens" + ) + pass + pass assert(maximum_memory_usage > 0 and maximum_memory_usage <= 0.95) @@ -588,7 +588,7 @@ def unsloth_save_model( from huggingface_hub import HfApi hf_api = HfApi(token = save_pretrained_settings["token"]) - print("Unsloth: Uploading all files... Please wait!") + print("Unsloth: Uploading all files... 
Please wait...") hf_api.upload_folder( folder_path = new_save_directory, path_in_repo = ".", From bb81079ca1dba43fc2cdb79a81ce6edf23f87907 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Mar 2024 04:48:15 +1100 Subject: [PATCH 0166/1088] lm_head issue (#266) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * 
Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update 
fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py --- unsloth/models/llama.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b7aae06ee5..6b258dbb6b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -599,7 +599,7 @@ def LlamaModel_fast_forward( else: boundaries = None pass - + for idx, decoder_layer in enumerate(self.layers): if 
output_hidden_states: all_hidden_states += (hidden_states,) @@ -1601,7 +1601,11 @@ def for_inference(model): pass # Also check if lm_head / embeddings are trained - lm_head = getattr(model, "model", model).lm_head.weight + internal_model = model + while not hasattr(internal_model, "lm_head"): + internal_model = internal_model.model + pass + lm_head = internal_model.lm_head.weight device_type = lm_head.device.type dtype = model.config.torch_dtype From a100afa60cf1566e784b41eb462b5d2ed80977f8 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 24 Mar 2024 14:00:04 +1100 Subject: [PATCH 0167/1088] Update mapper.py --- unsloth/models/mapper.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index afcbdb75f5..dc54bfd791 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -90,6 +90,10 @@ "unsloth/gemma-2b-it", "google/gemma-2b-it", ), + "unsloth/mistral-7b-v0.2-bnb-4bit" : ( + "unsloth/mistral-7b-v0.2", + "alpindale/Mistral-7B-v0.2-hf", + ) } INT_TO_FLOAT_MAPPER = {} From a68aebc1fa17755ffbcdafc9239e7ca37ab21657 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 28 Mar 2024 04:16:50 +1100 Subject: [PATCH 0168/1088] Auto Healing Tokenizer (#283) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update 
llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix 
Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update 
save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit --- .gitignore | 160 -------------- LICENSE | 2 +- unsloth/__init__.py | 1 + unsloth/chat_templates.py | 114 ++++------ unsloth/models/_utils.py | 105 +--------- unsloth/models/llama.py | 5 +- unsloth/models/mistral.py | 2 +- unsloth/save.py | 6 +- unsloth/tokenizer_utils.py | 414 +++++++++++++++++++++++++++++++++++++ 9 files changed, 464 insertions(+), 345 deletions(-) delete mode 100644 .gitignore create mode 100644 unsloth/tokenizer_utils.py diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 68bc17f9ff..0000000000 --- a/.gitignore +++ /dev/null @@ -1,160 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ diff --git a/LICENSE b/LICENSE index 261eeb9e9f..8894f17a3b 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright [2024-] [Unsloth AI, Daniel Han-Chen & Michael Han-Chen] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 7d271edf4e..d4ca45d7d1 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -113,3 +113,4 @@ from .models import * from .save import * from .chat_templates import * +from .tokenizer_utils import * diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index da46972750..ec62c9b858 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -15,15 +15,19 @@ __all__ = [ "get_chat_template", "test_chat_templates", - "fix_sentencepiece_tokenizer", ] from transformers import StoppingCriteria, StoppingCriteriaList from torch import LongTensor, FloatTensor from transformers.models.llama.modeling_llama import logger -from .models._utils import patch_tokenizer +from .save import patch_saving_functions import os import shutil +from .tokenizer_utils import ( + load_correct_tokenizer, + fix_sentencepiece_tokenizer, +) +from .models._utils import patch_tokenizer CHAT_TEMPLATES = {} @@ -251,84 +255,23 @@ CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token,) -def fix_sentencepiece_tokenizer( - old_tokenizer, - new_tokenizer, - token_mapping, - temporary_location = "_unsloth_sentencepiece_temp", -): - # From https://github.com/google/sentencepiece/issues/121 - # We need to manually edit the sentencepiece tokenizer! - try: - import sentencepiece.sentencepiece_model_pb2 as sentencepiece_model_pb2 - except: - if not os.path.exists(temporary_location): - os.system("git clone https://github.com/google/sentencepiece.git unsloth_sentencepiece_temp") - os.system(f"cd {temporary_location}/src && protoc --python_out=. 
sentencepiece_model.proto") - shutil.rmtree(temporary_location) - pass - import sentencepiece.sentencepiece_model_pb2 as sentencepiece_model_pb2 - pass - - if not os.path.exists(temporary_location): - os.makedirs(temporary_location) - pass - - # First save the old tokenizer - old_tokenizer.save_pretrained(temporary_location) - - from sentencepiece import SentencePieceProcessor - tokenizer_file = sentencepiece_model_pb2.ModelProto() - tokenizer_file.ParseFromString(open(f"{temporary_location}/tokenizer.model", "rb").read()) - - # Now save the new tokenizer - new_tokenizer.save_pretrained(temporary_location) - - # Now correct the old tokenizer's .model file - for old_token, new_token in token_mapping.items(): - ids = old_tokenizer([old_token], add_special_tokens = False).input_ids - ids = ids[0] - if (len(ids) != 1): - # Skip this token! - print(f"Skip mapping {old_token} to {new_token} since {new_token} is already in the tokenizer!") - continue - pass - ids = ids[0] - tokenizer_piece = tokenizer_file.pieces[ids] - assert(tokenizer_piece.piece == old_token) - tokenizer_piece.piece = new_token - pass - - # And now write it - with open(f"{temporary_location}/tokenizer.model", "wb") as file: - file.write(tokenizer_file.SerializeToString()) - pass - - # And load it! - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(temporary_location, eos_token = new_tokenizer.eos_token) - return tokenizer -pass - - def get_chat_template( tokenizer, chat_template = "chatml", mapping = {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant"}, map_eos_token = True, ): + assert(type(map_eos_token) is bool) old_tokenizer = tokenizer - if map_eos_token is False: - assert("Unsloth: Can only map new tokens to EOS for now. 
Adding new tokens is not yet supported.") - pass - IS_GEMMA = False if tokenizer.__class__.__name__.startswith("Gemma"): if chat_template == "chatml": chat_template = "gemma_chatml" IS_GEMMA = True pass + # We first check if the tokenizer is a fast one. If not, we cannot convert this! + is_fast_tokenizer = getattr(tokenizer, "is_fast", False) old_padding_side = tokenizer.padding_side if type(chat_template) in (list, tuple,): @@ -348,9 +291,17 @@ def get_chat_template( assert(type(stop_word) is str) - # token_mapping = {"" : "<|im_start|>", "" : "<|im_end|>"} - # For Gemma :) - if token_mapping is not None: + # Check fast tokenizer + if not is_fast_tokenizer: + logger.warning_once( + f"Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ + "Please log a Github issue if you want this as a new feature!\n"\ + "Your chat template will still work, but it won't add or edit tokens." + ) + + elif token_mapping is not None: + # token_mapping = {"" : "<|im_start|>", "" : "<|im_end|>"} + # For Gemma :) string_vocab = tokenizer._tokenizer.to_str() @@ -368,7 +319,7 @@ def get_chat_template( pass pass - if not stop_word in token_mapping.values(): + if map_eos_token and (not stop_word in token_mapping.values()): # Do not map 107 = <|im_end|> and 1 = <|im_end|>. This will reduce the vocab size by 1 logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) @@ -376,14 +327,19 @@ def get_chat_template( if skipped != len(token_mapping): new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) - new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + + if map_eos_token: + new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + else: + new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer) + pass # Must fix the sentence piece tokenizer since there's no tokenizer.model file! 
tokenizer = fix_sentencepiece_tokenizer(tokenizer, new_tokenizer, token_mapping,) else: pass - elif stop_word != "eos_token": + elif map_eos_token and (stop_word != "eos_token"): logger.warning_once(f"Unsloth: Will map {stop_word} to EOS = {tokenizer.eos_token}.") # Replaces the old EOS token with a new one. @@ -393,9 +349,14 @@ def get_chat_template( # This is a HACK! # Idea from https://huggingface.co/cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser string_vocab = tokenizer._tokenizer.to_str() - string_vocab = string_vocab.replace(tokenizer.eos_token, stop_word) + old_eos_token = tokenizer.eos_token + string_vocab = string_vocab.replace(old_eos_token, stop_word) new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) - tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + + # Must fix the sentence piece tokenizer since there's no tokenizer.model file! 
+ token_mapping = { old_eos_token : stop_word, } + tokenizer = fix_sentencepiece_tokenizer(tokenizer, new_tokenizer, token_mapping,) pass else: @@ -433,7 +394,10 @@ def get_chat_template( if old_bos_token != new_bos_token: tokenizer.bos_token = old_bos_token if old_unk_token != new_unk_token: tokenizer.unk_token = old_unk_token - #stopping_criteria = create_stopping_criteria(tokenizer, stop_word) + # stopping_criteria = create_stopping_criteria(tokenizer, stop_word) + + # Patch saving functions + tokenizer = patch_saving_functions(tokenizer) return tokenizer#, stopping_criteria pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6f7da0f32a..1989313ee6 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -60,22 +60,16 @@ __all__ = [ "prepare_model_for_kbit_training", - "patch_tokenizer", - "check_tokenizer", "xformers", "xformers_attention", "xformers_version", "__version__", "HAS_FLASH_ATTENTION", "platform_system", + "patch_tokenizer", ] -IGNORED_TOKENIZER_CHECKING = frozenset(( - "CodeLlamaTokenizerFast", - "CodeLlamaTokenizer", -)) - def prepare_model_for_kbit_training( model : Any, use_gradient_checkpointing : bool = True, @@ -144,103 +138,6 @@ def patch_tokenizer(model, tokenizer): pass -def check_tokenizer( - model, - tokenizer, - model_name = "unsloth/llama-2-7b-bnb-4bit", - model_max_length = 4096, - padding_side = "right", - token = None, - _reload = True, -): - # Checks tokenizer for out of bounds ids. - # Mainly a fix for https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha - # where had token id=32002. - # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25 - # Seems like the Fast tokenizer in Rust breaks things! - - # We ignore some of them! 
- if tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING: - return tokenizer - pass - - max_embedding_size = model.model.embed_tokens.weight.shape[0] - added_tokens_fast = tokenizer.added_tokens_decoder - added_tokens_fast = {index : str(value) for index, value in added_tokens_fast.items()} - sorted_keys = sorted(added_tokens_fast) - added_tokens_fast = {key : added_tokens_fast[key] for key in sorted_keys} - - for j, index in enumerate(added_tokens_fast.keys()): - if index >= max_embedding_size: - bad_indices = list(added_tokens_fast.keys ())[j:] - bad_tokens = list(added_tokens_fast.values())[j:] - - if not _reload: - # Try removing the token - added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] - special_tokens = tokenizer.special_tokens_map - import itertools - special_tokens = frozenset( - itertools.chain.from_iterable( - [x] if type(x) is str else x for x in special_tokens.values() - ) - ) - can_be_removed1 = [x for x in bad_tokens if x not in special_tokens] - can_be_removed2 = [x for x in can_be_removed1 if x in tokenizer._added_tokens_encoder.keys()] - - # Check of extra tokens can in fact we removed! - - if (len(can_be_removed1) == len(bad_tokens)) and \ - (len(can_be_removed2) == len(bad_tokens)): - # Yes it can be fixed! - for bad_token in can_be_removed1: - remove_id = tokenizer._added_tokens_encoder[bad_token] - del tokenizer._added_tokens_decoder[remove_id] - del tokenizer._added_tokens_encoder[bad_token] - pass - # Confirm 1 more time! - if max(tokenizer.added_tokens_decoder.keys()) < max_embedding_size: - logger.warning_once( - f"Unsloth loaded a broken tokenizer `{model_name}`, but managed to repair it!\n"\ - f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ - "We removed these bad tokens. If you think this is incorrect, fix your tokenizer first." 
- ) - return tokenizer - pass - pass - - # :( Failure - raise RuntimeError( - f"Unsloth tried to load `{model_name}`, but cannot succeed.\n"\ - f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ - f"Fix your tokenizer since it'll perform out of bounds memory accesses." - ) - pass - - # Try slow tokenizer which can fix things! - tokenizer = AutoTokenizer.from_pretrained( - model_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - use_fast = False, - ) - return check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - _reload = False, - ) - break - pass - pass - return tokenizer -pass - - # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. from peft.tuners.lora.layer import LoraLayer diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6b258dbb6b..bc558c2ddb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -26,6 +26,7 @@ from ..kernels import * from ._utils import * from ._utils import __version__ +from ..tokenizer_utils import * if HAS_FLASH_ATTENTION: from flash_attn import flash_attn_func @@ -1014,8 +1015,8 @@ def from_pretrained( # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name, + tokenizer = load_correct_tokenizer( + tokenizer_name = tokenizer_name, model_max_length = max_position_embeddings, padding_side = "right", token = token, diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index f650d8f12e..9c73266575 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -362,7 +362,7 @@ def from_pretrained( # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else 
tokenizer_name - tokenizer = AutoTokenizer.from_pretrained( + tokenizer = load_correct_tokenizer( tokenizer_name, model_max_length = max_position_embeddings, padding_side = "right", diff --git a/unsloth/save.py b/unsloth/save.py index a161394057..a08a744077 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -276,7 +276,8 @@ def unsloth_save_model( old_username = None, private = private, ) - model.original_push_to_hub( + getattr(model, "original_push_to_hub", tokenizer.push_to_hub)\ + ( repo_id = save_directory, use_temp_dir = use_temp_dir, commit_message = commit_message, @@ -290,7 +291,8 @@ def unsloth_save_model( tags = tags, ) if tokenizer is not None: - tokenizer.original_push_to_hub( + getattr(tokenizer, "original_push_to_hub", tokenizer.push_to_hub)\ + ( repo_id = save_directory, use_temp_dir = use_temp_dir, commit_message = commit_message, diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py new file mode 100644 index 0000000000..00e937c97c --- /dev/null +++ b/unsloth/tokenizer_utils.py @@ -0,0 +1,414 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from transformers import AutoTokenizer +from transformers.convert_slow_tokenizer import convert_slow_tokenizer +from transformers import PreTrainedTokenizerFast +import re +import os +from transformers.models.llama.modeling_llama import logger + +__all__ = [ + "load_correct_tokenizer", + "fix_sentencepiece_tokenizer", + "check_tokenizer", +] + + +IGNORED_TOKENIZER_CHECKING = frozenset(( + "CodeLlamaTokenizerFast", + "CodeLlamaTokenizer", +)) + + +def try_fix_tokenizer(tokenizer, prepend = True): + + if hasattr(tokenizer, "_tokenizer"): + converted_tokenizer = tokenizer._tokenizer + else: + converted_tokenizer = convert_slow_tokenizer(tokenizer) + pass + + tokenizer_string = converted_tokenizer.to_str() + + # Llama does ▁apple. Sometimes this is wrong!! + prepend_text = '{"type":"Prepend","prepend":"▁"},' + if not prepend and prepend_text in tokenizer_string: + tokenizer_string = tokenizer_string.replace(prepend_text, "", 1) + pass + + dir_names = dir(tokenizer) + # Get eos_token, bos_token etc + token_names = [x for x in dir_names if x.endswith("_token") and x.count("_") == 1] + + for token_name in token_names: + token = getattr(tokenizer, token_name, None) + if token is None: continue + token_id = getattr(tokenizer, token_name + "_id", None) + + # Locate the token's id mapping in the string + find_text = f'"id":{token_id},"content":"' + start = tokenizer_string.find(find_text) + len(find_text) + if start == -1: continue + end = tokenizer_string.find('",', start) + + bad_token = tokenizer_string[start : end] + # Check if token is the actual same one - if not, edit it + if bad_token != token: + bad_text = f'{find_text}{bad_token}",' + good_text = f'{find_text}{token}",' + tokenizer_string = tokenizer_string.replace(bad_text, good_text, 1) + + # And replace vocab section + bad_text = f'"{bad_token}":{token_id},' + good_text = f'"{token}":{token_id},' + tokenizer_string = tokenizer_string.replace(bad_text, good_text, 1) + pass + pass + + fixed_tokenizer = 
converted_tokenizer.from_str(tokenizer_string) + return fixed_tokenizer +pass + + +def get_sorted_dict(dictionary): + sorted_keys = sorted(dictionary.values()) + inverted_dictionary = { value : key for key, value in dictionary.items() } + + sorted_dictionary = {} + for key in sorted_keys: + value = inverted_dictionary[key] + sorted_dictionary[value] = key + return sorted_dictionary +pass + + +def convert_to_fast_tokenizer( + slow_tokenizer, + temporary_location = "_unsloth_sentencepiece_temp", +): + is_fast = getattr(slow_tokenizer, "is_fast", False) + if is_fast: return slow_tokenizer + + try: + tokenizer_name = slow_tokenizer.__class__.__name__ + lowered_tokenizer_name = tokenizer_name.lower() + if lowered_tokenizer_name.endswith("tokenizer"): + class_name = lowered_tokenizer_name[:-len("tokenizer")] + FastTokenizer = eval( + f'__import__(f"transformers.models.{class_name}").{tokenizer_name}Fast' + ) + else: + FastTokenizer = PreTrainedTokenizerFast + except: + FastTokenizer = PreTrainedTokenizerFast + pass + + # Get all arguments (bos_token, etc) + docs = FastTokenizer.__doc__ + docs = docs[docs.find("Args:"):] + args = re.findall(r"\n[\s]+([^\s]{1,}) \(", docs, flags = re.MULTILINE) + args = [x for x in args if not x.endswith("_file")] + + # Also some missing maybe! + docs = PreTrainedTokenizerFast.__doc__ + docs = docs[docs.find("Args:"):] + args2 = re.findall(r"\n[\s]+([^\s]{1,}) \(", docs, flags = re.MULTILINE) + args2 = [x for x in args2 if not x.endswith("_file")] + args = list(set(args + args2)) + + kwargs = {} + for arg in args: kwargs[arg] = getattr(slow_tokenizer, arg, None) + kwargs["tokenizer_object"] = try_fix_tokenizer(slow_tokenizer, prepend = True) + fast_tokenizer = FastTokenizer( **kwargs ) + + # Check if they're similar! 
+ sorted_slow_tokenizer = get_sorted_dict(slow_tokenizer.get_vocab()) + sorted_fast_tokenizer = get_sorted_dict(fast_tokenizer.get_vocab()) + + check_vocab = (sorted_slow_tokenizer == sorted_fast_tokenizer) + check_special = (slow_tokenizer.all_special_tokens == fast_tokenizer.all_special_tokens) + + # Failure so return slow_tokenizer + if not check_vocab or not check_special: return slow_tokenizer + + # Now confirm if they match + if not assert_same_tokenization(slow_tokenizer, fast_tokenizer): + # Maybe remove prepending of __apple? + kwargs["tokenizer_object"] = try_fix_tokenizer(slow_tokenizer, prepend = False) + fast_tokenizer = FastTokenizer( **kwargs ) + if not assert_same_tokenization(slow_tokenizer, fast_tokenizer): + # Failure :( + return slow_tokenizer + pass + pass + + # Also tokenizer.model is missing! + name = slow_tokenizer.name_or_path.replace("/", "_") + if not os.path.exists(temporary_location): + os.makedirs(temporary_location) + pass + new_location = f"{temporary_location}/{name}" + slow_tokenizer.save_pretrained(new_location) + fast_tokenizer.save_pretrained(new_location) + + # Now load it! 
+ fast_tokenizer = AutoTokenizer.from_pretrained(new_location) + if assert_same_tokenization(slow_tokenizer, fast_tokenizer): + return fast_tokenizer + return slow_tokenizer +pass + + +def assert_same_tokenization(slow_tokenizer, fast_tokenizer): + # Get eos_token, bos_token etc + dir_names = dir(slow_tokenizer) + special_tokens = list(filter(None, ( + getattr(slow_tokenizer, x) for x in dir_names + if x.endswith("_token") and x.count("_") == 1 + ))) + all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens)) + string = "\n".join(all_special_tokens) + \ + "A quick brown fox jumps over the lazy dog!!\n\n" + \ + "".join(all_special_tokens) + return slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids +pass + + +global sentencepiece_model_pb2 +sentencepiece_model_pb2 = None + +def fix_sentencepiece_tokenizer( + old_tokenizer, + new_tokenizer, + token_mapping, + temporary_location = "_unsloth_sentencepiece_temp", +): + # From https://github.com/google/sentencepiece/issues/121 + # We need to manually edit the sentencepiece tokenizer! + global sentencepiece_model_pb2 + if sentencepiece_model_pb2 is None: + try: + import sentencepiece.sentencepiece_model_pb2 as _sentencepiece_model_pb2 + sentencepiece_model_pb2 = _sentencepiece_model_pb2 + except: + if not os.path.exists(temporary_location): + os.system(f"git clone https://github.com/google/sentencepiece.git {temporary_location}") + os.system(f"cd {temporary_location}/src && protoc --python_out=. 
sentencepiece_model.proto") + pass + import sentencepiece.sentencepiece_model_pb2 as _sentencepiece_model_pb2 + sentencepiece_model_pb2 = _sentencepiece_model_pb2 + pass + + if not os.path.exists(temporary_location): + os.makedirs(temporary_location) + pass + + # First save the old tokenizer + old_tokenizer.save_pretrained(temporary_location) + + from sentencepiece import SentencePieceProcessor + tokenizer_file = sentencepiece_model_pb2.ModelProto() + tokenizer_file.ParseFromString(open(f"{temporary_location}/tokenizer.model", "rb").read()) + + # Now save the new tokenizer + new_tokenizer.save_pretrained(temporary_location) + + # Now correct the old tokenizer's .model file + for old_token, new_token in token_mapping.items(): + ids = old_tokenizer([old_token], add_special_tokens = False).input_ids + ids = ids[0] + if (len(ids) != 1): + # Skip this token! + print(f"Skip mapping {old_token} to {new_token} since {new_token} is already in the tokenizer!") + continue + pass + ids = ids[0] + tokenizer_piece = tokenizer_file.pieces[ids] + assert(tokenizer_piece.piece == old_token) + tokenizer_piece.piece = new_token + pass + + # And now write it + with open(f"{temporary_location}/tokenizer.model", "wb") as file: + file.write(tokenizer_file.SerializeToString()) + pass + + # And load it! 
+ from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(temporary_location, eos_token = new_tokenizer.eos_token) + return tokenizer +pass + + +def load_correct_tokenizer( + tokenizer_name, + model_max_length = None, + padding_side = "right", + token = None, + trust_remote_code = False, +): + slow_tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + trust_remote_code = trust_remote_code, + use_fast = False, + ) + fast_tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + trust_remote_code = trust_remote_code, + ) + fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token + fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token + + # Confirm if slow and fast are equivalent! + if assert_same_tokenization(slow_tokenizer, fast_tokenizer): + return fast_tokenizer + else: + return convert_to_fast_tokenizer(slow_tokenizer) + pass +pass + + +def check_tokenizer( + model, + tokenizer, + model_name = "unsloth/llama-2-7b-bnb-4bit", + model_max_length = 4096, + padding_side = "right", + token = None, + _reload = True, +): + # Checks tokenizer for out of bounds ids. + # Mainly a fix for https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha + # where had token id=32002. + # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25 + # Seems like the Fast tokenizer in Rust breaks things! + + # We ignore some of them! 
+ if tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING: + return tokenizer + pass + + max_embedding_size = model.model.embed_tokens.weight.shape[0] + added_tokens_fast = tokenizer.added_tokens_decoder + added_tokens_fast = {index : str(value) for index, value in added_tokens_fast.items()} + sorted_keys = sorted(added_tokens_fast) + added_tokens_fast = {key : added_tokens_fast[key] for key in sorted_keys} + + for j, index in enumerate(added_tokens_fast.keys()): + if index >= max_embedding_size: + bad_indices = list(added_tokens_fast.keys ())[j:] + bad_tokens = list(added_tokens_fast.values())[j:] + if not _reload: + # Try removing the token + added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] + special_tokens = tokenizer.special_tokens_map + import itertools + special_tokens = frozenset( + itertools.chain.from_iterable( + [x] if type(x) is str else x for x in special_tokens.values() + ) + ) + can_be_removed1 = [x for x in bad_tokens if x not in special_tokens] + can_be_removed2 = [x for x in can_be_removed1 if x in tokenizer._added_tokens_encoder.keys()] + + # Check of extra tokens can in fact we removed! + can_be_removed = \ + (len(can_be_removed1) == len(bad_tokens)) and \ + (len(can_be_removed2) == len(bad_tokens)) + + # Check if sep_token or other generic types + remove_generic = False + try_mapper = [] + if not can_be_removed: + names = dir(tokenizer) + names = (x for x in names if x.endswith("_token") and x.count("_") == 1) + generic_tokens = [(x, getattr(tokenizer, x, None)) for x in names] + + try_removal = [] + for token in bad_tokens: + for (name_token, check_token) in generic_tokens: + if check_token == token: + try_removal.append(token) + try_mapper.append(name_token) + pass + pass + pass + + # Recheck! + can_be_removed = (len(try_removal) == len(bad_tokens)) + if can_be_removed: remove_generic = True + can_be_removed1 = bad_tokens + pass + + if can_be_removed: + # Yes it can be fixed! 
+ for j, bad_token in enumerate(can_be_removed1): + remove_id = tokenizer._added_tokens_encoder[bad_token] + del tokenizer._added_tokens_decoder[remove_id] + del tokenizer._added_tokens_encoder[bad_token] + + if remove_generic and (try_removal[j] == bad_token): + # Remove sep token for example + setattr(tokenizer, try_mapper[j], None) + setattr(tokenizer, try_mapper[j] + "_id", None) + pass + pass + # Confirm 1 more time! + if max(tokenizer.added_tokens_decoder.keys()) < max_embedding_size: + logger.warning_once( + f"Unsloth loaded a broken tokenizer `{model_name}`, but managed to repair it!\n"\ + f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ + "We removed these bad tokens. If you think this is incorrect, fix your tokenizer first." + ) + return convert_to_fast_tokenizer(tokenizer) + pass + pass + + # :( Failure + raise RuntimeError( + f"Unsloth tried to load `{model_name}`, but cannot succeed.\n"\ + f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ + f"Fix your tokenizer since it'll perform out of bounds memory accesses." + ) + pass + + # Try slow tokenizer which can fix things! 
+ tokenizer = AutoTokenizer.from_pretrained( + model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + use_fast = False, + ) + return check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + _reload = False, + ) + break + pass + pass + return convert_to_fast_tokenizer(tokenizer) +pass From 1e45a9e9b0c24b47464c850434bed452dd5f4f44 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 1 Apr 2024 04:38:12 +1100 Subject: [PATCH 0169/1088] Nightly (#293) Env checking --- unsloth/models/_utils.py | 39 +++++++++++++++++++++++++++++++++++++++ unsloth/models/llama.py | 1 + unsloth/models/mistral.py | 1 + 3 files changed, 41 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1989313ee6..8cdb5e384c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -27,6 +27,8 @@ platform_system = platform_system() import math import numpy as np +import os +import psutil __version__ = "2024.3" @@ -67,6 +69,7 @@ "HAS_FLASH_ATTENTION", "platform_system", "patch_tokenizer", + "get_statistics", ] @@ -169,6 +172,42 @@ def patch_tokenizer(model, tokenizer): pass +def get_statistics(): + # We log some basic stats about which environment is being used. + # We simply download a README.md file from HF - all data is made public. + # This is simply so we can check if some envs are broken or not. 
+ try: + from huggingface_hub import hf_hub_download + from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + n_cpus = psutil.cpu_count(logical = False) + + keynames = "\n" + "\n".join(os.environ.keys()) + statistics = None + if "\nCOLAB_" in keynames and n_cpus == 1: statistics = "colab" + elif "\nCOLAB_" in keynames: statistics = "colabpro" + elif "\nKAGGLE_" in keynames: statistics = "kaggle" + elif "\nRUNPOD_" in keynames: statistics = "runpod" + elif "\nAWS_" in keynames: statistics = "aws" + elif "\nAZURE_" in keynames: statistics = "azure" + elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" + elif "\nINVOCATION_ID" in keynames: statistics = "lambda" + + if statistics is not None: + disabled = False + if not are_progress_bars_disabled(): + disable_progress_bars() + disabled = True + pass + hf_hub_download(f"unslothai/statistics-{statistics}", "README.md", force_download = True) + if disabled: + enable_progress_bars() + pass + pass + except: + pass +pass + + def _calculate_n_gradient_checkpoints( n_layers : int, method : Optional[Union[str, int]] = "sqrt", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bc558c2ddb..bfbd10eb89 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -964,6 +964,7 @@ def from_pretrained( f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() + get_statistics() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 9c73266575..c609d2ecad 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -318,6 +318,7 @@ def from_pretrained( f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() + get_statistics() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 
From 3f8159820493d33b040eddbf054d593cf2e065c3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 2 Apr 2024 04:35:28 +1100 Subject: [PATCH 0170/1088] Temp fix batch inference (#294) * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * 
model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update 
pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py --- unsloth/models/gemma.py | 38 ++++++++------- unsloth/models/llama.py | 97 +++++++++++++++++++++++---------------- unsloth/models/mistral.py | 3 +- 3 files changed, 81 insertions(+), 57 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 6bd8a6f345..5135766d8b 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -45,10 +45,9 @@ def fast_geglu_inference(self, X): # up 
= self.up_proj(X) bsz, _, hd = X.shape mlp_size = self.config.intermediate_size - temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) - up = fast_linear_forward(self. up_proj, X, out = temp[1]) + gate = fast_linear_forward(self.gate_proj, X) + up = fast_linear_forward(self. up_proj, X) gate = torch_nn_functional_gelu(gate, approximate = "tanh") gate *= up @@ -83,20 +82,30 @@ def GemmaDecoderLayer_fast_forward( padding_mask: Optional[torch.LongTensor] = None, *args, **kwargs, ): - if past_key_value is not None: + if use_cache: #past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference_gemma(self.input_layernorm, hidden_states, out_weight) - hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - self.self_attn, - hidden_states, - past_key_value, - position_ids, - do_prefill = do_prefill, + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, ) + # hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + # self.self_attn, + # hidden_states, + # past_key_value, + # position_ids, + # do_prefill = do_prefill, + # ) hidden_states += residual # Fully Connected @@ -129,13 +138,8 @@ def GemmaDecoderLayer_fast_forward( pass outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) return outputs 
pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bfbd10eb89..b802403abc 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,7 +74,7 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt -KV_CACHE_INCREMENT = 128 # KV Cache update size +KV_CACHE_INCREMENT = 256 # KV Cache update size def LlamaAttention_fast_forward_inference( self, @@ -82,6 +82,7 @@ def LlamaAttention_fast_forward_inference( past_key_value: Optional[Tuple[torch.Tensor]], position_ids, do_prefill = False, + attention_mask = None, ): """ https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 @@ -167,12 +168,12 @@ def LlamaAttention_fast_forward_inference( Kn *= cos; Kn.addcmul_(RH_K, sin); # New KV cache - # Kn = torch.cat([K1, Kn], dim = 2) - # Vn = torch.cat([V1, Vn], dim = 2) - self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) - self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) - Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) - Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + Kn = torch.cat([K1, Kn], dim = 2) + Vn = torch.cat([V1, Vn], dim = 2) + # self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + # self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + # Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + # Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) # Handle sliding windows sliding_window = getattr(self.config, "sliding_window", None) @@ -200,6 +201,7 @@ def LlamaAttention_fast_forward_inference( # Attention A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) A *= self.scalar + if attention_mask is not None: A += attention_mask # Must add attention_mask for batched A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch.matmul(A, Vnn, out = Qn) A = A.transpose(1, 2) @@ -215,10 +217,9 @@ def fast_swiglu_inference(self, X): # up = 
self.up_proj(X) bsz, _, hd = X.shape mlp_size = self.config.intermediate_size - temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) - up = fast_linear_forward(self. up_proj, X, out = temp[1]) + gate = fast_linear_forward(self.gate_proj, X) + up = fast_linear_forward(self. up_proj, X) gate = torch_nn_functional_silu(gate, inplace = True) gate *= up @@ -375,19 +376,30 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - if past_key_value is not None: + if use_cache: #past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) - hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - self.self_attn, - hidden_states, - past_key_value, - position_ids, - do_prefill = do_prefill, + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, ) + # hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + # self.self_attn, + # hidden_states, + # past_key_value, + # position_ids, + # do_prefill = do_prefill, + # attention_mask = attention_mask, + # ) hidden_states += residual # Fully Connected @@ -418,13 +430,8 @@ def LlamaDecoderLayer_fast_forward( pass outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) return outputs pass @@ -602,9 +609,8 @@ def 
LlamaModel_fast_forward( pass for idx, decoder_layer in enumerate(self.layers): - if output_hidden_states: - all_hidden_states += (hidden_states,) + if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: @@ -636,22 +642,15 @@ def custom_forward(*inputs): use_cache=use_cache, padding_mask=padding_mask, ) + pass hidden_states = layer_outputs[0] - - if use_cache: - next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) - - if output_attentions: - all_self_attns += (layer_outputs[1],) + if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + if output_attentions: all_self_attns += (layer_outputs[1],) pass - hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) - + if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) @@ -670,12 +669,29 @@ def LlamaModel_fast_forward_inference( self, input_ids, past_key_values, + attention_mask = None, ): # Fix out of bounds tokenization input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + + # Must use attention mask for batched processing + sliding_window = getattr(self.config, "sliding_window", None) + if (sliding_window is not None and seq_len >= sliding_window) or (bsz != 1): + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = sliding_window, + ) + else: + 
attention_mask = None + pass next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): @@ -686,7 +702,9 @@ def LlamaModel_fast_forward_inference( decoder_layer.self_attn, hidden_states, past_key_values[idx], - None, + position_ids = None, + do_prefill = False, + attention_mask = attention_mask, ) hidden_states += residual @@ -726,11 +744,12 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if past_key_values is not None and hasattr(self.model.layers[0].self_attn, "paged_attention"): + if False:#past_key_values is not None and hasattr(self.model.layers[0].self_attn, "paged_attention"): outputs = fast_forward_inference( self.model, input_ids, past_key_values, + attention_mask = attention_mask, ) else: causal_mask = xformers.attn_bias.LowerTriangularMask() diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index c609d2ecad..fcc1ab62e0 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -200,12 +200,13 @@ def MistralForCausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - if past_key_values is not None and \ + if False and past_key_values is not None and \ hasattr(self.model.layers[0].self_attn, "paged_attention"): outputs = LlamaModel_fast_forward_inference( self.model, input_ids, past_key_values, + attention_mask = attention_mask, ) else: outputs = self.model( From 8bcb0b37038469eab0de8305a937c9a7b29fd304 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 2 Apr 2024 13:18:31 +1100 Subject: [PATCH 0171/1088] Revert "Temp fix batch inference (#294)" This reverts commit 3f8159820493d33b040eddbf054d593cf2e065c3. 
--- unsloth/models/gemma.py | 38 +++++++-------- unsloth/models/llama.py | 97 ++++++++++++++++----------------------- unsloth/models/mistral.py | 3 +- 3 files changed, 57 insertions(+), 81 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 5135766d8b..6bd8a6f345 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -45,9 +45,10 @@ def fast_geglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape mlp_size = self.config.intermediate_size + temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X) - up = fast_linear_forward(self. up_proj, X) + gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) + up = fast_linear_forward(self. up_proj, X, out = temp[1]) gate = torch_nn_functional_gelu(gate, approximate = "tanh") gate *= up @@ -82,30 +83,20 @@ def GemmaDecoderLayer_fast_forward( padding_mask: Optional[torch.LongTensor] = None, *args, **kwargs, ): - if use_cache: #past_key_value is not None: + if past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference_gemma(self.input_layernorm, hidden_states, out_weight) - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + self.self_attn, + hidden_states, + past_key_value, + position_ids, + do_prefill = do_prefill, ) - # hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - # self.self_attn, - # hidden_states, - # past_key_value, - 
# position_ids, - # do_prefill = do_prefill, - # ) hidden_states += residual # Fully Connected @@ -138,8 +129,13 @@ def GemmaDecoderLayer_fast_forward( pass outputs = (hidden_states,) - if output_attentions: outputs += (self_attn_weights,) - if use_cache: outputs += (present_key_value,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + return outputs pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b802403abc..bfbd10eb89 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,7 +74,7 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt -KV_CACHE_INCREMENT = 256 # KV Cache update size +KV_CACHE_INCREMENT = 128 # KV Cache update size def LlamaAttention_fast_forward_inference( self, @@ -82,7 +82,6 @@ def LlamaAttention_fast_forward_inference( past_key_value: Optional[Tuple[torch.Tensor]], position_ids, do_prefill = False, - attention_mask = None, ): """ https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 @@ -168,12 +167,12 @@ def LlamaAttention_fast_forward_inference( Kn *= cos; Kn.addcmul_(RH_K, sin); # New KV cache - Kn = torch.cat([K1, Kn], dim = 2) - Vn = torch.cat([V1, Vn], dim = 2) - # self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) - # self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) - # Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) - # Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) # Handle sliding windows sliding_window = getattr(self.config, "sliding_window", None) @@ -201,7 +200,6 @@ def 
LlamaAttention_fast_forward_inference( # Attention A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) A *= self.scalar - if attention_mask is not None: A += attention_mask # Must add attention_mask for batched A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch.matmul(A, Vnn, out = Qn) A = A.transpose(1, 2) @@ -217,9 +215,10 @@ def fast_swiglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape mlp_size = self.config.intermediate_size + temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X) - up = fast_linear_forward(self. up_proj, X) + gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) + up = fast_linear_forward(self. up_proj, X, out = temp[1]) gate = torch_nn_functional_silu(gate, inplace = True) gate *= up @@ -376,30 +375,19 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - if use_cache: #past_key_value is not None: + if past_key_value is not None: do_prefill = not hasattr(self.self_attn, "paged_attention") # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + self.self_attn, + hidden_states, + past_key_value, + position_ids, + do_prefill = do_prefill, ) - # hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - # self.self_attn, - # hidden_states, - # past_key_value, - # position_ids, - # do_prefill = 
do_prefill, - # attention_mask = attention_mask, - # ) hidden_states += residual # Fully Connected @@ -430,8 +418,13 @@ def LlamaDecoderLayer_fast_forward( pass outputs = (hidden_states,) - if output_attentions: outputs += (self_attn_weights,) - if use_cache: outputs += (present_key_value,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + return outputs pass @@ -609,8 +602,9 @@ def LlamaModel_fast_forward( pass for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) - if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: @@ -642,15 +636,22 @@ def custom_forward(*inputs): use_cache=use_cache, padding_mask=padding_mask, ) - pass hidden_states = layer_outputs[0] - if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) - if output_attentions: all_self_attns += (layer_outputs[1],) + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) pass + hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) - if output_hidden_states: all_hidden_states += (hidden_states,) + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) @@ -669,29 +670,12 @@ def LlamaModel_fast_forward_inference( self, input_ids, past_key_values, - attention_mask = None, ): # Fix out of bounds tokenization input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) - bsz, 
q_len, hd = hidden_states.shape - seq_len = past_key_values[0][0].shape[-2] - - # Must use attention mask for batched processing - sliding_window = getattr(self.config, "sliding_window", None) - if (sliding_window is not None and seq_len >= sliding_window) or (bsz != 1): - attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( - attention_mask, - (bsz, q_len), - hidden_states, - seq_len, - sliding_window = sliding_window, - ) - else: - attention_mask = None - pass next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): @@ -702,9 +686,7 @@ def LlamaModel_fast_forward_inference( decoder_layer.self_attn, hidden_states, past_key_values[idx], - position_ids = None, - do_prefill = False, - attention_mask = attention_mask, + None, ) hidden_states += residual @@ -744,12 +726,11 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if False:#past_key_values is not None and hasattr(self.model.layers[0].self_attn, "paged_attention"): + if past_key_values is not None and hasattr(self.model.layers[0].self_attn, "paged_attention"): outputs = fast_forward_inference( self.model, input_ids, past_key_values, - attention_mask = attention_mask, ) else: causal_mask = xformers.attn_bias.LowerTriangularMask() diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index fcc1ab62e0..c609d2ecad 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -200,13 +200,12 @@ def MistralForCausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - if False and past_key_values is not None and \ + if past_key_values is not None and \ hasattr(self.model.layers[0].self_attn, "paged_attention"): outputs = LlamaModel_fast_forward_inference( self.model, input_ids, past_key_values, - attention_mask = attention_mask, ) else: outputs = self.model( From a7217b2e929400f981cee87a6d1e61baa0bd482d Mon Sep 17 00:00:00 2001 From: Daniel 
Han Date: Wed, 3 Apr 2024 04:56:11 +1100 Subject: [PATCH 0172/1088] Fix batched inference (#298) * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update 
save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * 
Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --- unsloth/kernels/utils.py | 27 +++---- unsloth/models/gemma.py | 83 +++++++++---------- unsloth/models/llama.py | 165 ++++++++++++++++++++++++-------------- unsloth/models/mistral.py | 7 +- 4 files changed, 153 insertions(+), 129 deletions(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 2ed2a68558..76f0a9854a 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -118,7 +118,7 @@ def fast_gemv(X, W, quant_state, out = None): if quant_state is None: return torch.matmul(X, W, out = out) # For fast X @ W where seq_len == 1 # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 - bsz, q_len, hd = X.shape + _, q_len, hd = X.shape # assert(q_len == 1) if type(quant_state) is not list: @@ -142,10 +142,10 @@ def 
fast_gemv(X, W, quant_state, out = None): bout = shape[0] if out is None: - out = torch.empty((bsz, 1, bout,), dtype = dtype, device = "cuda") - else: - assert(out.shape == (bsz, 1, bout,)) - pass + out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda") + # else: + # assert(out.shape == (1, 1, bout,)) + # pass n = 1 m = shape[0] @@ -171,15 +171,9 @@ def fast_gemv(X, W, quant_state, out = None): fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ cgemm_4bit_inference_naive_bf16 - ptr_W = get_ptr(W) - ptr_absmax = get_ptr(absmax) - ptr_stats = get_ptr(stats) - blocksize = ctypes.c_int32(blocksize) - - for row in range(bsz): - fx(m, n, k, get_ptr(X[row]), ptr_W, ptr_absmax, ptr_stats, get_ptr(out[row]), - lda, ldb, ldc, blocksize) - pass + blocksize = ctypes.c_int32(blocksize) + fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), + lda, ldb, ldc, blocksize) return out pass @@ -189,12 +183,11 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) - bsz, _, in_dim = X.shape + bsz, q_len, in_dim = X.shape if W_quant is None: out = torch.matmul(X, W.t(), out = out) - elif bsz <= 2: - # Only batches of 2 are faster with Gemv + elif bsz == 1 and q_len == 1: out = fast_gemv(X, W, W_quant, out = out) else: W = fast_dequantize(W.t(), W_quant) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 6bd8a6f345..0be28da927 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -44,11 +44,11 @@ def fast_geglu_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) bsz, _, hd = X.shape - mlp_size = self.config.intermediate_size - temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + # mlp_size = self.config.intermediate_size + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) - up = 
fast_linear_forward(self. up_proj, X, out = temp[1]) + gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) + up = fast_linear_forward(self. up_proj, X)#, out = temp[1]) gate = torch_nn_functional_gelu(gate, approximate = "tanh") gate *= up @@ -58,18 +58,6 @@ def fast_geglu_inference(self, X): pass -def fast_rms_layernorm_inference_gemma(self, X, out_weight): - XX = X.to(torch.float32) - variance = XX.square().mean(-1, keepdim = True) - variance += self.variance_epsilon - XX *= variance.rsqrt_() - out_weight[:] = self.weight - out_weight += 1.0 - XX *= out_weight - return XX.to(X.dtype) -pass - - # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 def GemmaDecoderLayer_fast_forward( self, @@ -83,19 +71,21 @@ def GemmaDecoderLayer_fast_forward( padding_mask: Optional[torch.LongTensor] = None, *args, **kwargs, ): - if past_key_value is not None: - do_prefill = not hasattr(self.self_attn, "paged_attention") + if use_cache: #past_key_value is not None: out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") # Self Attention residual = hidden_states hidden_states = fast_rms_layernorm_inference_gemma(self.input_layernorm, hidden_states, out_weight) - hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - self.self_attn, - hidden_states, - past_key_value, - position_ids, - do_prefill = do_prefill, + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, ) hidden_states += residual @@ -107,7 +97,6 @@ def GemmaDecoderLayer_fast_forward( else: residual = hidden_states hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states, gemma = True) - # hidden_states = 
self.input_layernorm(hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, causal_mask=causal_mask, @@ -123,19 +112,13 @@ def GemmaDecoderLayer_fast_forward( # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states, gemma = True) - # hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states pass outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) return outputs pass @@ -148,31 +131,37 @@ def GemmaModel_fast_forward_inference( self, input_ids, past_key_values, + position_ids, + attention_mask = None, ): - # Fix out of bounds tokenization - input_ids = input_ids[:,:self.max_seq_length] out_weight = torch.empty_like(self.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") - - hidden_states = self.embed_tokens(input_ids) + input_ids = input_ids[:,:self.max_seq_length] + hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 hidden_states *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype) + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + attention_mask = _prepare_4d_causal_attention_mask(attention_mask, (bsz, q_len), hidden_states, seq_len,) + pass + next_decoder_cache = [] - for idx, decoder_layer in enumerate(self.layers): - # Self Attention + for idx, decoder_layer in enumerate(self.model.layers): residual = hidden_states hidden_states = 
fast_rms_layernorm_inference_gemma(decoder_layer.input_layernorm, hidden_states, out_weight) hidden_states, present_key_value = LlamaAttention_fast_forward_inference( decoder_layer.self_attn, - hidden_states, - past_key_values[idx], - None, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = attention_mask, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), ) hidden_states += residual - # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.post_attention_layernorm, hidden_states, out_weight) hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states) @@ -180,13 +169,13 @@ def GemmaModel_fast_forward_inference( next_decoder_cache.append(present_key_value) pass - hidden_states = fast_rms_layernorm_inference_gemma(self.norm, hidden_states, out_weight) + hidden_states = fast_rms_layernorm_inference_gemma(self.model.norm, hidden_states, out_weight) return BaseModelOutputWithPast( last_hidden_state = hidden_states, - past_key_values = next_decoder_cache, - hidden_states = [], - attentions = [], + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], ) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bfbd10eb89..dc66059b0f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,7 +74,8 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt -KV_CACHE_INCREMENT = 128 # KV Cache update size +KV_CACHE_INCREMENT = 256 # KV Cache update size +torch_nn_functional_softmax = torch.nn.functional.softmax def LlamaAttention_fast_forward_inference( self, @@ -82,6 +83,7 @@ def LlamaAttention_fast_forward_inference( past_key_value: Optional[Tuple[torch.Tensor]], position_ids, do_prefill = False, + attention_mask = None, ): """ https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 @@ -138,6 +140,7 @@ 
def LlamaAttention_fast_forward_inference( self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda") self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda") self.scalar = 1.0 / math_sqrt(self.head_dim) + self.half_head_dim = head_dim // 2 elif kv_seq_len >= self.paged_attention.shape[0]: self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) self.paged_attention_K = self.paged_attention[:,0] @@ -154,17 +157,23 @@ def LlamaAttention_fast_forward_inference( # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) - cos = self.rotary_emb.cos_cached[seq_len] - sin = self.rotary_emb.sin_cached[seq_len] - h = head_dim // 2 + cos = self.rotary_emb.cos_cached[position_ids].unsqueeze(1) + sin = self.rotary_emb.sin_cached[position_ids].unsqueeze(1) + h = self.half_head_dim RH_Q = self.RH_Q - RH_Q[:,:,:,:h] = Qn[:,:,:,h:]; RH_Q[:,:,:,h:] = Qn[:,:,:,:h]; torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]); - Qn *= cos; Qn.addcmul_(RH_Q, sin); + RH_Q[:,:,:,:h] = Qn[:,:,:,h:] + RH_Q[:,:,:,h:] = Qn[:,:,:,:h] + torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + Qn *= cos + Qn.addcmul_(RH_Q, sin) RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda") - RH_K[:,:,:,:h] = Kn[:,:,:,h:]; RH_K[:,:,:,h:] = Kn[:,:,:,:h]; torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]); - Kn *= cos; Kn.addcmul_(RH_K, sin); + RH_K[:,:,:,:h] = Kn[:,:,:,h:] + RH_K[:,:,:,h:] = Kn[:,:,:,:h] + torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + Kn *= cos + Kn.addcmul_(RH_K, sin) # New KV cache # Kn = torch.cat([K1, Kn], dim = 2) @@ -198,10 +207,15 @@ def LlamaAttention_fast_forward_inference( # pass # Attention - A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) - A *= self.scalar - A[:] = torch.nn.functional.softmax(A, dim = -1, dtype = 
torch.float32)#.to(A.dtype) - A = torch.matmul(A, Vnn, out = Qn) + if bsz == 1: + A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + A *= self.scalar + # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched + A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch.matmul(A, Vnn, out = Qn) + else: + A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) + pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) @@ -214,11 +228,11 @@ def fast_swiglu_inference(self, X): # gate = self.gate_proj(X) # up = self.up_proj(X) bsz, _, hd = X.shape - mlp_size = self.config.intermediate_size - temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + # mlp_size = self.config.intermediate_size + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") - gate = fast_linear_forward(self.gate_proj, X, out = temp[0]) - up = fast_linear_forward(self. up_proj, X, out = temp[1]) + gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) + up = fast_linear_forward(self. 
up_proj, X)#, out = temp[1]) gate = torch_nn_functional_silu(gate, inplace = True) gate *= up @@ -240,6 +254,24 @@ def fast_rms_layernorm_inference(self, X): pass +def fast_rms_layernorm_inference_gemma(self, X, out_weight = None): + XX = X.to(torch.float32) + variance = XX.square().mean(-1, keepdim = True) + variance += self.variance_epsilon + XX *= variance.rsqrt_() + + if out_weight is None: + out_weight = self.weight + 1.0 + else: + out_weight[:] = self.weight + out_weight += 1.0 + pass + + XX *= out_weight + return XX.to(X.dtype) +pass + + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, @@ -375,18 +407,18 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - if past_key_value is not None: - do_prefill = not hasattr(self.self_attn, "paged_attention") - - # Self Attention + if use_cache: residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) - hidden_states, present_key_value = LlamaAttention_fast_forward_inference( - self.self_attn, - hidden_states, - past_key_value, - position_ids, - do_prefill = do_prefill, + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, ) hidden_states += residual @@ -418,13 +450,8 @@ def LlamaDecoderLayer_fast_forward( pass outputs = (hidden_states,) - - if output_attentions: - outputs += (self_attn_weights,) - - if use_cache: - outputs += (present_key_value,) - + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) return outputs pass @@ -602,9 +629,8 @@ def 
LlamaModel_fast_forward( pass for idx, decoder_layer in enumerate(self.layers): - if output_hidden_states: - all_hidden_states += (hidden_states,) + if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: @@ -636,23 +662,24 @@ def custom_forward(*inputs): use_cache=use_cache, padding_mask=padding_mask, ) + pass hidden_states = layer_outputs[0] - - if use_cache: - next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) - - if output_attentions: - all_self_attns += (layer_outputs[1],) + if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + if output_attentions: all_self_attns += (layer_outputs[1],) pass - - hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) - # add hidden states from the last decoder layer - if output_hidden_states: - all_hidden_states += (hidden_states,) + # Final layernorm + if use_cache: + hidden_states = (fast_rms_layernorm_inference_gemma if IS_GEMMA else fast_rms_layernorm_inference)\ + (self.norm, hidden_states) + else: + hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) + pass + if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None + if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( @@ -665,32 +692,44 @@ def custom_forward(*inputs): # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 -# @torch.inference_mode def LlamaModel_fast_forward_inference( self, input_ids, past_key_values, + position_ids, + attention_mask = None, ): - # Fix out of bounds tokenization input_ids = input_ids[:,:self.max_seq_length] - - hidden_states = self.embed_tokens(input_ids) + hidden_states = 
self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = getattr(self.config, "sliding_window", None), + ) + else: + attention_mask = None + pass next_decoder_cache = [] - for idx, decoder_layer in enumerate(self.layers): - # Self Attention + for idx, decoder_layer in enumerate(self.model.layers): residual = hidden_states hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) hidden_states, present_key_value = LlamaAttention_fast_forward_inference( decoder_layer.self_attn, - hidden_states, - past_key_values[idx], - None, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = attention_mask, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), ) hidden_states += residual - # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) @@ -698,13 +737,13 @@ def LlamaModel_fast_forward_inference( next_decoder_cache.append(present_key_value) pass - hidden_states = fast_rms_layernorm_inference(self.norm, hidden_states) + hidden_states = fast_rms_layernorm_inference(self.model.norm, hidden_states) return BaseModelOutputWithPast( last_hidden_state = hidden_states, - past_key_values = next_decoder_cache, - hidden_states = [], - attentions = [], + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], ) pass @@ -726,11 +765,13 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if past_key_values is not None and hasattr(self.model.layers[0].self_attn, 
"paged_attention"): + if past_key_values is not None: outputs = fast_forward_inference( - self.model, + self, input_ids, past_key_values, + position_ids = position_ids, + attention_mask = attention_mask, ) else: causal_mask = xformers.attn_bias.LowerTriangularMask() diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index c609d2ecad..e867ceef23 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -200,12 +200,13 @@ def MistralForCausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - if past_key_values is not None and \ - hasattr(self.model.layers[0].self_attn, "paged_attention"): + if past_key_values is not None: outputs = LlamaModel_fast_forward_inference( - self.model, + self, input_ids, past_key_values, + position_ids = position_ids, + attention_mask = attention_mask, ) else: outputs = self.model( From d3a33a0dc3cabd3b3c0dba0255fb4919db44e3b5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 3 Apr 2024 05:38:31 +1100 Subject: [PATCH 0173/1088] Nightly (#299) * Update llama.py * Update llama.py * Update gemma.py * correct_dtype * Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update llama.py * Hotfix - fix DoRA, Gemma prompt template (#202) (#203) * Update save.py * saving * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update __init__.py * Update save.py * Update save.py * Update save.py * save * trainer * spaces * original * Gemma * Update pyproject.toml * Update mapper.py * Update fast_lora.py * FastGemmaModel * model_type * Update llama.py * Update llama.py * Update gemma.py * Update gemma.py * Update 
gemma.py * Update llama.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * gemma * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update fast_lora.py * Update fast_lora.py * Fast CE Loss * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * CE * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update geglu.py * Update cross_entropy_loss.py * revert * Update llama.py * Update llama.py * norm * Update gemma.py * Update gemma.py * position_ids * Update gemma.py * Update gemma.py * pos * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * revert * revert * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update llama.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * 
Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * rope * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * llama * Update llama.py * gemma * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update save.py * RoPE * Update llama.py * Update llama.py * Update llama.py * Update gemma.py * correct_dtype 
* Update gemma.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Chat Templates * Update README.md * Update README.md * Update llama.py * DoRA * Update _utils.py * Update chat_templates.py * Update pyproject.toml * Small fixes * Update pyproject.toml * Approx gelu * Update geglu.py * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update 
rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * 
overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul --- unsloth/kernels/__init__.py | 2 +- unsloth/kernels/fast_lora.py | 27 +-------------------------- unsloth/kernels/utils.py | 29 +++++++++++++++++++++++++++-- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 8ff255e4a4..cb04377b1a 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -30,4 +30,4 @@ apply_lora_qkv, apply_lora_o, ) -from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward +from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 6568bba681..aba44f0214 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -13,32 +13,7 @@ # limitations under the License. 
import torch -from .utils import fast_dequantize, QUANT_STATE, get_lora_parameters - - -def matmul_lora(X, W, W_quant, A, B, s, out = None): - dtype = X.dtype - W = fast_dequantize(W.t(), W_quant) - - if X.dim() == 3: - batch, seq_len, d = X.shape - X = X.view(-1, X.shape[-1]) - reshape = True - else: - reshape = False - pass - - out = torch.matmul(X, W, out = out) - if W_quant is not None: del W - - if A is not None: - # LoRA is enabled - A, B = A.t(), B.t() - out += (X @ A.to(dtype)) @ (s * B.to(dtype)) - pass - - return out.view(batch, seq_len, -1) if reshape else out -pass +from .utils import fast_dequantize, QUANT_STATE, get_lora_parameters, matmul_lora class LoRA_MLP(torch.autograd.Function): diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 76f0a9854a..1f2085df5a 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -182,8 +182,8 @@ def fast_gemv(X, W, quant_state, out = None): def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) - bsz, q_len, in_dim = X.shape + if q_len != 1: return matmul_lora(X, W, W_quant, lora_A, lora_B, lora_S) if W_quant is None: out = torch.matmul(X, W.t(), out = out) @@ -203,7 +203,7 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): lora_A._fast_lora = lora_A.to(dtype) lora_B._fast_lora = lora_B.to(dtype) pass - + if bsz == 1: out = out.view(out_dim) temp_lora = torch.mv(lora_A._fast_lora, X.ravel(), out = temp_lora) @@ -218,3 +218,28 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): return out pass + + +def matmul_lora(X, W, W_quant, A, B, s, out = None): + dtype = X.dtype + W = fast_dequantize(W.t(), W_quant) + + if X.dim() == 3: + batch, seq_len, d = X.shape + X = X.view(-1, X.shape[-1]) + reshape = True + else: + reshape = False + pass + + out = torch.matmul(X, W, out = out) + if W_quant is not None: del W + + if A is not None: + # LoRA is enabled + A, B = A.t(), B.t() + out += (X @ 
A.to(dtype)) @ (s * B.to(dtype)) + pass + + return out.view(batch, seq_len, -1) if reshape else out +pass From 09a2fabd712c28147846c23a626781d1fccdb38d Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 4 Apr 2024 22:46:32 +1100 Subject: [PATCH 0174/1088] Update gemma.py eabdullin --- unsloth/models/gemma.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 0be28da927..0259ab7889 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -134,7 +134,7 @@ def GemmaModel_fast_forward_inference( position_ids, attention_mask = None, ): - out_weight = torch.empty_like(self.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") + out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) From 56f34eef1c60f4f634673711b89acaac04721b74 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 5 Apr 2024 03:51:53 +1100 Subject: [PATCH 0175/1088] Gemma inference fix --- pyproject.toml | 2 +- unsloth/models/gemma.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 324e01157b..9c862a2617 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ huggingface = [ "tyro", "transformers>=4.38.2", "datasets>=2.16.0", - "sentencepiece", + "sentencepiece>=0.2.0", "tqdm", "psutil", "wheel>=0.42.0", diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 0259ab7889..c0cce75e2f 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -145,7 +145,12 @@ def GemmaModel_fast_forward_inference( bsz, q_len, hd = hidden_states.shape seq_len = past_key_values[0][0].shape[-2] if bsz != 1: - attention_mask = _prepare_4d_causal_attention_mask(attention_mask, (bsz, q_len), hidden_states, 
seq_len,) + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) pass next_decoder_cache = [] From a3853e2995abae191e3566fe27b928507f4b1dc9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 6 Apr 2024 04:31:24 +1100 Subject: [PATCH 0176/1088] Bug fixes (#306) * Approx gelu * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update geglu.py * Update gemma.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * 
Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * 
Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. * Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side --- unsloth/models/_utils.py | 54 ++++++++++++++++++++++++++++++++-- unsloth/models/llama.py | 47 +++++++++++++++++++++++------ unsloth/models/mistral.py | 4 +-- unsloth/save.py | 60 ++++++++++++++++++++++++++++++-------- unsloth/tokenizer_utils.py | 17 +---------- 5 files changed, 140 insertions(+), 42 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8cdb5e384c..f4268e3493 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -30,7 +30,7 @@ import os import psutil -__version__ = "2024.3" +__version__ = "2024.4" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() @@ -70,12 +70,13 @@ "platform_system", "patch_tokenizer", "get_statistics", + "Offloaded_Gradient_Checkpointer", ] def prepare_model_for_kbit_training( model : Any, - use_gradient_checkpointing : bool = True, + use_gradient_checkpointing : Optional = True, use_reentrant : Optional[bool] = True, ) -> Any: """ @@ -101,9 +102,23 @@ def prepare_model_for_kbit_training( param.requires_grad_(False) pass - if use_gradient_checkpointing: + # Gradient checkpointing! + if use_gradient_checkpointing == "offloaded": + + # Saves VRAM! 
+ original_model = model + while hasattr(original_model, "model"): + original_model._offloaded_gradient_checkpointing = True + original_model = original_model.model + pass + original_model._offloaded_gradient_checkpointing = True + model.gradient_checkpointing_enable() + elif use_gradient_checkpointing == True: + model.gradient_checkpointing_enable() + pass + # If use_reentrant = True which is the Pytorch default, we just make the input requires_grad. if use_reentrant: if hasattr(model, "enable_input_require_grads"): @@ -179,6 +194,7 @@ def get_statistics(): try: from huggingface_hub import hf_hub_download from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + import psutil n_cpus = psutil.cpu_count(logical = False) keynames = "\n" + "\n".join(os.environ.keys()) @@ -291,3 +307,35 @@ def prepare_n_gradient_checkpoints( _model._gradient_checkpointing_boundaries = boundaries _model._gradient_checkpointing_use_reentrant = use_reentrant pass + + +class Offloaded_Gradient_Checkpointer(torch.autograd.Function): + """ + Saves VRAM by smartly offloading to RAM. + Tiny hit to performance, since we mask the movement via non blocking calls. 
+ [TODO] Load the backward pass earlier + """ + @staticmethod + @torch.cuda.amp.custom_fwd + def forward(ctx, forward_function, hidden_states, *args): + saved_hidden_states = hidden_states.to("cpu", non_blocking = True) + with torch.no_grad(): + (output,) = forward_function(hidden_states, *args) + ctx.save_for_backward(saved_hidden_states) + ctx.forward_function = forward_function + ctx.args = args + return output + pass + + @staticmethod + @torch.cuda.amp.custom_bwd + def backward(ctx, dY): + (hidden_states,) = ctx.saved_tensors + hidden_states = hidden_states.to("cuda", non_blocking = True).detach() + hidden_states.requires_grad = True + with torch.enable_grad(): + (output,) = ctx.forward_function(hidden_states, *ctx.args) + torch.autograd.backward(output, dY) + return (None, hidden_states.grad,) + (None,)*len(ctx.args) + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index dc66059b0f..a7ade9fc32 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -628,19 +628,42 @@ def LlamaModel_fast_forward( boundaries = None pass + # Check checkpointing method + gradient_checkpointing = False + offloaded_gradient_checkpointing = False + + if (self.gradient_checkpointing and self.training and not use_cache): + + gradient_checkpointing = True + + if output_attentions is False and hasattr(self, "_offloaded_gradient_checkpointing"): + offloaded_gradient_checkpointing = True + pass + + # Go through every layer! 
for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None - if self.gradient_checkpointing and self.training: + if offloaded_gradient_checkpointing: + hidden_states = Offloaded_Gradient_Checkpointer.apply( + decoder_layer, + hidden_states, + causal_mask, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + elif gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): - # None for past_key_value - return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask) - + return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask) return custom_forward + pass layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), @@ -648,9 +671,11 @@ def custom_forward(*inputs): causal_mask, attention_mask, position_ids, - use_reentrant=True, - preserve_rng_state=False, + use_reentrant = True, + preserve_rng_state = False, ) + hidden_states = layer_outputs[0] + else: layer_outputs = decoder_layer( hidden_states, @@ -662,9 +687,9 @@ def custom_forward(*inputs): use_cache=use_cache, padding_mask=padding_mask, ) + hidden_states = layer_outputs[0] pass - hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) pass @@ -801,12 +826,12 @@ def _CausalLM_fast_forward( hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape + lm_head = self.lm_head.weight if bsz == 1 and q_len == 1: - lm_head = self.lm_head.weight logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) else: - logits = self.lm_head(hidden_states) + logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass logits = logits.to(self.config.torch_dtype) @@ -1402,6 +1427,8 @@ 
def get_peft_model( "We shall do it for you!" ) train_lm_head = True + if modules_to_save is None: modules_to_save = ["lm_head"] + else: modules_to_save.append("lm_head") elif module == "embed_tokens": logger.warning_once( @@ -1409,6 +1436,8 @@ def get_peft_model( "We shall do it for you!" ) train_embed_tokens = True + if modules_to_save is None: modules_to_save = ["embed_tokens"] + else: modules_to_save.append("embed_tokens") else: assert(module in accepted_modules) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e867ceef23..87f5c85ad1 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -225,12 +225,12 @@ def MistralForCausalLM_fast_forward( hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape + lm_head = self.lm_head.weight if bsz == 1 and q_len == 1: - lm_head = self.lm_head.weight logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) else: - logits = self.lm_head(hidden_states) + logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass logits = logits.to(self.config.torch_dtype) diff --git a/unsloth/save.py b/unsloth/save.py index a08a744077..49d88bffc0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -33,9 +33,13 @@ "patch_saving_functions", ] -# Check Kaggle -IS_A_KAGGLE_ENVIRONMENT = "KAGGLE_CONTAINER_NAME" in os.environ +# Check environments +keynames = "\n" + "\n".join(os.environ.keys()) +IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames +IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +del keynames +# Weights LLAMA_WEIGHTS = ( "self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj", "mlp.gate_proj", "mlp.up_proj", "mlp.down_proj", @@ -177,6 +181,9 @@ def unsloth_save_model( temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): + if token is None and "HF_TOKEN" in os.environ: + token = os.environ["HF_TOKEN"] + if commit_message is None: commit_message = "" if "Unsloth" 
not in commit_message: commit_message += " (Trained with Unsloth)" @@ -291,6 +298,10 @@ def unsloth_save_model( tags = tags, ) if tokenizer is not None: + # Set padding side to left for inference + old_padding_side = tokenizer.padding_side + tokenizer.padding_side = "left" + getattr(tokenizer, "original_push_to_hub", tokenizer.push_to_hub)\ ( repo_id = save_directory, @@ -305,6 +316,9 @@ def unsloth_save_model( commit_description = commit_description, tags = tags, ) + + # Revert back padding side + tokenizer.padding_side = old_padding_side pass if hasattr(model, "config"): @@ -361,7 +375,16 @@ def unsloth_save_model( if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") + + # Set padding side to left for inference + old_padding_side = tokenizer.padding_side + tokenizer.padding_side = "left" + tokenizer.save_pretrained(**tokenizer_save_settings) + + # Revert back padding side + tokenizer.padding_side = old_padding_side + print(" Done.") else: print() @@ -449,12 +472,12 @@ def unsloth_save_model( os.makedirs(temporary_location) pass - # Check if Kaggle, since only 20GB of Disk space allowed. - if IS_A_KAGGLE_ENVIRONMENT: + # Check if Kaggle or Colab, since only 20GB of Disk space allowed. + if IS_KAGGLE_ENVIRONMENT or IS_COLAB_ENVIRONMENT: # We free up 4GB of space logger.warning_once( - "Unsloth: Kaggle only allows 20GB of disk space. We need to delete the downloaded\n"\ - "model which will save 4GB of disk space, allowing you to save on Kaggle." + "Unsloth: Kaggle/Colab has limited disk space. We need to delete the downloaded\n"\ + "model which will save 4-16GB of disk space, allowing you to save on Kaggle/Colab." 
) _free_cached_model(internal_model) pass @@ -462,7 +485,10 @@ def unsloth_save_model( # HF also uses a OrderedDict from collections import OrderedDict state_dict = OrderedDict() - state_dict["model.embed_tokens.weight"] = internal_model.model.embed_tokens.weight.data + + torch_dtype = model.config.torch_dtype + # Check modules to save float32 dtype + state_dict["model.embed_tokens.weight"] = internal_model.model.embed_tokens.weight.data.to(torch_dtype) max_vram = int(torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage) @@ -495,7 +521,8 @@ def unsloth_save_model( pass state_dict["model.norm.weight"] = internal_model.model.norm.weight.data - state_dict["lm_head.weight"] = internal_model.lm_head.weight.data + # Check for modules_to_save float32 dtype + state_dict["lm_head.weight"] = internal_model.lm_head.weight.data.to(torch_dtype) # All tensors MUST be type torch.Tensor and not torch.nn.parameter.Parameter for key, value in state_dict.items(): @@ -552,7 +579,16 @@ def unsloth_save_model( # Save tokenizer if tokenizer is not None: print("Unsloth: Saving tokenizer...", end = "") + + # Set padding side to left for inference + old_padding_side = tokenizer.padding_side + tokenizer.padding_side = "left" + tokenizer.save_pretrained(**tokenizer_save_settings) + + # Revert back padding side + tokenizer.padding_side = old_padding_side + print(" Done.") else: print() @@ -1216,7 +1252,7 @@ def unsloth_save_pretrained_gguf( # Non blocking install GGUF first if not os.path.exists("llama.cpp"): - if IS_A_KAGGLE_ENVIRONMENT: + if IS_KAGGLE_ENVIRONMENT: # Kaggle is weird - no blocking installs, and no CUDA? python_install = install_python_non_blocking(["gguf", "protobuf"]) python_install.wait() @@ -1237,7 +1273,7 @@ def unsloth_save_pretrained_gguf( makefile = None except: # Retry by recloning llama.cpp - if IS_A_KAGGLE_ENVIRONMENT: + if IS_KAGGLE_ENVIRONMENT: # Kaggle is weird - no blocking installs, and no CUDA? 
python_install = install_python_non_blocking(["gguf", "protobuf"]) python_install.wait() @@ -1336,7 +1372,7 @@ def unsloth_push_to_hub_gguf( # Non blocking install GGUF first if not os.path.exists("llama.cpp"): - if IS_A_KAGGLE_ENVIRONMENT: + if IS_KAGGLE_ENVIRONMENT: # Kaggle is weird - no blocking installs, and no CUDA? python_install = install_python_non_blocking(["gguf", "protobuf"]) python_install.wait() @@ -1357,7 +1393,7 @@ def unsloth_push_to_hub_gguf( makefile = None except: # Retry by recloning llama.cpp - if IS_A_KAGGLE_ENVIRONMENT: + if IS_KAGGLE_ENVIRONMENT: # Kaggle is weird - no blocking installs, and no CUDA? python_install = install_python_non_blocking(["gguf", "protobuf"]) python_install.wait() diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 00e937c97c..46de1c98ad 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -186,9 +186,6 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): pass -global sentencepiece_model_pb2 -sentencepiece_model_pb2 = None - def fix_sentencepiece_tokenizer( old_tokenizer, new_tokenizer, @@ -197,19 +194,7 @@ def fix_sentencepiece_tokenizer( ): # From https://github.com/google/sentencepiece/issues/121 # We need to manually edit the sentencepiece tokenizer! - global sentencepiece_model_pb2 - if sentencepiece_model_pb2 is None: - try: - import sentencepiece.sentencepiece_model_pb2 as _sentencepiece_model_pb2 - sentencepiece_model_pb2 = _sentencepiece_model_pb2 - except: - if not os.path.exists(temporary_location): - os.system(f"git clone https://github.com/google/sentencepiece.git {temporary_location}") - os.system(f"cd {temporary_location}/src && protoc --python_out=. 
sentencepiece_model.proto") - pass - import sentencepiece.sentencepiece_model_pb2 as _sentencepiece_model_pb2 - sentencepiece_model_pb2 = _sentencepiece_model_pb2 - pass + from transformers.utils import sentencepiece_model_pb2 if not os.path.exists(temporary_location): os.makedirs(temporary_location) From f6f621ea9441b6c869c39232c17737fb956f458d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Apr 2024 03:44:45 +1000 Subject: [PATCH 0177/1088] Bug fixes (#308) * Update rms_layernorm.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update 
rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * 
overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. * Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py --- pyproject.toml | 3 +++ unsloth/models/llama.py | 44 +++++++++++++++++++++++++++--------- unsloth/save.py | 8 +++---- unsloth/tokenizer_utils.py | 46 +++++++++++++++++++++++++++++++++----- 4 files changed, 80 insertions(+), 21 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9c862a2617..e6f663a969 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ huggingface = [ "accelerate>=0.26.1", "trl>=0.7.9", "peft>=0.7.1", + "protobuf<4.0.0", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -170,6 +171,7 @@ colab-new = [ "psutil", "wheel>=0.42.0", "numpy", + "protobuf<4.0.0", ] colab-no-deps = [ "accelerate>=0.26.1", @@ -177,6 +179,7 @@ colab-no-deps = [ "peft>=0.7.1", "xformers", "bitsandbytes", + "protobuf<4.0.0", ] colab = [ "unsloth[cu121]", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a7ade9fc32..f39d34fa35 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1068,17 +1068,39 @@ def from_pretrained( # 
https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 # RoPE Scaling's max_position_embeddings must be updated max_position_embeddings = max(max_seq_length, model_max_seq_length) - model = AutoModelForCausalLM.from_pretrained( - model_name, - device_map = device_map, - torch_dtype = dtype, - quantization_config = bnb_config, - token = token, - rope_scaling = rope_scaling, - max_position_embeddings = max_position_embeddings, - trust_remote_code = trust_remote_code, - **kwargs, - ) + try: + model = AutoModelForCausalLM.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + quantization_config = bnb_config, + token = token, + rope_scaling = rope_scaling, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + **kwargs, + ) + except Exception as error: + if "rope_scaling" in str(error): + if rope_scaling is not None: + raise TypeError("Unsloth: {model_name} does not support rope_scaling.") + pass + + # Counteract missing rope_scaling + model = AutoModelForCausalLM.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + quantization_config = bnb_config, + token = token, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + **kwargs, + ) + else: + raise error + pass + pass # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name diff --git a/unsloth/save.py b/unsloth/save.py index 49d88bffc0..636a3d84e7 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -684,7 +684,7 @@ def install_llama_cpp_clone_non_blocking(): def install_llama_cpp_make_non_blocking(): - env = { **os.environ, "LLAMA_CUBLAS": "1", } + env = { **os.environ, "LLAMA_CUDA": "1", } n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean os.system("make clean -C llama.cpp") @@ -752,7 +752,7 @@ def install_llama_cpp_old(version = -10): def install_llama_cpp_blocking(use_cuda = True): - use_cuda = 
"LLAMA_CUBLAS=1" if use_cuda else "" + use_cuda = "LLAMA_CUDA=1" if use_cuda else "" commands = [ "git clone https://github.com/ggerganov/llama.cpp", @@ -937,7 +937,7 @@ def save_to_gguf( "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ "git clone https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j\n"\ + "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ "Once that's done, redo the quantization." ) pass @@ -966,7 +966,7 @@ def save_to_gguf( "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ "git clone https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUBLAS=1 make all -j\n"\ + "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ "Once that's done, redo the quantization." 
) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 46de1c98ad..2640f321d9 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -31,6 +31,12 @@ "CodeLlamaTokenizer", )) +# Check environments +keynames = "\n" + "\n".join(os.environ.keys()) +IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames +IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +del keynames + def try_fix_tokenizer(tokenizer, prepend = True): @@ -179,10 +185,19 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): if x.endswith("_token") and x.count("_") == 1 ))) all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens)) - string = "\n".join(all_special_tokens) + \ - "A quick brown fox jumps over the lazy dog!!\n\n" + \ - "".join(all_special_tokens) - return slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids + try: + string = "\n".join(all_special_tokens) + \ + "A quick brown fox jumps over the lazy dog!!\n\n" + \ + "".join(all_special_tokens) + return slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids + except: + # For eg see https://github.com/unslothai/unsloth/issues/292 + # Sometimes tokenizer has weird tokens, causing a combined tokenization to fail. 
+ # [TODO] We temporarily disable this for CodeLlama tokenizers + if slow_tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING: + return True + else: + return False pass @@ -203,7 +218,6 @@ def fix_sentencepiece_tokenizer( # First save the old tokenizer old_tokenizer.save_pretrained(temporary_location) - from sentencepiece import SentencePieceProcessor tokenizer_file = sentencepiece_model_pb2.ModelProto() tokenizer_file.ParseFromString(open(f"{temporary_location}/tokenizer.model", "rb").read()) @@ -220,7 +234,11 @@ def fix_sentencepiece_tokenizer( continue pass ids = ids[0] - tokenizer_piece = tokenizer_file.pieces[ids] + # [TODO] Hack for Starling - try except + try: + tokenizer_piece = tokenizer_file.pieces[ids] + except: + continue assert(tokenizer_piece.piece == old_token) tokenizer_piece.piece = new_token pass @@ -243,7 +261,14 @@ def load_correct_tokenizer( padding_side = "right", token = None, trust_remote_code = False, + cache_dir = "huggingface_tokenizers_cache", ): + if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT: + cache_dir = cache_dir + else: + cache_dir = None + pass + slow_tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, model_max_length = model_max_length, @@ -251,6 +276,7 @@ def load_correct_tokenizer( token = token, trust_remote_code = trust_remote_code, use_fast = False, + cache_dir = cache_dir, ) fast_tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, @@ -258,6 +284,7 @@ def load_correct_tokenizer( padding_side = padding_side, token = token, trust_remote_code = trust_remote_code, + cache_dir = cache_dir, ) fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token @@ -375,6 +402,12 @@ def check_tokenizer( ) pass + if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT: + cache_dir = "huggingface_tokenizers_cache" + else: + cache_dir = None + pass + # Try slow tokenizer which can fix things! 
tokenizer = AutoTokenizer.from_pretrained( model_name, @@ -382,6 +415,7 @@ def check_tokenizer( padding_side = padding_side, token = token, use_fast = False, + cache_dir = cache_dir, ) return check_tokenizer( model = model, From 03f2c646d470760d4f4834dc3e3b1f959265a0ba Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 8 Apr 2024 01:28:00 +1000 Subject: [PATCH 0178/1088] Fix Gemma GGUF (#311) * Update gemma.py * Fix Gemma merging * Update rms_layernorm.py * Update gemma.py * Update pyproject.toml * Layernorms * Gemma precision * Update gemma.py * sqrt * Update gemma.py * Update save.py * RoPE and Gemma precision * Update rms_layernorm.py * Fix warning * Update chat_templates.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update 
save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py 
* lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. * Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update save.py * Update save.py * checkpoint * Gemma 1.1 * more models --- unsloth/models/_utils.py | 7 ++-- unsloth/models/llama.py | 2 +- unsloth/models/mapper.py | 22 +++++++++++- unsloth/save.py | 74 ++++++++++++++++++++++++++++------------ 4 files changed, 77 insertions(+), 28 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f4268e3493..88a9e9661e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -70,7 +70,7 @@ "platform_system", "patch_tokenizer", "get_statistics", - "Offloaded_Gradient_Checkpointer", + "Unsloth_Offloaded_Gradient_Checkpointer", ] @@ -103,7 +103,7 @@ def prepare_model_for_kbit_training( pass # Gradient checkpointing! - if use_gradient_checkpointing == "offloaded": + if use_gradient_checkpointing == "unsloth": # Saves VRAM! original_model = model @@ -309,11 +309,10 @@ def prepare_n_gradient_checkpoints( pass -class Offloaded_Gradient_Checkpointer(torch.autograd.Function): +class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): """ Saves VRAM by smartly offloading to RAM. Tiny hit to performance, since we mask the movement via non blocking calls. 
- [TODO] Load the backward pass earlier """ @staticmethod @torch.cuda.amp.custom_fwd diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f39d34fa35..876ccb25a4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -647,7 +647,7 @@ def LlamaModel_fast_forward( past_key_value = past_key_values[idx] if past_key_values is not None else None if offloaded_gradient_checkpointing: - hidden_states = Offloaded_Gradient_Checkpointer.apply( + hidden_states = Unsloth_Offloaded_Gradient_Checkpointer.apply( decoder_layer, hidden_states, causal_mask, diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index dc54bfd791..90208c52b2 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -93,7 +93,27 @@ "unsloth/mistral-7b-v0.2-bnb-4bit" : ( "unsloth/mistral-7b-v0.2", "alpindale/Mistral-7B-v0.2-hf", - ) + ), + "unsloth/gemma-1.1-2b-it-bnb-4bit" : ( + "unsloth/gemma-1.1-2b-it", + "google/gemma-1.1-2b-it", + ), + "unsloth/gemma-1.1-7b-it-bnb-4bit" : ( + "unsloth/gemma-1.1-7b-it", + "google/gemma-1.1-7b-it", + ), + "unsloth/Starling-LM-7B-beta-bnb-4bit" : ( + "unsloth/Starling-LM-7B-beta", + "Nexusflow/Starling-LM-7B-beta", + ), + "unsloth/Hermes-2-Pro-Mistral-7B-bnb-4bit" : ( + "unsloth/Hermes-2-Pro-Mistral-7B", + "NousResearch/Hermes-2-Pro-Mistral-7B", + ), + "unsloth/OpenHermes-2.5-Mistral-7B-bnb-4bit" : ( + "unsloth/OpenHermes-2.5-Mistral-7B", + "teknium/OpenHermes-2.5-Mistral-7B", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/save.py b/unsloth/save.py index 636a3d84e7..d1cd7d6361 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -183,6 +183,9 @@ def unsloth_save_model( ): if token is None and "HF_TOKEN" in os.environ: token = os.environ["HF_TOKEN"] + + if token is None and "HUGGINGFACE_TOKEN" in os.environ: + token = os.environ["HUGGINGFACE_TOKEN"] if commit_message is None: commit_message = "" if "Unsloth" not in commit_message: @@ -522,7 +525,11 @@ def unsloth_save_model( state_dict["model.norm.weight"] = 
internal_model.model.norm.weight.data # Check for modules_to_save float32 dtype - state_dict["lm_head.weight"] = internal_model.lm_head.weight.data.to(torch_dtype) + + # Check for tied weights + if internal_model.model.embed_tokens.weight.data_ptr() != internal_model.lm_head.weight.data_ptr(): + state_dict["lm_head.weight"] = internal_model.lm_head.weight.data.to(torch_dtype) + pass # All tensors MUST be type torch.Tensor and not torch.nn.parameter.Parameter for key, value in state_dict.items(): @@ -731,9 +738,9 @@ def install_llama_cpp_old(version = -10): # Also don't use the GPU! commands = [ "git clone https://github.com/ggerganov/llama.cpp", - f"cd llama.cpp && git reset --hard {version} && git clean -df && "\ - f"make clean make all -j{psutil.cpu_count()*2}", - "pip install gguf protobuf", + f"cd llama.cpp && git reset --hard {version} && git clean -df", + "make clean -C llama.cpp", + f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: @@ -756,7 +763,8 @@ def install_llama_cpp_blocking(use_cuda = True): commands = [ "git clone https://github.com/ggerganov/llama.cpp", - f"cd llama.cpp && make clean && {use_cuda} make all -j{psutil.cpu_count()*2}", + "make clean -C llama.cpp", + f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return @@ -931,15 +939,26 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You might have to compile llama.cpp yourself, then run this again.\n"\ - "You do not need to close this Python program. 
Run the following commands in a new terminal:\n"\ - "You must run this in the same folder as you're saving your model.\n"\ - "git clone https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ - "Once that's done, redo the quantization." - ) + if IS_KAGGLE_ENVIRONMENT: + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) + else: + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You might have to compile llama.cpp yourself, then run this again.\n"\ + "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ + "You must run this in the same folder as you're saving your model.\n"\ + "git clone https://github.com/ggerganov/llama.cpp\n"\ + "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ + "Once that's done, redo the quantization." + ) + pass pass print(f"Unsloth: Conversion completed! Output location: {final_location}") @@ -961,14 +980,25 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): - raise RuntimeError( - "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ - "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ - "You must run this in the same folder as you're saving your model.\n"\ - "git clone https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ - "Once that's done, redo the quantization." 
- ) + if IS_KAGGLE_ENVIRONMENT: + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) + else: + raise RuntimeError( + "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ + "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ + "You must run this in the same folder as you're saving your model.\n"\ + "git clone https://github.com/ggerganov/llama.cpp\n"\ + "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ + "Once that's done, redo the quantization." + ) + pass pass print(f"Unsloth: Conversion completed! 
Output location: {final_location}") From 568eb74275f62610b2920e079723a846bfa672a0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 8 Apr 2024 23:19:46 +1000 Subject: [PATCH 0179/1088] Torch dtype (#314) * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * 
Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. 
* Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update save.py * Update save.py * checkpoint * Gemma 1.1 * more models * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * dtype --- unsloth/models/llama.py | 5 +++++ unsloth/save.py | 9 +++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 876ccb25a4..0b8092eaac 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1702,6 +1702,11 @@ def for_inference(model): lm_head = internal_model.lm_head.weight device_type = lm_head.device.type dtype = model.config.torch_dtype + + if type(dtype) is str: + if dtype == "float16": dtype = torch.float16 + elif dtype == "bfloat16": dtype = torch.bfloat16 + pass # Wrap model.generate model._unwrapped_old_generate = model.generate diff --git a/unsloth/save.py b/unsloth/save.py index d1cd7d6361..d0010321b1 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -183,7 +183,7 @@ def unsloth_save_model( ): if token is None and "HF_TOKEN" in os.environ: token = os.environ["HF_TOKEN"] - + if token is None and "HUGGINGFACE_TOKEN" in os.environ: token = os.environ["HUGGINGFACE_TOKEN"] @@ -489,7 +489,12 @@ def unsloth_save_model( from collections import OrderedDict state_dict = OrderedDict() - torch_dtype = model.config.torch_dtype + torch_dtype = internal_model.config.torch_dtype + if type(torch_dtype) is str: + if torch_dtype == "float16": torch_dtype = torch.float16 + elif torch_dtype == 
"bfloat16": torch_dtype = torch.bfloat16 + pass + # Check modules to save float32 dtype state_dict["model.embed_tokens.weight"] = internal_model.model.embed_tokens.weight.data.to(torch_dtype) From 4f6cdbedc3a086d11eb12f2aaa02e26f9f960dca Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 9 Apr 2024 23:35:02 +1000 Subject: [PATCH 0180/1088] CodeGemma (#317) * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * 
Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. 
* Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update save.py * Update save.py * checkpoint * Gemma 1.1 * more models * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * dtype * Update llama.py * CodeGemma --- unsloth/models/mapper.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 90208c52b2..f24108b75a 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -114,6 +114,22 @@ "unsloth/OpenHermes-2.5-Mistral-7B", "teknium/OpenHermes-2.5-Mistral-7B", ), + "unsloth/codegemma-2b-bnb-4bit" : ( + "unsloth/codegemma-2b", + "google/codegemma-2b", + ), + "unsloth/codegemma-7b-bnb-4bit" : ( + "unsloth/codegemma-7b", + "google/codegemma-7b", + ), + "unsloth/codegemma-2b-it-bnb-4bit" : ( + "unsloth/codegemma-2b-it", + "google/codegemma-2b-it", + ), + "unsloth/codegemma-7b-it-bnb-4bit" : ( + "unsloth/codegemma-7b-it", + "google/codegemma-7b-it", + ), } INT_TO_FLOAT_MAPPER = {} From d95daa06926e66b91447d464336e59bbf5b632b7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 10 Apr 2024 00:44:58 +1000 Subject: [PATCH 0181/1088] Fix downcasting LoRA (#318) * Update save.py * Update chat_templates.py * Update llama.py * model_name * Update loader.py * Tokenizer overwritten * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update 
fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * 
Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. 
* Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update save.py * Update save.py * checkpoint * Gemma 1.1 * more models * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * dtype * Update llama.py * CodeGemma * Fix downcasting --- unsloth/models/_utils.py | 21 ++++++++++++++++----- unsloth/models/llama.py | 2 +- unsloth/models/mapper.py | 4 ---- unsloth/models/mistral.py | 2 +- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 88a9e9661e..77509cd8da 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -95,11 +95,22 @@ def prepare_model_for_kbit_training( """ # Freeze all parameters except LoRA - for name, param in model.named_parameters(): - if ".lora_A." in name or ".lora_B." in name or ".lora_magnitude_vector" in name: - param.requires_grad_(True) - else: - param.requires_grad_(False) + import re + with torch.inference_mode(): + for name, param in model.named_parameters(): + if ".lora_A." in name or ".lora_B." in name or ".lora_magnitude_vector" in name: + param.requires_grad_(True) + # Also must be in float32! + if param.dtype != torch.float32: + name = name.replace("base_model", "model", 1) + layer_number = re.search(r"\.[\d]{1,}\.", name).group(0) + name = name.replace(layer_number, f"[{layer_number[1:-1]}].") + name = name.replace(".weight", "", 1) + exec(f"{name}.to(torch.float32)") + pass + else: + param.requires_grad_(False) + pass pass # Gradient checkpointing! 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0b8092eaac..202c692c4e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1030,7 +1030,7 @@ def from_pretrained( f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() - get_statistics() + # get_statistics() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index f24108b75a..b1d2faedb3 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -122,10 +122,6 @@ "unsloth/codegemma-7b", "google/codegemma-7b", ), - "unsloth/codegemma-2b-it-bnb-4bit" : ( - "unsloth/codegemma-2b-it", - "google/codegemma-2b-it", - ), "unsloth/codegemma-7b-it-bnb-4bit" : ( "unsloth/codegemma-7b-it", "google/codegemma-7b-it", diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 87f5c85ad1..3034b83d2a 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -319,7 +319,7 @@ def from_pretrained( f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() - get_statistics() + # get_statistics() if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 From 35dbef803b0b1e539e14c78f9f3276793fff5593 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 10 Apr 2024 00:51:06 +1000 Subject: [PATCH 0182/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 77509cd8da..69e570dc9a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -96,7 +96,7 @@ def prepare_model_for_kbit_training( # Freeze all parameters except LoRA import re - with torch.inference_mode(): + with torch.no_grad(): for name, param in model.named_parameters(): if ".lora_A." in name or ".lora_B." 
in name or ".lora_magnitude_vector" in name: param.requires_grad_(True) From 4606443b77f98a624896d4ca50710255d8436d86 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 11 Apr 2024 01:43:34 +1000 Subject: [PATCH 0183/1088] Readme Changes (#324) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index fc282604b1..b54c14fc67 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Mistral, Gemma, Llama 2-5x faster with 70% less memory! +### Finetune Mistral, Gemma, Llama 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -22,21 +22,28 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| -| **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 58% less | -| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | -| **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 43% less | -| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -| **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | -| **Mistral 7b** 1xT4 | [▶️ Start on 
Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | -| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | - +| **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | +| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | +| **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 60% less | +| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 82% less | +| **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 49% less | +| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 73% less | +| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | + +- Benchmarking compared to FA2 + Hugging Face combined. - This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. 
## 🦥 Unsloth.ai News -- 📣 [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) on 6T tokens now works. And [Gemma 2b notebook](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) -- 📣 Added [conversational notebooks](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) and [raw text notebooks](https://colab.research.google.com/drive/1bMOKOBzxQWUIGZBs_B0zm8pimuEnZdfM?usp=sharing) +- 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. To enable, simply change 1 line: +```python +model = FastLanguageModel.get_peft_model( + model, + use_gradient_checkpointing = "unsloth", # <<<<<<< +) +``` +- 📣 NEW! [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) - 📣 [2x faster inference](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) added for all our models - 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO - 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face and are in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth) @@ -46,9 +53,9 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Type | Links | | ------------------------------- | --------------------------------------- | | 📚 **Wiki & FAQ** | [Read Our Wiki](https://github.com/unslothai/unsloth/wiki) | +|   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| | 📜 **Documentation** | [Read The Doc](https://github.com/unslothai/unsloth/tree/main#-documentation) | | 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| -|   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) | 🌐 **Released Models** | [Unsloth Releases](https://huggingface.co/unsloth)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| @@ -413,7 +420,8 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
-### Credits -1. [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support -2. [152334H](https://github.com/152334H) for experimental DPO support -3. [atgctg](https://github.com/atgctg) for syntax highlighting +### Thank You to +- [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) +- [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support +- [152334H](https://github.com/152334H) for experimental DPO support +- [atgctg](https://github.com/atgctg) for syntax highlighting From d215fd902cf28feb8abcfde2d25281d0fbf9d28c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 15 Apr 2024 04:18:01 +1000 Subject: [PATCH 0184/1088] Tokenizers fix (#336) * Update llama.py * Update llama.py * Update llama.py * Update save.py * Accuracy * Revert * Update save.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update fast_lora.py * Update chat_templates.py * Update save.py * Update save.py * Update llama.py * Update llama.py * Account for DoRA * Update llama.py * Update save.py * GGUF incorrect * Update save.py * Update pyproject.toml * kaggle new * Update pyproject.toml * Update pyproject.toml * upcasting * Fix Colab * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update rope_embedding.py * Update rope_embedding.py * Fix bugs * Update fast_lora.py * Update fast_lora.py * Update README.md * Update README.md * GGUF * Update save.py * Update save.py * Update save.py * Update save.py * Update README.md * Update README.md * Bugs * Update 
fast_lora.py * Update pyproject.toml * Update fast_lora.py * Update __init__.py * Update fast_lora.py * dtype * Update llama.py * Update llama.py * Update llama.py * dtype * Update mistral.py * trust_remote_code * lm_head * Update llama.py * save_pretrained_settings * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * state_dict * Update save.py * whoami * Update llama.py * Update save.py * Update llama.py * Patch tokenizer * Update chat_templates.py * Heal tokenizers * Update chat_templates.py * Update mapper.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * tokenizer patching * patch_tokenizer * Update chat_templates.py * Update tokenizer_utils.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update tokenizer_utils.py * Edit * Update mistral.py * Update mistral.py * Stats * Update mistral.py * attention_mask * Update llama.py * Update llama.py * batch * Temp fix batch inference * Update llama.py * Update gemma.py * Fix inference * swiglu * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mistral.py * Update llama.py * fast inference * model * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update utils.py * inference * Update llama.py * Update llama.py * Update llama.py * overhead * Update llama.py * Update llama.py * compile * Update gemma.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * lora mamtul * Update llama.py * Update llama.py * Update 
llama.py * offloaded checkpointing * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update gemma.py * Revert "Update gemma.py" This reverts commit c68b59bbfd276d108098100901aaf6d123c52102. * Update _utils.py * Update _utils.py * Update _utils.py * Saving * sentencepiece_model_pb2 * Update llama.py * Update save.py * Update llama.py * padding side * Update tokenizer_utils.py * cache dir * Update tokenizer_utils.py * Update tokenizer_utils.py * Update pyproject.toml * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update save.py * Update save.py * checkpoint * Gemma 1.1 * more models * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * dtype * Update llama.py * CodeGemma * Fix downcasting * Some bugs * Fix Yi tokenizer * HF_TOKEN * Update llama.py * Update tokenizer_utils.py --- unsloth/chat_templates.py | 28 +++++++++-- unsloth/models/llama.py | 14 ++++-- unsloth/models/loader.py | 6 +++ unsloth/models/mistral.py | 7 +++ unsloth/save.py | 4 +- unsloth/tokenizer_utils.py | 100 ++++++++++++++++++++++++------------- 6 files changed, 113 insertions(+), 46 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index ec62c9b858..6a0be38620 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -293,7 +293,7 @@ def get_chat_template( # Check fast tokenizer if not is_fast_tokenizer: - logger.warning_once( + print( f"Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ "Please log a Github issue if you want this as a new feature!\n"\ "Your chat template will still work, but it won't add or edit tokens." @@ -348,11 +348,31 @@ def get_chat_template( # But training the lm_head and embeddings are slow! 
# This is a HACK! # Idea from https://huggingface.co/cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser + + old_bos_token = getattr(tokenizer, "bos_token", None) + old_eos_token = getattr(tokenizer, "eos_token", None) + old_pad_token = getattr(tokenizer, "pad_token", None) + old_unk_token = getattr(tokenizer, "unk_token", None) + string_vocab = tokenizer._tokenizer.to_str() - old_eos_token = tokenizer.eos_token - string_vocab = string_vocab.replace(old_eos_token, stop_word) + # First check if new stop_word is in the tokenizer + if stop_word in string_vocab: + # We shall swap them around + temporary_stop_token = "<|:__TEMP//STOP//TOKEN__:|>" + string_vocab = string_vocab.replace(old_eos_token, temporary_stop_token) + string_vocab = string_vocab.replace(stop_word, old_eos_token) + string_vocab = string_vocab.replace(temporary_stop_token, stop_word) + else: + string_vocab = string_vocab.replace(old_eos_token, stop_word) + pass new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) - new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + new_tokenizer = tokenizer.__class__( + tokenizer_object = new_tokenizer, + bos_token = old_bos_token, + eos_token = stop_word, + unk_token = old_unk_token, + pad_token = old_pad_token, + ) # Must fix the sentence piece tokenizer since there's no tokenizer.model file! 
token_mapping = { old_eos_token : stop_word, } diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 202c692c4e..6f70bc5105 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1017,6 +1017,12 @@ def from_pretrained( trust_remote_code = False, **kwargs, ): + if token is None and "HF_TOKEN" in os.environ: + token = os.environ["HF_TOKEN"] + + if token is None and "HUGGINGFACE_TOKEN" in os.environ: + token = os.environ["HUGGINGFACE_TOKEN"] + if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) @@ -1445,8 +1451,8 @@ def get_peft_model( for module in target_modules: if module == "lm_head": logger.warning_once( - "Unsloth: `lm_head` should be placed in `modules_to_save` and not `target_modules`."\ - "We shall do it for you!" + "Unsloth: `lm_head` should be placed in `modules_to_save` and not `target_modules`. "\ + "Luckily, we shall do it for you!" ) train_lm_head = True if modules_to_save is None: modules_to_save = ["lm_head"] @@ -1454,8 +1460,8 @@ def get_peft_model( elif module == "embed_tokens": logger.warning_once( - "Unsloth: `embed_tokens` should be placed in `modules_to_save` and not `target_modules`."\ - "We shall do it for you!" + "Unsloth: `embed_tokens` should be placed in `modules_to_save` and not `target_modules`. "\ + "Luckily, we shall do it for you!" 
) train_embed_tokens = True if modules_to_save is None: modules_to_save = ["embed_tokens"] diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 29d25f3c20..fa864a9a80 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -78,6 +78,12 @@ def from_pretrained( use_gradient_checkpointing = True, *args, **kwargs, ): + if token is None and "HF_TOKEN" in os.environ: + token = os.environ["HF_TOKEN"] + + if token is None and "HUGGINGFACE_TOKEN" in os.environ: + token = os.environ["HUGGINGFACE_TOKEN"] + old_model_name = model_name model_name = _get_model_name(model_name, load_in_4bit) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 3034b83d2a..56108939b9 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -13,6 +13,7 @@ # limitations under the License. from .llama import * +import os from ._utils import __version__ from transformers.models.mistral.modeling_mistral import ( @@ -301,6 +302,12 @@ def from_pretrained( trust_remote_code = False, **kwargs, ): + if token is None and "HF_TOKEN" in os.environ: + token = os.environ["HF_TOKEN"] + + if token is None and "HUGGINGFACE_TOKEN" in os.environ: + token = os.environ["HUGGINGFACE_TOKEN"] + if model_patcher is None: model_patcher = FastMistralModel # Mistral does NOT support RoPE Scaling! 
if rope_scaling is not None: diff --git a/unsloth/save.py b/unsloth/save.py index d0010321b1..655d1c5107 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -327,7 +327,7 @@ def unsloth_save_model( if hasattr(model, "config"): print(f"Saved {save_method} model to https://huggingface.co/" + save_directory) pass - return save_directory + return save_directory, None pass # Tokenizer has different saving arguments @@ -402,7 +402,7 @@ def unsloth_save_model( pass print(" Done.") - return save_directory + return save_directory, None pass # If push_to_hub, we must remove the .../ part of a repo diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 2640f321d9..132591276a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -48,7 +48,7 @@ def try_fix_tokenizer(tokenizer, prepend = True): tokenizer_string = converted_tokenizer.to_str() - # Llama does ▁apple. Sometimes this is wrong!! + # Llama does _apple. Sometimes this is wrong!! prepend_text = '{"type":"Prepend","prepend":"▁"},' if not prepend and prepend_text in tokenizer_string: tokenizer_string = tokenizer_string.replace(prepend_text, "", 1) @@ -269,15 +269,26 @@ def load_correct_tokenizer( cache_dir = None pass - slow_tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - trust_remote_code = trust_remote_code, - use_fast = False, - cache_dir = cache_dir, - ) + # Try loading the slow tokenizer. 
If it fails, then try Fast only + # Mainly to solve Deepseek models with no tokenizer.model file + slow_tokenizer = None + try: + slow_tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + trust_remote_code = trust_remote_code, + use_fast = False, + cache_dir = cache_dir, + ) + except: + print( + f"Unsloth: {tokenizer_name} has no tokenizer.model file.\n"\ + "Just informing you about this - this is not a critical error." + ) + pass + fast_tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, model_max_length = model_max_length, @@ -286,14 +297,19 @@ def load_correct_tokenizer( trust_remote_code = trust_remote_code, cache_dir = cache_dir, ) - fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token - fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token - - # Confirm if slow and fast are equivalent! - if assert_same_tokenization(slow_tokenizer, fast_tokenizer): - return fast_tokenizer + + if slow_tokenizer is not None: + fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token + fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token + + # Confirm if slow and fast are equivalent! + if assert_same_tokenization(slow_tokenizer, fast_tokenizer): + return fast_tokenizer + else: + return convert_to_fast_tokenizer(slow_tokenizer) + pass else: - return convert_to_fast_tokenizer(slow_tokenizer) + return fast_tokenizer pass pass @@ -408,25 +424,37 @@ def check_tokenizer( cache_dir = None pass - # Try slow tokenizer which can fix things! 
- tokenizer = AutoTokenizer.from_pretrained( - model_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - use_fast = False, - cache_dir = cache_dir, - ) - return check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - _reload = False, - ) - break + # Sometimes slow tokenizer does not work like Deepseek + try: + # Try slow tokenizer which can fix things! + tokenizer = AutoTokenizer.from_pretrained( + model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + use_fast = False, + cache_dir = cache_dir, + ) + return check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + _reload = False, + ) + break + except: + # Tokenizer has out of bounds issues and we can't + # load the slow tokenizer version :( + logger.warning_once( + "Unsloth: Tokenizer is most likely buggy, and Unsloth failed to repair it.\n"\ + "It will still work, but beware of out of bounds memory accesses.\n"\ + "Please file an issue on the model owner's repo about this issue." 
+ ) + return tokenizer + pass pass pass return convert_to_fast_tokenizer(tokenizer) From dadd4abcbd5d9ed35457e5fae20636a191d5f352 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 02:55:20 +1000 Subject: [PATCH 0185/1088] Llama-3 --- unsloth/models/mapper.py | 8 ++++++++ unsloth/tokenizer_utils.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b1d2faedb3..b6aab83069 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -126,6 +126,14 @@ "unsloth/codegemma-7b-it", "google/codegemma-7b-it", ), + "unsloth/llama-3-8b-bnb-4bit" : ( + "unsloth/llama-3-8b", + "meta-llama/Meta-Llama-3-8B", + ), + "unsloth/llama-3-8b-Instruct-bnb-4bit" : ( + "unsloth/llama-3-8b-Instruct", + "meta-llama/Meta-Llama-3-8B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 132591276a..01ad2bff7f 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -187,7 +187,7 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens)) try: string = "\n".join(all_special_tokens) + \ - "A quick brown fox jumps over the lazy dog!!\n\n" + \ + "A quick brown fox jumps over the lazy dog!!\n\nHi
\n\n" + \ "".join(all_special_tokens) return slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids except: From 82a4116febf549b68e53b24bf82ea80e2cd55319 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 03:03:05 +1000 Subject: [PATCH 0186/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 01ad2bff7f..edd9717557 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -299,8 +299,10 @@ def load_correct_tokenizer( ) if slow_tokenizer is not None: - fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token - fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token + if hasattr(fast_tokenizer "add_bos_token") and hasattr(slow_tokenizer "add_bos_token"): + fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token + if hasattr(fast_tokenizer "add_eos_token") and hasattr(slow_tokenizer "add_eos_token"): + fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token # Confirm if slow and fast are equivalent! 
if assert_same_tokenization(slow_tokenizer, fast_tokenizer): From aed494a1ca152ae71ee460ad9c2b4d391239e23d Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 03:05:54 +1000 Subject: [PATCH 0187/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index edd9717557..fa536ef29a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -299,9 +299,9 @@ def load_correct_tokenizer( ) if slow_tokenizer is not None: - if hasattr(fast_tokenizer "add_bos_token") and hasattr(slow_tokenizer "add_bos_token"): + if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token - if hasattr(fast_tokenizer "add_eos_token") and hasattr(slow_tokenizer "add_eos_token"): + if hasattr(fast_tokenizer, "add_eos_token") and hasattr(slow_tokenizer, "add_eos_token"): fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token # Confirm if slow and fast are equivalent! 
From 4a695a73f5cfc1955276c3d5b04fffce8325dc22 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 03:33:12 +1000 Subject: [PATCH 0188/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 69e570dc9a..8864ebab4b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -148,7 +148,7 @@ def patch_tokenizer(model, tokenizer): model.config.update({"unsloth_version" : __version__}) if not hasattr(tokenizer, "pad_token") or tokenizer.pad_token is None: # Fixes https://github.com/unslothai/unsloth/issues/5 - if hasattr(tokenizer, "unk_token"): + if hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None: tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) tokenizer.pad_token = tokenizer.unk_token else: From 912ddd99b214ad76eaf3dd81769ebc4e45d6816d Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 03:36:45 +1000 Subject: [PATCH 0189/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8864ebab4b..32da0a7345 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -153,7 +153,7 @@ def patch_tokenizer(model, tokenizer): tokenizer.pad_token = tokenizer.unk_token else: name = model.config._name_or_path if model is not None else "Model" - logger.warning_one( + logger.warning_once( f"{name} does not have a padding or unknown token!\n"\ f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." 
) From dc8a5f51ee3e1c19fc9c14acdd8068bf7751befd Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 19 Apr 2024 05:37:53 +1000 Subject: [PATCH 0190/1088] Update mapper.py --- unsloth/models/mapper.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b6aab83069..bad43190b7 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -134,6 +134,9 @@ "unsloth/llama-3-8b-Instruct", "meta-llama/Meta-Llama-3-8B-Instruct", ), + "unsloth/llama-3-70b-bnb-4bit" : ( + "meta-llama/Meta-Llama-3-70B", + ), } INT_TO_FLOAT_MAPPER = {} From 9c7bbecb50b09c3471fd17e9cfc0ad6e8d103b1f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Apr 2024 05:47:04 +1000 Subject: [PATCH 0191/1088] Update README.md (#351) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b54c14fc67..d6b0fa1ed8 100644 --- a/README.md +++ b/README.md @@ -22,9 +22,9 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| +| **Llama-3 8b** | [▶️ Start on Colab](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | | **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | | **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | -| **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 60% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 82% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 49% less | | **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 73% less | @@ -36,6 +36,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works! Llama-3 70b also works (just change the model name in the notebook). - 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. 
To enable, simply change 1 line: ```python model = FastLanguageModel.get_peft_model( @@ -43,11 +44,10 @@ model = FastLanguageModel.get_peft_model( use_gradient_checkpointing = "unsloth", # <<<<<<< ) ``` -- 📣 NEW! [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) +- 📣 [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) - 📣 [2x faster inference](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) added for all our models - 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO - 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face and are in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth) -- 📣 [Download models 4x faster](https://huggingface.co/collections/unsloth/) from 🤗Hugging Face. 
Eg: `unsloth/mistral-7b-bnb-4bit` ## 🔗 Links and Resources | Type | Links | From 88eee5026716a81e8f42a20fa83cae24eb35e45e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Apr 2024 05:50:19 +1000 Subject: [PATCH 0192/1088] Update README.md (#352) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d6b0fa1ed8..b771f48867 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ unsloth logo - + From bd88b011ada9d00847ef6bf4f6700f2b5f6d4203 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Apr 2024 04:59:19 +1000 Subject: [PATCH 0193/1088] Fix prompt (#357) --- unsloth/chat_templates.py | 22 ++++++++++++++++++++++ unsloth/models/mapper.py | 3 +++ unsloth/tokenizer_utils.py | 5 +++++ 3 files changed, 30 insertions(+) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 6a0be38620..56749d6c26 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -255,6 +255,20 @@ CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token,) +# Llama-3 +# Weirdly \n\n is needed? 
+llama3_template = \ + "{{ bos_token }}"\ + "{% for message in messages %}"\ + "{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"\ + "{% endif %}" +llama3_template_eos_token = "eos_token" +CHAT_TEMPLATES["llama-3"] = (llama3_template, gemma_chatml_eos_token,) + + def get_chat_template( tokenizer, chat_template = "chatml", @@ -540,4 +554,12 @@ def test_chat_templates(): correct_tokenizer.chat_template = gemma_template our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) assert(our_prompt == correct_prompt) + + # Llama-3 + template = llama3_template + correct_tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-3-8b-Instruct") + correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index bad43190b7..769cbff536 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -137,6 +137,9 @@ "unsloth/llama-3-70b-bnb-4bit" : ( "meta-llama/Meta-Llama-3-70B", ), + "unsloth/llama-3-70b-Instruct-bnb-4bit" : ( + "meta-llama/Meta-Llama-3-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index fa536ef29a..76d9372e21 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -215,6 +215,11 @@ def fix_sentencepiece_tokenizer( os.makedirs(temporary_location) pass + # Check if tokenizer.model exists + if not os.path.isfile(f"{temporary_location}/tokenizer.model"): + return new_tokenizer + pass + # First save the old tokenizer 
old_tokenizer.save_pretrained(temporary_location) From ce523ed15cf56a6cd05fa4646153e7688ec44c8a Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sat, 20 Apr 2024 14:22:52 +1000 Subject: [PATCH 0194/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b771f48867..555e08089e 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. ## 🦥 Unsloth.ai News -- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works! Llama-3 70b also works (just change the model name in the notebook). +- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (just change the model name in the notebook). - 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. 
To enable, simply change 1 line: ```python model = FastLanguageModel.get_peft_model( From ec19e61c854dcf9104386fa63fc6c4f2944d4f35 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 22 Apr 2024 05:12:11 +1000 Subject: [PATCH 0195/1088] Fix Llama-3 (#366) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py --- unsloth/chat_templates.py | 38 ++++++++--- unsloth/models/_utils.py | 1 + unsloth/models/llama.py | 34 ++++++++++ unsloth/save.py | 9 ++- unsloth/tokenizer_utils.py | 131 ++++++++++++++++++++++++++++++++++++- 5 files changed, 203 insertions(+), 10 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 56749d6c26..d31b6cf7a4 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -23,10 +23,7 @@ from .save import patch_saving_functions import os import shutil -from .tokenizer_utils import ( - load_correct_tokenizer, - fix_sentencepiece_tokenizer, -) +from .tokenizer_utils import * from .models._utils import patch_tokenizer CHAT_TEMPLATES = {} @@ -266,7 +263,7 @@ "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"\ "{% endif %}" llama3_template_eos_token = "eos_token" -CHAT_TEMPLATES["llama-3"] = (llama3_template, gemma_chatml_eos_token,) +CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token,) def get_chat_template( @@ -288,6 +285,8 @@ def get_chat_template( is_fast_tokenizer = getattr(tokenizer, "is_fast", False) old_padding_side = tokenizer.padding_side + same_padding_token = False + if type(chat_template) in (list, tuple,): chat_template, stop_word = chat_template assert(type(chat_template) is str) @@ -342,10 +341,24 @@ def get_chat_template( if skipped != len(token_mapping): 
new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) + # Careful on pad_token + old_pad_token = tokenizer.pad_token + if old_pad_token == tokenizer.eos_token: + old_pad_token = stop_word + same_padding_token = True + pass + if map_eos_token: - new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer, eos_token = stop_word) + new_tokenizer = tokenizer.__class__( + tokenizer_object = new_tokenizer, + eos_token = stop_word, + pad_token = old_pad_token, + ) else: - new_tokenizer = tokenizer.__class__(tokenizer_object = new_tokenizer) + new_tokenizer = tokenizer.__class__( + tokenizer_object = new_tokenizer, + pad_token = old_pad_token, + ) pass # Must fix the sentence piece tokenizer since there's no tokenizer.model file! @@ -380,6 +393,13 @@ def get_chat_template( string_vocab = string_vocab.replace(old_eos_token, stop_word) pass new_tokenizer = tokenizer._tokenizer.from_str(string_vocab) + + # Careful on pad_token + if old_pad_token == old_eos_token: + old_pad_token = stop_word + same_padding_token = True + pass + new_tokenizer = tokenizer.__class__( tokenizer_object = new_tokenizer, bos_token = old_bos_token, @@ -424,9 +444,11 @@ def get_chat_template( new_pad_token = getattr(tokenizer, "pad_token", None) new_bos_token = getattr(tokenizer, "bos_token", None) new_unk_token = getattr(tokenizer, "unk_token", None) - if old_pad_token != new_pad_token: tokenizer.pad_token = old_pad_token if old_bos_token != new_bos_token: tokenizer.bos_token = old_bos_token if old_unk_token != new_unk_token: tokenizer.unk_token = old_unk_token + if not same_padding_token: + if old_pad_token != new_pad_token: tokenizer.pad_token = old_pad_token + pass # stopping_criteria = create_stopping_criteria(tokenizer, stop_word) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 32da0a7345..9c4ae8fc68 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -349,3 +349,4 @@ def backward(ctx, dY): return (None, hidden_states.grad,) + 
(None,)*len(ctx.args) pass pass + diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6f70bc5105..45c75010b8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1445,6 +1445,10 @@ def get_peft_model( "gate_proj", "up_proj", "down_proj",),) model.config.update({"unsloth_version" : __version__}) + if type(modules_to_save) is tuple: + modules_to_save = list(modules_to_save) + pass + train_lm_head = False train_embed_tokens = False final_modules = [] @@ -1472,6 +1476,29 @@ def get_peft_model( final_modules.append(module) pass + # Check if we added new tokens! + if hasattr(model, "_need_to_train_embeddings"): + if not train_lm_head or not train_embed_tokens: + print( + "Unsloth: You added new tokens but did not specify if you wanted to "\ + "train the lm_head and embed_tokens.\nWe must turn it on for you." + ) + train_lm_head = True + train_embed_tokens = True + + if modules_to_save is None: modules_to_save = ["embed_tokens"] + else: modules_to_save.append("embed_tokens") + + if modules_to_save is None: modules_to_save = ["lm_head"] + else: modules_to_save.append("lm_head") + pass + pass + + # First fix untrained tokens + if train_embed_tokens or train_lm_head: + fix_untrained_tokens(model, eps = 1e-16) + pass + # Check modules_to_save if modules_to_save is not None: for module in modules_to_save: @@ -1479,8 +1506,15 @@ def get_peft_model( train_lm_head = True elif module == "embed_tokens": train_embed_tokens = True + else: + raise TypeError( + f"Unsloth: Module = {module} is not allowed. Only 'lm_head' and 'embed_tokens' is allowed." 
+ ) pass pass + if isinstance(modules_to_save, (tuple, list)): + modules_to_save = list(set(modules_to_save)) + pass # Get LoRA arguments = dict( diff --git a/unsloth/save.py b/unsloth/save.py index 655d1c5107..6e9d82c88a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -922,9 +922,16 @@ def save_to_gguf( f"The output location will be {final_location}\n"\ "This will take 3 minutes...") + # We first check if tokenizer.model exists in the model_directory + if os.path.exists(f"{model_directory}/tokenizer.model"): + vocab_type = "hfft" + else: + vocab_type = "bpe" + pass + if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ - f"--outfile {final_location} --vocab-type hfft "\ + f"--outfile {final_location} --vocab-type {vocab_type} "\ f"--outtype {first_conversion} --concurrency {n_cpus}" else: # Need to fix convert-hf-to-gguf.py for some models! diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 76d9372e21..f1a9daa998 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -18,11 +18,15 @@ import re import os from transformers.models.llama.modeling_llama import logger +from peft import PeftModelForCausalLM +import torch __all__ = [ "load_correct_tokenizer", "fix_sentencepiece_tokenizer", "check_tokenizer", + "fix_untrained_tokens", + "add_new_tokens", ] @@ -255,7 +259,11 @@ def fix_sentencepiece_tokenizer( # And load it! from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(temporary_location, eos_token = new_tokenizer.eos_token) + tokenizer = AutoTokenizer.from_pretrained( + temporary_location, + eos_token = new_tokenizer.eos_token, + pad_token = new_tokenizer.pad_token, + ) return tokenizer pass @@ -466,3 +474,124 @@ def check_tokenizer( pass return convert_to_fast_tokenizer(tokenizer) pass + + +@torch.inference_mode +def fix_untrained_tokens(model, eps = 1e-16): + """ + Llama-3 for eg has untrained vectors in the base model. 
+ These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> + We reset them to the mean of the rest of the tokens + """ + embedding_matrix = model.get_input_embeddings ().weight.data + lm_head_matrix = model.get_output_embeddings().weight.data + + # Get untrained tokens + indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps + where_untrained = torch.where(indicator_untrained)[0] + n_untrained = where_untrained.shape[0] + n_trained = embedding_matrix.shape[0] - n_untrained + if n_untrained != 0: + print( + f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ + "We shall set them to the mean of the other trained tokens." + ) + pass + + # First set untrained to all 0s - sometimes it's not! 1e-23 for bfloat16 + embedding_matrix[where_untrained] = 0 + lm_head_matrix [where_untrained] = 0 + + # Find sum + sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) + sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) + + # Find correct average by dividing by sum of trained tokens + mean_embedding = (sum_embedding / n_trained).to(embedding_matrix.dtype) + mean_lm_head = (sum_lm_head / n_trained).to(lm_head_matrix .dtype) + + # Set them to the mean + embedding_matrix[where_untrained] = mean_embedding + lm_head_matrix [where_untrained] = mean_lm_head + + return mean_embedding, mean_lm_head +pass + + +@torch.inference_mode +def add_new_tokens( + model, + tokenizer, + new_tokens = [], + method = "mean", + interpolation = 0.05, +): + """ + Smartly resizes the tokenizer and adds new tokens to the model. + We also disregard untrained tokens by removing them from the mean calculation. 
+ """ + assert(isinstance(new_tokens, (list, tuple))) + assert(len(new_tokens) > 0) + assert(method == "mean" or method == "interpolation") + assert(interpolation >= 0 and interpolation <= 1) + + # Check if tokens already exist + overlapping_tokens = set(new_tokens) & set(tokenizer.vocab.keys()) + if len(overlapping_tokens) != 0: + print( + f"Unsloth: You're adding new_tokens = {new_tokens}\n"\ + f"There are tokens which are overlapping = {list(overlapping_tokens)}\n"\ + f"We shall safely ignore these overlapping tokens." + ) + new_tokens = [x for x in new_tokens if x not in overlapping_tokens] + pass + + # Get mean of trained tokens + mean_embedding, mean_lm_head = fix_untrained_tokens(model) + mean_embedding = mean_embedding.to(torch.float32) + mean_lm_head = mean_lm_head .to(torch.float32) + + # Add tokens! + old_length = len(tokenizer) + tokenizer.add_tokens(new_tokens) + model.resize_token_embeddings(len(tokenizer)) + + # If we use interpolation, we interpolate between the mean embeddings and + # the Word2Vec sum of the other vectors + embedding_matrix = model.get_input_embeddings ().weight.data + lm_head_matrix = model.get_output_embeddings().weight.data + + if method == "interpolation": + print( + "Unsloth: You are using interpolation to add new tokens.\n"\ + f"We shall set new tokens = mean(embeddings)*{1-interpolation} + mean(new_tokens)*{interpolation}" + ) + for j, token in enumerate(new_tokens): + input_ids = tokenizer(token, add_special_tokens = False).input_ids + mean_embedding_token = embedding_matrix[input_ids].mean(axis = 0, dtype = torch.float32) + mean_lm_head_token = lm_head_matrix [input_ids].mean(axis = 0, dtype = torch.float32) + + # Interpolate + mean_embedding_token = mean_embedding*(1-interpolation) + mean_embedding_token*interpolation + mean_lm_head_token = mean_lm_head *(1-interpolation) + mean_lm_head_token *interpolation + + # Set the new vector + embedding_matrix[old_length+j] = mean_embedding_token + lm_head_matrix [old_length+j] = 
mean_lm_head_token + pass + else: + # Now set the new tokens to the mean! + embedding_matrix[old_length:] = mean_embedding + lm_head_matrix [old_length:] = mean_lm_head + pass + + # We set a flag to say we need to train embeddings + internal_model = model + while hasattr(internal_model, "model"): + internal_model._need_to_train_embeddings = True + internal_model = internal_model.model + pass + internal_model._need_to_train_embeddings = True + + return +pass From 5039ea3c365662ebfdf5feb6e5524a4b193749ab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 29 Apr 2024 04:47:03 +1000 Subject: [PATCH 0196/1088] Nightly (#370) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 40 ++++++++++++++++-------------- unsloth/chat_templates.py | 11 ++++++++ unsloth/models/llama.py | 51 ++++++++++++++++++++++++++++++++++++-- unsloth/models/loader.py | 4 +++ unsloth/models/mistral.py | 9 +++++++ unsloth/save.py | 19 +++++--------- unsloth/tokenizer_utils.py | 2 +- 7 files changed, 101 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 555e08089e..6df6616225 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Mistral, Gemma, Llama 2-5x faster with 80% 
less memory! +### Finetune Llama 3, Mistral & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -22,12 +22,11 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| -| **Llama-3 8b** | [▶️ Start on Colab](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | -| **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | -| **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | -| **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 82% less | -| **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 49% less | -| **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 73% less | +| **Llama 3 (8B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | +| **Mistral (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | +| **Gemma (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | +| **Llama 3 (8B)** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook) | 5x faster\* | 73% 
less | +| **ORPO** | [▶️ Start on Colab](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | - Benchmarking compared to FA2 + Hugging Face combined. @@ -36,7 +35,8 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. ## 🦥 Unsloth.ai News -- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (just change the model name in the notebook). +- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). +- 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! - 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. 
To enable, simply change 1 line: ```python model = FastLanguageModel.get_peft_model( @@ -46,8 +46,6 @@ model = FastLanguageModel.get_peft_model( ``` - 📣 [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) - 📣 [2x faster inference](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) added for all our models -- 📣 [DPO support](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) is now included. [More info](#DPO) on DPO -- 📣 We did a [blog](https://huggingface.co/blog/unsloth-trl) with 🤗Hugging Face and are in their official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth) ## 🔗 Links and Resources | Type | Links | @@ -182,18 +180,20 @@ max_seq_length = 2048 # Supports RoPE Scaling interally, so choose any! url = "https://huggingface.co/datasets/laion/OIG/resolve/main/unified_chip2.jsonl" dataset = load_dataset("json", data_files = {"train" : url}, split = "train") -# 4bit pre quantized models we support - 4x faster downloading! +# 4bit pre quantized models we support for 4x faster downloading + no OOMs. fourbit_models = [ "unsloth/mistral-7b-bnb-4bit", + "unsloth/mistral-7b-instruct-v0.2-bnb-4bit", "unsloth/llama-2-7b-bnb-4bit", - "unsloth/llama-2-13b-bnb-4bit", - "unsloth/codellama-34b-bnb-4bit", - "unsloth/tinyllama-bnb-4bit", -] # Go to https://huggingface.co/unsloth for more 4-bit models! 
+ "unsloth/gemma-7b-bnb-4bit", + "unsloth/gemma-7b-it-bnb-4bit", # Instruct version of Gemma 7b + "unsloth/gemma-2b-bnb-4bit", + "unsloth/gemma-2b-it-bnb-4bit", # Instruct version of Gemma 2b + "unsloth/llama-3-8b-bnb-4bit", # [NEW] 15 Trillion token Llama-3 +] # More models at https://huggingface.co/unsloth -# Load Llama model model, tokenizer = FastLanguageModel.from_pretrained( - model_name = "unsloth/mistral-7b-bnb-4bit", # Supports Llama, Mistral - replace this! + model_name = "unsloth/llama-3-8b-bnb-4bit", max_seq_length = max_seq_length, dtype = None, load_in_4bit = True, @@ -208,7 +208,8 @@ model = FastLanguageModel.get_peft_model( lora_alpha = 16, lora_dropout = 0, # Supports any, but = 0 is optimized bias = "none", # Supports any, but = "none" is optimized - use_gradient_checkpointing = True, + # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes! + use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context random_state = 3407, max_seq_length = max_seq_length, use_rslora = False, # We support rank stabilized LoRA @@ -272,7 +273,8 @@ model = FastLanguageModel.get_peft_model( lora_alpha = 64, lora_dropout = 0, # Supports any, but = 0 is optimized bias = "none", # Supports any, but = "none" is optimized - use_gradient_checkpointing = True, + # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes! 
+ use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context random_state = 3407, max_seq_length = max_seq_length, ) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index d31b6cf7a4..4e7a71aee7 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -281,6 +281,17 @@ def get_chat_template( IS_GEMMA = True pass + # We add a check for Llama-3 + # if chat_template == "llama-3": + # tokenizer._using_llama3_template = True + # else: + # llama3_tokens = set(["<|end_header_id|>", "<|eot_id|>", "<|start_header_id|>"]) + # check_llama3_tokens = llama3_tokens & set(str(x) for x in tokenizer.added_tokens_decoder.values()) + # if len(check_llama3_tokens) == len(llama3_tokens): + # tokenizer._using_llama3_template = True + # pass + # pass + # We first check if the tokenizer is a fast one. If not, we cannot convert this! is_fast_tokenizer = getattr(tokenizer, "is_fast", False) old_padding_side = tokenizer.padding_side diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 45c75010b8..a7cacea9b9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1284,6 +1284,15 @@ def from_pretrained( # Add save modules patch_saving_functions(model) + # Save tokenizer for inference purposes + tokenizer.padding_side = "left" # Force inference + internal_model = model + while hasattr(internal_model, "model"): + internal_model._saved_temp_tokenizer = tokenizer + internal_model = internal_model.model + pass + internal_model._saved_temp_tokenizer = tokenizer + return model, tokenizer pass @@ -1534,8 +1543,12 @@ def get_peft_model( if not SUPPORTS_LOFTQ: del arguments["loftq_config"] if not SUPPORTS_RSLORA: del arguments["use_rslora"] + _saved_temp_tokenizer = model._saved_temp_tokenizer + lora_config = LoraConfig(**arguments) model = _get_peft_model(model, lora_config) + + model._saved_temp_tokenizer = _saved_temp_tokenizer model = FastLlamaModel.patch_peft_model(model, use_gradient_checkpointing) @@ -1554,6 
+1567,18 @@ def get_peft_model( model.model.lm_head.modules_to_save.default.requires_grad_(True) pass + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + return model pass @@ -1751,6 +1776,18 @@ def for_inference(model): # Wrap model.generate model._unwrapped_old_generate = model.generate model.generate = _wrap_fast_inference(model.generate, device_type, dtype) + + # Patch tokenizer to pad to the left + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass pass @@ -1777,8 +1814,18 @@ def for_training(model, use_gradient_checkpointing = True): model.generate = model._unwrapped_old_generate del model._unwrapped_old_generate pass + + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass pass pass - - diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index fa864a9a80..a107200eae 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -76,6 +76,7 @@ def from_pretrained( fix_tokenizer = True, trust_remote_code = False, use_gradient_checkpointing = True, + 
resize_model_vocab = None, *args, **kwargs, ): if token is None and "HF_TOKEN" in os.environ: @@ -149,6 +150,9 @@ def from_pretrained( trust_remote_code = trust_remote_code, *args, **kwargs, ) + + if resize_model_vocab is not None: + model.resize_token_embeddings(resize_model_vocab) # In case the model supports tagging, add the unsloth tag. if hasattr(model, "add_model_tags"): diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 56108939b9..80d0ffdf7f 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -559,6 +559,15 @@ def from_pretrained( # Add save modules patch_saving_functions(model) + + # Save tokenizer for inference purposes + tokenizer.padding_side = "left" # Force inference + internal_model = model + while hasattr(internal_model, "model"): + internal_model._saved_temp_tokenizer = tokenizer + internal_model = internal_model.model + pass + internal_model._saved_temp_tokenizer = tokenizer return model, tokenizer pass diff --git a/unsloth/save.py b/unsloth/save.py index 6e9d82c88a..a2c55bb539 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -689,7 +689,7 @@ def unsloth_save_model( def install_llama_cpp_clone_non_blocking(): - full_command = ["git", "clone", "https://github.com/ggerganov/llama.cpp"] + full_command = ["git", "clone", "--recursive", "https://github.com/ggerganov/llama.cpp"] run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) return run_installer pass @@ -742,7 +742,7 @@ def install_llama_cpp_old(version = -10): # Clone a specific commit # Also don't use the GPU! 
commands = [ - "git clone https://github.com/ggerganov/llama.cpp", + "git clone --recursive https://github.com/ggerganov/llama.cpp", f"cd llama.cpp && git reset --hard {version} && git clean -df", "make clean -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", @@ -767,7 +767,7 @@ def install_llama_cpp_blocking(use_cuda = True): use_cuda = "LLAMA_CUDA=1" if use_cuda else "" commands = [ - "git clone https://github.com/ggerganov/llama.cpp", + "git clone --recursive https://github.com/ggerganov/llama.cpp", "make clean -C llama.cpp", f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", "pip install gguf protobuf", @@ -922,16 +922,9 @@ def save_to_gguf( f"The output location will be {final_location}\n"\ "This will take 3 minutes...") - # We first check if tokenizer.model exists in the model_directory - if os.path.exists(f"{model_directory}/tokenizer.model"): - vocab_type = "hfft" - else: - vocab_type = "bpe" - pass - if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ - f"--outfile {final_location} --vocab-type {vocab_type} "\ + f"--outfile {final_location} --vocab-type spm,hfft,bpe "\ f"--outtype {first_conversion} --concurrency {n_cpus}" else: # Need to fix convert-hf-to-gguf.py for some models! @@ -966,7 +959,7 @@ def save_to_gguf( "You might have to compile llama.cpp yourself, then run this again.\n"\ "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ - "git clone https://github.com/ggerganov/llama.cpp\n"\ + "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ "Once that's done, redo the quantization." ) @@ -1006,7 +999,7 @@ def save_to_gguf( "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ "You do not need to close this Python program. 
Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ - "git clone https://github.com/ggerganov/llama.cpp\n"\ + "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ "Once that's done, redo the quantization." ) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index f1a9daa998..5dc5856c2a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -524,7 +524,7 @@ def add_new_tokens( tokenizer, new_tokens = [], method = "mean", - interpolation = 0.05, + interpolation = 0.5, ): """ Smartly resizes the tokenizer and adds new tokens to the model. From 327a15549d8b559cff225b6d8af2e8c0af1002c4 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Mon, 29 Apr 2024 17:55:04 +1000 Subject: [PATCH 0197/1088] Update save.py --- unsloth/save.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index a2c55bb539..0e131fe309 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -922,9 +922,16 @@ def save_to_gguf( f"The output location will be {final_location}\n"\ "This will take 3 minutes...") + # We first check if tokenizer.model exists in the model_directory + if os.path.exists(f"{model_directory}/tokenizer.model"): + vocab_type = "spm,hfft,bpe" + else: + vocab_type = "bpe" + pass + if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ - f"--outfile {final_location} --vocab-type spm,hfft,bpe "\ + f"--outfile {final_location} --vocab-type {vocab_type} "\ f"--outtype {first_conversion} --concurrency {n_cpus}" else: # Need to fix convert-hf-to-gguf.py for some models! 
From 81819c337d1c35c5ed9873637ace27b7d5b4e384 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Apr 2024 05:59:02 +1000 Subject: [PATCH 0198/1088] Phi-3 (#397) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 27 +++++++++++++++------------ unsloth/models/mapper.py | 4 ++++ 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 6df6616225..07bcf2cf5f 100644 --- a/README.md +++ b/README.md @@ -20,23 +20,25 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. 
-| Unsloth supports | Free Notebooks | Performance | Memory use | -|-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| -| **Llama 3 (8B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | -| **Mistral (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | -| **Gemma (7B)** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | -| **Llama 3 (8B)** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook) | 5x faster\* | 73% less | -| **ORPO** | [▶️ Start on Colab](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | -| **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | +| Unsloth supports | Free Notebooks | Performance | Memory use | +|-----------|---------|--------|----------| +| **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | +| **Mistral (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | +| **Gemma (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | +| **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | +| **DPO Zephyr** | [▶️ Start for 
free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | +| **Phi-3 (3.8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1NvkBmkHfucGO3Ve9s1NKZvMNlw5p83ym?usp=sharing) | 2x faster | 50% less | +| **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | - Benchmarking compared to FA2 + Hugging Face combined. -- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. -- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. -- \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster. +- **Kaggle Notebooks** for [Llama-3 8b](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7b](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7b](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for Llama-3. And ChatML for [Mistral 7b](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). +- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text. ## 🦥 Unsloth.ai News - 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). - 📣 NEW! 
[ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! +- 📣 NEW! [Phi-3 3.8b support](https://colab.research.google.com/drive/1NvkBmkHfucGO3Ve9s1NKZvMNlw5p83ym?usp=sharing) is here! - 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. To enable, simply change 1 line: ```python model = FastLanguageModel.get_peft_model( @@ -45,7 +47,7 @@ model = FastLanguageModel.get_peft_model( ) ``` - 📣 [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) -- 📣 [2x faster inference](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) added for all our models +- 📣 [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models ## 🔗 Links and Resources | Type | Links | @@ -190,6 +192,7 @@ fourbit_models = [ "unsloth/gemma-2b-bnb-4bit", "unsloth/gemma-2b-it-bnb-4bit", # Instruct version of Gemma 2b "unsloth/llama-3-8b-bnb-4bit", # [NEW] 15 Trillion token Llama-3 + "unsloth/Phi-3-mini-4k-instruct-bnb-4bit", ] # More models at https://huggingface.co/unsloth model, tokenizer = FastLanguageModel.from_pretrained( diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 769cbff536..b4fbe57387 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -140,6 +140,10 @@ "unsloth/llama-3-70b-Instruct-bnb-4bit" : ( "meta-llama/Meta-Llama-3-70B-Instruct", ), + "unsloth/Phi-3-mini-4k-instruct-bnb-4bit" : ( + "unsloth/Phi-3-mini-4k-instruct", + 
"microsoft/Phi-3-mini-4k-instruct", + ), } INT_TO_FLOAT_MAPPER = {} From 72aee4a3a128268c3d1e5cef249626897996645c Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 30 Apr 2024 20:26:10 +1000 Subject: [PATCH 0199/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 07bcf2cf5f..c6e7d6c50a 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Benchmarking compared to FA2 + Hugging Face combined. - **Kaggle Notebooks** for [Llama-3 8b](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7b](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7b](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for Llama-3. And ChatML for [Mistral 7b](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). +- This [conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) is useful for Llama-3. And ChatML for [Mistral 7b](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text. 
## 🦥 Unsloth.ai News From 53dbf76391da0aea35bc6b044b2fe85460d9e345 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 5 May 2024 05:45:01 +1000 Subject: [PATCH 0200/1088] Fix llama-3 (#423) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Fix reserved tokens * Update save.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/models/llama.py | 14 +++++-- unsloth/save.py | 37 ++++++++++++----- unsloth/tokenizer_utils.py | 84 +++++++++++++++++++++++++++++++++++++- 3 files changed, 119 insertions(+), 16 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a7cacea9b9..136ceb2c7b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1503,10 +1503,16 @@ def get_peft_model( pass pass + # Check for Llama-3 
+ # if hasattr(model._saved_temp_tokenizer, "_using_llama3_template"): + # if not train_embed_tokens and not train_lm_head: + # raise RuntimeError("") + # First fix untrained tokens - if train_embed_tokens or train_lm_head: - fix_untrained_tokens(model, eps = 1e-16) - pass + # Wrong - can cause reserved tokens to pop out!! + # if train_embed_tokens or train_lm_head: + # fix_untrained_tokens(model, eps = 1e-16) + # pass # Check modules_to_save if modules_to_save is not None: @@ -1547,7 +1553,7 @@ def get_peft_model( lora_config = LoraConfig(**arguments) model = _get_peft_model(model, lora_config) - + model._saved_temp_tokenizer = _saved_temp_tokenizer model = FastLlamaModel.patch_peft_model(model, use_gradient_checkpointing) diff --git a/unsloth/save.py b/unsloth/save.py index 0e131fe309..e50f0d34da 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -118,14 +118,14 @@ def _merge_lora(layer, name): W = fast_dequantize(W, quant_state) else: dtype = W.dtype - # W = W.to(torch.float32).t() - W = W.t() + W = W.to(torch.float32).t() + # W = W.t() if A is not None: # sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) # W += sAB - # W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) - W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = s) + W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s) + # W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = s) # if not torch.isfinite(W).all(): maximum_element = torch.max(W.min().abs(), W.max()) if not torch.isfinite(maximum_element).item(): @@ -696,12 +696,18 @@ def install_llama_cpp_clone_non_blocking(): def install_llama_cpp_make_non_blocking(): - env = { **os.environ, "LLAMA_CUDA": "1", } + # https://github.com/ggerganov/llama.cpp/issues/7062 + # Weirdly GPU conversion for GGUF breaks?? 
+ # env = { **os.environ, "LLAMA_CUDA": "1", } n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean os.system("make clean -C llama.cpp") full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] - run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) + + # https://github.com/ggerganov/llama.cpp/issues/7062 + # Weirdly GPU conversion for GGUF breaks?? + # run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) + run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) return run_installer pass @@ -764,12 +770,17 @@ def install_llama_cpp_old(version = -10): def install_llama_cpp_blocking(use_cuda = True): - use_cuda = "LLAMA_CUDA=1" if use_cuda else "" + # https://github.com/ggerganov/llama.cpp/issues/7062 + # Weirdly GPU conversion for GGUF breaks?? + # use_cuda = "LLAMA_CUDA=1" if use_cuda else "" commands = [ "git clone --recursive https://github.com/ggerganov/llama.cpp", "make clean -C llama.cpp", - f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", + # https://github.com/ggerganov/llama.cpp/issues/7062 + # Weirdly GPU conversion for GGUF breaks?? + # f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", + f"make all -j{psutil.cpu_count()*2} -C llama.cpp", "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return @@ -833,6 +844,12 @@ def save_to_gguf( first_conversion : str = "f16", _run_installer = None, # Non blocking install of llama.cpp ): + logger.warning( + "WARNING: llama.cpp GGUF conversion is currently unstable, since llama.cpp is\n"\ + "undergoing some major bug fixes as at 5th of May 2024. This is not an Unsloth issue.\n"\ + "Please be patient - GGUF saving should still work, but might not work as well." 
+ ) + from transformers.models.llama.modeling_llama import logger if quantization_method.startswith("iq2"): @@ -967,7 +984,7 @@ def save_to_gguf( "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ + "cd llama.cpp && make clean && make all -j\n"\ "Once that's done, redo the quantization." ) pass @@ -1007,7 +1024,7 @@ def save_to_gguf( "You do not need to close this Python program. Run the following commands in a new terminal:\n"\ "You must run this in the same folder as you're saving your model.\n"\ "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j\n"\ + "cd llama.cpp && make clean && make all -j\n"\ "Once that's done, redo the quantization." ) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 5dc5856c2a..0d6dadf7dd 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -25,7 +25,6 @@ "load_correct_tokenizer", "fix_sentencepiece_tokenizer", "check_tokenizer", - "fix_untrained_tokens", "add_new_tokens", ] @@ -518,6 +517,44 @@ def fix_untrained_tokens(model, eps = 1e-16): pass +@torch.inference_mode +def mean_of_trained_tokens(model, eps = 1e-16): + """ + Llama-3 for eg has untrained vectors in the base model. 
+ These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> + We reset them to the mean of the rest of the tokens + """ + embedding_matrix = model.get_input_embeddings ().weight.data.clone() + lm_head_matrix = model.get_output_embeddings().weight.data.clone() + + # Get untrained tokens + indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps + where_untrained = torch.where(indicator_untrained)[0] + n_untrained = where_untrained.shape[0] + n_trained = embedding_matrix.shape[0] - n_untrained + if n_untrained != 0: + print( + f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ + "We shall set them to the mean of the other trained tokens." + ) + pass + + # First set untrained to all 0s - sometimes it's not! 1e-23 for bfloat16 + embedding_matrix[where_untrained] = 0 + lm_head_matrix [where_untrained] = 0 + + # Find sum + sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) + sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) + + # Find correct average by dividing by sum of trained tokens + mean_embedding = (sum_embedding / n_trained).to(embedding_matrix.dtype) + mean_lm_head = (sum_lm_head / n_trained).to(lm_head_matrix .dtype) + + return mean_embedding, mean_lm_head +pass + + @torch.inference_mode def add_new_tokens( model, @@ -547,7 +584,10 @@ def add_new_tokens( pass # Get mean of trained tokens - mean_embedding, mean_lm_head = fix_untrained_tokens(model) + # mean_embedding, mean_lm_head = fix_untrained_tokens(model) + + # Weirdly be careful reserved tokens can pop out + mean_embedding, mean_lm_head = mean_of_trained_tokens(model) mean_embedding = mean_embedding.to(torch.float32) mean_lm_head = mean_lm_head .to(torch.float32) @@ -595,3 +635,43 @@ def add_new_tokens( return pass + + +from inspect import getsource +import trl.trainer.sft_trainer +from trl.trainer.sft_trainer import * + +def fix_sft_trainer_tokenizer(): + """ + Fixes double adding BOS tokens like in llama-3 + 
""" + for function_name, replacer in ( + ("_prepare_non_packed_dataloader", "def tokenize(element):",), + # ("_prepare_packed_dataloader", "if dataset_text_field is not None",), + ): + function = getsource(eval(f"trl.trainer.sft_trainer.SFTTrainer.{function_name}")) + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + + check_text = \ + "\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])\n"\ + "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ + "chat_template = '' if chat_template is None else chat_template\n"\ + "has_bos_token_already = test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template\n"\ + "add_special_tokens = False if has_bos_token_already else add_special_tokens\n\n" + + check_text = check_text.split("\n") + check_text = "\n".join(" "*where + x for x in check_text) + + function = function.replace(replacer, check_text + replacer) + exec(function, globals()) + + # Replace TRL's SFTTrainer + exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) + pass +pass + +# Fixes double adding BOS tokens like in llama-3 +fix_sft_trainer_tokenizer() From a93a885c286934c9c7467324054ca3f9d526a2bd Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 5 May 2024 13:28:21 +1000 Subject: [PATCH 0201/1088] Update save.py --- unsloth/save.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index e50f0d34da..868d25de4b 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -25,6 +25,7 @@ import subprocess import psutil import re +from transformers.models.llama.modeling_llama import logger __all__ = [ "print_quantization_methods", @@ -845,13 +846,11 @@ def save_to_gguf( _run_installer = None, # Non blocking install of llama.cpp ): logger.warning( - "WARNING: llama.cpp GGUF conversion is currently 
unstable, since llama.cpp is\n"\ + "NOTICE: llama.cpp GGUF conversion is currently unstable, since llama.cpp is\n"\ "undergoing some major bug fixes as at 5th of May 2024. This is not an Unsloth issue.\n"\ "Please be patient - GGUF saving should still work, but might not work as well." ) - from transformers.models.llama.modeling_llama import logger - if quantization_method.startswith("iq2"): raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") From 4211cc01409e3ced4f7abebaf68e244193b46e2c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 May 2024 07:40:41 +1000 Subject: [PATCH 0202/1088] llama-3 bug fixes (#429) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Fix reserved tokens * Update save.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * Update save.py * Update _utils.py * Update 
chat_templates.py --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/chat_templates.py | 22 ++++++++++++++ unsloth/models/_utils.py | 64 ++++++++++++++++++++++++++++++--------- unsloth/save.py | 35 ++++++++++++++++++--- 3 files changed, 103 insertions(+), 18 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 4e7a71aee7..07999ea0dc 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -266,6 +266,20 @@ CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token,) +# Phi-3 +phi3_template = \ + "{{ bos_token }}"\ + "{% for message in messages %}"\ + "{% if (message['role'] == 'user') %}"\ + "{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}"\ + "{% elif (message['role'] == 'assistant') %}"\ + "{{message['content'] + '<|end|>' + '\n'}}"\ + "{% endif %}"\ + "{% endfor %}" +phi3_template_eos_token = "<|end|>" +CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token,) + + def get_chat_template( tokenizer, chat_template = "chatml", @@ -595,4 +609,12 @@ def test_chat_templates(): correct_tokenizer.chat_template = template our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) + + # Phi-3 + template = phi3_template + correct_tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") + correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_tokenizer.chat_template = template + our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + assert(correct_prompt == our_prompt) pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 9c4ae8fc68..49f054e432 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -144,24 +144,60 @@ def 
make_inputs_require_grad(module, input, output): def patch_tokenizer(model, tokenizer): + """ + Phi3's pad_token isn't set. We set it to <|placeholder... + Llama-3 is <|reserved... + Llama-2 is + Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! + Fixes https://github.com/unslothai/unsloth/issues/5 + """ + possible_reserved_tokens = ("<|reserved", "<|placeholder",) + if model is not None: model.config.update({"unsloth_version" : __version__}) - if not hasattr(tokenizer, "pad_token") or tokenizer.pad_token is None: - # Fixes https://github.com/unslothai/unsloth/issues/5 - if hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None: - tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) - tokenizer.pad_token = tokenizer.unk_token - else: - name = model.config._name_or_path if model is not None else "Model" - logger.warning_once( - f"{name} does not have a padding or unknown token!\n"\ - f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." + + bad_pad_token = False + if hasattr(tokenizer, "pad_token") and tokenizer.pad_token is not None: + # Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! + bad_pad_token = tokenizer.eos_token == tokenizer.pad_token + elif hasattr(tokenizer, "pad_token") and tokenizer.pad_token is None: + bad_pad_token = True + else: + bad_pad_token = False + pass + + if bad_pad_token: + # Find a better pad token + added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] + possible_pad_token = None + for added_token in added_tokens[::-1]: + if added_token.startswith(possible_reserved_tokens): + possible_pad_token = added_token + break + pass + pass + if possible_pad_token is None: + # Try unk_token + possible_pad_token = tokenizer.unk_token + pass + if possible_pad_token is None: + # Failure!! 
+ raise RuntimeError( + "Unsloth: Tokenizer's pad_token cannot be = eos_token, and we couldn't find a\n"\ + "replacement of either <|reserved... or <|placeholder..." ) - assert(hasattr(tokenizer, "eos_token")) - tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) - tokenizer.pad_token = tokenizer.eos_token + pass + + name = model.config._name_or_path if model is not None else "Model" + logger.warning_once( + f"{name} does not have a padding token! Will use pad_token = {possible_pad_token}." + ) + + # Edit pad_token + tokenizer.add_special_tokens({"pad_token" : possible_pad_token}) + tokenizer.pad_token = possible_pad_token if model is not None: - config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) + config = model.config.update({"pad_token_id" : tokenizer.pad_token_id}) pass return model, tokenizer pass diff --git a/unsloth/save.py b/unsloth/save.py index 868d25de4b..b825b10fb7 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -18,6 +18,7 @@ from typing import Optional, Callable, Union, List import torch import os +import shutil import pickle import gc from transformers.models.llama.modeling_llama import logger @@ -87,6 +88,24 @@ def print_quantization_methods(): pass +def check_if_sentencepiece_model(model, temporary_location = "_unsloth_sentencepiece_temp"): + if not hasattr(model, "_saved_temp_tokenizer"): return False + + temp_tokenizer = model._saved_temp_tokenizer + sentencepiece_model = False + file_location = f"{temporary_location}/{temp_tokenizer.name_or_path}" + if not os.path.exists(file_location): + os.makedirs(file_location) + pass + temp_tokenizer.save_pretrained(file_location) + if os.path.isfile(f"{file_location}/tokenizer.model"): + sentencepiece_model = True + pass + shutil.rmtree(file_location) + return sentencepiece_model +pass + + def _free_cached_model(model): from huggingface_hub import scan_cache_dir cached_repos = list(scan_cache_dir().repos) @@ -840,6 +859,7 @@ def _fix_gemma_gguf(): def save_to_gguf( 
model_type : str, + is_sentencepiece : bool = False, model_directory : str = "unsloth_finetuned_model", quantization_method : str = "fast_quantized", first_conversion : str = "f16", @@ -856,7 +876,8 @@ def save_to_gguf( # Careful convert.py is only for Llama / Mistral based archs use_fast_convert = False - if model_type == "llama": use_fast_convert = True + if not is_sentencepiece: use_fast_convert = False # Llama-3 + elif model_type == "llama": use_fast_convert = True elif model_type == "mistral": use_fast_convert = True pass logger.warning_once(f"Unsloth: Converting {model_type} model. Can use fast conversion = {use_fast_convert}.") @@ -951,7 +972,7 @@ def save_to_gguf( f"--outtype {first_conversion} --concurrency {n_cpus}" else: # Need to fix convert-hf-to-gguf.py for some models! - _fix_gemma_gguf() + # _fix_gemma_gguf() command = f"python llama.cpp/convert-hf-to-gguf.py {model_directory} "\ f"--outfile {final_location} "\ @@ -1353,7 +1374,10 @@ def unsloth_save_pretrained_gguf( gc.collect() model_type = self.config.model_type - file_location = save_to_gguf(model_type, new_save_directory, quantization_method, first_conversion, makefile) + is_sentencepiece_model = check_if_sentencepiece_model(self) + file_location = save_to_gguf(model_type, is_sentencepiece_model, + new_save_directory, quantization_method, first_conversion, makefile, + ) if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") @@ -1473,7 +1497,10 @@ def unsloth_push_to_hub_gguf( gc.collect() model_type = self.config.model_type - file_location = save_to_gguf(model_type, new_save_directory, quantization_method, first_conversion, makefile) + is_sentencepiece_model = check_if_sentencepiece_model(self) + file_location = save_to_gguf(model_type, is_sentencepiece_model, + new_save_directory, quantization_method, first_conversion, makefile, + ) print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( From d4512f7c138a254d789fcba247b9c363a8aa2e25 Mon Sep 17 
00:00:00 2001 From: Daniel Han Date: Mon, 13 May 2024 05:22:03 +1000 Subject: [PATCH 0203/1088] May 2024 Prelim (#447) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Fix reserved tokens * Update save.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * Update save.py * Update _utils.py * Update chat_templates.py * Adds dependencies and extras for torch 2.3.0 with new xformers versions (#415) * Adds dependencies and extras for torch 2.3.0 with new xformers versions * Add 2.3.0 section to readme * Support Qwen2 (#428) * support Qwen2 * support Qwen2 * Delete README.md * Revert "Delete README.md" This reverts commit 026b05f859410ddd04e1a2b4b54e950b89b4a58a. 
* Update README.md * Qwen2 == Mistral * Update llama.py * Update __init__.py * Update README.md --------- Co-authored-by: Daniel Han * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update save.py * test_hf_gguf_equivalence * Update chat_templates.py * Update chat_templates.py * --pad-vocab * Update tokenizer_utils.py --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nathan Azrak <42650258+nathan-az@users.noreply.github.com> Co-authored-by: Yang JianXin <995462226@qq.com> --- README.md | 10 ++++- pyproject.toml | 37 ++++++++++++++++ unsloth/chat_templates.py | 88 ++++++++++++++++++++++++++++++++---- unsloth/models/__init__.py | 7 +-- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 1 + unsloth/models/loader.py | 3 ++ unsloth/models/mistral.py | 2 +- unsloth/models/qwen2.py | 91 ++++++++++++++++++++++++++++++++++++++ unsloth/save.py | 29 ++++++------ unsloth/tokenizer_utils.py | 66 +++++++++++++++++++++++++++ 11 files changed, 309 insertions(+), 27 deletions(-) create mode 100644 unsloth/models/qwen2.py diff --git a/README.md b/README.md index c6e7d6c50a..ca5b6533b5 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text. ## 🦥 Unsloth.ai News +- 📣 NEW! Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) - 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). - 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! - 📣 NEW! 
[Phi-3 3.8b support](https://colab.research.google.com/drive/1NvkBmkHfucGO3Ve9s1NKZvMNlw5p83ym?usp=sharing) is here! @@ -159,7 +160,14 @@ pip install --no-deps packaging ninja einops flash-attn xformers trl peft accele pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" pip install --no-deps xformers trl peft accelerate bitsandbytes ``` -7. To troubleshoot installs try the below (all must succeed). Xformers should mostly all be available. +7. For Pytorch 2.3.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. +```bash +pip install "unsloth[cu118-torch230] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-torch230] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" +``` +8. To troubleshoot installs try the below (all must succeed). Xformers should mostly all be available. 
```bash nvcc python -m xformers.info diff --git a/pyproject.toml b/pyproject.toml index e6f663a969..0398d0df4c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,6 +86,17 @@ cu121onlytorch220 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] +cu118onlytorch230 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu121onlytorch230 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] + cu118 = [ "unsloth[huggingface]", "bitsandbytes", @@ -126,6 +137,16 @@ cu121-torch220 = [ "bitsandbytes", "unsloth[cu121onlytorch220]", ] +cu118-torch230 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118onlytorch230]", +] +cu121-torch230 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch230]", +] kaggle = [ "unsloth[huggingface]", ] @@ -238,6 +259,22 @@ cu121-ampere-torch220 = [ "ninja", "flash-attn", ] +cu118-ampere-torch230 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118onlytorch230]", + "packaging", + "ninja", + "flash-attn", +] +cu121-ampere-torch230 = [ + 
"unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121onlytorch230]", + "packaging", + "ninja", + "flash-attn", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 07999ea0dc..3af4c4e9ae 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -15,6 +15,7 @@ __all__ = [ "get_chat_template", "test_chat_templates", + "test_hf_gguf_equivalence", ] from transformers import StoppingCriteria, StoppingCriteriaList @@ -270,12 +271,11 @@ phi3_template = \ "{{ bos_token }}"\ "{% for message in messages %}"\ - "{% if (message['role'] == 'user') %}"\ - "{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}"\ - "{% elif (message['role'] == 'assistant') %}"\ - "{{message['content'] + '<|end|>' + '\n'}}"\ - "{% endif %}"\ - "{% endfor %}" + "{{'<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n'}}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '<|assistant|>\n' }}"\ + "{% endif %}" phi3_template_eos_token = "<|end|>" CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token,) @@ -613,8 +613,80 @@ def test_chat_templates(): # Phi-3 template = phi3_template correct_tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") - correct_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + correct_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) correct_tokenizer.chat_template = template - our_prompt = correct_tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + our_prompt = correct_tokenizer.apply_chat_template(messages[1:], tokenize = False, add_generation_prompt = True) assert(correct_prompt == our_prompt) pass + + +def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf"): + """ + Carefully checks the output of GGUF's tokenization 
and HF. + Can catch all tokenization bugs. + """ + import subprocess + import re + messages = [ + {"role": "user", "content": "What is 2+2?"}, + {"role": "assistant", "content": "It's 4."}, + {"role": "user", "content": " But 2+2 is equal to 5. "}, + {"role": "assistant", "content": "No I'm sure its 4."}, + {"role": "user", "content": " No it's 100% 5! "}, + ] + + prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. + + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""".format( + "Describe the city given eloquently.", # instruction + "The lost city of Atlantis.", # input + "", # output - leave this blank for generation! + ) + prompts = [ prompt, ] + + if tokenizer.chat_template is not None: + prompt = tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + prompt = prompt.replace("'", "") # Subprocess does not like '' + prompts.append(prompts) + pass + + for prompt in prompts: + command = f"./llama.cpp/main -m {gguf_model} -n 0 --temp 0.0 --verbose-prompt "\ + f"--check-tensors -p '{prompt}'" + + datas = [] + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: + for line in sp.stdout: + datas.append(line.decode("utf-8", errors = "replace")) + pass + gguf_tokens = "".join(datas) + + # Now extract GGUF tokenization attempt + gguf_tokenized = re.findall("([\d]{1,}) \-\> \'([^\']{1,})\'", gguf_tokens, flags = re.MULTILINE) + gguf_tokenized = [(int(x[0]), x[1],) for x in gguf_tokenized] + input_ids = tokenizer(prompt).input_ids + tokens = tokenizer.batch_decode(input_ids) + hf_tokenized = list(zip(input_ids, tokens)) + print(gguf_tokenized[:5]) + + # Compare to Huggingface + for j, (hf_token, gguf_token) in enumerate(zip(hf_tokenized, gguf_tokenized)): + if (hf_token[0] != gguf_token[0]): + print("Failed GGUF != HF at", j) + print("HF =", 
hf_token) + print("GGUF =", gguf_token) + print(hf_tokenized[:j+1]) + print(gguf_tokenized[:j+1]) + print(gguf_tokens) + raise RuntimeError("Failed comparing GGUF to HF.") + pass + pass + return True +pass diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 891947d69e..ff7129e06a 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .loader import FastLanguageModel -from .llama import FastLlamaModel +from .loader import FastLanguageModel +from .llama import FastLlamaModel from .mistral import FastMistralModel -from .dpo import PatchDPOTrainer +from .qwen2 import FastQwen2Model +from .dpo import PatchDPOTrainer diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 49f054e432..80cb19517d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -30,7 +30,7 @@ import os import psutil -__version__ = "2024.4" +__version__ = "2024.5" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 136ceb2c7b..44998b4cfb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1605,6 +1605,7 @@ def patch_peft_model( if model_type == "llama": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "mistral": apply_lora_mlp = apply_lora_mlp_swiglu + elif model_type == "qwen2": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu_approx else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index a107200eae..2b3bf47949 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -14,6 +14,7 @@ from .llama import FastLlamaModel, logger from .mistral import FastMistralModel +from .qwen2 import 
FastQwen2Model from transformers import AutoConfig from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel @@ -119,6 +120,8 @@ def from_pretrained( f"to obtain the latest transformers build, then restart this session."\ ) dispatch_model = FastGemmaModel + elif model_type == "qwen2": + dispatch_model = FastQwen2Model else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 80d0ffdf7f..902177cb12 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -343,7 +343,7 @@ def from_pretrained( # Mistral does NOT support RoPE Scaling sadly so we have to error out. if max_seq_length > model_max_seq_length: raise RuntimeError( - "Unsloth: Unfortunately Mistral type models do not support RoPE scaling!\n"\ + f"Unsloth: Unfortunately {model_patcher.__name__[4:-5]} type models do not support RoPE scaling!\n"\ f"The maximum sequence length supported is {model_max_seq_length}.", ) pass diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py new file mode 100644 index 0000000000..76fe31a6d1 --- /dev/null +++ b/unsloth/models/qwen2.py @@ -0,0 +1,91 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .llama import * +from .mistral import FastMistralModel +import os +from ._utils import __version__ + +from transformers.models.qwen2.modeling_qwen2 import ( + Qwen2Attention, + Qwen2DecoderLayer, + Qwen2Model, + Qwen2ForCausalLM, +) +# For Pytorch 2.1.1 +try: + from transformers.models.qwen2.modeling_qwen2 import ( + Qwen2SdpaAttention, + Qwen2FlashAttention2, + ) +except: + Qwen2SdpaAttention = Qwen2Attention + Qwen2FlashAttention2 = Qwen2Attention +pass + + +class FastQwen2Model(FastLlamaModel): + + @staticmethod + def pre_patch(): + Qwen2Attention .forward = LlamaAttention_fast_forward + Qwen2SdpaAttention .forward = LlamaAttention_fast_forward + Qwen2FlashAttention2.forward = LlamaAttention_fast_forward + Qwen2DecoderLayer .forward = LlamaDecoderLayer_fast_forward + Qwen2Model .forward = LlamaModel_fast_forward + Qwen2ForCausalLM .forward = CausalLM_fast_forward(LlamaModel_fast_forward_inference) + PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + + # Solves https://github.com/unslothai/unsloth/issues/168 + # Static KV Cache was introduced in 4.38.0, causing training to be much slower. + # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. 
+ # https://github.com/huggingface/transformers/pull/27931 + # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py + import transformers.models.qwen2.modeling_qwen2 + transformers.models.qwen2.modeling_qwen2.Qwen2RotaryEmbedding = LlamaRotaryEmbedding + return + pass + + + @staticmethod + def from_pretrained( + model_name = "Qwen/Qwen1.5-7B", + max_seq_length = 4096, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, # Qwen2 does not support RoPE scaling + fix_tokenizer = True, + model_patcher = None, + tokenizer_name = None, + trust_remote_code = False, + **kwargs, + ): + return FastMistralModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + fix_tokenizer = fix_tokenizer, + model_patcher = FastQwen2Model, + tokenizer_name = tokenizer_name, + trust_remote_code = trust_remote_code, + **kwargs, + ) + pass +pass diff --git a/unsloth/save.py b/unsloth/save.py index b825b10fb7..39b18d0dd9 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -27,6 +27,7 @@ import psutil import re from transformers.models.llama.modeling_llama import logger +from .tokenizer_utils import fix_sentencepiece_gguf __all__ = [ "print_quantization_methods", @@ -774,7 +775,7 @@ def install_llama_cpp_old(version = -10): f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] for command in commands: - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass @@ -806,7 +807,7 @@ def install_llama_cpp_blocking(use_cuda = True): if os.path.exists("llama.cpp"): return for command in commands: - 
with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, bufsize = 1) as sp: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass @@ -865,11 +866,11 @@ def save_to_gguf( first_conversion : str = "f16", _run_installer = None, # Non blocking install of llama.cpp ): - logger.warning( - "NOTICE: llama.cpp GGUF conversion is currently unstable, since llama.cpp is\n"\ - "undergoing some major bug fixes as at 5th of May 2024. This is not an Unsloth issue.\n"\ - "Please be patient - GGUF saving should still work, but might not work as well." - ) + # logger.warning( + # "NOTICE: llama.cpp GGUF conversion is currently unstable, since llama.cpp is\n"\ + # "undergoing some major bug fixes as at 5th of May 2024. This is not an Unsloth issue.\n"\ + # "Please be patient - GGUF saving should still work, but might not work as well." + # ) if quantization_method.startswith("iq2"): raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") @@ -962,6 +963,8 @@ def save_to_gguf( # We first check if tokenizer.model exists in the model_directory if os.path.exists(f"{model_directory}/tokenizer.model"): vocab_type = "spm,hfft,bpe" + # Fix Sentencepiece model as well! + fix_sentencepiece_gguf(model_directory) else: vocab_type = "bpe" pass @@ -969,7 +972,7 @@ def save_to_gguf( if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ f"--outfile {final_location} --vocab-type {vocab_type} "\ - f"--outtype {first_conversion} --concurrency {n_cpus}" + f"--outtype {first_conversion} --concurrency {n_cpus} --pad-vocab" else: # Need to fix convert-hf-to-gguf.py for some models! 
# _fix_gemma_gguf() @@ -979,7 +982,7 @@ def save_to_gguf( f"--outtype {first_conversion}" pass - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, bufsize = 1) as sp: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: @@ -1020,8 +1023,8 @@ def save_to_gguf( f"{final_location} {quantization_method} {n_cpus}" # quantize uses stderr - with subprocess.Popen(command, shell = True, stderr = subprocess.PIPE, bufsize = 1) as sp: - for line in sp.stderr: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: + for line in sp.stdout: print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) @@ -1073,7 +1076,7 @@ def unsloth_save_pretrained_merged( save_peft_format : bool = True, tags : List[str] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", - maximum_memory_usage : float = 0.85, + maximum_memory_usage : float = 0.75, ): """ Same as .save_pretrained(...) except 4bit weights are auto @@ -1116,7 +1119,7 @@ def unsloth_push_to_hub_merged( commit_description : str = "Upload model trained with Unsloth 2x faster", tags : Optional[List[str]] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", - maximum_memory_usage : float = 0.85, + maximum_memory_usage : float = 0.75, ): """ Same as .push_to_hub(...) 
except 4bit weights are auto diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 0d6dadf7dd..87cba843d4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -26,6 +26,7 @@ "fix_sentencepiece_tokenizer", "check_tokenizer", "add_new_tokens", + "fix_sentencepiece_gguf", ] @@ -267,6 +268,71 @@ def fix_sentencepiece_tokenizer( pass +def fix_sentencepiece_gguf(saved_location): + """ + Fixes sentencepiece tokenizers which did not extend the vocabulary with + user defined tokens. + Inspiration from https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py + """ + import numpy as np + from copy import deepcopy + from transformers.utils import sentencepiece_model_pb2 + import json + from enum import IntEnum + import os + + class SentencePieceTokenTypes(IntEnum): + NORMAL = 1 + UNKNOWN = 2 + CONTROL = 3 + USER_DEFINED = 4 + UNUSED = 5 + BYTE = 6 + pass + + # Load tokenizer.model + tokenizer_file = sentencepiece_model_pb2.ModelProto() + if not os.path.isfile(f"{saved_location}/tokenizer.model"): return + tokenizer_file.ParseFromString(open(f"{saved_location}/tokenizer.model", "rb").read()) + sentence_piece_size = len(tokenizer_file.pieces) + + # Load added_tokens_json + if not os.path.isfile(f"{saved_location}/added_tokens.json"): return + with open(f"{saved_location}/added_tokens.json", "r", encoding = "utf-8") as file: + added_tokens_json = json.load(file) + pass + if len(added_tokens_json) == 0: return + + added_tokens_json = dict(sorted(added_tokens_json.items(), key = lambda item: item[1])) + + # Confirm added_tokens_json is correct + added_tokens_ids = np.array(list(added_tokens_json.values())) + diff = np.diff(added_tokens_ids) + if (diff.min() != 1 or diff.max() != 1): return + if (added_tokens_ids.min() != sentence_piece_size): return + + # Edit sentence piece tokens with added_tokens_json + logger.warning("Unsloth: Extending tokenizer.model with added_tokens.json!") + new_tokens = 
deepcopy(tokenizer_file.pieces[-len(added_tokens_ids):]) + for new_token, added_token in zip(new_tokens, added_tokens_json.keys()): + new_token.piece = added_token.encode("utf-8") + new_token.score = -1000.0 + new_token.type = SentencePieceTokenTypes.USER_DEFINED + pass + + tokenizer_file.pieces.extend(new_tokens) + + with open(f"{saved_location}/tokenizer.model", "wb") as file: + file.write(tokenizer_file.SerializeToString()) + pass + + # Add padding tokens + # actual_vocab_size = model.config.vocab_size + # padding = actual_vocab_size - len(tokenizer_file.pieces) + return +pass + + def load_correct_tokenizer( tokenizer_name, model_max_length = None, From 47ffd39abd02338e8a5f226d0f529347fb7e5f89 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 14 May 2024 04:51:23 +1000 Subject: [PATCH 0204/1088] Nightly (#461) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Fix reserved tokens * Update save.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * Update save.py * Update _utils.py * Update chat_templates.py * Adds dependencies and extras for torch 2.3.0 with new xformers versions (#415) * Adds dependencies and extras for torch 2.3.0 with new xformers versions * Add 2.3.0 section to readme * Support Qwen2 (#428) * support Qwen2 * support Qwen2 * Delete README.md * Revert "Delete README.md" This reverts commit 026b05f859410ddd04e1a2b4b54e950b89b4a58a. * Update README.md * Qwen2 == Mistral * Update llama.py * Update __init__.py * Update README.md --------- Co-authored-by: Daniel Han * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update save.py * test_hf_gguf_equivalence * Update chat_templates.py * Update chat_templates.py * --pad-vocab * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Unspecified max_seq_length * possible_pad_token * Update tokenizer_utils.py --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nathan Azrak <42650258+nathan-az@users.noreply.github.com> Co-authored-by: Yang JianXin <995462226@qq.com> --- unsloth/models/_utils.py | 12 +++++++----- unsloth/models/llama.py | 7 ++++++- unsloth/models/loader.py | 4 ++-- unsloth/models/mistral.py | 7 ++++++- unsloth/tokenizer_utils.py | 16 ++++++++++++++-- 5 files changed, 35 insertions(+), 11 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 80cb19517d..09e035e684 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -180,12 +180,14 @@ def patch_tokenizer(model, tokenizer): # Try unk_token possible_pad_token = tokenizer.unk_token pass + if possible_pad_token is None: - # Failure!! 
- raise RuntimeError( - "Unsloth: Tokenizer's pad_token cannot be = eos_token, and we couldn't find a\n"\ - "replacement of either <|reserved... or <|placeholder..." - ) + # Failure to find a good replacement!! We shall manually add one! + new_pad_token = "<|PAD_TOKEN|>" + while new_pad_token in tokenizer.get_vocab(): + new_pad_token += "#" + pass + possible_pad_token = new_pad_token pass name = model.config._name_or_path if model is not None else "Model" diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 44998b4cfb..f08c403074 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1005,7 +1005,7 @@ def pre_patch(): @staticmethod def from_pretrained( model_name = "unsloth/llama-2-7b-bnb-4bit", - max_seq_length = 4096, + max_seq_length = None, dtype = None, load_in_4bit = True, token = None, @@ -1050,6 +1050,11 @@ def from_pretrained( model_max_seq_length = \ AutoConfig.from_pretrained(model_name, token = token).max_position_embeddings + # If max_seq_length is not specified, use maximum fron config + if max_seq_length is None: + max_seq_length = model_max_seq_length + pass + if (rope_scaling is None) and (max_seq_length > model_max_seq_length): rope_scaling = max_seq_length / model_max_seq_length logger.warning_once( diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 2b3bf47949..86a0f5d7bf 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -67,8 +67,8 @@ def _get_model_name(model_name, load_in_4bit = True): class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( - model_name = "unsloth/mistral-7b-bnb-4bit", - max_seq_length = 4096, + model_name = "unsloth/llama-3-8b-bnb-4bit", + max_seq_length = None, dtype = None, load_in_4bit = True, token = None, diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 902177cb12..4594919b38 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -290,7 +290,7 @@ def pre_patch(): @staticmethod def 
from_pretrained( model_name = "unsloth/mistral-7b-bnb-4bit", - max_seq_length = 4096, + max_seq_length = None, dtype = None, load_in_4bit = True, token = None, @@ -340,6 +340,11 @@ def from_pretrained( model_config = AutoConfig.from_pretrained(model_name, token = token) model_max_seq_length = model_config.max_position_embeddings + # If max_seq_length is not specified, use maximum fron config + if max_seq_length is None: + max_seq_length = model_max_seq_length + pass + # Mistral does NOT support RoPE Scaling sadly so we have to error out. if max_seq_length > model_max_seq_length: raise RuntimeError( diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 87cba843d4..7a1d2ddacc 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -304,6 +304,7 @@ class SentencePieceTokenTypes(IntEnum): if len(added_tokens_json) == 0: return added_tokens_json = dict(sorted(added_tokens_json.items(), key = lambda item: item[1])) + new_size = sentence_piece_size + len(added_tokens_json) # Confirm added_tokens_json is correct added_tokens_ids = np.array(list(added_tokens_json.values())) @@ -312,7 +313,11 @@ class SentencePieceTokenTypes(IntEnum): if (added_tokens_ids.min() != sentence_piece_size): return # Edit sentence piece tokens with added_tokens_json - logger.warning("Unsloth: Extending tokenizer.model with added_tokens.json!") + logger.warning( + f"Unsloth: Extending {saved_location}/tokenizer.model with added_tokens.json.\n"\ + f"Originally tokenizer.model is of size ({sentence_piece_size}).\n"\ + f"But we need to extend to sentencepiece vocab size ({new_size})." 
+ ) new_tokens = deepcopy(tokenizer_file.pieces[-len(added_tokens_ids):]) for new_token, added_token in zip(new_tokens, added_tokens_json.keys()): new_token.piece = added_token.encode("utf-8") @@ -357,7 +362,10 @@ def load_correct_tokenizer( padding_side = padding_side, token = token, trust_remote_code = trust_remote_code, + # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 use_fast = False, + legacy = False, + from_slow = True, cache_dir = cache_dir, ) except: @@ -512,7 +520,10 @@ def check_tokenizer( model_max_length = model_max_length, padding_side = padding_side, token = token, + # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 use_fast = False, + legacy = False, + from_slow = True, cache_dir = cache_dir, ) return check_tokenizer( @@ -725,7 +736,8 @@ def fix_sft_trainer_tokenizer(): "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ - "has_bos_token_already = test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template\n"\ + "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ + "if getattr(tokenizer, 'bos_token', None) is not None else False\n"\ "add_special_tokens = False if has_bos_token_already else add_special_tokens\n\n" check_text = check_text.split("\n") From 25975f9a2dc9cdde4ed72e1efd70fb809a0405e9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 16 May 2024 15:09:42 +1000 Subject: [PATCH 0205/1088] Fix generation (#472) * Fix prompt * Update chat_templates.py * fix_untrained_tokens * Update llama.py * add tokens * Update _utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * pad_token * Update 
chat_templates.py * Update chat_templates.py * tokenizer * Update save.py * Update chat_templates.py * Update chat_templates.py * patch tokenizer padding * Update tokenizer_utils.py * Update save.py * Fix: loading models with resized vocabulary (#377) * new: vocab resize on load * new: gitignore * GGUF fix * Readme (#390) * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update README.md * Delete .gitignore * Phi-3 * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Fix reserved tokens * Update save.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update chat_templates.py * Update save.py * Update _utils.py * Update chat_templates.py * Adds dependencies and extras for torch 2.3.0 with new xformers versions (#415) * Adds dependencies and extras for torch 2.3.0 with new xformers versions * Add 2.3.0 section to readme * Support Qwen2 (#428) * support Qwen2 * support Qwen2 * Delete README.md * Revert "Delete README.md" This reverts commit 026b05f859410ddd04e1a2b4b54e950b89b4a58a. 
* Update README.md * Qwen2 == Mistral * Update llama.py * Update __init__.py * Update README.md --------- Co-authored-by: Daniel Han * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update save.py * test_hf_gguf_equivalence * Update chat_templates.py * Update chat_templates.py * --pad-vocab * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Unspecified max_seq_length * possible_pad_token * Update tokenizer_utils.py * past_key_values * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * flag --------- Co-authored-by: Igor Kilbas Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nathan Azrak <42650258+nathan-az@users.noreply.github.com> Co-authored-by: Yang JianXin <995462226@qq.com> --- unsloth/models/_utils.py | 41 ++++++++++++++++++++++++++++++++++---- unsloth/models/gemma.py | 2 +- unsloth/models/llama.py | 32 ++++++++++++++++++++++++----- unsloth/tokenizer_utils.py | 9 +++++---- 4 files changed, 70 insertions(+), 14 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 09e035e684..a53de42c09 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -15,11 +15,12 @@ import torch from typing import Union, Optional, List, Any, Callable import warnings -warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") -warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") 
-warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") -warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer @@ -388,3 +389,35 @@ def backward(ctx, dY): pass pass + +""" + Remove warnings about missing kwargs +""" +try: + from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod + from inspect import getsource + import re + BitsAndBytesConfig__init__ = getsource(BitsAndBytesConfig.__init__) + BitsAndBytesConfig__init__ = re.sub( + r"if[\s]{1,}kwargs\:[\s]{1,}.+?\n", + "", + BitsAndBytesConfig__init__, + flags = re.MULTILINE, + ) + BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.split("\n") + length_spaces = len(re.match(r"[\s]{1,}", BitsAndBytesConfig__init__[0]).group(0)) + BitsAndBytesConfig__init__ = "\n".join(x[length_spaces:] for x in BitsAndBytesConfig__init__) + BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.replace( + "__init__", + "_BitsAndBytesConfig__init__", + ) + exec(BitsAndBytesConfig__init__, globals()) + + import transformers.utils.quantization_config + transformers.utils.quantization_config.BitsAndBytesConfig.__init__ = _BitsAndBytesConfig__init__ +except: + logger.warning_once( + "Unsloth unsuccessfully patched bitsandbytes. Please file a bug report.\n"\ + "Luckily, your training run will still work in the meantime!" 
+ ) +pass diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index c0cce75e2f..5dd2a5abd5 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -71,7 +71,7 @@ def GemmaDecoderLayer_fast_forward( padding_mask: Optional[torch.LongTensor] = None, *args, **kwargs, ): - if use_cache: #past_key_value is not None: + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") # Self Attention diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f08c403074..f7fd5f13f4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -407,7 +407,7 @@ def LlamaDecoderLayer_fast_forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ - if use_cache: + if use_cache and hasattr(self, "_flag_for_generation"): residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( @@ -789,7 +789,7 @@ def _CausalLM_fast_forward( return_dict: Optional[bool] = None, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - + if past_key_values is not None: outputs = fast_forward_inference( self, @@ -968,12 +968,34 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass -def _wrap_fast_inference(generate, device_type, dtype): +def _wrap_fast_inference(generate, device_type, dtype, model): # Wraps inference with bfloat16 / float16 @torch.inference_mode def _fast_generate(*args, **kwargs): + + # Set a flag for generation! 
+ internal_model = model + while hasattr(internal_model, "model"): + internal_model._flag_for_generation = True + internal_model = internal_model.model + pass + internal_model._flag_for_generation = True + + # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): - return generate(*args, **kwargs) + output = generate(*args, **kwargs) + pass + + # Unset a flag for generation! + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation + internal_model = internal_model.model + pass + if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation + + return output + pass return _fast_generate pass @@ -1787,7 +1809,7 @@ def for_inference(model): # Wrap model.generate model._unwrapped_old_generate = model.generate - model.generate = _wrap_fast_inference(model.generate, device_type, dtype) + model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) # Patch tokenizer to pad to the left internal_model = model diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 7a1d2ddacc..1cbe49b7ae 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -369,10 +369,11 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) except: - print( - f"Unsloth: {tokenizer_name} has no tokenizer.model file.\n"\ - "Just informing you about this - this is not a critical error." - ) + pass + # print( + # f"Unsloth: {tokenizer_name} has no tokenizer.model file.\n"\ + # "Just informing you about this - this is not a critical error." 
+ # ) pass fast_tokenizer = AutoTokenizer.from_pretrained( From 8dc0561ec0776fcc49d8a406c8a0acf295bd561a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 17 May 2024 04:18:18 +1000 Subject: [PATCH 0206/1088] peft issue (#480) --- pyproject.toml | 2 +- unsloth/save.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0398d0df4c..91654e68a4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ huggingface = [ "numpy", "accelerate>=0.26.1", "trl>=0.7.9", - "peft>=0.7.1", + "peft>=0.7.1,<0.11.0", "protobuf<4.0.0", ] cu118only = [ diff --git a/unsloth/save.py b/unsloth/save.py index 39b18d0dd9..caa5aaa619 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -777,6 +777,8 @@ def install_llama_cpp_old(version = -10): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + if "undefined reference" in line: + raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass pass @@ -809,6 +811,8 @@ def install_llama_cpp_blocking(use_cuda = True): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + if "undefined reference" in line: + raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") print(line.decode("utf-8", errors = "replace"), flush = True, end = "") pass pass @@ -984,6 +988,8 @@ def save_to_gguf( with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + if "undefined reference" in line: + raise RuntimeError("Failed compiling llama.cpp. 
Please report this ASAP!") print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) @@ -1025,6 +1031,8 @@ def save_to_gguf( # quantize uses stderr with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + if "undefined reference" in line: + raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") print(line.decode("utf-8", errors = "replace"), flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) From e6f63d1d5e2ead92428bfb2ae2003cc238f93719 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 17 May 2024 14:18:52 +1000 Subject: [PATCH 0207/1088] Squashed commit of the following: commit 9f9f83fb4387ab717a6ec2409471842bcc01ae71 Author: Daniel Han-Chen Date: Fri May 17 14:16:39 2024 +1000 Update save.py commit e0103a61a68b7089ad942e78da32d6276845b221 Author: Daniel Han-Chen Date: Fri May 17 04:14:05 2024 +1000 peft issue --- unsloth/save.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index caa5aaa619..033f6eb1e0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -777,9 +777,10 @@ def install_llama_cpp_old(version = -10): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") if "undefined reference" in line: raise RuntimeError("Failed compiling llama.cpp. 
Please report this ASAP!") - print(line.decode("utf-8", errors = "replace"), flush = True, end = "") + print(line, flush = True, end = "") pass pass # Check if successful @@ -811,9 +812,10 @@ def install_llama_cpp_blocking(use_cuda = True): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") if "undefined reference" in line: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line.decode("utf-8", errors = "replace"), flush = True, end = "") + print(line, flush = True, end = "") pass pass pass @@ -988,9 +990,10 @@ def save_to_gguf( with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") if "undefined reference" in line: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line.decode("utf-8", errors = "replace"), flush = True, end = "") + print(line, flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) pass @@ -1031,9 +1034,10 @@ def save_to_gguf( # quantize uses stderr with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") if "undefined reference" in line: raise RuntimeError("Failed compiling llama.cpp. 
Please report this ASAP!") - print(line.decode("utf-8", errors = "replace"), flush = True, end = "") + print(line, flush = True, end = "") if sp.returncode is not None and sp.returncode != 0: raise subprocess.CalledProcessError(sp.returncode, sp.args) pass From 2f2b478868f63b66aaaa93db66ab3d811cddc95e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 17 May 2024 23:46:33 +1000 Subject: [PATCH 0208/1088] Nightly (#483) * peft issue * Update save.py * Update __init__.py * Update pyproject.toml --- pyproject.toml | 2 +- unsloth/kernels/__init__.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 91654e68a4..3015563c1d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ huggingface = [ "numpy", "accelerate>=0.26.1", "trl>=0.7.9", - "peft>=0.7.1,<0.11.0", + "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", ] cu118only = [ diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index cb04377b1a..b1fdba8328 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -31,3 +31,9 @@ apply_lora_o, ) from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora + +try: + print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") +except: + print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") +pass From 5134a42f0689c0bb69aba12dc668755bdd4b4693 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 May 2024 04:45:57 +1000 Subject: [PATCH 0209/1088] Nightly (#506) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py --- README.md | 12 +++-- unsloth/__init__.py | 1 + unsloth/models/__init__.py | 1 + 
unsloth/models/_utils.py | 56 ++++++++++++++++++++++ unsloth/models/llama.py | 74 ++++++++++++++++++++++------- unsloth/models/mistral.py | 2 +- unsloth/trainer.py | 95 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 219 insertions(+), 22 deletions(-) create mode 100644 unsloth/trainer.py diff --git a/README.md b/README.md index ca5b6533b5..4d934eb033 100644 --- a/README.md +++ b/README.md @@ -180,7 +180,8 @@ python -m bitsandbytes - We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! ```python -from unsloth import FastLanguageModel +from unsloth import FastLanguageModel +from unsloth import is_bfloat16_supported import torch from trl import SFTTrainer from transformers import TrainingArguments @@ -238,8 +239,8 @@ trainer = SFTTrainer( gradient_accumulation_steps = 4, warmup_steps = 10, max_steps = 60, - fp16 = not torch.cuda.is_bf16_supported(), - bf16 = torch.cuda.is_bf16_supported(), + fp16 = not is_bfloat16_supported(), + bf16 = is_bfloat16_supported(), logging_steps = 1, output_dir = "outputs", optim = "adamw_8bit", @@ -263,6 +264,7 @@ We're in 🤗Hugging Face's official docs! 
We're on the [SFT docs](https://huggi ```python from unsloth import FastLanguageModel, PatchDPOTrainer +from unsloth import is_bfloat16_supported PatchDPOTrainer() import torch from transformers import TrainingArguments @@ -298,8 +300,8 @@ dpo_trainer = DPOTrainer( gradient_accumulation_steps = 8, warmup_ratio = 0.1, num_train_epochs = 3, - fp16 = not torch.cuda.is_bf16_supported(), - bf16 = torch.cuda.is_bf16_supported(), + fp16 = not is_bfloat16_supported(), + bf16 = is_bfloat16_supported(), logging_steps = 1, optim = "adamw_8bit", seed = 42, diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d4ca45d7d1..2dcf1e6a43 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -114,3 +114,4 @@ from .save import * from .chat_templates import * from .tokenizer_utils import * +from .trainer import * diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index ff7129e06a..e67a9e5fad 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -17,3 +17,4 @@ from .mistral import FastMistralModel from .qwen2 import FastQwen2Model from .dpo import PatchDPOTrainer +from ._utils import is_bfloat16_supported diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a53de42c09..2c1eb4d5c4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -35,7 +35,10 @@ # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() +SUPPORTS_BFLOAT16 = False + if major_version >= 8: + SUPPORTS_BFLOAT16 = True try: from flash_attn import flash_attn_func # Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl" @@ -72,6 +75,10 @@ "patch_tokenizer", "get_statistics", "Unsloth_Offloaded_Gradient_Checkpointer", + "offload_to_disk", + "offload_input_embeddings", + "offload_output_embeddings", + "is_bfloat16_supported", ] @@ -421,3 +428,52 @@ def backward(ctx, dY): "Luckily, your training run will still work in the meantime!" 
) pass + + +# Offloading to disk for modules (lm_head, embed_tokens) +import os +import pickle + +def offload_to_disk(W, model, name, temporary_location : str = "_unsloth_temporary_saved_buffers"): + file_location = os.path.join(temporary_location, model.config._name_or_path) + if not os.path.exists(file_location): + os.makedirs(file_location) + pass + + filename = os.path.join(file_location, f"{name}.pt") + W = W.weight if hasattr(W, "weight") else W + torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) + offloaded_W = torch.load(filename, map_location = "cpu", mmap = True) + offloaded_W._offloaded_file_location = filename + return offloaded_W +pass + + +def offload_input_embeddings(model, temporary_location : str = "_unsloth_temporary_saved_buffers"): + offloaded_W = offload_to_disk(model.get_input_embeddings(), model, "input_embeddings", temporary_location) + new_input_embeddings = torch.nn.Embedding.from_pretrained(offloaded_W) + new_input_embeddings._offloaded_file_location = offloaded_W._offloaded_file_location + model.set_input_embeddings(new_input_embeddings) + return +pass + + +def offload_output_embeddings(model, temporary_location : str = "_unsloth_temporary_saved_buffers"): + offloaded_W = offload_to_disk(model.get_output_embeddings(), model, "output_embeddings", temporary_location) + + new_output_embeddings = torch.nn.Linear(1, 1, bias = None) + del new_output_embeddings.weight + new_output_embeddings.weight = offloaded_W + new_output_embeddings.in_features = offloaded_W.shape[1] + new_output_embeddings.out_features = offloaded_W.shape[0] + + new_output_embeddings._offloaded_file_location = offloaded_W._offloaded_file_location + model.set_output_embeddings(new_output_embeddings) + return +pass + + +# Fixes a weird Torch 2.3 bug which says T4s have bfloat16 +def is_bfloat16_supported(): + return SUPPORTS_BFLOAT16 +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f7fd5f13f4..1d6a282a55 
100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -13,6 +13,7 @@ # limitations under the License. import torch +import gc from typing import Optional, Tuple, List, Union from torch.nn.functional import scaled_dot_product_attention from transformers.models.llama.modeling_llama import ( @@ -1046,7 +1047,7 @@ def from_pretrained( token = os.environ["HUGGINGFACE_TOKEN"] if model_patcher is None: model_patcher = FastLlamaModel - SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() + SUPPORTS_BFLOAT16 = is_bfloat16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) @@ -1193,7 +1194,11 @@ def from_pretrained( f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning_once(debug_info)""" + logger.warning_once(debug_info) + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache()""" debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) @@ -1370,7 +1375,6 @@ def post_patch(model): pass # Clear deleted GPU items - import gc for _ in range(3): gc.collect() torch.cuda.empty_cache() @@ -1396,6 +1400,7 @@ def get_peft_model( modules_to_save = None, init_lora_weights = True, loftq_config = {}, + temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): transformers_set_seed(random_state) @@ -1490,19 +1495,19 @@ def get_peft_model( final_modules = [] for module in target_modules: if module == "lm_head": - logger.warning_once( - "Unsloth: `lm_head` should be placed in `modules_to_save` and not `target_modules`. "\ - "Luckily, we shall do it for you!" 
- ) + # logger.warning_once( + # "Unsloth: `lm_head` should be placed in `modules_to_save` and not `target_modules`. "\ + # "Luckily, we shall do it for you!" + # ) train_lm_head = True if modules_to_save is None: modules_to_save = ["lm_head"] else: modules_to_save.append("lm_head") elif module == "embed_tokens": - logger.warning_once( - "Unsloth: `embed_tokens` should be placed in `modules_to_save` and not `target_modules`. "\ - "Luckily, we shall do it for you!" - ) + # logger.warning_once( + # "Unsloth: `embed_tokens` should be placed in `modules_to_save` and not `target_modules`. "\ + # "Luckily, we shall do it for you!" + # ) train_embed_tokens = True if modules_to_save is None: modules_to_save = ["embed_tokens"] else: modules_to_save.append("embed_tokens") @@ -1579,6 +1584,35 @@ def get_peft_model( _saved_temp_tokenizer = model._saved_temp_tokenizer lora_config = LoraConfig(**arguments) + + # First offload lm_head and embed_tokens to disk + input_embeddings_device = model. get_input_embeddings().weight.device + output_embeddings_device = model.get_output_embeddings().weight.device + + if use_gradient_checkpointing == "unsloth": + if train_embed_tokens: + print("Unsloth: Offloading input_embeddings to disk to save VRAM") + offload_input_embeddings(model, temporary_location) + pass + + # Remove old items to save VRAM + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + + if train_lm_head: + print("Unsloth: Offloading output_embeddings to disk to save VRAM") + offload_output_embeddings(model, temporary_location) + pass + + # Remove old items to save VRAM + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + pass + model = _get_peft_model(model, lora_config) model._saved_temp_tokenizer = _saved_temp_tokenizer @@ -1589,14 +1623,16 @@ def get_peft_model( if train_embed_tokens: print("Unsloth: Casting embed_tokens to float32") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) - 
model.model.model.embed_tokens.modules_to_save.default.to(torch.float32) + model.model.model.embed_tokens.modules_to_save.default\ + .to(device = input_embeddings_device, dtype = torch.float32, non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass if train_lm_head: print("Unsloth: Casting lm_head to float32") assert(hasattr(model.model.lm_head, "modules_to_save")) - model.model.lm_head.modules_to_save.default.to(torch.float32) + model.model.lm_head.modules_to_save.default\ + .to(device = output_embeddings_device, dtype = torch.float32, non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass @@ -1612,6 +1648,12 @@ def get_peft_model( internal_model._saved_temp_tokenizer.padding_side = "right" pass + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + return model pass @@ -1715,7 +1757,7 @@ def patch_peft_model( n_mlp += 1 else: logger.warning_once( - "Unsloth cannot patch MLP layers with our manual autograd engine since either LoRA adapters\n"\ + "Not an error, but Unsloth cannot patch MLP layers with our manual autograd engine since either LoRA adapters\n"\ "are not enabled or a bias term (like in Qwen) is used." ) pass @@ -1738,7 +1780,7 @@ def patch_peft_model( n_qkv += 1 else: logger.warning_once( - "Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ + "Not an error, but Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ "are not enabled or a bias term (like in Qwen) is used." 
) pass @@ -1753,7 +1795,7 @@ def patch_peft_model( n_o += 1 else: logger.warning_once( - "Unsloth cannot patch O projection layer with our manual autograd engine since either LoRA adapters\n"\ + "Not an error, but Unsloth cannot patch O projection layer with our manual autograd engine since either LoRA adapters\n"\ "are not enabled or a bias term (like in Qwen) is used." ) pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 4594919b38..365d60a3e4 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -314,7 +314,7 @@ def from_pretrained( logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") pass - SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() + SUPPORTS_BFLOAT16 = is_bfloat16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) diff --git a/unsloth/trainer.py b/unsloth/trainer.py new file mode 100644 index 0000000000..b234a98d8b --- /dev/null +++ b/unsloth/trainer.py @@ -0,0 +1,95 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dataclasses import dataclass, field +from typing import Optional +from transformers import TrainingArguments +from trl import SFTTrainer + +__all__ = [ + "UnslothTrainingArguments", + "UnslothTrainer", +] + + +@dataclass +class UnslothTrainingArguments(TrainingArguments): + embedding_learning_rate : Optional[float] = field( + default = None, + metadata = {"help" : "Different learning rates for embeddings and lm_head."} + ) +pass + + +def _create_unsloth_optimizer( + model, + optimizer_cls, + optimizer_kwargs, + embedding_lr = 5e-5, +): + lr = optimizer_kwargs["lr"] + weight_decay = optimizer_kwargs.get("weight_decay", 0.0) + + param_groups = \ + { + "non_embeddings" : {}, + "embeddings" : {}, + } + + for name, param in model.named_parameters(): + if not param.requires_grad: continue + if name.endswith("modules_to_save.default.weight"): + partial_name = name[:-len(".modules_to_save.default.weight")] + partial_name = partial_name[partial_name.rfind(".")+1:] + print(f"Unsloth: Setting lr = {embedding_lr:.2e} instead of {lr:.2e} for {partial_name}.") + param_groups["embeddings"] [name] = param + else: + param_groups["non_embeddings"][name] = param + pass + pass + + optimizer_grouped_parameters = [ + { + "params" : list(param_groups["non_embeddings"].values()), + "weight_decay" : weight_decay, + "lr" : lr, + }, + { + "params" : list(param_groups["embeddings"].values()), + "weight_decay" : weight_decay, + "lr" : embedding_lr, + }, + ] + optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + return optimizer +pass + + +class UnslothTrainer(SFTTrainer): + def create_optimizer(self): + embedding_learning_rate = getattr(self.args, "embedding_learning_rate", None) + if embedding_learning_rate is None: return super().create_optimizer() + + if self.optimizer is None: + optimizer_cls, optimizer_kwargs = SFTTrainer.get_optimizer_cls_and_kwargs(self.args) + self.optimizer = _create_unsloth_optimizer( + self.model, + optimizer_cls, + optimizer_kwargs, 
+ embedding_learning_rate, + ) + pass + return self.optimizer + pass +pass From b107b432bb70628d76c2c25fde240a0bfde495ae Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 May 2024 20:40:57 +1000 Subject: [PATCH 0210/1088] Fix `is_bfloat16_supported` missing (#510) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py --- unsloth/__init__.py | 7 +++++++ unsloth/trainer.py | 1 + 2 files changed, 8 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 2dcf1e6a43..c8f4ca1041 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -41,6 +41,13 @@ except: raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ "We have some installation instructions on our Github page.") +pass + +# Fix up is_bf16_supported https://github.com/unslothai/unsloth/issues/504 +major_version, minor_version = torch.cuda.get_device_capability() +SUPPORTS_BFLOAT16 = (major_version >= 8) +def is_bf16_supported(): return SUPPORTS_BFLOAT16 +torch.cuda.is_bf16_supported = is_bf16_supported # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 diff --git a/unsloth/trainer.py b/unsloth/trainer.py index b234a98d8b..c8e00be231 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -16,6 +16,7 @@ from typing import Optional from transformers import TrainingArguments from trl import SFTTrainer +from . 
import is_bfloat16_supported __all__ = [ "UnslothTrainingArguments", From 715e935b5eda5768d1bab6592095dc192ddab7e6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 23 May 2024 04:15:02 +1000 Subject: [PATCH 0211/1088] Mistral v3 (#514) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 --- unsloth/__init__.py | 1 + unsloth/models/mapper.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c8f4ca1041..d85eca003d 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -17,6 +17,7 @@ # Currently only supports 1 GPU, or else seg faults will occur. if "CUDA_VISIBLE_DEVICES" in os.environ: + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" devices = os.environ["CUDA_VISIBLE_DEVICES"] # Check if there are multiple cuda devices set in env if not devices.isdigit(): diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b4fbe57387..29896ef2bb 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -144,6 +144,14 @@ "unsloth/Phi-3-mini-4k-instruct", "microsoft/Phi-3-mini-4k-instruct", ), + "unsloth/mistral-7b-v0.3-bnb-4bit" : ( + "unsloth/mistral-7b-v0.3", + "mistralai/Mistral-7B-v0.3", + ), + "unsloth/mistral-7b-instruct-v0.3-bnb-4bit" : ( + "unsloth/mistral-7b-instruct-v0.3", + "mistralai/Mistral-7B-Instruct-v0.3", + ), } INT_TO_FLOAT_MAPPER = {} From a106e0b256fb9cdb2e423eddd9edef805f292c52 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 23 May 2024 04:31:26 +1000 Subject: [PATCH 0212/1088] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 
4d934eb033..ab06abe99a 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,8 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | -| **Mistral (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | +| **Mistral v3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | +| **Mistral v1 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | | **Gemma (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | @@ -32,10 +33,11 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Benchmarking compared to FA2 + Hugging Face combined. - **Kaggle Notebooks** for [Llama-3 8b](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7b](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7b](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- This [conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) is useful for Llama-3. 
And ChatML for [Mistral 7b](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). +- Also [Llama-3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing). [Mistral 7b v1 ChatML](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). [Mistral 7b v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing). - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text. ## 🦥 Unsloth.ai News +- 📣 NEW! Mistral v3 Base and Instruct now supported! 2x faster, 70% less VRAM notebooks for the [base model](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [instruct with ShareGPT](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - 📣 NEW! Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) - 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). - 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! 
From bad4bc64fa2bd45f18fe5d0ef9365dc63c542fc5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 24 May 2024 04:24:01 +1000 Subject: [PATCH 0213/1088] Phi 3 Medium (#518) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py --- unsloth/chat_templates.py | 46 ++++++++++++++++++++++++++------------- unsloth/models/_utils.py | 13 +++++++---- unsloth/models/mapper.py | 4 ++++ 3 files changed, 44 insertions(+), 19 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 3af4c4e9ae..3decdf7ffc 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -52,7 +52,7 @@ "{{ '>>> Assistant: ' }}"\ "{% endif %}" unsloth_eos_token = "eos_token" -CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token,) +CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False,) # Zephyr has no BOS! @@ -70,7 +70,7 @@ "{{ '<|assistant|>\n' }}"\ "{% endif %}" zephyr_eos_token = "eos_token" -CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token,) +CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False,) # ChatML has no BOS and not EOS! Rather <|im_start|> and <|im_end|> acts as BOS / EOS. @@ -88,7 +88,7 @@ "{{ '<|im_start|>assistant\n' }}"\ "{% endif %}" chatml_eos_token = "<|im_end|>" -CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token,) +CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True,) # Mistral Instruct doesn't allow system prompts, so we append it to the user message. 
@@ -115,7 +115,7 @@ "{% endif %}"\ "{% endfor %}" mistral_eos_token = "eos_token" -CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token,) +CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False,) # Adds BOS to every convo! And weird <> system messages. @@ -141,7 +141,7 @@ "{% endif %}"\ "{% endfor %}" llama_eos_token = "eos_token" -CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token,) +CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False,) # https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template @@ -167,7 +167,7 @@ "{{ 'ASSISTANT:' }}"\ "{% endif %}" vicuna_eos_token = "eos_token" -CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token,) +CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False,) # https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template @@ -193,7 +193,7 @@ "{{ '### Assistant:' }}"\ "{% endif %}" vicuna_old_eos_token = "eos_token" -CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token,) +CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False,) # https://github.com/tatsu-lab/stanford_alpaca Changed for multi-turn convos @@ -219,7 +219,7 @@ "{{ '### Response:\n' }}"\ "{% endif %}" alpaca_eos_token = "eos_token" -CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token,) +CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False,) # https://huggingface.co/google/gemma-7b-it @@ -240,7 +240,7 @@ "{{ 'model\n' }}"\ "{% endif %}" gemma_eos_token = "" -CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token,) +CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True,) # Gemma with ChatML instead @@ -250,7 +250,7 @@ {"" : "<|im_start|>", "" : "<|im_end|>"}, "<|im_end|>", ) -CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token,) +CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True,) # 
Llama-3 @@ -258,26 +258,38 @@ llama3_template = \ "{{ bos_token }}"\ "{% for message in messages %}"\ - "{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '<|start_header_id|>user<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\ + "{% else %}"\ + "{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\ + "{% endif %}"\ "{% endfor %}"\ "{% if add_generation_prompt %}"\ "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"\ "{% endif %}" llama3_template_eos_token = "eos_token" -CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token,) +CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False,) # Phi-3 phi3_template = \ "{{ bos_token }}"\ "{% for message in messages %}"\ - "{{'<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n'}}"\ + "{% if message['role'] == 'user' %}"\ + "{{'<|user|>\n' + message['content'] + '<|end|>\n'}}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}"\ + "{% else %}"\ + "{{'<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n'}}"\ + "{% endif %}"\ "{% endfor %}"\ "{% if add_generation_prompt %}"\ "{{ '<|assistant|>\n' }}"\ "{% endif %}" phi3_template_eos_token = "<|end|>" -CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token,) +CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False,) def get_chat_template( @@ -319,7 +331,11 @@ def get_chat_template( elif type(chat_template) is str: - chat_template, stop_word = CHAT_TEMPLATES[chat_template] + chat_template, stop_word, yes_map_eos_token = CHAT_TEMPLATES[chat_template] + + # Check mapping to 
eos_token + if not map_eos_token and yes_map_eos_token: map_eos_token = True + if not yes_map_eos_token and map_eos_token: map_eos_token = False if type(stop_word) in (list, tuple,): token_mapping, stop_word = stop_word diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2c1eb4d5c4..0217c7b518 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -159,7 +159,7 @@ def patch_tokenizer(model, tokenizer): Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! Fixes https://github.com/unslothai/unsloth/issues/5 """ - possible_reserved_tokens = ("<|reserved", "<|placeholder",) + possible_reserved_tokens = ("<|reserved", "<|placeholder", "[control") if model is not None: model.config.update({"unsloth_version" : __version__}) @@ -176,14 +176,19 @@ def patch_tokenizer(model, tokenizer): if bad_pad_token: # Find a better pad token - added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] + aadded_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] possible_pad_token = None + n_possible_pad_tokens = 0 for added_token in added_tokens[::-1]: if added_token.startswith(possible_reserved_tokens): - possible_pad_token = added_token - break + if possible_pad_token is None: possible_pad_token = added_token + n_possible_pad_tokens += 1 + # We must see at least 3 of the reserved tokens + if n_possible_pad_tokens >= 3: break pass pass + if n_possible_pad_tokens < 3: possible_pad_token = None + if possible_pad_token is None: # Try unk_token possible_pad_token = tokenizer.unk_token diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 29896ef2bb..777f310c73 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -152,6 +152,10 @@ "unsloth/mistral-7b-instruct-v0.3", "mistralai/Mistral-7B-Instruct-v0.3", ), + "unsloth/Phi-3-medium-4k-instruct-bnb-4bit" : ( + "unsloth/Phi-3-medium-4k-instruct", + "microsoft/Phi-3-medium-4k-instruct", + ), } INT_TO_FLOAT_MAPPER = 
{} From 2e1cb3888b2b6c9ea3bca56e808d0604b715f23a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 24 May 2024 06:36:10 +1000 Subject: [PATCH 0214/1088] Phi-3, Llama-3 bug fixes (#519) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 --- README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index ab06abe99a..1d335101d8 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3, Mistral & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3, Mistral, Phi-3 & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -24,24 +24,24 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and |-----------|---------|--------|----------| | **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | | **Mistral v3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | -| **Mistral v1 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 73% less | +| **Phi-3 (medium)** | [▶️ Start for free](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) | 2x faster | 50% less | +| **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | -| **Phi-3 (3.8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1NvkBmkHfucGO3Ve9s1NKZvMNlw5p83ym?usp=sharing) | 2x faster | 50% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -- Benchmarking compared to FA2 + Hugging Face combined. 
-- **Kaggle Notebooks** for [Llama-3 8b](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7b](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7b](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Also [Llama-3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing). [Mistral 7b v1 ChatML](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing). [Mistral 7b v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing). -- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text. +- **Kaggle Notebooks** for [Llama 3 8B](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7B](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7B](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral 7B v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text + ## 🦥 Unsloth.ai News -- 📣 NEW! Mistral v3 Base and Instruct now supported! 2x faster, 70% less VRAM notebooks for the [base model](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [instruct with ShareGPT](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- 📣 NEW! 
[Phi-3 medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) and [Phi-3 mini](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) support is here! +- 📣 NEW! [Mistral v3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! - 📣 NEW! Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) - 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). - 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! -- 📣 NEW! [Phi-3 3.8b support](https://colab.research.google.com/drive/1NvkBmkHfucGO3Ve9s1NKZvMNlw5p83ym?usp=sharing) is here! - 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. To enable, simply change 1 line: ```python model = FastLanguageModel.get_peft_model( @@ -195,15 +195,15 @@ dataset = load_dataset("json", data_files = {"train" : url}, split = "train") # 4bit pre quantized models we support for 4x faster downloading + no OOMs. fourbit_models = [ + "unsloth/mistral-7b-v0.3-bnb-4bit", # New Mistral v3 2x faster! + "unsloth/mistral-7b-instruct-v0.3-bnb-4bit", + "unsloth/llama-3-8b-bnb-4bit", # Llama-3 15 trillion tokens model 2x faster! + "unsloth/llama-3-8b-Instruct-bnb-4bit", + "unsloth/llama-3-70b-bnb-4bit", + "unsloth/Phi-3-mini-4k-instruct", # Phi-3 2x faster! 
+ "unsloth/Phi-3-medium-4k-instruct", "unsloth/mistral-7b-bnb-4bit", - "unsloth/mistral-7b-instruct-v0.2-bnb-4bit", - "unsloth/llama-2-7b-bnb-4bit", - "unsloth/gemma-7b-bnb-4bit", - "unsloth/gemma-7b-it-bnb-4bit", # Instruct version of Gemma 7b - "unsloth/gemma-2b-bnb-4bit", - "unsloth/gemma-2b-it-bnb-4bit", # Instruct version of Gemma 2b - "unsloth/llama-3-8b-bnb-4bit", # [NEW] 15 Trillion token Llama-3 - "unsloth/Phi-3-mini-4k-instruct-bnb-4bit", + "unsloth/gemma-7b-bnb-4bit", # Gemma 2.2x faster! ] # More models at https://huggingface.co/unsloth model, tokenizer = FastLanguageModel.from_pretrained( From b1965320cbe5d4b8a1dc81676e4d2bf86551d44d Mon Sep 17 00:00:00 2001 From: Z <48565901+CoffeeVampir3@users.noreply.github.com> Date: Thu, 23 May 2024 19:10:06 -0600 Subject: [PATCH 0215/1088] Update _utils.py (#520) Fixed a typo in the tokenizer fixer. --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0217c7b518..a2d4d50c24 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -176,7 +176,7 @@ def patch_tokenizer(model, tokenizer): if bad_pad_token: # Find a better pad token - aadded_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] + added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] possible_pad_token = None n_possible_pad_tokens = 0 for added_token in added_tokens[::-1]: From b0781339f035c72b3028d846eb2261e8115cd375 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 24 May 2024 11:21:29 +1000 Subject: [PATCH 0216/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 1cbe49b7ae..a03491669f 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -719,9 +719,9 @@ def add_new_tokens( import trl.trainer.sft_trainer from 
trl.trainer.sft_trainer import * -def fix_sft_trainer_tokenizer(): +def patch_sft_trainer_tokenizer(): """ - Fixes double adding BOS tokens like in llama-3 + Patches the trainer with changes """ for function_name, replacer in ( ("_prepare_non_packed_dataloader", "def tokenize(element):",), @@ -747,10 +747,8 @@ def fix_sft_trainer_tokenizer(): function = function.replace(replacer, check_text + replacer) exec(function, globals()) - # Replace TRL's SFTTrainer exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) pass pass -# Fixes double adding BOS tokens like in llama-3 -fix_sft_trainer_tokenizer() +patch_sft_trainer_tokenizer() From 951503886758aff886ffd0517c98a18f72ddde8a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 29 May 2024 00:30:38 +1000 Subject: [PATCH 0217/1088] Nightly (#548) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 4 +- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 19 ++-- unsloth/models/loader.py | 16 +-- unsloth/save.py | 49 ++++++++- unsloth/tokenizer_utils.py | 198 ++++++++++++++++++++++++++++++------- 6 files changed, 229 insertions(+), 59 deletions(-) diff --git a/README.md b/README.md index 1d335101d8..3537b8de38 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | -| **Mistral v3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | +| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | | **Phi-3 (medium)** | [▶️ Start for free](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) | 2x faster | 50% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | @@ -38,7 +38,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News - 📣 NEW! 
[Phi-3 medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) and [Phi-3 mini](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) support is here! -- 📣 NEW! [Mistral v3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! +- 📣 NEW! [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! - 📣 NEW! Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) - 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). - 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! 
diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a2d4d50c24..22fb51144c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -381,7 +381,7 @@ class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): def forward(ctx, forward_function, hidden_states, *args): saved_hidden_states = hidden_states.to("cpu", non_blocking = True) with torch.no_grad(): - (output,) = forward_function(hidden_states, *args) + output = forward_function(hidden_states, *args) ctx.save_for_backward(saved_hidden_states) ctx.forward_function = forward_function ctx.args = args diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1d6a282a55..9aeb55e4ea 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -657,7 +657,7 @@ def LlamaModel_fast_forward( past_key_values, output_attentions, use_cache, - ) + )[0] elif gradient_checkpointing: def create_custom_forward(module): @@ -1166,7 +1166,7 @@ def from_pretrained( except: raise RuntimeError( "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\n" "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", ) @@ -1194,7 +1194,7 @@ def from_pretrained( f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning_once(debug_info) + logger.warning(debug_info) import gc for _ in range(3): gc.collect() @@ -1209,7 +1209,7 @@ def from_pretrained( if n_total_devices > 2: logger.warning_once( "Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) @@ -1238,9 +1238,10 @@ def from_pretrained( n_total_devices = total_batches // ga // bsz if n_total_devices > 2: logger.warning_once( - "Please consider a commercial license - Unsloth was designed for the GPU Poor.\\n" - "The OSS currently works on 4 GPUs - we're a 2 person team, so please help fund\\n" - "our development costs by supporting us through Ko-fi or buying a license! Thanks!", + "Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" + "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", ) divisor = n_total_devices / 2 bsz = self._train_batch_size = max(int(bsz / divisor), 1) @@ -1267,7 +1268,7 @@ def from_pretrained( if "n_total_devices >" not in inner_training_loop: raise RuntimeError( "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\n" "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) @@ -1703,7 +1704,7 @@ def patch_peft_model( if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": raise RuntimeError( "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\n" "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 86a0f5d7bf..b2f0e4efdc 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -45,18 +45,18 @@ def _get_model_name(model_name, load_in_4bit = True): elif not load_in_4bit and model_name in INT_TO_FLOAT_MAPPER: new_model_name = INT_TO_FLOAT_MAPPER[model_name] - logger.warning_once( - f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ - f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." - ) + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ + # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
+ # ) model_name = new_model_name elif load_in_4bit and SUPPORTS_FOURBIT and model_name in FLOAT_TO_INT_MAPPER: new_model_name = FLOAT_TO_INT_MAPPER[model_name] - logger.warning_once( - f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ - f"We shall load `{new_model_name}` for 4x faster loading." - ) + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ + # f"We shall load `{new_model_name}` for 4x faster loading." + # ) model_name = new_model_name pass diff --git a/unsloth/save.py b/unsloth/save.py index 033f6eb1e0..7af6280910 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -94,7 +94,7 @@ def check_if_sentencepiece_model(model, temporary_location = "_unsloth_sentencep temp_tokenizer = model._saved_temp_tokenizer sentencepiece_model = False - file_location = f"{temporary_location}/{temp_tokenizer.name_or_path}" + file_location = os.path.join(temporary_location, temp_tokenizer.name_or_path) if not os.path.exists(file_location): os.makedirs(file_location) pass @@ -1390,6 +1390,22 @@ def unsloth_save_pretrained_gguf( model_type = self.config.model_type is_sentencepiece_model = check_if_sentencepiece_model(self) + + # Check if BOS added already, then warn + print_bos_token_message = False + if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): + chat_template = getattr(tokenizer, "chat_template", None) + if chat_template is not None and \ + (tokenizer.bos_token in chat_template or "{bos_token}" in chat_template.replace(" ", "")): + print_bos_token_message = True + logger.warning( + f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ + "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." 
+ ) + pass + pass + + # Save to GGUF file_location = save_to_gguf(model_type, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1405,6 +1421,13 @@ def unsloth_save_pretrained_gguf( new_save_directory.lstrip('/.') print(f"Saved GGUF to https://huggingface.co/{link}") pass + + if print_bos_token_message: + logger.warning( + f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ + "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." + ) + pass pass @@ -1513,6 +1536,22 @@ def unsloth_push_to_hub_gguf( model_type = self.config.model_type is_sentencepiece_model = check_if_sentencepiece_model(self) + + # Check if BOS added already, then warn + print_bos_token_message = False + if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): + chat_template = getattr(tokenizer, "chat_template", None) + if chat_template is not None and \ + (tokenizer.bos_token in chat_template or "{bos_token}" in chat_template.replace(" ", "")): + print_bos_token_message = True + logger.warning( + f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ + "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." + ) + pass + pass + + # Save to GGUF file_location = save_to_gguf(model_type, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1525,7 +1564,15 @@ def unsloth_push_to_hub_gguf( link = f"{username}/{new_save_directory.lstrip('/.')}" \ if username not in new_save_directory else \ new_save_directory.lstrip('/.') + print(f"Saved GGUF to https://huggingface.co/{link}") + + if print_bos_token_message: + logger.warning( + f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ + "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." 
+ ) + pass pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index a03491669f..03f3e341d7 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -20,6 +20,10 @@ from transformers.models.llama.modeling_llama import logger from peft import PeftModelForCausalLM import torch +import itertools +import collections +import numpy as np +import gc __all__ = [ "load_correct_tokenizer", @@ -274,12 +278,10 @@ def fix_sentencepiece_gguf(saved_location): user defined tokens. Inspiration from https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py """ - import numpy as np from copy import deepcopy from transformers.utils import sentencepiece_model_pb2 import json from enum import IntEnum - import os class SentencePieceTokenTypes(IntEnum): NORMAL = 1 @@ -554,44 +556,128 @@ def check_tokenizer( @torch.inference_mode -def fix_untrained_tokens(model, eps = 1e-16): +def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): """ Llama-3 for eg has untrained vectors in the base model. These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> We reset them to the mean of the rest of the tokens """ - embedding_matrix = model.get_input_embeddings ().weight.data - lm_head_matrix = model.get_output_embeddings().weight.data + embedding_matrix = model.get_input_embeddings ().weight + lm_head_matrix = model.get_output_embeddings().weight # Get untrained tokens indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps where_untrained = torch.where(indicator_untrained)[0] n_untrained = where_untrained.shape[0] n_trained = embedding_matrix.shape[0] - n_untrained - if n_untrained != 0: - print( - f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ - "We shall set them to the mean of the other trained tokens." 
+ + # Get set and actual tokens + where_untrained = where_untrained.tolist() + if len(where_untrained) == 0: return + + where_untrained_set = frozenset(where_untrained) + actual_bad_tokens = tokenizer.convert_ids_to_tokens(where_untrained) + + # Check if tokenizer and training datasets have bad tokens + if_bad_first = False + if_bad_second = False + # Check tokenizer's chat template for any untrained tokens + chat_template = getattr(tokenizer, "chat_template", None) + if chat_template is not None: + if_bad_first = any(x in chat_template for x in actual_bad_tokens) + pass + + # Check the first 250, last 250 input_ids + size_dataset = len(train_dataset) + size = min(size_dataset, 250) + for j in range(size): + input_ids = train_dataset[j] + if "input_ids" in input_ids: + input_ids = input_ids["input_ids"] + if_bad = any(item in where_untrained_set for item in input_ids) + if if_bad: + if_bad_second = True + break + pass + pass + pass + + # Check last 250 + if not if_bad_second: + left = max(size_dataset-250, 0) + for j in range(left, size_dataset): + input_ids = train_dataset[j] + if "input_ids" in input_ids: + input_ids = input_ids["input_ids"] + if_bad = any(item in where_untrained_set for item in input_ids) + if if_bad: + if_bad_second = True + break + pass + pass + pass + pass + + # Check if bad tokens exists! + if not if_bad_first and not if_bad_second: return + + # Check if lm_head / embed_token are trainable! + bad_not_trainable = False + if not embedding_matrix.requires_grad: bad_not_trainable = True + if not lm_head_matrix .requires_grad: bad_not_trainable = True + + if bad_not_trainable: + raise ValueError( + 'Unsloth: Untrained tokens found, but embed_tokens & lm_head not trainable, causing NaNs. '\ + 'Restart then add `embed_tokens` & `lm_head` to '\ + '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",])`', ) pass - # First set untrained to all 0s - sometimes it's not! 
1e-23 for bfloat16 - embedding_matrix[where_untrained] = 0 - lm_head_matrix [where_untrained] = 0 + # Count all the possible bad tokens + final_counts = np.zeros(len(tokenizer), dtype = np.int64) + def mapping(examples): + input_ids = examples["input_ids"] + counter = np.fromiter(itertools.chain.from_iterable(input_ids), dtype = np.int32) + np.add.at(final_counts, counter, 1) + pass + train_dataset.map(mapping, batched = True, desc = "Counting untrained tokens") - # Find sum - sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) - sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) + # Get sum of all items + sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) + sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) + + # Remove bad tokens + sum_embedding -= torch.sum(embedding_matrix[where_untrained], dtype = torch.float32, axis = 0) + sum_lm_head -= torch.sum(lm_head_matrix [where_untrained], dtype = torch.float32, axis = 0) # Find correct average by dividing by sum of trained tokens - mean_embedding = (sum_embedding / n_trained).to(embedding_matrix.dtype) - mean_lm_head = (sum_lm_head / n_trained).to(lm_head_matrix .dtype) + mean_embedding = (sum_embedding / n_trained) + mean_lm_head = (sum_lm_head / n_trained) + + # Scale each to be equal to 1/max_frequency. 
Also set some to 0 if none seen + scaling = final_counts[where_untrained] / max(final_counts.max(), 1) + scaling = torch.tensor(scaling, device = mean_embedding.device).unsqueeze(1) + mean_embedding = mean_embedding.repeat((n_untrained, 1,)) * scaling + mean_lm_head = mean_lm_head .repeat((n_untrained, 1,)) * scaling + where_null = scaling.ravel() == 0 + mean_embedding[where_null] = 0 + mean_lm_head [where_null] = 0 # Set them to the mean - embedding_matrix[where_untrained] = mean_embedding - lm_head_matrix [where_untrained] = mean_lm_head + logger.warning( + "Unsloth: Setting embed_tokens & lm_head untrained tokens to "\ + "mean(trained) to counteract NaNs during training." + ) + embedding_matrix[where_untrained] = mean_embedding.to(embedding_matrix.dtype) + lm_head_matrix [where_untrained] = mean_lm_head .to(lm_head_matrix .dtype) - return mean_embedding, mean_lm_head + # Clean up + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + return pass @@ -602,32 +688,32 @@ def mean_of_trained_tokens(model, eps = 1e-16): These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> We reset them to the mean of the rest of the tokens """ - embedding_matrix = model.get_input_embeddings ().weight.data.clone() - lm_head_matrix = model.get_output_embeddings().weight.data.clone() + embedding_matrix = model.get_input_embeddings ().weight.clone() + lm_head_matrix = model.get_output_embeddings().weight.clone() # Get untrained tokens indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps where_untrained = torch.where(indicator_untrained)[0] n_untrained = where_untrained.shape[0] n_trained = embedding_matrix.shape[0] - n_untrained - if n_untrained != 0: - print( - f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ - "We shall set them to the mean of the other trained tokens." 
- ) - pass + # if n_untrained != 0: + # print( + # f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ + # "We shall set them to the mean of the other trained tokens." + # ) + # pass - # First set untrained to all 0s - sometimes it's not! 1e-23 for bfloat16 - embedding_matrix[where_untrained] = 0 - lm_head_matrix [where_untrained] = 0 + # Get sum of all items + sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) + sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) - # Find sum - sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) - sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) + # Remove bad tokens + sum_embedding -= torch.sum(embedding_matrix[where_untrained], dtype = torch.float32, axis = 0) + sum_lm_head -= torch.sum(lm_head_matrix [where_untrained], dtype = torch.float32, axis = 0) # Find correct average by dividing by sum of trained tokens - mean_embedding = (sum_embedding / n_trained).to(embedding_matrix.dtype) - mean_lm_head = (sum_lm_head / n_trained).to(lm_head_matrix .dtype) + mean_embedding = (sum_embedding / n_trained) + mean_lm_head = (sum_lm_head / n_trained) return mean_embedding, mean_lm_head pass @@ -676,8 +762,8 @@ def add_new_tokens( # If we use interpolation, we interpolate between the mean embeddings and # the Word2Vec sum of the other vectors - embedding_matrix = model.get_input_embeddings ().weight.data - lm_head_matrix = model.get_output_embeddings().weight.data + embedding_matrix = model.get_input_embeddings ().weight + lm_head_matrix = model.get_output_embeddings().weight if method == "interpolation": print( @@ -718,6 +804,7 @@ def add_new_tokens( from inspect import getsource import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * +from transformers.trainer import * def patch_sft_trainer_tokenizer(): """ @@ -749,6 +836,41 @@ def patch_sft_trainer_tokenizer(): 
exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) pass + + # Patch train with fix_untrained_tokens + function_name, replacer = "train", "if resume_from_checkpoint is False:" + function = getsource(eval(f"trl.trainer.sft_trainer.SFTTrainer.{function_name}")) + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + + check_text = \ + "\n"\ + "if self._inner_training_loop.__name__ != '_fast_inner_training_loop':\n"\ + " raise RuntimeError(\n"\ + " 'Do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ + " )\n"\ + "pass\n"\ + "n_devices = torch.cuda.device_count()\n"\ + "more_than = 0\n"\ + "for j in range(n_devices):\n"\ + " vram = torch.cuda.max_memory_reserved(torch.cuda.device(j)) / 1024 / 1024 / 1024\n"\ + " more_than += (vram > 4)\n"\ + "if more_than > 1: raise RuntimeError('Error: More than 1 GPUs have a lot of VRAM usage.')\n"\ + "for _ in range(3):\n"\ + " gc.collect()\n"\ + " torch.cuda.empty_cache()\n"\ + "pass\n"\ + "\n"\ + "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" + + check_text = check_text.split("\n") + check_text = "\n".join(" "*where + x for x in check_text) + + function = function.replace(replacer, check_text + replacer) + exec(function, globals()) + + exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) pass patch_sft_trainer_tokenizer() From cd1b44878686972d1de60e905215825da330f1e1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 29 May 2024 14:30:26 +1000 Subject: [PATCH 0218/1088] Fix Phi-3 (#556) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update 
llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/models/_utils.py | 7 +++++++ unsloth/tokenizer_utils.py | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 22fb51144c..b7333f0098 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -79,6 +79,7 @@ "offload_input_embeddings", "offload_output_embeddings", "is_bfloat16_supported", + "unsloth_offloaded_gradient_checkpoint", ] @@ -402,6 +403,12 @@ def backward(ctx, dY): pass +@torch._disable_dynamo +def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, **kwargs): + return Unsloth_Offloaded_Gradient_Checkpointer.apply(function, *args) +pass + + """ Remove warnings about missing kwargs """ diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 03f3e341d7..6e4d69107e 100644 --- 
a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -577,7 +577,9 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): where_untrained_set = frozenset(where_untrained) actual_bad_tokens = tokenizer.convert_ids_to_tokens(where_untrained) - + # Remove None items in actual_bad_tokens + actual_bad_tokens = [x for x in actual_bad_tokens if x is not None] + # Check if tokenizer and training datasets have bad tokens if_bad_first = False if_bad_second = False From 27fa021a7bb959a53667dd4e7cdb9598c207aa0d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 31 May 2024 00:41:44 +1000 Subject: [PATCH 0219/1088] Fix #563 (#564) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * 
Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/models/_utils.py | 70 +++++++++++++++++++++++--------------- unsloth/models/llama.py | 10 +++--- unsloth/save.py | 1 + unsloth/tokenizer_utils.py | 2 +- 4 files changed, 50 insertions(+), 33 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b7333f0098..bcd4a7b30a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -410,36 +410,50 @@ def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, """ - Remove warnings about missing kwargs + Remove warnings about missing kwargs and patch stuff """ -try: - from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod - from inspect import getsource - import re - BitsAndBytesConfig__init__ = getsource(BitsAndBytesConfig.__init__) - BitsAndBytesConfig__init__ = re.sub( - r"if[\s]{1,}kwargs\:[\s]{1,}.+?\n", - "", - BitsAndBytesConfig__init__, - flags = re.MULTILINE, - ) - BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.split("\n") - length_spaces = len(re.match(r"[\s]{1,}", BitsAndBytesConfig__init__[0]).group(0)) - BitsAndBytesConfig__init__ = "\n".join(x[length_spaces:] for x in BitsAndBytesConfig__init__) - BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.replace( - "__init__", - "_BitsAndBytesConfig__init__", - ) - exec(BitsAndBytesConfig__init__, globals()) - - import transformers.utils.quantization_config - transformers.utils.quantization_config.BitsAndBytesConfig.__init__ = _BitsAndBytesConfig__init__ -except: - logger.warning_once( - "Unsloth unsuccessfully patched 
bitsandbytes. Please file a bug report.\n"\ - "Luckily, your training run will still work in the meantime!" - ) +from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod +from inspect import getsource +from accelerate.utils.dataclasses import DistributedType +import re +BitsAndBytesConfig__init__ = getsource(BitsAndBytesConfig.__init__) +BitsAndBytesConfig__init__ = re.sub( + r"if[\s]{1,}kwargs\:[\s]{1,}.+?\n", + "", + BitsAndBytesConfig__init__, + flags = re.MULTILINE, +) +BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.split("\n") +length_spaces = len(re.match(r"[\s]{1,}", BitsAndBytesConfig__init__[0]).group(0)) +BitsAndBytesConfig__init__ = "\n".join(x[length_spaces:] for x in BitsAndBytesConfig__init__) +BitsAndBytesConfig__init__ = BitsAndBytesConfig__init__.replace( + "__init__", + "_BitsAndBytesConfig__init__", +) + +def _prepare_backend( + self, cpu: bool = False, sagemaker_dp = False, backend: str = None, +) -> tuple[str, DistributedType]: + return None, DistributedType.NO pass +import accelerate.state +accelerate.state.PartialState._prepare_backend = _prepare_backend + +import accelerate.accelerator +prepare = inspect.getsource(accelerate.accelerator.Accelerator.prepare) +prepare = prepare.split("\n") +spaces = prepare[0].find("def") +prepare = "\n".join(x[spaces:] for x in prepare) +x = "for obj in args:" +s = " "*spaces +prepare = prepare.replace(x, f'self.state.distributed_type = DistributedType.NO\n{s}{x}', 1) +exec(prepare, globals()) +accelerate.accelerator.Accelerator.prepare = prepare + +exec(BitsAndBytesConfig__init__, globals()) + +import transformers.utils.quantization_config +transformers.utils.quantization_config.BitsAndBytesConfig.__init__ = _BitsAndBytesConfig__init__ # Offloading to disk for modules (lm_head, embed_tokens) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9aeb55e4ea..7dec8624e1 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1277,6 +1277,7 
@@ def from_pretrained( "is_sagemaker_mp_enabled()", "False", ) + exec(inner_training_loop, globals()) Trainer._inner_training_loop = _fast_inner_training_loop # Save max_seq_length @@ -1316,6 +1317,7 @@ def from_pretrained( # Add save modules patch_saving_functions(model) + Trainer._inner_training_loop = _fast_inner_training_loop # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference @@ -1336,18 +1338,18 @@ def post_patch(model): layers = model.model.layers # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2.2 - model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + # Workaround randomnly fixes it for torch versions < 2. + model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) model.config.update({"unsloth_version" : __version__}) # We also do this for the lm_head lm_head = torch.nn.Linear(1, 1, bias = None) del lm_head.weight - lm_head.weight = model.lm_head.weight + lm_head.weight = model.get_output_embeddings().weight lm_head.in_features = lm_head.weight.shape[1] lm_head.out_features = lm_head.weight.shape[0] model.lm_head = lm_head - + # Also patch all dtypes - BnB seems to not allocate the correct type? # BnB default dtype seems to be float16! 
correct_dtype = lm_head.weight.dtype diff --git a/unsloth/save.py b/unsloth/save.py index 7af6280910..5d6f925d45 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -975,6 +975,7 @@ def save_to_gguf( vocab_type = "bpe" pass + use_fast_convert = False if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ f"--outfile {final_location} --vocab-type {vocab_type} "\ diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 6e4d69107e..6afea68057 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -579,7 +579,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): actual_bad_tokens = tokenizer.convert_ids_to_tokens(where_untrained) # Remove None items in actual_bad_tokens actual_bad_tokens = [x for x in actual_bad_tokens if x is not None] - + # Check if tokenizer and training datasets have bad tokens if_bad_first = False if_bad_second = False From f9689b1472723bb13efe9f17cf467e3c825b3b25 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 5 Jun 2024 06:14:11 +1000 Subject: [PATCH 0220/1088] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 3537b8de38..460d92f203 100644 --- a/README.md +++ b/README.md @@ -34,9 +34,11 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - **Kaggle Notebooks** for [Llama 3 8B](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 7B](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7B](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral 7B v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text +- This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language ## 🦥 Unsloth.ai News +- 📣 NEW! [Continued Pretraining] for other languages like Korean: (https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) - 📣 NEW! [Phi-3 medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) and [Phi-3 mini](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) support is here! - 📣 NEW! [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! - 📣 NEW! 
Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) From 83807103ca7eb4be68c13738d49f1fda2f00865a Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 5 Jun 2024 06:15:27 +1000 Subject: [PATCH 0221/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 460d92f203..2c50f45763 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News -- 📣 NEW! [Continued Pretraining] for other languages like Korean: (https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) +- 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! - 📣 NEW! [Phi-3 medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) and [Phi-3 mini](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) support is here! - 📣 NEW! [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! - 📣 NEW! 
Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) From 3771f5de8dd7098440def726e69de637df939b88 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 5 Jun 2024 20:57:08 +1000 Subject: [PATCH 0222/1088] Update llama.py --- unsloth/models/llama.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7dec8624e1..9c9b45dbae 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1853,9 +1853,11 @@ def for_inference(model): pass # Wrap model.generate - model._unwrapped_old_generate = model.generate - model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) - + if model.generate.__name__ != "_fast_generate": + model._unwrapped_old_generate = model.generate + model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) + pass + # Patch tokenizer to pad to the left internal_model = model while hasattr(internal_model, "model"): From 172219e3e76e5508e97da3e5e281597a4246dcb7 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 6 Jun 2024 01:22:29 +1000 Subject: [PATCH 0223/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3015563c1d..c2cf4ae9ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.26.1", - "trl>=0.7.9", + "trl>=0.7.9,<0.9.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", ] From 471565f21dfd837b5ecdb3de70545ea7a76035f0 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 7 Jun 2024 02:53:17 +1000 Subject: [PATCH 0224/1088] Qwen2 --- unsloth/models/llama.py | 10 ++++++---- unsloth/models/mapper.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 
9c9b45dbae..ea5dabf738 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1782,10 +1782,12 @@ def patch_peft_model( layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 else: - logger.warning_once( - "Not an error, but Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ - "are not enabled or a bias term (like in Qwen) is used." - ) + if model_type != "qwen2": + logger.warning_once( + "Not an error, but Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." + ) + pass pass # O attention patching diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 777f310c73..8808b8554d 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -156,6 +156,36 @@ "unsloth/Phi-3-medium-4k-instruct", "microsoft/Phi-3-medium-4k-instruct", ), + "unsloth/Qwen2-0.5B-bnb-4bit" : ( + "unsloth/Qwen2-0.5B", + "Qwen/Qwen2-0.5B", + ), + "unsloth/Qwen2-0.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-0.5B-Instruct", + "Qwen/Qwen2-0.5B-Instruct", + ), + "unsloth/Qwen2-1.5B-bnb-4bit" : ( + "unsloth/Qwen2-1.5B", + "Qwen/Qwen2-1.5B", + ), + "unsloth/Qwen2-1.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-1.5B-Instruct", + "Qwen/Qwen2-1.5B-Instruct", + ), + "unsloth/Qwen2-7B-bnb-4bit" : ( + "unsloth/Qwen2-7B", + "Qwen/Qwen2-7B", + ), + "unsloth/Qwen2-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-7B-Instruct", + "Qwen/Qwen2-7B-Instruct", + ), + "unsloth/Qwen2-70B-bnb-4bit" : ( + "Qwen/Qwen2-70B", + ), + "unsloth/Qwen2-70B-Instruct-bnb-4bit" : ( + "Qwen/Qwen2-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From c1e1646382480e235288c1c4cfd6e959dff1f999 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 7 Jun 2024 03:47:44 +1000 Subject: [PATCH 0225/1088] Update utils.py --- unsloth/kernels/utils.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git 
a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 1f2085df5a..ddee198b77 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -63,6 +63,25 @@ def get_lora_parameters(proj): pass +def get_lora_parameters_bias(proj): + # For DPO or disabled adapters + base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) + W = base_layer.weight + bias = base_layer.bias + + if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: + return W, QUANT_STATE(W), None, None, None, bias + pass + + active_adapter = proj.active_adapters[0] if \ + hasattr(proj, "active_adapters") else proj.active_adapter + A = proj.lora_A [active_adapter].weight + B = proj.lora_B [active_adapter].weight + s = proj.scaling[active_adapter] + return W, QUANT_STATE(W), A, B, s, bias +pass + + def fast_dequantize(W, quant_state = None, out = None): if quant_state is None: return W if type(quant_state) is not list: @@ -181,7 +200,7 @@ def fast_gemv(X, W, quant_state, out = None): def fast_linear_forward(proj, X, temp_lora = None, out = None): - W, W_quant, lora_A, lora_B, lora_S = get_lora_parameters(proj) + W, W_quant, lora_A, lora_B, lora_S, bias = get_lora_parameters_bias(proj) bsz, q_len, in_dim = X.shape if q_len != 1: return matmul_lora(X, W, W_quant, lora_A, lora_B, lora_S) @@ -216,6 +235,8 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): out = out.view(bsz, 1, out_dim) pass + if bias is not None: out += bias + return out pass From 8b1ebc4affde38a4cd26abda7a47e527891c89a9 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 7 Jun 2024 04:25:33 +1000 Subject: [PATCH 0226/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ea5dabf738..1e301acc9b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -791,7 +791,7 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, 
CausalLMOutputWithPast]: - if past_key_values is not None: + if past_key_values is not None and self.config.model_type != "qwen2": outputs = fast_forward_inference( self, input_ids, From 8d9bd0ea8bf662618ba96fe7fe3478c5b81d0dff Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 7 Jun 2024 04:54:57 +1000 Subject: [PATCH 0227/1088] Update llama.py --- unsloth/models/llama.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1e301acc9b..0e860b9de4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1830,6 +1830,11 @@ def patch_peft_model( @staticmethod def for_inference(model): + if model.config.model_type == "qwen2": + FastLlamaModel.for_training(model) + return + pass + internal_model = model internal_model.gradient_checkpointing = False internal_model.training = False From 8a9e24ed4f092e438bd09436a897f2ca2530e72b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Jun 2024 05:04:54 +1000 Subject: [PATCH 0228/1088] Ollama Chat Templates (#582) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/chat_templates.py | 679 +++++++++++++++++++++++++++++++++++-- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 47 +-- unsloth/models/loader.py | 3 + unsloth/models/mapper.py | 8 + unsloth/models/mistral.py | 54 ++- unsloth/models/qwen2.py | 5 +- unsloth/save.py | 195 +++++++---- unsloth/tokenizer_utils.py | 183 +++++++++- 9 files changed, 1015 insertions(+), 161 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 3decdf7ffc..4c782326b1 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -16,6 +16,12 @@ "get_chat_template", "test_chat_templates", "test_hf_gguf_equivalence", + "remove_special_tokens", + "standardize_dataset", + + 
"construct_chat_template", + "test_construct_chat_template", + "create_ollama_modelfile", ] from transformers import StoppingCriteria, StoppingCriteriaList @@ -29,6 +35,7 @@ CHAT_TEMPLATES = {} +# =========================================== Unsloth # Unsloth efficient template leverages from Zephyr unsloth_template = \ "{{ bos_token }}"\ @@ -51,10 +58,24 @@ "{% if add_generation_prompt %}"\ "{{ '>>> Assistant: ' }}"\ "{% endif %}" -unsloth_eos_token = "eos_token" -CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False,) +pass +unsloth_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}{{ .System }} +{{ end }}{{ if .Prompt }}>>> User: {{ .Prompt }} +{{ end }}>>> Assistant: {{ .Response }}{__EOS_TOKEN__} +""" +PARAMETER stop "{__EOS_TOKEN__}" +SYSTEM """You are a helpful assistant to the user""" +''' + +unsloth_eos_token = "eos_token" +CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False, unsloth_ollama,) +pass +# =========================================== Zephyr # Zephyr has no BOS! zephyr_template = \ "{% for message in messages %}"\ @@ -69,10 +90,26 @@ "{% if add_generation_prompt %}"\ "{{ '<|assistant|>\n' }}"\ "{% endif %}" -zephyr_eos_token = "eos_token" -CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False,) +pass + +zephyr_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}<|system|> +{{ .System }}{__EOS_TOKEN__} +{{ end }}{{ if .Prompt }}<|user|> +{{ .Prompt }}{__EOS_TOKEN__} +{{ end }}<|assistant|> +{{ .Response }}{__EOS_TOKEN__} +""" +PARAMETER stop "{__EOS_TOKEN__}" +''' +zephyr_eos_token = "eos_token" +CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False, zephyr_ollama,) +pass +# =========================================== ChatML # ChatML has no BOS and not EOS! Rather <|im_start|> and <|im_end|> acts as BOS / EOS. 
chatml_template = \ "{% for message in messages %}"\ @@ -87,10 +124,27 @@ "{% if add_generation_prompt %}"\ "{{ '<|im_start|>assistant\n' }}"\ "{% endif %}" -chatml_eos_token = "<|im_end|>" -CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True,) +pass + +chatml_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +""" +PARAMETER stop "<|im_start|>" +PARAMETER stop "<|im_end|>" +''' +chatml_eos_token = "<|im_end|>" +CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True, chatml_ollama,) +pass +# =========================================== Mistral-1 # Mistral Instruct doesn't allow system prompts, so we append it to the user message. mistral_template = \ "{{ bos_token }}"\ @@ -114,10 +168,21 @@ "{{ raise_exception('Only user and assistant roles are supported!') }}"\ "{% endif %}"\ "{% endfor %}" -mistral_eos_token = "eos_token" -CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False,) +pass +# Ollama from https://www.ollama.com/library/mistral +mistral_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]""" +PARAMETER stop "{__EOS_TOKEN__}" +''' +mistral_eos_token = "eos_token" +CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False, mistral_ollama,) +pass + +# =========================================== Llama-2 # Adds BOS to every convo! And weird <> system messages. 
llama_template = \ "{% if messages[0]['role'] == 'system' %}"\ @@ -140,10 +205,23 @@ "{{ raise_exception('Only user and assistant roles are supported!') }}"\ "{% endif %}"\ "{% endfor %}" -llama_eos_token = "eos_token" -CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False,) +pass + +# Ollama from https://www.ollama.com/library/llama3 +llama_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """[INST] <>{{ .System }}<> +{{ .Prompt }} [/INST]""" +PARAMETER stop "{__EOS_TOKEN__}" +''' +llama_eos_token = "eos_token" +CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False, llama_ollama,) +pass + +# =========================================== Vicuna # https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template vicuna_template = \ "{{ bos_token }}"\ @@ -166,10 +244,21 @@ "{% if add_generation_prompt %}"\ "{{ 'ASSISTANT:' }}"\ "{% endif %}" -vicuna_eos_token = "eos_token" -CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False,) +pass + +# Ollama from https://www.ollama.com/library/vicuna +vicuna_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} {{ end }}ASSISTANT: {{ .Response }} {__EOS_TOKEN__}""" +PARAMETER stop "{__EOS_TOKEN__}" +''' +vicuna_eos_token = "eos_token" +CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False, vicuna_ollama,) +pass +# =========================================== Vicuna Old # https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template vicuna_old_template = \ "{{ bos_token }}"\ @@ -192,10 +281,24 @@ "{% if add_generation_prompt %}"\ "{{ '### Assistant:' }}"\ "{% endif %}" -vicuna_old_eos_token = "eos_token" -CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False,) +pass + +vicuna_old_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}{{ .System }} +{{ end }}{{ if .Prompt }}### Human: {{ .Prompt }} +{{ end 
}}### Assistant: {{ .Response }}{__EOS_TOKEN__} +""" +PARAMETER stop "{__EOS_TOKEN__}" +SYSTEM """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.""" +''' +vicuna_old_eos_token = "eos_token" +CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False, vicuna_old_ollama,) +pass +# =========================================== Alpaca multi turn # https://github.com/tatsu-lab/stanford_alpaca Changed for multi-turn convos alpaca_template = \ "{{ bos_token }}"\ @@ -203,7 +306,7 @@ "{{ messages[0]['content'] + '\n\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'Below are some instructions that describes some tasks. Write responses that appropriately completes each request.\n\n' }}"\ + "{{ 'Below are some instructions that describe some tasks. Write responses that appropriately complete each request.\n\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -218,42 +321,100 @@ "{% if add_generation_prompt %}"\ "{{ '### Response:\n' }}"\ "{% endif %}" -alpaca_eos_token = "eos_token" -CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False,) +pass + +alpaca_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}{{ .System }} + +{{ end }}{{ if .Prompt }}### Instruction: +{{ .Prompt }}{{ end }} + +### Response: +{{ .Response }}{__EOS_TOKEN__} +""" +PARAMETER stop "{__EOS_TOKEN__}" +SYSTEM """Below are some instructions that describe some tasks. Write responses that appropriately complete each request.""" +''' +alpaca_eos_token = "eos_token" +CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False, alpaca_ollama,) +pass + +# =========================================== Gemma # https://huggingface.co/google/gemma-7b-it # Notice we must use |trim for lstrip and rstrip. maps to 106. # maps to 107. user and model are normal 1 word tokens. 
gemma_template = \ "{{ bos_token }}"\ + "{% if messages[0]['role'] == 'system' %}"\ + "{{'user\n' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '\n'}}"\ + "{% set loop_messages = messages[2:] %}"\ + "{% endif %}"\ "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ "{{'user\n' + message['content'] | trim + '\n'}}"\ "{% elif message['role'] == 'assistant' %}"\ "{{'model\n' + message['content'] | trim + '\n' }}"\ "{% else %}"\ - "{{ 'system\n' + message['content'] | trim + '\n' }}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ "{% endif %}"\ "{% endfor %}"\ "{% if add_generation_prompt %}"\ "{{ 'model\n' }}"\ "{% endif %}" -gemma_eos_token = "" -CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True,) +pass + +# Ollama from https://www.ollama.com/library/gemma +gemma_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """user +{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} +model +{{ .Response }} +""" +PARAMETER repeat_penalty 1 +PARAMETER stop "" +PARAMETER stop "" +PARAMETER penalize_newline false +''' +gemma_eos_token = "" +CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True, gemma_ollama,) +pass -# Gemma with ChatML instead +# =========================================== Gemma with ChatML instead # We find using is still more appropriate! 
gemma_chatml_template = "{{ bos_token }}" + chatml_template +pass + +gemma_chatml_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +""" +PARAMETER repeat_penalty 1 +PARAMETER stop "<|im_start|>" +PARAMETER stop "<|im_end|>" +PARAMETER penalize_newline false +''' + gemma_chatml_eos_token = ( {"" : "<|im_start|>", "" : "<|im_end|>"}, "<|im_end|>", ) -CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True,) - +CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True, gemma_chatml_ollama,) +pass -# Llama-3 +# =========================================== Llama-3 # Weirdly \n\n is needed? llama3_template = \ "{{ bos_token }}"\ @@ -269,11 +430,30 @@ "{% if add_generation_prompt %}"\ "{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"\ "{% endif %}" +pass + +# Ollama from https://www.ollama.com/library/llama3 +llama3_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|> + +{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> + +{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> + +{{ .Response }}<|eot_id|>""" +PARAMETER stop "<|start_header_id|>" +PARAMETER stop "<|end_header_id|>" +PARAMETER stop "<|eot_id|>" +''' + llama3_template_eos_token = "eos_token" -CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False,) +CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False, llama3_ollama,) +pass -# Phi-3 +# =========================================== Phi-3 phi3_template = \ "{{ bos_token }}"\ "{% for message in messages %}"\ @@ -288,8 +468,27 @@ "{% if add_generation_prompt %}"\ "{{ '<|assistant|>\n' }}"\ "{% endif %}" +pass + +# Ollama from 
https://www.ollama.com/library/phi3 +phi3_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .System }}<|system|> +{{ .System }}<|end|> +{{ end }}{{ if .Prompt }}<|user|> +{{ .Prompt }}<|end|> +{{ end }}<|assistant|> +{{ .Response }}<|end|> +""" +PARAMETER stop "<|end|>" +PARAMETER stop "<|user|>" +PARAMETER stop "<|assistant|>" +''' + phi3_template_eos_token = "<|end|>" -CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False,) +CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) +pass def get_chat_template( @@ -297,6 +496,7 @@ def get_chat_template( chat_template = "chatml", mapping = {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant"}, map_eos_token = True, + system_message = None, ): assert(type(map_eos_token) is bool) old_tokenizer = tokenizer @@ -331,7 +531,7 @@ def get_chat_template( elif type(chat_template) is str: - chat_template, stop_word, yes_map_eos_token = CHAT_TEMPLATES[chat_template] + chat_template, stop_word, yes_map_eos_token, ollama_modelfile = CHAT_TEMPLATES[chat_template] # Check mapping to eos_token if not map_eos_token and yes_map_eos_token: map_eos_token = True @@ -496,10 +696,421 @@ def get_chat_template( # Patch saving functions tokenizer = patch_saving_functions(tokenizer) + # Add Ollama + tokenizer._ollama_modelfile = ollama_modelfile + tokenizer._system_message = system_message return tokenizer#, stopping_criteria pass +def remove_special_tokens(tokenizer, prompt): + # Removes double BOS token + if prompt.startswith(tokenizer.bos_token): + prompt = prompt[len(tokenizer.bos_token):] + pass + return prompt +pass + + +def standardize_dataset( + dataset, + conversation_key = "conversations", + system_message = None, + aliases_for_system = ["system",], + aliases_for_user = ["user", "human", "input",], + aliases_for_assistant = ["gpt", "assistant", "output",], +): + """ + Standardizes ShareGPT and other formats to user/assistant Hugging Face 
format. + """ + import collections + import itertools + + convos = dataset[:10][conversation_key] + uniques = collections.defaultdict(list) + for convo in convos: + for message in convo: + for key, value in message.items(): + uniques[key].append(value) + pass + + # Must be only 2 entries + assert(len(uniques.keys()) == 2) + + keys = list(uniques.keys()) + length_first = len(set(uniques[keys[0]])) + length_second = len(set(uniques[keys[1]])) + + if length_first < length_second: + # Role is assigned to the first element + role_key = keys[0] + content_key = keys[1] + else: + role_key = keys[1] + content_key = keys[0] + pass + + # Check roles are in aliases + all_aliases = set(aliases_for_system + aliases_for_user + aliases_for_assistant) + roles = set(uniques[role_key]) + leftover_aliases = (all_aliases | roles) - all_aliases + if len(leftover_aliases) != 0: + raise TypeError( + f"Unsloth: {list(leftover_aliases)} are not in aliases. Please update aliases." + ) + pass + + # Mapping for aliases + aliases_mapping = {} + for x in aliases_for_system: aliases_mapping[x] = "system" + for x in aliases_for_user: aliases_mapping[x] = "user" + for x in aliases_for_assistant: aliases_mapping[x] = "assistant" + + def _standardize_dataset(examples): + convos = examples[conversation_key] + all_convos = [] + for convo in convos: + new_convo = [] + if len(convo) == 0: continue + has_system = aliases_mapping[convo[0][role_key]] == "system" + if not has_system and system_message is not None: + new_convo.append({ "role" : "system", "content" : system_message, }) + for message in convo: + role = aliases_mapping[message[role_key]] + new_convo.append({ "role" : role, "content" : message[content_key], }) + pass + all_convos.append(new_convo) + pass + return { conversation_key : all_convos, } + pass + + return dataset.map(_standardize_dataset, batched = True,) +pass + + +def get_ollama_eos_tokens(tokenizer, extra_eos_tokens = []): + added_tokens_decoder = 
tokenizer.added_tokens_decoder.values() + added_tokens_decoder = [str(x) for x in added_tokens_decoder] + + # Remove added_tokens_decoder duplicates + added_tokens_decoder = list(set(added_tokens_decoder) - set(extra_eos_tokens)) + + # Remove BOS + if getattr(tokenizer, "bos_token", None) is not None: + added_tokens_decoder = [x for x in added_tokens_decoder if x != tokenizer.bos_token] + pass + + repeatted_tokens = [] + # Join all vocab + joined_text = "\x01\x00".join(added_tokens_decoder) + for token in added_tokens_decoder: + n = len(token) + repeatted_counts = joined_text.count(token[:n//2]) + # Try finding longer than 1/2 of the token in the rest + # For eg <|reserved_special_token_0|>, <|reserved_special_token_1|> + if repeatted_counts > 2: + for j in range(n//2+1, n): + if joined_text.count(token[:j]) < repeatted_counts: + j -= 1 + # Remove repeatted tokens to reduce search space + joined_text = joined_text.replace(token[:j], "") + repeatted_tokens.append(token[:j]) + break + pass + pass + pass + + # Remove duplicates + splitted = joined_text.split("\x01\x00") + final_eos_tokens = [] + for old, new in zip(added_tokens_decoder, splitted): + if old == new: final_eos_tokens.append(old) + pass + final_eos_tokens += extra_eos_tokens + final_eos_tokens += repeatted_tokens + return final_eos_tokens +pass + + +def construct_chat_template( \ + +tokenizer = None, + +template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> + +{SYSTEM}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|>""", + +default_system_message = \ + "Below are some instructions that describe some tasks. 
Write responses that appropriately complete each request.", + +extra_eos_tokens = None, + +): + """ + Creates a Ollama modelfile and a HF Jinja template from a custom + template. You must provide 2x examples of an input & output. + There is an optional system message as well. + + You must use {INPUT}, {OUTPUT} twice, and {SYSTEM} is optional. + """ + assert(tokenizer is not None) + + if extra_eos_tokens is None: extra_eos_tokens = [] + + vocab = tokenizer.get_vocab() + for extra_eos in extra_eos_tokens: + assert(type(extra_eos) is str) + if extra_eos not in vocab: + raise ValueError(f"Unsloth: `{extra_eos}` is not a singular token in the tokenizer.") + pass + pass + + error_msg = \ + "Unsloth: Your prompt template must have 2 examples showing the user input {INPUT} "\ + "and the assistant output {OUTPUT}\n\n"\ + "For example what is not allowed is just:\n"\ + "### Input:\\n{INPUT}\\n\\n### Response:\\n{OUTPUT}\\n\n\n"\ + "What is required is 2x of this:\n"\ + "### Input:\\n{INPUT}\\n\\n### Response:\\n{OUTPUT}\\n"\ + "### Input:\\n{INPUT}\\n\\n### Response:\\n{OUTPUT}\\n" + + # O(N^2) search finding 2 repeatted pieces of text + j = len(template)-1 + at_least_one = False + while j > 0: + found = template.rfind(template[j:], 0, j) + if found == -1: break + j -= 1 + at_least_one = True + pass + if j > 0: j += 1 + else: raise RuntimeError(error_msg) + + + if not at_least_one: raise RuntimeError(error_msg) + + # Repeatted text + instruction_response = template[j:] + if instruction_response.count("{INPUT}") != 1 or instruction_response.count("{OUTPUT}") != 1: + raise RuntimeError(error_msg) + pass + + # 1st System, Instruction, Output pair + left = template[:j] + # 2nd Instruction, Output pair + right = template[j:] + + # Isolate input + extra_eos_tokens_regex = "|".join(f"(?:{re.escape(x)})" for x in extra_eos_tokens) + if len(extra_eos_tokens_regex) != 0: + find_end = f"(?:{extra_eos_tokens_regex})?" 
+ else: + find_end = "" + find_end = r"\{INPUT\}[\s\n]{0,}" + find_end + input_end = list(re.finditer(find_end, right)) + assert(len(input_end) == 1) + input_end = input_end[0] + input_end = input_end.span(0)[1] + input_part = right[:input_end] + + # Isolate output + output_part = right[input_end:] + + # Isolate system + system_part = left[:left.find(input_part)] + + # Check if the user provided a correct prompt + combined = system_part + input_part + output_part + if combined != left: + combined_changed = combined.replace('\n', '\\n') + left_changed = left .replace('\n', '\\n') + raise RuntimeError( + "Unsloth: The prompt template you provided isn't correct. You gave:\n"\ + f"{combined_changed}\n\n"\ + "But we require the following:\n"\ + f"{left_changed}" + ) + pass + + # Ollama modelfile parts + + # Check bos_token is in system prompt + ollama_system = system_part + has_bos_token = False + if tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None): + if ollama_system.startswith(tokenizer.bos_token): + has_bos_token = True + ollama_system = ollama_system[len(tokenizer.bos_token):] + pass + pass + system_modelfile = "{{ if .System }}" + ollama_system.replace("{SYSTEM}", "{{ .System }}") + "{{ end }}" + input_modelfile = "{{ if .Prompt }}" + input_part .replace("{INPUT}", "{{ .Prompt }}") + "{{ end }}" + output_modelfile = output_part.replace("{OUTPUT}", "{{ .Response }}") + + # Check if EOS token is at the end of the output + if not output_modelfile.endswith(tuple(extra_eos_tokens)): + output_modelfile += "{__EOS_TOKEN__}" + pass + + # Ollama EOS + ollama_eos = get_ollama_eos_tokens(tokenizer, extra_eos_tokens) + ollama_eos = '\n'.join(f'PARAMETER stop "{eos}"' for eos in ollama_eos) + + # Ollama modelfile + modelfile = 'FROM {__FILE_LOCATION__}\n\n'\ + 'TEMPLATE """' + system_modelfile + input_modelfile + output_modelfile + \ + '"""\n\n' + ollama_eos + + # HF Jinja Chat template + def process(part, which, content = "message['content']"): + if 
part.endswith(which): + part = "'" + part[:part.find(which)] + f"' + {content}" + elif part.startswith(which): + part = f"{content} + '" + part[part.find(which):] + "'" + else: + part = "'" + part.replace(which, f"' + {content} + '") + "'" + if part.startswith("'' + "): part = part[5:] + return part + pass + input_jinja = process(input_part, "{INPUT}") + output_jinja = process(output_part, "{OUTPUT}") + pass + + jinja_template = \ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ " + input_jinja + " }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ " + output_jinja + " }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '" + output_part[:output_part.find("{OUTPUT}")] + "' }}"\ + "{% endif %}" + pass + + # Now add system prompt to jinja + if len(system_part) != 0: + partial_system = process(system_part, "{SYSTEM}", "messages[0]['content']") + partial_system = partial_system.replace("{SYSTEM}", "") + + # Separate the BOS + if has_bos_token: + partial_system = partial_system.replace(tokenizer.bos_token, "", 1) + pass + + partial_system = \ + "{% if messages[0]['role'] == 'system' %}"\ + "{{ " + partial_system + " }}"\ + "{% set loop_messages = messages[1:] %}" + if default_system_message is not None: + partial_system += "{% else %}"\ + "{{ '" + system_part.replace("{SYSTEM}", default_system_message) + "' }}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}" + else: + partial_system += "{% endif %}" + pass + + jinja_template = partial_system + jinja_template + + if has_bos_token: + jinja_template = "{{ bos_token }}" + jinja_template + pass + + return modelfile, jinja_template +pass + + +def test_construct_chat_template(): + token = "hf_" + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", token = token) + + template = 
"""<|begin_of_text|><|start_header_id|>system<|end_header_id|> + +{SYSTEM}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|>""" + + default_system_message = \ + "Below are some instructions that describe some tasks. Write responses that appropriately complete each request." + + extra_eos_tokens = None + + modelfile, jinja_template = construct_chat_template(template, default_system_message, extra_eos_tokens) + + messages = [ + {"role": "system", "content": "You are an assistant"}, + {"role": "user", "content": "What is 2+2?"}, + {"role": "assistant", "content": "It's 4."}, + {"role": "user", "content": "Ok!"}, + {"role": "assistant", "content": "Anything else?"}, + {"role": "user", "content": "What's 2x2?"}, + ] + correct_output = tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + + tokenizer.chat_template = jinja_template + new_output = tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) + + assert(correct_output == new_output) + pass +pass + + +def create_ollama_modelfile(tokenizer, gguf_location): + """ + Creates an Ollama Modelfile. + Use ollama.create(model = "new_ollama_model", modelfile = modelfile) + """ + modelfile = getattr(tokenizer, "_ollama_modelfile", None) + if modelfile is None: + raise RuntimeError( + "Unsloth: Tokenizer does not have a `ollama_modelfile` attribute.\n"\ + "Please use get_chat_template(...)." 
+ ) + pass + + system_message = getattr(tokenizer, "_system_message", None) + if system_message is None: + __SYSTEM_MESSAGE__ = "" + else: + __SYSTEM_MESSAGE__ = f'SYSTEM """{system_message}"""' + pass + + modelfile = modelfile\ + .replace("{{", "⚫@✅#🦥")\ + .replace("}}", "⚡@🦥#⛵")\ + .format( + __FILE_LOCATION__ = gguf_location, + __SYSTEM_MESSAGE__ = __SYSTEM_MESSAGE__, + __EOS_TOKEN__ = tokenizer.eos_token, + )\ + .replace("⚫@✅#🦥", "{{")\ + .replace("⚡@🦥#⛵", "}}")\ + .rstrip() + pass + + return modelfile +pass + + def create_stopping_criteria(tokenizer, stop_word = "eos_token"): class StoppingCriteriaSub(StoppingCriteria): __slots__ = "stop_token", "single_match", "length", @@ -670,7 +1281,8 @@ def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf") if tokenizer.chat_template is not None: prompt = tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) prompt = prompt.replace("'", "") # Subprocess does not like '' - prompts.append(prompts) + prompt = remove_special_tokens(tokenizer, prompt) + prompts.append(prompt) pass for prompt in prompts: @@ -688,9 +1300,9 @@ def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf") gguf_tokenized = re.findall("([\d]{1,}) \-\> \'([^\']{1,})\'", gguf_tokens, flags = re.MULTILINE) gguf_tokenized = [(int(x[0]), x[1],) for x in gguf_tokenized] input_ids = tokenizer(prompt).input_ids + tokens = tokenizer.batch_decode(input_ids) hf_tokenized = list(zip(input_ids, tokens)) - print(gguf_tokenized[:5]) # Compare to Huggingface for j, (hf_token, gguf_token) in enumerate(zip(hf_tokenized, gguf_tokenized)): @@ -698,9 +1310,10 @@ def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf") print("Failed GGUF != HF at", j) print("HF =", hf_token) print("GGUF =", gguf_token) - print(hf_tokenized[:j+1]) - print(gguf_tokenized[:j+1]) - print(gguf_tokens) + print(hf_tokenized) + print() + print(gguf_tokenized) + print() raise 
RuntimeError("Failed comparing GGUF to HF.") pass pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bcd4a7b30a..a693389355 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -31,7 +31,7 @@ import os import psutil -__version__ = "2024.5" +__version__ = "2024.6" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0e860b9de4..4cbbcf0a82 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -209,8 +209,9 @@ def LlamaAttention_fast_forward_inference( # Attention if bsz == 1: + Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 + # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) - A *= self.scalar # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch.matmul(A, Vnn, out = Qn) @@ -791,7 +792,7 @@ def _CausalLM_fast_forward( *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if past_key_values is not None and self.config.model_type != "qwen2": + if past_key_values is not None: outputs = fast_forward_inference( self, input_ids, @@ -1195,7 +1196,13 @@ def from_pretrained( f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) - import gc + import subprocess, re, gc + output = subprocess.check_output( + 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) + output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) + if output > 1: raise 
RuntimeError( + 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.') for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1206,12 +1213,12 @@ def from_pretrained( debug_info = """n_total_devices = total_train_batch_size // \\ args.gradient_accumulation_steps // self._train_batch_size - if n_total_devices > 2: + if n_total_devices > 1: logger.warning_once( - "Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" + "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) debug_info =""" debug_info = debug_info.split('\n') @@ -1236,17 +1243,17 @@ def from_pretrained( bsz = self._train_batch_size total_batches = bsz * ga * args.world_size n_total_devices = total_batches // ga // bsz - if n_total_devices > 2: + if n_total_devices > 1: logger.warning_once( - "Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", + "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" + "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) - divisor = n_total_devices / 2 + divisor = n_total_devices / 1 bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 2: - divisor = n_total_devices / 2 + if total_batches // ga // bsz > 1: + divisor = n_total_devices / 1 ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" check_batches = check_batches.split('\n') check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) @@ -1830,10 +1837,10 @@ def patch_peft_model( @staticmethod def for_inference(model): - if model.config.model_type == "qwen2": - FastLlamaModel.for_training(model) - return - pass + # if model.config.model_type == "qwen2": + # FastLlamaModel.for_training(model) + # return + # pass internal_model = model internal_model.gradient_checkpointing = False diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index b2f0e4efdc..3bc091b364 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -33,6 +33,9 @@ def _get_model_name(model_name, load_in_4bit = True): + # First try replacing lowercase 'b' with uppercase 'B' + model_name = model_name.lower() + if not SUPPORTS_FOURBIT and model_name in INT_TO_FLOAT_MAPPER: model_name = INT_TO_FLOAT_MAPPER[model_name] logger.warning_once( diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 8808b8554d..73aa06ca68 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -197,4 +197,12 @@ for value in values: FLOAT_TO_INT_MAPPER[value] = key pass + + # Get lowercased + lowered_key = key.lower() + INT_TO_FLOAT_MAPPER[lowered_key] = 
values[0].lower() + + for value in values: + FLOAT_TO_INT_MAPPER[value.lower()] = lowered_key + pass pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 365d60a3e4..fc2e1a9fb0 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -393,21 +393,6 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass - # Patch Trainer - from transformers.trainer import Trainer - if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - try: - inner_training_loop = inspect.getsource(Trainer._inner_training_loop) - except: - raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", - ) - pass - pass - # Patch Trainer from transformers.trainer import Trainer try: @@ -419,7 +404,7 @@ def from_pretrained( except: raise RuntimeError( "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\n" "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", ) @@ -447,7 +432,17 @@ def from_pretrained( f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning_once(debug_info)""" + logger.warning(debug_info) + import subprocess, re, gc + output = subprocess.check_output( + 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) + output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) + if output > 1: raise RuntimeError( + 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.') + for _ in range(3): + gc.collect() + torch.cuda.empty_cache()""" debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) @@ -455,12 +450,12 @@ def from_pretrained( debug_info = """n_total_devices = total_train_batch_size // \\ args.gradient_accumulation_steps // self._train_batch_size - if n_total_devices > 2: + if n_total_devices > 1: logger.warning_once( - "Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" + "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "* If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", ) debug_info =""" debug_info = debug_info.split('\n') @@ -485,16 +480,17 @@ def from_pretrained( bsz = self._train_batch_size total_batches = bsz * ga * args.world_size n_total_devices = total_batches // ga // bsz - if n_total_devices > 2: + if n_total_devices > 1: logger.warning_once( - "Please consider a commercial license - Unsloth was designed for the GPU Poor.\\n" - "The OSS currently works on 4 GPUs - we're a 2 person team, so please help fund\\n" - "our development costs by supporting us through Ko-fi or buying a license! Thanks!", + "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" + "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" + "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" + "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", ) - divisor = n_total_devices / 2 + divisor = n_total_devices / 1 bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 2: - divisor = n_total_devices / 2 + if total_batches // ga // bsz > 1: + divisor = n_total_devices / 1 ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" check_batches = check_batches.split('\n') check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py index 76fe31a6d1..115bf3e090 100644 --- a/unsloth/models/qwen2.py +++ b/unsloth/models/qwen2.py @@ -13,7 +13,6 @@ # limitations under the License. 
from .llama import * -from .mistral import FastMistralModel import os from ._utils import __version__ @@ -60,7 +59,7 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "Qwen/Qwen1.5-7B", + model_name = "Qwen/Qwen2-7B", max_seq_length = 4096, dtype = None, load_in_4bit = True, @@ -73,7 +72,7 @@ def from_pretrained( trust_remote_code = False, **kwargs, ): - return FastMistralModel.from_pretrained( + return FastLlamaModel.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, dtype = dtype, diff --git a/unsloth/save.py b/unsloth/save.py index 5d6f925d45..3ad2f3465a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -59,7 +59,8 @@ "fast_quantized" : "Recommended. Fast conversion. OK inference, OK file size.", "quantized" : "Recommended. Slow conversion. Fast inference, small files.", "f32" : "Not recommended. Retains 100% accuracy, but super slow and memory hungry.", - "f16" : "Fastest conversion + retains 100% accuracy. Slow and memory hungry.", + "bf16" : "Bfloat16 - Fastest conversion + retains 100% accuracy. Slow and memory hungry.", + "f16" : "Float16 - Fastest conversion + retains 100% accuracy. Slow and memory hungry.", "q8_0" : "Fast conversion. High resource use, but generally acceptable.", "q4_k_m" : "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", "q5_k_m" : "Recommended. 
Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", @@ -102,7 +103,7 @@ def check_if_sentencepiece_model(model, temporary_location = "_unsloth_sentencep if os.path.isfile(f"{file_location}/tokenizer.model"): sentencepiece_model = True pass - shutil.rmtree(file_location) + shutil.rmtree(file_location, ignore_errors = True) return sentencepiece_model pass @@ -700,7 +701,7 @@ def unsloth_save_model( # Remove temporary location import shutil - shutil.rmtree(temporary_location) + shutil.rmtree(temporary_location, ignore_errors = True) for _ in range(3): torch.cuda.empty_cache() @@ -763,7 +764,7 @@ def install_llama_cpp_old(version = -10): print(f"**[WARNING]** Deleting llama.cpp directory... {10-i} seconds left.") time.sleep(1) import shutil - shutil.rmtree("llama.cpp") + shutil.rmtree("llama.cpp", ignore_errors = True) pass # Clone a specific commit @@ -866,10 +867,11 @@ def _fix_gemma_gguf(): def save_to_gguf( model_type : str, + model_dtype : str, is_sentencepiece : bool = False, model_directory : str = "unsloth_finetuned_model", quantization_method : str = "fast_quantized", - first_conversion : str = "f16", + first_conversion : str = None, _run_installer = None, # Non blocking install of llama.cpp ): # logger.warning( @@ -877,6 +879,22 @@ def save_to_gguf( # "undergoing some major bug fixes as at 5th of May 2024. This is not an Unsloth issue.\n"\ # "Please be patient - GGUF saving should still work, but might not work as well." # ) + assert(model_dtype == "float16" or model_dtype == "bfloat16") + model_dtype = "f16" if model_dtype == "float16" else "bf16" + + # Check if bfloat16 is supported + if model_dtype == "bf16" and not torch.cuda.is_bf16_supported(): + logger.warning( + "Unsloth: Cannot convert to bf16 GGUF since your computer doesn't support it.\n"\ + "We shall switch instead to f16." 
+ ) + model_dtype = "f16" + pass + + # Check first_conversion as well + if first_conversion is None: + first_conversion = model_dtype + pass if quantization_method.startswith("iq2"): raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") @@ -889,7 +907,7 @@ def save_to_gguf( pass logger.warning_once(f"Unsloth: Converting {model_type} model. Can use fast conversion = {use_fast_convert}.") - if quantization_method == "not_quantized": quantization_method = "f16" + if quantization_method == "not_quantized": quantization_method = model_dtype elif quantization_method == "fast_quantized": quantization_method = "q8_0" elif quantization_method == "quantized": quantization_method = "q4_k_m" elif quantization_method is None: quantization_method = "q8_0" @@ -911,12 +929,13 @@ def save_to_gguf( print(print_info) # Check first_conversion format - if first_conversion == "f16" : pass - elif first_conversion == "f32" : pass - elif first_conversion == "q8_0": pass + if first_conversion == "f16" : pass + if first_conversion == "bf16" : pass + elif first_conversion == "f32" : pass + elif first_conversion == "q8_0" : pass else: raise RuntimeError( - f"Unsloth: `first_conversion` can only be one of ['f16', 'f32', 'q8_0'] and not `{first_conversion}`." + f"Unsloth: `first_conversion` can only be one of ['f16', 'bf16', 'f32', 'q8_0'] and not `{first_conversion}`." 
) pass @@ -935,11 +954,13 @@ def save_to_gguf( if quantization_method == "f32": first_conversion = "f32" elif quantization_method == "f16": first_conversion = "f16" + elif quantization_method == "bf16": first_conversion = "bf16" elif quantization_method == "q8_0": first_conversion = "q8_0" else: # Quantized models must have f16 as the default argument - if first_conversion == "f32" : pass - elif first_conversion == "f16" : pass + if first_conversion == "f32" : pass + elif first_conversion == "f16" : pass + elif first_conversion == "bf16" : pass elif first_conversion == "q8_0": logger.warning_once( "Unsloth: Using q8_0 for the `first_conversion` will lose a bit of accuracy, "\ @@ -950,8 +971,22 @@ def save_to_gguf( pass # Non llama/mistral needs can only use f32 or f16 - if not use_fast_convert and (first_conversion != "f16" or first_conversion != "f32"): - logger.warning_once("Unsloth: We must use f16 for non Llama and Mistral models.") + if not use_fast_convert and \ + (first_conversion != "f16" or first_conversion != "bf16" or first_conversion != "f32"): + + pass + # Latest llama.cpp works for all models for q8_0! + + # logger.warning_once("Unsloth: We must use f16 for non Llama and Mistral models.") + # first_conversion = "f16" + pass + + # Check if bfloat16 is supported + if first_conversion == "bf16" and not torch.cuda.is_bf16_supported(): + logger.warning( + "Unsloth: Cannot convert to bf16 GGUF since your computer doesn't support it.\n"\ + "We shall switch instead to f16." + ) first_conversion = "f16" pass @@ -975,6 +1010,7 @@ def save_to_gguf( vocab_type = "bpe" pass + # convert.py is deprecated! 
use_fast_convert = False if use_fast_convert: command = f"python llama.cpp/convert.py {model_directory} "\ @@ -1281,12 +1317,44 @@ def upload_to_huggingface( pass +def fix_tokenizer_bos_token(tokenizer): + # Check if BOS added already, then warn + fix_bos_token = False + chat_template = getattr(tokenizer, "chat_template", None) + + if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): + if chat_template is not None and \ + ( + tokenizer.bos_token in chat_template or \ + "{bos_token}" in chat_template.replace(" ", "") or \ + "{bos_token+" in chat_template.replace(" ", "") + ): + + fix_bos_token = True + logger.warning( + f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### Your chat template has a BOS token. We shall remove it temporarily." + ) + + # Remove {{bos_token}} + new_chat_template = re.sub(r"\{[\s]{0,}\{[\s]{0,}bos\_token[\s]{0,}\}[\s]{0,}\}", "", chat_template) + # Remove {{bos_token + + new_chat_template = re.sub(r"\{[\s]{0,}\{[\s]{0,}bos\_token[\s]{0,}\+[\s]{0,}", "", new_chat_template) + + tokenizer.chat_template = new_chat_template + + pass + pass + return fix_bos_token, chat_template +pass + + def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], tokenizer = None, quantization_method : str = "fast_quantized", - first_conversion : str = "f16", + first_conversion : str = None, push_to_hub : bool = False, token : Optional[Union[str, bool]] = None, private : Optional[bool] = None, @@ -1344,6 +1412,9 @@ def unsloth_save_pretrained_gguf( del arguments["quantization_method"] del arguments["first_conversion"] + # Fix tokenizer adding an extra BOS token at the front + fix_bos_token, old_chat_template = fix_tokenizer_bos_token(tokenizer) + # Non blocking install GGUF first if not os.path.exists("llama.cpp"): @@ -1386,31 +1457,40 @@ def unsloth_save_pretrained_gguf( pass pass + # Use old chat template if the bos is removed + if fix_bos_token: + tokenizer.chat_template = 
old_chat_template + pass + for _ in range(3): gc.collect() - model_type = self.config.model_type - is_sentencepiece_model = check_if_sentencepiece_model(self) - - # Check if BOS added already, then warn - print_bos_token_message = False - if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): - chat_template = getattr(tokenizer, "chat_template", None) - if chat_template is not None and \ - (tokenizer.bos_token in chat_template or "{bos_token}" in chat_template.replace(" ", "")): - print_bos_token_message = True - logger.warning( - f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ - "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." - ) - pass + model_dtype = self.config.torch_dtype + model_type = self.config.model_type + if type(model_dtype) is str: + assert(model_dtype == "float16" or model_dtype == "bfloat16") + elif model_dtype == torch.float16: + model_dtype = "float16" + elif model_dtype == torch.bfloat16: + model_dtype = "bfloat16" + else: + raise TypeError("Unsloth: Model dtype can only be float16 or bfloat16") pass + is_sentencepiece_model = check_if_sentencepiece_model(self) + # Save to GGUF - file_location = save_to_gguf(model_type, is_sentencepiece_model, + file_location = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) + if fix_bos_token: + logger.warning( + f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### We removed in GGUF's chat template for you." 
+ ) + pass + if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( @@ -1422,13 +1502,6 @@ def unsloth_save_pretrained_gguf( new_save_directory.lstrip('/.') print(f"Saved GGUF to https://huggingface.co/{link}") pass - - if print_bos_token_message: - logger.warning( - f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ - "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." - ) - pass pass @@ -1437,7 +1510,7 @@ def unsloth_push_to_hub_gguf( repo_id : str, tokenizer = None, quantization_method : str = "fast_quantized", - first_conversion : str = "f16", + first_conversion : str = None, use_temp_dir : Optional[bool] = None, commit_message : Optional[str] = "Trained with Unsloth", private : Optional[bool] = None, @@ -1490,6 +1563,9 @@ def unsloth_push_to_hub_gguf( del arguments["quantization_method"] del arguments["first_conversion"] + # Fix tokenizer adding an extra BOS token at the front + fix_bos_token, old_chat_template = fix_tokenizer_bos_token(tokenizer) + # Non blocking install GGUF first if not os.path.exists("llama.cpp"): @@ -1532,28 +1608,30 @@ def unsloth_push_to_hub_gguf( pass pass + # Use old chat template if the bos is removed + if fix_bos_token: + tokenizer.chat_template = old_chat_template + pass + for _ in range(3): gc.collect() - model_type = self.config.model_type - is_sentencepiece_model = check_if_sentencepiece_model(self) - - # Check if BOS added already, then warn - print_bos_token_message = False - if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): - chat_template = getattr(tokenizer, "chat_template", None) - if chat_template is not None and \ - (tokenizer.bos_token in chat_template or "{bos_token}" in chat_template.replace(" ", "")): - print_bos_token_message = True - logger.warning( - f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ - "Unsloth: ##### If you're using 
Ollama or GGUF etc, do not add a BOS in the chat template." - ) - pass + model_dtype = self.config.torch_dtype + model_type = self.config.model_type + if type(model_dtype) is str: + assert(model_dtype == "float16" or model_dtype == "bfloat16") + elif model_dtype == torch.float16: + model_dtype = "float16" + elif model_dtype == torch.bfloat16: + model_dtype = "bfloat16" + else: + raise TypeError("Unsloth: Model dtype can only be float16 or bfloat16") pass + is_sentencepiece_model = check_if_sentencepiece_model(self) + # Save to GGUF - file_location = save_to_gguf(model_type, is_sentencepiece_model, + file_location = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1568,10 +1646,10 @@ def unsloth_push_to_hub_gguf( print(f"Saved GGUF to https://huggingface.co/{link}") - if print_bos_token_message: + if fix_bos_token: logger.warning( - f"Unsloth: ##### The current model type of {model_type} auto adds a BOS token.\n"\ - "Unsloth: ##### If you're using Ollama or GGUF etc, do not add a BOS in the chat template." + f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### We removed in GGUF's chat template for you." 
) pass pass @@ -1579,7 +1657,6 @@ def unsloth_push_to_hub_gguf( def patch_saving_functions(model): import inspect - import re import types from typing import Callable, Optional, Union, List diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 6afea68057..f10b2c0a47 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -185,6 +185,111 @@ def convert_to_fast_tokenizer( pass +# Check Mistral chat template without BOS / EOS +mistral_template = \ + "{% if messages[0]['role'] == 'system' %}"\ + "{% if messages[1]['role'] == 'user' %}"\ + "{{ '[INST] ' + messages[0]['content'] + ' ' + messages[1]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[2:] %}"\ + "{% else %}"\ + "{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% endif %}"\ + "{% else %}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '[INST] ' + message['content'] + ' [/INST]' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ message['content'] }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}" +pass + +# Check Llama chat template without BOS / EOS +llama_template = \ + "{% if messages[0]['role'] == 'system' %}"\ + "{% if messages[1]['role'] == 'user' %}"\ + "{{ '[INST] <>\n' + messages[0]['content'] + '\n<>\n\n' + messages[1]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[2:] %}"\ + "{% else %}"\ + "{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\ + "{% set loop_messages = messages[1:] %}"\ + "{% endif %}"\ + "{% else %}"\ + "{% set loop_messages = messages %}"\ + "{% endif %}"\ + "{% for message in loop_messages %}"\ + "{% if message['role'] == 'user' %}"\ + "{{ '[INST] ' + message['content'].strip() + ' [/INST]' }}"\ + "{% elif message['role'] == 'assistant' %}"\ + "{{ ' ' + 
message['content'].strip() + ' ' }}"\ + "{% else %}"\ + "{{ raise_exception('Only user and assistant roles are supported!') }}"\ + "{% endif %}"\ + "{% endfor %}" +pass + + +def select_correct_slow_tokenizer( + tokenizer_name, + model_max_length = None, + padding_side = "right", + token = None, + trust_remote_code = False, + cache_dir = "huggingface_tokenizers_cache", +): + """ + Returns 'correct' tokenizer by checking if the chat templates are + actually tokenized correctly. + """ + messages = [ + {"role": "user", "content": "What is 2+2?"}, + {"role": "assistant", "content": "It's 4."}, + ] + + settings = ( + (False, False, True,), + (False, True, True,), + (True, False, True,), + (True, False, False,), + ) + + for (use_fast, legacy, from_slow,) in settings: + # Default as mentioned by Arthur from HF: + slow_tokenizer = AutoTokenizer.from_pretrained( + tokenizer_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + trust_remote_code = trust_remote_code, + # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 + use_fast = use_fast, + legacy = legacy, + from_slow = from_slow, + cache_dir = cache_dir, + ) + slow_tokenizer_chat_template = slow_tokenizer.chat_template + + slow_tokenizer.chat_template = llama_template + result1 = slow_tokenizer.decode(slow_tokenizer.apply_chat_template(messages)) + slow_tokenizer.chat_template = mistral_template + result2 = slow_tokenizer.decode(slow_tokenizer.apply_chat_template(messages)) + + # If 2 spaces seen, normally wrong! 
+ if " "*2 not in result1 and " "*2 not in result2: + slow_tokenizer.chat_template = slow_tokenizer_chat_template + return slow_tokenizer + pass + pass + # Return fast version as default + return slow_tokenizer +pass + + def assert_same_tokenization(slow_tokenizer, fast_tokenizer): # Get eos_token, bos_token etc dir_names = dir(slow_tokenizer) @@ -193,19 +298,64 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): if x.endswith("_token") and x.count("_") == 1 ))) all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens)) + + # Check if chat template is enabled! + check_chat_template1 = True + check_chat_template2 = True + check_chat_template3 = True + slow_chat_template = getattr(slow_tokenizer, "chat_template", None) + fast_chat_template = getattr(fast_tokenizer, "chat_template", None) + messages = [ + {"role": "user", "content": " What is 2+2? "}, + {"role": "assistant", "content": " It's 4. "}, + ] + # Check the tokenizer's own chat template + if slow_chat_template is not None and fast_chat_template is not None: + check_chat_template1 = \ + slow_tokenizer.apply_chat_template(messages) == \ + fast_tokenizer.apply_chat_template(messages) + pass + + # Check Mistral chat template without BOS / EOS + slow_tokenizer.chat_template = mistral_template + fast_tokenizer.chat_template = mistral_template + check_chat_template2 = \ + slow_tokenizer.apply_chat_template(messages) == \ + fast_tokenizer.apply_chat_template(messages) + pass + + # Check Llama chat template without BOS / EOS + slow_tokenizer.chat_template = llama_template + fast_tokenizer.chat_template = llama_template + check_chat_template3 = \ + slow_tokenizer.apply_chat_template(messages) == \ + fast_tokenizer.apply_chat_template(messages) + pass + + # Combine them all and revert chat templates + check_chat_template = check_chat_template1 and check_chat_template2 and check_chat_template3 + slow_tokenizer.chat_template = slow_chat_template + fast_tokenizer.chat_template = 
fast_chat_template + + # Try special tokens try: string = "\n".join(all_special_tokens) + \ "A quick brown fox jumps over the lazy dog!!\n\nHi
\n\n" + \ "".join(all_special_tokens) - return slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids + check_special_tokens = \ + slow_tokenizer(string).input_ids == \ + fast_tokenizer(string).input_ids + + return check_chat_template and check_special_tokens except: # For eg see https://github.com/unslothai/unsloth/issues/292 # Sometimes tokenizer has weird tokens, causing a combined tokenization to fail. # [TODO] We temporarily disable this for CodeLlama tokenizers if slow_tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING: - return True + return check_chat_template else: return False + pass pass @@ -358,17 +508,13 @@ def load_correct_tokenizer( # Mainly to solve Deepseek models with no tokenizer.model file slow_tokenizer = None try: - slow_tokenizer = AutoTokenizer.from_pretrained( + slow_tokenizer = select_correct_slow_tokenizer( tokenizer_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, trust_remote_code = trust_remote_code, - # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 - use_fast = False, - legacy = False, - from_slow = True, - cache_dir = cache_dir, + cache_dir = cache_dir, ) except: pass @@ -397,6 +543,7 @@ def load_correct_tokenizer( if assert_same_tokenization(slow_tokenizer, fast_tokenizer): return fast_tokenizer else: + logger.warning(f"Unsloth: Will load {tokenizer_name} as a legacy tokenizer.") return convert_to_fast_tokenizer(slow_tokenizer) pass else: @@ -574,6 +721,8 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # Get set and actual tokens where_untrained = where_untrained.tolist() if len(where_untrained) == 0: return + + # Remove untrained indices where it's longer where_untrained_set = frozenset(where_untrained) actual_bad_tokens = tokenizer.convert_ids_to_tokens(where_untrained) @@ -854,11 +1003,13 
@@ def patch_sft_trainer_tokenizer(): " )\n"\ "pass\n"\ "n_devices = torch.cuda.device_count()\n"\ - "more_than = 0\n"\ - "for j in range(n_devices):\n"\ - " vram = torch.cuda.max_memory_reserved(torch.cuda.device(j)) / 1024 / 1024 / 1024\n"\ - " more_than += (vram > 4)\n"\ - "if more_than > 1: raise RuntimeError('Error: More than 1 GPUs have a lot of VRAM usage.')\n"\ + "import subprocess, re\n"\ + "output = subprocess.check_output(\n"\ + " 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ + "output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output)\n"\ + "output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output)\n"\ + "if output > 1: raise RuntimeError(\n"\ + " 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.')\n"\ "for _ in range(3):\n"\ " gc.collect()\n"\ " torch.cuda.empty_cache()\n"\ From d2c0c1bbda7b35e8021cfd1e24ca7225fa875b29 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Jun 2024 02:58:08 +1000 Subject: [PATCH 0229/1088] Nightly (#632) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. 
* Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. 
Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann --- PARAMETERS.md | 87 +++++++++++++ unsloth/__init__.py | 29 ++++- unsloth/chat_templates.py | 2 +- unsloth/kernels/__init__.py | 1 + unsloth/kernels/fast_lora.py | 8 +- unsloth/models/loader.py | 68 ++++++----- unsloth/models/mapper.py | 3 + unsloth/save.py | 229 ++++++++++++++++++++++++++--------- unsloth/tokenizer_utils.py | 74 ++--------- 9 files changed, 342 insertions(+), 159 deletions(-) create mode 100644 PARAMETERS.md diff --git a/PARAMETERS.md b/PARAMETERS.md new file mode 100644 index 0000000000..94d6379897 --- /dev/null +++ b/PARAMETERS.md @@ -0,0 +1,87 @@ +## LoraConfig Parameters + +Adjusting the `LoraConfig` parameters allows you to balance model performance and computational efficiency in Low-Rank Adaptation (LoRA). Here’s a concise breakdown of key parameters: + +**r** +- **Description**: Rank of the low-rank decomposition for factorizing weight matrices. +- **Impact**: + - **Higher**: Retains more information, increases computational load. 
+ - **Lower**: Fewer parameters, more efficient training, potential performance drop if too small. + + +**lora_alpha** +- **Description**: Scaling factor for the low-rank matrices' contribution. +- **Impact**: + - **Higher**: Increases influence, speeds up convergence, risks instability or overfitting. + - **Lower**: Subtler effect, may require more training steps. + +**lora_dropout** +- **Description**: Probability of zeroing out elements in low-rank matrices for regularization. +- **Impact**: + - **Higher**: More regularization, prevents overfitting, may slow training and degrade performance. + - **Lower**: Less regularization, may speed up training, risks overfitting. + +**loftq_config** +- **Description**: Configuration for LoftQ, a quantization method for the backbone weights and initialization of LoRA layers. +- **Impact**: + - **Not None**: If specified, LoftQ will quantize the backbone weights and initialize the LoRA layers. It requires setting `init_lora_weights='loftq'`. + - **None**: LoftQ quantization is not applied. + - **Note**: Do not pass an already quantized model when using LoftQ as LoftQ handles the quantization process itself. + + +**use_rslora** +- **Description**: Enables Rank-Stabilized LoRA (RSLora). +- **Impact**: + - **True**: Uses Rank-Stabilized LoRA, setting the adapter scaling factor to `lora_alpha/math.sqrt(r)`, which has been proven to work better as per the [Rank-Stabilized LoRA paper](https://doi.org/10.48550/arXiv.2312.03732). + - **False**: Uses the original default scaling factor `lora_alpha/r`. + +**gradient_accumulation_steps** +- **Default**: 1 +- **Description**: The number of steps to accumulate gradients before performing a backpropagation update. +- **Impact**: + - **Higher**: Accumulate gradients over multiple steps, effectively increasing the batch size without requiring additional memory. This can improve training stability and convergence, especially with large models and limited hardware. 
+ - **Lower**: Faster updates but may require more memory per step and can be less stable. + +**weight_decay** +- **Default**: 0.01 +- **Description**: Regularization technique that applies a small penalty to the weights during training. +- **Impact**: + - **Non-zero Value (e.g., 0.01)**: Adds a penalty proportional to the magnitude of the weights to the loss function, helping to prevent overfitting by discouraging large weights. + - **Zero**: No weight decay is applied, which can lead to overfitting, especially in large models or with small datasets. + +**learning_rate** +- **Default**: 2e-4 +- **Description**: The rate at which the model updates its parameters during training. +- **Impact**: + - **Higher**: Faster convergence but risks overshooting optimal parameters and causing instability in training. + - **Lower**: More stable and precise updates but may slow down convergence, requiring more training steps to achieve good performance. + +## Target Modules + +**q_proj (query projection)** +- **Description**: Part of the attention mechanism in transformer models, responsible for projecting the input into the query space. +- **Impact**: Transforms the input into query vectors that are used to compute attention scores. + +**k_proj (key projection)** +- **Description**: Projects the input into the key space in the attention mechanism. +- **Impact**: Produces key vectors that are compared with query vectors to determine attention weights. + +**v_proj (value projection)** +- **Description**: Projects the input into the value space in the attention mechanism. +- **Impact**: Produces value vectors that are weighted by the attention scores and combined to form the output. + +**o_proj (output projection)** +- **Description**: Projects the output of the attention mechanism back into the original space. +- **Impact**: Transforms the combined weighted value vectors back to the input dimension, integrating attention results into the model. 
+ +**gate_proj (gate projection)** +- **Description**: Typically used in gated mechanisms within neural networks, such as gating units in gated recurrent units (GRUs) or other gating mechanisms. +- **Impact**: Controls the flow of information through the gate, allowing selective information passage based on learned weights. + +**up_proj (up projection)** +- **Description**: Used for up-projection, typically increasing the dimensionality of the input. +- **Impact**: Expands the input to a higher-dimensional space, often used in feedforward layers or when transitioning between different layers with differing dimensionalities. + +**down_proj (down projection)** +- **Description**: Used for down-projection, typically reducing the dimensionality of the input. +- **Impact**: Compresses the input to a lower-dimensional space, useful for reducing computational complexity and controlling the model size. diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d85eca003d..93960e2fbc 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -14,8 +14,20 @@ import os import warnings import importlib +import sys +from packaging.version import Version -# Currently only supports 1 GPU, or else seg faults will occur. +# Define a list of modules to check +MODULES_TO_CHECK = ["peft", "bitsandbytes"] + +# Check if any of the modules in the list have been imported +for module in MODULES_TO_CHECK: + if module in sys.modules: + raise ImportError(f"Unsloth: Please import Unsloth before {module}.") + pass +pass + +# Currently only supports 1 GPU, or else seg faults will occur. 
if "CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" devices = os.environ["CUDA_VISIBLE_DEVICES"] @@ -66,8 +78,14 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Try loading bitsandbytes and triton import bitsandbytes as bnb + import triton -from triton.common.build import libcuda_dirs +libcuda_dirs = lambda: None +if Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass +else: from triton.common.build import libcuda_dirs + import os import re import numpy as np @@ -103,8 +121,11 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 importlib.reload(bnb) importlib.reload(triton) try: - import bitsandbytes as bnb - from triton.common.build import libcuda_dirs + libcuda_dirs = lambda: None + if Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass + else: from triton.common.build import libcuda_dirs cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 4c782326b1..2e3761f567 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1286,7 +1286,7 @@ def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf") pass for prompt in prompts: - command = f"./llama.cpp/main -m {gguf_model} -n 0 --temp 0.0 --verbose-prompt "\ + command = f"./llama.cpp/llama-cli -m {gguf_model} -n 0 --temp 0.0 --verbose-prompt "\ f"--check-tensors -p '{prompt}'" datas = [] diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index b1fdba8328..ebea02afd7 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -24,6 +24,7 @@ ) from .fast_lora import ( get_lora_parameters, + get_lora_parameters_bias, apply_lora_mlp_swiglu, apply_lora_mlp_geglu_exact, apply_lora_mlp_geglu_approx, diff --git a/unsloth/kernels/fast_lora.py 
b/unsloth/kernels/fast_lora.py index aba44f0214..8f7aea585b 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -13,7 +13,13 @@ # limitations under the License. import torch -from .utils import fast_dequantize, QUANT_STATE, get_lora_parameters, matmul_lora +from .utils import ( + fast_dequantize, + QUANT_STATE, + get_lora_parameters, + get_lora_parameters_bias, + matmul_lora, +) class LoRA_MLP(torch.autograd.Function): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 3bc091b364..de1e2e57bf 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -33,11 +33,8 @@ def _get_model_name(model_name, load_in_4bit = True): - # First try replacing lowercase 'b' with uppercase 'B' - model_name = model_name.lower() - if not SUPPORTS_FOURBIT and model_name in INT_TO_FLOAT_MAPPER: - model_name = INT_TO_FLOAT_MAPPER[model_name] + model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] logger.warning_once( f"Unsloth: Your transformers version of {transformers_version} does not support native "\ f"4bit loading.\nThe minimum required version is 4.37.\n"\ @@ -47,7 +44,7 @@ def _get_model_name(model_name, load_in_4bit = True): ) elif not load_in_4bit and model_name in INT_TO_FLOAT_MAPPER: - new_model_name = INT_TO_FLOAT_MAPPER[model_name] + new_model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." @@ -55,7 +52,7 @@ def _get_model_name(model_name, load_in_4bit = True): model_name = new_model_name elif load_in_4bit and SUPPORTS_FOURBIT and model_name in FLOAT_TO_INT_MAPPER: - new_model_name = FLOAT_TO_INT_MAPPER[model_name] + new_model_name = FLOAT_TO_INT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ # f"We shall load `{new_model_name}` for 4x faster loading." 
@@ -70,17 +67,18 @@ def _get_model_name(model_name, load_in_4bit = True): class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( - model_name = "unsloth/llama-3-8b-bnb-4bit", - max_seq_length = None, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, - fix_tokenizer = True, - trust_remote_code = False, - use_gradient_checkpointing = True, - resize_model_vocab = None, + model_name = "unsloth/llama-3-8b-bnb-4bit", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + fix_tokenizer = True, + trust_remote_code = False, + use_gradient_checkpointing = "unsloth", + resize_model_vocab = None, + revision = None, *args, **kwargs, ): if token is None and "HF_TOKEN" in os.environ: @@ -95,12 +93,12 @@ def from_pretrained( # First check if it's a normal model via AutoConfig is_peft = False try: - model_config = AutoConfig.from_pretrained(model_name, token = token) + model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) is_peft = False except: try: # Most likely a PEFT model - peft_config = PeftConfig.from_pretrained(model_name, token = token) + peft_config = PeftConfig.from_pretrained(model_name, token = token, revision = revision) except: raise RuntimeError(f"Unsloth: `{model_name}` is not a full model or a PEFT model.") @@ -143,22 +141,24 @@ def from_pretrained( pass model, tokenizer = dispatch_model.from_pretrained( - model_name = model_name, - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - token = token, - device_map = device_map, - rope_scaling = rope_scaling, - fix_tokenizer = fix_tokenizer, - model_patcher = dispatch_model, - tokenizer_name = tokenizer_name, + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + 
fix_tokenizer = fix_tokenizer, + model_patcher = dispatch_model, + tokenizer_name = tokenizer_name, trust_remote_code = trust_remote_code, + revision = revision if not is_peft else None, *args, **kwargs, ) if resize_model_vocab is not None: model.resize_token_embeddings(resize_model_vocab) + pass # In case the model supports tagging, add the unsloth tag. if hasattr(model, "add_model_tags"): @@ -188,8 +188,16 @@ def from_pretrained( pass if is_peft: + # From https://github.com/huggingface/peft/issues/184 # Now add PEFT adapters - model = PeftModel.from_pretrained(model, old_model_name, token = token) + model.enable_input_require_grads() + model = PeftModel.from_pretrained( + model, + old_model_name, + token = token, + revision = revision, + is_trainable = True, + ) # Patch it as well! model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 73aa06ca68..5ef7583975 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -186,6 +186,9 @@ "unsloth/Qwen2-70B-Instruct-bnb-4bit" : ( "Qwen/Qwen2-70B-Instruct", ), + "mistralai/Codestral-22B-v0.1" : ( + "mistral-community/Codestral-22B-v0.1", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/save.py b/unsloth/save.py index 3ad2f3465a..cae59caede 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -22,7 +22,7 @@ import pickle import gc from transformers.models.llama.modeling_llama import logger -from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters +from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters_bias import subprocess import psutil import re @@ -132,9 +132,10 @@ def _free_cached_model(model): def _merge_lora(layer, name): + bias = None if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)): # Is LoRA so we need to merge! 
- W, quant_state, A, B, s = get_lora_parameters(layer) + W, quant_state, A, B, s, bias = get_lora_parameters_bias(layer) if quant_state is not None: dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] W = fast_dequantize(W, quant_state) @@ -156,7 +157,7 @@ def _merge_lora(layer, name): W = W.t().to(dtype) else: W = layer.weight - return W + return W, bias pass @@ -527,7 +528,12 @@ def unsloth_save_model( for item in LLAMA_WEIGHTS: proj = eval(f"layer.{item}") name = f"model.layers.{j}.{item}.weight" - W = _merge_lora(proj, name) + W, bias = _merge_lora(proj, name) + + # Bias term + if bias is not None: + state_dict[f"model.layers.{j}.{item}.bias"] = bias + pass if (torch.cuda.memory_allocated() + W.nbytes) < max_vram: # Save to GPU memory @@ -643,7 +649,8 @@ def unsloth_save_model( model.config = new_config # Save! - + + save_pretrained_settings["selected_adapters"] = None # Check if pushing to an organization if save_pretrained_settings["push_to_hub"] and (username != actual_username): print(f"Unsloth: Saving to organization with address {new_save_directory}") @@ -785,7 +792,7 @@ def install_llama_cpp_old(version = -10): pass pass # Check if successful - if not os.path.exists("llama.cpp/quantize"): + if not os.path.exists("llama.cpp/quantize") and not os.path.exists("llama.cpp/llama-quantize"): raise RuntimeError( "Unsloth: llama.cpp GGUF seems to be too buggy to install.\n"\ "File a report to llama.cpp's main repo since this is not an Unsloth issue." @@ -794,7 +801,7 @@ def install_llama_cpp_old(version = -10): pass -def install_llama_cpp_blocking(use_cuda = True): +def install_llama_cpp_blocking(use_cuda = False): # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? # use_cuda = "LLAMA_CUDA=1" if use_cuda else "" @@ -822,49 +829,6 @@ def install_llama_cpp_blocking(use_cuda = True): pass -def _fix_gemma_gguf(): - # Fixes Gemma saving to GGUF to float32 instead of float16! 
- with open("llama.cpp/convert-hf-to-gguf.py", "rb") as file: - text = file.read() - pass - - gemma_start = text.find(b"class GemmaModel(Model):") - if gemma_start == -1: return - - gemma_end = text.find(b"self.gguf_writer.add_tensor(new_name, data)", gemma_start) - if gemma_end == -1: return - - gemma_text = text[gemma_start : gemma_end] - bad_text = \ -b""" data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16)""" - good_text = \ -b""" # if f32 desired, convert any float16 to float32 - if self.ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16)""" - find_bad = gemma_text.find(bad_text) - if find_bad == -1: return - - gemma_text = gemma_text[:find_bad] + good_text + gemma_text[find_bad + len(bad_text):] - text = text[:gemma_start] + gemma_text + text[gemma_end:] - - with open("llama.cpp/convert-hf-to-gguf.py", "w+b") as file: - file.write(text) - pass -pass - - def save_to_gguf( model_type : str, model_dtype : str, @@ -930,7 +894,7 @@ def save_to_gguf( # Check first_conversion format if first_conversion == "f16" : pass - if first_conversion == "bf16" : pass + elif first_conversion == "bf16" : pass elif first_conversion == "f32" : pass elif first_conversion == "q8_0" : pass else: @@ -946,8 +910,20 @@ def save_to_gguf( error = 0 install_llama_cpp_blocking() pass + # Check if successful. 
If not install 10th latest release - if error != 0 or not os.path.exists("llama.cpp/quantize"): + + # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize + # and llama.cpp/main changed to llama.cpp/llama-cli + # See https://github.com/ggerganov/llama.cpp/pull/7809 + quantize_location = None + if os.path.exists("llama.cpp/quantize"): + quantize_location = "llama.cpp/quantize" + elif os.path.exists("llama.cpp/llama-quantize"): + quantize_location = "llama.cpp/llama-quantize" + pass + + if error != 0 or quantize_location is None: print(f"Unsloth: llama.cpp error code = {error}.") install_llama_cpp_old(-10) pass @@ -1017,9 +993,6 @@ def save_to_gguf( f"--outfile {final_location} --vocab-type {vocab_type} "\ f"--outtype {first_conversion} --concurrency {n_cpus} --pad-vocab" else: - # Need to fix convert-hf-to-gguf.py for some models! - # _fix_gemma_gguf() - command = f"python llama.cpp/convert-hf-to-gguf.py {model_directory} "\ f"--outfile {final_location} "\ f"--outtype {first_conversion}" @@ -1065,7 +1038,7 @@ def save_to_gguf( print(f"Unsloth: [2] Converting GGUF 16bit into {quantization_method}. 
This will take 20 minutes...") final_location = f"./{model_directory}-unsloth.{quantization_method.upper()}.gguf" - command = f"./llama.cpp/quantize {old_location} "\ + command = f"./{quantize_location} {old_location} "\ f"{final_location} {quantization_method} {n_cpus}" # quantize uses stderr @@ -1654,6 +1627,140 @@ def unsloth_push_to_hub_gguf( pass pass +# Corrected function to save LoRA to a custom directory +def save_lora_to_custom_dir(model, tokenizer, save_directory): + # Create the custom directory if it doesn't exist + os.makedirs(save_directory, exist_ok=True) + + # Call the unsloth_save_model function with the custom directory + unsloth_save_model( + model, + tokenizer, + save_directory=save_directory, + save_method="lora", + push_to_hub=False, + ) + +# Corrected method within the model class to convert LoRA to GGML and push to Hugging Face Hub +def unsloth_convert_lora_to_ggml_and_push_to_hub( + self, + tokenizer, + repo_id: str, + use_temp_dir: Optional[bool] = None, + commit_message: Optional[str] = "Converted LoRA to GGML with Unsloth", + private: Optional[bool] = None, + token: Union[bool, str, None] = None, + create_pr: bool = False, + revision: str = None, + commit_description: str = "Convert LoRA to GGML format using Unsloth", + temporary_location: str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage: float = 0.85, +): + if not os.path.exists("llama.cpp"): + if IS_KAGGLE_ENVIRONMENT: + python_install = install_python_non_blocking(["protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda=False) + makefile = None + else: + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + python_install.wait() + else: + makefile = None + + for _ in range(3): + gc.collect() + + lora_directory_push = "lora-to-ggml-push" + save_lora_to_custom_dir(self, tokenizer, lora_directory_push) + + model_type = 
self.config.model_type + output_file = os.path.join(lora_directory_push, "ggml-adapter-model.bin") + + print(f"Unsloth: Converting auto-saved LoRA adapters at {lora_directory_push} to GGML format.") + print(f"The output file will be {output_file}") + + command = f"python3 llama.cpp/convert-lora-to-ggml.py {lora_directory_push} {output_file} llama" + + try: + with subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True) as sp: + for line in sp.stdout: + print(line, end="", flush=True) + for line in sp.stderr: + print(line, end="", flush=True) + sp.wait() + if sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, command) + except subprocess.CalledProcessError as e: + print(f"Error: Conversion failed with return code {e.returncode}") + return + + print(f"Unsloth: Conversion completed! Output file: {output_file}") + + print("Unsloth: Uploading GGML file to Hugging Face Hub...") + username = upload_to_huggingface( + self, repo_id, token, + "GGML converted LoRA", "ggml", output_file, None, private, + ) + link = f"{repo_id.lstrip('/')}" + print("Unsloth: Done.") + print(f"Converted LoRA to GGML and uploaded to https://huggingface.co/{link}") + print("\nThis GGML making function was made by Maheswar. 
Ping him @Maheswar on the Unsloth Discord or on HuggingFace (@mahiatlinux) if you like this!") + +def unsloth_convert_lora_to_ggml_and_save_locally( + self, + save_directory: str, # Added parameter for the folder name + tokenizer, + temporary_location: str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage: float = 0.85, +): + if not os.path.exists("llama.cpp"): + if IS_KAGGLE_ENVIRONMENT: + python_install = install_python_non_blocking(["protobuf"]) + python_install.wait() + install_llama_cpp_blocking(use_cuda=False) + makefile = None + else: + git_clone = install_llama_cpp_clone_non_blocking() + python_install = install_python_non_blocking(["protobuf"]) + git_clone.wait() + makefile = install_llama_cpp_make_non_blocking() + python_install.wait() + else: + makefile = None + + for _ in range(3): + gc.collect() + + # Use the provided save_directory for local saving + save_lora_to_custom_dir(self, tokenizer, save_directory) + + model_type = self.config.model_type + output_file = os.path.join(save_directory, "ggml-adapter-model.bin") + + print(f"Unsloth: Converting auto-saved LoRA adapters at {save_directory} to GGML format.") + print(f"The output file will be {output_file}") + + command = f"python3 llama.cpp/convert-lora-to-ggml.py {save_directory} {output_file} llama" + + try: + with subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True) as sp: + for line in sp.stdout: + print(line, end="", flush=True) + for line in sp.stderr: + print(line, end="", flush=True) + sp.wait() + if sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, command) + except subprocess.CalledProcessError as e: + print(f"Error: Conversion failed with return code {e.returncode}") + return + print("Unsloth: Done.") + print(f"Unsloth: Conversion completed! Output file: {output_file}") + print("\nThis GGML making function was made by Maheswar. 
Ping him @Maheswar on the Unsloth Discord or on HuggingFace (@mahiatlinux) if you like this!") def patch_saving_functions(model): import inspect @@ -1746,10 +1853,12 @@ def patch_saving_functions(model): # Add saving methods to top level model if hasattr(model, "config"): # Counteract tokenizers - model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) - model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) - model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) - model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) + model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) + model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) + model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) + model.push_to_hub_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_push_to_hub, model) + model.save_pretrained_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_save_locally, model) pass return model pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index f10b2c0a47..395c3b7311 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -232,62 +232,6 @@ def convert_to_fast_tokenizer( "{% endif %}"\ "{% endfor %}" pass - - -def select_correct_slow_tokenizer( - tokenizer_name, - model_max_length = None, - padding_side = "right", - token = None, - trust_remote_code = False, - cache_dir = "huggingface_tokenizers_cache", -): - """ - Returns 'correct' tokenizer by checking if the chat templates are - actually tokenized correctly. 
- """ - messages = [ - {"role": "user", "content": "What is 2+2?"}, - {"role": "assistant", "content": "It's 4."}, - ] - - settings = ( - (False, False, True,), - (False, True, True,), - (True, False, True,), - (True, False, False,), - ) - - for (use_fast, legacy, from_slow,) in settings: - # Default as mentioned by Arthur from HF: - slow_tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, - trust_remote_code = trust_remote_code, - # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 - use_fast = use_fast, - legacy = legacy, - from_slow = from_slow, - cache_dir = cache_dir, - ) - slow_tokenizer_chat_template = slow_tokenizer.chat_template - - slow_tokenizer.chat_template = llama_template - result1 = slow_tokenizer.decode(slow_tokenizer.apply_chat_template(messages)) - slow_tokenizer.chat_template = mistral_template - result2 = slow_tokenizer.decode(slow_tokenizer.apply_chat_template(messages)) - - # If 2 spaces seen, normally wrong! 
- if " "*2 not in result1 and " "*2 not in result2: - slow_tokenizer.chat_template = slow_tokenizer_chat_template - return slow_tokenizer - pass - pass - # Return fast version as default - return slow_tokenizer -pass def assert_same_tokenization(slow_tokenizer, fast_tokenizer): @@ -508,13 +452,17 @@ def load_correct_tokenizer( # Mainly to solve Deepseek models with no tokenizer.model file slow_tokenizer = None try: - slow_tokenizer = select_correct_slow_tokenizer( + slow_tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, - model_max_length = model_max_length, - padding_side = padding_side, - token = token, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, trust_remote_code = trust_remote_code, - cache_dir = cache_dir, + # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373 + use_fast = False, + legacy = False, + from_slow = True, + cache_dir = cache_dir, ) except: pass @@ -786,7 +734,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): pass # Count all the possible bad tokens - final_counts = np.zeros(len(tokenizer), dtype = np.int64) + final_counts = np.zeros(max(len(tokenizer), embedding_matrix.shape[0]), dtype = np.int64) def mapping(examples): input_ids = examples["input_ids"] counter = np.fromiter(itertools.chain.from_iterable(input_ids), dtype = np.int32) @@ -972,7 +920,7 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ 
From 52ab81372de919de6b52373e68b8c65328b42dbf Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 14 Jun 2024 03:24:28 +1000 Subject: [PATCH 0230/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 395c3b7311..57624e6d84 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -247,6 +247,11 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): check_chat_template1 = True check_chat_template2 = True check_chat_template3 = True + + """ + Weirdly Mistral tokenizers are actually correct?? + Ie below will actually load mistral v1 and v3 incorrectly! + slow_chat_template = getattr(slow_tokenizer, "chat_template", None) fast_chat_template = getattr(fast_tokenizer, "chat_template", None) messages = [ @@ -254,7 +259,7 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): {"role": "assistant", "content": " It's 4. 
"}, ] # Check the tokenizer's own chat template - if slow_chat_template is not None and fast_chat_template is not None: + if slow_chat_template is not None and fast_chat_template is not None: check_chat_template1 = \ slow_tokenizer.apply_chat_template(messages) == \ fast_tokenizer.apply_chat_template(messages) @@ -277,9 +282,10 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): pass # Combine them all and revert chat templates - check_chat_template = check_chat_template1 and check_chat_template2 and check_chat_template3 slow_tokenizer.chat_template = slow_chat_template fast_tokenizer.chat_template = fast_chat_template + """ + check_chat_template = check_chat_template1 and check_chat_template2 and check_chat_template3 # Try special tokens try: From 7ac1a78904802a66e6e799f03b6f0c910cd0735a Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 14 Jun 2024 16:00:26 +1000 Subject: [PATCH 0231/1088] Update __init__.py --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 93960e2fbc..0105199fba 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -18,7 +18,7 @@ from packaging.version import Version # Define a list of modules to check -MODULES_TO_CHECK = ["peft", "bitsandbytes"] +MODULES_TO_CHECK = ["bitsandbytes"] # Check if any of the modules in the list have been imported for module in MODULES_TO_CHECK: From b34a44e96907920c741553b9c49cb73f6b5fceea Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Jun 2024 20:59:45 +1000 Subject: [PATCH 0232/1088] Qwen bug fixes (#639) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update 
__init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained 
(#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. 
``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. * Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman --- PARAMETERS.md | 87 --------------------------------------- README.md | 18 ++++---- unsloth/models/mistral.py | 4 +- unsloth/models/qwen2.py | 8 ++-- 4 files changed, 16 insertions(+), 101 deletions(-) delete mode 100644 PARAMETERS.md diff --git a/PARAMETERS.md b/PARAMETERS.md deleted file mode 100644 index 94d6379897..0000000000 --- a/PARAMETERS.md +++ /dev/null @@ -1,87 +0,0 @@ -## LoraConfig Parameters - -Adjusting the `LoraConfig` parameters allows you to balance model performance and computational efficiency in Low-Rank Adaptation (LoRA). 
Here’s a concise breakdown of key parameters: - -**r** -- **Description**: Rank of the low-rank decomposition for factorizing weight matrices. -- **Impact**: - - **Higher**: Retains more information, increases computational load. - - **Lower**: Fewer parameters, more efficient training, potential performance drop if too small. - - -**lora_alpha** -- **Description**: Scaling factor for the low-rank matrices' contribution. -- **Impact**: - - **Higher**: Increases influence, speeds up convergence, risks instability or overfitting. - - **Lower**: Subtler effect, may require more training steps. - -**lora_dropout** -- **Description**: Probability of zeroing out elements in low-rank matrices for regularization. -- **Impact**: - - **Higher**: More regularization, prevents overfitting, may slow training and degrade performance. - - **Lower**: Less regularization, may speed up training, risks overfitting. - -**loftq_config** -- **Description**: Configuration for LoftQ, a quantization method for the backbone weights and initialization of LoRA layers. -- **Impact**: - - **Not None**: If specified, LoftQ will quantize the backbone weights and initialize the LoRA layers. It requires setting `init_lora_weights='loftq'`. - - **None**: LoftQ quantization is not applied. - - **Note**: Do not pass an already quantized model when using LoftQ as LoftQ handles the quantization process itself. - - -**use_rslora** -- **Description**: Enables Rank-Stabilized LoRA (RSLora). -- **Impact**: - - **True**: Uses Rank-Stabilized LoRA, setting the adapter scaling factor to `lora_alpha/math.sqrt(r)`, which has been proven to work better as per the [Rank-Stabilized LoRA paper](https://doi.org/10.48550/arXiv.2312.03732). - - **False**: Uses the original default scaling factor `lora_alpha/r`. - -**gradient_accumulation_steps** -- **Default**: 1 -- **Description**: The number of steps to accumulate gradients before performing a backpropagation update. 
-- **Impact**: - - **Higher**: Accumulate gradients over multiple steps, effectively increasing the batch size without requiring additional memory. This can improve training stability and convergence, especially with large models and limited hardware. - - **Lower**: Faster updates but may require more memory per step and can be less stable. - -**weight_decay** -- **Default**: 0.01 -- **Description**: Regularization technique that applies a small penalty to the weights during training. -- **Impact**: - - **Non-zero Value (e.g., 0.01)**: Adds a penalty proportional to the magnitude of the weights to the loss function, helping to prevent overfitting by discouraging large weights. - - **Zero**: No weight decay is applied, which can lead to overfitting, especially in large models or with small datasets. - -**learning_rate** -- **Default**: 2e-4 -- **Description**: The rate at which the model updates its parameters during training. -- **Impact**: - - **Higher**: Faster convergence but risks overshooting optimal parameters and causing instability in training. - - **Lower**: More stable and precise updates but may slow down convergence, requiring more training steps to achieve good performance. - -## Target Modules - -**q_proj (query projection)** -- **Description**: Part of the attention mechanism in transformer models, responsible for projecting the input into the query space. -- **Impact**: Transforms the input into query vectors that are used to compute attention scores. - -**k_proj (key projection)** -- **Description**: Projects the input into the key space in the attention mechanism. -- **Impact**: Produces key vectors that are compared with query vectors to determine attention weights. - -**v_proj (value projection)** -- **Description**: Projects the input into the value space in the attention mechanism. -- **Impact**: Produces value vectors that are weighted by the attention scores and combined to form the output. 
- -**o_proj (output projection)** -- **Description**: Projects the output of the attention mechanism back into the original space. -- **Impact**: Transforms the combined weighted value vectors back to the input dimension, integrating attention results into the model. - -**gate_proj (gate projection)** -- **Description**: Typically used in gated mechanisms within neural networks, such as gating units in gated recurrent units (GRUs) or other gating mechanisms. -- **Impact**: Controls the flow of information through the gate, allowing selective information passage based on learned weights. - -**up_proj (up projection)** -- **Description**: Used for up-projection, typically increasing the dimensionality of the input. -- **Impact**: Expands the input to a higher-dimensional space, often used in feedforward layers or when transitioning between different layers with differing dimensionalities. - -**down_proj (down projection)** -- **Description**: Used for down-projection, typically reducing the dimensionality of the input. -- **Impact**: Compresses the input to a lower-dimensional space, useful for reducing computational complexity and controlling the model size. diff --git a/README.md b/README.md index 2c50f45763..534079ed49 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral 7B v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - +- Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! @@ -76,7 +76,7 @@ model = FastLanguageModel.get_peft_model( ## 🥇 Performance Benchmarking -- For the full list of **reproducable** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) +- For the full list of **reproducible** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) | 1 A100 40GB | 🤗Hugging Face | Flash Attention | 🦥Unsloth Open Source | 🦥[Unsloth Pro](https://unsloth.ai/pricing) | |--------------|--------------|-----------------|---------------------|-----------------| @@ -100,14 +100,16 @@ model = FastLanguageModel.get_peft_model( ### Conda Installation Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs. 
```bash -conda create --name unsloth_env python=3.10 +conda create --name unsloth_env \ + python=3.10 \ + pytorch-cuda=<11.8/12.1> \ + pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers \ + -y conda activate unsloth_env -conda install pytorch-cuda=<12.1/11.8> pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers - pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install --no-deps trl peft accelerate bitsandbytes +pip install --no-deps "trl<0.9.0" peft accelerate bitsandbytes ``` ### Pip Installation @@ -162,7 +164,7 @@ pip install --no-deps packaging ninja einops flash-attn xformers trl peft accele # Pre Ampere RTX 2080, T4, GTX 1080 GPUs: pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install --no-deps xformers trl peft accelerate bitsandbytes +pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes ``` 7. For Pytorch 2.3.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. ```bash @@ -257,7 +259,7 @@ trainer.train() # (1) Saving to GGUF / merging to 16bit for vLLM # (2) Continued training from a saved LoRA adapter # (3) Adding an evaluation loop / OOMs -# (4) Cutomized chat templates +# (4) Customized chat templates ``` diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index fc2e1a9fb0..ff2e909fb9 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -512,7 +512,7 @@ def from_pretrained( if "n_total_devices >" not in inner_training_loop: raise RuntimeError( "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n" + "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" "We're a 2 person team, so we still have to fund our development costs - thanks!\n" "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", ) @@ -521,6 +521,7 @@ def from_pretrained( "is_sagemaker_mp_enabled()", "False", ) + exec(inner_training_loop, globals()) Trainer._inner_training_loop = _fast_inner_training_loop # Save max_seq_length @@ -560,6 +561,7 @@ def from_pretrained( # Add save modules patch_saving_functions(model) + Trainer._inner_training_loop = _fast_inner_training_loop # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py index 115bf3e090..47327280b9 100644 --- a/unsloth/models/qwen2.py +++ b/unsloth/models/qwen2.py @@ -12,9 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .llama import * -import os -from ._utils import __version__ +from .mistral import * from transformers.models.qwen2.modeling_qwen2 import ( Qwen2Attention, @@ -34,7 +32,7 @@ pass -class FastQwen2Model(FastLlamaModel): +class FastQwen2Model(FastMistralModel): @staticmethod def pre_patch(): @@ -72,7 +70,7 @@ def from_pretrained( trust_remote_code = False, **kwargs, ): - return FastLlamaModel.from_pretrained( + return FastMistralModel.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, dtype = dtype, From 7b4235514af9dbe4bcaf69e5c2453483493c88ab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 15 Jun 2024 00:52:33 +1000 Subject: [PATCH 0233/1088] Fix segfaults (#641) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update 
chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained 
(#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. 
Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. * Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman --- unsloth/models/llama.py | 93 ++++++------ unsloth/models/mistral.py | 299 +++----------------------------------- unsloth/models/qwen2.py | 46 +++--- 3 files changed, 90 insertions(+), 348 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 4cbbcf0a82..7cbdcfbda7 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -51,6 +51,7 @@ pass from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig 
+from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING from transformers import set_seed as transformers_set_seed from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model from peft import PeftModelForCausalLM @@ -1028,16 +1029,16 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "unsloth/llama-2-7b-bnb-4bit", - max_seq_length = None, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, - fix_tokenizer = True, - model_patcher = None, - tokenizer_name = None, + model_name = "unsloth/llama-3-8b-bnb-4bit", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + fix_tokenizer = True, + model_patcher = None, + tokenizer_name = None, trust_remote_code = False, **kwargs, ): @@ -1070,9 +1071,17 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - # RoPE scaling - model_max_seq_length = \ - AutoConfig.from_pretrained(model_name, token = token).max_position_embeddings + # RoPE Scaling + model_config = AutoConfig.from_pretrained(model_name, token = token) + model_max_seq_length = model_config.max_position_embeddings + + # Check if RoPE Scaling is even allowed + model_function = MODEL_FOR_CAUSAL_LM_MAPPING[model_config.__class__] + has_rope_scaling = False + try: + with open(inspect.getfile(model_function), "r") as file: + has_rope_scaling = "self.config.rope_scaling" in file.read() + except: pass # If max_seq_length is not specified, use maximum fron config if max_seq_length is None: @@ -1080,14 +1089,28 @@ def from_pretrained( pass if (rope_scaling is None) and (max_seq_length > model_max_seq_length): + rope_scaling = max_seq_length / model_max_seq_length + logger.warning_once( f"Unsloth: {model_name} can only handle sequence lengths of at most "\ f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ 
f"{round(rope_scaling, 3)}, it can be magically be extended to "\ f"{max_seq_length}!" ) + + # Warn RoPE scaling isn't allowed + if not has_rope_scaling: + raise RuntimeError( + "However, {model_name} doesn't support RoPE Scaling!\n"\ + "Please file a feature request at https://github.com/unslothai/unsloth." + ) + pass + rope_scaling = {"type": "linear", "factor": rope_scaling,} + + # Add to kwargs + kwargs["rope_scaling"] = rope_scaling pass bnb_config = None @@ -1103,39 +1126,16 @@ def from_pretrained( # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 # RoPE Scaling's max_position_embeddings must be updated max_position_embeddings = max(max_seq_length, model_max_seq_length) - try: - model = AutoModelForCausalLM.from_pretrained( - model_name, - device_map = device_map, - torch_dtype = dtype, - quantization_config = bnb_config, - token = token, - rope_scaling = rope_scaling, - max_position_embeddings = max_position_embeddings, - trust_remote_code = trust_remote_code, - **kwargs, - ) - except Exception as error: - if "rope_scaling" in str(error): - if rope_scaling is not None: - raise TypeError("Unsloth: {model_name} does not support rope_scaling.") - pass - - # Counteract missing rope_scaling - model = AutoModelForCausalLM.from_pretrained( - model_name, - device_map = device_map, - torch_dtype = dtype, - quantization_config = bnb_config, - token = token, - max_position_embeddings = max_position_embeddings, - trust_remote_code = trust_remote_code, - **kwargs, - ) - else: - raise error - pass - pass + model = AutoModelForCausalLM.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + quantization_config = bnb_config, + token = token, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + **kwargs, + ) # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name @@ -1423,7 +1423,6 @@ def get_peft_model( if loftq_config is None: 
loftq_config = {} - import inspect signature = str(inspect.signature(LoraConfig)) SUPPORTS_LOFTQ = "loftq_config" in signature SUPPORTS_RSLORA = "use_rslora" in signature diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index ff2e909fb9..291f0aa502 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -289,289 +289,32 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "unsloth/mistral-7b-bnb-4bit", - max_seq_length = None, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, # Mistral does not support RoPE scaling - fix_tokenizer = True, - model_patcher = None, - tokenizer_name = None, + model_name = "unsloth/mistral-7b-bnb-4bit", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, # Mistral does not support RoPE scaling + fix_tokenizer = True, + model_patcher = None, + tokenizer_name = None, trust_remote_code = False, **kwargs, ): - if token is None and "HF_TOKEN" in os.environ: - token = os.environ["HF_TOKEN"] - - if token is None and "HUGGINGFACE_TOKEN" in os.environ: - token = os.environ["HUGGINGFACE_TOKEN"] - - if model_patcher is None: model_patcher = FastMistralModel - # Mistral does NOT support RoPE Scaling! - if rope_scaling is not None: - logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") - pass - - SUPPORTS_BFLOAT16 = is_bfloat16_supported() - gpu_stats = torch.cuda.get_device_properties(0) - max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) - - statistics = \ - f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release {__version__}\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. 
CUDA Toolkit = {torch.version.cuda}.\n"\ - f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ - f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' - print(statistics) - model_patcher.pre_patch() - # get_statistics() - - if dtype is None: - dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 - elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: - logger.warning_once("Device does not support bfloat16. Will change to float16.") - dtype = torch.float16 - - assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - - # Check max sequence length - model_config = AutoConfig.from_pretrained(model_name, token = token) - model_max_seq_length = model_config.max_position_embeddings - - # If max_seq_length is not specified, use maximum fron config - if max_seq_length is None: - max_seq_length = model_max_seq_length - pass - - # Mistral does NOT support RoPE Scaling sadly so we have to error out. 
- if max_seq_length > model_max_seq_length: - raise RuntimeError( - f"Unsloth: Unfortunately {model_patcher.__name__[4:-5]} type models do not support RoPE scaling!\n"\ - f"The maximum sequence length supported is {model_max_seq_length}.", - ) - pass - - bnb_config = None - if load_in_4bit: - bnb_config = BitsAndBytesConfig( - load_in_4bit = True, - bnb_4bit_use_double_quant = True, - bnb_4bit_quant_type = "nf4", - bnb_4bit_compute_dtype = dtype, - ) - - max_position_embeddings = max(max_seq_length, model_max_seq_length) - model = AutoModelForCausalLM.from_pretrained( - model_name, - device_map = device_map, - torch_dtype = dtype, - quantization_config = bnb_config, - token = token, - # rope_scaling = rope_scaling, - trust_remote_code = trust_remote_code, - **kwargs, - ) - - # Counteract saved tokenizers - tokenizer_name = model_name if tokenizer_name is None else tokenizer_name - tokenizer = load_correct_tokenizer( - tokenizer_name, - model_max_length = max_position_embeddings, - padding_side = "right", + return FastLlamaModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, token = token, + device_map = device_map, + rope_scaling = rope_scaling, + fix_tokenizer = fix_tokenizer, + model_patcher = FastMistralModel, + tokenizer_name = tokenizer_name, trust_remote_code = trust_remote_code, + **kwargs, ) - - model, tokenizer = patch_tokenizer(model, tokenizer) - model = model_patcher.post_patch(model) - - # Patch up QKV / O and MLP - for idx, layer in enumerate(model.model.layers): - layer.self_attn.apply_qkv = original_apply_qkv - layer.self_attn.apply_o = original_apply_o - pass - - # Patch Trainer - from transformers.trainer import Trainer - try: - if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - inner_training_loop = inspect.getsource(Trainer._inner_training_loop) - Trainer._original_training_loop = inner_training_loop - else: - inner_training_loop = 
Trainer._original_training_loop - except: - raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", - ) - pass - - import transformers.trainer - items_in_trainer = dir(transformers.trainer) - good_items = [] - for item in items_in_trainer: - # TODO: Support Deepspeed - if item.startswith(("deepspeed", "xm", "met", "smp")): continue - if item in inner_training_loop: good_items.append(item) - pass - exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) - - start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] - end = inner_training_loop.find("\n\n", start) - original_debug = inner_training_loop[start:end] - spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] - front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) - - debug_info = """debug_info = \\ - f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ - f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning(debug_info) - import subprocess, re, gc - output = subprocess.check_output( - 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True) - output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) - output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) - if output > 1: 
raise RuntimeError( - 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.') - for _ in range(3): - gc.collect() - torch.cuda.empty_cache()""" - - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace(original_debug, debug_info) - - debug_info = """n_total_devices = total_train_batch_size // \\ - args.gradient_accumulation_steps // self._train_batch_size - if n_total_devices > 1: - logger.warning_once( - "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", - ) - debug_info =""" - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) - - front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) - inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = tpu_spmd_dataloader(train_dataloader)", - "raise RuntimeError('Unsloth: TPUs are not yet supported!')" - ) - inner_training_loop = inner_training_loop.replace( - "self.accelerator.free_memory()", - "self.accelerator.free_memory()\n" + \ - front_spaces + "if self.is_deepspeed_enabled:"\ - "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, - ) - - check_batches = """train_dataloader = self.get_train_dataloader() - ga = args.gradient_accumulation_steps - bsz = self._train_batch_size - total_batches = bsz * ga * args.world_size - n_total_devices = total_batches // ga // bsz - 
if n_total_devices > 1: - logger.warning_once( - "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", - ) - divisor = n_total_devices / 1 - bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 1: - divisor = n_total_devices / 1 - ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" - check_batches = check_batches.split('\n') - check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = self.get_train_dataloader()", - check_batches, 1, - ) - inner_training_loop = inner_training_loop.replace( - "_inner_training_loop", - "_fast_inner_training_loop", 1, - ) - exec(inner_training_loop, globals()) - - Trainer._inner_training_loop = _fast_inner_training_loop - inner_training_loop = inner_training_loop.replace( - "is_torch_tpu_available()", - "False", - ) - if "n_total_devices >" not in inner_training_loop: - raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! 
Appreciate it!", - ) - pass - inner_training_loop = inner_training_loop.replace( - "is_sagemaker_mp_enabled()", - "False", - ) - exec(inner_training_loop, globals()) - Trainer._inner_training_loop = _fast_inner_training_loop - - # Save max_seq_length - max_position_embeddings = max(max_seq_length, model.config.max_position_embeddings) - model.max_seq_length = max_position_embeddings - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_position_embeddings - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_position_embeddings - - # We check the tokenizer first for errors - if fix_tokenizer: - tokenizer = check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = max_position_embeddings, - padding_side = "right", - token = token, - ) - pass - patch_saving_functions(tokenizer) - - # Fix up config for transformers uploading PEFT - # Not necessary anymore since we require transformers>=4.37 - if False: - name = model.config._name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.config.update({"_name_or_path" : name}) - pass - - # Log Unsloth version for future fastpaths for inference - model.config.update({"unsloth_version" : __version__}) - - # Add save modules - patch_saving_functions(model) - Trainer._inner_training_loop = _fast_inner_training_loop - - # Save tokenizer for inference purposes - tokenizer.padding_side = "left" # Force inference - internal_model = model - while hasattr(internal_model, "model"): - internal_model._saved_temp_tokenizer = tokenizer - internal_model = internal_model.model - pass - internal_model._saved_temp_tokenizer = tokenizer - - return model, tokenizer pass pass diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py index 47327280b9..984bf7ca00 100644 --- a/unsloth/models/qwen2.py +++ b/unsloth/models/qwen2.py @@ -12,7 
+12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .mistral import * +from .llama import * from transformers.models.qwen2.modeling_qwen2 import ( Qwen2Attention, @@ -32,7 +32,7 @@ pass -class FastQwen2Model(FastMistralModel): +class FastQwen2Model(FastLlamaModel): @staticmethod def pre_patch(): @@ -57,30 +57,30 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "Qwen/Qwen2-7B", - max_seq_length = 4096, - dtype = None, - load_in_4bit = True, - token = None, - device_map = "sequential", - rope_scaling = None, # Qwen2 does not support RoPE scaling - fix_tokenizer = True, - model_patcher = None, - tokenizer_name = None, + model_name = "Qwen/Qwen2-7B", + max_seq_length = 4096, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, # Qwen2 does not support RoPE scaling + fix_tokenizer = True, + model_patcher = None, + tokenizer_name = None, trust_remote_code = False, **kwargs, ): - return FastMistralModel.from_pretrained( - model_name = model_name, - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - token = token, - device_map = device_map, - rope_scaling = rope_scaling, - fix_tokenizer = fix_tokenizer, - model_patcher = FastQwen2Model, - tokenizer_name = tokenizer_name, + return FastLlamaModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + fix_tokenizer = fix_tokenizer, + model_patcher = FastQwen2Model, + tokenizer_name = tokenizer_name, trust_remote_code = trust_remote_code, **kwargs, ) From 8ee9cc73ec256d3fe2c06bd7d6580dfe089a53ec Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 15 Jun 2024 18:26:25 +1000 Subject: [PATCH 0234/1088] Nightly (#646) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 
Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update 
mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. 
You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman --- unsloth/models/_utils.py | 7 +++++-- unsloth/models/gemma.py | 11 ++++++++--- unsloth/models/llama.py | 27 +++++++++++++++++---------- unsloth/models/mistral.py | 4 +++- 4 files changed, 33 insertions(+), 16 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a693389355..1b122fc8e1 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -372,6 +372,10 @@ def prepare_n_gradient_checkpoints( pass +# Unsloth only works on NVIDIA GPUs for now +device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," +device = f"cuda:{device_ids[:device_ids.find(',')]}" + class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): """ Saves VRAM by smartly offloading to RAM. 
@@ -393,7 +397,7 @@ def forward(ctx, forward_function, hidden_states, *args): @torch.cuda.amp.custom_bwd def backward(ctx, dY): (hidden_states,) = ctx.saved_tensors - hidden_states = hidden_states.to("cuda", non_blocking = True).detach() + hidden_states = hidden_states.to(device, non_blocking = True).detach() hidden_states.requires_grad = True with torch.enable_grad(): (output,) = ctx.forward_function(hidden_states, *ctx.args) @@ -457,7 +461,6 @@ def _prepare_backend( # Offloading to disk for modules (lm_head, embed_tokens) -import os import pickle def offload_to_disk(W, model, name, temporary_location : str = "_unsloth_temporary_saved_buffers"): diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 5dd2a5abd5..985028364b 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -38,6 +38,11 @@ GemmaFlashAttention2 = GemmaAttention pass +# Unsloth currently only works on one GPU +import os +device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," +device = f"cuda:{device_ids[:device_ids.find(',')]}" +# Please obtain a commercial license torch_nn_functional_gelu = torch.nn.functional.gelu def fast_geglu_inference(self, X): @@ -45,7 +50,7 @@ def fast_geglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape # mlp_size = self.config.intermediate_size - # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = device) gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) up = fast_linear_forward(self. 
up_proj, X)#, out = temp[1]) @@ -72,7 +77,7 @@ def GemmaDecoderLayer_fast_forward( *args, **kwargs, ): if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: - out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda") + out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = device) # Self Attention residual = hidden_states @@ -134,7 +139,7 @@ def GemmaModel_fast_forward_inference( position_ids, attention_mask = None, ): - out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda") + out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = device) input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7cbdcfbda7..9327b1bb45 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,6 +74,9 @@ def original_apply_o(self, X): return O pass +import os # Unsloth only works on NVIDIA GPUs for now +device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," +device = f"cuda:{device_ids[:device_ids.find(',')]}" from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size @@ -132,15 +135,15 @@ def LlamaAttention_fast_forward_inference( # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda") + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = 
V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda") - self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda") - self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda") - self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda") + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device) + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = device) self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 elif kv_seq_len >= self.paged_attention.shape[0]: @@ -170,7 +173,7 @@ def LlamaAttention_fast_forward_inference( Qn *= cos Qn.addcmul_(RH_Q, sin) - RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda") + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = device) RH_K[:,:,:,:h] = Kn[:,:,:,h:] RH_K[:,:,:,h:] = Kn[:,:,:,:h] torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) @@ -232,7 +235,7 @@ def fast_swiglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape # mlp_size = self.config.intermediate_size - # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda") + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = device) gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) up = fast_linear_forward(self. 
up_proj, X)#, out = temp[1]) @@ -522,7 +525,7 @@ def LlamaModel_fast_forward( position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype = torch.int32, - device = "cuda", + device = device, ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) elif position_ids is not None: @@ -842,8 +845,10 @@ def _CausalLM_fast_forward( if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): + device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," + device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) @@ -1822,7 +1827,9 @@ def patch_peft_model( # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 max_seq_length = model.max_seq_length - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") + device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," + device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = device) model.model.extra_ignored_labels = extra_ignored_labels internal_model = model while hasattr(internal_model, "model"): diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 291f0aa502..e147f21568 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -239,8 +239,10 @@ def MistralForCausalLM_fast_forward( if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): + device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," + device = 
f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) From 6a5b3a445adfa4731450f3cc9b537f53ebb0c7cc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 16 Jun 2024 03:39:00 +1000 Subject: [PATCH 0235/1088] Nightly (#648) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. 
* Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman --- unsloth/models/_utils.py | 3 +- unsloth/models/gemma.py | 4 +- unsloth/models/llama.py | 9 +- unsloth/models/mistral.py | 3 +- unsloth/save.py | 226 +++++++++++++++++++++++--------------- 5 files changed, 146 insertions(+), 99 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1b122fc8e1..49b8ba3953 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -374,7 +374,8 @@ def prepare_n_gradient_checkpoints( # Unsloth only works on NVIDIA GPUs for now device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = f"cuda:{device_ids[:device_ids.find(',')]}" +device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now +device = f"cuda:{device if device.isdigit() else '0'}" class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): """ diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 985028364b..0cc047d214 100644 --- a/unsloth/models/gemma.py +++ 
b/unsloth/models/gemma.py @@ -38,11 +38,9 @@ GemmaFlashAttention2 = GemmaAttention pass -# Unsloth currently only works on one GPU import os device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = f"cuda:{device_ids[:device_ids.find(',')]}" -# Please obtain a commercial license +device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now torch_nn_functional_gelu = torch.nn.functional.gelu def fast_geglu_inference(self, X): diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9327b1bb45..f2f79de8c9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -76,7 +76,8 @@ def original_apply_o(self, X): import os # Unsloth only works on NVIDIA GPUs for now device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = f"cuda:{device_ids[:device_ids.find(',')]}" +device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now +device = f"cuda:{device if device.isdigit() else '0'}" from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size @@ -846,7 +847,8 @@ def _CausalLM_fast_forward( shift_logits = logits if not hasattr(self, "extra_ignored_labels"): device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now + device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now + device = f"cuda:{device if device.isdigit() else '0'}" # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) pass @@ -1828,7 +1830,8 @@ def patch_peft_model( # Fixes https://github.com/unslothai/unsloth/issues/10 max_seq_length = model.max_seq_length device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now + device = 
device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now + device = f"cuda:{device if device.isdigit() else '0'}" extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = device) model.model.extra_ignored_labels = extra_ignored_labels internal_model = model diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e147f21568..832189beea 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -240,7 +240,8 @@ def MistralForCausalLM_fast_forward( shift_logits = logits if not hasattr(self, "extra_ignored_labels"): device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now + device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now + device = f"cuda:{device if device.isdigit() else '0'}" # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) pass diff --git a/unsloth/save.py b/unsloth/save.py index cae59caede..940feb40f9 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -418,6 +418,11 @@ def unsloth_save_model( print("Unsloth: Saving model...", end = "") if save_method != "lora": print(" This might take 10 minutes for Llama-7b...", end = "") + # [TODO] Is this correct? + if save_method == "lora": + save_pretrained_settings["selected_adapters"] = None + pass + model.save_pretrained(**save_pretrained_settings) if push_to_hub and hasattr(model, "config"): @@ -649,8 +654,9 @@ def unsloth_save_model( model.config = new_config # Save! - - save_pretrained_settings["selected_adapters"] = None + # [TODO] --> is this correct? 
+ # save_pretrained_settings["selected_adapters"] = None + # Check if pushing to an organization if save_pretrained_settings["push_to_hub"] and (username != actual_username): print(f"Unsloth: Saving to organization with address {new_save_directory}") @@ -834,7 +840,7 @@ def save_to_gguf( model_dtype : str, is_sentencepiece : bool = False, model_directory : str = "unsloth_finetuned_model", - quantization_method : str = "fast_quantized", + quantization_method = "fast_quantized", # Can be a list of options! ["q4_k_m", "q8_0", "q5_k_m"] first_conversion : str = None, _run_installer = None, # Non blocking install of llama.cpp ): @@ -846,6 +852,10 @@ def save_to_gguf( assert(model_dtype == "float16" or model_dtype == "bfloat16") model_dtype = "f16" if model_dtype == "float16" else "bf16" + # Convert quantization_method to list + quantization_method = \ + quantization_method if type(quantization_method) is list else list(quantization_method) + # Check if bfloat16 is supported if model_dtype == "bf16" and not torch.cuda.is_bf16_supported(): logger.warning( @@ -860,8 +870,11 @@ def save_to_gguf( first_conversion = model_dtype pass - if quantization_method.startswith("iq2"): - raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") + # Check I quants + for quant_method in quantization_method: + if quant_method.startswith("iq2"): + raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") + pass # Careful convert.py is only for Llama / Mistral based archs use_fast_convert = False @@ -871,25 +884,32 @@ def save_to_gguf( pass logger.warning_once(f"Unsloth: Converting {model_type} model. 
Can use fast conversion = {use_fast_convert}.") - if quantization_method == "not_quantized": quantization_method = model_dtype - elif quantization_method == "fast_quantized": quantization_method = "q8_0" - elif quantization_method == "quantized": quantization_method = "q4_k_m" - elif quantization_method is None: quantization_method = "q8_0" - pass + # Map quant methods + new_quantization_method = [] + for quant_method in quantization_method: + if quant_method == "not_quantized": quantization_method = model_dtype + elif quant_method == "fast_quantized": quantization_method = "q8_0" + elif quant_method == "quantized": quantization_method = "q4_k_m" + elif quant_method is None: quantization_method = "q8_0" + + # Check if wrong method + if quant_method not in ALLOWED_QUANTS.keys(): + error = f"Unsloth: Quant method = [{quant_method}] not supported. Choose from below:\n" + for key, value in ALLOWED_QUANTS.items(): + error += f"[{key}] => {value}\n" + raise RuntimeError(error) + pass - if quantization_method not in ALLOWED_QUANTS.keys(): - error = f"Unsloth: Quant method = [{quantization_method}] not supported. 
Choose from below:\n" - for key, value in ALLOWED_QUANTS.items(): - error += f"[{key}] => {value}\n" - raise RuntimeError(error) + new_quantization_method.append(quant_method) pass + quantization_method = new_quantization_method print_info = \ f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 20 minutes.\n"\ - f' "-____-" In total, you will have to wait around 26 minutes.\n' + f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 10 minutes each.\n"\ + f' "-____-" In total, you will have to wait at least 16 minutes.\n' print(print_info) # Check first_conversion format @@ -928,24 +948,37 @@ def save_to_gguf( install_llama_cpp_old(-10) pass - if quantization_method == "f32": first_conversion = "f32" - elif quantization_method == "f16": first_conversion = "f16" - elif quantization_method == "bf16": first_conversion = "bf16" - elif quantization_method == "q8_0": first_conversion = "q8_0" - else: - # Quantized models must have f16 as the default argument - if first_conversion == "f32" : pass - elif first_conversion == "f16" : pass - elif first_conversion == "bf16" : pass - elif first_conversion == "q8_0": - logger.warning_once( - "Unsloth: Using q8_0 for the `first_conversion` will lose a bit of accuracy, "\ - "but saves disk space!" 
- ) - # first_conversion = "f16" + # Determine maximum first_conversion state + if first_conversion == "f32" : strength = 3 + elif first_conversion == "f16" : strength = 2 + elif first_conversion == "bf16" : strength = 1 + elif first_conversion == "q8_0" : strength = 0 + + for quant_method in quantization_method: + if quant_method == "f32": strength = max(strength, 3) + elif quant_method == "f16": strength = max(strength, 2) + elif quant_method == "bf16": strength = max(strength, 1) + elif quant_method == "q8_0": strength = max(strength, 0) + else: + # Quantized models must have f16 as the default argument + if first_conversion == "f32" : pass + elif first_conversion == "f16" : pass + elif first_conversion == "bf16" : pass + elif first_conversion == "q8_0": + logger.warning_once( + "Unsloth: Using q8_0 for the `first_conversion` will lose a bit of accuracy, "\ + "but saves disk space!" + ) + # first_conversion = "f16" + pass pass pass + if strength >= 3: first_conversion = "f32" + elif strength >= 2: first_conversion = "f16" + elif strength >= 1: first_conversion = "bf16" + else: first_conversion = "q8_0" + # Non llama/mistral needs can only use f32 or f16 if not use_fast_convert and \ (first_conversion != "f16" or first_conversion != "bf16" or first_conversion != "f32"): @@ -1033,52 +1066,58 @@ def save_to_gguf( pass print(f"Unsloth: Conversion completed! Output location: {final_location}") - if quantization_method != first_conversion: - old_location = final_location - print(f"Unsloth: [2] Converting GGUF 16bit into {quantization_method}. 
This will take 20 minutes...") - final_location = f"./{model_directory}-unsloth.{quantization_method.upper()}.gguf" + full_precision_location = final_location - command = f"./{quantize_location} {old_location} "\ - f"{final_location} {quantization_method} {n_cpus}" - - # quantize uses stderr - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - if sp.returncode is not None and sp.returncode != 0: - raise subprocess.CalledProcessError(sp.returncode, sp.args) - pass + all_saved_locations = [] + # Convert each type! + for quant_method in quantization_method: + if quant_method != first_conversion: + print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. This will take 20 minutes...") + final_location = f"./{model_directory}-unsloth.{quant_method.upper()}.gguf" - # Check if quantization succeeded! - if not os.path.isfile(final_location): - if IS_KAGGLE_ENVIRONMENT: - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You are in a Kaggle environment, which might be the reason this is failing.\n"\ - "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ - "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ - "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ - "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." - ) - else: - raise RuntimeError( - "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ - "You do not need to close this Python program. 
Run the following commands in a new terminal:\n"\ - "You must run this in the same folder as you're saving your model.\n"\ - "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ - "cd llama.cpp && make clean && make all -j\n"\ - "Once that's done, redo the quantization." - ) + command = f"./{quantize_location} {full_precision_location} "\ + f"{final_location} {quant_method} {n_cpus}" + + # quantize uses stderr + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: + for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") + if "undefined reference" in line: + raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") + print(line, flush = True, end = "") + if sp.returncode is not None and sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, sp.args) pass - pass - print(f"Unsloth: Conversion completed! Output location: {final_location}") + # Check if quantization succeeded! + if not os.path.isfile(final_location): + if IS_KAGGLE_ENVIRONMENT: + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) + else: + raise RuntimeError( + "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ + "You do not need to close this Python program. 
Run the following commands in a new terminal:\n"\ + "You must run this in the same folder as you're saving your model.\n"\ + "git clone --recursive https://github.com/ggerganov/llama.cpp\n"\ + "cd llama.cpp && make clean && make all -j\n"\ + "Once that's done, redo the quantization." + ) + pass + pass + + print(f"Unsloth: Conversion completed! Output location: {final_location}") + all_saved_locations.append(final_location) + pass pass - return final_location + return all_saved_locations pass @@ -1453,7 +1492,7 @@ def unsloth_save_pretrained_gguf( is_sentencepiece_model = check_if_sentencepiece_model(self) # Save to GGUF - file_location = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, + all_file_locations = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1466,14 +1505,17 @@ def unsloth_save_pretrained_gguf( if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") - username = upload_to_huggingface( - self, save_directory, token, - "GGUF converted", "gguf", file_location, old_username, private, - ) - link = f"{username}/{new_save_directory.lstrip('/.')}" \ - if username not in new_save_directory else \ - new_save_directory.lstrip('/.') - print(f"Saved GGUF to https://huggingface.co/{link}") + + for file_location in all_file_locations: + username = upload_to_huggingface( + self, save_directory, token, + "GGUF converted", "gguf", file_location, old_username, private, + ) + link = f"{username}/{new_save_directory.lstrip('/.')}" \ + if username not in new_save_directory else \ + new_save_directory.lstrip('/.') + print(f"Saved GGUF to https://huggingface.co/{link}") + pass pass pass @@ -1604,20 +1646,22 @@ def unsloth_push_to_hub_gguf( is_sentencepiece_model = check_if_sentencepiece_model(self) # Save to GGUF - file_location = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, + all_file_locations = save_to_gguf(model_type, model_dtype, 
is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) - print("Unsloth: Uploading GGUF to Huggingface Hub...") - username = upload_to_huggingface( - self, repo_id, token, - "GGUF converted", "gguf", file_location, old_username, private, - ) - link = f"{username}/{new_save_directory.lstrip('/.')}" \ - if username not in new_save_directory else \ - new_save_directory.lstrip('/.') + for file_location in all_file_locations: + print("Unsloth: Uploading GGUF to Huggingface Hub...") + username = upload_to_huggingface( + self, repo_id, token, + "GGUF converted", "gguf", file_location, old_username, private, + ) + link = f"{username}/{new_save_directory.lstrip('/.')}" \ + if username not in new_save_directory else \ + new_save_directory.lstrip('/.') - print(f"Saved GGUF to https://huggingface.co/{link}") + print(f"Saved GGUF to https://huggingface.co/{link}") + pass if fix_bos_token: logger.warning( From 9b4802f73268a12c0f057f3598c65bf3f5704bba Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 16 Jun 2024 04:32:21 +1000 Subject: [PATCH 0236/1088] Nightly (#649) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. 
* Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. 
Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. * Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman --- unsloth/chat_templates.py | 1 + unsloth/models/llama.py | 33 +++++++++++++++++++++++++++++++-- unsloth/models/loader.py | 36 ++++++++++++++++++++++++++---------- 3 files changed, 58 insertions(+), 12 
deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 2e3761f567..a2a02d7e6e 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -528,6 +528,7 @@ def get_chat_template( chat_template, stop_word = chat_template assert(type(chat_template) is str) assert(type(stop_word) is str) + ollama_modelfile = None elif type(chat_template) is str: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f2f79de8c9..3d969d7d31 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1423,9 +1423,38 @@ def get_peft_model( transformers_set_seed(random_state) if isinstance(model, PeftModelForCausalLM): - raise TypeError( - "Unsloth: Your model already has LoRA adapters. No need to run this again!" + # Check if exactly the same and then pass through! + assert(hasattr(model, "peft_config")) + + peft_config = model.peft_config["default"].to_dict() + check_parameters = [ + "r", "lora_alpha", "lora_dropout", + "bias", "layers_to_transform", "layers_pattern", + "use_rslora", "modules_to_save", "init_lora_weights", + ] + check_all = True + for param in check_parameters: + check_all = check_all and (peft_config[param] == eval(param)) + pass + check_all = check_all and ( + len(set(peft_config["target_modules"]) ^ set(target_modules)) == 0 ) + check_all = check_all and ( + (loftq_config == {} or loftq_config is None) and \ + (peft_config["loftq_config"] == {} or peft_config["loftq_config"] is None) + ) + + if check_all: + # Simply pass through! + logger.warning( + "Unsloth: Already have LoRA adapters! We shall skip this step." + ) + return model + else: + raise TypeError( + "Unsloth: Your model already has LoRA adapters. Your new parameters are different." 
+ ) + pass pass if loftq_config is None: loftq_config = {} diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index de1e2e57bf..d7c0f0760f 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -91,21 +91,37 @@ def from_pretrained( model_name = _get_model_name(model_name, load_in_4bit) # First check if it's a normal model via AutoConfig - is_peft = False try: model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) - is_peft = False + is_model = True + except: + is_model = False + try: + peft_config = PeftConfig .from_pretrained(model_name, token = token, revision = revision) + is_peft = True except: - try: - # Most likely a PEFT model - peft_config = PeftConfig.from_pretrained(model_name, token = token, revision = revision) - except: - raise RuntimeError(f"Unsloth: `{model_name}` is not a full model or a PEFT model.") - + is_peft = False + + # Cannot be both! + if is_model and is_peft: + raise RuntimeError( + "Unsloth: You repo has a LoRA adapter and a base model.\n"\ + "You have 2 files `config.json` and `adapter_config.json`.\n"\ + "We must only allow one config file.\n"\ + "Please separate the LoRA and base models to 2 repos." 
+ ) + elif not is_model and not is_peft: + raise RuntimeError( + f"Unsloth: `{model_name}` is not a base model or a PEFT model.\n"\ + "We could not locate a `config.json` or `adapter_config.json` file" + ) + pass + + # Get base model for PEFT: + if is_peft: # Check base model again for PEFT model_name = _get_model_name(peft_config.base_model_name_or_path, load_in_4bit) - model_config = AutoConfig.from_pretrained(model_name, token = token) - is_peft = True + model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) pass model_type = model_config.model_type From a2ee56813ed67b7f5336793cbca84442a94140fd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 16 Jun 2024 14:51:58 +1000 Subject: [PATCH 0237/1088] Fix GGUF (#654) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * 
Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. 
* Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/save.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 940feb40f9..f8f884a9d3 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -853,9 +853,13 @@ def save_to_gguf( model_dtype = "f16" if model_dtype == "float16" else "bf16" # Convert quantization_method to list - quantization_method = \ - quantization_method if type(quantization_method) is list else list(quantization_method) - + if isinstance(quantization_method, list): pass + elif isinstance(quantization_method, str): quantization_method = [ quantization_method, ] + elif isinstance(quantization_method, tuple): quantization_method = list(quantization_method) + else: + raise TypeError("Unsloth: quantization_method can only be a string or a list of strings") + pass + # Check if bfloat16 is supported if model_dtype == "bf16" and not torch.cuda.is_bf16_supported(): logger.warning( From 64bb8cfd512a9dcd860d21563b624676f7432ec5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 17 Jun 2024 00:39:20 +1000 Subject: [PATCH 0238/1088] Fix continuing LoRA finetuning (#656) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 
GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. 
Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/models/llama.py | 20 +++++++++++++++++++- unsloth/models/loader.py | 3 ++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3d969d7d31..022be109fe 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1868,7 +1868,25 @@ def patch_peft_model( internal_model.max_seq_length = max_seq_length internal_model = internal_model.model pass - internal_model.max_seq_length = max_seq_length + internal_model.max_seq_length = max_seq_length + + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + 
internal_model._saved_temp_tokenizer.padding_side = "right" + pass + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass return model pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d7c0f0760f..d87af0a18d 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -113,7 +113,8 @@ def from_pretrained( elif not is_model and not is_peft: raise RuntimeError( f"Unsloth: `{model_name}` is not a base model or a PEFT model.\n"\ - "We could not locate a `config.json` or `adapter_config.json` file" + "We could not locate a `config.json` or `adapter_config.json` file.\n"\ + "Are you certain the model name is correct? Does it actually exist?" ) pass From 87703089fa0ad60f008b7a7990f5cf3e77ccd26e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 19 Jun 2024 04:53:26 +1000 Subject: [PATCH 0239/1088] Ollama (#665) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. 
* Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. 
Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. * Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed 
unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. 
Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. * Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * 
Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. 
(#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth-cli.py | 221 +++++++++++++++++++++++++ unsloth/chat_templates.py | 327 +++++++++++++++++++++++++++++++++---- unsloth/models/llama.py | 52 +++++- unsloth/tokenizer_utils.py | 3 +- 4 files changed, 569 insertions(+), 34 deletions(-) create mode 100644 unsloth-cli.py diff --git a/unsloth-cli.py b/unsloth-cli.py new file mode 100644 index 0000000000..ddb0ac8b7b --- /dev/null +++ b/unsloth-cli.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 + +""" +🦥 Starter Script for Fine-Tuning FastLanguageModel with Unsloth + +This script is designed as a starting point for fine-tuning your models using unsloth. +It includes configurable options for model loading, PEFT parameters, training arguments, +and model saving/pushing functionalities. + +You will likely want to customize this script to suit your specific use case +and requirements. + +Here are a few suggestions for customization: + - Modify the dataset loading and preprocessing steps to match your data. + - Customize the model saving and pushing configurations. 
+ +Usage: (most of the options have valid default values this is an extended example for demonstration purposes) + python unsloth-cli.py --model_name "unsloth/llama-3-8b" --max_seq_length 8192 --dtype None --load_in_4bit \ + --r 64 --lora_alpha 32 --lora_dropout 0.1 --bias "none" --use_gradient_checkpointing "unsloth" \ + --random_state 3407 --use_rslora --per_device_train_batch_size 4 --gradient_accumulation_steps 8 \ + --warmup_steps 5 --max_steps 400 --learning_rate 2e-6 --logging_steps 1 --optim "adamw_8bit" \ + --weight_decay 0.005 --lr_scheduler_type "linear" --seed 3407 --output_dir "outputs" \ + --report_to "tensorboard" --save_model --save_path "model" --quantization_method "f16" \ + --push_model --hub_path "hf/model" --hub_token "your_hf_token" + +To see a full list of configurable options, use: + python unsloth-cli.py --help + +Happy fine-tuning! +""" + +import argparse + +def run(args): + import torch + from unsloth import FastLanguageModel + from datasets import load_dataset + from trl import SFTTrainer + from transformers import TrainingArguments + from unsloth import is_bfloat16_supported + import logging + logging.getLogger('hf-to-gguf').setLevel(logging.WARNING) + + # Load model and tokenizer + model, tokenizer = FastLanguageModel.from_pretrained( + model_name=args.model_name, + max_seq_length=args.max_seq_length, + dtype=args.dtype, + load_in_4bit=args.load_in_4bit, + ) + + # Configure PEFT model + model = FastLanguageModel.get_peft_model( + model, + r=args.r, + target_modules=["q_proj", "k_proj", "v_proj", "o_proj", + "gate_proj", "up_proj", "down_proj"], + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout, + bias=args.bias, + use_gradient_checkpointing=args.use_gradient_checkpointing, + random_state=args.random_state, + use_rslora=args.use_rslora, + loftq_config=args.loftq_config, + ) + + alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. 
Write a response that appropriately completes the request. + + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + def formatting_prompts_func(examples): + instructions = examples["instruction"] + inputs = examples["input"] + outputs = examples["output"] + texts = [] + for instruction, input, output in zip(instructions, inputs, outputs): + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + texts.append(text) + return {"text": texts} + + # Load and format dataset + dataset = load_dataset(args.dataset, split="train") + dataset = dataset.map(formatting_prompts_func, batched=True) + print("Data is formatted and ready!") + + # Configure training arguments + training_args = TrainingArguments( + per_device_train_batch_size=args.per_device_train_batch_size, + gradient_accumulation_steps=args.gradient_accumulation_steps, + warmup_steps=args.warmup_steps, + max_steps=args.max_steps, + learning_rate=args.learning_rate, + fp16=not is_bfloat16_supported(), + bf16=is_bfloat16_supported(), + logging_steps=args.logging_steps, + optim=args.optim, + weight_decay=args.weight_decay, + lr_scheduler_type=args.lr_scheduler_type, + seed=args.seed, + output_dir=args.output_dir, + report_to=args.report_to, + ) + + # Initialize trainer + trainer = SFTTrainer( + model=model, + tokenizer=tokenizer, + train_dataset=dataset, + dataset_text_field="text", + max_seq_length=args.max_seq_length, + dataset_num_proc=2, + packing=False, + args=training_args, + ) + + # Train model + trainer_stats = trainer.train() + + # Save model + if args.save_model: + # if args.quantization_method is a list, we will save the model for each quantization method + if args.save_gguf: + if isinstance(args.quantization, list): + for quantization_method in args.quantization: + print(f"Saving model with quantization method: {quantization_method}") + model.save_pretrained_gguf( + args.save_path, + tokenizer, + 
quantization_method=quantization_method, + ) + if args.push_model: + model.push_to_hub_gguf( + hub_path=args.hub_path, + hub_token=args.hub_token, + quantization_method=quantization_method, + ) + else: + print(f"Saving model with quantization method: {args.quantization}") + model.save_pretrained_gguf(args.save_path, tokenizer, quantization_method=args.quantization) + if args.push_model: + model.push_to_hub_gguf( + hub_path=args.hub_path, + hub_token=args.hub_token, + quantization_method=quantization_method, + ) + else: + model.save_pretrained_merged(args.save_path, tokenizer, args.save_method) + if args.push_model: + model.push_to_hub_merged(args.save_path, tokenizer, args.hub_token) + else: + print("Warning: The model is not saved!") + + +if __name__ == "__main__": + + # Define argument parser + parser = argparse.ArgumentParser(description="🦥 Fine-tune your llm faster using unsloth!") + + model_group = parser.add_argument_group("🤖 Model Options") + model_group.add_argument('--model_name', type=str, default="unsloth/llama-3-8b", help="Model name to load") + model_group.add_argument('--max_seq_length', type=int, default=2048, help="Maximum sequence length, default is 2048. We auto support RoPE Scaling internally!") + model_group.add_argument('--dtype', type=str, default=None, help="Data type for model (None for auto detection)") + model_group.add_argument('--load_in_4bit', action='store_true', help="Use 4bit quantization to reduce memory usage") + model_group.add_argument('--dataset', type=str, default="yahma/alpaca-cleaned", help="Huggingface dataset to use for training") + + lora_group = parser.add_argument_group("🧠 LoRA Options", "These options are used to configure the LoRA model.") + lora_group.add_argument('--r', type=int, default=16, help="Rank for Lora model, default is 16. (common values: 8, 16, 32, 64, 128)") + lora_group.add_argument('--lora_alpha', type=int, default=16, help="LoRA alpha parameter, default is 16. 
(common values: 8, 16, 32, 64, 128)") + lora_group.add_argument('--lora_dropout', type=float, default=0, help="LoRA dropout rate, default is 0.0 which is optimized.") + lora_group.add_argument('--bias', type=str, default="none", help="Bias setting for LoRA") + lora_group.add_argument('--use_gradient_checkpointing', type=str, default="unsloth", help="Use gradient checkpointing") + lora_group.add_argument('--random_state', type=int, default=3407, help="Random state for reproducibility, default is 3407.") + lora_group.add_argument('--use_rslora', action='store_true', help="Use rank stabilized LoRA") + lora_group.add_argument('--loftq_config', type=str, default=None, help="Configuration for LoftQ") + + + training_group = parser.add_argument_group("🎓 Training Options") + training_group.add_argument('--per_device_train_batch_size', type=int, default=2, help="Batch size per device during training, default is 2.") + training_group.add_argument('--gradient_accumulation_steps', type=int, default=4, help="Number of gradient accumulation steps, default is 4.") + training_group.add_argument('--warmup_steps', type=int, default=5, help="Number of warmup steps, default is 5.") + training_group.add_argument('--max_steps', type=int, default=400, help="Maximum number of training steps.") + training_group.add_argument('--learning_rate', type=float, default=2e-4, help="Learning rate, default is 2e-4.") + training_group.add_argument('--optim', type=str, default="adamw_8bit", help="Optimizer type.") + training_group.add_argument('--weight_decay', type=float, default=0.01, help="Weight decay, default is 0.01.") + training_group.add_argument('--lr_scheduler_type', type=str, default="linear", help="Learning rate scheduler type, default is 'linear'.") + training_group.add_argument('--seed', type=int, default=3407, help="Seed for reproducibility, default is 3407.") + + + # Report/Logging arguments + report_group = parser.add_argument_group("📊 Report Options") + 
report_group.add_argument('--report_to', type=str, default="tensorboard", + choices=["azure_ml", "clearml", "codecarbon", "comet_ml", "dagshub", "dvclive", "flyte", "mlflow", "neptune", "tensorboard", "wandb", "all", "none"], + help="The list of integrations to report the results and logs to. Supported platforms are: \n\t\t 'azure_ml', 'clearml', 'codecarbon', 'comet_ml', 'dagshub', 'dvclive', 'flyte', 'mlflow', 'neptune', 'tensorboard', and 'wandb'. Use 'all' to report to all integrations installed, 'none' for no integrations.") + report_group.add_argument('--logging_steps', type=int, default=1, help="Logging steps, default is 1") + + # Saving and pushing arguments + save_group = parser.add_argument_group('💾 Save Model Options') + save_group.add_argument('--output_dir', type=str, default="outputs", help="Output directory") + save_group.add_argument('--save_model', action='store_true', help="Save the model after training") + save_group.add_argument('--save_method', type=str, default="merged_16bit", choices=["merged_16bit", "merged_4bit", "lora"], help="Save method for the model, default is 'merged_16bit'") + save_group.add_argument('--save_gguf', action='store_true', help="Convert the model to GGUF after training") + save_group.add_argument('--save_path', type=str, default="model", help="Path to save the model") + save_group.add_argument('--quantization', type=str, default="q8_0", nargs="+", + help="Quantization method for saving the model. 
common values ('f16', 'q4_k_m', 'q8_0'), Check our wiki for all quantization methods https://github.com/unslothai/unsloth/wiki#saving-to-gguf ") + + push_group = parser.add_argument_group('🚀 Push Model Options') + push_group.add_argument('--push_model', action='store_true', help="Push the model to Hugging Face hub after training") + push_group.add_argument('--push_gguf', action='store_true', help="Push the model as GGUF to Hugging Face hub after training") + push_group.add_argument('--hub_path', type=str, default="hf/model", help="Path on Hugging Face hub to push the model") + push_group.add_argument('--hub_token', type=str, help="Token for pushing the model to Hugging Face hub") + + args = parser.parse_args() + run(args) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index a2a02d7e6e..7b6da3e449 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -17,9 +17,11 @@ "test_chat_templates", "test_hf_gguf_equivalence", "remove_special_tokens", - "standardize_dataset", - "construct_chat_template", + "to_sharegpt", + "standardize_sharegpt", + "apply_chat_template", + "test_construct_chat_template", "create_ollama_modelfile", ] @@ -32,6 +34,7 @@ import shutil from .tokenizer_utils import * from .models._utils import patch_tokenizer +import re CHAT_TEMPLATES = {} @@ -713,21 +716,209 @@ def remove_special_tokens(tokenizer, prompt): pass -def standardize_dataset( +def _parse_combined_prompt(combined_prompt, dataset): + # Find {...} + possible_columns = re.findall(r"\{(.+?)\}", combined_prompt) + dataset_columns = set(dataset.column_names) + for column in possible_columns: + if column not in dataset_columns: + raise KeyError( + f"Unsloth: Your prompt includes '{column}' but this does not exist in the dataset. 
"\ + f"Only allowed columns are {list(dataset_columns)}" + ) + pass + pass + + # Find [[...]] + optional_prompts = list(re.finditer(r"\[\[.+?\]\]", combined_prompt, flags = re.DOTALL | re.MULTILINE)) + optional_prompts = [(x.span(), x.group(0)) for x in optional_prompts] + + final_optional_prompts = [] + if len(optional_prompts) != 0: + # Add left + left = optional_prompts[0] + l = left[0][0] + if l != 0: final_optional_prompts.append(combined_prompt[:l]) + + # Add in between + for left, right in zip(optional_prompts[:-1], optional_prompts[1:]): + l, r = left[0][-1], right[0][0] + final_optional_prompts.append(left) + if l != r: final_optional_prompts.append(combined_prompt[l : r]) + pass + final_optional_prompts.append(optional_prompts[-1]) + + # Add right + right = optional_prompts[-1] + r = right[0][1] + if r != len(combined_prompt): final_optional_prompts.append(combined_prompt[r:]) + else: + # Just add in the entire string + final_optional_prompts.append(combined_prompt) + pass + + check_combined = "".join(x if type(x) is str else x[1] for x in final_optional_prompts) + assert(combined_prompt == check_combined) + + return possible_columns, final_optional_prompts +pass + + +def _create_formatter(possible_columns, final_optional_prompts, user_column_name): + # Start final prompt! + function = ["def __combined_prompt_processor__(examples):"] + columns = list(set(possible_columns)) + for column in columns: + function.append(f"{' '*4}{column}__ = examples['{column}']") + function.append(f"{' '*4}texts = []") + function.append(f"{' '*4}for ({', '.join(columns)}) in zip({', '.join(f'{x}__' for x in columns)}):") + + # Add optional tags as well! 
+ final_prompt = "" + formatter = [] + + for j, optional_prompt in enumerate(final_optional_prompts): + if type(optional_prompt) is str: + columns = re.findall(r"\{(.+?)\}", optional_prompt) + formatter += columns + # Must escape \n \r + final_prompt += optional_prompt.encode("unicode-escape").decode("utf-8") + else: + where, prompt = optional_prompt + # Strip [[...]] + # Must escape \n \r + prompt = prompt[2:-2].encode("unicode-escape").decode("utf-8") + columns = re.findall(r"\{(.+?)\}", prompt) + x = f"__optional_{j}__" + prompt = f"{' '*8}{x} = '{prompt}'.format({', '.join(f'{x} = {x}' for x in columns)}) if input else ''" + function.append(prompt) + formatter.append(x) + final_prompt += "{" + x + "}" + pass + pass + + function.insert(1, f"{' '*4}__combined_prompt__ = '{final_prompt}'") + function.append(f"{' '*8}texts.append("\ + f"__combined_prompt__.format({', '.join(f'{x} = {x}' for x in formatter)}))") + function.append(f"{' '*4}return " + "{ " + f"'{user_column_name}' : texts" + " }") + return "\n".join(function) +pass + + +def to_sharegpt( + dataset, + merged_prompt = "", + merged_column_name = "instruction", + output_column_name = "output", + remove_unsued_columns = True, + conversation_extension = 1, + random_state = 3407, +): + """ + Converts a dataset to ShareGPT style. + ShareGPT requires only 1 input and 1 output field. + This means one has to merge multiple columns into 1 for 1 input field. + Use `conversation_extension` to increase the length of each conversation by randomnly + selecting a few and packing them into 1. 
+ + merged_prompt = "", Prompt to merge columns into 1 input + merged_column_name = "instruction", Final column name for the input field + output_column_name = "output", Final column name for the output field + remove_unsued_columns = True, + conversation_extension = 1, Automatically combines `conversation_extension` convos into 1 + random_state = 3407, + """ + if "conversations" in dataset.column_names: + convo = dataset[0]["conversations"] + if type(convo) is list: + raise TypeError("Unsloth: Your dataset is probably already in ShareGPT format!") + pass + pass + + possible_columns, final_optional_prompts = _parse_combined_prompt(merged_prompt, dataset) + function = _create_formatter(possible_columns, final_optional_prompts, merged_column_name) + exec(function, globals()) + dataset = dataset.map(__combined_prompt_processor__, batched = True, desc = "Merging columns") + + def __convert_to_sharegpt__(examples): + users = examples[merged_column_name] + assistants = examples[output_column_name] + texts = [] + for user, assistant in zip(users, assistants): + texts.append([ + {"from" : "user", "content" : user }, + {"from" : "assistant", "content" : assistant}, + ]) + pass + return { "conversations" : texts, } + pass + + dataset = dataset.map( + __convert_to_sharegpt__, + batched = True, + desc = "Converting to ShareGPT", + # Remove unsued columns! + remove_columns = dataset.column_names if remove_unsued_columns else None, + ) + + # Randomnly concat conversations to create a long stream! 
+ from datasets import concatenate_datasets + n_extensions = max(conversation_extension-1, 0) + if n_extensions == 0: return dataset + + dataset = dataset.rename_columns({"conversations" : f"conversations0"}) + all_shuffled = [dataset] + for j in range(1, n_extensions+1): + shuffled = dataset.shuffle(seed = random_state+j).rename_columns({"conversations0" : f"conversations{j}"}) + all_shuffled.append(shuffled) + pass + dataset = concatenate_datasets(all_shuffled, axis = 1) + + # Combine them into 1 + function = "def __combine_conversations__(examples):\n" + n_extensions += 1 + for j in range(n_extensions): + function += f"{' '*4}conversations{j}__ = examples['conversations{j}']\n" + function += f"{' '*4}convos = []\n" + function += f"{' '*4}for ({', '.join(f'conversations{j}' for j in range(n_extensions))}) "\ + f"in zip({', '.join(f'conversations{j}__' for j in range(n_extensions))}):\n" + function += f"{' '*8}convos.append("\ + f"{'+'.join(f'conversations{j}' for j in range(n_extensions))})\n" + function += f"{' '*4}return " + "{ " + f"'conversations' : convos" + " }" + + # Map function + exec(function, globals()) + dataset = dataset.map( + __combine_conversations__, + batched = True, + desc = "Extending conversations", + # Remove unsued columns! + remove_columns = dataset.column_names if remove_unsued_columns else None, + ) + return dataset +pass + + +def standardize_sharegpt( dataset, - conversation_key = "conversations", - system_message = None, aliases_for_system = ["system",], aliases_for_user = ["user", "human", "input",], aliases_for_assistant = ["gpt", "assistant", "output",], ): """ - Standardizes ShareGPT and other formats to user/assistant Hugging Face format. + Standardizes ShareGPT and other formats to user/assistant Hugging Face format. + + Get aliases for the system, user and assistant roles. + These shall map to "system", "user" and "assistant" respectively. 
+ + aliases_for_system = ["system",], + aliases_for_user = ["user", "human", "input",], + aliases_for_assistant = ["gpt", "assistant", "output",], """ import collections import itertools - convos = dataset[:10][conversation_key] + convos = dataset[:10]["conversations"] uniques = collections.defaultdict(list) for convo in convos: for message in convo: @@ -768,24 +959,19 @@ def standardize_dataset( for x in aliases_for_assistant: aliases_mapping[x] = "assistant" def _standardize_dataset(examples): - convos = examples[conversation_key] + convos = examples["conversations"] all_convos = [] for convo in convos: - new_convo = [] - if len(convo) == 0: continue - has_system = aliases_mapping[convo[0][role_key]] == "system" - if not has_system and system_message is not None: - new_convo.append({ "role" : "system", "content" : system_message, }) - for message in convo: - role = aliases_mapping[message[role_key]] - new_convo.append({ "role" : role, "content" : message[content_key], }) - pass + new_convo = [ + { "role" : aliases_mapping[message[role_key]], "content" : message[content_key], } + for message in convo + ] all_convos.append(new_convo) pass - return { conversation_key : all_convos, } + return { "conversations" : all_convos, } pass - return dataset.map(_standardize_dataset, batched = True,) + return dataset.map(_standardize_dataset, batched = True, desc = "Standardizing format") pass @@ -837,7 +1023,7 @@ def construct_chat_template( \ tokenizer = None, -template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> +chat_template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> {SYSTEM}<|eot_id|><|start_header_id|>user<|end_header_id|> @@ -851,7 +1037,7 @@ def construct_chat_template( \ default_system_message = \ "Below are some instructions that describe some tasks. 
Write responses that appropriately complete each request.", - + extra_eos_tokens = None, ): @@ -865,6 +1051,7 @@ def construct_chat_template( \ assert(tokenizer is not None) if extra_eos_tokens is None: extra_eos_tokens = [] + elif type(extra_eos_tokens) is str: extra_eos_tokens = [extra_eos_tokens,] vocab = tokenizer.get_vocab() for extra_eos in extra_eos_tokens: @@ -883,11 +1070,30 @@ def construct_chat_template( \ "### Input:\\n{INPUT}\\n\\n### Response:\\n{OUTPUT}\\n"\ "### Input:\\n{INPUT}\\n\\n### Response:\\n{OUTPUT}\\n" + # Check for EOS after {OUTPUT} + if tokenizer.eos_token is not None: + extra_eos_tokens.insert(0, tokenizer.eos_token) + if len(extra_eos_tokens) == 0: + raise RuntimeError( + "Unsloth: Your tokenizer does not have an EOS token? Please provide one via extra_eos_tokens!" + ) + pass + + count_eos = 0 + for eos in extra_eos_tokens: + count_eos += len(re.findall(r"{OUTPUT}" + eos.encode("unicode-escape").decode("utf-8"), chat_template)) + pass + if count_eos == 0: + logger.warning("Unsloth: We automatically added an EOS token to stop endless generations.") + eos = extra_eos_tokens[0] + chat_template = re.sub(r"{OUTPUT}", r"{OUTPUT}" + eos.encode("unicode-escape").decode("utf-8"), chat_template) + pass + # O(N^2) search finding 2 repeatted pieces of text - j = len(template)-1 + j = len(chat_template)-1 at_least_one = False while j > 0: - found = template.rfind(template[j:], 0, j) + found = chat_template.rfind(chat_template[j:], 0, j) if found == -1: break j -= 1 at_least_one = True @@ -895,19 +1101,18 @@ def construct_chat_template( \ if j > 0: j += 1 else: raise RuntimeError(error_msg) - if not at_least_one: raise RuntimeError(error_msg) # Repeatted text - instruction_response = template[j:] + instruction_response = chat_template[j:] if instruction_response.count("{INPUT}") != 1 or instruction_response.count("{OUTPUT}") != 1: raise RuntimeError(error_msg) pass # 1st System, Instruction, Output pair - left = template[:j] + left = 
chat_template[:j] # 2nd Instruction, Output pair - right = template[j:] + right = chat_template[j:] # Isolate input extra_eos_tokens_regex = "|".join(f"(?:{re.escape(x)})" for x in extra_eos_tokens) @@ -952,7 +1157,12 @@ def construct_chat_template( \ ollama_system = ollama_system[len(tokenizer.bos_token):] pass pass - system_modelfile = "{{ if .System }}" + ollama_system.replace("{SYSTEM}", "{{ .System }}") + "{{ end }}" + # Check system + if "{SYSTEM}" in ollama_system: + system_modelfile = "{{ if .System }}" + ollama_system.replace("{SYSTEM}", "{{ .System }}") + "{{ end }}" + else: + system_modelfile = ollama_system + pass input_modelfile = "{{ if .Prompt }}" + input_part .replace("{INPUT}", "{{ .Prompt }}") + "{{ end }}" output_modelfile = output_part.replace("{OUTPUT}", "{{ .Response }}") @@ -1005,6 +1215,14 @@ def process(part, which, content = "message['content']"): partial_system = process(system_part, "{SYSTEM}", "messages[0]['content']") partial_system = partial_system.replace("{SYSTEM}", "") + # If {SYSTEM} is non existent, simply just use the content + if "{SYSTEM}" not in partial_system: + partial_system = "messages[0]['content']" + else: + if default_system_message is None: + raise RuntimeError("Unsloth: Please specify a default system message!") + pass + # Separate the BOS if has_bos_token: partial_system = partial_system.replace(tokenizer.bos_token, "", 1) @@ -1015,10 +1233,14 @@ def process(part, which, content = "message['content']"): "{{ " + partial_system + " }}"\ "{% set loop_messages = messages[1:] %}" if default_system_message is not None: + full_system = system_part.replace("{SYSTEM}", default_system_message) partial_system += "{% else %}"\ - "{{ '" + system_part.replace("{SYSTEM}", default_system_message) + "' }}"\ + "{{ '" + full_system + "' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}" + + # Add to modelfile + modelfile += '\nSYSTEM "' + full_system + '"' else: partial_system += "{% endif %}" pass @@ -1075,6 +1297,53 @@ def 
test_construct_chat_template(): pass +def apply_chat_template( \ + +dataset, +tokenizer = None, + +chat_template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> + +{SYSTEM}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|><|start_header_id|>user<|end_header_id|> + +{INPUT}<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +{OUTPUT}<|eot_id|>""", + +default_system_message = \ + "Below are some instructions that describe some tasks. Write responses that appropriately complete each request.", + +extra_eos_tokens = None, + +): + """ + Creates a Ollama modelfile and a HF Jinja template from a custom + template. You must provide 2x examples of an input & output. + There is an optional system message as well. + + You must use {INPUT}, {OUTPUT} twice, and {SYSTEM} is optional. + """ + modelfile, jinja_template = construct_chat_template( + tokenizer = tokenizer, + chat_template = chat_template, + default_system_message = default_system_message, + extra_eos_tokens = extra_eos_tokens, + ) + def formatting_prompts_func(examples): + convos = examples["conversations"] + texts = [tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = False) for convo in convos] + return { "text" : texts, } + pass + tokenizer.chat_template = jinja_template + tokenizer._ollama_modelfile = modelfile + return dataset.map(formatting_prompts_func, batched = True,) +pass + + def create_ollama_modelfile(tokenizer, gguf_location): """ Creates an Ollama Modelfile. 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 022be109fe..9db7fcf2db 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1430,15 +1430,30 @@ def get_peft_model( check_parameters = [ "r", "lora_alpha", "lora_dropout", "bias", "layers_to_transform", "layers_pattern", - "use_rslora", "modules_to_save", "init_lora_weights", + "use_rslora", "init_lora_weights", ] check_all = True for param in check_parameters: check_all = check_all and (peft_config[param] == eval(param)) pass + + # Check save_modules + old_target_modules = list(peft_config["target_modules"]) + modules_to_save = peft_config["modules_to_save"] + if modules_to_save is None: modules_to_save = {} + modules_to_save = list(modules_to_save) + old_target_modules += modules_to_save + + # Combine all + new_target_modules = list(target_modules) + \ + list(modules_to_save if modules_to_save is not None else []) + + # Now check! + new_target_modules = set(new_target_modules) check_all = check_all and ( - len(set(peft_config["target_modules"]) ^ set(target_modules)) == 0 + len(set(old_target_modules) ^ new_target_modules) == 0 ) + check_all = check_all and ( (loftq_config == {} or loftq_config is None) and \ (peft_config["loftq_config"] == {} or peft_config["loftq_config"] is None) @@ -1449,6 +1464,35 @@ def get_peft_model( logger.warning( "Unsloth: Already have LoRA adapters! We shall skip this step." ) + + # Offload! + # [TODO] First offload lm_head and embed_tokens to CPU (should be disk!!) + if "embed_tokens" in new_target_modules: + print("Unsloth: Casting embed_tokens to float32") + + model.model.model.embed_tokens.modules_to_save.default\ + .to(device = device, dtype = torch.float32, non_blocking = True) + model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) + + # [TODO] Move old embed_tokens to CPU - should be disk! 
+ model.model.model.embed_tokens.original_module\ + .to(device = "cpu", non_blocking = True) + model.model.model.embed_tokens.original_module.requires_grad_(False) + pass + + if "lm_head" in new_target_modules: + print("Unsloth: Casting lm_head to float32") + + model.model.lm_head.modules_to_save.default\ + .to(device = device, dtype = torch.float32, non_blocking = True) + model.model.lm_head.modules_to_save.default.requires_grad_(True) + + # [TODO] Move old lm_head to CPU - should be disk! + model.model.lm_head.original_module\ + .to(device = "cpu", non_blocking = True) + model.model.lm_head.original_module.requires_grad_(False) + pass + return model else: raise TypeError( @@ -1669,7 +1713,7 @@ def get_peft_model( print("Unsloth: Casting embed_tokens to float32") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) model.model.model.embed_tokens.modules_to_save.default\ - .to(device = input_embeddings_device, dtype = torch.float32, non_blocking = True) + .to(device = device, dtype = torch.float32, non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass @@ -1677,7 +1721,7 @@ def get_peft_model( print("Unsloth: Casting lm_head to float32") assert(hasattr(model.model.lm_head, "modules_to_save")) model.model.lm_head.modules_to_save.default\ - .to(device = output_embeddings_device, dtype = torch.float32, non_blocking = True) + .to(device = device, dtype = torch.float32, non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 57624e6d84..fe2dc06c44 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -735,7 +735,8 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): raise ValueError( 'Unsloth: Untrained tokens found, but embed_tokens & lm_head not trainable, causing NaNs. 
'\ 'Restart then add `embed_tokens` & `lm_head` to '\ - '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",])`', + '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",]). `'\ + 'Are you using the `base` model? Instead, use the `instruct` version to silence this warning.', ) pass From c053e42200dff1cf65967b617cce4c3d962b2ca1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Jun 2024 04:55:13 +1000 Subject: [PATCH 0240/1088] Ollama bug fixes (#667) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. 
* Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. (#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Ollama * Update chat_templates.py * ollama * Update mapper.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/chat_templates.py | 103 +++++++++++++++++--------------------- unsloth/models/mapper.py | 2 + unsloth/save.py | 86 +++++++++++++++++++++++++++---- 3 files changed, 126 insertions(+), 65 deletions(-) diff --git a/unsloth/chat_templates.py 
b/unsloth/chat_templates.py index 7b6da3e449..ee4235c74e 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -23,7 +23,6 @@ "apply_chat_template", "test_construct_chat_template", - "create_ollama_modelfile", ] from transformers import StoppingCriteria, StoppingCriteriaList @@ -1079,14 +1078,29 @@ def construct_chat_template( \ ) pass + # Check tokenizer types + tokenizer_name = tokenizer.name_or_path.lower() + if tokenizer_name.startswith(("unsloth/llama-3-8b-instruct", "unsloth/llama-3-70b-instruct")): + # Add <|eot_id|> + extra_eos_tokens.append("<|eot_id|>") + elif ("<|eot_id|>" in extra_eos_tokens or "<|eot_id|>" in chat_template) and \ + tokenizer_name.startswith(("unsloth/llama-3-8b", "unsloth/llama-3-70b")): + # Warn + logger.warning( + "Unsloth: Base llama-3 models did not train <|eot_id|>.\n"\ + "Please use the instruct version or use <|end_of_text|>" + ) + pass + extra_eos_tokens = list(set(extra_eos_tokens)) + count_eos = 0 for eos in extra_eos_tokens: - count_eos += len(re.findall(r"{OUTPUT}" + eos.encode("unicode-escape").decode("utf-8"), chat_template)) + count_eos += len(re.findall(r"{OUTPUT}" + re.escape(eos), chat_template)) pass if count_eos == 0: logger.warning("Unsloth: We automatically added an EOS token to stop endless generations.") eos = extra_eos_tokens[0] - chat_template = re.sub(r"{OUTPUT}", r"{OUTPUT}" + eos.encode("unicode-escape").decode("utf-8"), chat_template) + chat_template = re.sub(r"{OUTPUT}", r"{OUTPUT}" + eos, chat_template) pass # O(N^2) search finding 2 repeatted pieces of text @@ -1151,7 +1165,9 @@ def construct_chat_template( \ # Check bos_token is in system prompt ollama_system = system_part has_bos_token = False + always_bos_token = False if tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None): + always_bos_token = True if ollama_system.startswith(tokenizer.bos_token): has_bos_token = True ollama_system = ollama_system[len(tokenizer.bos_token):] @@ -1166,11 +1182,6 @@ def 
construct_chat_template( \ input_modelfile = "{{ if .Prompt }}" + input_part .replace("{INPUT}", "{{ .Prompt }}") + "{{ end }}" output_modelfile = output_part.replace("{OUTPUT}", "{{ .Response }}") - # Check if EOS token is at the end of the output - if not output_modelfile.endswith(tuple(extra_eos_tokens)): - output_modelfile += "{__EOS_TOKEN__}" - pass - # Ollama EOS ollama_eos = get_ollama_eos_tokens(tokenizer, extra_eos_tokens) ollama_eos = '\n'.join(f'PARAMETER stop "{eos}"' for eos in ollama_eos) @@ -1215,10 +1226,7 @@ def process(part, which, content = "message['content']"): partial_system = process(system_part, "{SYSTEM}", "messages[0]['content']") partial_system = partial_system.replace("{SYSTEM}", "") - # If {SYSTEM} is non existent, simply just use the content - if "{SYSTEM}" not in partial_system: - partial_system = "messages[0]['content']" - else: + if "{SYSTEM}" in partial_system: if default_system_message is None: raise RuntimeError("Unsloth: Please specify a default system message!") pass @@ -1226,21 +1234,22 @@ def process(part, which, content = "message['content']"): # Separate the BOS if has_bos_token: partial_system = partial_system.replace(tokenizer.bos_token, "", 1) + system_part = system_part .replace(tokenizer.bos_token, "", 1) pass - + partial_system = \ "{% if messages[0]['role'] == 'system' %}"\ "{{ " + partial_system + " }}"\ "{% set loop_messages = messages[1:] %}" if default_system_message is not None: full_system = system_part.replace("{SYSTEM}", default_system_message) + if "{SYSTEM}" in system_part: + modelfile += '\nSYSTEM: "' + default_system_message + '"' + pass partial_system += "{% else %}"\ "{{ '" + full_system + "' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}" - - # Add to modelfile - modelfile += '\nSYSTEM "' + full_system + '"' else: partial_system += "{% endif %}" pass @@ -1251,6 +1260,22 @@ def process(part, which, content = "message['content']"): jinja_template = "{{ bos_token }}" + jinja_template pass + # 
Check if system part is the same! + jinja_template = re.sub( + r"\{\% if messages\[0\]\['role'\] \=\= 'system' \%\}\{\{ '(.+?)' \}\}"\ + r"\{\% set loop\_messages \= messages\[1\:\] \%\}"\ + r"\{\% else \%\}\{\{ '\1' \}\}\{\% set loop\_messages \= messages \%\}\{\% endif \%\}"\ + r"\{\% for message in loop\_messages \%\}", + r"{{ '\1' }}{% for message in messages %}", + jinja_template, flags = re.MULTILINE | re.DOTALL, + ) + + # Check jinja tempate for bos + if always_bos_token: + if not jinja_template.startswith("{{ bos_token }}"): + jinja_template = "{{ bos_token }}" + jinja_template + pass + return modelfile, jinja_template pass @@ -1260,7 +1285,7 @@ def test_construct_chat_template(): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", token = token) - template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> + chat_template = """<|begin_of_text|><|start_header_id|>system<|end_header_id|> {SYSTEM}<|eot_id|><|start_header_id|>user<|end_header_id|> @@ -1277,7 +1302,11 @@ def test_construct_chat_template(): extra_eos_tokens = None - modelfile, jinja_template = construct_chat_template(template, default_system_message, extra_eos_tokens) + modelfile, jinja_template = construct_chat_template( + tokenizer = tokenizer, + chat_template = chat_template, + extra_eos_tokens = extra_eos_tokens, + ) messages = [ {"role": "system", "content": "You are an assistant"}, @@ -1291,7 +1320,6 @@ def test_construct_chat_template(): tokenizer.chat_template = jinja_template new_output = tokenizer.apply_chat_template(messages, tokenize = False, add_generation_prompt = True) - assert(correct_output == new_output) pass pass @@ -1344,43 +1372,6 @@ def formatting_prompts_func(examples): pass -def create_ollama_modelfile(tokenizer, gguf_location): - """ - Creates an Ollama Modelfile. 
- Use ollama.create(model = "new_ollama_model", modelfile = modelfile) - """ - modelfile = getattr(tokenizer, "_ollama_modelfile", None) - if modelfile is None: - raise RuntimeError( - "Unsloth: Tokenizer does not have a `ollama_modelfile` attribute.\n"\ - "Please use get_chat_template(...)." - ) - pass - - system_message = getattr(tokenizer, "_system_message", None) - if system_message is None: - __SYSTEM_MESSAGE__ = "" - else: - __SYSTEM_MESSAGE__ = f'SYSTEM """{system_message}"""' - pass - - modelfile = modelfile\ - .replace("{{", "⚫@✅#🦥")\ - .replace("}}", "⚡@🦥#⛵")\ - .format( - __FILE_LOCATION__ = gguf_location, - __SYSTEM_MESSAGE__ = __SYSTEM_MESSAGE__, - __EOS_TOKEN__ = tokenizer.eos_token, - )\ - .replace("⚫@✅#🦥", "{{")\ - .replace("⚡@🦥#⛵", "}}")\ - .rstrip() - pass - - return modelfile -pass - - def create_stopping_criteria(tokenizer, stop_word = "eos_token"): class StoppingCriteriaSub(StoppingCriteria): __slots__ = "stop_token", "single_match", "length", diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 5ef7583975..4b40065083 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -47,9 +47,11 @@ "TinyLlama/TinyLlama-1.1B-Chat-v1.0", ), "unsloth/mistral-7b-instruct-v0.1-bnb-4bit" : ( + "unsloth/mistral-7b-instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", ), "unsloth/mistral-7b-instruct-v0.2-bnb-4bit" : ( + "unsloth/mistral-7b-instruct-v0.2", "mistralai/Mistral-7B-Instruct-v0.2", ), "unsloth/llama-2-7b-chat-bnb-4bit" : ( diff --git a/unsloth/save.py b/unsloth/save.py index f8f884a9d3..9163c6d38d 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -891,10 +891,10 @@ def save_to_gguf( # Map quant methods new_quantization_method = [] for quant_method in quantization_method: - if quant_method == "not_quantized": quantization_method = model_dtype - elif quant_method == "fast_quantized": quantization_method = "q8_0" - elif quant_method == "quantized": quantization_method = "q4_k_m" - elif quant_method is None: 
quantization_method = "q8_0" + if quant_method == "not_quantized": quant_method = model_dtype + elif quant_method == "fast_quantized": quant_method = "q8_0" + elif quant_method == "quantized": quant_method = "q4_k_m" + elif quant_method is None: quant_method = "q8_0" # Check if wrong method if quant_method not in ALLOWED_QUANTS.keys(): @@ -978,6 +978,11 @@ def save_to_gguf( pass pass + # If only q8_0: + if len(quantization_method) == 1 and quantization_method[0] == "q8_0": + strength = 0 + pass + if strength >= 3: first_conversion = "f32" elif strength >= 2: first_conversion = "f16" elif strength >= 1: first_conversion = "bf16" @@ -1008,7 +1013,7 @@ def save_to_gguf( n_cpus *= 2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model - final_location = f"./{model_directory}-unsloth.{first_conversion.upper()}.gguf" + final_location = f"./{model_directory}/unsloth.{first_conversion.upper()}.gguf" print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ @@ -1072,12 +1077,12 @@ def save_to_gguf( full_precision_location = final_location - all_saved_locations = [] + all_saved_locations = [full_precision_location,] # Convert each type! for quant_method in quantization_method: if quant_method != first_conversion: print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. This will take 20 minutes...") - final_location = f"./{model_directory}-unsloth.{quant_method.upper()}.gguf" + final_location = f"./{model_directory}/unsloth.{quant_method.upper()}.gguf" command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" @@ -1365,6 +1370,29 @@ def fix_tokenizer_bos_token(tokenizer): pass +def create_ollama_modelfile(tokenizer, gguf_location): + """ + Creates an Ollama Modelfile. 
+ Use ollama.create(model = "new_ollama_model", modelfile = modelfile) + """ + modelfile = getattr(tokenizer, "_ollama_modelfile", None) + if modelfile is None: return None + + modelfile = modelfile\ + .replace("{{", "⚫@✅#🦥")\ + .replace("}}", "⚡@🦥#⛵")\ + .format( + __FILE_LOCATION__ = gguf_location, + )\ + .replace("⚫@✅#🦥", "{{")\ + .replace("⚡@🦥#⛵", "}}")\ + .rstrip() + pass + + return modelfile +pass + + def unsloth_save_pretrained_gguf( self, save_directory : Union[str, os.PathLike], @@ -1500,10 +1528,21 @@ def unsloth_save_pretrained_gguf( new_save_directory, quantization_method, first_conversion, makefile, ) + # Save Ollama modelfile + modelfile = create_ollama_modelfile(tokenizer, all_file_locations[0]) + modelfile_location = None + if modelfile is not None: + modelfile_location = os.path.join(new_save_directory, "Modelfile") + with open(modelfile_location, "w") as file: + file.write(modelfile) + pass + print(f"Unsloth: Saved Ollama Modelfile to {modelfile_location}") + pass + if fix_bos_token: logger.warning( f"Unsloth: ##### The current model auto adds a BOS token.\n"\ - "Unsloth: ##### We removed in GGUF's chat template for you." + "Unsloth: ##### We removed it in GGUF's chat template for you." 
) pass @@ -1520,6 +1559,15 @@ def unsloth_save_pretrained_gguf( new_save_directory.lstrip('/.') print(f"Saved GGUF to https://huggingface.co/{link}") pass + + # Save modelfile + if modelfile_location is not None: + username = upload_to_huggingface( + self, save_directory, token, + "GGUF converted", "gguf", modelfile_location, old_username, private, + ) + print(f"Saved Ollama Modelfile to https://huggingface.co/{link}") + pass pass pass @@ -1654,6 +1702,17 @@ def unsloth_push_to_hub_gguf( new_save_directory, quantization_method, first_conversion, makefile, ) + # Save Ollama modelfile + modelfile = create_ollama_modelfile(tokenizer, all_file_locations[0]) + modelfile_location = None + if modelfile is not None: + modelfile_location = os.path.join(new_save_directory, "Modelfile") + with open(modelfile_location, "w") as file: + file.write(modelfile) + pass + print(f"Unsloth: Saved Ollama Modelfile to {modelfile_location}") + pass + for file_location in all_file_locations: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( @@ -1667,10 +1726,19 @@ def unsloth_push_to_hub_gguf( print(f"Saved GGUF to https://huggingface.co/{link}") pass + # Save modelfile + if modelfile_location is not None: + username = upload_to_huggingface( + self, repo_id, token, + "GGUF converted", "gguf", modelfile_location, old_username, private, + ) + print(f"Saved Ollama Modelfile to https://huggingface.co/{link}") + pass + if fix_bos_token: logger.warning( f"Unsloth: ##### The current model auto adds a BOS token.\n"\ - "Unsloth: ##### We removed in GGUF's chat template for you." + "Unsloth: ##### We removed it in GGUF's chat template for you." 
) pass pass From a9c573457f5f83563d44fd8b7e41b8ff1aacd357 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 20 Jun 2024 19:45:02 +1000 Subject: [PATCH 0241/1088] Update chat_templates.py --- unsloth/chat_templates.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index ee4235c74e..026e6dbad7 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -789,7 +789,7 @@ def _create_formatter(possible_columns, final_optional_prompts, user_column_name prompt = prompt[2:-2].encode("unicode-escape").decode("utf-8") columns = re.findall(r"\{(.+?)\}", prompt) x = f"__optional_{j}__" - prompt = f"{' '*8}{x} = '{prompt}'.format({', '.join(f'{x} = {x}' for x in columns)}) if input else ''" + prompt = f"{' '*8}{x} = '{prompt}'.format({', '.join(f'{x} = {x}' for x in columns)}) if {x} else ''" function.append(prompt) formatter.append(x) final_prompt += "{" + x + "}" @@ -1236,7 +1236,7 @@ def process(part, which, content = "message['content']"): partial_system = partial_system.replace(tokenizer.bos_token, "", 1) system_part = system_part .replace(tokenizer.bos_token, "", 1) pass - + partial_system = \ "{% if messages[0]['role'] == 'system' %}"\ "{{ " + partial_system + " }}"\ From 2ba3581fd2dd75f40bbd8a7695f519b4f43b2e36 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 20 Jun 2024 19:49:38 +1000 Subject: [PATCH 0242/1088] Update chat_templates.py --- unsloth/chat_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 026e6dbad7..ee58ca2b83 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -789,7 +789,7 @@ def _create_formatter(possible_columns, final_optional_prompts, user_column_name prompt = prompt[2:-2].encode("unicode-escape").decode("utf-8") columns = re.findall(r"\{(.+?)\}", prompt) x = f"__optional_{j}__" - prompt = f"{' '*8}{x} = '{prompt}'.format({', 
'.join(f'{x} = {x}' for x in columns)}) if {x} else ''" + prompt = f"{' '*8}{x} = '{prompt}'.format({', '.join(f'{x} = {x}' for x in columns)}) if {columns[0]} else ''" function.append(prompt) formatter.append(x) final_prompt += "{" + x + "}" From a558f22992813209ef9a369da8ef5163e9782258 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Jun 2024 22:28:28 +1000 Subject: [PATCH 0243/1088] Ollama (#671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update 
_utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. 
* Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. (#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Ollama * Update chat_templates.py * ollama * Update mapper.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/chat_templates.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/unsloth/chat_templates.py 
b/unsloth/chat_templates.py index ee58ca2b83..5f5b4e16c9 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -781,12 +781,12 @@ def _create_formatter(possible_columns, final_optional_prompts, user_column_name columns = re.findall(r"\{(.+?)\}", optional_prompt) formatter += columns # Must escape \n \r - final_prompt += optional_prompt.encode("unicode-escape").decode("utf-8") + final_prompt += optional_prompt.encode("unicode-escape").decode("utf-8").replace("'", "\\'").replace('"', '\\"') else: where, prompt = optional_prompt # Strip [[...]] # Must escape \n \r - prompt = prompt[2:-2].encode("unicode-escape").decode("utf-8") + prompt = prompt[2:-2].encode("unicode-escape").decode("utf-8").replace("'", "\\'").replace('"', '\\"') columns = re.findall(r"\{(.+?)\}", prompt) x = f"__optional_{j}__" prompt = f"{' '*8}{x} = '{prompt}'.format({', '.join(f'{x} = {x}' for x in columns)}) if {columns[0]} else ''" @@ -842,13 +842,13 @@ def to_sharegpt( def __convert_to_sharegpt__(examples): users = examples[merged_column_name] assistants = examples[output_column_name] - texts = [] - for user, assistant in zip(users, assistants): - texts.append([ - {"from" : "user", "content" : user }, - {"from" : "assistant", "content" : assistant}, - ]) - pass + texts = [ + [ + {"from" : "user", "content" : str(user) }, + {"from" : "assistant", "content" : str(assistant)}, + ] \ + for user, assistant in zip(users, assistants) + ] return { "conversations" : texts, } pass @@ -1236,7 +1236,7 @@ def process(part, which, content = "message['content']"): partial_system = partial_system.replace(tokenizer.bos_token, "", 1) system_part = system_part .replace(tokenizer.bos_token, "", 1) pass - + partial_system = \ "{% if messages[0]['role'] == 'system' %}"\ "{{ " + partial_system + " }}"\ @@ -1244,7 +1244,7 @@ def process(part, which, content = "message['content']"): if default_system_message is not None: full_system = system_part.replace("{SYSTEM}", default_system_message) if 
"{SYSTEM}" in system_part: - modelfile += '\nSYSTEM: "' + default_system_message + '"' + modelfile += '\nSYSTEM "' + default_system_message + '"' pass partial_system += "{% else %}"\ "{{ '" + full_system + "' }}"\ From 4af390ef2ec265e84a6f0e51b52d46d0ea5a4894 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 21 Jun 2024 00:28:52 +1000 Subject: [PATCH 0244/1088] Nightly (#673) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. 
* Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. (#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Ollama * Update chat_templates.py * ollama * Update mapper.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Fixes --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/models/_utils.py | 7 +------ unsloth/models/gemma.py | 13 +++++-------- unsloth/models/llama.py | 39 
++++++++++++++------------------------- unsloth/models/mistral.py | 5 +---- 4 files changed, 21 insertions(+), 43 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 49b8ba3953..7a6954c9f8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -372,11 +372,6 @@ def prepare_n_gradient_checkpoints( pass -# Unsloth only works on NVIDIA GPUs for now -device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now -device = f"cuda:{device if device.isdigit() else '0'}" - class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): """ Saves VRAM by smartly offloading to RAM. @@ -398,7 +393,7 @@ def forward(ctx, forward_function, hidden_states, *args): @torch.cuda.amp.custom_bwd def backward(ctx, dY): (hidden_states,) = ctx.saved_tensors - hidden_states = hidden_states.to(device, non_blocking = True).detach() + hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() hidden_states.requires_grad = True with torch.enable_grad(): (output,) = ctx.forward_function(hidden_states, *ctx.args) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 0cc047d214..99374891ab 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -38,9 +38,6 @@ GemmaFlashAttention2 = GemmaAttention pass -import os -device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = f"cuda:{device_ids[:device_ids.find(',')]}" # Unsloth only works on NVIDIA GPUs for now torch_nn_functional_gelu = torch.nn.functional.gelu def fast_geglu_inference(self, X): @@ -48,7 +45,7 @@ def fast_geglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape # mlp_size = self.config.intermediate_size - # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = device) + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda:0") gate = fast_linear_forward(self.gate_proj, X)#, out = 
temp[0]) up = fast_linear_forward(self. up_proj, X)#, out = temp[1]) @@ -75,7 +72,7 @@ def GemmaDecoderLayer_fast_forward( *args, **kwargs, ): if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: - out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = device) + out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0") # Self Attention residual = hidden_states @@ -137,7 +134,7 @@ def GemmaModel_fast_forward_inference( position_ids, attention_mask = None, ): - out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = device) + out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda:0") input_ids = input_ids[:,:self.max_seq_length] hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) @@ -220,8 +217,8 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): emb = torch.cat((radians_new, radians_new), dim = -1) # We must do RoPE in float32! 
- cos = emb.cos().to(device = device, non_blocking = True)#, dtype = dtype) - sin = emb.sin().to(device = device, non_blocking = True)#, dtype = dtype) + cos = emb.cos().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) + sin = emb.sin().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) self.register_buffer("cos_cached", cos, persistent = False) self.register_buffer("sin_cached", sin, persistent = False) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9db7fcf2db..2d8e6a0748 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,11 +74,6 @@ def original_apply_o(self, X): return O pass -import os # Unsloth only works on NVIDIA GPUs for now -device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," -device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now -device = f"cuda:{device if device.isdigit() else '0'}" - from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax @@ -136,15 +131,15 @@ def LlamaAttention_fast_forward_inference( # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device) - self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) - self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) - self.attention = torch.empty((bsz, 
n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = device) + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 elif kv_seq_len >= self.paged_attention.shape[0]: @@ -174,7 +169,7 @@ def LlamaAttention_fast_forward_inference( Qn *= cos Qn.addcmul_(RH_Q, sin) - RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = device) + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") RH_K[:,:,:,:h] = Kn[:,:,:,h:] RH_K[:,:,:,h:] = Kn[:,:,:,:h] torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) @@ -236,7 +231,7 @@ def fast_swiglu_inference(self, X): # up = self.up_proj(X) bsz, _, hd = X.shape # mlp_size = self.config.intermediate_size - # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = device) + # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda:0") gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) up = fast_linear_forward(self. 
up_proj, X)#, out = temp[1]) @@ -526,7 +521,7 @@ def LlamaModel_fast_forward( position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype = torch.int32, - device = device, + device = "cuda:0", ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) elif position_ids is not None: @@ -846,11 +841,8 @@ def _CausalLM_fast_forward( if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): - device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now - device = f"cuda:{device if device.isdigit() else '0'}" # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) @@ -1471,7 +1463,7 @@ def get_peft_model( print("Unsloth: Casting embed_tokens to float32") model.model.model.embed_tokens.modules_to_save.default\ - .to(device = device, dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! @@ -1484,7 +1476,7 @@ def get_peft_model( print("Unsloth: Casting lm_head to float32") model.model.lm_head.modules_to_save.default\ - .to(device = device, dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! 
@@ -1713,7 +1705,7 @@ def get_peft_model( print("Unsloth: Casting embed_tokens to float32") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) model.model.model.embed_tokens.modules_to_save.default\ - .to(device = device, dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass @@ -1721,7 +1713,7 @@ def get_peft_model( print("Unsloth: Casting lm_head to float32") assert(hasattr(model.model.lm_head, "modules_to_save")) model.model.lm_head.modules_to_save.default\ - .to(device = device, dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass @@ -1902,10 +1894,7 @@ def patch_peft_model( # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 max_seq_length = model.max_seq_length - device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now - device = f"cuda:{device if device.isdigit() else '0'}" - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = device) + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") model.model.extra_ignored_labels = extra_ignored_labels internal_model = model while hasattr(internal_model, "model"): diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 832189beea..d8bd85d478 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -239,11 +239,8 @@ def MistralForCausalLM_fast_forward( if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): - device_ids = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + "," - device = device_ids[:device_ids.find(',')] # Unsloth only works on NVIDIA GPUs for now - device = f"cuda:{device if 
device.isdigit() else '0'}" # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = device) + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) From 933d9fe2cb2459f949ee2250e90a5b610d277eab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 21 Jun 2024 15:32:26 +1000 Subject: [PATCH 0245/1088] Nightly (#676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. 
* Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. (#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Ollama * Update chat_templates.py * ollama * Update mapper.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Fixes * clearer messages * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * log * Update __init__.py * Update llama.py * Update __init__.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: 
ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> --- unsloth/__init__.py | 29 ++++++++++++++---------- unsloth/models/llama.py | 45 ++++++++++++++++++++------------------ unsloth/tokenizer_utils.py | 7 ++++-- 3 files changed, 47 insertions(+), 34 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 0105199fba..298ed13399 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -17,17 +17,20 @@ import sys from packaging.version import Version -# Define a list of modules to check -MODULES_TO_CHECK = ["bitsandbytes"] - -# Check if any of the modules in the list have been imported -for module in MODULES_TO_CHECK: - if module in sys.modules: - raise ImportError(f"Unsloth: Please import Unsloth before {module}.") - pass -pass - -# Currently only supports 1 GPU, or else seg faults will occur. +# # Define a list of modules to check +# MODULES_TO_CHECK = ["bitsandbytes"] + +# # Check if any of the modules in the list have been imported +# for module in MODULES_TO_CHECK: +# if module in sys.modules: +# raise ImportError(f"Unsloth: Please import Unsloth before {module}.") +# pass +# pass + +# Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so +# enabling it will require much more work, so we have to prioritize. Please understand! +# We do have a beta version, which you can contact us about! +# Thank you for your understanding and we appreciate it immensely! if "CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" devices = os.environ["CUDA_VISIBLE_DEVICES"] @@ -36,6 +39,10 @@ first_id = devices.split(",")[0] warnings.warn( f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {devices} \n"\ + "Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so "\ + "enabling it will require much more work, so we have to prioritize. 
Please understand!"\ + "We do have a beta version, which you can contact us about!\n"\ + "Thank you for your understanding and we appreciate it immensely!\n\n"\ "Multiple CUDA devices detected but we require a single device.\n"\ f"We will override CUDA_VISIBLE_DEVICES to first device: {first_id}." ) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2d8e6a0748..2368a37672 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1165,10 +1165,10 @@ def from_pretrained( inner_training_loop = Trainer._original_training_loop except: raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ + 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ + 'We do have a separate beta version, which you can contact us about!\n'\ + 'Thank you for your understanding and we appreciate it immensely!' ) pass @@ -1201,7 +1201,10 @@ def from_pretrained( output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) if output > 1: raise RuntimeError( - 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.') + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\ + 'enabling it will require much more work, so we have to prioritize. 
Please understand!\\n'\\ + 'We do have a separate beta version, which you can contact us about!\\n'\\ + 'Thank you for your understanding and we appreciate it immensely!') for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1214,10 +1217,10 @@ def from_pretrained( args.gradient_accumulation_steps // self._train_batch_size if n_total_devices > 1: logger.warning_once( - "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + '* Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so ' \\ + '* enabling it will require much more work, so we have to prioritize. Please understand!\\n' \\ + '* We do have a separate beta version, which you can contact us about!\\n'\\ + '* Thank you for your understanding and we appreciate it immensely!' ) debug_info =""" debug_info = debug_info.split('\n') @@ -1244,10 +1247,10 @@ def from_pretrained( n_total_devices = total_batches // ga // bsz if n_total_devices > 1: logger.warning_once( - "* Our OSS was designed for people with few GPU resources to level the playing field.\\n" - "* The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\\n" - "* We're a 2 person team, so we still have to fund our development costs - thanks!\\n" - "* If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + '* Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so ' \\ + '* enabling it will require much more work, so we have to prioritize. Please understand!\\n' \\ + '* We do have a separate beta version, which you can contact us about!\\n'\\ + '* Thank you for your understanding and we appreciate it immensely!' 
) divisor = n_total_devices / 1 bsz = self._train_batch_size = max(int(bsz / divisor), 1) @@ -1273,10 +1276,10 @@ def from_pretrained( ) if "n_total_devices >" not in inner_training_loop: raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ + 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ + 'We do have a separate beta version, which you can contact us about!\n'\ + 'Thank you for your understanding and we appreciate it immensely!' ) pass inner_training_loop = inner_training_loop.replace( @@ -1783,10 +1786,10 @@ def patch_peft_model( from transformers.trainer import Trainer if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": raise RuntimeError( - "Our OSS was designed for people with few GPU resources to level the playing field.\n" - "The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n" - "We're a 2 person team, so we still have to fund our development costs - thanks!\n" - "If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!", + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ + 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ + 'We do have a separate beta version, which you can contact us about!\n'\ + 'Thank you for your understanding and we appreciate it immensely!' 
) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index fe2dc06c44..50b09275aa 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -954,7 +954,7 @@ def patch_sft_trainer_tokenizer(): "\n"\ "if self._inner_training_loop.__name__ != '_fast_inner_training_loop':\n"\ " raise RuntimeError(\n"\ - " 'Do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ + " 'Please do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ " )\n"\ "pass\n"\ "n_devices = torch.cuda.device_count()\n"\ @@ -964,7 +964,10 @@ def patch_sft_trainer_tokenizer(): "output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output)\n"\ "output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output)\n"\ "if output > 1: raise RuntimeError(\n"\ - " 'Error: More than 1 GPUs have a lot of VRAM usage. Please obtain a commercial license.')\n"\ + " 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\\n"\ + " 'enabling it will require much more work, so we have to prioritize. 
Please understand!\\n'\\\n"\ + " 'We do have a separate beta version, which you can contact us about!\\n'\\\n"\ + " 'Thank you for your understanding and we appreciate it immensely!')\n"\ "for _ in range(3):\n"\ " gc.collect()\n"\ " torch.cuda.empty_cache()\n"\ From 499635a19013319367358dcf2a0211d30073ebd0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 2 Jul 2024 22:51:01 -0700 Subject: [PATCH 0246/1088] Gemma2 (#709) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. * Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. 
* Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * Fix breaking bug in save.py with interpreting quantization_method as a string when saving to gguf (#651) * Nightly (#649) * Update llama.py * offload * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * continued pretraining trainer * Update trainer.py * Update trainer.py * Update trainer.py * Update trainer.py * is_bfloat16_supported * Update __init__.py * Update README.md * Update llama.py * is_bfloat16_supported * Update __init__.py * Mistral v3 * Phi 3 medium * Update chat_templates.py * Update chat_templates.py * Phi-3 * Update save.py * Update README.md Mistral v3 to Mistral v0.3 * Untrained tokens * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * 
Update save.py * Update save.py * Update save.py * checkpoint * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * accelerate * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * train_dataloader * Update llama.py * Update llama.py * Update llama.py * use_fast_convert * Update save.py * Update save.py * Update save.py * Update save.py * remove_special_tokens * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Update chat_templates.py * Support bfloat16 GGUF * Update save.py * Update llama.py * fast_forward_inference * Update mapper.py * Update loader.py * Update llama.py * Update tokenizer_utils.py * info * edits * Create chat template * Fix tokenizer * Update tokenizer_utils.py * fix case where gguf saving fails due to first_conversion dtype (#630) * Support revision parameter in FastLanguageModel.from_pretrained (#629) * support `revision` parameter * match unsloth formatting of named parameters * clears any selected_adapters before calling internal_model.save_pretrained (#609) * Update __init__.py (#602) Check for incompatible modules before importing unsloth * Fixed unsloth/tokenizer_utils.py for chat training (#604) * Add GGML saving option to Unsloth for easier Ollama model creation and testing. (#345) * Add save to llama.cpp GGML to save.py. * Fix conversion command and path of convert to GGML function. * Add autosaving lora to the GGML function * Create lora save function for conversion to GGML * Test fix #2 for saving lora * Test fix #3 to save the lora adapters to convert to GGML * Remove unwated tokenizer saving for conversion to ggml and added a few print statements. * Needed tokenizer for saving, added it back, also made it more unslothy style by having positional arguments, and added a few messages. 
* Positional arguments didn't work out, so reverted to older version of the code, and added a few comments. * Test fix 1 for arch * Test fix 2 new Mistral error. * Test fix 3 * Revert to old version for testing. * Upload issue test fix 1 * Fix 2 uploading ggml * Positional ags added. * Temporray remove positional args * Fix upload again!!! * Add print statements and fix link * Make the calling name better * Create local saving for GGML * Add choosing directory to save local GGML. * Fix lil variable error in the save_to_custom_dir func * docs: Add LoraConfig parameters documentation (#619) * llama.cpp failing (#371) llama.cpp is failing to generate quantize versions for the trained models. Error: ```bash You might have to compile llama.cpp yourself, then run this again. You do not need to close this Python program. Run the following commands in a new terminal: You must run this in the same folder as you're saving your model. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp && make clean && LLAMA_CUDA=1 make all -j Once that's done, redo the quantization. ``` But when i do clone this with recursive it works. Co-authored-by: Daniel Han * fix libcuda_dirs import for triton 3.0 (#227) * fix libcuda_dirs import for triton 3.0 * Update __init__.py * Update __init__.py --------- Co-authored-by: Daniel Han * Update save.py * Update __init__.py * Update fast_lora.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update save.py * Update save.py * quantize now llama-quantize * Update chat_templates.py * Update loader.py * Update mapper.py * Update __init__.py * embedding size * Update qwen2.py * docs * Update README.md * Update qwen2.py * README: Fix minor typo. (#559) * README: Fix minor typo. One-character typo fix while reading. 
* Update README.md --------- Co-authored-by: Daniel Han * Update mistral.py * Update qwen2.py * Update qwen2.py * Update qwen2.py * Update llama.py * Update llama.py * Update llama.py * Update README.md * FastMistralModel * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Update mistral.py * Auto check rope scaling * Update llama.py * Update llama.py * Update llama.py * GPU support * Typo * Update gemma.py * gpu * Multiple GGUF saving * Update save.py * Update save.py * check PEFT and base * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Fix bug in save.py with interpreting quantization_method as a string that prevents GGUF from saving * Implemented better list management and then forgot to actually call the new list variable, fixed * Check type of given quantization method and return type error if not list or string * Update save.py --------- Co-authored-by: Daniel Han Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman * Revert "Fix breaking bug in 
save.py with interpreting quantization_method as …" (#652) This reverts commit 30605dec2322435eec9753c7f566a0ff610ab52c. * Revert "Revert "Fix breaking bug in save.py with interpreting quantization_me…" (#653) This reverts commit e2b2083b621208b15923595cd7f509584ff566bc. * Update llama.py * peft * patch * Update loader.py * retrain * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * offload * Update llama.py * Create a starter script for command-line training to integrate in ML ops pipelines. (#623) * Update chat_templates.py * Ollama * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Ollama * Update chat_templates.py * ollama * Update mapper.py * Update chat_templates.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update chat_templates.py * Update llama.py * Fixes * clearer messages * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py * log * Update __init__.py * Update llama.py * Update __init__.py * Create Merge.png * Create ollama.png * Gemma2 * Update llama.py * Update loader.py * Update pyproject.toml * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Revert Gemma2 * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update rms_layernorm.py * Update gemma2.py * logit softcapping * Update cross_entropy_loss.py * Update llama.py * Update 
llama.py * Update gemma2.py * Update gemma2.py * Update cross_entropy_loss.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update llama.py * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * compile flags * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * fixes * Update _utils.py * Fix generation * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * pad token * Update gemma2.py * pad token * Update _utils.py * Update llama.py * Update gemma2.py * edit warning * Update tokenizer_utils.py --------- Co-authored-by: Eliot Hall <60240707+chrehall68@users.noreply.github.com> Co-authored-by: Rickard Edén Co-authored-by: XiaoYang Co-authored-by: Oseltamivir <58582368+Oseltamivir@users.noreply.github.com> Co-authored-by: mahiatlinux <110882203+mahiatlinux@users.noreply.github.com> Co-authored-by: Sébastien De Greef Co-authored-by: Alberto Ferrer Co-authored-by: Thomas Viehmann Co-authored-by: Walter Korman Co-authored-by: ArcadaLabs-Jason <52756218+ArcadaLabs-Jason@users.noreply.github.com> Co-authored-by: 
Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- images/Merge.png | Bin 0 -> 31406 bytes images/ollama.png | Bin 0 -> 67156 bytes pyproject.toml | 6 +- unsloth/kernels/cross_entropy_loss.py | 102 +++-- unsloth/kernels/geglu.py | 4 +- unsloth/kernels/rms_layernorm.py | 6 +- unsloth/kernels/swiglu.py | 2 +- unsloth/kernels/utils.py | 8 +- unsloth/models/_utils.py | 49 ++- unsloth/models/gemma.py | 2 + unsloth/models/gemma2.py | 538 ++++++++++++++++++++++++++ unsloth/models/llama.py | 81 +++- unsloth/models/loader.py | 14 +- unsloth/models/mapper.py | 8 + unsloth/models/mistral.py | 3 +- unsloth/models/qwen2.py | 1 + unsloth/tokenizer_utils.py | 8 +- 17 files changed, 772 insertions(+), 60 deletions(-) create mode 100644 images/Merge.png create mode 100644 images/ollama.png create mode 100644 unsloth/models/gemma2.py diff --git a/images/Merge.png b/images/Merge.png new file mode 100644 index 0000000000000000000000000000000000000000..a2df04874bfc879182cb66c789341d49700227ea GIT binary patch literal 31406 zcmcG$WmKHqlJ_0l5?q42TX2Wq?!kh)LvVKp?(XjH?!kk*ySqF6Ho5O}=FH4HXP)Q7 zvli05R`4&f&W14 z6hs9+RE*&q0-t~z^Gox8_)r}Q_o52{d=B$NOwI1Y2l(FiUyuRoB7+YfTm{92_`f-8 zowi$>Xo+T=Kel0JU?7M*tUdprH)$jMtPiPsL*z<`%GjM_;{*+jney4s-i<%FVyZ47 zSQV=O(-ljZPDV!m@!=r$?Px>W2G^x)UixH|>MAV}Yk13cYx)8g(b;j`ss!gGqL5Zl z0n$xvWsOp2iDKbfrfbIj!NJR;hx1q0v0}yj`_d<+^e0}W{j{ZdF<1Au$NiU12?gH! 
zQjKhgtM-Xm|Nea+cTQi+stKLruZZCMp#1z9WN4jM@#C*lDkeVe^!6;-mo5aE5jxm3 z%nO{rS44f4iz*cFt5n~$>l{Y=POuKADQ)78H&{r#015Z00Un$mkpvNS$5i3+Sov|K z(l!8n@LoU#1uGNk>JYl2Tfnazj2{sk1{}gA<=ab((*NtJM5#vcS$vwaKk|45%e5os*&!W%A`#1OS#A9$Zj!4&ay+N6VE zwn+5&9D2g?$7>t~294zI&8&KK@Na~tgKlB_pVIJ3mR>FP;dX4A9M-H-hLbfzoUUqT zVB?yw5^y2+^`eBHpWm&q;7ODohMftju;JNWAY5WnJ>f;vyTXl@rp4c@Z#z3uXW~6m@G@{2H#W=<;1oVW00LBh^ z=Vi#RzRAKvv`(A1J3Lt>+cpf-mTA7IDvida<#IyX-SYc%toFf>8`O5>`RBs9DVphu z0eLzc!H@I>&y?Uww#E2FbdeS@+@l~`co!dgitzZ>EauF~E8Tyw;ZkZgaOF>u)Md5T z50Qm0?Ol8px?K1Yl-)mAe~QO)m0g!IcED@zu+8Tlx-9u{$24GGzdi1r>a~mQyJSmm zS=64N=CV5F{)GSQ&lF0_lcw(65I7HCC1e7^&w%a*1PUc}6^tOx_y!L-B z_!~o|`g7y>y!mZ*0=Jp<`C9W=k%C7$#R0_L(Esoqv4owYVzizq{Kb{G8{*w$d~E&F zLO6cx1n=8XfA;T1M-i-3c&a%}lZ_fOXZSOKv8~wai=}?GkSNYy{+q`o5hp#$Pi&ac z4AiwDf%>$hr{w15BeFWt<6)@aV110d>h4JCGCI8Z5HAf&P-XYc(aX*$!9L|J?J9J{sw)PU(R&!K`K z>#5LwHW~W1#bLf(RlP8Lpmyxx8K%o68OtGN!@jBBSc0pA)S#2@?b7wmg6n$MEJ9r< zGfUE|=j#3Mm3};?Tqgdr&v9$5?m5a{Z_>QZjP(0g=(EF~M?=9Q_c|DXN}Bc=qiVJe z_^qU+{&!0xvogn-?f7VfuK`+`AK)AYj9xSFw3pUcEhPFkJ4ms4id4V3P-&y;%x~^T zN<&q^w%2_Plq(^b*B>w?zYhAzB;^{jku$lnrkRPdOV1%}a_)L`I`IR($K+yejLxGQ zG7+6eBE&U9`ham=Ii89e+}hRDNuKD=aI>(KX(AqRdIwLUd?n~rUBCYO`aSET%Aj7% zgZXHW@O?QMWq({nxJqdKMzWLB8NKLg(Oh?n4J1r=5txTy4FAgH-Ccb&q&!1nzR2dm zjM|XR@1uJin_pV{u()&H&k8$Tb+B0Jw_4G6{iUy?J}PgAk`DPfM_wj|1L@fc zq8onuJ=D{hAT`&0mSjgS@MrWPl9lcfm8aY}JFOC1`isE{^F{|g;d_0Zos{bH{9zzJ zSb|;6_h+Xj>qfi2J(GqU?{G;Pix-P=bp3pqsE5~Uw{suG0VX_gKLdXF#?ZOKv!2Ae z_7UQMpv1;_w!_t^GAi+-VXL`K`jKWTbrCa>o^$&qH7lILI`o5 zC~KWu)7p`Y9g8Vhk{7@HQQKkhty!^O$48^ftbg-}Vw?y=)w1X~EI>icrAiYeG!X|s z&K=+GHB++%(_62Zj|c|c?a?@70fcK}@fO&a4cZfiljdm|C5Q|e?H9S|jP)U!cXnx0 z;;~1Bz$rq~W^sJ5p?t-accdaSJ)+?`R57mv!n{h1=O6o%F}1p01qDe^n6TQ|`|%Sj z;}TxVZNtMq zuxfjiL~3AeZGL#}&cGF}`~e2HT$Mht)?%MCnWptj?Cy6$UluYlv3A9F?9t2jf^ipK z0bL$RE2%=+)N}>Mpuribt%{Z?X*$y-GJM(eGxEAg9ON;hsSfQtoPNrv!#+QYv4go< zv+<7A=r9R%&zlaJr|EHn+CvWEjDnQjda~S~C=rz-kH^M044Ps*Wc^Kj z>pq{>l+`CWWAHQ(gp?yG(F1KF6HLgNsv( 
zjWhV&K@4{UZf818HtN^~n1tmILMo5X_M$Ct5%#mo*?Rc?3`k>sBRPYka(ZmqwN%_B ze^P)&wOo8pRB?)llbhQi&aK~=N_hKR%i|eT=Q0M!h_2dTI;25#mPM-t3kXAj6V`3D zdJbHE!K8~ol5D`tm6l12f}4#V0`gCVHdGfuZ~kBL?JTa1b>DJOy$az6hK)b7J+{5z zO<=-~s%tk>XE_VL^hXwapXutl=C$$N@4ByEl2oBTj+pL3)Jk}MTp%&j&2ClJ2Z4{( zm!g_!RgAW%&K5Ng_#(+Jf%hdua1Bv!0`%4LcA@dSh>qO;=ttRi7(!JX5Mn9r{H^BHJ5MCw5*W zirGbU$&ZNodnywm=AtL)A^c&*u8h3AG#{IHE-qN?-fPlN_WTuEy!+M*WWGz!fTsgL zCl#v(t?fwY@CsBnY&&}!w0wT;JsW++j&^f57-a$EgHcVCa<7N_Ch+e1NyyfT9;;nj z@`kQHNleBcvIH*oj{wxL!*FsU=sjv6xw;~_-rI+u-a1XFR-{MkI+k^yvC zKk+let&kGgAC4xW09^?&5{iZuG&h!1tMynSCSedS2l-gZ7OcH0R}z;Bd7lU&ZEjcq zSG*7v!v~&DV{|kTUBOa3#k_hvb&xS-4f?7O!k=sG3A-7~y$RHtJ_#2yk3#u2llCZi z!|)q`2irvHRa^xiEyLru-#+?Dx5)6hB7u(o_9T)KWe{=tsY&Zm4pwz1xfUzx&9z4l z6DXAPUi^qflB^3>5ELJYTo)Jq9MDR|RrK1mKKE*KA)t=mT@%PoYl~5 zNEy?k$=>FZ+RPE^Vs86_EVTv zT4tEa`hx6?bl7CNu8O4gv7Fo!>G=>x)z9>(K34~Usy~)Zv?m+X{p#s{(&>&PJL^p} zM;9IDH~vOFOLb{I&v^O@5Lw6kRhUVDaz8lVCJ;>9KzjVe$Y*DDKXoeRHgh}*#Mklx zoD|a#Qn)fMmfPH-l~*(*4G#v6FMJbHWyh`V%b??zHHN6|-Mlk@=_R$;FZ9cV{D#Ri z#Hn%1;*V=LJ>#v4J)6=# zhLFCs1k&SbufSbt`cs+597u4Q&g@iP7kwcY3&G$0r2|_nbq2bNdDEJuhhO?>sDGWy z1VgbTX@-?gSL8j$0Q}_XM{E!(}fzpZ9 zk7dQVWRYAqP#VQ~kFFN<2JvF%TJROaDHjk>k*Lp5lb?t{Ubm{!Y3DH8Zq|(T4pEM+ zRgE%G96sw;F&&th#)->y#^F$%^gmX2K65?9`jFiJolQ7Vc7A+{-Ooj6pqrdz*ARc( z4IF2|z7q4{<)Fq2f3vY{>8zw&f2nVL8GTrS74+D3q@c1p@>qwIH`2(>r~N@1yU+P( zJwOA@Bc7MBcEg{(zv$Xrp{@REvZ~&Uy|g#Fn^LWw!0=RxIt$ZBrmy*37Xx~{PfiZ? 
zXw&}#jImrhsZGu}m(580r%K`4chj$ODcE%x6Nk#O= z&epn?r2c&XPi%f{zG;b!Po;gDTN24d(%fI@<$KQDwr2Hfj0j4tU8tN0XVO3EQ@kKU zKRiu-N$Ia48=$HbWTZctO1NvM_5vvkV5|L|W;%0xsnnF{zNtXJD|uI>VvjQ75Q^P} zS0Z7}A7*tB>m{VKyMz;=MK|rOQf4zEMGcY^f~sPp-_&i$>vIh977FS{ic;*&a1{N; zH+e{_u1DKV=*##~pFJN*QBLf)yyC= zs}Rh~tz(c$M=cD++i7sShvP;6u5jjU&x_XE*$F?3nz*0UW~(6NeNk`Md=qa z_{hnV9IkJ>>v#lkDI#VfSDsv}^(pZBG`8Ni4AG|Y_vVfa5i$c^3Rj@{&XmJH;Xi_E z3(}*P>2q_qY*%T!&6$3BC~wfGU1Rmy@Umyvb?HlnP$g@3ly@sm&=56(X&U*ZFlPuu1Jk3KDP?Sw=a8E%*+4+{ z#$2|ftu_;%dj?=mbf>qI0NOV$4*r}F;A&mUGDb&mGi1Ru(z&)uu$_5;KIQj3f-;HQ zvJ;ZaJ@s&WclOlgTtk65PlZ51c4A2@b1_i$b?wq>8>`$hI6NnkdiA?!x z9KMkpZ0LY&d;i*ZI0=F$AXkkN8gfe08K9IAkRMq)X<2$m(NCe!E$DTpnq*br=)lQ& z7h_D(CppRHkNwM2Hdd?g_~_o}&zPCP+hOGS@P$9328Tb&76tBVO2=cujuq$B;dqcT z^I554V#d9(9fnZTRzbQyi7!ZCK-CmWj2D+u?lOT4Spi&Klxom7&Eo+G2&0hYo%uUB zmQRiWYnrCE-fut4c<3mosdy>=8tBX&pm%7I`xwrqCtt#gwDDt5oF$upp6<~;?`xlW zikKLZ1#WRxvCZXXD-t-r1pg)kN2^`f>X`634qP+S`9#q}R-XHrO0z)gP5Nrs5MH~^ zO17)mPF{*!6h@*ur$W{itu#p4Stq-DE*duF;?C=V0aytM=N32^KXfB_Uijg^Y(Vh=A3marMb#qkA@d{Vv2T0? 
z@=^DMtUv4cFXK>(CL?;d)kO?SwB{o4;Q?O`JtL+P3fxNXKa4_)SAYnygb}RPP=s*^ zVClR5J{Zm{8Z~*`JJvlo_X4=RX`fO4gYBXgIZL9zP%1<)5~kdl;6~3n%;RTZ0E;!P zKQtIW?$6sOV6QB`ZeM@>r%Us23BBWTA|MEkncOs(iec_pSd~J3NU*D1Q|9Fug!<tHa&Ov)M@9kK!+-lw;SaHfBKq8kwV`h=gfS2<1v2H}eT}SAz(kr*JdqAl_-V zyWC%3ono*S8#UqUjgj|G8(Yq_+ z=g-CuUUYM0-6SZ5(Q6CsToB8KQi5<63T#gCyZR3fj+vw5qCy1GAcdD8 zP+b7f1ve3elKlK@FsG}xi_`WN@(lcIYiX7-n={G5bEEOtcH_72*w%cHqEYp%m!7(Q z?_wn!ZS$MCI__*LUiX)S2c~sT;U6}rB$BTuF^bLxVk*8X1%u0ghxy!>^eMYQy##;F9kepyB?2&L;82bMJGwsHCiz zSdgZPD|u(Pot}orJzRsLTo&G}9u+m{liSdN@R9b6;+?E1i;^;Y{gYgj@!h7dA233g z{zM4D>1*Wq?=&A6Oo4iE4k?H9Hh20dez#JLi20*^>+|b#qySqlySpYvlJ!;=k^XT- zr$?2ahe#j&ZU}i%#Vik&$X}k3rkV-xKjW08hP1Wc&F5l5+m;NO4bYeMgV`%1AZo0o zGjhPU`Hiocw~N)$$Wid)%^a=vd)eO_CCKmI-jVRdc>A)W}i6{d*OoRvK3~T2n6x1D`F5f;^K&WIX+}W{?ProegMcjffO#GNsQ%|tv zF6Q`KI+6IP!6H6@5ncicE~`=UuXc_VDy4I`pN@OLc;}c59GHgyT~X>g|4mT*oZo>T zq5@0U6zr!-e30UAWt|hFMB!P7K%X1oZ0iATX1()-(m|>pvErR;;DO~Hna;+#5DZ(+ z+CJ9kkEcdPh07TjSV+##k2kn_&eMAaGE?ct2!X}t@L}b&$W_YvjhWmT_V}$^1>sC8 zy<$E^(Mf3&k#RWBoSaOSN>YVTzI~0I$3cJB=-7JBap5DLwqC2P{u@L~zcP)jEb7ic z>L{f8^Kb(E0FxAuVh_y&NpDA`mq`$G_D(dfA6ISr<&lk2v?3!3X(@-K6OE4b? 
zP_eO)yx!&Z(9ho4zF79RY2ml?U30EC=CFr~zhJkb)w7kz9mldM8)x_^jgGbU;C9(} zBw0Bnw6t<^mIl1m4{fKfP+9$SW%xli)1!0X?HDCN2%=F{ z$t{iDc+H?r(c|l?i|!XnhIj`e3?ty*qh?ZTzXqLmcsnAlve#eFsn%u-(YLDjS^A8n zvWYaMrM1N(01PE8Xe%ue}-F1IPNyX#8RmVSabaf=gG7=U@ z^r{CDZ!%y|@~eZ8_^$ShmB{mPe@jgS*231m(5FgKX~*8#>1Ea_)8KWf_Rmcp?Sey+ zwJ{JK0V9Vu9KqsNd7B^am!7xuCv)56ilD?AKq5Z;J$C4MFb7jjyGJgSyjB zMDpV!>=iL2#ACM34ITrEG7QWjVmzIHb5^%1HnXM`l5x6X!h1V&v0z@SPcDS4$RehGLzvbU z@Kz{}UW6&?j3wsSACh)_d3RomvJ+hqgO2&j*@-a((b|Yw&~pHvkI)Cb9E!bfcA;P`LUOo&dt_~0rx3wKC|s1h?>im=<@|*O%%;w z@LZg-erzT2^zqN%!DktJVy0;VQyxY#RUlPb?m{x4Pwd?vLhT*gkc0yDl9ERwbi$yq zN6U!VkR^J#Cu)VqspyzNXpwL2C#!92o^Wh7Dj6k~M803D;i+zPp{#-u$MY=Fs>i}H zQA)HHMSSJvfq}h}k*`R?dQaQl+b$zZO#U#y<8Z%{1$)U?bprb+bX+P@Lm$7Az&J7u z>zN+^JoLJI$~MD4VruK24~e&#HSgrxHtkx>Qj7yA#Z{~dZ^+HVyc*2a-5s(Ab)BY) zixVu+qAj4PJ2MYe%GOv2jm>l8o|*J%yDJayi&P1jMfBXnUkS`$afud-mlsZlI9>bZS?b-DT!#~wLY}QA=(2-a9 zkm|rmLejV2CqLxF=J*2xsb;jH<_t&8YfIGK`H;OD4kAQO2%L8>l5J=x_^S@KXwWyJ zU)uZ-CfXbYQq0gS8!#qZCDR+=TQBlhogkBQ;Pe8}7vcm{S1-jOL>l{|DQYm4(DJ5b z=E)P5{XQAA;O#nSP61cM!C6u}V7a!_5i>Z|vEK~+b%^_v+vT7o1k%Arz~D57L}k%h zQFhU-vk%l!q;jwykSZvoe`jNGzrNc(y220g-S(hiSrT`6wiDREMv@-8wTE8PVKjp; z=tA$zRw1`}u+C>PYD~vKi#Tw9nKtE)cIovpjMJIGAHxaSzoU^lZv>3)?5S0pu_zoF zh2K=Xgm2g%qQeP{(YX?;T%9CP2`@~_F>M5gN-z1#*S_Ex)ZIy91dOI$V-JaUnWvx| zv}sJ131hM;F*&Q(ud`Nc&^UOAn$UI$HEiR4fbCc?tUZZ&EE`JgQ~u)t-`M)SN5fnB zWMJkit4KhqeY#m&X-X68g?muZit5Q^COF?ytFBRA`z=dcVsmr^_2TxW2DT63`6B01 z7XucRp-$xv^H7+PGM)-02bC3x*m@d}%nMs>NZF+Ad^j`0 zjfH$oh%Mt-(E`Qq30S(OD4fs6veRPJ;t8Qrs9<3zOSMRIq61hz!ItJoV#73GN_fOM zZhrnL@+q$z z!61wG(md`O+-Qv7%wm7C{U|%NmWI{`m%w~DGV&nf0bOXN+v|Dv<0w@n$M90#_F5Hf zR)&?B*i&UtAc|?+oIC>nPd^e=dhWToUJMM^X|ybYKL!7&GrB;-K)T)N2Z68Bv8o-Xqfy#`-$prt3#`w3X7Q>2aC{Iy^a^bvnNv;ri|*LnIkIcr z({TFi{T%80rd*gubSOAid_7sq6yv2w4P}&kz*FOyuY;`$>Zzt9?OPW-S%|$;!#^B( znxGKk9kMk3l5}f}?n@&?{e}e5^LD8Rbj0$(I?m_A!j_B6MNyQ_NEmF}w13Z*91cpZAuhO@X1U zFpiULA#Gd2_UDcjm{t7#WEC;D``0;kY-yI1UX+o0%gRd1l;?M`(Z&9bTp{{sARq;= 
z+NM#Uy;7TN;j@uPdm|bezBEtm%kgTsnrTP%t8=`%fVOzslXI#;S0tNoeG6#?rnAd6 zW4c#}W6D5@i6bKi+E6$RI7iv1D*o`Rvu_jv8;7uRQTp4!+Qy6ikm>* zI9WeIMg(nTrQ{gXq^hYe1oLQ2VcrRR&UdkfSakqVEefaAbzk3uN)wpG_1WT`Irvy# zP_JWMIlpikdu+Q>GK>hFM~6H6eva&4W#r3MKyv3-Qt*y?him`dkWK^azT>KPscp zqc&D%DzxqdPe+qCS#5DvH7&-P4Z+?Trex9ox-W1M_YzUru=uknY4=qYs8gocvc9$P zlX9iFUSUdL%VMfvrE?4FyUs1^a#~l8_a5$i@~+3bSfOX|BHOB6;lNDbKe3V^Ln0wl z7QQxAk}~X1Cjve>l&}#6uv?d%=@ja_%x88~;Fw>7GHqBGJzk$UEhrtqpV?$a) zkUroc5l;XnWMxaW9DS)a+TN}8Fewe^$Lko1kAZJ$WOyB$vuFjJQ;t!7J{)(Z|11(V zzmlVv$oJ~+cyW!-LQ5GGZ1ox^>XhiQQ$PL=*uN1A#&Ygm|H+RY#Uh?8f^kS@UWA?L zXizk3QzAm)5LGgPEajcvRY#PVerlMqZGefnz4x>8r)=wo6U~}3XvVG-v9ytvNxiRC z7+Nzi<)NJth~h0#=6*{7`Dr+3 zPj?Mb@~Oh^@rU!|{|UvBE6E4^2#kf6JjB}(HRHz}#>OIY{zV@F4hk+11=1iCExr`3 z$CSmH7GlLPTmV+*`lU9cJkgGAncE5nf{$m|2-Kax-kppTbcdRGt~-p^0VLrO^?6|;_vldDdh6r z>6QYeRm9`SFCK_S-VbzZzi~2!QRt#8QG31R=($yg`}>|8o+xS$iq&_dPQ+>;9Mx-L z!i~v7at-)K!B*8{%#KUG6%mm9yiw%##HPA2l-e0YVa@!;385RLw%>Hl8rYRg2M~@+|hd74rY(kND$3F zg_6a=R&ejY_n+(+DKj@_>*HwB1p;3O+te^QEfrJ!cO$BsTv%9JFrYyt=cac|D%C}0 z{wQjp+>R+f>A!u~t;UGj_nnNZGu-M3W^ zdA^9t83?kj&^Ts$(7xY3{V!4R4&xC^mY+6*;ln@5_z;LWT-Z}m3{K~5`Ndo{YwyAe z^JjeeOJ)o~wKgRr5TbJ|29e!fkFf4BXy8Z$&R^pFUVr?X{qkpN5OlomqGSf{oZ$S! 
z;r}OZ!1W|qdJvNdArMo$e4i1R>3PRG7QPAV!8PcyMl)Nk9hrfmN5O*?(|_pd7t;vVU3{02xfJ%rre5j@LPr0;b<P3Jjq{P}K zaMuGb4K>iUoP!7)`}pQwWip`lMc&I%_cqdfdlOVTw@`9$@=jiqv=5VF5D8oy33vkx zs$cPa8z@%X8y+5=lPnx^F5qZx7;qAOZ~aVgG<7;hp!(UJD2v^<%c${~{So+nF!>jO zFMKr((j=E5#!i_jOW?kk{92M*1geEcQT=G&@Ys|Zg~`&HRRfs`C0pUyS#3Il3qsqN zJ`F-0o3U^q#_VsFQIKh8vM^cCZM5Lfllz4J$7O|Q_NH|#LiOXB8> zK#A~2>mxz9Bm2#@IntKLlPUOhIp-Rroz&Wa_?maz`>S_G^Wkc!o^WcIZas8K(`{QR z=IJDc4%ZgPS+h@-Zb8jmnewH57H+`&_PQ}XQ&4I+Zb&kqt_xs*HH~8Gk()#6kS>2p4ec zC!;byU=cy>w%q=|0qjW~4sXDwv;i)amzXtcuc&NUfgY$5({7~y~ zxSop9Jf+pRK+*cnRg*tbNDvKz0oxQyogNuQKE@cM-6Zm%e`{@*p~cn&1`Pbl%YpLd z-`I1{8OdrMD^}+f*Y#oHg+RiqM?)LAp-NY(hsIXs5gt2LeYW+B$s(9x)IgGxy6n{` z)bDUf$MREYfE4&6xpl|Hx|59ZLe_+#*ML*YowZ=PaUe)=|ay!JL zMxENv6dAt(^%WxIlt4bRvqkslEWNMR<8}P)6zoljIEx#kz}fKAN5$9zHPoeFZ>uwa zp-wfQb(9Eg)%YbW=HGuX_BY2RJ0lVeJ`)jgx^=(NuPZyPCtmJHs z1Ww}dX|~&LKP8-rsjAC9se%=aDf&yuwx|G|v~5w_Pj!-jWwS?4r3DphR$`mhPFC?> zGlv*Dn?;_mT#I}&ryHeEP6cA7K}!9nZYpoS?mV4RUfd=?6_$HPF-?2;p!J3WivHsV zD<);VB#2Hb8tLt117(D~uaTIv9!*Y<{so`GV2*>F1*7*DH}Ac(M;elri%YT5mH0rt0-KnsHe@3_uiH7JX7en8k(|td*GjyWixZk}oZdUL!@+%(Q)_$pAE}R&up;O}FO2`l53M zq?&QMjo?_+bEQ94ufqM{jy;=HeTVXY&1UQ&YOV>4lZ`j{a7G(l>yTiJVD!!!?G zd!E*d6Q=5f(U%t+KBb-KH;WlHHqxb=Fc;3dBcKf{M={Mq4`^w`%yxJC;qy9$of6_; zzfKOI9|CVj;UcTuvxGkq3=s7)XDH>x6tx4mqWtT(W6cQ&7AzhElq#Xcy3{iAKqiaT zT>4mjSQsk=b24k1;IpT1z3%YeL=z@JrS>I5?^%A&W%zr(nt0uv-B9Y`?a$nMiUcrc zkGutL{GQTAU7c<(-tC*J%O?}+E#YmTISLpt!$Xhyazh2&tUwH02kDpyzDxu~Q5PPY z*s!IAiXKby!kIeFONFA5YnvO^tfZ1vX}TXS@R1Hk!dUFD)f&SM<)O9E|pj z!r;$<{DskT^Mn$lT{4zz{&`{**o~%rP}DybOx#^=1V?MKnmleZk5f{;PnW^O!|WxZ zeh)3=$f3_wvb^oKO=(MFl$Ji1$*hMIhINe$qQ*TG7kk;*sp6&IYPDAUm~PM`8ATJlCJ zYGzbZ99R4|_^>UOxMT*sx9qh@z5ObAy{ejPfW4u322X9yW!ijyukw$Y&;DdpvF|Y> z7e$P#NXBm^XSQGS0m6mq;{JwtoHp5RI}BE%F>PB%KzG%N)6jVbyX9=1bA6ugvre}uhMGYj<^$0RnzpqsI^Ux&5wcSqA)(0SUz3IfzQk3z))(KAsf zNyXMLC-Q4!q$8tUuwL289QU3U~Z4_Mdj=FZJ! 
zr40x@EuIQi6^yZCQDa~3BETh_t+l+pY@()NaW(U46IbAO+ZPJqAQZ;14sBR2Rb+YW zKcdE2ly8*lwD|_9r=2k$p5fM?g2FH!_o28oBvMvd8^NnrR z_L^~J>-W8)|E^{_LlFt)Q}9r=e1wtoSpf{};-8QGbRsKB4PUT|Z)vSF3A z0?~eziZ#OKi`@HeQRCJN;rD8`dcG{b9{qkpqW}3-BFDGgo&!tEfpJCw7&p7L{h-#Y zCjt6^ztol=l6Yrn5qxnZBs|~Ht<9>0l6XSCd(NX$i|LbS11o8gep`?@PqTBvdEM^f z&Oq*#&L#J#4xe^zU6+8%J2*P{UHCNGtv$;)lU%;%THOXgh=PdrQ6cjRW%qdmx`Lg= zIJ;@jTY$qNps6-*?I|?Jyyb*SGCW@@?=0<=BHzjSw!<3WakIzCQS!&+KpNZ9gAezI z#%ha)EtsUWJb}0?LOeElUf?Pz)0LQxN^=L${8JMe!J5jX;bS3F;KF z9hyX1 zTx|$Sg3iIdA9_~`YchD-7aZdegs8E}7oq(d?PnwV*kFDGmlNzMq(2IT5AR)F;gf}A z8&Ao+KkQRx(4lmL-2o!I>dc8OVQE=%e=w+Kj-3h88mSf7S(TNJfoi7FOXr^is>5kq>Rd+r*Z7OWl@QlVI4BZA4Zz7zmrxVF5@Vz2ax~nXv_6rJ`Wg7&HVgifb zW(GdUQY)0Ryr8PYLdCDwPViNUBA1O~ea1^_t`)j&MX8eO)D)xfAfw`{i9cQ!TvB1b zt)&)8XB`8Ro1ucdy<}rS0;Xj0ATF)jLPrra1(rOcYox`^tHMG(o3Xga?HfkrSHTo) zj~01rW-*T1RSEB_rTt)++|W{OlpXgN?&e5^!hsjMcwFwz;P1Y3Ct>1?t z3B$or-XWy@YBVNuBF6Rjo9oNjg(1Z$nDEoN;g@q`V`8z3w2=%nlU3=HUq?td`?ajz z+Trb)T84#wzQuLdUby^p{&Oc8>!Zd^J#~vL)?tN)_m7++6A6#Env9LR!KOd|{1ixz zzM0uy>P!4kp?HGS#fcA+!MOZ-1lu$+d0F-g+EHk)oqfFNi=Z+w91*VZ4Y zN0Nb!R?U#50sE2vtvp>D@aBg%RbtXE0$$>xu$Boj&cHViy!939JL{W9?6@gETW+Zow7i;)1sgoU=3jgw z#(9;B4Ji6r!0Ud|IBoVf)J#71lOF z6WmlmTtz1B*SEXhxC;Tr#IfYZ7A)66?#p&@0-t-%e$9=Yu9`o6Fi9;Z3m|1|Fc%dD zH!|vu&JLp<(v2?x>kYGFln*K#w25dl|4;mND0g(xNE>i})^8_!QlJe0D<*<(pi7_8 zLJRnB+BBVH)x6v5^`%GhO5(j6_Qx!OeIYCgG4+-Sh2Yu+CZ{Np!CZ@gogBsN`T1b0 z(6iqO5qUi{5+W01(yMM<*E8xSK73zpY(=O+#91UM1HYHR(-_%j1P9Zqli>U-k`cZN zS`}MhXX-v1bXfagncL35RmKOv{09$<_=iNWi?JH^Dv}ORI}pv+{;J8t^FE`6-P8ip zlVE!(LR3c_a;dH*g=Sg7OxXVL8g}c&<>W%M5)2|{1f;whyNE-EO1r zcs=c=rzfI)0;DE(wqB2{@?E|%D(sQ;jt}`v$)CVnFtk2f12))>YIi-auMqC|*5f@5 zgS2*xHtIrN#$z82jZTt!5WhUnw&S{#7!^HMdY~Q5-TV;Y z@^qqXF`Zk9t+_mxW$n=v@Qsq9u~$$H0y4dS%bxPQ4T<2c_{7a80ot0cwt0s(ISxv% z7FUlvYVybUR2eQk(1A;Vkwk-Dk6w? 
zw?)wf0F1r=t!ffvLYMrT!jv+=>@anpVE{NG{Wg~gmVm3v=eBe?5wI85<>9H zOyD&M>wKqJvc!3xJYuPk?j?XCYr+{B5>3~gJb+{a>S9`TG_3sQY!}=WOU}eqhFIvc zI>Omr7;h1z3L_4o9@mm_HM5_>?BwVH8H(HvHwzTJLIg57EPV&8U<>M7CV#jF6Hsyd z{#6RF*AKQNa)9kfr-Pq>{Z4?WDI7j4BMHk$evNt*d9eCx;D-pBhELtUqihQ*r5dxS z96OK>rb>K~VXkUI3&e1#Ivs!_U4mB_!1xE_!D)Yu&IhmfzogGLKmV)D6_`p$iX<<; zH*i6?OiejGYT2tfOJZ`8*yxRctE5AONXqkmGvciH8gmfq89UJ5J00`9VJK8{49DCX4zVEXqmUvERb*hOu? zVgE=pT{v0D>3sqe^p&@)lkbXMs#6UC!UM23gfHZ7E{hXtH7&vsv4cL9c>(zN@0Gp4 zB||*UX9BJVqUj8KntD0*1yOe1jK_qt2%sd`1!Xczjoh|AuZI(G_}`HK4*~VhUKH?z zfaiaEjId>MpfOa!#32|wMOmdoM))|~+?$E;ZBXkCA1Hqu|JQ5={Pv6ghQ@)z(DA=D zUv*erPU1Q2#Wd|*FMx^9{3hfWIcJT9a7j`!pa&oTb;!p5dT0Q}?Eg)N3_*iZNT9Np zoEu3Olm67aJP=PVt0$?hcyDIbN(*K<$NP!VYt2i(!5M(3>NPjiymc^$yRkP zQ#}>^MOHBjuf9}_f}??PaXAUwh(WWn>PfWY@`q0oe4D}pcyrAim-s8zlUhmyS?2lK zi!B?c`@L-EOSiT2n2-4eT3qpSqSwVP{i5Y*RQbXfUkbtb7!`1zTM=r)0=FoH3u2a_ z`5#F{#fpV=8X?Ba-p&#pu#}y6D8_zQEStwf3vqxyt}T(+6mW1m=^O6$uh_ljc?N!8 zE?KH|-kIs{233>!{~9~%u&TPP?JFhSAtBw3G}6r`mF^OdmhSHE5)f&myBjvCgmiZ| z()lg)iT9lIp7;5#i$C{XtUcFUW6pVxd;Er)i_nm!%A;L1o+KY)98bue(2QHv`47|A z<<>)1yRp1=K2wWn6c21eGSQx7p%vyDMoHhDRZPyN;zq;DP#Ir?HI6budA>0%wt$?N zTWwf$Xb1IDzC)vMVo7y^RT#g-&;r1J`W4FZka!@pka$Q(wlx5V@_naLZ zP;3w2ne=R$E|<_ZUocECF25PhC^sHVez_Q9oV1^_;4h8*dYN8B&OC@W#PRW-9G{WS%w)=Nz?fiWUe{3}aj@NAyD z(ZAi#FlmA`P}iILHsH7xO~dzhybeiTMrypayUV3ewkW=L<^7`3is?#kJoVWLzf`$$ zd8O8}Pt!e~PAobnjL%-r&3=2Q18Hq?qMp?V=<%5Ai@O19G1k7ZxF`;J_tg`NEh3R6 z>CFZW$TJ3az{$rbh|wHl^~spn9%dN|)o@K0@!8zXveMJtG0tlau)?g{{JgW}RW~mR zxVY&2atJ~U19t>CGu?FqZb+oY4x(w@I#s4|v!~ksURfQ(F5#mX1%CPoCroYEP+u*X z*f|{M7u#FIPM1sAtiz*Q8%GbuD{^dA3>egXG+sA%i$jBj^j_CbOE#G!NO2omNbW>U z>|rGYSY}*%p0b=MD_doJ6Z<$_+z{0EiV|3We-YPWeU6K@u0jR|(+G7vqxo9cYpH?4 z6a`@GP?lC;=(0bRt7JCnBWax!n5e(qW`YUZU%e`Sk@WsZ;${IJxF zR2yP#?K)NuYY{IF2(X`gz)3Iv6n0rLI=j23Y{gKFke}Zt8g1RWxKyC6mJqnFd&J-s zhXYq#S)qAB5-%eMIs8L-3te1*iWEsK848G@F&7U z^qDSqSazBfjJ!#yNf`y^m*=_eT*sJA8Zrmwi{!tizHdw)F@ZC*N(c8$D@?U@ZV1i4D1cZ-gsU( z{6KlMxE;$W^4}i4Uf}WD8jA6Bg>iMRhZZhY)gNSXM)9g*m%R+#4v@r(=x}2B`1T7| 
z^+*R>6Ex;JtZxpWwpV-G5N?d`Z>qYdidK|q3NgnhkqRMaV+~Z`B!#n7fXUDxRHn;= z#SS5iU{i#(>N{x7V#|CIK~9>Ij8_$pT4+O46%*CgR8p{?}lU)jId_x!wV z)4w=ay^Bcr{z6y1+SosaN`|nh@g-r|-DE6RpMzBCCTJ|A!2-3b5C`fz8i%xiqqNmW zJaN5hDO0qPI9T)M; zIpXX6l!@ID*f0o>AHsaXAi$YYogPP0e`G4KRR1+oAt4H4{!*(qP*shzzm1f71pE>hjA-V~^L;M3K0iI8x$4<_vAI}<2 z!oM26ta;9z(_AhXblThMArZ#GjsN+b4hfy3-!txuPGu`tNJ_J1y>h{~I$E1|ukC~u zJ>BC64pNbgmS=foNfW?u4cvK~JiU$|72P}ji3k6!)ZsD{_5ZJ*>Gr|3O^9~2&(4sm-}50HPV#Bbq&MmWw~P6# zDv+(@et2FlJ{&XR*u_Q#ZyOM=syZmy)(Psjp}sZfa+4Wyw-^MULZfP~ zD;BgX!s*w&%XXLU@x5F24CAljY4!PxU%Hr!013Jub#p=D)quuIBT@Zu^tk6I>xmGs zK_w&oxz%V1N{v(hTeRYPDaNhBU7J>N9bhE5AjZN-Pzb}UpBonqIhihgIxyCo)7yJ@ z7I4EIfrQ^8A$(mPFGY}ioPqR{R*2=sboz_`$uFaJ4hBqF_{r^$87mr!Jw?mpsDgw( z2ZBnhouvhr+o6Z8)m)=9Tx(t0D4nB=38T-WRiZhp`BT?0LZ5sIo;&MfWH!qU#=}}) zmBxDlgG0IkWG;lyEXh}xq2PWk3rFaCodXaN8I1vv81{8!@Q1BN3`I_&xtT~c7E&)y ztzHt3YIYHI$a>Gl6%!j7uFu{x+}uWS80M#ed2^=?!q=6)Ex1bu`Zl*Tfu`1{k!DX**NfaQ-miTPy~I!vAJ!D;_s^as0f*jvqiOL zYE6?HBmJ@=Qe+3)W-34FGBKv>+Kb-+x0e$ZI2H`uobQ-AOwQ7;N3&s*z9P!+xDk2x zdnH_s<7^vMol3;$9FZnceloN(*rzQwe!=O#&uE*p_&`S>2v<;5_G^lC@vKJ0*hRxg z?rX~rSQgoUF0v?hD3m^t|>-Hx|F z%k9dsA^hfiPhi9%S*E*MArtZ?aw$3{*`z+Cbo)}f(@x89 zcgafelu1)=JRGoOaYuCxA9+aWYFudFJFFfrRsw{mx~~op{XvggkkHqoq{fwexoK%V z8mekOl8wJkD2Z71M*cX+-Pc9$d{AMVoE9sEv=e9sMjb13M!@0vahtj<6QQUoK>Uag+44b} z!#Aylycb=CTEdiwiR4}`W*$ZddP!u+SJnm0X=J?@TeE2qBTb6xnaac0RJv$x;f2yL ziC^IeAy_(qFwAwwx4wmagg|B}p_ds>xl-a?AERZENpP>Wj9_uW%eCW!C5B*u@aq*) z%D_BaQH2Xd+XKw$-d-EIbayFGr&ga69u1bW1Z|ReOyN#v`>I_{PW#yC^Hjdn@%_BV zWQgbkrHKWDcdlwpqCWaJWk=i=BC!@=72@8dCe|!z(B&b-C|j@^<`|yfXMMsLn+_-Q z5B7k9gMoG_D<4fbyCQVp#d_F2N(KlS+!rTc6pv9-tf@)NhZ%@D)n1aThhFs`GCfWOwR z-f{K%=$L9vOv?jjlDx`xJ)!$^rBxhP0}^lE-L&GW_Ja6K_*TU*VTtVC=_<9`R)u!j zrx@v#;Vnvb0of+4*}&xqr}Kxe6Kc7s71C(2X`7%e5(W%k_lr-}BNZ+;H~9$xQ>|d9 zj#-=YerN)&^!IdOi`#rkpyw1&wa$bDdqm6WDgp2oalfYCfGG0iauF0b*a3iglz&*e ze*-7>LuoexhQ_p36GS6C?D=a4Ad@~0#qC2k=^U5F+p3!)`TqLCC=*h)gW7~}Wimou zFj5g3nWh^Tlhwb^nDZ@04gCy}NxDS|Ru^=I50T^qx3+~GX3p 
zhlZ@zhw2=kttWAfdnOniPabdbZu(e}{)S|k;1y*@xR#vT<dAa`?`he@ET>^o z2X^h4mMMHuJtYCJYd$;6{4hSa?b@z&#uW8UdL;rptPX`7f~476zfXx1zb1#65^&P| z9p*-}==On_Z}18Ta*~gP5@YRG2~LFfiHy$;&9CC)p)P+de9R6%FxF4`$y#L3cOXV( zFihk)U31H^x)6&fBRG0AMT%>(h+cYJ62=FACrI~Z6E|fRwv~w10~A!zgfdP4UmiNX z*vn`qi9ajS|E{H{AZ8aT-Tx6#p#9xU^Luf3A|p6!5Rrj%p^@0UWSemt#@@#xOP7K? zV$5yJ5o!kpa~=2X|6%=$_pXL&pyg)QFWgg-GoW?b)cIUoCALC#`hES*S=DjYaSXY= zLcK$hjH6t0#BV_lHIo6^2}?*44+h?r9O3DX!T+sRyJPY#1YjgUu2x#5zxbvtIbeTt zbM8Rum5twheG-G?>NsoKSzKe#C|HZul!Is)l~P55U^!)bAI-)R3ED4?Mk9azk2fy^ z-Jtt3#jF;ixpo^yQT~+j+rFk%MIZPSz)tN^e`Z#Vur)$4^io0VyVgN?8&Qa<9JxI*3@? z&zG0%zt+zK9!d1*2;w7|;Ls|2Vr)yP?6}Z~Oi)xL8ptO%B&X?2|Zr z-Ve#=6O?JP2rJbdu)#fDzkCxdIp9?v7kNbdz*(5(i(vUszQ}BVE1O0#9Bigj0rk6* zpY`7W15X2lO4&CknxaFettl_yQ=7 zE`UAoe}C@TA8y5em|EUXSHpZtV;lhasgfr$!9e_Di*lxx+5ti~h2%s4tr z-c~ixj)%_kn_#QY)Fw(;3$r$+Afm-SN)w}KSa)%qhzRX+d$KzZz+hY-#O<6nhq57C z{cN2IkrSno<#33Pc|t4Z%YQ)+B+R|XpKPuT#@?c93!X=ST{VJZEdsjI&bS`#&0ii{ zLQr$kK0&&!s^cL*e&L0TREx~go)MbodYmsXT`dMTztG8NNko}uii%*xC`rvSE;U8Iig3DF02rx!vSOcR zBU}yjO?K$HeN%vaf=yuXCjc=_t)*|>^2lvlP=Uc_UE+B|vD4RI!&e)Jp@h)-;jFj9 zuhez?;&2zU|1&0dnwJ}wd*SZodB6RD&bMZ+FU18VUERLZlhK`(`YZ`X&}~s#Ibser zn4a@rFXf@vRbhVnTH(*;6H;@wz0PTtocq2IAN_r9pVn;o*t}>#Se8aC@Y~FaDY^6E zD%mjh*D%!Fj><+tUoR8)^Qo-Qz@C0r|B+12d0`cS#?9=IQ!oL8T|2``1>guu z4d`+90Zj7v)`nY5UXF)uiRXi3bD-}s+v1Ls+vD0B)-OafAd%CaPO7OuOuJe9veUJ zR#?_MD53g4f4-78(Oti!@t}(b-GKuF<4#RaN<)NE6e4}{?Q=N?maNwj0tN_gNP^O% z4m1H+fwcB-SiuY|0%)dEyz!m04ISPaI+s1*RCMcE-I_ck^}U2&F8Onuo?Oj`3*i_O z(mnx~p8*f8b~4H=f-|E0o$j&u<=oduyeb>%J!*u{jPmegit_jyD1#CPo$HoR%>c(j zlFHZe_`OLYglOPU3T?5>oth}M`FK~8|LMLe)#43!RYQTnIg5oPQWgTjPfr7yw(eq| zbZPfw67H%h1%n1&C3oiLsMO@D=4taD>7M)$2vGOS=}0(av7_{qHV8jxW{FtL zEIrHsa?_+4730GPqYaZeCC&C$*A^SsP{x}3)SC3N%cG&QseZ}y(r*(};j}JA;sWl5 zIV21sNLsJ)JJNlnMkUYQKPuGy7T&a@aPYh0*GD!q-4%A5SvlK$rnJL1p4V$@MEnHg z{~N@Lr{Q#JyF@G!b>alE7_KZPv=QqLGmRMnmT0C7>sVM5>23WBEjq2jTebx$;BbG~ 
zGTCq?DJ00?FP?4M6u{`FQ~x594DKB5DKxr}vIAJV}yw0%kbfoMY%)yDZwrY#+6PwZoZhWQ2)GLetFoCxq&OCL>Sh#3iz@`VoR7P^t1K0;jM>WNVy`j%*hfu6?V zVfio$JT5mr{!q`cV9_TZZ+65OhgQ^}R|o4eB^AEm{e(NHR`m}do^j4!I$8C%WJt3^d zW*g6Su}n|wtDkU0<$bTY1$emH9>WBVfnq zU`P{JpPz7}g@0p}9i>X^Xl)p|C?o^~R{?frR~ygQpDz;M;WhZ9X*E!gTLki=ljkMV zB@m;KPn#&)$d?JIQ=PiM@1_fZe7!Y;j?^kQhH8VnRI_U{rH;NpG{cf3x*P;OG1rAc zd+QRz2EZK<&faRlmzRbMB;`7@I`I8?iYi>`r;!Ew?a>l0g0ZBQ)rvciQii1 zl_zp-33_=~1|3X&+Vbi_L@4|&Kkz3amo{5-(v z{9FG*^>D9?kF`9!0>#Yp3?RUt4#AyqGqPRiMzO7h$Y&ggMJ;E6D^S(daV` zAVRr7cX7}GLSd+7a92UQLs8X=q0mR3jaP4Ho>LGp3$#u@ zXIQxtbLybU?=Z{wY+?M|tq)qREy8LXA3-l-l#%rLt_KSDwqrkVa8ifZt*g&l0QTMTOhE7dBLK~sQaDAJNdnX z-3i7NJuRA?(12gU*9i>K*3iX5*9Zbcs$X^Bf&UdY3WHtg4 zIhI{81|VOl1a2HwrGZj4(z(=xQK z1K8(@y3*OApAFdXAE$QjnRI+iSR!c3Mp%ZkGI^j^^DmOTt~0JKyZjMwHby5ptqzry zu#b0%Qlo)+;(%^=t@n6{8Pa{Tlcd3!UM2zhwdzFN0LwA&SIH&gcpViyzKrhwzhIpm~6KQxiIDjUm#r+27wc^Chl)99fLX*jZPy+DvDNqMn~$&o!{dwaVH{+QV)NwkR*G)H$*xd3Uc zfDk<^pht#AvJfW(?Vo#DE zHyaVy12N%jx2xH{)?&YP*XW+J9=st5Qe$w=9yNKx#_Wb*kB!=4ip4!Ozbz+4zx?*ghG{87E2pJ#h)B!l_{CCP-}x4YUW z#{=eFycp@T^3f8vj9-2 z82|1REq93bK7(p#9+tEQed0nJW8w&TKbMCgO$O#-x1)P5u3?8}a+U&JU{@FFK}j;i zbDUs>_-!$8-4YWKZ1vg3xWcF1zc9&P3kw5cmrgETAyjHhIf7FjGpQ8VM9vHXc&a}H zrKD~$S808+Wk(g@T=uUh#a~j;Ct_8{9oJq)rSXzH@z-@~s6QmD7}W7`XYgCO=H-TX zHbckhF@=MeXymaukWF(m^lE-^ zy{prBBwXRj#dmzx4>ptN#f5L`WcX4)&l3DCW^AVRt(MVbCY_n>!} zzgs7=BcQ8XBSGd$Uxn^KTs#HOXgZS#t^pzt@>OE04AoXmntGZB1Nqd+L?>0B)L%eS z#;fuQ(0lSeQ*3t(uP}D0Zo+bol0qCC>JHv?Df)=4VoN4}?1oSrm0lCHuk&c%`__)X$iON0my ziG&R0=G2)O*zmFy$nbEL9egu;S)awouP|Kk4IrS3EBy?mpnf9NDgZL#zj7v?-sx{Z z)?z@oHN6XpZs|K(uf~wEkYx6Ugg21g)bgFqw%X_`(So|zM8@Z^F>bRyj@xj`UY3VE z)%xL#yQ`-aot9KzAqHk6^J_!qC-~m|3@?_Z#)r$*nbyCM@n6%1N(fl~IXgt~(qXmz zc$J#}AD@W6FR%1|GhkS1Cf!tG4IX;C$8ZCpdtL}{`M`&hFS+jhrKX#_+qDkM>6C0b_>XY z{om>%F_o{@JeM3>&ly&(g+FU-!FMP%$2VUz*I^&uO#FWj4CM(3>sbkc!kL#QYgh}2s#EAhkG z>0pzcm|@1lV=xLo=zN8`?W_X`|zr4?27wWVO6Ha|+r?}(gVrO1~? 
zuN}yi$~A(kPUd<~uyj`-HM^|LZcpSjF}m&*V5%`;#JA`dEYtyZ>fnB(Ftq|f9`av5 ztymV2y)v#Z=w4fU^f`&gp97lg|AJ4=E2m4mbu*9a{`xmRRnLt#%k|E=O1%v5s#TX} zW36+D5$>Ga!P$DTd&>h9Q+$H8r*>N{Xbnis(SeE_YkXKYKoPymD{=R&YYcMgR7I!D z{=*AL^WYr*dvxxV?~~oQr0%J4=5A+_xQQ|-M!GMMZdw5RDhtg(QaTG|4mL5q4sofw z!5vujxSgXSd4MkT0m?S!#t9G~(+e);QxY^l02}pnQz)7NqYAm|PbI!o6>&Rg^pe(?JVCZM;k%ZZamvSzo_ox3e6Z;e}c26 zQ3$i=o6ETnKJMlwN%mOZk8yN6Am@BdcONW&aGMF()&jbsLHT{Ky7+GdEaachv8?@y zMtv~;XCpME(S!@nZmE2!Y1a~I!()Fc)7fQu+>5a<hOguqj3KSkYJZF8Re660K8ai*d!;S#S^7}ue!+iv>0fbCjoQO*1 z*KRlWSJkGq6l>lZ-WTkSbXL2?9W(Nu6x_kc7WjEzBUdxEP42HbzvFuh1gTTn;Gopm zkKcaISwbu!^ZwcnS2B6}f5;Xt#Z5 z1)mJ5)pb&B6rsopAeGJyIyy3^biZhy8|M;>VdZPBEkTUl>EAtf{Rybrbex+@n5-_o z`$W6ON|1fUK#}^@v2qKEI%&QS&g4gsMRS$*TVk!$s#KMkk;TUaw-P&9qXf>lwc;9P zeT<`1`w`}fk{#hIKG_s`l!Ab#mHqY^i@A9&$hkbWPeCun5({rR&$#6TV;1 zExF=lmhB7DQz!rkz@g>7BHy_wU%WMY2`l{sS*-nppSItf%di#Apecmh^LE|r*#agK z>$P@Mt-i4wBvbK9{J67uKu>ut#R-osE*H{oqaMD`VyCu16BJSeWei?mnIDWuHF)1l z@r(aN=db#9M49a0_3cWyqp$)jU|9uEzyslS1>WUCDy|U0IRTW>`TzRKc_hdV> zJO%~#CZY$}KXxn-h0VM$l(9GuHicuE)F0az5Uu4^iEV4|p8kX0Ni>Oko2h`cqsL&3 zBWzRng%II1Yzi`RC`qLhH z<};Y-SY##j^vM4viE=~OwKT=zp$7<&8C+_1^iZ>cycolb;{J=&L|ecJFH&kcOX3W!zXHF?P+5LJI=x^ zXJ8ul%~-?6BHeLYZh0EG6>6%TfH7&yEy+L4E z#iv~9E12wM(p)rK8x8sYmhq3s0OBHgz^FF7+2XN#GbpJw!sM&Pq`&jsw*I&!81_g zCR=}dy(1HSWch706&Lor^&uZdoM}kDXKe8uh%Bj^M&YS*Bql&>d)|I7hac7vEk~pb z1mX>6y>k9NCHhOR2aPHcc{2>X_Tttr{b#V%qWAlk*NuWh5l7J_C-h%l7sj@>E7Nlm2CFS zV6eJ}Ygy4nKh%y{hl&o8j_VQYi6Vw5+^uOostjcjYYKui)4S%|bgmI>iw+&~AM6R;|9U5zx%?{tj`$7uU z#c}6Zev>Sn&nRh(ylf{^U~R40%Tv#DHT`DrbH1+*n=eDNr`h5Xjg;ygYKR&k!m8}} z@4qs+NH}0~rFgY=@FkPx?19jh8s$1BthDG^m#TXJiUmU8*W$7@l-#J9xyJVnHQ~t< zQz67bUK<$^S8*`w3%qoX4!KUibOONm&_F61)#8KNk=F@4Z}-&0m#kBrx{D*Zw^8zK zWy;5w2So{XOus>U?m1d;}T0+cx=vXO4#iktPReqd;{R ztWbERw$(Rpwu7cVykrjwk@8_!5WijIg%ezzss7IYLEHA{+0zDYuG@gN2=g7x8Oa_{ zQxREt`F>7|fefHRdeqNSVdNBvx!G|K z|3PY$8#w{b?-~wrRWwgA^xp&Na)awu*DlKJ4~Im&jjUZ6aU$s&Y(x)5uHB+_hT`FH zdU<^b6|tuQBS6>C-WPf6)sR0}gJi)a7A#IhAN<}}Dez)^{q@5oi2L9c%|@K8W&)Fe 
zOze7~BL5o3Yywr}xz6N`trh%tk&sD$WFC%?!!%v_4?B$H!}hmd9KKq)gq3akl>7^j zarx``5BfD1nTt@@(i0}V`~t2>@fJHM?ms8IcBWo-|S&j*MhxaQJL$5&zO=(Iel*n2U z1Ow8WSO9?_NRtqFPjGj?`+NV`-Ot8z=bkh3%roW8^?|9e-XRt~7CJh*LrDFL=5%xn z6xu(q1K<~ENP#-|4}E~S-g&yJ569=}=!EEy7tdLSI4n*ukK0?@Zc_K|Y(uKM#uOxU zitBV_FCazEnjS#LFbtG4Ynh)k>KuqU&*?{J*L_0lYOaSpw<>#Gqs-qmtA(?@hT;TJXc|Dx>XEKtgaGwG{l>wK^&R{HQRx zYeeeEw%tLVr=y3mBj_+EpgenjOu8gUFlKJj90GrmCs8Ml;Jw8VyWCQ#ip ztjxVi<71W;*iZWEoW0UyMoRg(;R0UmUPpZji-qR80J{VJCh17^?8PBtLC8(|Dj;>AtB8Bu%}j0$Irh*xEJB zSx!g<>?H)l@zYUpR0}?ziq&#KuUJ7iY1W{Fykllx6d!_H!hnD>3*yAGtb6N(fw|Xd zJCpN5E|@IFpASB6+&~Jd)^4TV-nSsJ5CWydu3DU-ofrpBJUf?RV0Us}t7s|jWAMxZ z!=qPlmhcOtH)0WibpmOD#O@izSTPzF=%Jw;Lc_~ioF!y09(EG z5o}bM<)w2plh~4=ca$r7h56jyd#q$-Uu-l+-KFU#A5z3Y*@MqVV#yk#HZs*8`Z1SO zdGG7cYb%Uu$;|s^)At=u(&V7M%vc6d(>t;cyHn}WgegZ-2(Ty52T|c@;v(c3`}7oQ z+tdrq6NUX_hb;YQrzjD*o4VlHA6$N$8U~?-_yD~z2C<12<3n}{h-HXlW?()3aAL#Z ztdZHUYDPv88iKCF5j8lgU^82A#zH{=AhT^IgU~*=YXj|qp{R3(<%sy*j;Ok}-tL*G z)IYCkLPk5`Qt4?%Ly%9f&Qii7&Om(s0edx`}d!Lw1RX6o_Nm8u?=b5dSM`U(c90C~QOehgS~pjdhkL%3-9P ztotXohrvIQG>{@%zjOrGEuF448R`$TqxsK(BzS>qN52OE;}w7biTO8)i9a;G>L%KL zp6ovhWzRC4R@8G{Z}qHe>+jwTdj8kQKH!9c0Qv^P?r+n!c?j6EFnK!eMKMO5gHb3VvsliG<_yJnh@29$6*5}-6$ts(%weT2w26tXi2_j zaf+#eRY#O&&b*7jHvJ6X34KH2L~rt1f03;2%KnL-KzuXTc%NmDJhGHVw$UW;fdhFi z293tkJSo0CY_Ap@`F-yG2m|)15I2niGkBWpNI1cWS987Hi_;`REfdY}5ShS-?0;ZY z!T_-X4<&OR?FyA4W6cV|j!-lSV*a#MUC ziFFc)nkkW!w!+bf2?#kd5x=K78p6~;?#B#@MiZ|?RucF29ssMUj67PTy-vvQEPYJG z2UcKwrvvoCd-TX8>>6vuV{HbpdoFO3w7b z9hG=i-7!(AlEQhK9)^Ja1FgK!8=tCJMWcYwLNNe9Y-nl&_`^~_ei&+z-dLrEY<%}V z1Mskh+mmvyDb|M2A@<{u4WL?_1(b${2Y?X_NY~(+x};-H`-eLy=ciA~6>Iz0t#vxY zyNtH|=hav`^W*pIYvWk zJajyw82=`zVT>+zUrqvXNUTjuMR6?CsIpuFNbY+CB;RYg4zx7S+C&k6Q?Yo*w7RoY zK8p)~%VyKx7DYz6q4(l=zyqdF!2^|=CIpT$;G$7iNG%^FBERt|3DO{QNl2GctyJ3~ z*>FecAnkM@IBJz1fWRz8r^%%#JMB@jh0$N`v=>Jc_2Bj#x!D338pO-a0j%70C`o`> z%1x6GXVhJ-n|R{SmXic_Js|Vb2&n}iYeK5Fr_0~kMTpO(1dLJ zu|@U`nqf%1Lc=BZ;BuUR+zjS) zMxzn0fW<|(Oi}{(;*Png)6RwqwF#u8=3jek2mcm 
z`08kMfGE6x;TN+%k((XS{+It-KJb&9Jy1rFxxz&{OcgNT9sEOG(ckvm7utbjppWAw z)1R<%inY|AwEtCuu`kZ{84Pe>y1*enV+*9_ce*6qwDwi}6GDKub#+gk4!0_!2fz3^ z)7*d!P|79ZgrNAmJN87H7rgxF{+=r#%b9B^#jp#NbajAMNr1CV2vKDM5G%ml)%x2m zNLOoSMA1H?{W+Vy&y_kvO?5}gm!d4Jf$bf^rgyYT?!c}KPm>!{*8-6ob8jHwe@9$@ z&ijlzBGXZKvLoQ7ar+NgfCsuGi>+YS0XUxJ>?-PFc7xFtH>CDcsc7N~`R7j|U^nCEG(kM=a4M4fY0C1U{Gt0mOQiyIrRL9gnvm%D!KM5c`G?BZ>khEMA zsx1z-UndL*@~+4&A=76-S10F(^rRQ9WzO~Q8;{-1$cccTQmN&)d!G^y5U36u>PJi7 z0nxrhz(&R--*^bs*H_%y|4(v9he;tu=@8}T8@m^!%V@_-1)gOh^?Da3BorLAA0FpydxG?xyg%DOOrL?^($_c@ z!?@Lf!VyQnHcp}zV-eqgelhr2eY;C)jDM7daB{=MVjaBD)`#ply7cHUb+iah7WB9W z(m?X=HEe$rlY=a3#hNfh<9pM8Tn+^eiD~}i8pLO5H58;}8C;r~BzTI>W zR!+G#N!;E*3Iz2wkmPL|8)IO2>1lFsav(GAqd{X**|9%X7H7ZTyWZB<-F}-L{J>5V z%LEgu5t=4xYRFluZp7clldzrk)^lC; zQLcSz;3MXJfe!NptPem7nS#V(YThliQ9snq#i+D-UoLDw>Sxzh<5YLMjWpeoZlNyF zF<)4|iZwW0t{z5f`I~hZPoLku7oiwuB6eY0jaH+PISGg$u-{KXYKN-K0L`nHtRP!_ z6P8MQ{zw(_OC{>fC~GNFSPcPyynxi_l*~aZK;EveA)43|*8f_L4wK8;dXDs~dr=*P z)-l0NeXJiwE`z`_-^2%c#9^Gx*6E)ay}t4-X^03dd2 zu?`aRTIm*j;eT7s?c`50WM2kIhy2tcw@^XAAT-+#HArec;> z2huC_hOHY|90eBdP?C9s*ey12YAkEM4EqTY%^Ig-i9@>dd+*V2V=r(Zgy_Las5(YU z=oE-H)oEPIBbsibXV0+h`zXzy6NwbK=1}*>BM@S50mqmf@f~RK-%lEN4MuJLqX-|y zoBE-N*%9?)&ToN_j{=7Ey_~kgK4$~iSacqaoe_xmyVd}7Q;%rLDW`UH{&@Ye%)C0FekFC-S$L{UI?$j*y1mD2&1{U0=?ksUs0 zCVG&`Dbf6Hk=Nr6BL2H}@S2*Y(!Zv&WRz2>-BMr&gv1F9XHs#Yj14$)6@Dsx&FO#3 z4nilz=dF7dV~-7_ONp2!08&-9H@XnO?zdTqEy%oJUoQH`C4jre6Myx1v2lZVbs^eG zJ0Y@=C!Mt^EPkfLlEffYTKM~KFeWz=KVb`8(5FXDz!vGj7MWg_xV?@Hbi{ zOrynO9a80fgB?n)yv)ahK$etx@uUZTx316C#1Hhu#rtk4x0zu<)= z8wTv|A-s+K(BIM!g%FHE_acd96nTV09|bm7PA*g^1{stZw!t9OX5PCv;_uBx6GZ?Q zjb29!UI1C~Hm&k}lWop^|KpCU{wO`;KW8<4M$}adyY*_RbRM>L5}2A_nemgE=toL#Kr2xv_E;c`@Jj^B@}ih#zUXLhA2X8i}{iCqs zqCY(p3G4vCZZwm2KWLNU_1mPN;t<-qe`@OewFgCiav*tg_PbS>RFKdGgbLN%2DvSg z^c3Fi@Zue)$NvZv6PiBVPwI9^Mc9`?AnyR%h>s~-$}obOhPt(8z!LO`?SH0HJrYu! 
zO;tDv90{b%5g!gh6G5i6nauT&c=o8!|MYa75qRV~^hWu0PyJWyM``T`p^!KuP~yuc zbM<$&OHbIDEB5RrP}0!o zuR0CgwMOJ9^k0XIMm&Rz^>_t5t1PY^z@(|MoMbUfSFk#aiKr!x` zs4$Vf4t>(o!vFaKqVN&XqG$2>UCMKQh%Laa>qjHY=jFtY!E25(^CL!^uKVqlzYnbo zkyqPh(`HoQo`Z4 zxtzMDi#(kFJ>>6><3GnHz%>WEcaL#H{XsPCZQd*kYsE$FnH5Iy{P#3Hh#G z)c1_o-yJWn85D20u>=g%=!;4e0OWQF*}Kqm^!a|W|3_C&GC#u=b7L7Wc_7pV{E+>@ zC%U~t?Pq=TUsbwd-uEm%E_jbW11$kZ!Q~_}P(b%@IXN&LUmuZSVEaE{z+^bHG6ON$ z5e(c=b6UyTcCXA4So`F>EcOKC-$Vw`5CdzgHL0k3@h)cJ+zM!Q0dGAb=1w21?aDQW z%Ma8kKmIQTB8(tI@sQo5PXlhFIhjE74pb{E?+MU9(9Q@feZpr<>u&vRN`H1xIy;`K zjZm}9P#*+|iK8RV^`moeQt&U)zP$z+DMR@G7JG1QEt?SqmseBS|BaNOMC2E1WBWIp z0>KI#Au-uY-S{fYT26pVl)8E*x`l#9%D*peA2{-lc@r34iMWtv5a>Tx|Vj!bE^^`>Tb^IC!){T|0f_Bp`D9rEs758Nri z8FiOQ)X)Ewsen0*RG;eBau;+m7Mu3W0){XZfxpo+5vBJF@k z&#S9R9MoDdYB@fv^S_$;2EnDR1)y(we`8bXxCX42i1F;5oQMkHn%gs7;;ExGh5o0U zQn1S@(HMj^c(?sKtZ)3>8~t4zQvJw(`H8(Ln;3+R9PD<+<8C7sz|zW8>{XDNw&`JZ zC)oe4NcZJpzOWoW!MG#%h6&RENO_D0-T_^7}srSAzTDyKxfxF6|>zy9aGfkscDX${@c z_C_1dSC{r`cuT&BENxxCu4NZ54~dxkSY0~iF8M!yB0yHyK$gM9>P`E1hyGPgAqrztyjR*C-@9rxLS0hMaq7@~FBRawSu3VfxOc${(&zTC z9zof_yR{}U-O)g;c=wD4=-e)}CH7`ZSVhC03UYH%DD3&gf7}!R3606>iYmx{Mc5I8 zo}|%ARy@ObP#&FSS<4jtU$h2F%7-04%D97D77JF8r5SyWyi{_79Qg#_D45?KwdaEp z0$rZ};pr%IxIRg=dl4oAGoHP2DG@AJ1j5Rc8G(W!+5kKcBH!t~LXN*R0?z?ts8FRY+ zB_M933R&d5&l844Dg%&z4Fu$Z=%rZe`UpdO061Jt)mUPh9H6)7&wBAcv@Dd2C-Nwc zZI=l*m+YFFy)+Vh&x`)l-rM=r%Sx;9h%vv1$_tF-`{<8Xf{XLv(9Hi9kM6)=pBeNSkC)IEyzyO?(4zexacVA_B!if zd*&U=U}-A`DyLfe@PJgVGZXMja65EYYI33x6*_79%|Z@xf%&3o$F(&OROP*?@*6Hi%Qg(^d@P?{ z0I*@ZVbL=>K}%lB2a`gb4H|#_GYcTxcLy=<f>8K7Bctphpq?DK2bU1CpIZE5^p85&=EH4$u2LCx-NMsw+m68l5p$Vs@?(?iY5< zq-KeW9Zk^sl}&!HziV!Mn$!_6Yg|Pz zIJdH?#u9EsLMc*A6VppSqyqbx_+)(JAn!&LA@(X{_z}e%ZW8ROxu+jya+k%67|>JLnFP9)y;}-4Xb~gCw}-1 zYaQ#p^xeYVL%kBPPo4iUfmw2O=2}zy6GJPGEPbIEO1APGKHMT{~D%IsrQ<&VJbD+DWVOY6DsL`^6{4 zx3?$<84EnnnJzZIPsR}d1N7#JgeJ-@?5@I_77JHs2Iu|U?hsY&#$I*VPgU-FT0ElH z?e^+IkAtqN5u-;Gv1M5DgwmV3NKVKd*iKq@TV--XU%U?BNyY*;a%2O36V-A7(^G^@ 
zXMMT$@u*dt$wEpyoR}c)$^iNolI+z6E79xUJ3R+0Y_~27F0TaN`koSO0&&Eyu`3cv zN>?RH!~xt33Mo?G=QOq(PcUw8E<82tI#(!y5L+1S*yeECV-B)m4D62 zd8lC!@e;2FsH2z0anW;Vg8vt_%Hwaq6o5@MBIan5#6+of z2!2^ZYS0Fl=(JUfMhhhg6&l>4j7P%^#fgVz1AV>}HLc|JBo%R#nME&Q$ z8l$Ekp*ITf9v5ByWC}8s4CB-=c$*_B+x=7gsL-igI@P?|-yA<1zv#)q2E>R#jro%& zr+wd~TsgqoWK4?fUUWQA&CHzz(uXLI7A3a8&8?H)MM-6c>Jfzk7#r%XS8?2}MN@3E zooTyN%I2?IWf^DR9RRmCu(h!zt11F$I*mC&z|Hxu1Je01uC#O|+pp>1ctkGV=&w)F zSdKYep-JuuLa?uDwsnqY1xb*8{|e{KrRd&;pa5gPiKW-+SHgIQebBjK^68MvpxNeo z=h>d&IQ2X-bKOw^F49*?9IU)t@A5vwhqYX3bA4gRbM`}idP4ykTpT}u&UlK0XaOl> zz!>GKxQ{g)%@r0-t|#iCZ{8Y|Evs9FQjXm`%ev|^>~7hdLQE9U;8NAalc6)`y2i(^ z1T!Xs%b@FUBEuk=;7Qq78g069ZIjUq0xq~~oE#h5^S>I`Ofh=~XSYxU65EUR&LM-B ztGN7i3!^Y0j%)FXALcrySqxfd;-o_4LxZwGdpm zubt>BmhXvn^w9S7Uj*I94(XA^)u7ER*WgIKkyKgG$*nnM*|Auf=N~uoT5(bw4!U~z zkh4xpjZE+wVvu<$VgImi8mKq^P+~^zmhnt)CmKi)w)q?(0OGkEu$M?Lq zg%-f&jc<*;8=shxo`13QRaXv%7iDK%)ghYjT@soU{YIr;r9_iB<}O4tRYXY0+aIVl zll6Xi!WP!r+kFD7VZrN(>-(LdoBH#xQ~BEY?S&^5na_@NJ1APP9UspId9iC2%4;j(X0!gvSrL z_r6<8A&Bkxa5f$0`X4cZ8C<|MFpr@&5!w^vudfv++8+V;mnN=J-M?+C`Ml4N&B@JO zC_M${f;2EmStHbz_XT;SpmT(~RQ1=FajHMkWWEOWT-S-6zJ2K|jB@dgLjL|xlF%*M zwx)EPQwG*3_xV&fL+Z2bhuutW{xAYKRQ-WwY^J2b+ge5@7w;e(Ag2?QN>Z^T9w+sv zQR;;yb~h>FwuDjf-3fUIT-<^jGX?@Ja7I_CW|-=6E{YEvBe!2MZ(a6(di#lv6}y|x zG!etVDt#`M#CyN@=PUgqpj=qxaf(O_-}a4+bO52?U1{e47q|fnS9G|bQp-SB3@$!# zy4o@WofFzZ^W(RlTm)i_qH{kCN`$?WxU@70TvFb}474umxLf9$4P<5bAqECw;E5Ys3Ieb160fb^QJm+AZc9EtCUQ zxk~oo`ZsXvv)u%dS5?$K$azK$1nM~`t$D3_(v*@bPA^VI|uj_L~OZeGI-gxmuKH$B8xu@)y)C@`Xe zaf%{V1#B9KwK)-`OqC${ET^7KN&5O`E4I)sCxlO>f+KXl%S_uwHGAbUi4ZqvWW&2%yItV4%_w`PzXWZ&O6WcLRA4iAF}#{)z;NdQ`fD zAV@62hG`=c>zcdKbn|<#N&iif5P%_A%%m6=rahz`|91#+#-I zpj3GElvPOAW8r(SaSOxiv)k9wH76 zjYnUpMfY^h&ha<~e@TB9GHmAW9N6)7va-b+RNUXJSl0$ZVyGtyC#c7ff=iDi;t)=z z7(U&s2Jf*F5LE?+)K1X7>~WoU?=8@b>s?6!Gfg%azN%*7v#P3%Kcm+}%I_|aHF$Moi5by9wUiWHltYS;I?L)q|65VN8`5GVi=8 zGY@B*zg~WqzZbS9It`}9h&2%9_#Zf#KEZ{|-M&~)a!rRKYT0nw9=&lb|C{k^R*WVScn;e)mwLMei8SP3d$MfrKt|b_nGqY^|$I+G1nPU?cWcC%EYKI)z^N# 
zC#08n{Ig>CyIraB;e!ytQ?*PzerN9$(5}IXfYGm|SIq{cX^w~_ry!l*h7_jpHb;g--@I@@=ke&#=0yGvO)K&IeI!oYZK+B-&(aPI31M3IT5$xE%ej5oqvA)twu@f_I-v+vekDU02p*7?!_) zdD^o$esCc_Fr9hhGWkTwa^peN&3CM6XY%VFeoQs@J!@Mg{~IU4En`?XkZrkQ+Yg$E zsAA*U+4b^<5A3T*%jo$X+2DgOA|Do#PnZ;G^*Lu;zk^tXz$Zf*)LN9tL~QA#0x4~$ zH;n}hqx~>^qB5wb=D?dd_iDl7&Q6y5>vzx-Zs_oyisFFVP95;KZ(mP*(WHhr0-oiD zz>USd!vft`9+>_JjMU9}ven%G779h>A-OB;&pHVN5B>P`ly~y8yyUBnwuQk;>2hQp zBjy!kS+VvWf|Ljn28~3HIc;lFv8;=p3K+UFoe@t2kxA*-jH&oSE2r<-WA0ruvnaSC zuf%#lSZ(-Z9Wpax?)8F&QD{Mg0&mlBw?o=3er%c6i%8aqY4Urq^xxi3%Bs7k)GASWg)g%$AC0Xu3`$%B`-$rvOixQZuJ$;j4|TrK3G)qo zvu1i@oum{)<;j}ZZ|~iycniUJK~b$Mi(g7=2l!2NbDsa6H~(nJh|1$0c*8I_N9G(d z{7mmzqJ8~|<^!T*_Q+r2YS9tCw7Bub_A}Web8Yol?Pov0mJ&;^YpZiV0Gk!ruS6_@ zBFr^d&f+S#{*bt#Yij*)HNQ;WfPZ3^-t$L?-H#zGQL%fGTlSlkiwcyY7)YQEOjuxj zG%lzdk3-y~8E`}Si8gtWG5of? zu=pqsjOa?upFrQFMhCrZaRc{`Exi*{;Y!0DPIqO_aA9N^QMsK{vt_$Ye;Mo?2yE5n zQ^kSFH#h6TI+MKDWsxBD)=1w-PA}hyz@1A|O(t5QH|8*1AKGnVsFH*}+Vn?)#fcAB zVb^6%N4a*}_153yJC0sRbsH+s4CCrI0As&>iHHYNam%X~Vd56@-hsICou!@EZ{e(% zH#sA@&&%AM)5V5yk1v4X=Ostc`6`R!AB6vwCak;8i=!(}7=BeZo>gD~MkH=RRlJ4^ zLr&SC_&hA57Oh96ISjwaf6F|lk-@%Ifv@e6DOpcF31-u9be^qG3E=kgM%)E8C}xon z#h-8jJQ6OF_WhmZ?qbx8lF(@k=>&|8rp`eU8uhY67@)&ov7%TV7AR8lSNHbixuVpQ zYdFalySKlS+s#Rn%!jW9L*qDX2sbNlE{$4U3^UJ$-1<>oDi_Z8B<-c;u6Zc-=0RCg zJ16xupipgtP;T#@9OWDwPH@|@I2^joY2$*XY)e(#_J?hVFX{wG;qXjGAx8$pGEJ)Pc>h0{26%%-qTa;F?Hb?E(0uP2x&Jl9ksk6gc!y5j-_-S}nODSul9FJwyM+HLy&x&pVQc*l|y zFNL-!_R}@HcY+a2pU5vfYpr-FGVD7XLF++M+wfybXKzn{A-F=kS;-^b?;0ClC4k{_ zduJkdmC9T(SY2FfqNh)=ID2h*OD%j>jIsUX{JkC-o+VLP+Wg*6t{a=$*tM;@gj#-x z4ojL+m(p5i_?rTu(=CFcM#W-2S_bpT(bH)NEv>pLb&_lN*|mE!R#}|pUA5sTyW`5A zMvlPq{;Csp$ABh@4sytEx{G5GhX;KML*$;&{3V9XBd=%?TvD31H1dKa$tdzkwZXyN zYmXr?8DUe>g$_$s^8`X+T0O@W3rl7?im_dCo~dWC1vS=jy_R{ywEYf~dwk8FXvs)wDU1E>HX?D%4o4JqQX*~ueN1zrgkl)Id20X#uz zg}J>uYR9r87-L?N&UEDo>o0+^a*(uLf_62RUfE4}KassHF8Px~k>H3`=5I2GQr^td z1y9M$yD)4XdId5Qgk;ePwFHGs=YrLlEAMuQWiZG?DC+lD2jdQ^`H7nE3SkL?XCVU2 zXZyPwg3^K;MmQG*`-8}l;z*nt-{O?qm_zXltWLtT%g8jjDeR%b1iC(G;S%WM7mu9} 
z?aJTO4~r?u0+g{0Bo=kUE7LpIRVV-mSq{_ESh5e=F;DB^YNFV1hfB>K1zT{*!Ma}r zT5?3jky9G5{A?1da+bTxe6mi<$F$195{kDg2d#?`OOK+0>mDWSnt&qSgx~u=6uH) zFTa^K7Mn5NRvy(3DZb(xkkhHHby-9|ah=oOM6IXn?L^MDxHK&l%lBFetvdQ*w5dGjrPWOM0y_N~)@v$%`Z_$#1Fm)U@1e zTY`rfFb|-UXtcKSM-Iki!y4@$SAshbq+C|AXU<*XOUS*$f|sC_=2Fn#k*#3Kzby4i zdpsbQlsndZKH?Xz#QdlHD^bhE;kUkn(j@L)g&0(dC!{Xh-s%R$*F*iqZetA9M88E` z9E`Sv8x*E3YtEkOQX=0mM_>9msb$RrHI@UL-BwGI$yCUd$}~CQZ$uk9H^~`kQ}5Q! ztzh2RbG<`$OkdL&S>xl@DHO+;w`=D8N-&XB#kDq0^(SRGKj)TkvAoq^Vl>(y+gt6J zu0V*n_jJVJKv@U@(t$>IjQM2z{&p#%W|=lID}Wn)zx=iMHGsX}uWSlsrg%L3`{dz? zy-|rrH!?ogZZzMryEW%yE_>TvV@NjU_o8DXxEoL_jh&bnT ziFiUl(d|gsyW_k_!!HL5d5T$ITC2`1@Hdy{mHtdxG6;EVe_s3R?&ZSWcQ7y2+Mag< z=;%00fkgtP4Ibe|7aVs{=oZEtFlE*Uo2~^e_?phD_7$nDVme%eLFpLERs*B6O;Rf@ zdp8Z4%^G>bRQ(FUU8XqU6y-VaZq(vh4HzRL3=X^lcOOVbRF;ARiBj}scdX`W?935+ zwNg?{OdchsrBs-Bp;c-7^RV5^$7R6hjBEW5d2;VLR9tSX`aHRK7bN94<=XzIZu4a+ zR3zlb>|Z5 za#;g@O56LG<#dPhAyq}f)jqp5(-7Ggk*!UHawd~FgisiU^U{p`jT`MN53D)|%yo00 zcNq3XXFyR|=yl&*i9p0D#VS6ZJGNtY?rU8ly zxmWkqeFknQn-Gw`tlhZ&5RSXTh&rY=S?iiKL+|;Z{C2iwMT+@MHZs?a>&*M^D$mm$ zU!Ok_9!~O=bV$oP%Y9zR48u7oJkcCbI#}(9EBm?lN$RRn%~!gs8z)ZQ;{zMg;ig)y zDy?v$&C}2RdVF^Q>b-bnT0t?)W48F94dUaAb^;igvtZaZSAvW%r=7x@g$mhrhP97J z-;8p+h-!b$S*1D+#}Fc^vxs)1XW+I=}8- zFICJ$sZLpYdFj>LgS2D&$cQq{rP)XQta*uZUy1AGX>hUm$#O;_IR`m*>+399+h^n-t zc9dSXqQ{TAJ|w?$3=_6vhG~qskiqewBJmLdIs4%T|0xm zlgYQJ_{4E>Ir(moq-p1mzIvIROF%#GETZ2OZE%dgS@t+9(D8oCmwZd_=h4G-&THB+ zd*<4+JXFih`i8r)=8;M%2OJ%f+0MDQ zH6V4huDp&(@kqU}J$S`S$M-T%q^p|F-)QSm)8PjfRG9%s z$}}Jm%v_t~$OkWWJ>i;jeNN~AN`WxlqHM8!|Ofq8>}^#ndx9xV-w zczEZvv(oRKU%if7inGDH23wDH-zf&5!oEGOV|prSpIK&b15&M6`?&q z?8dZUKh+}dhht}K%V<@7v;2L#{@9;zdG^IJL&3F;^xy)`wGl=FFi0$^Mtc3bom9z;k3Pg{Y|@Pf3T$TDHUzLzl1a7@hR&wH(_ zYmOTsIeVr!5$VrQ#p}ao0>f`+{9I49Yq#v*BHZ`A{U+>zTIr|{;;^7rMaa6y?7-&i=FwNZzrEZW$)`sJ59e+>^k{ENe*VZ> z-WJ(U&c5+}9F>(|b{Q8GVVoSr4cU{F)ZOgB26!64Ho(L}XPyS8MKn1ExJAm~8UGZ@ zQ{D*KnEaL-%6!CjqZdRVDX(K)7T} zuW#yGOl0#Oni7i0B-vVY$~@s3)I*k=l(T}5#kG9V&s457NG^cb7^Op!e*UR+1|}w! 
zOwoAI9A4YDBqx7WXID^G#Vc?0(u^SPck||D?}t)@L9ZUy z3$UJ@=#H%ZqM-g>Ytw1Z0{ek%crTu#OkZeIr^w^DcU*t%ZL7y);Zzab>X{c-h6K3q zsi%msb5YFwFFYwP=eLe`Md<2QQdJg|qrY$@2PR}sh+04J0#qKAhaZ|{F=^w_H=@aM z9`%;#v%XAQy=Kk7jMYsRJY3=2BFhZsAV*oY4Q%*}2RUYJh$^%9?nlVq9o_Iv-eEs# zGrfY|0E57%tX9;RC}5ttu-vW(T%P!`V#Z5jht%uq!0udcWh%v#hM4Lk_v%z0#@NR0 zat`X2FFh6i@+&S%l(Kcl>;U-epmnPIM4s1@vH3F@+U}b0jk7T0d=7mpp-t|`p81b$ zIy=o)e%IP?d1GH{{7!Xy<~s)ASo0lgFj`5zflZ?vt^@wwhw)Mg<0Gd^#s!TQcyK8i>jM_KkGaoApMjz{8kV;ICWIw{hXSHy??8b-mn#uE(wF>6#Xw$|;sDn3}_q*jn z1uaXg#q;|^N~&OAMUum692Xv&-H80QD>wf@EBEb8So0OlvRP+Ls_aZACARrwtshF0 zA7yhe-Kk@sHudJZj9(7;ig;F`R8FqZN&AxDs6vpZ8atTzmp1Jb@l|G`GK^ob9Ei=i2d}Pxy6Oi@)8F_Wn}FS_RchXt-UP=S(&=Hg_fmO9_QQ9w z!+Ux7ezcGBF);Jg14V&vAle_ii}+l&5=Lyz?7dTd5%X&K*Bj>?wT{H+1D-7Kjj1N1 z9SvLmWs(Z?Bxlo$ZnDBh4hC|b5hy-P($;e5D<$|a&Wz4t%9HROH32pMiyTUWS*Vaq zrIOV*Z;vr!%o&~W+Ew*xr8&IkWrc^U*J~fB?R_r`0~0O^wL>Q_6Lh%WOAdn%l&ArS z;zZcjlcwiu9Dfn+!&?n<#(eK?ia z=BoqfXb(z&2W=X zeg%G_Z9sA@@e;fTZW|deTZy+Sp67W!(Hu^Sd>`>raws4vRMa#T(W6)fbgSMEs*VIk z&?FgBnGA~-0Bs=J7ZnV{reYxb`P!aCdC=bPV8n##ib!A1*^X-vNiHI8Jw2v2*jA~o z_S#yOSSo?&Xf2=?OWs#yMU>WH=+LQ zP6yI?+U-(*@p60Y20F8Koc@tjOyuoZ3^T8~MFMFt_{^RMr>x(cTFwtKVD_49wH<5p zb@XVwzE|MnNE?m7RT*p?EOv%{l_v&eIBD+WBMcV6uD-$edUv^Ipd2{rCEAX$`<3pO{RoNcD13qMieT`(OjiiPyjW-8iPx3bUk|S)c z-0Kqp4Wnsw>2Hzw#fMobW~96{sDmfk1XX_iON-bs5Gl*CyH=!1^NU5su(XrOl(JK* zr+!b}&e+I(tOQjXksrU>v6Zb1_0S=9GPFjgL>b>YgdnhAoB zb>2qYxf&B8xhXG|wmtUP9+oCTbYmLU(>Fx&7gNVw;mKp|Jv&@y(3sX z&J|0gHKEgz%UB`Bv?dqT*KR6&o?Td^&Zczy;VM)pgmMj zG>q7+&-9&C={XXTO$`N)WZKivfewM>NoBm=XBITkqNsvG2{`CCx4w2Pr4WWMKnWRH$TJ}*nC#ECM6>(y73ko`H?uGKLD8fH z6v`UMEt4v}SFKbO$P7D}OVEE7$FaQ$&?sN}PP&RU{TUR|N<%Ygp zd$_;kx(`B3^?LyM==ggxuZ-29_zBhigJtQ(jCW1X+HvAa19Hje;LdJ&PKwW58D8|*4oJZ0eu|-*)u)qJo_V=Hn`)O#stgKE@e|gAi>3tqRV6`skf-< zl~TleX}a1(&YQX<;x1+pgA`(yN`vOb(GdEYTq83Wrs#Sm-NSWMm z2vX14>HX0{Ad=DMu!CtG{o$i`*=M~1u+8&Sr&EVgm3}#Ofi-e*FE}efbR{6Lnc!oL zt5;j5^u+l?iC6uDWkVGJ2BI|IN}zXh5FF|)J>K;v0_`MvksB*dI-dOyKeMYAg)a)m(-e!Tq zKjtEzbHT=d0-`r!`rUrIg!7c;K~SrXDUHDaVVNQ{P`EIf 
zVa~ExfHS_xqD9}V8qc-eVKrH7i;oMevdKr2%o2mDR=??U0-6?UgbT$~J@J+S6s`93 z3j<=CFO=v)$V4<+Oqf035}9YA6ZSa$#rE?2v}tHyC{eyc><0=I(yhhIpW^{sFO*2f zNt_u!w(oLr7I+M)%|Rc2j1{a{ftS=M{CdPHQqzLnA7iabb(H;he%{q!%fgo)chmOK zs6^GTS24IMRdrRECXd^8x6WFIOgpBWqnBXxa}2acb(;ycmjTsSOfEoG*tVy6i7iW4 z0s1&;R-QrEoyT|1H(GW!c<{`-T9Z{0xbc~Cg_nB{bsllhzfhi(x5byKpMK#v!5vBj z4f~RgU!?($%`ju6t69s=BMu;(I{VF#{>9aVxy?(ql^XDv7sxo`dXQ%4TwQ1(>;ubo<3{#lz2C#G zHX8GvUj;F&FVtnocngkq@+S+Sx)dxXesSW&j=5BBMgTgSNj{86Tu}JxVuS4dX?(Cn zPsw(_)Q{^F7+Z+sxKEC@+;*R>41dkQ!9f6i>2#ao%~FMGE9egWj9RGHH$c!|kQpS!yeZ#z;FtyECLWZ=XI)&9r!t|0cK>^__kuxA zubs}Oa|-&JbZ0h5Uko$+h&ZaC$6kMnVB(mTu>jzcbC7|wp)nbedxj`ivYh6>@!shd zB~R@;)aI^&>aK?t*$dlMHZOXDZZzu@yVsa%Anbp+qS04pfD^YmSf&Id*x_`_Bp8+2 zi^d3RbOiC1XPdAqC0$WZC)_`C5BOqW&wsde2pP9r7#qD+LiF95ID1pBvw)^14 z`z!)KpNwo6wj=Ew@uqk1a&u4{9mxnbf;{D1@KMwBE16rsDT4126&V)n}D~E0@zID({~I zT?@enLB+&q{(rvUdm{Z8A8|CWzpw5!BSLUU(LTg`PuV~dv`{l#3kBQ9O z*PCx$iNbL)6a4@KH{5n5z0Rj!IKBAwYY^pi{lp$*oB+iJl=^C?N`@a25Nt)$%?@p9 zPLPZng{>QIZEkKx(cfTzI8%#y{P~l(&R*dD>gQ;qsln!>w8~(aTFHoE~P=JXo>v7YHE~S4Quv8b*`S=W8zBnL!&{^@)~|- zu-@MrpCevT#N56WC=BHY(06SWQnS3*qSWm(`wnkUy!@Vv=b+*ZLxUH?+zKesIxJin zeuBCL$+}wZ?`bxq4FV!-K7k2WUrzy@aoBm_Gmk}kGk9o=7cpCY(u^_dtawq(71;5C zXvQlY#6W4=cMxZ+)w?%+Q@jji-SIB(@JDr4zJD?VbWNj8gjcT(HY)ef)?j7#kRd34 zNX)Gdc0-=pPNTG!6h2We|j+qUeguPg8dap$@G{}%fop7sYXtDmR^lMZ>i(P@*dv)*mb$5Vq z#WbbbdI9JW`|2<#m(Y|#5b~Z|2syeTSB^K+uAEe3j)LVOc)9*Y~ zpD};0%Ix#QL*PLOaxTuKy^fo56er&drTB@1T%EbNua0-AmzQEPW0;_Ws={&Od|<1?F<+i(*j#l|Mhhg6B< zqh9yEgAV8#rVF>}U%$W(wzN-4-H+?e++AHNX$<nW(JD242bRqm3W15bF(&bnkPsnlnAe|xC{WRkdRliq`_3;Uxsg`2Aq zL!6dW1QQO+U5~N5Mz>v|a8~o|$6qIG*n-m~G444ml*HyoCs;yv2d9GP>MEPN=FFaL zO!xY^%O{cnwfN+H!h97N@8h4A;Fd=ryQ>mhyTI(40MP16j&w#c{hUp}*T*2DRj0jW zZ@V;7d>gE=@y32;rO-`smr@amdV=98#bwA>`<|&Ld6zZV;#<9!L{P#iKhsfVskZ^xqE@JEK3p@N>6F&G z5=!432;E+nKal#^S-s2)s#%SXt=^o9rh@JEN^v14~N zp2!A-PP}~QgW1nZp+w+bSG8*-fD~RF*%WXreSeyifON< zcjFG5t>*W*Zh!qW2z1{bPg~)jgV^b;s^Cws)t;Utd-07!A4o5jQwVIrE17)PDge0c z4wbx(?T+KK+ 
z#wQ{zxfT^HuJO_sba9zWKmDnPfDlcGGC~wu#($P~tal+f^FZ8*V8`&M4>=BFokyAv z0au5jMKTZIoS3%`SykF`wQ_uNdFqVhE9W4=GUJ=W8GsyB{|set%ogAYFh|(dw0CxT zTb|qlG-CdcU?1`$F z_PqLIFwKpyG)k;m0!aGa!~n2%UnSZc`1*ml=Zza33hY`>U@%aP-Mlr_!!t6NrqR)t zQ+yy0DuM(gmi)?Gts1+G<;o!d?a``%PEK8R%=zYgoFEeaP@O#)2}plfK?5gmY=4A0 z>>)2aXYVprM={$A^P(e*0CB6>xxKg?XOjCZ`c$5k$s%Y8l%%ffY6~hq6({kCNO$8} zRjft=V6HeRg#I<0dwVck*Y5$O3}GFxDLp;_+D~gKgti8Pjz63@jO}LnuuC?E}=YQixvO)%{mV=At!*8ah2si>XXYOB{dKY)t&c!@ftR}?5#7di!7*Tt#V4uiDn z#{wvZa?vNs25ihOiR(Qvy6$P#(sw7~CJd}nVIHGEe9o;4V) zvkXA_*`PEXC8D~k4r>!^(l{umc_0f+&8@q(>oo^`?_$v;twc^a&r?z$znpjmt#Ww2 zmbXCT>sI#*RWjr2cc@Qr@D&k!LoS8Tx`#1B_i6xH#S&=e-S}wxa3(H3<$~`8_X4 z1%iEYoFq7+odZB-=$zF0fMJPtHXMmdT|vS5HwTbr&_9dqS|0~Mb#>5l70e)vR{^2S z0p63?R|?#nBLy;In4nWG=_6REJ;M4tG8nrHc=eLf^<}SxA9KP}DG_3>iG? zlt^xqS|n)Y4$^@jr0u=KpD$LDENV-pTmVi;W5C0xx}AQZj1KaQRv8dUgFZ4o$a-;5T{)Ru%?6rz+qUFI zo;Y~iMBT6NYS@F5Ad$h{6m{UcWk6GL*H9tMD*H4qt!Rz)gVoBBO4B}f7nkBOO)}p_ z_!*z*R7+7UDq8?LGwZR;6G*xP=1zw>C*yTNenLqREda(oBDs8_71B){I&iq=_pgys zODnGkmUL-NT8z+O&n9gIa`>QR-FKDhXndZgPr7?s{qf@W41;e3{wP*Q82?dlLt_Ud z9Br@-MeD9Rzy-y6Q&3<&L014tp5}0#{Uaj3hF|-eurk1|H^bW`L-5@m(5(P3xZwMv zNGoK>xMF+v;d1O*-w}WZUqORzTZ|!9=3U#8j1Lcbj}!E|82N-_h=3xH4zoKt4$xp` zMCrH{wht*Wi*!G9ekW6ZsgQ0+{KT&(w6rUb#vZ+;(!@1z9|SmOxYw?UMN{rP>G=R| z;_tY3fSiMDUqG6Vwm`lGAjeY}&`^5;?n6$D7Zthl%QPhRag#oTexBUW9TF^X0sKc6 zT70?OU(>w*36KPG-H1;(l02J041gF2VF0(C3l`w4LjX9KoUO&Na8IV zX9V89eC^XOsJaiNJ-)ul-nKTXBJn>g0De#1{IXVQqj8S~zrf=_@+&D-qkC3!`W!w6 z5%UF46Kjo9MZpY;W#p#i2l zLR%k30M2|Gq%JGVwsA&RNOjv|o_EpoJq5WL$Hm)uTJCVg>HAs0?LY2pE5t8g2gY_x zbdgi%#vN%E9Kdnk{u(&msZ-*rU!oU713G51ytfTjFD#mTpa3AavHA>&OOQi=-vfbG zUGIZdz%ggevRjinW}A46p9W;X4ik$r-%yaR>=ty_*yBi10eqY3DcQHDbBe#xl>X8v zvmCkkxzcr_7^KDU`pjV<*ms1`iI&rew>$TsNhas(U)%q|k#z*Mo5u^QtQo@`R{E$T zMH3(11Yj=@L6_rIK_y|(6ZX3SR}(|Svj#kk`n;3RjuV0}EU={8b^uopAngK-&5%M~sl0C!TS2kd*YM z{NFpmQFS9{SLZ-e&3R}ZQ)GJcM*+Y?hcbs;J}1MB7Fpp2$RN(<>s9&=K|--4HsHb%kI3|d3B#>zkz$N~kbFD{?o8j`WDh zlH9koj{I4FmO4^i(%;vcq_@RC<=fL@z=7&lnAVkhz++;y0)E!6{DoJ?HOE9=vDhnV 
zL7)fX+g_5+ypyY-{hRz4fCzk04|)bAxF&z7RL{c!KkYLOe4cK}T5%yDqMGdFR?w`u zqKf1utZJ!uN6OFhL$ASKNS3QfeD_X7ll;`Zof8EJAT{C!8J#(0XRY|XOv)g@Tbg<5 z&Z|5Iza)vii}0KSkW0;-nxF#Ood&*k#D}}Zw`L|LQ?G6x3u*`oT0L`VcU!?)MPE{2 z4&Ct8&o$$-{ORj>ns*z*vD<-=EAMzo_} z+gwaFvh7Pry@`hb@7i|#$vzfXy%gaqZ%eFR;eC$aOfpm}XL@~NI(=++LQAc!MY1`X zR0p#8EG~)Dq-f_~B^STUrF1Be0*b5~8{F%A;`5lngr1;oNrL8!3iwv%{@Gt2GrxK| ztyj1NWTelqRee01b~NwXoNRtXJ5c?x;j2WDsHnRCP?a2rHt-j}Q)8H^g1)sK+PphR zJ5G3h!>?ICb$tiCGX^YDbjAJHndXi6_kJ?F_}y`2YppVy{&@DO2YUU~gPCAlrR zC8Jt@$4{m?u&QH*%&+=^8%{|AQ5y?{WM=K|kR#9g=X#U1O-WClCRMRmd3g$!kOT`A5psOG%>Yt4{iCKnhU`K=YSa)u$yyr+l3Lb`KQX z%*id7SO2obqNE|v!{wcO2NuQUZ4LUAjiVo;TLswD^*E`6KVI={FWyZUk!=h0!cbg;Af+?s#E18=p|_)B`i%)g}G z>l`G`u)j+36rL`!_2*K|j|w7Jj)z;ld{PTl_8yygbEum0&bn(M)zG5&J|-Q1EJ zTF$+1wCUv28uE)O5opRrs>3ro>EkN+)d`WCH%Gp)^?G+?z(at!X)*M)?nRU8ksI0S zJnK8)MFc45nmd1jj?*bPJA}tH>hRMi2x0BmFmp13f)aL>8!dEzCpa`WIzP+l=Rr^K zMn^|)yG%L20O2rZ=iTC?^y36;<^+wY5$4z>u-(t(eL(|C)p)Co|SaO z3UPy1olFR!B2M=Q=^rflhTP*AUKKtCfzKy3HNIOs_x3d2K!XmRK=)Aa%A@NPkU9F& zGfG0OY6Yqxh(++OYRQt?p<(@9r z6s9l1G_OyFVfPP!Lv@cnHN!^N1q^-#K3L)S{N*)IK_wmc+R~ihLQ%L7wYzhSE%}f% z5Tci6TDq%7EI#Y+3Fl@Vr-VIJM5`Fh<(-X2pS>vF z6&j$;H;DnW0-GpL4NB+!`-bzYy)tM0(a;7nqsEV^9Tv>gLm8#;Hw* z8**2BJNVHp`-2VYc*xYbqv3TSloqp9)PA&1_H&G-M1q)@ST=a^Fz*`LDA0NgO`=9_ z?0e0=_J*O*a+EY1&MS14T%`H#oHw}5k6`zK#3ev?iR1e_BluMVleagjRPv>unfX}P ze0E%;u5u=O1Z5id#sS3Z%lH#U{vRwKl&H}2E`FioUiJr#{740}>?@H_Ogt1f-6YR&zY2~P=h7kX90g?@)JbN1bd;lI zt4NHKiXiKE<%$e)&DMzcu6<_g(jX~5a(Izm)U$GAzrwv)|w%cXMtIdM<7}rtLH=z|?5fT5^yS6*)SNc@PPs9Doka?RakL z*-umm@yL>ghdCk8%?(sU<^#I=A=f%4+#M3&T%;sgAl8hG>Xl~E3b|Wy$c=7h=&PGx z`;JlK7bYNIf(`_xK2QH0y&ER_j^r8rK3E~mJ4fd_bR`EHT&|a?1xoT73YU2K@suEO*;9DT!yFc zSprCr_G_{k zXPk9!ryB_IMd)}jHm(qKVO>D(v|hQG0VHb<&g!?{kh^TgEpNN&D|$3#bSo6+-}H1$ z&U3L3ec6p;0<9G{wMQMGVI-2qihG;Xpsw8TA~?YBF$FZHDZY272+n19dXwWwPAQ-b z+D%XYusi&g@7eQZAn#R%;kNa_j*pf2VN%aA<(Xq<5xHXEpp?E@sM{4 zNa|ozQ#|cXLPw_uCFlXZq}NZUO@l9iCnP&5_0U5RqDax1tw2a1D3l%z)`W+KbF4k< z=df=R3b@ 
zGd7Zt%8Y$%_;1@I6{e&$q2RZb4*~%e*S}W)Muw9VINwp*JXJKL93c$v0^9kHx=zC= ztKo2^J(Q?$CV?x%59R{4q=Wn4sL+3td0QNWe7dIVHE77UrOSp zubwa*^I(J`+&C#^jP35-GrcDY+wz4UodaN5hn>7UIZ^MwGw)*sU$5iv#h5izM}OnD~n z`s?fVu)BP2c%0b~$nNLUkaXF;?uOggZ-JSEH5J277TwRvUAMT_n7iEz{xA%^WZ2&s zvwF?06X?lT$c^G*%Lm8E^ToVkn&s)5tG# zwQ`KuNj|PDEk%IN8_rPdJnXNW%qNGOv-3aLUAMY7@8A}-?WvOH9!l?1iEVgTfF>~^ zA9M@_G$*N@gMHAR{PKkHy5>v2|_j(u}|~Q zQ=2Bh1$9n^2>x0FxC_KSnEns&NL}QzvgXZ07Mtqia>~x`rDk1pumaA(|;uxl@7fK z0EdE4&wcgY-?A@Lum?h{4B64|*Ks+CZ?slEw3#Zp1+MT*waKnmN;#XZJd%Q|YL%C2 zj;v3J9VDg0)t;EZUO-4Figrn*J_x}c&VECaH0Z1OHc<~xVAi1FrC6Qo4Qnnjp+{0Y z`JxHlpkg}ajmwT47snJBZV$&cH#qP5`VJ(Dazg7E2diAOOmhcYoqc`HZ0)3z#ootu z1J4KuV*sO+`_z|w#o(E5i$QmnUR$Uh`GK*`hb7UiYn62>L~k-tAW@#!v`Z%8>%s@;Ul+sy${LxWs?&%$hpd z#YNlL*f@Yj;ktwIj#3&^?gssCAvUPRnd}4MksHYY#9l_I4P)sdq0(Bxh9^+rwNW?q z(XNK8HudZ}-$puIYrH3N@#4jk?S=ES7%X%p8*6P;doB0jtH^+w#f)1gFs~RR>##ce zhh6pTM6v}#A!sNqhN_qjBiotzwX@{U>4`rKMV*FtoAjrDYd*|HJ`hwZ*`mbELy;wm z&lprkwdF6Knnq7x*DSQ8(B-A#?w7%6|q}<;evoChMX(63)@3V2)W&{Ke+n$Qc*x{7)pgB zE152vPqQ}MXI9D5ga)Y?v9vTF7AVimiFs;D!eQ#<`}AOKve~Uh_|m2DKB1SdbYVxE zgXoGUXYW-7h}~&{SwlM~f$)EfZF0?D>;;>et~?_MK0g4lV2tz`%`EzpCF`s2Wc(61 z%tv+;-G@hia9F#v!ymbom8y@L5%Biv!KK=PV z&EMxl$xrF!(~-iPq@jm~M!`NDIFbpyFDwKYigs${iw<`R_A5=ue)(VXTnbY@DE`gb zaC=X5So{1Mrx9^y8cmu;J}7xo^$LMUfir(433ljyCi({xX465Wi zJyO+uC;`dR?FVgb#NfKU`TAOl6TrjILVfuZz+v#{trA%uQ$WMD5L1itVf*3Bz4wDp zV>-tIw#KVs9}!EUT9+xvHc|&+b6fuD@aNtmi3EuqKDfK*#1&>?j|4E=)&aw9e42hu zc_bhi1-4Bd6RGgHaB+I!uSKPVv0|#LrOnLDsHq+)2EJ;0a-|)13*CGur#$n`mgpu4 zj`Bz!@gClYH6q^lrOfhg1Gb>ZU%bPh`~0G?uohjI3^zQYNrf3_jYO`-ucitUxyXms)QK+6wWkX9(Iamdv=}9&s}R0!;DkVR6hJs!3LwlFvA9; z8i*9C4uy+q@A+zuwwnT)#u)jrMn^E%qcJ-I4&`)dKD?tm6Q`8_hymVu12HTwVZ1GN z?S$o#R;GYL=p!p@Iwf@wbIQql(4a4&#FnVm3N63iRxo8AG}!=gy#urQo;DBpd!Vgq z2=nVU`g@yu$K|T|+;XW13OvXXkD}wBln`Q=`vM1TZgyVqn*S6PPbo+&lw~Vul)F~% z?d@Gy%sv6l<6b*Yi)T@uS-a}(K?i-!5Sj9+6B}fxqayN8G&v#N#ye*7t zwD>H3q=?(m7(C=LfG%JbG9(9}{}kH@edGtQXoZH;l)eoBw8M}vM{x{XhOnl^MQmF~ 
zM|iXQ*1TTXkn{hTV))?GrXN3|OugfxY26)%Kn8{19UA|H$P(N0l>2BCY`GLAispG&UY0pK&%nrYm zuvA>;siw7mqrQbA?_KL?Yis@fj6cI1QpVjC*<>L+SS42NCAk=$&JC1ds+gUsy(2vv z&}gRg=l-1nk-pRwNsx%@a++vZo_Kp5jfQHQmXfsTtDVqMIhTMIcEb%`F4H*p|2-FJ znBUp34yt8@N8elF^ASz87YBnU+SQ_07wrO6!%)&ti8)HN!-nhNp?rY!xy)yvB`<@s}^q2u@dvw-uNO@J8Z3x3t# zm7{rXEVNaD7ei-JO1ey6y>oQ#ity6c0md&y6nVlFz?9fk*8w$i}UFA8f10T{C$6tzJOyuUbNM$xz;$tx^oczumD_ z69RVA$K==^w{Aq!+lzyw50SS_=Pnh8p{_#odin;RF}I6#1La2cr@KTj7bb2vdEf@( zSnF9Jl4hkOGx|NeRk)ol?2IDfM^GTJ@Cvs7V@d!w9V*E%2){hHc6{GWb&~7ipfw}? z>CA_=(qG!u5IsS}Bz<|&EuabiD&|RQWsO<8iNnb;-}@9D=B1=g@3Iv=tIV+Ih>~kWJS#z=x%!P`++Mrnx$p$s&6_u3a7ZMUGdfKfrXZU+H z^va!ox+q)?p&_VH5gR^Ndu(7gni&_YuDl}U2G3VOLyYg?+fv0=s7;v;qeU79>p3s{ z^HD-jX3&et6N~J=uK0|*6F0BIPCjmW4$fARzWOujjH(uhcA7#1Xz}$`pi1)-_y%*9 zr)DDlbCY!;dir|qJ-Dl1x%AlnrHg1V9?k;aV?LO^MwlDIBe^jGx<{;v+X z|ELLMTD}wI`zMI`y~X3XfQJI~n{+_|jbJggIzmx{ZSGt6L#?EvR2(Jjj9L@O zWgZ50#`Z?bTF?zGF(3v@ZpJJr(NC%gB}xCMH^8T4sc|D!I)zsn=e{AI*e!AUTLN6T z=M(opR)}8Iru&Q)>vLuLe;TtJ3K+GJDaTjq-9r<+X_3K35Zq9SM${5eWAArxGvA+G z5ffwVkiYQX@q^}0qZ%-V4WwE|k>6&%)7*JTi zOlbdm3}rNCP`y&Kba!WEY-JnuPmY1J50BK}jO=X*+-297K(49Kf(o(np=FNC$FqY;TCLYhn5d;nXoO%$w#|oWk#J$d?617;u)^Zv zsAcb+(LzY0(G)pX@b_8FC2HkC8gf36w%6anGypR{A1aktXxeizX1o}k4m2qRgHYcmTF@JD17{>5q|lU+eSvnT@h zZ;p*ngj^$*U89L?Dfz6AqrCul_BF-Cv$$dSqdk|Fcg4mJV4>WaA5 z_^qYj8&;`D`sGFBXzjm8kT~%iDQgNfWvYLtqy(Xl+Hyb0RBBP0sX5YU5y~?`o~tu{ zFdsg&(cR^UACBcT2mdF02tjc{y;epA>^fd9+?l_4XG1udiYFsUYw|NQu2Dz%AnUrZ z#QiryF(FuHK0P^iuhntklyCnN!BRj1;V;tDpCuXAg&IOjiz#C_5Kk0N|j;B-v(sWq2)pIiyx^^KX zTkAh8fQ{75V-ytxG_|nRSJg>`29~5KtwcesZZ{uBE6+$XxlUezedI?IwuPO4I*!!) 
zSD9Wv3pDFwbVyO_NNOMR;7H55&+PbfX5TjzCwXZPmji7XDe!aq%TQm{SHQQ4_&ZiF_RN8f@y)C%FsD}Exz2@O|A2fDdl5Bk7F7&We# z43?#I+nasvf%kN2+ByOJXbvei6iy0V5{sTFn2oPRV&*JL8xhE$te&W}2WO$*$k$Cw zBYk`LhV-U`^b9A2(!c6fFmXG_J7#W2le&`a1j*!=I)kmOVQ|Lw&W4Lq zFT)-IAM}adf@hNem19K}NR5Y~i1c8IG_qB&+cSYcrr^o6!gsP{n z+NO7%yITbP;o z{QWLZf4NSL=SNNfD2MtaAZp;^0nnqjXJSqP)#y{g;rMkAmlN?zgpw9FV%2^wc(kLl zbEn{9&V*MU5x7aJL+pi<&!^8Y@2&z&1rte8)21^b*K6*@!Kemr`h3|AvN4RvK8ACO zzF?KE#hfW0I9#yXdJ3SU$k9o}lU8f3n#9Hk)EUTl=VRW%)d9xZ-T1NmSAj96MaM%A zSDan(oF&=}lir<;E0LP<{k$ow`|{(lwyrWf8QBV1=a%qy&vH~oPF2w3{3|#fn4Dj8 z16&jI2|Ffrn4jca4rss!<}xR$|APy)X||nF1VOg4+qfL}IpxEwm$PeSB~!r2qt(5G zc`t$Fv06EvX;l1NuBC-pR71vt-ai_L=TG@aY9sr`C(?<_wLctr26uqS6yG4nZWBt% zE`>hFo?ma0qg-un$W32gA1-1ypR^NN;w{9PuM0j4L&oj}Hia_vY;Afh*Qyy4M#=%a zU$r-}*I~GP+gOef=iT{|o?G`3IfR@rQ@+4;>k-@YeejLI)sY}e4cF%SBGVxnSNnGS z1rcH4WWE#`-v=zF4^N;`kdgE6WpP*t3&h%>e}o*|pGm0oN#Fn?d!JiOS`)Sf--G7ri}aGQf-KvW|NZq25)dDk0z^bO6#a+etHv!_eJgY-|iWqKgXC z`EHS%0f>ukt*>ECzKrOSqzp>huS5+^`_pW-oTuL9CnQk`qU@&9rVBtaydz4L`@;i&7-T|L28*03j2m%FUF z)<)D%Iju_#yn}7f`nS@sL}Ukh95Z8%Iagx5QJJ@VD>|fc>_pru)@!dv2AcOjsSq9I znTR}GdyvxksaNc>z#w_sWk3B+AHilgTBjRuOQW771HTHJI=xIt8FwY0K^R&aZ z(lTq#zInwZrxHK0ki!^Mx_ z!x33?hki6Td%vFLZSpp<%G^4R0m&^Tqb!O4lNQsx3JxSnLTB!iidls@T}S9M?&XQ9 z$??#DPWx{eGV;fM@riy({tZfM;pws?e{(4Pl?swhS|O`~b;lqo47*H5mK-MtI*lMN zWO7D;Pw_fJjB-^$8CAKav-{O#+n1KU)cly1y%y~U~Bxcqefk)G#!wEOMb+z0+r zQ_Ay${1IuyzF%TT0@=sRwjYMdf)+QBU-$G8vE@lCJ0U3?guqPXw}J*_S>7pZ|BI)~ zmnE#9)C{~xA_jC)%JuWZDjyEM%~^1<0}U&Carj3%yv{xDtT z!@>*e@Z%6+^4BG(3(H-WFYw(YJ{>K5y$kN4(!2WwdzDl7jewUrpN{Io67en{yzkD~ zuEKwIWs8$P!y3cot(2_-iPP^Vi}g*F8N=vD-Qv(F?o!aPIM4!!^_-s4Ijm?Wm~0R9 znps%Tme+JT1bH;RjW~e5F4|Cojblxv{qP1%$EAxw=-)R#XkOBO&=kZEoT(dVRU=o{ z69ks1*0;w5pJ;%nHe?48S0COEYFY*RcQoknE9GeuqjL+d0wH?Fdl98rt_9*?gPp5F z=nf;h)BBFfx^4tKzG=*8JI}o|*q?q+1I4RH4f!1!NNX7312xn(!OQqsD7A_@dmTLo4N#;U4ru zT8s_Mhi(geb-21G-gvvd#EMM+P`Dv|3NjA!Y0p2~gFPI4>-50Z%j;KqG!swB&V->c zKH8lZ`u*l)Id=ctui1Sx=@@b&q;9&49r}X@?f=s$E_L9~HqZMr36fSR_`wlRyiCP 
zCxf7+i|i^)UyCeF?#Ru}?UHb~DZRQ7uJzI9-8O1Ch*8MH)gCIFyG^%8Y)z=_RgeZy z6t}=@G|%LEM>1ui`PL+i0{cKwWE;8h`1My+DcD`aB;a}Qd1-#U+<4I@QUH5LlruE2 z2QO58nML~i*M7?k|({d zmjzFc2vmc6Qf`uG*2zzY&4#A&IeLPy*tKQ#Jwu6G*U$J@{G&if!$OwgD(wa1lIBH; zE(9DfK^ShcfXc!N<(b*$Gb-mm_}1iKs&hGY;pH&Ftf|+YxLvDZIaC$+sq)H9^;1=Y z5jT<>Z18Ro6*KS3G}nVtSI+x(MHYdK=5mm6{_gK7g?6sRUP_Z^vDbz-Ya37#$;bz> z>`d@85CeuaE6s0x+5%edh;FXzeo;P8K*@i1$zI>*i_WjU zJOsPLVj<-xLGigtWm3?n-s?^&iHO`_agM#w2)_V=f$TUN#qIjKHmv@=L(QoD(eT58MHK~=n(J8p1l$?=v;Y4E|W zdl{YMa5eN|NLADLVfDer&#!p;=L~%V70Fa2X4sL(Nlo!oPc|!i=-1OfLbEYDuTBu0 z5()@OKj*6})h#+>V@`w62k#6Uy=kOC9xo>{fZ7R+hxio7DAv7Q*Jpgz8#k!8nuj)d z{{B7JZFa3o1FR=@fcN_KLQ~bG{DX4u5~{<3f&wUM>7?Zkg6t>PJH2xefrM?CYHQNP z*{U*5-{Npk_#;;h<@pDzNH+umRh1h&rj|kRuLX*HlaXxIUww6FlE+xp|1se-ANAI~ zq^GXH-K3aU`VsilZZz}e&y7-r2&<}Bj{`r~@vCTO%1WpC#+B~t>P&BztjG*HLmQqb zMc7rI$v~2XKkd>`a4Y7|3&X7DL-^i^*fj+O1+12?dH=Sbj?RwBh#(2%iwDn-br~NJ zPVQ3URa(;ucIx~|Dev0^M{2G=LWx3zu-B@LN=Y>IzJ8)%dM5xFiwtEV^z-1EjlFRo zWQ;X-3Lo9qFyhe9cIA4?JZcdYG-1K_dbg%%!=g*!jUlN4atN~+P1~1a+ACm;iRZOn zwNHn=y^dhTm|z$CK{36PA>ldHfYp;+e1|n#X9-n|-x5^B-ZuCX$T1+5gqo0N6>h3R z-EQ??OW&V=Lva4&Qr8mPQ!!3zhzrR=TdO{M2o3Jf%!H*PoSqcgIu<`rn;gRK_kiLx zIrG0_sE7t1Xo7RY1ad6(nd+7q8^(|1!`io_bT-A~g?7ca)(@$aLoiD#XrHE8#pJ-Z>Ke1AgiCDyq> zIxho-98PV|P~c1EP)l}#qOY%^RPu2Q8hXi~-`kZ5XOViC9x4!XjcmD=k5vQ`GTFI! 
z`->#E=mswtFfC7a%0qI*Ll52yJ6w1Fl09+I(etDB+E;Exih(Z%dzMAoBA-?TN4N_{ z(uFMPKk6DOl-rlOlEiYiM35Wp#fFNe<7TK-^@y;g>^!Pr#-M&e)WyDieQ?n>ftY2Do?r(j-1Z88~Eya!p19(XUaw8`; z;`K4u8Eu4O4bl!`;_)8#R>ZJ(>!6_K#_uYMX5%d$C?G%R^)pZ1F{8X>QS>z4xu+ky z8PBg)>tmg=_onaJB1lo$P@R{Pu*&VWE=Qe zZWzmFavc!dn!H==1;7@vtvLb+owTQYJO*gK+eX-PW` zk)CfoZynCp$!`!{YCZA6iEuVeGf@c9MT0RjvdlPxQ$Q)pcl}sVg0S7c2u*N|TF4C*Yd>ybXdXFBdY0*4!BEhB0^XTaO;zqJ zUebBGV9(k2ac7V)@AzVskCXF>w|b#9*vjR_$R6xQlAq<{XEFKShFEWIlj9I?{r=!F ziNUHyO@T2o3QQDXiORkY(UZ5mp_Y!b)w?3i=jiv7$+>vZUu(UBPId(U$sx!{gzcMW zX1U?;Xd~C`#cizB=pUJK?8#hfmV1OdBhP;4j?nLYV%SZ>T7A%y8Pgm~>dA_3<@xj) zbs09Qf%xGd!t}VH`e71w{n-cSO{H&+WuL>ssLJNv6n&Uem-xNN!94!_D>^igtbN-z@u=8J>MG{5YQKYNmNQVQ1N_%1(ZNoLD)Y znt6~PRcKY^>fBeHcCI;xA# zS6vGNn4xCFLFq<6Maqu&=jx~A5TYd5+? zEql|*+88u#xzq0jg0G!jg*q@qG8`nN7`LzKy_c5g{S|6q@ya0w)PkMPQ25&8I(KZ| zU76kb7}r=NHYI1f)weEayj{>8^?o@)v(*9rNANi`NYRLt4d8zO`l$Esm8BYOoICa| zhR?W47sKOhHe=Z~%Ytd?{h6IqvWR>AS=7a{Ss_&+jqTqX@-4DW`%4aI)YN~wiqYm> z(<*+95;_7#8VC$lMeA!=(M= z0Ez=&{Qq(F-tkob|Nnn^opMld$V!D{Z`lnin{3C-%1RuP8IF9B#S5}oy~({)r-C5*qY*m1{7m|V%naF5@@H~XR+eHS6LTnzKtHFC2jA2XaJ zml{zUEJkowN4@SM{h&>OC>3M3%Q%(}GSRy^dj7C8huemoi0rj%itj^;8=H*Z*I48? 
z=6EsA4gExwmKwI|wGw|!+hd9a$62>_50kB>29CqJ4RTp=!RmEq0L{123K5R=DnGai zyXzHTJjXzFj1i5=)Q+)Mq)tmqV=8K1ep7vJE<~ui<6(o=>XuMt3Nc15oVpPwg`aab zZyc!Lyoo!^CUh??c~-~LwZs4?6bE$u1p^S@DwTnTA+WosXU27`RCds({mbnpJ2RU# z0)f|FE~^kfu9v@1dzp2R`m$r0Ca4p7tI{&7?@a`;BJ9dJ!<%;k*T)}|c7rk*h&N;9 z1lnE=pF@qpf?i>pvq7_5u)AfK5im9?Q-*SS7CO&H)LYM7;gJ4?i;=b8%z$B>Qw*8O zxtcA=85yRiJi8DIxstP4Yx}-EhGm3BL82uMLf+aVn)W0MkK|&o*FVZKtZ7fQVad() zZTlJO({$&nH9%!JHLmVv)Iig6e53oXvVu2U~JW^3Pooi7lDpZ`NxYcppGO_sa23dS< zSs@$XIa#wLZ2&n!scSoK>9dbDii&Tgs~SN=cbl;9Ahvh0oQ7Vc6estPZ1?`08n01s zPkbqV*m9HOv-{=;c{v}=v*t^_=c;%hPA&(&?kVv`%uHN@R;m0|r`vPE)67X8sz&wq zRaPPJo*U0920v%$T9;T`TQ7(%=1e2@N~Jhn+&vXYAEz=et6Xn6#z|~m9D-1#_Ffn79i)AakVw3c3aA<31 z{T3&-Mhb_c_UHnloU864(o1#7m4VHtSt>fuqByD-Z~2E`@yZgtW}%0x5xeVraBX$A z&wLBn5U0Nn4u6=|DYLB?K%7sF?xenVPgM*Y{7bl8zGKMS>n6)vDmm1bewnh zqQ+x*e)|uw4n!|qA(WMRbGAY?0~D;mpm>Ltz)@Sh$*dtUgp4miAW{Ry!-$fOg(&LYhgdQ`{M`Ni5j1N9pdWefTqH%@z-L( zPu~&aIWOFDgY4@gR^N`@B(ThZJ`XQLUn!7IV24sEGNF^Zdl$kkpK^(8K{eVPhW3^7 z#jd%K4i8~kR&2Jpi{6?!-?-Xog{{a+C&5I;kU9DTW$%O-mX?-28BZE$lt2RehkS^k zUGRMF4Frku{Pfo3!+~ygBqHndzPn;1C7mHC3#;@NY%0^PcV_tA@cH0rz%^1_19S9m zsk1kmZ&PwbGPdsT?9M8;xRbG{VmrS+jn~*#5-8wMoc#7s8YyhoAN=-N00mRaz*} zoG^VjxK=9W(_j0DS^K;9)%cJMhQqCu>`4+QH#~er-SNYH04&bUbCfgw;<^L`^l5%= zO{z2)AU#8TtEqB245mmPR1*^uU*tARNJymcGFAW;!z5NoXg6Ac^+uJ>L;{vf%4^%V zRiEZ0U=o|Tjm|`xSX@fGUBT`}pj_7-31YH+E6*9p7*BX_@pCvOlgwu{vOlXtx3%R# zqnV}7HG*xP*W>@hTl91LC^R$;8&+yu?k%5>QR0SgpEv?x<}ylEsWJ;QZH%8v5&m!X z9q7cjFWys+4B58zw=Q>faCp6SH2d$}e#oA&tK5YfJtP@|&Z)ZCD((77P_0q2`HW=AQx}Is&=HqO*RN|xkJuCU__C@%b4x(v@_pX5X zR15g#E5dfVbTWWz2(8XF8J~s15Ehg;rxUJJr;6t?Gx98Nu!L9}A%!O7*iG|?E0K}4 z$f)O}i?wf-gLKlI3v6V^mfW=X3@!(pvu(O=GvZAnd%ZEpKpyOBFfK2Pv(%SdrvtPc zeO}u(Z`wg3<%<>!^%HZzz91B~gZulltx7>ie`V__HHjA*`9t|t2rcX?@rIs_YF1jB z$L>0kv%x(7wZQ~(Hk*Lbr~Nb>H1#Hb`-?*089luOuXi%?aRd2=LmRQ9Qv>!FA-cuG ziBYGNGNa(A(*_eMW?RReIRi_>RAd(M|%AwWB3G!pjddZ^B`FB<)U%DK$1a>&h( z%?FmJE?YQQ%C3PN%1`RFo1HjIE6Sy={+KPtwCi>&4V)q4VhtkJPR2YhH2U1=sFd@t 
z;WCd5a1P+xyR-SXWe3jjns+QE#z(}!lMLkNoDL&dbl>D_DVZ91T&ADrIXd9|j5gVI ze6Wiel4Kt)rzaetDJe5{S$f2lZ^zl7=GxiW93A^(Om7>o<1#wUx^2z=i6-^N>MIut z)%MR*B|tIH!%|~cfwj)X-JS6IqVGu!)mh91Qza;ahOA9^nO%H%U8-S z6Iq~)b~RIa_i%BfMxMYQs(Ujuy-RS;%flb_Ixeo3dGT{X>gY6G ztk#81#9k-o_|(H_zkM5Rj7*oNa5&mx$4nO^GcS`wMHp58sO$A=(?T$EwrGK_sMA~k zf8Yqa>ycytHpG2U%xSh6SG`N&Uaw}hE&3rWMPw=APK#}H(?-+MbsD5U(6jZV;fH6$ zkRH#6KX|HfoQmumjwxQC?}+a+J70ngH;GmQ&n^z%h(}zSIdiBhOu~vy#E$@FmX@3E zzyno~Z?9$xp+_NzSHw4j31op7q+0D)K_UF)nDXPmealaCR_cvT4`zD*8zK?RwtNw6 zW~WFcm6(1l^=T!Z<$HmmWZesvqT4qM83{D*pOBsycP73tk_s zs{{BA&ix$1Ux?rUO}v0)J-QsrW_1IV;5K?58fu1EwecSXo0|hZxMA_g#UWKL#>jqQ zp;_0wW#Sd>vl7jjb?QV#avPAV`COwz zG-bubouk*;sn5RgZbA00wj3nZJgHKRLhQ*E^t-h|GE1>#Sm8sbm@>Mvp(x;sKLc8m zi%g$4{7H%pXy;z1o1|;HdsurDoHtf_LneaJ;pRUr zK-)eDX#eMb{JS+&9d4vs{*B+1vR)NI+|Ox;8+F?`Vk3x?7HXFM9LlPYq}gUY-NK<( z-fP8yyOta_>E&cmW4evFEoX#s-oYzeGGzZMLZa;bp=JNzbb@_>G<4`H=qT>um9!>L z7a~>I7l$X)P4LSBOKgb$RvU6LWR~pb>w0f=2rAmF$|<*nTKGs$YEhIbONeeN9{EhT zh>YydUA;=PTaVr6^mk6<$JfS4d6)if_ss8hd$g;c%+%$co^@I!)rHC4-=^W5No zrRfyxXI-?l?_b9v6I~E3`t_T(f{|HfIO~p)YfMW-eRVJ4(?@QozGR}xE$;No7A>r` zHCl0fY+~~9a3e}bM>i(8?115JG-P3AZ6D{W@}jl`XLSKtz2jV3|A(XoMim4mdZ`~kEIeuw};mpzS6Cwm1I`h-TtC|iz&Bx&3 zFHQe*UKA+B&?p!<&QnUByX|(aCdA#h#Pv=|Z%)RvY9Y3o{rNKG@ZlNg-D_iQZ-ML< zx-?oR9lDWpZi>wav^6-hM53E+}EMjVL8gOPt5#8e0mFk1*01Zvc0_~_z0y2AEKkRFIiu?dOn zh#TVcyr^)gE=JNaPy-ywefa45edT3IF3T@)#lef$wJDY^7Gc@`E_fgQOQQX+>e z_ZHes?lz19raa7A_Fxjs6ESE~elf93Wb$r@(D&=!_hOcjXWXfLK%Hk%#q=7*NJjqe zf*%)#^mq&yz1&WkZ|#wV6Xp4D+avaFj@jkWl8{d^{RhA~QFfVYNR8`Eq&DX)!qZ8) zie=50*Phwh#nwqgxrXYS8_#8DsXdchBG;1r7Db?&MFS#j;}8mN8PT7Ve9U=Rg=LLC zH$w^a;@HD%HV(xhre9Uq{rCYccK{&}fyZaK8m&YEohM)M^MEctpiC9L`FaO#bd`m+ zeFwX9jpnF>tWIxBj7rP-i*(Sz$}rs1QjZ*`~7F`5{y-MOX`i*BZ|p z4(8z-G497_&V#j2vP$MsC8tQ$e&btE+aPzE{<9ADA)XQ{lAmRqccbRW*1sEcW5L-zDbJAO7db;kK>IMeVg4-%w@JL4C zjKD5jSddu86Tf}&LV1p&RR`ji^_nDnlfO_TH~M*G;`k<#4Si zY6|4EX)3a+!dqihS1iEI|4Q+tn&A|=bCJ!9?rWX~Zne!TXGX9}8aw0-nU7-kTip;_ 
zm-CWxfsg+H-D#+QSz%ihbX_*{qLV){g?jtkQYEn>wFOVz?81;#)$Q$0k(<|gU?@!> z*(PK;w50CUH;3G$qZ_gfJooI8?Pd0Ff!qAmy;;LSbdPL4wu>s;ILwG9FNVy8aIm$7*@e^$6TX^nrSf_Wq|Fg% zB99?5>J99GYup5udg~y9=`ETDnCV1k23^M#nQ_>X>3R-N4R4?a*I^wczyK-tiAtd4 zwXBdGp?4`n@J?=nd^2*yJYLNjSF57|#HYY@#U>faj%aeU-s3n%k-Moo50dL* z9E1DOxQsS~Pg*ZOU4vsCXDbOGGeHGhCyv6|=GqXyuEfnfad&C+w%-~8*#rr#@&ino z1<8I)9l8>&qE40U>LI-bQt|@FzCEO7TB4Mj+6pybr@ge0i3T=9mvE&5__B}ga~4~B zr&eQ>=P9-cu4kzsnZZB1_dQ;8rt;tqykI+N{8w*F0QW$7m5T_TwdsvF@nJM&-zZ1L z{&H9aFv||ok=7sAIyLx|p~~=5j)YQ_?^fx^uS&%f!4i@hIjksXev~zG)fm@;gx^W7 z)zqDLg5O!?zxTV*xm&mr@33O>7BaGH>TocPIaS2v)xC;!$nq`Ce{*dsgsjva?a)$R zLax6fNZA6X%}?*yYC%-N0=Ifg*rL>d*iRQ$S|~Y6ABl$4kD3w$lw&rxbXrF6U<`$R zKvbHJ71cjCa&~DcMJ(77qfgd8Lp!u&Hj&>31NEV+n;V6#zVZTS_Jr(zJ6vmN*jt1d zj_T`e`ygMc&!YpRt&ub*vM(vO4|NAlG)L_L`^?3=8_qi~({=Dkw##UJIcazf`Y0a{b&T6b^Hn>C?Vj`;zFn;}(&C?b!n z`?0@_hFoIZK5Y@`i@=lOPiq58CZ4>HWCh zxqB;3a_=(lZ41QS!@N6gG$egArvI|WbVVMOkMg0>BlC-4xqb62>D&?r_pQs*TTCHk zmgPi^DR%FNcW4gVBfLR=vE!kptUuA)19yj zdA2gQ#H-ZmR&{stn1#MAJm@g!e#Ec7Vb!s z3Oy(r$C*20%eK@p*A`|f$KK|b(u#TsTr9%wEWc>z5MT72PHwst?)==0uid|mW|xR1 z&;z%Vf(L2}V--OJk&lEksshV+tj^}m)HcU9>npG5vN%(ih z_qO=ad1|UmP*0G1NKVallE9yye_&E^?bq{9PQ6O}-rAKtU}{V7r9XA7D6i%(vn4E) zd>W7+>Ce_}2{kp#{vfML(`bRPySE2KExx|AI`#}}x(-mGz_Y1SpDyd+Ij`ML z=x`$!w5e$9U#a2PHhK5+=CM9ee14MQp+rA0dXK&C0{7YB`Z>AC)>yNxyBCFkYB0P7 zy{losJx@nnR4QzP=d2ze?sOPqWf-uNQR zk*}c)f-`CfM_$*pY#P&pQs8xC9~Ciobm{!rHioNWGj)X)@HM5onpOJG_vv`owz1@l zx|v%dc;x`!6OvgrmgFd)t}9*$dGl>;TGU0hud~wdZR)|+g>3C|;%C>X20h&lJ3G7N z;o;#cF@1Uh5Z$h?w`{D;w+Z~$4x(WSrPvVPPv6YT!*kKspZ#qO6?%WW?Ezv>sX(5i zGN(&az~ym$wc=JuEyWhzXkbj)1K|OeRou5%7oDpN4FXk>wXUZ%g3Yg%Rzcq`Y>r2J^f4pnou1JAc$wRtgTHU_?(x*dF3{(} zLN0pdg!;;QqMRdH0^M4RT~MA)&QEe6-oGZ)1mxTqHV)({m3dHz7VYuuS?)$A5PcWN zNcV7pTEmPxPt(T3$qEl*_r?YY9-(fW0R-J!w8e6@ZuW|n$;r6oa>RaT9L>khL5*5} z$T$`o?;fR{*0w*OKAnVw>Sr+sCbb%wZ_#|!-D2Gkh@ZUYk`1D$&szr&TL|RbkZXC1sk+kegRh&QMhy*S6E?)c z)4xrC6Kq@fn5m^p3il+NmMzT9KRlTL)qB#*F3$AdlO`?7*SZrGF_=Oe3!RkF2wbn_ 
z80>96Hq%QjbMPLDNiKI&9Cr66$Grj2dm!qfb)6&BG8!3mOBt0cbD{m=b!pDI?qnd5 zX0BvL22H<)}DNlocxO|VDm1pytL4umY2 zjLY3$8dyyXpg0|BtvnC>h2l2X=EWRC0+6evE}m65OPsesO%>8E{XySoUhbyk{XL2~ z173>mP*e6hHiVqV1%Tp{==58^TNQ%5;*4_QB(ppo&Z=UA=w7

  • ~O#e=Y;t5!n&C zan07=pTy5{QwBZIM%OA#%8q<-iR2>K#8jcP?z*?2`+XFB49MQf#Ux|xm#ZJoiO_)NPdQ1*+)OSJZ>D$4~i~esdP2x2oOcZ6Op1mIQlGQyTIE6(U5} z=1o_Zle>Fif!}I+{mG`Xev_SH;r>w`-;$MK-YiARQeUsrvzl6~Z%K(S3&k%e52e)D z@(NrGkr*Ba?jb7JFIfc2AdTz?7Lzjcu78^DS9u7Tmtp6jzoVn7r@ep}e{ zvof_(A)3%EY_o57h{)JraJjch4n|2FzK!LDv8uMRgT}Lczw`7+vvyTbg!;mFo{JFN zo|ayU?5}XUc4xuKtTt;_L@ez?Kl!!nuKWuP>XKdov#oI?)0{9qJQaQAHPU5{lggF} zef@cSQdTJlvajo2kxKO24-1Vn{>eBuiMj92`vcnUUa-@UZ+7rWHIe4~+0*l{k5EZi z;M{7&v;e7iJGRrls^kPv?R!x1<+=TEaOfc5GYv*0p@%Z$B;ghPxi`TBKzN(1^74bV zxU*-{1p8Q*17S@xSJ~RB!9phXlzA4X5w6A4hGK9JWeU&rvsPEAEkefjhv!3dcb|LE zM#Q(}LH5S%t#j8dsK%goe)91)N2?Tur>D(Cp%wV?${R>IAKXXwWqsx3-cK))Lv3k@ll&M5<_mIuRai!9mt_MUlS|~!uQ7* z=6Iutn@ghj+O~pj`v?#2t1MJ7x}PA~eGD0Z@mp7qWO;%3eF*PY9p?QvVYXm*Ar9|dfr+cBanT*s6RS!3svY+# zG@rshXk1UJh>&5v(&Q>7RCit*qI$PMnC46`R zy0J)PhK-sE2@||1_(IbyWCPHxP)~h&?{9;x{jz#??S(}mMP&a+?JdpUx_1}5DjzC0 zb0!yOr>JBh_7a;4IER-`b6?gM)U6aPtFEpFPH3%ADta>{0h@2FJ1GU_OWW-kSE@&Bv@!>VtY^r6#)*Ki=2WuePNWwN8iR zfuOfgM606N*1@v%6FV?rrE)iu)8fB}_zlqlhfW?J0|>DD`1G}RFISN!n}0yAoL;mk z<8u?2uf3?~Y0#>d_sKH2qGoMc2hzB+$%_J!UY!R+yxO~{EXa89PoI$Zd9?_osJ$lm zohtfVb|$I;2DEu}R#W;wP}|zddUDUZm>+AQRr+zrnSq$p;SVV(ABFQ-^|nOZs&FX& z5XzQ-W|p=?7WRBv5Q2^s6r>>Ps27L+kgs{kYI~LI@=?k}rOX#-TvIx%7$0}aE@rDF z-?8lp>enG(`@cuVR~gHX5c^5X>}%hicWybCgt=_p-q1x{{q0o$rWK_U32EkqUpCu{ zMRg8>gF<&6#`8f`*&8L%-sjac1tjv@Z^?lT90k|YnB9JT@l@UgmcY?@_h;5Mw(a-A z&p@z@<;;e9K5{MFgZRvuo*R4Ji^!ruUh4oz*12}jyd&gE5!R(@yFWP2&P(ZDFKLNz z_^FBEXPBq%!zL5P<<3AB7}fyJaWD?Cf64rZb&-je7590%l!WH^0|T|z#RinLEwX`+ z-F6`8=yuh$i3{?ab=!l<9ry~09r(C{UAYcyc$oFb$7oRPI>BU2aMY}Q@_J=LM2Fx0 z)}`|9ctdZkR(?OEEOB6bH;(+YIX>P{jjR7csHbuI$N%2uyHMpLwlc-y-ypa7z~}UI z!Yqj<@(s7-qlk0HQ%K?u^GoZh3SA?+d*+a0*Jxbn+Nj{<^l>F$MvqC%Layt&qnAoh&1 z%!27j`a77!q~6gn0qDmEt>zH;xjHRJrQJrMqjZT&@7KN+n(8+!?1kOiAJ_j{cjQM) za*ZdG`T6Y`Z^%dEXBiDfokw|ad=+jXz-o|_Vbpytk_lU%?_H<|(ly}Zg_A$=@nNeg z;nGg|atw^WRpDN5+IDpE zHi&&yqdY8i%fkg=JL%|9Q=$qRClv<`-mV6!9FY?|Nw`R(ZcTH>m%VJ$K5S>^>`=;p 
zRq)a${ya}UqEr>Uv1919b5>aV6Xx4VxZiVmAN0f{L2i*0gZ_riTR_$vflkuldpJq59G-EJy zVpnG~DzKE*cw$U`b*QBo(jG%YItcb?TCJDqiW2>Qc-OSw5nTH0r};hO`Iedv8USOBi09 z>v9V}RWf|52oX`k98P(boE5Q?bZk5MmG|n8o9!fi7|+)%b#zOwDwhc&%mCZ@9kzLa zFR}p~g{}d+JRydbq~Cz)U(}eU8J5je1Bz9LLeJ#>A597GR>$szO>^F9Z5h{PStCFq zjvbH$god_Icq;N?>5J2N!mTL=frO9Wpqq;n`oL?3t^7!yZ;j>NZvnSsUiG#BP#&%w zN1!JCYHNHgKxt|En9i)~*WyiAUtWq73>hkZ`YiYC0B~Ia*6U9pn^y0mX?yPO2Q0g$ zGxnNAKYvJ&dgJ8B6=;__4*b3so&U)7b=x7=Qc6?knrAd(IY{>m=b+xQHOA|+5>Mmp#cS6o?t2W}x{+ob44c55* zvj^L6^WzQ_=MOBNz1%^tl$RM6asn&ufp_FC`9Rs=J*TyBnC%Dze58F)K()3%Ugzuf zq%?T>{dk#xq4k(IxCNfIFho;TWuNdVvs>lf)bgyR_?K`!aXR%@rK=?hS=+AjAI#I^ zk4Ki$FsudYH;x1)Ba8xV+Mb%u6&d%0Ge?Gn!~QGDBD{V+rXZm4`YBmJ|aM z(4KKJv)o%-mQ6n@P#y42h9&onzDIn90?^Z=-f?9H}sU6IVP z*4xt)jb}8Ka&wMVZKYx=s;t1(#Iaew(wK)VO~*p3!#v;JaCFS}8|`F`YF#wj>f*zi z+yU5F*iI2+Hn(Xg6-zoEW|+w_`#HGAPIMS)^O(7DbN^a<1?>n%v-MsPP~HuWyBfbZ z-#L|~f+=~Nt~pI)J?Fl58-db#w86r$M69m|b*GN)$w2*dxvu&skro3Nq)wPeMj%Bl z`pp?di_}4=-oqV=;$i)H4_&J9BbvWXz0$V#35 zciMRwTr|`6$QSko8q6WP<6`LDIvV`CL%=n*lT>^#bh&rdiH^i8!d0S%COD!v;{B^A zR8+DFIRsYcQFtUltTS_39?a5PboI0EV+RZoYgaR8?OCGjY$jXMkZNEuE?x`mA=IZCu zG@LF)f&xJyq$QZ%%=YLnpP!yh-@Cj-B+TE&SM7fv4%0qE*mu+;#NW;te!3**Y`c7# zIYc?ci_|#mlMGr_nFvH|b+eFrMsiae4ye6LyARx~Wb>t=o~jPO+kRu-BQtHUd|h>Z z_}=?xV^k>p5@kAcH!qWHP?LBPo0v;eQybKu!Ij5%LjA{0E-_7LE>ItejU%_!S zX8Sue3|jtzvn^(hft!$UjA8ffF)BlIfY5s1Ktt-$7Roh@?Ti*mELsf^>@ut_^#GDiK@5j}?O1%qA&_7EEICBOf z1yd-%K9t(R1P0czS&-g=NxHVNveD-Ovk?jK!I>r zd0-g=bgC!g7h30juBVS$0U$T^8mf9cuE>FSGY4bG16Llp#D1KO_o#^aRMQ!Skb&d# z^-;zn<~N^kDu20vih2_?9FtgKUxRvDdNC5JK`wT9gW~7|-`Q4g?1HJ(=71IJ`6!eK z9l7}n^upJd(1Q99-Y=Dgy6#4DYQx`QT-D)0auWJZbQ`QJESdcP|EvG`KR&faaeZ&FZvCTdl~WO&l@fyBj>$v!MuVBGIE`x3`A z$Tz4@QyPzIjo+bBR|trYshlCRCRQ6pylg>zRO5)`3ctysMeKp%9|@9B-|GPW!flb6 zn_^$I@IK~gP98&M)Ls@#g8a8{DWAB@`68hmpNt-HQa_A%+^@ep^{v8xiRjKypa z;ulKeuN~SGp71?c%FdW0K@fWx2AbVh&;$pf0*JT3;oDuG@2?!!{R2rUK6{_2`Qn8= zf&1=dh(bl#xl-eh8E8{MvPKy{1pZBC@>di8dC6ZP%2@37mTp@}`GE}VZmz|zPFt2p 
z#SqeCb1Fr7;q?$O>}z#eGm)1odMV8Va_lFoy=~fSRVHFYN*U-5`6jJ(5V+a&>@UT} z`lFHgMGHk|Mv03hrBNuJ6q9;@-x(CqI8Og(o-5z9OGBhq->L)*AqNkt1(em1YWMGL`ZlRt{T+GF)S$?s@r{~5j#$! zuYv^PPM(sk$o0&hq1hsXii49XEp6z>`Er2GzlP&5zRMf);?7;B_?Tcy)*9s z-||e##`Pk{a2#zreD4oct~?ywz}-UlkAFdF>B<{s-1uO+RYo{G+^ zX%WjhwzjtCK)^OUznxnpg0my}WhQ0eP(2!-Ff-8?vM}ZXR((-nCc}cx{>($`N67$Uzgw<-70WIm& zV<)4${~+fL6t7jDyHC9R{^V<#i1qs_qGFW3kE9`MN$>C{Q6Z#Jk`tZgIXQrni95xq zbmdZH)bU7K$Vx6YtN6Cv1UwP;oOJlt0`Z7_Xe(n{hqh!9MrpO{Vl^(He8e4#z9zEK?bRdf* ze_MIr5twtcZ6BpR&9e-Dy^TI3SEO@t^ojHdNgbv8g@ij?-}Sz7)Zw?^b6*Z5O4y#u zSd}*u^G+;}NeNAid*-6{4jO)DtcKcfPVIjT?-fRw(t+$_J&!{^i+=EdpMd^e0+h5U z%`taw8-a`0>{B!=_tw-AYBEp{M8NY~_z!g*N|jkRsDBBm!m9E9HDn=|=g%5!jB3xvn1zAW8gmDzQvro_qa&@E|#8 z(1H0#Ltxoi#o(%wGMnrVTR{M=O<@6jpN`a{AjB7vxUDI5y1q&;g0uWLDBv%+teIx5 zWSS7Bt(?$$0Wl`OF6_pte)2cB;&SD&Ns@Leg`SP_HQ4X`g9m>IM<*g8w#k~sOZ2Te zX@$R35GPa4otMJ9`M0Fz)32`_t|(7FZ;U`mP1dLC{sIw|ph3!c&mD*lv$l9$IuR%* z_KSG8)i6WD*bc`gY6|JUE6U2NFh!}qz(uYJ@DmPMWX)6Q#mcbv%Q(*0Cw_1J4y<7` z2;(;`Zd|0!@u)q3K}pTXh(q#L{lnQ2Gj1$hAO)zp2t8-Gb(ToZAJ>^f&)^BDiotPcWFwRs+0LW4 zxl4|Eu3>ept@)^dkl3Tn{bon$u)4!00D9_9+c8!Q9b4DG2f1yig7cmO(2nuFJ0iC& z5%kk5RaN`DN^fMf^DyFI!anLecWd-x?tgHHkguK+FNTs$HiR$T`QMAbA5pSIpl#`J zWq+DXqZ1W@4%JX!$;QfS1ek*`;vDS4(|Yy(}`1WQI42qKN&!{vT?VK2>H* z3Xpnip0GgK-0IfAk_}69GEMMp5Xwi8e~E=}ObS`~*f>z_1|5?V$Nn4mPos|jyR>A# zP75USHMV823_2F41d1LHMi9-nSNqQ0(A)qkxdgrWRn31hjm$ey_ws@W1r=;?!6_VI z$AvXtoPx05DED>~+Rs0Qm%7gbaA;2+M;H8KdTm~faA;v&I`JaDHQWq$kY3iX5pw2w zxi`O#kk#;(-Y0o_#sCfQFCzVq|NGaK7NCv8_X58Yh5d3O?t@X(9>9RV$Y$q2)$&hV z#LtdeUg7v3`3uUh6sVed!5V%$32R3%A+`rx?gS5k+f!yu&^qnG4q3#2s`2@6*-!kv zLH1jJT?n)-!l2jYqS8tyaaGI|A6%Hu8mlJWpNb(w}?X^Pu`X7?p_ zr5-hU%@H@DP;wSHB0W_jR=-&W3#LBOlnf;h-#G82oSMah*SEPMR9_J95VM6_)dCL}nUR0Zbfp4klUM z`QQ*Z@68QvOW^;1TVJ4O&2j&IasDDK4miO?et6dD1n(Z*ZpL! 
z1%#rg;@0U-v-=D^YKl1U894bv=_{Kev_ygUyC zP{ECF>n*5y&2cFtVb!BiKtp-!{+~?1F+2no%mpAA|M@-m%_#?6*vfqL9?Ig!nTm2w za~y@9z11=h5c{Yp7BQW&3hk2Mh0+hK+&uZWvsL`Tt>LVET*@G&COD6fq?b?IXh=is z@Z8M~_{Q_Ao>6S)0E_Q`6%)V{O(Y&^Yu2kgsXtd+un>Pjf4*ds53el$JJ&%s%XWCj z--pk}{ars93bZ0ta}OVZl`5rhp&^7FSOjQD@@#y$n;h^BPP!^)c(r!x&-fHa9ieLN zho%3ni-=f#9V2w}3Y31|5KI0U^|k6%B7`zaO$lU9i1$5huR%|HXBfoara{cntC|}h z=y3`151RLz{c$mbRLpUAh^{_l+v^aKcfI?`*CZ(3IApu|BL2WLw}|=7D9RhAOUT2wLaAM6)^BH^HiSggoM?TIEV`RN1H{l*N3aJtGo zOqiMazGM#0c;O1@1%+dfe<{WHBHzUFWL{zlJy{=ui5u0qqdZ5tWY_ z=YlIMN@lU;)Q?A@2vM%eH~x=#`SmzJ@;l2u`2!eB6Wm@cSYYC}ll@8uCDqcCR`2@$ zMthE_>^Vu^3ZW-~Opgl)S4xtu8vk2p>iKR@et2XY$MXmORg*;VfNucVQPJVq*7b03qb>Q)JQre_y?wB^Q2s31!p0e+7Xu*6xuK z(kQn->a06|9Cp_sTCeVd&|ieGk8o4x_xBH7939(AcA=;Kjn83-r=ugNS>!@j7v~3v z-$@|OQlG@x*84juby}Rj1W1t++(NmWtO>-j+d?S{d=AQXB%UAD=dJ`90To44O#HtU z9@xDPp>Q{d9DhEtNTKb+dKLEvge$K?D9n+07&{60>F|QiPXrh6TEki!@1EWG`;iQV zDcqiD6@2#9W6GwhXhtrHauQ4aV05NB2f$f0%7t=6(49ndr#zxD`qNn$7zQ>zN+SJQ z|JIlzR7i%k=9vVCmXl?3xxL}Q!e@*#+>)xV1e3vo4IrgX!0uYb*}VM*KxvgYpsl~N zF|8v|1^ZLa2fR{mNF%GX>!;W9@dT4`H|)Zv_csSB;1G)JQ7{^~;Y5p{a^ezT?})m| zY9IP0T?tixyBqz#&$9*9<-iq;wA&X@CCJP>f~@J)BhRv1hS+jksh%?eQ}#uXJFX|C^SADG}J!@<@l4>qIQ~uot4K`Quhp%eQS) z!m}L{UGt3lzF&eTi66_yiAafO-XQFFGVVIOFvp_BjCY@-vA&{#M)ie)T~H#M;3K}}*? zrx08E)hiQ|ha%JCPoA38YpR!eemz#pa+T|9@QVyoLwV~;;??W47yb@tN5C3%_Iv== z_QfYd*ad`F?yA*oaOrVvAzME%r(m!*Rbelj{%wR52379=bg~6-%fIigo$)W}@y1LM zoUJ+zYb3mKMvu@^h8?#|cQ$~)@!mHJF#Q7>)66FuzLTh+L1j{)UW^C`?jL)q{J#x)VpYPN+c*q?4?l-} z1r<8}e$POyCaBH163MLP;ru$d_>i>KY=I!o=uX~?=EL`TrepfK zVLLOQ$NFulA)YPG%Y^Mwg|Obrt*7VyTx|s4B1ylHhkwauHk`09OFI4du`3+k3-ujE z=ytijK0k7G$Zr@P31%4@F}*!n(78Gs-1&DU69nWa-Ca!#)yJ6~96w`|4MI%sMREez z8fiXXyoVyxdQ-ODzd+ZbetdR7)BRihJV4xHau(k-ILZ^)L2rQzoL=#p+k-9DP9M$r zSqrI|@!~n#qNTB)oN<5U?0QBlzi>DElb=g*HVg_ZwObOGI11_Vt*rZzSzDBcbHa-zHAhkh%%l)ma92<6;! 
ziQLV@GCrps+(17#4B^N_%rcO3P=Igy{iR}4I0W;4zW+^1crbwoYWRqn*K^&2Ed@Gs zV*2^u8~cYBp7i3;n#p~QqXO7H!gVsfUOIo5nH5=)MJ#dm`1Y+IO?<~1Q( zF}T*HGfy1PFrc4SXeKltKh;|DZ|rwwt3CWcY5=rAr08HU;FQGN$fe^2I`@Ykf#A+c z{l}%ViP?D|Y6@3Qa?6*zQu}WKy3KJIu|k)ugm{J0GrKprAojZ8d~|}R!KIq|LVJet z)D(Gir|qS)$=LvcV3crhk4~Ho47Gn9bR^Ops*WGj&YO}+_TI|uebkwYB_C8@qRk*` zf(^Wro3_wi6}mBd%8|YFcqEj0N8gsV|C{8clz*o@a0ft?UFc|*ZmO(`2+vn2q@q(V zCr0_0>K( zc}I7iLwy>99xZ$LoxOqzkU>0_Op6?DivOIz;T&78{4i2rZbaE{8Dv`Bap36ievu>GJpO#hVnblfrYJ?P?Ldc*Wf`o*bC8s&gP#`fPaY5>Ip z75PDsH`oij=0UqRF$Ohan+q*y*seoKbUybm{V-TvJ%bN}nmW){34jflZ|^(5=UG{S zWz@9)M%`EOqZHTjRnFBorbofG6C!5**?~d~V7TydY6~@)s*8*0bYeUvp{$Jd9AgRgVDxk|1PYkB0k4 z=hT>sPCA#^x|Hk(gxytaJEHR={BG%N=$A!*yW!Ga-!D5&y?v>~^x2EO4^F0^v>5N6 zE6nxiS@Kb|?a~01RoTKzMw9L=qqv`PPhS_W9ZD=dCPk$AuF9V)xQw9J3=D1oYPwDkD7$mh(^K@3rSg$>+B71iioPomvWHn)KEH{dC0=UEfN%+I?chb#D^jnGGb zzX4E=n?Za2`m3$QArDaDTJ7d-4^R{JLM15hCYB;As!~B*gKQF-8oX0BN5fX9$loE8 z>oq~2#iALhH{0!-FBV8d5Az;Be3zj93ln(N8hboeIBN+pPRKV_%wgt zx#eufh_%l)4=+l!|NX5+%o7q#&IV00+GQY5f8?zXk2EOO{QDK9(&qR{+F#?gKZ_0J z4!7-R`n*)r9r7xwAEHgtdtF?{9T<+yDOy-e$h^vxt2OrPl$ghvKb_flY|?hmEMRMA z+D^=42O>?mCZ^b@<4(m($u%A%JZ;DlntvA@($i4;ZJ+A=da+Hd_^J+H@Sl5^I<37N z_#((cd3-qEjQ@6Ws;!yTqRjQwBVA!GRXF*j6`$@Azb(Z+HRy*1kyE348#95w46!w=)lB z)s=@s*+zXPvX$&oVl0!TP$48{$~uIQB|@vMBqbx+=GAMfUQA@mlEzk{Q1+#4Eku?? 
z2GN2vmd0*ozB4o4_xrA|f4Z)E?&mr8S$^kt&bgm?K6zChWG7Gu3!copA;F8rnte-& zxNS-_JNTa1ylamkGj8MS*%}&%=r)v5U}kFFOX5qOf&`g;~U1p7?1kV>gR7 za;PuX!93`cre2pfH*rxhtXX6v|L#ohe^RU93k#K6K7^Y6Z4ham)uDzBicKZC2EEt# zoB~EaN9nshO>Pt8p$qmc5OD9|-MYc#*?fELr)n=GP*P0r7~-^{W}pr#o_+L}O>Co& zQBK%7rJ|3f7r{}T)kVMaH4`@?3{FuBQ3wjnXK9>`#HJ=Ke^wwVw+}n_6WoF zZ|snvRuaax= zr+xZ!S+lQg?5w>`272a&1%+vQl6Xo+MjwUZzqComl%qDlaj?AJXJuDN{WrTE9&0;f z2Yg2!z!(%q&nEVm`KD~Xcep?;Pg80d}3F1Noq(vlR{wq$#gw9j3}Rp*ZF zvXTMcsk82CJE}!@c)ozWJju_meO0xVM6gsWF>>O?C(^y8(8uJy#)T=doDkWy6@LO< zR^gTvv0`>I${}?iCb!&Bu1E|Vc!83rrHu=FGcP_a`xTv6Ywx8*JF9=_P}(=Q59b2T zd7y6+C`hFe~U`4aqdQbVp4*jH8wLr`U;}&E}>{N{!efB|+8PgMyje{>P!fy{QUfvQAZauI2mdz7a|2+!q+|i}- z5u*Th*4=OMeQP(V-*l{CVczsbWo6||U1g%VJNis{57iv zY9C*?-zU38ME<~*fWK3I__st4Q7>V|p9ac^Q}*uD(J9oe=lxRJs4VK+IMa zx>@pG8m>J#ckR&k1N-+spXodLt`YGwMI!5)R7y#@7GWB9Z8%cuMUC%$O60?xIrtES zQds0nqtfoL9`L3qRwpnM!3>dyT5sOic(a-v_)7!}Pyt~KN=KWGsHZ~+1*6_%Q zK7}%l);Tpjy{EVLgc!H{`pIMJM`RhMDR)FB8=~{VJAZGj z75LdPePh!|$K4D7%9(W$e(~^{5%cbRl-`BP`<1#2owKCtk=W&ve^}n3As26((^*Vw zyg2QRwNfvW-a3TP3UGzz{;|~#W+T%^!7&1sKh0m3$(OF*-!rGtRDJ!|e5QYn2@`0u zjqxxSZgIGJRcU%^n?{e8SPZRVb9H(;K@d4$<*ppX*G!7LM^VC>@b}vc<9@!ZkuIxy z_ALAP^XIh+nG-C>C^&+Q>ZD4&c~eZo+@7+>T4|O2V%`Hl*2?=(6kjXJYOfIO=6|<9 zN6UScmXLURbL?NTI!gRh)iGk5~ zvLA81#Xys=(&1cjxv#zbm_08v(t4Yl4ej>}?`4JSb?$GJmMm)K^wd&asijt#Psm8i zRxDrqw1Qq;1lLb;LSZI4kZ(u_;;ZjbK*EH{YbzGcu;H(AUR}%h%Y|Cs-&y7UGIyq? 
zQsxFY^SAuKSvFG0_&h0nv#-pIE)KpeKSQd1z+1DX({b=Q)A+qI@#DpzwL7+#aN7h4 zeLnA=ezTfzyA*g{=jC^6e4j}pN|Qr0^4Js=kuYu2on?LohfYr2ZOKI3YL2U4`rH~q z>&65_&8!eV(+Bo2SBt>vtAlHNgCsSAzi}AO?>cvLOx2vhp?pYimSq-CnR>J1{kEJq zN+#8a#?9Hk&Yor-7R(x;x0o3rH57^3)MD3=3S{LOFM;GUkI!P39`TbF$| zs7Vv=w*Uma_3q9WUc$cdi_b3WpLfq+xUwtxyeg(NYXvc(5R8knx-pBeLwgd&Y=g(g z#|I=owLLr(qAH-ua==jm#N1lb*w|SyefazJ>%WYRjZ33((a4JQlflaAe>KJeHtiAe zdxb}kX*}3YOfDKfB-M0ni56bTqB-mw=>jL+JUT=W5V(b^JDA~bLU4N+dX;gU2pgM zs-l}!k6#iI=mM9XONSA6{B+so&4O2!mfs5V(B~qCvVQ5!8R+u=x*cY#CRu!Mb&Iex z^DcdeD#`hsalhNf*CgNJK8s$rNSIKwt0Q}&RoDX-d(t`wbNrSb$-Ag}J$-2@JmLB4 zQ&C!KDi%p-iUN(TiwGl`w`oCC$*=hp_)3z^b%+?r!7HVyq`w3aRNnaMr9G_+IPS_d zgWI?03C}3141f%bLVrJ6nl(Qyw3E4f7eYsVO;}hs)@sp)U+IcL!bI36IgZT`($a=^ zZ}J~ojyoCpfId!;;y3q2%r)s+_FJs@g%YU4pAZf*%w%yz5#M_+L~6v%?{}C*cUk*% zo@mK81P8wirnIwryHg`6EZ1~7POAF(u^pyV#?oVA2hK44`NZz{pwiCqwJ4WETH-OZ z5&M^op4xm6414pcR*@twGa^(t!9T#>YVYaPiJyDcnY(FjV0Xj4#-`;f5Hr<+)?hBleedp1T zZMWHmindm5<>S@g;A+>Q_gU|quOH8c8=Pt8zk<>loOoK@Ca z^sAImNT$9bId&^3z(#PD=h?;Jn7Dl_q5y|i~V|WWo z?8=gMMpzW_d8V(~^3l*M3Wuw1MKYlS6N9)UkQhDiPo5Y`ufKha62gl}0_S#kXFkrw zTe)U!^g1l~!+}Dn%Cb;ukk6NwDnAQR0j!5A3!c80E%a&kj=DTkimzABwbgd3@bl;f z9bb*JIJXzIH4QG^Ja_qtuRe?)uh)K{_Tn|qdSQp+9J4?zH)0BWkhhpn-r9bDu(Wu) z$tobsWT)RRS^uRq;J4~dT9vU}&BdWDjT##jbq+#_O!lBi)LTJ%+$Q9bd8^L`JmS9P zG5)5jODoIL^^#{v(&rv$eL3fV&bK?pf>#S}Vs%Xei?=-I>8@TKy6k@O*VLU*0qx`A z=d%{N3c_pLO4lb7#19^d-l1zXckO1X-d_&m-VgV%c1qqGKSic>+mu?p413dVd2W|n zS0X^O?L6gX6fBjWywi&fnEw1w^J4eSc)Sa>*FklT8NTI4r~Uk_0d0LGEWO4}DeA#9 z>SgNY201bun+3o)YzQnGhPoB59v@-kQuB*3&+}9ueizpR4jjT|5&ZoWxWb~3RH;fK2JrOU$ zVKYRU89fqG0bY@$^i=7rDZ>%#bS#u*acIl#3xt1upFIt;&OS&h)!Gz;e^Onj#Jqc_ znFw%bZ~R!~Yk2KvEP=+KxD%Ce*1iAw>8D%v+k$5uQ>ZsTQ?nKU7_a2A19#^-eA##| zLOb_$@&A^mE@GyBxCS|{1s50j)sdr>81HTK4NRwS0=x<|g4vP>_@J%X0Fy1z(M_yH&;e$H;l$5?Vi1)~svR1_-L2 z>dni`l`!o6a=5v^_0w2G>5?dyIE$^vr(wKn*8DJTlD+*oSN}<^t1ucrqsUE>hn9@6 zu=oq>MyNVgpApx}eS!=1UXQ%8`daRTg>PvywXG})J8nRwZ$T$g5b9rCC%=8vMu2ztz#roCO8 
z=97KO4s3QQ+qc>{(Y#m=lI)1*vl^QC8g?(}FjkIUK{_H2ef%;50O^a|(HR{=8wE(C69vuSsGJxK<0MN+661qlVc6(tyew5KM8n7fxqbfz+0b?f40s~uR~E|h_=d8W0oCx zl%}MJl_|&l@ZoAE2Uy+tMYA_zn2y+je+gY%GXgL{Qed zg34|Cu{`A3!bkYAQO-D?hKPMD+3Ntt{+T*(aZe^9%_Q3o?9`ec&fbd}CFh&3V(h5! zSrRXA@5hgST=YqfSn>Xzr_ORVGjY?&?0W4hSH@fKUXZB!+^(wnWdGoi><+*bpUhpY z_SNAvXuIyA`A1ZfWYY3RQC*tuL)7E{aq(7$B})!P3uLYWzxg4P33u$Tj%oE=nH~Xz z3ZR|Z;wjb4BNW#?~-+-6%E+HXdFJ=k)Qz+jUwj?{x8S&kfrCRPcxPVY3qk=>ShZ<&jxABcM1u9v9&xbx`p=E8w~6>NJhSh zSleZTPjuBh<>=V;`SWM9Ze977n6-)`e;K+CqL2ZS`c2LK$>B8(C%b>GRw*S}kr!6P z|AaQC_V#0>k1<}i71+?#g zsz1lnYYuj#$|`fbgF05NlW_llz?CJ_<<8I2 zQ;-Wq;##lQ!TAm6Ck`d#pzU|0H z@`>)hVGy`v`Za{w^x#tZyF@{*4U{oWd+^|dH}muJv%2Pv_cm!=tdWu(XH}qV?ATp;7bo}#AMTgGk(Uj}gn^BzH~cga zIMI|ON~~%bYcBEhz0AH13Mw#hBs;=QFesV0Rqs|SiW_-FfSnL>pA*rHoI0#dpXi3M%usIE zI0sET=Km}?W?`XZ`NP+*>Qwi8`R6-7j$zMwk^sSGS`Y7kN9L||?M|;gFtoCAqv&K= z-kzol4>sI?9})8&{c4GeFnsuY^w2+Sh`-t1>YKCU=Q($ys0l=q`y>8UD)VpauIhPh zbkkn)l~aU+wM0=pegjr&;y02XJS0?Lc?r{&&QPJwN;uBe+fF*PbHaYxRM( zL~WUwnCsi{OAEim^3g1G9%Ot}I=`^6@G;lI4Xiq$-toFgIaC@rfOZc%WLIsYJJz@0 z+ID!q{?k<7lV8(>`hV4C#F%n@oS5)Tk_&o7ylWSL`2x-t2+n8E4XG`#l!!BnCDF{x zZ8ugSeetG?a+7LR&8oCLsF0W60=)RyIaZ-ZG$VZHyxT1ra6=}L{5jqq?`{PcG+ z6tq#ht_Gi(C~7+Rn#zqlUJq(t$t#b>q$1)HC;`|`#V6J{wI@o0|90N^`KC!V4o~ch zc4+)%Wwxq^b(1P*CmXQgy#Z;TEaYD2cqbHCMMR$}UfPxS>iP5iDtq^C>ET#n_9}qY z9YsKy1qgehWvAPu@M`76_)?f$~WQIEV}$1dZ^dCHfuldJ+1?{W)d;Qe*Q?rY4QEenl#qi6J*n=ct=N+L zl&|HQ(fs&w2$Y^QtDLtcNU5J_D;u}AscYJ9`h2%cSqWgmC_Bnv)g4vme!~PUH3Bpg z69r3jHBN33#Yb>IaC{KHMs)Ru9ItWUK==FHjoibAkl>d55ntbljaA#^lFGhGMs}lv zYIagP^ zleS$C!NuFGyY8Dn$%%M=>=;yn{}E$2jK#arRg0sSY!izKt$hTIQT#uQrNIaZeFT?a z;6FsD3n+N{0EJTi^3nie`v!0$uP88(pyU#%dSQW}&$|`DO_YOT@53n}9>Cw|rRib$ zNvS#McA#3M>g$TE?i93Y);L3DC%3gv7sZ2Of&j56JCp=vu4FfUeE*gP)l@5XeeaeH zHl&Ri6~g-ND@(Tqu{6N2Jr5jmj*b>1I$fQd1`i)NFz@L3Z>(IzUqtz6l*L^r!py=K z>OhoN^1`z4xS^g@kkTB;zd>PS#ZyW&TMyeF%+kwK(&27&j-c!#<2D5~<5v@Gc?XJFd 
z_A$E?CWbHmY<_Cft>MhwNH<^%XE1=I(ULp6802d`~Ah(diP?8(j)c~>3VF;bXsFIA$o4EZQpvFN>UMFOZkQBG;u!K0t9c_YAg9oe!uwel7l|#G6D!gN@}2a|Hb=1`Si?!HHg12DZ0+V5 zO@dTUhPAp4OAvy{aw+kTcCgypDwHCJ85EdmN%vX>Or0A^o%bo;*liZNHaV0VHoA#0e8ydiKR_49h=q-u z5spED%@wgFiRJYKeoqlr(j)V=V}tTjT}G(KqXD%BcSEWCQsT^EZu#A#?8G^?9$<+=k4mk|c1g@3z$Vd_NOn`E!(sbFae!c2p zme0p7I0wq)IT76Jw~09Opybq$i}9N)xnS3dP!>0AF&YHHA3}7|2(n-4E?ASTB;;0> z@W)FURR}aip%C>o*E*LTM!)6TH2dgxxF!i^Kv3ZO8vGW zla`e;IGd*JD~tdJ@PizhM!Y7k$FqyIS*P8zI#}_P0hG#4@g_!sT7=Lk699i)0P7sX zdYA`Y{>Am4jcH38@y{}ob?%EXy_--a}?xS3MQg}_A`62UeiIbd`&zKwa z5eOWaPdy->HgI(3<|al*0iOi9z(HsX4OPJ>p30k&qd0f#agDGI&De1_d##EaX_je+ z$1%XlpCPuoZ3{*aGkO|i89SR#54^SEuGrBXSm+OKOYD{EU?}MY0C&HoWfY{}zr$18L%FewUc$`8$A^C}Pko+BwG;a&1d|haMr)2j5t2l#@KmntC%DQg1+2$GM>d+d z%TRXHIh?_dSIwN>ypoEnD^PbY+&wZ2X2I-0+$KQdBP(S9 z*UcTH$gm0xlFORbI6vI!L?aA616I#f(Dh>?v5t%du?TdcNX@deM%fcc>nQy9V6b@QR> zPBTuO-8w2K0@})cf~Y}-ICe+1Py1Hy;23ALfF8p^ffA>bML`Twf*ps(cK^8@G;qfXP<4%Q2vGZ>aCw2>!9e5dvj-M>UZT?hZ{WHM3Q9EF47{$(P#8Y)#uS zk_pPZ1~O7cIAa)t7GFphU>Dd2ak#ItaVvp#6QsF3>t4z9w$qT)^e-|_W))x>N#~O3 zXl&5%{8DCIVK!eH!wsrcuiPV|lPT!-y#aq%RV)gaYH~Ez7IxG43d-z4!Y8ronr4j6 zwIXiHcbI6Fb)KKbOfdf8JM1<|xQ88{K5L7YopY0$teRdfI{?B9<3GO|k@3>y3Ei!3 znF5&43wUIIY*PeiMiQVUh7ej-P&=MAE{k*rO+@2n>9UVgL-a}e82S+K9I8{_#>U23 z%s2~B0y?u*~ckHUL5V3(U9 z7b#uc7!phj6t4wD5Ag+Ja}(+?11K6r3HoAv?EyGl5n~3sB%;8bKLrZ`cVYlLT4C}e z!BTaqQ$bAjE9|j?gXXm|`RM)aRX_E$pO?30ePlS-3RFNp)%y(rdU?1-xYk=P)76uu zlR_>bWCRfag>Iux*sOSePOj+}VWrz;Kta#kfHJ&*rQH7D+tSLD5=PWGg7KjpyiLYz z_yC^iBnlc8asYJj$ffC=2B?<}Q3p?1%4ybQ=$Z>`VTf&T?N#tU{bq|Gnt#cY+}B7kdw9ar?Ib$lv3a}`ir3N(rcAEfSZy= z=&>e_Fx_*@RrO(%ZYnUNjWSP5)UfV-=_wfRoSZdP^|-)cp0qZP_X2qZJZr#?Up%6j zo~#MEJcXFA+EB8Vm_s|dmDCLxBvD~D?)}f}r(AkFQwGl7u;>wjjf7*wA#Q85FW!-0 zII<(Ou3Yr*uDyM);D;_Om@M9*uE%K1= z%?I7WEQD(4o3K}f5jf#!m(_;*{uU?HZ~uGHhyrdjA}-Eqw@aFViIGYKS{pX#lC^X! 
z@O4T1(Vc{1m8oRew_0U0XIX>1#%Ex#Cf=)PrjRla7q1Ak2;y8$>ydC1gGfvsLd!fh z;*VJDYC}7G#MDXPAX6bNuu61O&tc}O49-{uHrdIzPP$%oK^wMO+@4!rc2*ACk6ksh zZji+4>n_H$tDa$OWf$P_%p1e~$C&T}{*g$eE;yi!s7r-y7Qq?f%OeC@UO~TkHE6p=p*$ z_iAjeW|<*d4I;n3vT{3E50k9|hTx$$-QHovW*Cwp&(C@3z5I1wNk7NoepJilklqr= zx~7zkhMGUa%WQjEPBW96y%0QH1WVuFawgmlIqGTcV7^+h=f$hA%%_d?q;ivPa!xD$ zhP7|3{78Ff%B61{OdZE(fY$}sS~fB~X&5+1BGFCN`!J`#LUQctZGu!{#qs<{^q0i2 zc*>H|-Hg?OFmYg$lm7l&HqP|w165dS9AnaIJOxZNVj4@N%Md~z7R>RyDwvDqHHzPM_BA%Gml-pW>l9&EI>Q=YP%u5gOiZn4@?9WhK=s# z3*-zxDL}*R)@5Y9*U!j!uh*hyJ?hP4cf7^*n&v&CU9=w8owGide|YM*(Oul&qgEb@ z_4uyk0=iA_4BWWWh?+?(peukN4gbnfaAcIeDfB40(AIQ62OIps8d(|^A3goo{{Z_N Br&s_0 literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml index c2cf4ae9ef..581b86a54e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ "tyro", - "transformers>=4.38.2", + "transformers>=4.42.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -185,9 +185,9 @@ colab-ampere-torch220 = [ ] colab-new = [ "tyro", - "transformers>=4.38.2", + "transformers>=4.42.3", "datasets>=2.16.0", - "sentencepiece", + "sentencepiece>=0.2.0", "tqdm", "psutil", "wheel>=0.42.0", diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 260577912f..dc1ad269fe 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -19,14 +19,17 @@ from transformers.models.llama.modeling_llama import logger +@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) @triton.jit def _cross_entropy_forward( logits_ptr, logits_row_stride, loss_ptr, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -58,13 +61,19 @@ def _cross_entropy_forward( mask = col_offsets < 
VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + if DO_SOFTCAPPING: logits = SOFTCAP * tl.math.tanh(logits / SOFTCAP) + + logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) if label_idx != -100: - x = tl.load(logits_ptr + label_idx).to(tl.float32) - loss = logsumexp - x + x = tl.load(logits_ptr + label_idx) + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + if DO_SOFTCAPPING: x = SOFTCAP * tl.math.tanh(x / SOFTCAP) + loss = logsumexp - x.to(tl.float32) else: loss = 0.0 tl.store(logsumexp_ptr, logsumexp) @@ -72,15 +81,18 @@ def _cross_entropy_forward( pass +@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) @triton.jit def _chunked_cross_entropy_forward( logits_ptr, logits_row_stride, loss_ptr, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -117,7 +129,11 @@ def _chunked_cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + if DO_SOFTCAPPING: logits = SOFTCAP * tl.math.tanh(logits / SOFTCAP) + + logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -126,7 +142,9 @@ def _chunked_cross_entropy_forward( # Do the -x separately if label_idx != -100: x = 
tl.load(logits_ptr + label_idx).to(tl.float32) - loss = -1.0 * x + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + if DO_SOFTCAPPING: x = SOFTCAP * tl.math.tanh(x / SOFTCAP) + loss = -1.0 * x.to(tl.float32) else: loss = 0.0 tl.store(loss_ptr, loss) @@ -135,14 +153,17 @@ def _chunked_cross_entropy_forward( pass +@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) @triton.jit def _cross_entropy_backward( logits_ptr, logits_row_stride, dloss_ptr, dloss_row_stride, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) @@ -173,15 +194,27 @@ def _cross_entropy_backward( else: dloss = 0.0 - x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + if DO_SOFTCAPPING: + # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) + partial = tl.math.tanh(x / SOFTCAP) + x = SOFTCAP * partial + pass + logsumexp = tl.load(logsumexp_ptr + row_idx) - y = tl.exp(x - logsumexp) + y = tl.exp(x.to(tl.float32) - logsumexp) y = tl.where( col_offsets == label_idx, y - 1.0, # exp(x - logsumexp) - 1 y, # exp(x - logsumexp) ) + if DO_SOFTCAPPING: + # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) + y = y * (1.0 - partial*partial) + pass + # If y == 0: dC/dx = 0 ==> we already masked it to be = 0, so dloss = 0. 
tl.store(logits_ptr + col_offsets, dloss * y, mask = mask) pass @@ -191,40 +224,46 @@ def _cross_entropy_backward( class Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod - def forward(ctx, logits, labels): + def forward(ctx, logits, labels, logit_softcapping = 0): n_rows, vocab_size = logits.shape div, mod = divmod(vocab_size, MAX_FUSED_SIZE) n_chunks = div + (mod != 0) - losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + + DO_SOFTCAPPING = (logit_softcapping != 0) if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) - logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") _cross_entropy_forward[(n_rows,)]( logits, logits.stride(0), losses, logsumexp, labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + num_warps = num_warps, ) else: # For large vocabs > 65336 like Gemma 256K - logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda") + logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda:0") _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( logits, logits.stride(0), losses, logsumexp, labels, - VOCAB_SIZE = vocab_size, - N_CHUNKS = n_chunks, - BLOCK_SIZE = MAX_FUSED_SIZE, - num_warps = 32, + VOCAB_SIZE = vocab_size, + N_CHUNKS = n_chunks, + BLOCK_SIZE = MAX_FUSED_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + num_warps = 32, ) # logsumexp(chunked_logsumexp) - x # Do the -x separately @@ -234,6 +273,8 @@ def forward(ctx, logits, labels): pass ctx.save_for_backward(logits, logsumexp, labels) + ctx.DO_SOFTCAPPING = DO_SOFTCAPPING + ctx.logit_softcapping = 
logit_softcapping return losses pass @@ -251,16 +292,18 @@ def backward(ctx, dlosses): dlosses, dlosses.stride(0), logsumexp, labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = 8, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, + SOFTCAP = ctx.logit_softcapping, + num_warps = 8, ) return logits, None, None, pass pass -def fast_cross_entropy_loss(logits, labels): +def fast_cross_entropy_loss(logits, labels, logit_softcapping = 0): """ Arguments: logits: (batch, seq_len, vocab_size) @@ -274,6 +317,7 @@ def fast_cross_entropy_loss(logits, labels): loss = Fast_CrossEntropyLoss.apply( logits.view(batch*seq_len, d), labels.view(-1), + logit_softcapping, ) n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items diff --git a/unsloth/kernels/geglu.py b/unsloth/kernels/geglu.py index df80fcb79b..006e8c0f34 100644 --- a/unsloth/kernels/geglu.py +++ b/unsloth/kernels/geglu.py @@ -41,7 +41,7 @@ def _exact_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def geglu_exact_forward_kernel(gate, up): batch, seq_len, hd = gate.shape n_elements = gate.numel() - out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda") + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda:0") grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) _exact_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) return out @@ -133,7 +133,7 @@ def _approx_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def geglu_approx_forward_kernel(gate, up): batch, seq_len, hd = gate.shape n_elements = gate.numel() - out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda") + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda:0") grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) _approx_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) return out diff --git 
a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4db89b7816..f26e596530 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -119,7 +119,7 @@ def _gemma_rms_layernorm_forward( W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols - inv_var = 1.0 / tl.sqrt(row_var + eps) # Must be 1/sqrt to match Deepmind's impl + inv_var = tl.math.rsqrt(row_var + eps) tl.store(r, inv_var) normed = X_row * inv_var output = normed * (W_row + 1.0) @@ -137,8 +137,8 @@ def forward(ctx, X, W, eps, gemma = False): n_rows, n_cols = X.shape BLOCK_SIZE, num_warps = calculate_settings(n_cols) - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") - r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") + r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward fx[(n_rows,)]( diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index ff6b162680..f81b7aae9b 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ -41,7 +41,7 @@ def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def swiglu_fg_kernel(e, g): batch, seq_len, hd = e.shape n_elements = e.numel() - h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda") + h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda:0") grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,) return h diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index ddee198b77..935f1d430d 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -105,14 +105,14 @@ def fast_dequantize(W, quant_state = None, out = None): # Create weight matrix if out is None: - out = torch.empty(shape, dtype = 
dtype, device = "cuda") + out = torch.empty(shape, dtype = dtype, device = "cuda:0") else: assert(out.shape == shape) assert(out.dtype == dtype) # NF4 dequantization of statistics n_elements_absmax = absmax.numel() - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda") + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") # Do dequantization ptr_out_absmax = get_ptr(out_absmax) @@ -161,7 +161,7 @@ def fast_gemv(X, W, quant_state, out = None): bout = shape[0] if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda") + out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") # else: # assert(out.shape == (1, 1, bout,)) # pass @@ -179,7 +179,7 @@ def fast_gemv(X, W, quant_state, out = None): ldb = ctypes.c_int32(ldb) ldc = ctypes.c_int32(ldc) - df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda") + df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7a6954c9f8..73aa0c6c92 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -21,6 +21,12 @@ warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "multiprocessing") + +# Stop "Special tokens have been added in the vocabulary, ..." 
+import logging +logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.CRITICAL+1) + import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer @@ -31,7 +37,7 @@ import os import psutil -__version__ = "2024.6" +__version__ = "2024.7" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() @@ -80,8 +86,49 @@ "offload_output_embeddings", "is_bfloat16_supported", "unsloth_offloaded_gradient_checkpoint", + "torch_compile_options", ] +# Just remove max_autotune_gemm warning +import functools +@functools.lru_cache(None) +def is_big_gpu(index): + sms = torch.cuda.get_device_properties(index).multi_processor_count + if sms < 80: # V100 + # log.warning("not enough SMs to use max_autotune_gemm mode") + return False + return True +import torch._inductor.utils +torch._inductor.utils.is_big_gpu = is_big_gpu + + +# Torch compile arguments +torch_compile_arguments = [ + "config.dce = True", + "config.memory_planning = True", + "config.memory_pool = 'combined'", + "config.coordinate_descent_tuning = True", + "config.max_autotune_gemm = False", # GEMM is unnecessary + "config.autotune_multi_device = False", + "config.max_autotune_gemm_backends = 'ATEN'", # Not much faster + "config.aggressive_fusion = False", # Careful changes results! + "config.cuda.enable_cuda_lto = True", + "config.cuda.use_fast_math = True", + "config.cuda.compile_opt_level = '-O2'", +] +import torch._inductor.config as config +for _try_compile_argument in torch_compile_arguments: + try: exec(_try_compile_argument) + except: pass +pass +torch_compile_options = { + "epilogue_fusion" : True, + "max_autotune" : True, + "shape_padding" : True, + "trace.enabled" : False, # Output Triton kernel outputs! 
+ "triton.cudagraphs" : False, +} + def prepare_model_for_kbit_training( model : Any, diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 99374891ab..4c4515b79b 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -247,6 +247,8 @@ def pre_patch(): GemmaModel .forward = LlamaModel_fast_forward GemmaForCausalLM .forward = CausalLM_fast_forward(GemmaModel_fast_forward_inference) PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(GemmaForCausalLM) + # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py new file mode 100644 index 0000000000..0669e4220c --- /dev/null +++ b/unsloth/models/gemma2.py @@ -0,0 +1,538 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .llama import * +from ._utils import __version__ +from .gemma import ( + GemmaFixedRotaryEmbedding, + fast_geglu_inference, +) +from transformers.models.gemma2.modeling_gemma2 import ( + Gemma2Attention, + Gemma2DecoderLayer, + Gemma2Model, + Gemma2ForCausalLM, + Gemma2RotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, +) +from transformers.models.gemma2.modeling_gemma2 import * +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) +# For Pytorch 2.1.1 +try: + from transformers.models.gemma2.modeling_gemma2 import ( + Gemma2SdpaAttention, + Gemma2FlashAttention2, + ) +except: + Gemma2SdpaAttention = Gemma2Attention + Gemma2FlashAttention2 = Gemma2Attention +pass + + +# [TODO] We must randomnly use torch.compile? +# I checked the gradients and formulas and I'm sure it's correct. +# I'm stumped :( +@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) +def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): + old_dtype = X.dtype + X = X.float() + X = X * torch.rsqrt(X.square().mean(-1, keepdim = True) + layernorm.eps) * \ + (1.0 + layernorm.weight.float()) + return X.to(old_dtype) +pass + + +# Logit softcapping +@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) +def gemma2_attention(Q, K, V, causal_mask, self, bsz, q_len): + n_heads = self.num_heads + head_dim = self.head_dim + n_kv_heads = self.num_key_value_heads + n_groups = self.num_key_value_groups + + # Grouped query attention + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + + s = self.config.hidden_size // self.config.num_attention_heads + t = self.config.attn_logit_softcapping + + Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly + A = torch.matmul(Q, 
K.transpose(2, 3)) + A = t * torch.tanh(A / t) # Logit softcapping + A += causal_mask[:q_len, :q_len] + A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) + A = torch.matmul(A, V) + A = A.transpose(1, 2).contiguous() + A = A.reshape(bsz, q_len, n_heads*head_dim) + return A +pass + + +# Logit softcapping +def Gemma2Attention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention + pass + + bsz, q_len, _ = hidden_states.size() + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q, K, V = self.apply_qkv(self, hidden_states) + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + if position_ids is None: + cos = self.rotary_emb.cos_cached + sin = self.rotary_emb.sin_cached + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + pass + + if past_key_value is not None: + K = torch.cat([past_key_value[0], K], dim = 
2) + V = torch.cat([past_key_value[1], V], dim = 2) + pass + past_key_value = (K, V) if use_cache else None + + A = gemma2_attention(Q, K, V, causal_mask, self, bsz, kv_seq_len) + A = self.apply_o(self, A) + return A, None, past_key_value +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 +def Gemma2DecoderLayer_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +): + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: + out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0") + + # Self Attention + residual = hidden_states + hidden_states = fast_rms_layernorm_inference_gemma(self.input_layernorm, hidden_states, out_weight) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states = fast_rms_layernorm_inference_gemma(self.post_attention_layernorm, hidden_states, out_weight) + hidden_states += residual + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference_gemma(self. 
pre_feedforward_layernorm, hidden_states, out_weight) + hidden_states = fast_geglu_inference(self.mlp, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(self.post_feedforward_layernorm, hidden_states, out_weight) + hidden_states += residual + else: + residual = hidden_states + hidden_states = fast_rms_layernorm_gemma2_compiled(self.input_layernorm, hidden_states, gemma = True) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_attention_layernorm, hidden_states, gemma = True) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_gemma2_compiled(self. pre_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = self.mlp(hidden_states) + hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = residual + hidden_states + pass + + outputs = (hidden_states,) + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) + return outputs +pass + + +from math import sqrt as math_sqrt +KV_CACHE_INCREMENT = 256 # KV Cache update size +torch_nn_functional_softmax = torch.nn.functional.softmax + +def Gemma2Attention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]], + position_ids, + do_prefill = False, + attention_mask = None, + use_sliding_window = False, +): + Xn = hidden_states + bsz, _, hd = hidden_states.size() + K1, V1 = past_key_value + dtype = Xn.dtype + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + 
head_dim = self.head_dim + attention_size = n_heads*head_dim + # assert(n_kv_heads * n_groups == n_heads) + seq_len = K1.shape[-2] + kv_seq_len = seq_len + 1 + + # Prefill phase + # if not hasattr(self, "paged_attention"): + if do_prefill: + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) + self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + self.scalar = 1.0 / math_sqrt(self.config.hidden_size // self.config.num_attention_heads) + self.half_head_dim = head_dim // 2 + self. 
t = self.config.attn_logit_softcapping + self.reciprocal_t = 1.0 / self.config.attn_logit_softcapping + elif kv_seq_len >= self.paged_attention.shape[0]: + self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.attention.resize_((bsz, n_heads, 1, self.attention.shape[-1]+KV_CACHE_INCREMENT)) + pass + + Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0]) + Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0]) + Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1]) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + + # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + cos = self.rotary_emb.cos_cached[position_ids].unsqueeze(1) + sin = self.rotary_emb.sin_cached[position_ids].unsqueeze(1) + h = self.half_head_dim + + RH_Q = self.RH_Q + RH_Q[:,:,:,:h] = Qn[:,:,:,h:] + RH_Q[:,:,:,h:] = Qn[:,:,:,:h] + torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + Qn *= cos + Qn.addcmul_(RH_Q, sin) + + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + RH_K[:,:,:,:h] = Kn[:,:,:,h:] + RH_K[:,:,:,h:] = Kn[:,:,:,:h] + torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + Kn *= cos + Kn.addcmul_(RH_K, sin) + + # New KV cache + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + + # Handle sliding windows + sliding_window = self.config.sliding_window + if use_sliding_window 
and kv_seq_len > sliding_window: + # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193 + slicing_tokens = 1 - sliding_window + Knn = Kn[:, :, slicing_tokens:, :]#.contiguous() + Vnn = Vn[:, :, slicing_tokens:, :]#.contiguous() + else: + Knn, Vnn = Kn, Vn + pass + + # Grouped query attention + _, _, cached_len, _ = Knn.shape + if n_groups != 1: + Knn = Knn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vnn = Vnn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) + Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) + pass + # else: + # Knn, Vnn = Knn, Vnn + # pass + + # Attention + # if bsz == 1: + Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 + # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows + A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched + + A *= self.reciprocal_t; torch.tanh(A, out = A); A *= self.t; # Logit softcapping + + A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch.matmul(A, Vnn, out = Qn) + # else: + # A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) + # pass + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, attention_size) + A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) + return A, (Kn, Vn) +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +# @torch.inference_mode +def Gemma2Model_fast_forward_inference( + self, + input_ids, + past_key_values, + position_ids, + attention_mask = None, +): + out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, 
dtype = torch.float32, device = "cuda:0") + input_ids = input_ids[:,:self.max_seq_length] + hidden_states = self.model.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) + # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32 + # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32 + hidden_states *= torch.tensor(math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype) + + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + SWA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = self.config.sliding_window, + ) + GA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) + else: + SWA = attention_mask + GA = attention_mask + pass + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.model.layers): + + use_sliding_window = idx % 2 == 0 + + residual = hidden_states + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.input_layernorm, hidden_states, out_weight) + hidden_states, present_key_value = Gemma2Attention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = SWA if use_sliding_window else GA, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), + use_sliding_window = use_sliding_window, + ) + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.post_attention_layernorm, hidden_states, out_weight) + hidden_states += residual + + residual = hidden_states + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer. 
pre_feedforward_layernorm, hidden_states, out_weight) + hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states) + hidden_states = fast_rms_layernorm_inference_gemma(decoder_layer.post_feedforward_layernorm, hidden_states, out_weight) + hidden_states += residual + + next_decoder_cache.append(present_key_value) + pass + hidden_states = fast_rms_layernorm_inference_gemma(self.model.norm, hidden_states, out_weight) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + + +class FastGemma2Model(FastLlamaModel): + + @staticmethod + def pre_patch(): + Gemma2Attention .forward = Gemma2Attention_fast_forward + Gemma2SdpaAttention .forward = Gemma2Attention_fast_forward + Gemma2FlashAttention2.forward = Gemma2Attention_fast_forward + Gemma2DecoderLayer .forward = Gemma2DecoderLayer_fast_forward + Gemma2Model .forward = LlamaModel_fast_forward + Gemma2ForCausalLM .forward = CausalLM_fast_forward(Gemma2Model_fast_forward_inference) + PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(Gemma2ForCausalLM) + + # Solves https://github.com/unslothai/unsloth/issues/168 + # Static KV Cache was introduced in 4.38.0, causing training to be much slower. + # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. + # https://github.com/huggingface/transformers/pull/27931 + # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py + import transformers.models.gemma2.modeling_gemma2 + transformers.models.gemma2.modeling_gemma2.Gemma2RotaryEmbedding = GemmaFixedRotaryEmbedding + return + pass + + + @staticmethod + def post_patch(model): + # Patch model for Gemma + layers = model.model.layers + + # Torch.compile fails on embedding matrix?? 
+ # Workaround randomnly fixes it for torch versions < 2.2 + model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + model.config.update({"unsloth_version" : __version__}) + + # We also do this for the lm_head + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.lm_head.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + + # Gemma has tied weights! This means lm_head == embed_tokens + if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.model.embed_tokens.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + pass + + # Also patch all dtypes - BnB seems to not allocate the correct type? + # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! 
+ module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + # Downcast RoPE embedding to correct data type + # RoPE must be done in float32 for Gemma + # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ + # and (module.cos_cached.dtype != correct_dtype): + + # module.cos_cached = module.cos_cached.to(correct_dtype) + # module.sin_cached = module.sin_cached.to(correct_dtype) + # pass + # pass + pass + + # Add 1 to weight + # return output * (1 + self.weight) + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gemma/modeling_gemma.py#L89 + from transformers.models.gemma2.modeling_gemma2 import Gemma2RMSNorm + + # Freeze all parameters except LoRA + # We do this first since += 1 seems to not be liked by requires_grad = True + for name, param in model.named_parameters(): + if ".lora_A." in name or ".lora_B." in name: + param.requires_grad_(True) + else: + param.requires_grad_(False) + pass + + # Patch RMS Layernorm + for name, module in model.named_modules(): + if isinstance(module, Gemma2RMSNorm): + # Must be in float32 + # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36 + # module = module.to(torch.float32) + # Leave + 1 to Triton kernel itself + # module.weight += 1.0 # return output * (1 + self.weight) + if not hasattr(module, "variance_epsilon"): + module.variance_epsilon = module.eps # Gemma doesn't use variance_epsilon + pass + + # Clear deleted GPU items + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2368a37672..e19b85726b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -15,6 +15,8 @@ import torch import gc from typing import Optional, Tuple, List, Union +from ._utils import * +from 
._utils import __version__ from torch.nn.functional import scaled_dot_product_attention from transformers.models.llama.modeling_llama import ( logger, @@ -25,8 +27,6 @@ _prepare_4d_causal_attention_mask_for_sdpa, ) from ..kernels import * -from ._utils import * -from ._utils import __version__ from ..tokenizer_utils import * if HAS_FLASH_ATTENTION: from flash_attn import flash_attn_func @@ -78,6 +78,24 @@ def original_apply_o(self, X): KV_CACHE_INCREMENT = 256 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax +# Fix new HF's inference code +def _fast_prepare_inputs_for_generation(self, input_ids, **kwargs,): + if "past_key_values" in kwargs: + input_ids = input_ids[:,[-1]] + kwargs["attention_mask"] = kwargs["attention_mask"][:,[-1]] + kwargs["position_ids"] = kwargs["cache_position"] + return { "input_ids" : input_ids, **kwargs, } +pass + + +def fix_prepare_inputs_for_generation(module): + # Fix prepare_inputs_for_generation + if hasattr(module, "prepare_inputs_for_generation"): + module.prepare_inputs_for_generation = _fast_prepare_inputs_for_generation + pass +pass + + def LlamaAttention_fast_forward_inference( self, hidden_states: torch.Tensor, @@ -542,7 +560,8 @@ def LlamaModel_fast_forward( inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma - IS_GEMMA = self.config.model_type == "gemma" + IS_GEMMA = self.config.model_type.startswith("gemma") + IS_GEMMA2 = self.config.model_type.startswith("gemma2") train_embed_tokens = self.embed_tokens.weight.requires_grad if IS_GEMMA: @@ -642,17 +661,38 @@ def LlamaModel_fast_forward( offloaded_gradient_checkpointing = True pass + # Gemma2 has alternating SWA and global attn + if IS_GEMMA2 and not hasattr(self, "SWA_mask"): + from transformers.modeling_attn_mask_utils import AttentionMaskConverter + n = self.config.max_position_embeddings + self.SWA_mask = AttentionMaskConverter( + is_causal = True, + sliding_window = self.config.sliding_window, + )\ + 
.to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + + self.GA_mask = AttentionMaskConverter( + is_causal = True, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + pass + # Go through every layer! for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None + mask = causal_mask + if IS_GEMMA2: mask = self.SWA_mask if (idx % 2 == 0) else self.GA_mask + if offloaded_gradient_checkpointing: hidden_states = Unsloth_Offloaded_Gradient_Checkpointer.apply( decoder_layer, hidden_states, - causal_mask, + mask, attention_mask, position_ids, past_key_values, @@ -670,7 +710,7 @@ def custom_forward(*inputs): layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, - causal_mask, + mask, attention_mask, position_ids, use_reentrant = True, @@ -681,7 +721,7 @@ def custom_forward(*inputs): else: layer_outputs = decoder_layer( hidden_states, - causal_mask=causal_mask, + causal_mask=mask, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, @@ -838,6 +878,7 @@ def _CausalLM_fast_forward( logits = logits.to(self.config.torch_dtype) loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): @@ -849,7 +890,12 @@ def _CausalLM_fast_forward( loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, + logit_softcapping = logit_softcapping, ) + elif logit_softcapping != 0: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping pass if not return_dict: @@ -983,11 +1029,22 @@ def _fast_generate(*args, **kwargs): pass internal_model._flag_for_generation = True + # For newer HF + 
kwargs["cache_implementation"] = "dynamic" + + # Set pad token + old_pad_token_id = getattr(model.config, "pad_token_id", None) + old_eos_token_id = getattr(model.config, "eos_token_id", None) + model.config.pad_token_id = old_eos_token_id + # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): output = generate(*args, **kwargs) pass + # Revert + model.config.pad_token_id = old_pad_token_id + # Unset a flag for generation! internal_model = model while hasattr(internal_model, "model"): @@ -1013,6 +1070,7 @@ def pre_patch(): LlamaModel .forward = LlamaModel_fast_forward LlamaForCausalLM .forward = CausalLM_fast_forward(LlamaModel_fast_forward_inference) PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(LlamaForCausalLM) # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. @@ -1056,7 +1114,7 @@ def from_pretrained( f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release {__version__}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ - f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ + f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() @@ -1200,11 +1258,11 @@ def from_pretrained( 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True) output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) - if output > 1: raise RuntimeError( - 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\ + if output > 1: print( + '********************\\nUnsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\ 'enabling it will require much more work, so we have to prioritize. Please understand!\\n'\\ - 'We do have a separate beta version, which you can contact us about!\\n'\\ - 'Thank you for your understanding and we appreciate it immensely!') + '********************\\nWe do have a separate beta version, which you can contact us about!\\n'\\ + '********************\\nThank you for your understanding and we appreciate it immensely!') for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1760,6 +1818,7 @@ def patch_peft_model( elif model_type == "mistral": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "qwen2": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu_approx + elif model_type == "gemma2": apply_lora_mlp = apply_lora_mlp_geglu_approx else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d87af0a18d..9134d4a226 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -26,8 +26,11 @@ major, minor = int(major), int(minor) SUPPORTS_FOURBIT = (major > 4) or (major == 4 and minor >= 37) SUPPORTS_GEMMA = (major > 4) or (major == 4 and minor >= 38) +SUPPORTS_GEMMA2 = (major > 4) or (major == 4 and minor >= 42) if SUPPORTS_GEMMA: - from .gemma 
import FastGemmaModel + from .gemma import FastGemmaModel +if SUPPORTS_GEMMA2: + from .gemma2 import FastGemma2Model del major, minor @@ -138,6 +141,15 @@ def from_pretrained( f"to obtain the latest transformers build, then restart this session."\ ) dispatch_model = FastGemmaModel + elif model_type == "gemma2": + if not SUPPORTS_GEMMA2: + raise RuntimeError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ + f"The minimum required version is 4.43.\n"\ + f'Try `pip install --upgrade "transformers>=4.43"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastGemma2Model elif model_type == "qwen2": dispatch_model = FastQwen2Model else: diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 4b40065083..cec7332ec9 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -191,6 +191,14 @@ "mistralai/Codestral-22B-v0.1" : ( "mistral-community/Codestral-22B-v0.1", ), + "unsloth/gemma-2-9b-bnb-4bit" : ( + "unsloth/gemma-2-9b", + "google/gemma-2-9b", + ), + "unsloth/gemma-2-27b-bnb-4bit" : ( + "unsloth/gemma-2-27b", + "google/gemma-2-27b", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index d8bd85d478..e0b51a16e8 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -275,7 +275,8 @@ def pre_patch(): MistralModel .forward = LlamaModel_fast_forward MistralForCausalLM .forward = MistralForCausalLM_fast_forward PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward - + fix_prepare_inputs_for_generation(MistralForCausalLM) + # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. # Inferene can now be CUDAGraphed, but we shall retain the old rotary embeddings. 
diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py index 984bf7ca00..5b9fff5d55 100644 --- a/unsloth/models/qwen2.py +++ b/unsloth/models/qwen2.py @@ -43,6 +43,7 @@ def pre_patch(): Qwen2Model .forward = LlamaModel_fast_forward Qwen2ForCausalLM .forward = CausalLM_fast_forward(LlamaModel_fast_forward_inference) PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(Qwen2ForCausalLM) # Solves https://github.com/unslothai/unsloth/issues/168 # Static KV Cache was introduced in 4.38.0, causing training to be much slower. diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 50b09275aa..8727ca03f5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -963,11 +963,11 @@ def patch_sft_trainer_tokenizer(): " 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ "output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output)\n"\ "output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output)\n"\ - "if output > 1: raise RuntimeError(\n"\ - " 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\\n"\ + "if output > 1: print(\n"\ + " '********************\\nUnsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\\n"\ " 'enabling it will require much more work, so we have to prioritize. 
Please understand!\\n'\\\n"\ - " 'We do have a separate beta version, which you can contact us about!\\n'\\\n"\ - " 'Thank you for your understanding and we appreciate it immensely!')\n"\ + " '********************\\nWe do have a separate beta version, which you can contact us about!\\n'\\\n"\ + " '********************\\nThank you for your understanding and we appreciate it immensely!')\n"\ "for _ in range(3):\n"\ " gc.collect()\n"\ " torch.cuda.empty_cache()\n"\ From 5ab565fb2c811d0b85d68dadd2ac1b32dee05e8b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 3 Jul 2024 12:12:21 -0700 Subject: [PATCH 0247/1088] Gemma2 (#723) * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md --------- Co-authored-by: Michael <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 534079ed49..f3bc0608bf 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ - + ### Finetune Llama 3, Mistral, Phi-3 & Gemma 2-5x faster with 80% less memory! @@ -18,48 +18,40 @@ ## ✨ Finetune for Free -All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. +All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, Ollama, vLLM or uploaded to Hugging Face. 
| Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | -| **Phi-3 (medium)** | [▶️ Start for free](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) | 2x faster | 50% less | +| **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | -| **Gemma (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 71% less | +| **Phi-3 (medium)** | [▶️ Start for free](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) | 2x faster | 50% less | +| **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -- **Kaggle Notebooks** for [Llama 3 8B](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 
7B](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral 7B](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral 7B v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- **Kaggle Notebooks** for [Llama 3 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported +- 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! -- 📣 NEW! 
[Phi-3 medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) and [Phi-3 mini](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) support is here! -- 📣 NEW! [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) support is here! -- 📣 NEW! Qwen1.5-7B, Qwen1.5-14B, Qwen1.5-32B, Qwen1.5-72B now work, courtesy of Firefly's PR [#428](https://github.com/unslothai/unsloth/pull/428) -- 📣 NEW! [Llama-3 8b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) now works! Llama-3 70b also works (change the model name in the notebook). -- 📣 NEW! [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here! -- 📣 NEW! We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support fine-tuning of LLMs with [4x longer context windows](https://unsloth.ai/blog/long-context)! No change required if you're using our notebooks. To enable, simply change 1 line: -```python -model = FastLanguageModel.get_peft_model( - model, - use_gradient_checkpointing = "unsloth", # <<<<<<< -) -``` -- 📣 [CodeGemma](https://colab.research.google.com/drive/19lwcRk_ZQ_ZtX-qzFP3qZBBHZNcMD1hh?usp=sharing) now works along with [Gemma 7b](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) and [Gemma 2b](https://colab.research.google.com/drive/15gGm7x_jTm017_Ic8e317tdIpDG53Mtu?usp=sharing) -- 📣 [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models +- 📣 NEW! 
Qwen2 now works +- 📣 [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct] +- 📣 [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here + [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models +- 📣 We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support [4x longer context windows](https://unsloth.ai/blog/long-context)! ## 🔗 Links and Resources | Type | Links | | ------------------------------- | --------------------------------------- | -| 📚 **Wiki & FAQ** | [Read Our Wiki](https://github.com/unslothai/unsloth/wiki) | +| 📚 **Documentation & Wiki** | [Read Our Wiki](https://github.com/unslothai/unsloth/wiki) | |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| -| 📜 **Documentation** | [Read The Doc](https://github.com/unslothai/unsloth/tree/main#-documentation) | | 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) | 🌐 **Released Models** | [Unsloth Releases](https://huggingface.co/unsloth)| From 9b4cc934efec66abd0a77df011779b393a99c026 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 4 Jul 2024 13:26:57 -0700 Subject: [PATCH 0248/1088] Fix GGUF (#731) * Update mapper.py * Update Model Conversion Command in `save.py` to `convert_hf_to_gguf.py` (#730) * Updated convert_hf_to_gguf.py call to align with changes in llama.cpp repository * Update save.py --------- Co-authored-by: Daniel Han * Typo Fix (#690) --------- Co-authored-by: M. 
Ali Bayram Co-authored-by: johnpaulbin --- unsloth/models/loader.py | 2 +- unsloth/models/mapper.py | 11 +++++++++++ unsloth/save.py | 27 +++++++++++++++++++++++---- unsloth/tokenizer_utils.py | 2 +- 4 files changed, 36 insertions(+), 6 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 9134d4a226..82d177e466 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -108,7 +108,7 @@ def from_pretrained( # Cannot be both! if is_model and is_peft: raise RuntimeError( - "Unsloth: You repo has a LoRA adapter and a base model.\n"\ + "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ "You have 2 files `config.json` and `adapter_config.json`.\n"\ "We must only allow one config file.\n"\ "Please separate the LoRA and base models to 2 repos." diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index cec7332ec9..31b3ab6df6 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -199,6 +199,17 @@ "unsloth/gemma-2-27b", "google/gemma-2-27b", ), + "unsloth/gemma-2-9b-it-bnb-4bit" : ( + "unsloth/gemma-2-9b-it", + "google/gemma-2-9b-it", + ), + "unsloth/gemma-2-27b-it-bnb-4bit" : ( + "unsloth/gemma-2-27b-it", + "google/gemma-2-27b-it", + ), + "unsloth/Phi-3-mini-4k-instruct-v0-bnb-4bit" : ( # Old Phi pre July + "unsloth/Phi-3-mini-4k-instruct-v0", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/save.py b/unsloth/save.py index 9163c6d38d..1ceea3c19b 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -800,8 +800,8 @@ def install_llama_cpp_old(version = -10): # Check if successful if not os.path.exists("llama.cpp/quantize") and not os.path.exists("llama.cpp/llama-quantize"): raise RuntimeError( - "Unsloth: llama.cpp GGUF seems to be too buggy to install.\n"\ - "File a report to llama.cpp's main repo since this is not an Unsloth issue." + "Unsloth: The file 'llama.cpp/llama-quantize' or `llama.cpp/quantize` does not exist.\n"\ + "But we expect this file to exist! 
Maybe the llama.cpp developers changed the name?" ) pass pass @@ -945,9 +945,28 @@ def save_to_gguf( quantize_location = "llama.cpp/quantize" elif os.path.exists("llama.cpp/llama-quantize"): quantize_location = "llama.cpp/llama-quantize" + else: + raise RuntimeError( + "Unsloth: The file 'llama.cpp/llama-quantize' or 'llama.cpp/quantize' does not exist.\n"\ + "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" + ) + pass + + # See https://github.com/unslothai/unsloth/pull/730 + # Filenames changed again! + convert_location = None + if os.path.exists("llama.cpp/convert-hf-to-gguf.py"): + convert_location = "llama.cpp/convert-hf-to-gguf.py" + elif os.path.exists("llama.cpp/convert_hf_to_gguf.py"): + convert_location = "llama.cpp/convert_hf_to_gguf.py" + else: + raise RuntimeError( + "Unsloth: The file 'llama.cpp/convert-hf-to-gguf.py' or 'llama.cpp/convert_hf_to_gguf.py' does not exist.\n"\ + "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" + ) pass - if error != 0 or quantize_location is None: + if error != 0 or quantize_location is None or convert_location is None: print(f"Unsloth: llama.cpp error code = {error}.") install_llama_cpp_old(-10) pass @@ -1035,7 +1054,7 @@ def save_to_gguf( f"--outfile {final_location} --vocab-type {vocab_type} "\ f"--outtype {first_conversion} --concurrency {n_cpus} --pad-vocab" else: - command = f"python llama.cpp/convert-hf-to-gguf.py {model_directory} "\ + command = f"python {convert_location} {model_directory} "\ f"--outfile {final_location} "\ f"--outtype {first_conversion}" pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 8727ca03f5..b0bc514f67 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -376,7 +376,7 @@ def fix_sentencepiece_gguf(saved_location): """ Fixes sentencepiece tokenizers which did not extend the vocabulary with user defined tokens. 
- Inspiration from https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py + Inspiration from https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py """ from copy import deepcopy from transformers.utils import sentencepiece_model_pb2 From 4be284bd79d2c4ffab378b93d7282b54f96647e9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 5 Jul 2024 23:48:42 -0700 Subject: [PATCH 0249/1088] Gemma 2 bug fixes + All RoPE Scaling Support (#736) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py --- unsloth/chat_templates.py | 27 ++++- unsloth/models/_utils.py | 211 ++++++++++++++++++++++++++++++++------ unsloth/models/gemma.py | 43 ++++++++ unsloth/models/gemma2.py | 18 +++- unsloth/models/llama.py | 15 ++- unsloth/models/mistral.py | 13 ++- unsloth/models/qwen2.py | 13 ++- unsloth/save.py | 7 +- 8 files changed, 306 insertions(+), 41 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 5f5b4e16c9..596548df3f 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -416,6 +416,21 @@ CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True, gemma_chatml_ollama,) pass +# 
=========================================== Gemma 2 +# Same as Gemma 1, but with sliding window attention! +# https://ollama.com/library/gemma2/blobs/6522ca797f47 +gemma2_template = gemma_template +gemma2_ollama = gemma_ollama + "PARAMETER num_ctx 4096\n" +gemma2_eos_token = "" +CHAT_TEMPLATES["gemma2"] = (gemma2_template, gemma2_eos_token, True, gemma2_ollama,) + +# =========================================== Gemma 2 with ChatML instead +gemma2_chatml_template = gemma_chatml_template +gemma2_chatml_ollama = gemma_chatml_ollama + "PARAMETER num_ctx 4096\n" +gemma2_chatml_eos_token = gemma_chatml_eos_token +CHAT_TEMPLATES["gemma2_chatml"] = (gemma2_chatml_template, gemma2_chatml_eos_token, True, gemma2_chatml_ollama,) +pass + # =========================================== Llama-3 # Weirdly \n\n is needed? llama3_template = \ @@ -1014,7 +1029,17 @@ def get_ollama_eos_tokens(tokenizer, extra_eos_tokens = []): pass final_eos_tokens += extra_eos_tokens final_eos_tokens += repeatted_tokens - return final_eos_tokens + + # Remove new lines, spaces and HTML tags + filtered_eos_tokens = [] + for token in final_eos_tokens: + if token.count("\n") == len(token): continue + elif token.count("▁") == len(token): continue + elif token.startswith("<") and len(token) <= 2: continue + elif token.startswith(" Date: Sat, 6 Jul 2024 18:50:00 -0700 Subject: [PATCH 0250/1088] Debugging (#739) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py 
* Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py --- unsloth/__init__.py | 5 +--- unsloth/models/_utils.py | 39 ++++++++++++++++++------- unsloth/models/llama.py | 59 +++++++++++++++----------------------- unsloth/tokenizer_utils.py | 34 +++++++++++++++------- 4 files changed, 76 insertions(+), 61 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 298ed13399..feb550be5f 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -39,10 +39,7 @@ first_id = devices.split(",")[0] warnings.warn( f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {devices} \n"\ - "Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so "\ - "enabling it will require much more work, so we have to prioritize. Please understand!"\ - "We do have a beta version, which you can contact us about!\n"\ - "Thank you for your understanding and we appreciate it immensely!\n\n"\ + "Unsloth currently does not support multi GPU setups - but we are working on it!\n"\ "Multiple CUDA devices detected but we require a single device.\n"\ f"We will override CUDA_VISIBLE_DEVICES to first device: {first_id}." 
) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index fd1b87c532..e0df218a93 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -21,6 +21,7 @@ "xformers_version", "__version__", "HAS_FLASH_ATTENTION", + "PRE_CHECK", "platform_system", "patch_tokenizer", "get_statistics", @@ -32,30 +33,27 @@ "unsloth_offloaded_gradient_checkpoint", "torch_compile_options", "patch_linear_scaling", + "check_nvidia", "create_boolean_mask", ] import torch from typing import Union, Optional, List, Any, Callable, Tuple -import warnings from platform import system as platform_system platform_system = platform_system() -import math import numpy as np -import os -import psutil -import inspect -import re +import warnings, subprocess, re, inspect, psutil, os, math # ============================================= # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") -warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "multiprocessing") +warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "multiprocess") # Stop "Special tokens have been added in the vocabulary, ..." 
import logging @@ -74,7 +72,10 @@ config_filename = f"{model_name.title()}Config" exec(f"from {config_filepath} import {config_filename}", globals()) - config = inspect.getsource(eval(config_filename)) + try: + config = inspect.getsource(eval(config_filename)) + except: + continue if "rope_scaling" in config: continue config = re.sub( r"(\*\*kwargs)[\s]{0,}\,[\s]{0,}\)[\s]{0,}\:", @@ -345,7 +346,6 @@ def get_statistics(): # We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. try: - from huggingface_hub import hf_hub_download from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled import psutil n_cpus = psutil.cpu_count(logical = False) @@ -367,7 +367,13 @@ def get_statistics(): disable_progress_bars() disabled = True pass - hf_hub_download(f"unslothai/statistics-{statistics}", "README.md", force_download = True) + + from transformers import AutoModelForCausalLM + stats_model = AutoModelForCausalLM.from_pretrained( + f"unslothai/statistics-{statistics}", + force_download = True, + ) + del stats_model if disabled: enable_progress_bars() pass @@ -659,6 +665,19 @@ def patch_linear_scaling( pass +def check_nvidia(): + # Unsloth doesn't work yet on AMD devices - we're working on it! 
+ try: + output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) + except: + raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") + output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) + output = np.array([int(x.decode('utf-8'))/1024 for x in output]) + return output +pass +PRE_CHECK = check_nvidia() + + def create_boolean_mask(n = 4096, sliding_window = 2048): # Creates a boolean mask for attention mask = torch.ones(n, n, dtype = torch.bool) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c7ae67e428..32f2c6b71c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -83,7 +83,8 @@ def _fast_prepare_inputs_for_generation(self, input_ids, **kwargs,): if "past_key_values" in kwargs: input_ids = input_ids[:,[-1]] kwargs["attention_mask"] = kwargs["attention_mask"][:,[-1]] - kwargs["position_ids"] = kwargs["cache_position"] + if "cache_position" in kwargs: + kwargs["position_ids"] = kwargs["cache_position"] return { "input_ids" : input_ids, **kwargs, } pass @@ -1128,7 +1129,7 @@ def from_pretrained( f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) model_patcher.pre_patch() - # get_statistics() + get_statistics() # For debugging - we use a download counter to see if environments are not breaking if dtype is None: dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 @@ -1180,6 +1181,8 @@ def from_pretrained( # Add to kwargs kwargs["rope_scaling"] = rope_scaling pass + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! + pre_check = check_nvidia() bnb_config = None if load_in_4bit: @@ -1206,6 +1209,8 @@ def from_pretrained( attn_implementation = "eager", **kwargs, ) + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
+ post_check = check_nvidia() # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name @@ -1235,14 +1240,12 @@ def from_pretrained( else: inner_training_loop = Trainer._original_training_loop except: - raise RuntimeError( - 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ - 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ - 'We do have a separate beta version, which you can contact us about!\n'\ - 'Thank you for your understanding and we appreciate it immensely!' - ) + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') pass + if ((post_check - post_check) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + import transformers.trainer items_in_trainer = dir(transformers.trainer) good_items = [] @@ -1266,16 +1269,15 @@ def from_pretrained( f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) - import subprocess, re, gc - output = subprocess.check_output( - 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True) - output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output) - output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output) - if output > 1: print( - '********************\\nUnsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\ - 'enabling it will require much more work, so we have to prioritize. 
Please understand!\\n'\\ - '********************\\nWe do have a separate beta version, which you can contact us about!\\n'\\ - '********************\\nThank you for your understanding and we appreciate it immensely!') + import subprocess, re, gc, numpy as np + try: + a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + except: + raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') + a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) + a = np.array([int(x.decode('utf-8'))/1024 for x in a]) + if ((a - PRE_CHECK) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1287,12 +1289,7 @@ def from_pretrained( debug_info = """n_total_devices = total_train_batch_size // \\ args.gradient_accumulation_steps // self._train_batch_size if n_total_devices > 1: - logger.warning_once( - '* Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so ' \\ - '* enabling it will require much more work, so we have to prioritize. Please understand!\\n' \\ - '* We do have a separate beta version, which you can contact us about!\\n'\\ - '* Thank you for your understanding and we appreciate it immensely!' - ) + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') debug_info =""" debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) @@ -1317,12 +1314,7 @@ def from_pretrained( total_batches = bsz * ga * args.world_size n_total_devices = total_batches // ga // bsz if n_total_devices > 1: - logger.warning_once( - '* Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so ' \\ - '* enabling it will require much more work, so we have to prioritize. 
Please understand!\\n' \\ - '* We do have a separate beta version, which you can contact us about!\\n'\\ - '* Thank you for your understanding and we appreciate it immensely!' - ) + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') divisor = n_total_devices / 1 bsz = self._train_batch_size = max(int(bsz / divisor), 1) if total_batches // ga // bsz > 1: @@ -1346,12 +1338,7 @@ def from_pretrained( "False", ) if "n_total_devices >" not in inner_training_loop: - raise RuntimeError( - 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ - 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ - 'We do have a separate beta version, which you can contact us about!\n'\ - 'Thank you for your understanding and we appreciate it immensely!' - ) + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') pass inner_training_loop = inner_training_loop.replace( "is_sagemaker_mp_enabled()", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index b0bc514f67..440a53c1f0 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -24,6 +24,7 @@ import collections import numpy as np import gc +import subprocess __all__ = [ "load_correct_tokenizer", @@ -907,6 +908,19 @@ def add_new_tokens( pass +def check_nvidia(): + # Unsloth doesn't work yet on AMD devices - we're working on it! 
+ try: + output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) + except: + raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") + output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) + output = np.array([int(x.decode('utf-8'))/1024 for x in output]) + return output +pass +PRE_CHECK = check_nvidia() + + from inspect import getsource import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * @@ -957,17 +971,15 @@ def patch_sft_trainer_tokenizer(): " 'Please do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ " )\n"\ "pass\n"\ - "n_devices = torch.cuda.device_count()\n"\ - "import subprocess, re\n"\ - "output = subprocess.check_output(\n"\ - " 'nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ - "output = re.findall(rb'([\\d]{1,})[\\s]{1,}M', output)\n"\ - "output = sum(int(x.decode('utf-8'))/1024 > 4 for x in output)\n"\ - "if output > 1: print(\n"\ - " '********************\\nUnsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\\\n"\ - " 'enabling it will require much more work, so we have to prioritize. 
Please understand!\\n'\\\n"\ - " '********************\\nWe do have a separate beta version, which you can contact us about!\\n'\\\n"\ - " '********************\\nThank you for your understanding and we appreciate it immensely!')\n"\ + "import subprocess, re, gc, numpy as np\n"\ + "try:\n"\ + " a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ + "except:\n"\ + " raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!')\n"\ + "a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a)\n"\ + "a = np.array([int(x.decode('utf-8'))/1024 for x in a])\n"\ + "if ((a - PRE_CHECK) >= 1).sum() > 1:\n"\ + " raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!')\n"\ "for _ in range(3):\n"\ " gc.collect()\n"\ " torch.cuda.empty_cache()\n"\ From dd6ea68d609ba14fcdd13b4a200d1f11135411f2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 6 Jul 2024 23:59:03 -0700 Subject: [PATCH 0251/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 32f2c6b71c..64bc6f84f0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1243,7 +1243,7 @@ def from_pretrained( raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') pass - if ((post_check - post_check) >= 1).sum() > 1: + if ((post_check - pre_check) >= 1).sum() > 1: raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') import transformers.trainer From 82e6a93ef25c7d3da50b4a4b717c44736d3bfc42 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Jul 2024 09:33:01 -0700 Subject: [PATCH 0252/1088] Fix exec, eval (#743) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Check exec, eval --- unsloth/models/_utils.py | 6 +++++- unsloth/models/gemma.py | 6 ++++-- unsloth/models/gemma2.py | 6 ++++-- unsloth/models/mistral.py | 6 ++++-- unsloth/models/qwen2.py | 6 ++++-- 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e0df218a93..725e9b3ff3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -618,7 +618,11 @@ def patch_linear_scaling( f"from {model_filepath} import logger, "\ f"{model_name.title()}Attention, {model_name.title()}Config" - function = inspect.getsource(attention_module.__init__) + try: + function = inspect.getsource(attention_module.__init__) + except: + # Most likely already patched! 
+ return None, None where = function.find("def") function = function.split("\n") function = "\n".join(x[where:] for x in function) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 4d3db8d393..c47e65e87b 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -281,8 +281,10 @@ def pre_patch(): scaled_rope_module = GemmaFixedLinearScalingRotaryEmbedding, attention_module = GemmaAttention, ) - exec(function, globals()) - GemmaAttention.__init__ = eval(init_name) + if init_name is not None: + exec(function, globals()) + GemmaAttention.__init__ = eval(init_name) + pass GemmaAttention .forward = LlamaAttention_fast_forward GemmaSdpaAttention .forward = LlamaAttention_fast_forward GemmaFlashAttention2.forward = LlamaAttention_fast_forward diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 4a1420fb42..fda78534d8 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -436,8 +436,10 @@ def pre_patch(): scaled_rope_module = GemmaFixedLinearScalingRotaryEmbedding, attention_module = Gemma2Attention, ) - exec(function, globals()) - Gemma2Attention.__init__ = eval(init_name) + if init_name is not None: + exec(function, globals()) + Gemma2Attention.__init__ = eval(init_name) + pass Gemma2Attention .forward = Gemma2Attention_fast_forward Gemma2SdpaAttention .forward = Gemma2Attention_fast_forward Gemma2FlashAttention2.forward = Gemma2Attention_fast_forward diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 28f664ca2d..6eb3fccfab 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -277,8 +277,10 @@ def pre_patch(): scaled_rope_module = LlamaLinearScalingRotaryEmbedding, attention_module = MistralAttention, ) - exec(function, globals()) - MistralAttention.__init__ = eval(init_name) + if init_name is not None: + exec(function, globals()) + MistralAttention.__init__ = eval(init_name) + pass MistralAttention .forward = MistralAttention_fast_forward MistralSdpaAttention 
.forward = MistralAttention_fast_forward MistralFlashAttention2.forward = MistralAttention_fast_forward diff --git a/unsloth/models/qwen2.py b/unsloth/models/qwen2.py index dcd05af60e..82de1951b8 100644 --- a/unsloth/models/qwen2.py +++ b/unsloth/models/qwen2.py @@ -45,8 +45,10 @@ def pre_patch(): scaled_rope_module = LlamaLinearScalingRotaryEmbedding, attention_module = Qwen2Attention, ) - exec(function, globals()) - Qwen2Attention.__init__ = eval(init_name) + if init_name is not None: + exec(function, globals()) + Qwen2Attention.__init__ = eval(init_name) + pass Qwen2Attention .forward = LlamaAttention_fast_forward Qwen2SdpaAttention .forward = LlamaAttention_fast_forward Qwen2FlashAttention2.forward = LlamaAttention_fast_forward From c2f967e357e21d74e4995faee1559aa10d2f2e31 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Jul 2024 09:49:45 -0700 Subject: [PATCH 0253/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e0df218a93..c814198774 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -360,6 +360,7 @@ def get_statistics(): elif "\nAZURE_" in keynames: statistics = "azure" elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" elif "\nINVOCATION_ID" in keynames: statistics = "lambda" + else: statistics = "other" if statistics is not None: disabled = False From a1e59fa0d35c33aa3ea32827b7d6d3faf3efed9b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Jul 2024 10:22:59 -0700 Subject: [PATCH 0254/1088] Nightly (#744) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec 
* Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Check exec, eval * Update _utils.py * Update _utils.py --- unsloth/models/_utils.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7b31372a7f..cecacc0501 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -341,18 +341,19 @@ def patch_tokenizer(model, tokenizer): # ============================================= -def get_statistics(): +def _get_statistics(statistics = None): # We log some basic stats about which environment is being used. # We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. 
+ # You can disable this by commenting the below out try: from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled import psutil n_cpus = psutil.cpu_count(logical = False) keynames = "\n" + "\n".join(os.environ.keys()) - statistics = None - if "\nCOLAB_" in keynames and n_cpus == 1: statistics = "colab" + if statistics is not None: pass + elif "\nCOLAB_" in keynames and n_cpus == 1: statistics = "colab" elif "\nCOLAB_" in keynames: statistics = "colabpro" elif "\nKAGGLE_" in keynames: statistics = "kaggle" elif "\nRUNPOD_" in keynames: statistics = "runpod" @@ -371,7 +372,7 @@ def get_statistics(): from transformers import AutoModelForCausalLM stats_model = AutoModelForCausalLM.from_pretrained( - f"unslothai/statistics-{statistics}", + f"unslothai/{statistics}", force_download = True, ) del stats_model @@ -384,6 +385,29 @@ def get_statistics(): pass +def get_statistics(): + # We log some basic stats about which environment is being used. + # We simply download a README.md file from HF - all data is made public. + # This is simply so we can check if some envs are broken or not. 
+ # You can disable this by commenting the below out + _get_statistics(None) + try: + vram = torch.cuda.get_device_properties(0).total_memory / 1024 / 1024 / 1024 + if vram <= 8 : vram = 8 + elif vram <= 16: vram = 16 + elif vram <= 20: vram = 20 + elif vram <= 24: vram = 24 + elif vram <= 40: vram = 40 + elif vram <= 48: vram = 48 + elif vram <= 80: vram = 80 + else: vram = "80+" + _get_statistics(f"vram-{vram}") + except: + pass + pass +pass + + def _calculate_n_gradient_checkpoints( n_layers : int, method : Optional[Union[str, int]] = "sqrt", From 36488b58677a2283de55aed0461df0237bb88fb8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Jul 2024 15:46:36 -0700 Subject: [PATCH 0255/1088] Update llama.py --- unsloth/models/llama.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 64bc6f84f0..7221a19a2a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1037,9 +1037,9 @@ def _fast_generate(*args, **kwargs): kwargs["cache_implementation"] = "dynamic" # Set pad token - old_pad_token_id = getattr(model.config, "pad_token_id", None) - old_eos_token_id = getattr(model.config, "eos_token_id", None) - model.config.pad_token_id = old_eos_token_id + # old_pad_token_id = getattr(model.config, "pad_token_id", None) + # old_eos_token_id = getattr(model.config, "eos_token_id", None) + # model.config.pad_token_id = old_eos_token_id # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): @@ -1047,7 +1047,7 @@ def _fast_generate(*args, **kwargs): pass # Revert - model.config.pad_token_id = old_pad_token_id + # model.config.pad_token_id = old_pad_token_id # Unset a flag for generation! 
internal_model = model From ea330bdc730454ee4069698babf65b2dcc0aaff9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 8 Jul 2024 10:01:20 -0700 Subject: [PATCH 0256/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cecacc0501..9eaded31a1 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -400,7 +400,7 @@ def get_statistics(): elif vram <= 40: vram = 40 elif vram <= 48: vram = 48 elif vram <= 80: vram = 80 - else: vram = "80+" + else: vram = 96 _get_statistics(f"vram-{vram}") except: pass From 4ae30315e3bfbfd9d2df14afaa40134d7f34623f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 8 Jul 2024 10:38:49 -0700 Subject: [PATCH 0257/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7221a19a2a..e41a45b266 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -898,7 +898,7 @@ def _CausalLM_fast_forward( ) elif logit_softcapping != 0: logits *= (1.0 / logit_softcapping) - torch.tanh(logits, out = logits) + logits = torch.tanh(logits, out = logits if not logits.requires_grad else None) logits *= logit_softcapping pass From f1dc078c266b914d8fe652c893ed94f3b35919c8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 8 Jul 2024 10:44:19 -0700 Subject: [PATCH 0258/1088] Update llama.py --- unsloth/models/llama.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e41a45b266..de7f7bc49b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -897,9 +897,15 @@ def _CausalLM_fast_forward( logit_softcapping = logit_softcapping, ) elif logit_softcapping != 0: - logits *= (1.0 / logit_softcapping) - logits = torch.tanh(logits, out = logits if not logits.requires_grad else None) - logits *= 
logit_softcapping + if logits.requires_grad: + logits = (1.0 / logit_softcapping) * logits + logits = torch.tanh(logits) + logits = logit_softcapping * logits + else: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping + pass pass if not return_dict: From 92dce38e8b3c1db209cef860d90b60188e95f0f9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 10 Jul 2024 01:59:06 -0700 Subject: [PATCH 0259/1088] Many bug fixes (#754) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Check exec, eval * Update _utils.py * Update _utils.py * Images * Bug fixes * Update pyproject.toml * Bug fixes * Update _utils.py * Update _utils.py --- images/Assistant.png | Bin 0 -> 82593 bytes 
images/Terminal_Type.png | Bin 0 -> 69402 bytes images/Where_Terminal.png | Bin 0 -> 179005 bytes pyproject.toml | 4 +++- unsloth/models/_utils.py | 26 +++++++++++++++++--- unsloth/models/gemma.py | 32 ++++++++++++++++++------- unsloth/models/gemma2.py | 48 +++++++++++++++++++++++++++---------- unsloth/models/llama.py | 8 ++++--- unsloth/models/loader.py | 20 ++++++++-------- unsloth/tokenizer_utils.py | 16 ++++++++----- 10 files changed, 110 insertions(+), 44 deletions(-) create mode 100644 images/Assistant.png create mode 100644 images/Terminal_Type.png create mode 100644 images/Where_Terminal.png diff --git a/images/Assistant.png b/images/Assistant.png new file mode 100644 index 0000000000000000000000000000000000000000..120703475091e1ce74a38a05949ae51af0a36f72 GIT binary patch literal 82593 zcmcG$dmz+l`#=6B)mS-JDuqQlIn_#XoN*`{m3Flq9JeK^DXI}UjG5Y^5;`q8&ZsTx zX_4fZp-rU{GesuHDaDL1XvPdP#(b~$pxSNs^ZV=f$Maa?p8LM;>-f4}*L`-|Ru|=O zw7x+QM0wNq&OaiE5)MITwf) z)ykHlqMlAS8ryELoX2odz2TQ`BzRU;&BjW{dPSaD5k09rLdjFg_)~sXj_=UP1-~V?53v)q()GvAj#|yJ zYkbK*d)vCLbg4kQwXdn2Rx9u^aLpe}U9!RXZA;KgOUqR{ZACk%wxLOR3JmpqL3E}y zStl~lgFwL_WVN#VA2zoTMI{;w;w{UA?UT2-zPf^^bQ5M@)&+7&vjrErY9q`v#dzgKAWI$o8{<`@xgCbRq%m@Tvi z$zAMhcKeUx8E}GkY=)>Mmb=qnSz?)Wo6VJeY>8iB~j_r{%|_Lqc>ceCtRn(~}vx@LA{Siv>}az0JUh;cbjaZ+Hl2=4`+ zk3Jjq*o%hldSFe%?TF1f)Jxc~D|2A6+<}sq z=M3NMfaD_VEBz>`hvn^VFj3TMI(j>I{f!h)Q$v))ptH>D=!+flyE#f9a0`TzN9&_>GqdzWPkJ&hv^5>1uR#D_8Z$vY^Co^7gRsSMD|fODD<`P4jGbz$D-A51quJR z2*<2&IrX;1-X($J8MB6TIi16 zpB>E6=LG-Tt$<=>FGUrLB~YDcQ8tn1V3yjCzg1=3IgLbeT9P-GtDk6UE$#8mX>J!c zF=z)ilt<0WW@u5KezoOLbjygf*tmpm!;WAd}d^{9%R7RRvDvz@W6qCPW%1O$yKBl!>gB7<7G-nGGgv9CEfkz;+2ohHZk6rKkk818z%@_lQ${@n(y#d5NJ z$MyI>#B2EJtnr@3SgAU0hRmcro-dMHyJbYR`sADP{QjRcsJs%=5UH9cAo1xTyKKmb zYF!4pV^2)H6sd7pA^W|TymiSV;04eUC~sogZcrRSlcxPz@>?xaENe zt=Y~1TwbQe8ng1S{4;q)QT2*cr&4$9DemkS|03)VmYeTKjlj`!1*6}P9Z*S+H?WQm zN)OlUjCr;51|ENQMHibi!45fl+|rBH%HVBSk(NhU^&rVp+}z%GDYh!~WBQ}@&3>-8*ynfI z-E((-)FLvB<<8d&DVf;r{MIeM`UMV8Hke^J~0=EKGdS 
z2+=J+(@1`(-`9_4;deP-ckWvOUD8gM{NUU%li`3U%kJdLRde}$X&-cBX8uL+X*sbJ zxBMiVB-E8AS^lYwbJ;u`Up*RMde9K!R@IFoM^1fPEXXTo8g7_(nW)QLf-4wFoipHN zge3m4zh08k`i!S+=aRSH(otw3Wc_;Pc@h0*qZ7l;lmc^pMsdx1OtV+}a_6SZ4*Y@p zm3&dN{%Av1X|lx(&YAk6sC=-86<`nh4^{@SinCVEl{+3*a0#&vbvQPjS{$D}lOfwL zE_XUgtJyJs)7~f_I?+K9kvhkOF(TDqHJlRFOC85rM)r+EL=`hQCXXI@k*X3AQ3*|f zfsdnW%kkGTwQ%90kitZ4^*2*~OoOly+2SFoDY&qq@=BeGo}BgYV>W4!C~Z{6&t_B) zDODe4r^NK81h@Q%fR4Q^E9b{!9%4BdB#WK0yBRUCG8N%paL z@hQ|!z@$=qS&q)FE#`uH<&-#qxB14z3-?CsWR|sdo$sANMse}A_f==|yxONSEnT^_ zvfEqzXgV!9lhr4BRxs{Lc2KvKiU*}6*3eIoGxoN6>faoFU27la_V8YdsJZJ z8BFPaQzEYkV!1fywutbzn}al7M3;n|Dr^yzH+Pb9^KAVTxGe-{Bt4IUACQz9>kIp% zk}j+Rb8MyzZhy#c;ce_s&og`V!Cvngx8GM?A*j#Ygcfs1{W7FxXOFMH$ISrCd{f`d zR4a_=i`vM^>@b&qb5OF~O`u&Ncw`oHq$1|*nFe;dQG9xi+LTi>{1gMnAqMVJWXRMn z*aab`Bazzj`{ejH6sZJxPGqcO_sgMj1F|MAsINBQEy3k03mleN;C(|P|GF-XH=IPW zEk2F0mLKSENq+puLg>@C$SeMWjo3clitO8GE=xUkAeDj%w?o}y`VSkju5YAP_H=qJ zcpEODg&3Mbg1uM&bwgHp*j3w5$~K;_x@VB^>RdC`0d-St2bYXYJBYoBM!XDcuoy`vEAsdeuE zIK9ql5G~W()P}53b~lOG?;mbIeAfN`noydf)h2N)B`67s}pNrtk}?8zws7cn6Dbt);`b)W%5-Tyz*==?iFRsqbTzF|;M z?q|-!B|V}$AVjekBq-giz+f)Im7b^AKS@Ory>CMLObp#z*&wlTXc2}I^Os#m21RCrP&l0StGxlo^$ zw+6UTRr?_+ji(#K+4{c?3KCG@@rLEtv8N6x=cxt_4O#oQ{%m`$!*Dp%+1$tqEIXC_qV#Mr8z(n-N9ZVBm*GXgA7^_t7nH9z(PnHZTYex{3|D3F0`z z5WhrBd0?6C-hy1z-a&?lb<=r}?EkQ%f2np73H)k7fdt>noC~VdPG4PWOS;Z-Pc-K{LbI;DB2$5TKYhYg?|A!fzh9>c`s6!)T6>7(W-`f~|YhN0a zVCQ;hC0QU~kHu!~H!&>amt(BYKdSS1@ZK|IU)IrrZCbj_7y^$G!_JTAtXI0khU)SM zo@Y;NotG-Lru?wg`GI?U(HbA5<1F@aoaq_ODR1{#TBGIpjlXUp-@buZZ%&}PU8!=Y z2z+8h_DurygzW#S#v*#*2&fj>_dxy7F2pVqiKHQsXFZ1rK?f9ku7x2!*Wxo~GR7=C z?y68zRwPhrwjtVG^w?o``sm;r%j&Uu%b1C2RlE6jRa<($8TBR#K3cfA8*2vlU&t6( zPWIgh0*2DC>HPW9iD)k+Dl;*$R09`Jsq5DxbN^I#WF}H<@NhjdINQuma%VT7{%R#8 z=9w0b97FotX?K$L$)O`YXi;;a>FkHa-l?d&VEnG=63{uHnu^<5;ody z*&Ve@9HGG2E0~9i^Hk@1z9q1<)I&ER+6SB&IYLbwo4L7gBYd}k;YPVmFeR%@XP8aX z@eo7pd#18n?!hhv;@Q#if*lyz(im>~;t`e8Pg?BdIeWq9ft;KYo`9l-_aYU0;Zp^~ zyT@-DzwdAzUs589j#8r9fYNFX6a4bjD%QqujTE!vxJKlLg4AE~kcy(Er}9VUWX33% 
zOgr?nX@`DLx%&=Ww9zVv%(o@UnJGI7CPS;2z z4I5sjoXc|GOOMDMwK^{59lNTrurx8Nc5}i7yaN24T}_(sX5Pv*p#z+;45(n7EVqpCZSgE^Tim!(oDnJBbT>4siX?As;^PcH^2bb}7yI(Qrl5a+* z-4`5bx!1Wbjua_YU}R{WquA(k(jlSX*3vVZ)kCo{+%q!0Og!`{F|mj$EFQHAK(!n} zOcbf5?nwPgIlirnDz#{|9I}LlE_UMN$CZ8YakISuEkh;f-Ry!Dy|AH-zh5qgRW;2AJ9fNdU)Zn{f6l2MuxG#2 z?pswKZoQ-}-1%|$Y>MUmZm!6XJa7e)GJzwB4aL8!vd>d|#wuA40AJ>qeLx5gJi`Uu?!kMi;yN#SG5jv#{J8SmXY* zV-CQV9OxS+Jh^tu;!$>>W=Ep>`{b2lfBs5e{N^JV!2M##ejB%~HQD%H%f|`P_Joy! z;O~(KUo5as^~(iv7roEAC%xFkxb9IB8x z^81dzNrP}1I3*ow$mdnvXq<$CN}gGjwD8`Eqo?ZJJn$o*%$MbWbL zu5Lw$o6&^;AG*D9YN0ouf>!`O`S{y(9k5C9ecEWbhnB;jNsLNM_PaFCnT!;b3$pZP zbtWRZYC0c>755kQ5^V~iI{IGrHH?k3u@tUQ#H^m^uV}Pz>g&l@ZV^3a+Y8J2q9GHo zqh1R(bQelkuL;q8r7?YZBNR!0P-C&4SM|f!v#s|le$TB zLN?WO=Z2h2w1|r05tgGOg`)aOEY0cd$9DF?5u&Lf8ZmxWQef0Jts-YFwx^_NIVR1l zWB<}!F2`>NJFBL*50!@;P@ru@e6*IPI5TbtKRiF1QeZ_^ant9lWS*mdGb5Xfd6D`; zZeVLhODFP3Mm|`k`}jlm-^J24sh~PDpMu{ft>&jp{8%yZ@(WvSD#wWWqyaHLuL-ET zskQLYC;MudF<(2$d~*%DEA%JWaL7Mz{BZQ@T%1|)QYtrH!+{C;TH0I(vjmEb$BZ7l zD%EMzltu^wRz%zXq~++|QN8w*&NmeILnf!PK4lqOl>OC1Pd(qQKoiQG3;w{OS+z7* z0sY#Wa{4M%8022~*t?J#8pWu%d_%>z_{-AY;x2Z>l{32$B0XcuGWwr3tuHsXJPCHx zj)^=l-rn+~g576V>e=JWa5;neqOzNK#a}trn>W8*jH- z@SPG>_o~J^cRiT{DfuNhqdis2A%%;zIGME=8(yN}K%c@c9ZPQBytju(8W#O^+e)8v z{h`cUE+dg%GBrAQ0<^sO8*0u&`tIl6kv>7Lw=+&RI3@R6T4ptze7{ofBa{!>7skBr zSFR=NRK%?K87d3*ZY_uS$;(v7`89v`90#JF)>ZFWe|Umb%|*j-To;=;2rV1bv=kU^ z`kyo?G#3%Ir)SjW`)T0lXDKzWV>|?D8V(ett2MhWMCT%rn;zPR9qK)nS%~pO?ELmm zU0BKQYqxSU;;AWIlIclPxKOV%RQ4365Ah?mfYf3+r%KVfsMHc0j%o?Sf(@vG+}FfV zllCWoj~H)X)S9yv^cNeaCC-ee3K!zs6)!q%dJgAPxuJd+v9`2?suDF$CcC)rCl;>v;Q0#Z;#O_@`4# z4m^BrsK6N6k}zp|_cax%Ga0M@Kr3nsy!8TyL92fe&{DF_a&pxcgqSJ2ru$i6{?X!d zRH4qKSBS#=ig*6&v(qz~!Lfpph3hNNX;WZWDpJ|$vH%Rnx!;WW{J|y=LO!XSGL>H5 z#D>m}imO6Z3AR0mAX;N{tsPj2KL~= zJ$-WgZDo~VNC)$yIRN%4OUZ$&->~I=?JVkebuqC}+c-n3j!;oIKl+3+5 zdQgFJ-%yd7>WUVKX<7;l?~{nA4A74)h#kZnuR5e6aI{?==UxN_jVw1- z-Pg&5)-NoB603P4`u}6eTS#QWs1=4vPuIX!3yKReD|aZMSopz5?cYa$D@na4Z5lF&CezJt}8-UP?)LKPq 
z2wojmFdxOi%B+TY8TiS&*s(d`3LE@Uu9%KGQv-mj>3cp$=PP}>>q?;f6hW7hwF^UQ zF2YbxxM|?bUs=p#cqpHKD7&1Zy@S#hQR;(`pdG>zu*dBU$u|w<`0L?Y0Ixl|*aZ6O zdEdb&-@Y^&(NH{fSu!+otZCaH-dgq)tyU8LE#Zr7WAc-Wkw_z`ChJ!0MwGoTA!)gQ z9Vt?GUq;eQ=HlF!lLM8Y5O#WT6jY)uaURZ{R*FPc84eTJy*ZSv1$}cJP^0(G`JC8o z%O|~l{Qz1tCXeaa-2}zS(}0lH$*sj};EMKwZYeOpg+^uoV(&H+BCQcQD`(?)ySi+x zFQU70wb1R4{aaAo`FFH?fGz>Mh|o;#`E9UZ$0TlKL92SmSpI*47Fjfq?l!=1AM za{Zn~TRH#BV(1EpwO%xcL^-y6(5r4q$@Cle=eG`TO{i$vz)Rj6j_)8}S>QI^Qm62zp5O@iz_rKA)Qv17UfM2a|r zCp4zzJBJl`;5(uclQti3Fg+Zs4M1%{(di*igtmD+mF+wPf{!@&}a(tb4sQ=G@3Y3C_1&z2-@9nnp=B4;rGOutVzOP#t=?c|Uz1o;J z-}e2FYwjWy-s2H$nTykX4cl$5(vAL(C43v#Gm!r1xsx2Pd}ypGO^-i2(7x{Kjx~HR zp_MOq7Yx_%FNvFnx)wYa(|$yV9nktkVS8iHQ(3f%m=UU@Q=p9vf2_Z6*c|o#NB*hT zkf3+5_d`r%&*0=98E)TKQM4B4tAQ2fEplY4P;4x^-n|?e+uo-ZsRW05L^*b6sN7wF zw#5Y^;bS_n*ol)iOeiZsDt4Um)^Jp!wp<4A?+{mq)h~kP13Mne4)}|(QGsR`rAkdXMbGq<3mq6LCzt@TJUpDCCfX+#Djczt-WRe) z$C41yLO5~eCACObG_0^nm`H{B@>ATnXNKy03k62Du0tungp%nEMt`xv_TInVx1xnJ z7uy|WBm5Iz`$T>pIZ`fkS7gU_Jk+yV>U;o}cWEp^kw!Xx-nUcRD}+U0dvmCR1H8&r zkWK7YpzRo60!#VY*k)-t=&mGlNn4ODTiA{qs~6_X?2Vi2XP}5<4A*%oQ7cpVVk6#R z>&tKAzoDiaP}f(Y%Eiv$G(IWQU8y?rNz1VgtnNM2%?W7!=^wSFJZ3*)XBcB2p0x7B z(ftT9c(9PT+-~QunB9;Bj$18{&or0k3|Rew*v&JiDBW~3;H(_xwVxTAfffBszYfvk z+Pgw>MDGX@uG{_J@u&6W%jEbGCNl!$*5V*V7gj9&XhJjfO0imC=H7Ch5L^I)-G%tv z8Qd1?t$|CL`J5oIS^Pu664<%y?xutTaWy0UYOMfr#Nhd6OmG?^@58nh-YOPC>mZJ= zS^n{)m9oqYl=u@utLNoZ-hUX4KYLOzNs#78AjDNl@${Jw_cR=Ho6Ou2Fp%Ld@l;^E z@;@NQ?|Dce+<6TVsWa=k0)x=iT^^FCW3Gi=FzZ#yonoY7>8{}UpjDIf7X0c@B?XH4 zUB*M~@N0$8^paJLXuXLBscBN; znRH?o`;fIGVrS{C(?$rsYda5zR>*l!3W!e|W*;5+!@4L35uqtu!@DGG%sVz#mV!cB zb?$TcY!b{S4b3jvEADrYX#j>QbVihKOCh@-Z_l0YpgNNQ8T0LK#Ci_I&CcIPd0RE@ zXAkqj2JFbD5qQGF)}7E`irTM@W1hLfOoZ@(a~WUKHziiw7cgE^^2`QyfVlrJROKfN zjfJYz+ODp`eP!<}EVvdwLF%7Z3Mv2FkAUc_;RfBuQ(eX@&7xGOZm8$0%G(QGtc_wL z&xxA8wl9F%Toa`X`cpy6uZ_Pxg(!;F6t@SF3JCOzDr%u!vmX#`9b8;9#OE{sda5?b z_6_8pGnZ|gE`BUlVECY&EbwRu^0$45ow^=8DG%v;1`z9h69_u)r6Wj%E*eQ?>uvkxa{#j#c^4#u{L#fQRB^I113J2E 
z|AhT`Y`SZ!=@7TTo}{bCe<5o}Oe5?0e|=HGRooo4;;Sz9g8TuBoS(&Vp|?h@uFPUU z9lB>f;$uLyhemLp299-iBht|liRoAc27o4tc2gSgnaUS{KcImtFj5Mpf5M>3)gJRXv7`TG_cIPBp=a{Q*g((jRa!1`F*ZOB|>#cZz) z3|X=&TkpQbRr8&Ta6v~1XAg!v27(g`60BVFu{UjH-<<+s{cbN0>1}l+{~{uiljFx= zsO4~N>3|Zr*DGK10-ZwB-p?z|#|5bqlv`*|CRZvc|LY?|v=VQc_xX5{SSXbMGu)CV z̞CJ^Tlka1LP@W2Bq{C`~V}UmGfygQr1{?rhrrR1IGOJKn^O;Ei{ej)onk)D4deb(C`G6{I zM9MZo>(Hr}*7N!RI7YKLD>>c^eJ~clSc#1QdaTxT&@}{RK&QM3Ip+4(&nMy}YKym1PLw)d-A~4d+cqDP6MC4Kn?i3?7E6`{%Sy81_8Zo53;u zzUoweOTLfnLQQj*{>=3wtn#Y93G`9K`?}q1M)pPpyAmnvrVw2xgoK4|UQGASnC^xg z`;pwmS<>-zJsrSRWFDsqivQbzRB$xY&N$7AP=;75T{K%6^%&m_+|-sdw_J5I3ipQ! z_gvYB7|8L5CLpe~%JIubt(q{@8Xf51hn7QE`3$x6Rv2QZi5ho=JZD6{+cV_qbYm5C zdI@wG<=^Mtk%;c#Z$1;;+aZ&3?%v>2?S9yiG`(U$>MfZD>;q~92t}u@6mtBYjcARx z+S1$pM`-PtnrotobK6>IM3$@b;~E~Kq=HV!Z1&_~A_u@@7e?JZ55k+&wzQt-d*nIB zw)V^0g3cW*AlxD5|1fC`sf+cJ@^rF!i_W>;f|6_Ac**81PE za}9_}dMYSeH+jq(x_T6)i}XA@;fM#Jxw#2@P=SoUn<8tN(+7SsC#!e}7T>e?&@e7N zL&bi4hmB@O7{xt-Pgh`wOW&I?JZjht;D=S)SvecmLw0Tj{nDCUx0m!d*I5YDeo+cJ zF)eF+n)yBGWH63@A4UW-iw$7PHXyL)!vfKP_u*sxeM}0HZ61V6U zU4po7`b&dAM>S_~CT(|_x0aDRv5tR55+?xPYmm_(@T@C-^irN9S*%2@*`{C@>f_v? 
zy99GRNegGz5@a0B1>ZkNxgEX*ZOTKR{ZXA;ftf{jm08SRcUinnp@Gn`C%4aLTI045 zdJX{pyEW}jRNx(a!KTUEsk`=mO^c46SJc)LdfL?WEX0`=E{#zku)1t->VE47;H@KM zo$?(2^$Q&6dgOfk(>6+y2K*94*$BVxpaoOj)a$#TFl}7Wi6hJIg@H88TZ)<2Cb6_9 zUGF!C9r6pSztAs2J-yr;tpO8RE1gb^)w9s&G?tYI0_2hi`qXAXwokmF&R5#n)ng3y zN-u)FL}92l_)}=8%S?k%K1u9;eaVV2idf$Z-qK>8@htzIrtJQAduXnTJ2(KYd}~S7 zZ4ESGF(@*72Sy)+zx_7Dc3i@gJ5Lhih|5^DSiF*MwAQ2&+UrDY#J*?JF+AA;uj zryh4nKtumJa>IizkCtQLxQ?#6$JOGa?inrWQI`ez3feTc=weq{>G63Rv-U!G^e{SQ zs)(j8>(IK1SM^*uyJ*zP8@fB-G12;?Q=ZdsNv+}-n1cGJY|#W-0G#<>f5^|$n-3Xy z7kX-;TBLDw)hVkrq4k*bFti4$^n;9lfo+KI^XM%-&M%6uv#+OWIQgxL)cMXb7CHiS zS&INn)%AI*&n(97ZPBal<%6T#x|gXv>EkAQ;?Rs#W?+GhH7r#DUsSY@W*IIH^84El6qKDF9qol}`VlBIIKy9MXbZOPp0 ze{Abc{~|GL>(75~%dY{iBT2Mji3i8?Y(gZg%E1QGC~0CmQ+MpCbhshOo`;pY%^>mp zh`_#=uwvdfq))VxBH6{PiN`i(#NN7>tT^=ay8dPD%^_wWPkq(n2^p`^_fvy1<@iPB@6{yqdJcDHSOp;3 zcltb^W%X_k>v-|i6(5t>xKBuINW5x^?&KPH={>(P-V?kpWnx0M>j>07F8zlw`Z8Ig z^cYD)()fGb-`{E@DI>a)oqiR(|AkC6_71XO4@ib(P#8o|;}u{%Oh)$~8O!C;WYWVR4`XR{uHP2H!W-;`X z;@$T$^HxU1f+u8l@fYBysW7NUf0`MxiUz6RnHM^2F8f58Y$X*2PO)zO=3!EOX2XK# zAyQcE6+RDO`d)7I`}1Et2pYRC`1m=66>JV0D7KP3N}FQ5fIE5#DUy?_T`0SpPBfM6 z_3&y!pb!+{mv<1{4DZlt1ERds|J$Ens7{(oD3=G!H z%vz7V;*q_~E6)VglOg9{9piksk@cP6_6okbi^de{O}K51!ShW?_P{l zwK~Oy^!{yJ^*W!~GHEug3Ch1r@|sKs@^ zuwnmE{|Hs+{;@Nl8Q3-gO2p-|?f3eQJ==LRs)^r1j}c6CG|&x_Y%g);kUZk}JKbt2 zYb%@GRxN33wA)o{HvYh@UHU$xAv}g1ezR}Sp+OQ#L|;{D6%ze!apOo7uMsP06jop7 z&BdD436M8gV02Yv<1n2|(6RgbQD0`w*cU>Ybzm+oBn8r(WZBoWJSeNLjz;V*nT-h1 zuzWh8kO!$04W=!=41S3VfuvZ_xVUrZ#^P!f-_Zxyi~0WWU94HgxL9<#x1t#`+~y5W zO#t+@=cUryiD;o0^|25*&Ri17hk!`js`+l=*k1=n69>Pq?sQz>3_$ET(=+yf+izH~ zsg_=Mr(nimI-nAMlTv40|A*8IfU`Ho-1W^q*sOHAN;{Fck{t1#5v zXonZ%qD0j*AU80PZ^@mRlcHSyg(ywN$62CV;W51-(h&!`h(N6A0%fKfXD$fGJP{WPlGvfl7gI_oaoD!hHhoDQU3W!IiJgtg0lhHY`8G?iv zbPXTT#ehEiV-XYCA|YR-a(`O{YD8^ARD_oQB|_Sy2={&|LcnuM0H3mpmFM6wX>Mnz z%;FrRwT-e!pWL|s`fPw?-9L(=SOE%*F)c)a5qIHHgVUY9QW+=RLYCxFXUeU;zfO@_ z5QjwS$nh(n_JcwW^)qHPVQ!r6LeKRPLfZ?B zvzjPe6x*}7dU$aoF!s38|Lvr$@@X66=U|DX?q99Kx8(NwN+L#X`DOanZh@Xo4wv+c 
zYEODqZ~Dt(7oNMc7fX|Bv$8@adP24qyge!_?4ex(Fp+jqid3^<0Xr8uJ!i7rg}XGqr^IK) zU3tu4|I#xwR3kjuvl5T;kwCDOQNR76zIE!KJ-E!7_3Q_152$R)g{y|IEqJAvGn4VI z0jN!cvudK&860kmp}8ecbZ*@vHk@Xz$@zg-zR)Ug6UJ_@G~4PZMv`7VmM*NWi}vNE zF@I;CHjcK>Bi&)CNbqI@DdE}-+^G*Wkcgrv_;>Vkj#fZe#;{8VCfP4d=<44<{Mu!Y2@ z0QrwbTe~@NOhbMBC;BOpqF%e4XY!mvj#WA>jsc^AE?m-b%bfW1qM@9(4`a9ei)+Ey8-`Y_Es(%$JyowbAw#%THxR(lMzi3Qq}6 zDBU^-2yx_w%vd`6K~H%fzM>g{I?sN=I-ygyl{){*{qpoiqFC?fg0+;#Po0hcE8I@g zp3*i5T((kffo(9#VyDE`<@|M2+ULK9rvu%b0@VR2s{g{BOO~JP^hbzVSk;*gl4C%a z9KF}qi#AF+)rb!f(c0*k{=(Xq$1?pUy8WuEKXzV;WdK^?{*Ik(=@dtij!~+s53tnv z+mOuq{haos>-P#WAJcQP3Br*Za{T?H<8~~bU2jFhY?wz78x3x$SA!*WqUs2mn}q&s zPxzH_TR}3ru^1ggSqq6ec(WOk-qY<%@44tx=Y0`1@pH4C4el`IDY#SqQE1KyHL(B+ z@}ZsB)T9=;FS*dG3VglHPgP1kY|9%DX-3DVH)kL{44 zkSH0%gH(~PrraE01qEEZMjfvVwRRgxL#-;26_M_U>M>n0yk?q z>H^SF3pdL4kf=rWwo%0ut3@_d?`&QclrmJQ?1yxs4~8bRbO^s=X-|oz6FWq^8mq&M z5BtLBzhe{L@^WJ^J_aymQ11`_Z5qnGsg#v63uZ&u9~$?;p|K9*xDV07o^ zMh3*2YVfk{*rT~vmv62^vjdpE{Nh=RV&k(^_Zgg(Q5jXK5w02DS`N&^htxv>t^v*% zfM*lWJ9vMn^OI-5eR!oQHA#gu(b|`c_qNB%@s99fQ0}%y9%z#ykD6g)E1Z41gO}p?rBY$&y=ce4A z1L6bvl?X6x)OkwH73eUbU>n+@QKhmV9lF6}uG0j(WhSHe6~Kxn=xEe;k~0xtLPrlh zO;0UV!_|5_DG;GyAhinInqh0XuhR)yj}$~ySuD@Fbf$a`<1-v>qCg5|8n8;wHgCvG|bnC!@&)3E9g%=&Z2vJ|$)mFrZ z;RNtgh5)llY$hPo<^t2a8|5)IWB;!#vY*PS+ z6Ur9_)Qsn{=rEhIB3DobRAMAF9&GfQ$`|txMOPI1!n|~q~K?DOJv zdPKi}&dZGKBxGIY79}T*KUB@3ulc>3C+Saek1C1FY7?0EWkglE+3jS-(QV)H%*M|< zy&b+la5$O^7Is8!LEI~%DyUg{NIwMz`w{D>d$}XaIMCXi$?!6Sd_NQj zo4_AHGauQs`E>oWZ?BSEXbE%wx87RwO6Odpci_K|Ul)i79=$zEgO&jFBlH_8t~XTE_a&D=4AqZvX{F}(P3JB(Dn z94#*jWo5&L!Nk>Y1wf~Y)#m^Px9P6z00qtHP}oKYJCNSL%U?qd26p(UO#mbex=>p@ zWbJeh8Yo?FRu~9_UgPb+(}kx7_T$0b0*%8}p(AfMcnVN8aLDl#f1nrp!{p?Yw#lA4 zm>xOuzD$5F;_c(XYddypeVY3jsJ}St2+VK= zqAzDG=KG$1gtk|J<Yt_UR}F9@diWE{*;9a{?Yb)n;Jfb!UQz+=mYYX|Pp5~z##+hKgH zYC3#IseoS@fw=(oM!G-YAt(~x7Miys4_Vs&P?p0Q%VVmJ2FqeOJfxvW6~f%^LWl-? 
zzhqjWMY8whY)IHvb6uV}!PtS1kY1awM9_yB1m0_3cD^#o=m+kNMEuuBurJqfTcM6+ zR!BGeC&t!r!X$y3(ob9Jn#HSRbImo^kd8kvH$988dFiFvbj7+#U>KGs;OUd~Kdy!0t(cHF~&pV(-hhZM8sL+rYE{HvA^q&8u z?)N>r#?b_qDAO2_3IklFDgG-}xuU=bFK}~Yz3jVl%;~XwPj1wyLZah-1v}8fRF|m>OqQ1P!o1+$t89!<6Dl9VIG78b?|~&))-)YLQ$hjFDRl zG;lzn`rFP;fy@^nXbBqBd3YXzNOS*hNJSOg*#6NW!= zG7AT@m+sKSGS$@wU#gXv&r^T_2sXDAI#WEcc03gt0>hGrbT=3&gdt9K2Q&2LlnDNTB=u|Ot0V(`EX2S$K544E|-Xju% z9OT7Zru|8sZ?4q5#6A1v+T&>noRDwu}kXoowPvM(s9(Ilcy_K;eR?3*u_JBgEfO+ZM(H z76LTlJ`D%6B7heXV9@1*2}2@2KS0~4;N$NOAb=NoUwiZI$SQajw0ns5l2M9oq6RL! z?-!V}RbZ%MsBwzaHaxVV!Oh-5OTjq zeEiTEg3BrxkhI} zw8B5of!o@E*lkAaf}srBh{CUJchRxu`DiX`gLh^~Mj@ewD_`;- zSMCJ)jlOk)z^U9Q5%ojwnj0A`y9p;>{^mpdO9{^BTj zH#>;7vR_HE0d4UxI?3%~gR=nnu7hk1BmlqfD$ltJd`=*ud1&EkWjwJNo6VuCCwr1? zs@ZXP3sAICj>j$qZBuQ>qz#O>XHf4`4z6r;C`r1l!ks-vpX=C|j1r?l%T1sxfLSIW zXeATYj5M7o-wrYB@&?9@sCw|_u}|FQY-lp!5S6$iLq51L z+_YQdj~0R>Xcb4 zB+L056*|6)QDFj8MN14NbVonfJ4z}TDhFay1+d5d_Fw4`hu6SA!^K&_s|a8Q#gL`m zKx@E5Ug%^z2AxHM%pA(^GcXqhWYf5`kdxd8FpkSWQY^l@I>8cL={Aj3?g>$rJ%P2 zXL<*A0j}Vd64f8d_7*P+{&XE!qz|HPZgH}qQ`9agf znoV?>bykZSGO^+y>A{LXpn#R2DbFV0OrTk5=yRYy^YiFFL+C6P)aQ*uR)I1OP`joN1Nv)lvXfX!5rXW4epuj+%1qKpahSkdjarTTPc>} zX>+kTwXbc0iU9Jmn4xHE=pdLLx3}~- zDf{B3dJX&WVp2Ql?s2uCYA`gI1cxba=G`Ea(y@aue!1?L&gkc=zSefe;7C z>dMQHo*^_ddaVbTxa_s3vaH=$qsHV}e|#~I zENeczNkb)|NVRd~M0n1m5uDzw?rw<{_#7R$3dNqb-ddPVL!TL`f@u{mqL_C97&Uos zBA%BotXLaqe+^!qNZ>>~8DAaYITXF!;}?$S3jzg+tboEdZz%OUVB8dbJEjGfAzb*K z->-#gBJWe1qnz{z&lzpm1zX0ok3DO`P=p`6F&+8<_8Nk6+Yz)IPBY*Yj2!slA9BXG z4&ciD{5jAx{WL2HM0DVHb-(5SU4tvkLK*h7tSdCbUE4km^`eSDQeH`7Ms2 zzpn>a!ET5h-B6xGqe&L62M#XUqkV4qmCsXPq*z*>2?i$##8YqxNf~G3@zr|eJHDqi zz1%hizBISo;Vvf43N|{|!5uPLn1=TT%m-bsC;cDAdl|e;dMH(6aj6Pbyv)uIY89gs zhatt?L5=1k6(O#3Ovr&Ma(odK(*Hg?08a%loguMdJHB(<$ave&t3Mx7+c{AQk1u}E zVe9u=Tzd~=4mNDZed=pHAlrL_Ldl2VdBDk`Yvi`sSJ_ekfTU^;tk7P>`n)~s`Dhr* zQiphNGV`%MI#IP5-jb~GqUMx4BidV0P#ZlN(qH$Pu`Od?+_z8%6lz-lY{hzxJ?n%# z$L`@H=0eD8Arq^I3UM7A%3C(H41X^8az+UyNuerS3Z8OYJP+P}S#kog7M^vF?wTZ- 
zq7T!C4iwMDl^UG1|IZgl%9a6S4jB}j_|70<#T*qT@GlKqAW>|)gH9g7@K^`DYiB!k zf-hO?Q&!XuY+L3+NZ9JC>4huSFIhSDZHN{?wlZNH+6x69P?EnUzE3T&1eS1#r^a=4 z-G)2ikFA1Pt>DTiz}by~B%b`B%kwXx?;Id|!VLb(VSNZ;wCg^unB{+p&2@w(sJk zLpp}yhsT6jdm=K7W8nAz7I!40qk^CK6#vZ51e&-w0tEJFSx!;lP*VCWG&RGMXEEFa z>HO%^7YBAB+OhKQR)*L&G&jLe`yY$k-HFXB)d>qsQoN%M953ko>*FqTObBW+n5FH= z!bD#E?+=4VEzEu}C+F{f+LoU4u+Hr_s9U_@l_RBi!2S0O0HJ$Fkv{@o4poQo_FjnjsXED0Kxs2Z!>eCZ9NNmQnP`_&YEF1*A$E z)T|qKEdfe8-p(&)V`Y?<>LA1!NMCL@^MY5Bs{{~ITwxdqB}Omml+XOkyZ)yOdWV?1TCjCAX7YuX>Bc>AYn< zH~!+yBV60}g12@z?7IPasV)jIwfP^)f-rVN&eZ1kUK!>Gh*q+i4D}3u8!wrpHIlbN zR(-cZb%>0J?f{y&pnCLCuju-JN(6C~P0+%dLuPIW@$!G}Kuv*h_@F$e4&@(AlVxS> zmoLFNhar=1gX zQuN53v(*1}=wy`N;O3rC4(8;fy$z-mJ$YSY`+baB0e@_Z_~dt z;dzY({8_AEM4(S5e9R8fo__m~5c2jY|JR2;BRTkh;2VPvw^?c-@8#ChXJ;E5vU?v^ z?02s70C+|!kMyK4L9X6wEM!gg_3|a2+&^5q{wF5mvpMJe1D$sN(YS=8+`ZEfbAxNO zEnj@muvUImR?-{n)f4Pvz5v6P52#VPMBW<1VT?^+OEcNh0be%j2!YsN(SHAcqInmq zB#z|LVkTHxgJMj?b(&Ff4LxsPKQ2&32l*^Bbz@GxrlT?e8CJi4gyVPk%9CvJP7=16 z#h&MUmmBu=>vGMOICS^N*7wHdfKMw3gbwvh>88SV<0t$qoI`75J-xrC1#HWnU7?x~ zF@wZNUj6sIR`a*^>4;%bcKQUGrm?#IsA$Fwg8ctA3Iw*g#0gF!z#su&49-I#z&3*N zAEVX8SwfTPI^+y_@qsU1v{T|0$fX5rNl2`lG{eomnGyBczd+*e%Mmo^w>6`n+MQD% z&B2d)hJLI1x)_y(ybi{EDO1H|hPgzG;>L(DOxqCNH9>F#BsaugQ$FV-#7z`j%_BA2 z=7Il>(metz7j*uEkAN7;+4Kmo%AN?*fi*R@A}W$ap9O7i&!*UO;F9|O(0OXE--&1m zm~@7I7sNZ2^$A2aXw(O6?AeoHE%xGT^ z6MfBo3b;$yr%!)+&-h}%b~K$*@Ok~E@W;AOU>N$QD)Cx2wZgbSj{?4iM#sLCla7=) zf${sSRILD1Ixeb5)o%8Tl$qnWU~=3E@CppT0r?LC{3E_A<2!~>9vdF)QhgIpG6j6XQZJK>YsDx0@C zS_y$G@&?zS;u!O|hMIR~QMvJQPT!p=`FnlqbypTMnh(7IeVWcP7#)>99t96b$PT(U zH{SO!siAQoxwM84MdL_`r zp(!W)?)r5JiKDc9e?yrdNJ}bkD6mM{j;h`EV>G7RU@|=fV@XMEJ&yX&#+pmwfQV(^ z0A8~w!X*=I6v~pkbR2~=QsOr7E2981K+u{}$h|`pyOzEmhLIsQ{f1VnW)-o_3Z;Kf zfB`iz9_yAJ%0GbQ(*R349}=`1n#}T zmT(hURa0~VBP4a12f!&cLO&?IMEO3TG58TB9R6vUNp7=}T;AF^0LP~H*z zXsgOuW;e4Dkzjg{-`aZt&=KwHyfM|%~NGx;Ld!2qNM_yK5gIEBG&CXT?rLS0-pvK#8+oFW&8o|>jDFVFVu zKhVdO$`^QNd+sPOxq(aZ??3u95=BF|V$`2m|BzdvHm%Q{357urAl(~GDi 
zK?aOuge!p`w}=o`3P5DeYO6Y%b$g$>MUhr5-Ay+reBSZ+DA$4wP&@Ga`tG(4(GqYL z_*HHc1GxJA#5|4fxd7oNIZxB5<8N>X?@f2O$B0YSB!W|J%;0?oJY1jKJkTbg++whY%o+9GS;8jD0Tc>KoX3qTX8Q5keb35s`B zaQ^_)C&yX$`!jjVXd<0dV?B`>$A*~*GfpdkZdj4Z(TDD*SCkOpngzSH&gZf$iIquw9)}NdoDe8_EOS`VR?`n+44J`rIt}dG=E& zCAUd}HXOJA^e&guCR}x3g5XB5stjCY05zSqx5%o1G(q!PGI;2<$adM$3RGNXLJke!k`hyFL;?lnw z3{Yb~Vik>yeO=mupm8or1n78XB?U)QR}){P0CR$9l61^(Mbi%=;`718!KpW|Sx~y7 z^qjYvXrmHvx)$0F_Nrc7q~2|LQJ+G7L|`JZm}2GT4xCe6CbqXBtEY|?K_BP|Q%!D;24{`6=igQm)??_hOHGo z{frWP<~$ETHcg)G+X(Ds6$&Z~4q4zDiwWS#3rdTjoXnkR#C8A3HFm$b2DJn{9yrB^ zq`7E4ah-Vx934$0kr+trZA6}`4l<-m-YnT+2Rx+Hlxcfg?{TFE41xf}v)(xO!v4`VvM0CPkEB(0gn7HCO|+CH`}W{TN)rMN2%!L94a< zvAb`+6!Cd!bzS$3=~{u}G9OuFp`{2XDkg$^)pmh;-87apcu>GpUH1A4Pw3c#kTGKEqn8>N3leUh?>d+|>I24N{d-*>kb}w-)T1?NfS;BiS~W0)4_~O4x?w5WEL7sEHK+c+}tj zS+U?(+V;%wvSM-l>rMyXP6o)=H;&cUw9L!zp;LL_jik2-UaY?G9iB9d9|!r_2`SG7 zdmqtELph9kJ6cJdrcTf}uYsT1(K2pGBa~_AVXslfAj9c(HF>K<9&H6#Ic+ zS*TDp(cbJwL2WzBw@((!Bq;sB8~-pAsr!9R(`VKhEBNe7JB*q#a$BK@Xdp`ZRz`-9 zz9~$rZ@wrtC!kq$_TXJn)_BvpV|iVVbKX9>9v=PA>G2WvwAdOun#*chi7QF`yCVvG zHx836i>hPNUuPOxJ!`4bJv$+Y-u1|#DZ{Dk4YGF>_OtC0<8XpM_Wg#4q;N7)*vx#%()ZKKIGe(7oT7opHt3_pXl{hX=?R9yuc$$ z;OV>TTvsND%m+B!3Xvr#M4Y+yO=9KkwZ#5t9=N2t)%h-fB^|+nmc>FdnYf~9ztcYe z<_MUrT|{UOa4XX+{_+t*qqdR$8HhARJq;0*K-l1*o!wH}GBb!GSMV|o6Q5$#m{_fN zy7n?#?KNq6*M|`3ZhjBCmzSdO1-f)CH5;}8(?g-lIz7VL=Jt~e+g}4e33O7q^LCg} zFMJy9H%81F9e)~BpJ%U8QhH;>o}Q&hs~D4q7Kecv+W!|}-ctq=K}$%q8szriApPJp z>?^>5Bwypk+e74(NRkhldHCB2g--DiGb~4`E%_O`jZ{yCo2?T$J@;f|U(^LuX9hZD zj|;lN>Pu$#4YU_aX0u!)Q$`HZ9Z%Y7`Mqe9Or|z|$yp5PL`%11QtBjUy#zkNK2?P! 
z&PU;MQ_3vO;5|w6r1S%MCT^1bZc(YDXgAs`I0Istd}V=28uRZz%DZS$GlLpq+ z-IV{K%W3ZiW@>*4SKiO9m+!IwnQ6MGOZq6fG@b4*Uv|@^tZHlX52JyoG(#8=xbofu zFP7+hyIIG*iN19m;l4F~uoPXkl7POZ;aH*$PAE`s~O(;|Iry2*vyB z1U1eeNZK9br()uCA6Nc`gUz1vlQ#qhuYOrA_!g~2x?&}{0(0N`v9T`xpXcNcn+5Cu zulC6ePmzLKuH>>|P@@G6J#fI@h38!_m(>@@-Alg|Wgtar%o^{%mQXkqZZvy?B3E1g zhg-*q4Zpd0II&1}xve0LO5VO9dOpkUf0Z`ysup_BkzT!#;%2AMFucgR(k0$>U-rxE zy{Vw)`Ec?h0$%7!HrSu1O9TVX+4gSa7F_AAQ5Jh8>3wzWV01Rupk=~Yq669fc_4b(iu`Xg7b*B1Mv!FRa>Y|F@&T>#4BeCyTT|(=}Q$-CsYbF zAW4^AEV!k}8|M@8QZ-9&r9nut5)VmTg+@Y!lh*Z?`X))&!Y0{Ylm|(rJ5~J{u9bykeUue+lk%!&~FS88>luX7+4sq8s)Uar5anO1v3`>y+TX z#U?2CIrj(rFEk<9ru1U=QYbJ2^$-WhK3Y6%rntR^b-6LE%WQL-vhL?6`)|G0+&BDf zk%@!=htMerwRVD9 zvFgBqy?F*k{tHx3k>48vi3j)9PVmIkTE1=z*AbMLAQUBlT^O3*IB9d+%@*S(kCc;!?)R^ zK?%{w@_%ZcxlilU3(&=LlEx^NQH_*n)o_l$3f((TaYwin4e3%MlC_zn_5$lKL|8D5 zH)?$vQcvbb(i*xh+euFL6QlH;hJgpa8w62G*ovkgj>948y5#}#S>UJWP{DU9WVUDM zD+(R{IXiJAZGH$37Stw#6||D|R68dkeRY|nH|LzV8{Lv0sJ5!8iddb}3VV8BbF<55 znr;!scI230i!*xa>pLSeYi#3=qc=3>e=^GaY!h zLITgI@LFCex9#e?#o^=XTMi)YLi$inObu(Z+Dmjt#;D{D}e9`$P8uG zKF7O)e292MYY*(hgx|9u0hL6&*ioe;31xMs<{CL+s>+tt$3^A7&}%Z{zkpsxxy)Xd z)01(|B{K;%qWUbhq*|m>|5~NVh|&9eO2I)whb@oFUy|3YL6}Dp`q669wWz!WdE3WO zbu{*0qp0M%OOA&V@>&!NJwM_2l$Nc0&A(BjLB2n??{CnxH$C){ReIGHh+6$h7_O&X zz4f2BuXGnB4?N<0ot86O{({qgTFSZK>etF}JMWsFL`p(AY{@gKy@jh2l0k7jS@#|X zz|i?KYojgt$|C9tq^+RkHgbRC&nP28VkmveHk&p&KXUHt7NB`pTYg*#M=oMpCDfSl(Nr^k*Akq;1wIP2|o?rs$cF^8#u_s)e`gOXvq~yK(7V@zDcdvC9 zvim@OmZjCiEQpRvKkz3{t2RJn%=Tuw9KR~`+fn&*0(u3qWdkma_Jne4KIi8)D}hnc zN;BC5u;&LnA&rpVT3v2lDJGq}25OT_%EvS3?jlZ5Y3hSUR4+ghgN_-L&H+Do>@fKD zE0qHc4rrafsN*xmG;JXzo#5a0#9l78ua~!#+V|%!s2(iH^oFuK4C`8X*>dx0?f4r= zjl}5;jG3DwIeT+r+PUmXWYd+om|VL$tA`#w}+hxPA19qb1&z zZBI7#%qZx-#xRvJxG9=wsbmqx2~iXbWLJUCILq+(*@&r@ocl%YKZs&7ydk-TOEhH< z@FnWY+x3Ew0plJmN%n$3f^r^oH&O7ZF3_Hn23psBuDhhCq`tpI`bS$ynJy^C14&+e zNj`hjXGjjeMD~hcVR@H4ziH>6|Z}p9|!aW3(El~qXqVLCEV&I%@^C# zZXzCsAV5?0+tN~7NLez~V%F_DL$Dt>1p@#4IeAH<+p4VB1Ah@f4l@n}u}wVzZLOuG 
z#-y=h8;qmJF>Q@)60Ocs-~7e3$-ynGp{D^x3ka2XZ9?OeuGM{YG4fuqt!e_prnpw? zBA@fhs`1!aJDlQOPxyjYMSlwJyj;D?td{TMwy*tC_Gj6%{$~+(NpP>20nB1{kl%vc z;lMXaW-Rvr=arP{`nn?fKu!4_MNaM3J-k4dsj{&8p0FmqlW-JO&NBEm6-e>X9D5gQ zWn?;0merB<+zgG(VSkBK+=8s`N5o;Nsck+#1?gvt+T!Eq_{s^=4)1=pV`=5j4uw!f;6$(a$p~0h>y;PC|es?sRUs|*Yo-nBV`Co zy=^$rAO-ER@2f$IEV@nDJPdRJ$&tfCvB{MSoT$TMfPj_Fji?KN=heD|8GcT~Lh?5t z-5Ygr!bRoS&|Dzayy9?W7`0GH(DKl!Zr}XZt)Y*X=6I^uTl8-28=G7Qmm-EjRr**zCr`I}~%L0#s;=C`8nkb2J_XWkil)s~4OYC|g*d4RL9C3I(ts`D{+ z(2^(6wD1|rz3^y2KiD2aQ<<@LYP)SO|V=Yg46p>{NxWs0$Jh@OASWY9&$SCH8*&oo9|f8OYL2Kqc@ z)G5|R#N}i)!gWTS0}|ZXsA#*OkcciWcTZ8IR+GP+;&TS8ykI4yi^*e|Lg0UpWNS!i z=7Pf#S57mH850A<8DJOy9UOyC7j2T>OEYDCA&Nr?^!?LnzUgm)jpsbd1wxxEu`U}b zvc@m7NbhO6#xT6Z?)kf~zS`(dDxR$c0MCX;TNj0yL7d7s^4CmZ4OSaK)IWxTY8R7> zDh37*q;s&=84;YjIQ~hhQ;lr>5q9-j=8ei~K9IHa?NbvYkUGWSZkn)=RN{_-=F*I+ z5*&X$VpFxzn2{o*V@pCvHj1HXS%eQ=unrwuT_sQot{doY^DS7G=eedmK(MUBqiHaG zkP+)2uAe_L`)$Srk@)!KBZv?jYFPZ{W6(R-Tpo2+gP11I(iMN9g=r`USP;1TMD7)} zlA9n6Bpgt3p?UW_ELv86CDU-=^)D_3Ntp$* z)~0lEp~!<@If=c4F8T6R&scYt12VlK3Km4QT}Rd2cB%@^dvv@u+|1gMe=Q`da;9(; zvRxr8rP?*kP@7k(RXv?-^OtNhb62tRzLFWpT@l`kBA8(#kXj{z<-mEOp5wzWMER$v zW#J)L;Ncb=KMPX5FTb!j@4yKS1#fnQ=dRpBztO0klVxRq<~Zn_Bl!V5DrqzU9HNwm zSJhtbs~h}D_vX^w20^Tf^L0LwlEO{J)l;6elU86p5gBXz^1tV*OH8IL4^d7)lW^mD znZkw}_Rx`qR_!1;>y|m0Hw{|jL`ynQDa?|kPn_btmZ5w4HSpRhk6pm_5*#Vk;}?G? 
zB}=uMiYxLxO0XEZ`_-{NZW`5tc8YZE$rU{7ikabnLIC|3d{M!tc&89gj3jt?CFI6t z3KP_9M>s9?Fwn9ra%0x@Rc6-t!Jf1soNr&%Q0qWegJJjg4Lyblkb7G(E7qkTxM#uh z#Sh|iMeyL8+dS%?>;IiRL=Xd@A#jR!(9TlOJElDQq~EK1X=MZZYuN@F;N26+`J|QO z1JU^Q&I|9W*C0GZMGJv|wIeL4aYgk#O;(jwTgxcDtcExp%gF&5U(H}E0mx}|ae``X zt?w(J+vyl_12s$E#v{Di{=$NuDHVCN$}c^>oM>r#=6qrHxnrS*YVG&UlUG)u^PEq1 zk;e0^q}*S#A=a5nk~l%tNx^TQ^A?&4sC%CoWVm)PzRW557OsBC>d_aXj*xPQ6QLPV zl1lXzE_|@?G675QT==6_thu#{!yREXGOt8}*l6sE(pq{1q>>jKGqphFdFiq!NNbVa z&+1kqQcBK+j^YJ&+>Js*Nd41Wgv!4(sHQxzV4Rz{Go*)gEpXQf`8P2Wwz(~0whfh5 zyC2wT$X+s7ohJ)|qUN!}-F%-^6*qNxro9Kb=}K$bJp`hlm@**myx)e@KrX`9D67xrkZ+6+2}LB-~jP#$YDpbhKmN^>I#)S$jR z5ZzG-(x3`b_g3R`FegW1VsF-AlkKK2usGkt49#4} zeVgI^r8E8QK8c-Puwj6P!f>M%LrElK`ZF+Q+jkd3$~Ve1C6xoc-N82S)x#Q`To< zafN+-wnwg7-#L%CtS+u}re$gOwoQ*a`20BCapKiZUbc?ldGlj>PFwj9V?>FL z5+#6EC-^nbu((dG^D8b7%xTW)xd|$b#uqT*RKAQHXbstv0|qx9&$VCHmnQG4^*AAK z=VBg4L;MVr0X%PDwFZEGxa1BsOF0!_`v=G+Y<6GQH)V;1$g!w578@uEd&1;PUbi;a z{psw`|5J~7yB{~jzsvgNM(@5Z2Q@E=MCFNOF=Tx@%sFjkQMJBfvJa=N9V$$Tl`sJ- z0eEop5@Z<+Y!I3JV$c6u`K*L=9`^zVjkKzcm`ZRxYi0_6grq^>Z{VL2U4MT10qU>t zv*EXBBSn4oSR7=qFuG`J4(>Oc<#77u7`T-1UVDC6m-}n4*kaSSGrW#ixbS`Zc5m9~ z(%DZsc7~a+C8@4|nuGTJgb0lsvFHE>muY)bKK2Ttay_fIv=p443!Ws893-j`6dhU> z=mw>Fg(~>`tZYg&QNlRx2GcBkJN6aBm)brC$6R(CL1GLtPl4UwtHu%HIZKRFs*=hX zk#3a$ag9^63~9+;-G8*{!5mJmh!o*cGxJ*a_{uj)x)OT)=6=J%!R47^KH2|%RSzfm z**>L5GO7LF^}3+JQrYjf8w{*VIE8rvaPz_50RW^t5G4ZsWAFKJXEL~#DBZ?owt_b_tTbZqqiE&RwZ+cnCPq3)k=)g{MPh!!@rThy#@tOctmfq+US3zzEJVaJVx;8F@n+?FNmVr35DC5sScPa6$~jfXtu+ zJhMr|P$CH=39Qs}uaM7T?_($XJ4~P)N-T#Yq~y9G&GacDr9v+q+gYj;RQflZ*t-JH zT_Z^eAPGWE4!VcJKS`Ocu!Q$lOxFKsed5-K4u!IG*_|^U(-(=qZ<+@s__vYhpryG5 z1S)9WNME>j!<@l99(Qwp60NhTXNJFzJfMx03|O46gN8xcGzTK#KS=pZq8=d&pPqL- zf{}OI?|}t?aOB(bj-Sj4GTHv1%L!o;Nt6bJn^-{{C@kJR7}=iJu=n%%pMsm(_G8~_ zHFJJ|zi9AN=cX@|5Hyc9QO@&=z1>8%d0d|;MqNc zj-$7+CnGYYdF)ZHyIaJ(C&uT!b-W2#^wM%4bxYsHdRtPbt>X~sfX4hhlhC*vw9AWV z&F~4=TEjgaFHPudf&$dtraI{Ev3h2Bi_so#4y3)f$@r#LPCnhC`<3lq?hYC@oHn*6 
zX92UV@hkLA18@C&#aDjt+K`}4F7q~adF?C><0Y$+A}($+4i@0Ak^M_Pu|n@v4axc%B-cN#?Lz*4EB(swSOO zLL-X~3B4Vl+U~>s2EiEOGf1jy=o9G!h|?OA$Su)$hOT(jZ^PPln+LVQ zbrNiEa^j5SNuhreb+kk<8$?^|Li(Y&#kS05(f1>SNLAviVXvp%JY&+J!W&W7mFin7 z`(gbJ{w3m(HfdQC_diKm{7Wk;BkxQmp!V|?>m9M!C5|zfGP5YHuqB(Z#3(pb)1v*s znYn(e#H#}xo;MgJ3W}TH_y{z>!*x)6nN=oQzj|~++B|0JMf4$=HYR~ld*L2M9yR-} z`+#6~9Ni& zGs)?G6a%dr606oWKRRu1an6;#;fC)&#j-ad?n|##QE&@Y%9u9DQyw7SecHAD_*X;? zDXB%%3vdhlUs~&jz9@m7gs&zbkSlQ}-rp(s92A$?Rv0fQfPv5vFOS8$BR!?*6&#*v zQ52ZY__~W@KI~{2_H~F~xhXw-La#qOq;O2RBY=U}QtjD^j_@lQlRKntje340L8Hx} z8lrDDM=dJP;fV+DqmK!Jl1u+}1BBw=c27e+X2=n)K?~BQhyj;LxzqP^>Db}6R)uye zZvRiB8Z{eW0~Zib_A*kwe_Mikqldj2lnda!qDWErLw_e=FaqOH;zgKuZ`A1RHt+mX zw<*I`EW8kr#6j|@;rYNb{`Z_8x$k<2lkX7L8L~j8NJ=i=DXc!8Y2(4yTt8#RHqLUl z1GVZyGXTT%yPvF~$w(13!Ao#_PTW^%%M#~s0v5@}ns#j3#@{nUtQNNz(lX#&EpQ#d zf(6~JB;?H!{iz%A8(-@|7FTC?@1Dm)g7V>A1P)BJ3?bN*ov*7&5q`f=+4N4Ma$b6S z-bBv&1Fhc=ea!0abo#*EL1kZhC(oO6I`xkxah!g;%5~9C+Opfz<;DYd1aw1}Q5o;J z!rM0}h}|R14+s!8th!rIRRjekUMz2>%+#dfsXQAsU0y!jxoMyKvjOkqLt%p)woPTF#t)_8p_d6IqbbQeK7<#=V3 z<`+2lLqcPSB>NSpiEh?c++nch4N3~I#vD{vNmgT7J4=#NF@EX9GM^jV$Tcc?*14Ld zbrWJ+(~X7<;H~Z$kZ&0y9{?$miax2fN+r?M5*nOxqy#7Gh|d_$(q0%8PBW|9n-}#u zYp7_qZPzcHv+8PwUAdDJH!Cc;0&!!M`w6{s@U=$qVvhys2D7Bq^(;Xw5*PYp;W29g z*_u?(y`+3W+wa9M;%M_BBNcQ3l#hv)cYg%0Oz|B)YsSo=?erm0#M9rEcrU?Fge>LVz2S{M9p{Ej=t|l!n&RGXk@>{u(Ec;(-Q>InsJ^ux zXacT(Y5vT*im6#Ejw}cYZr%$R5oagWi5M^bZ*b`a)30fPqhZS`hxWx7hSR>a9)e9+ zY8yXJ$g~B}l2pSn>(l@bg9S#BO8g9{P=TNmWb|&$e&`i10wuXWVZWK|*)K>cvaOd( zLf9=KK~qDp@>aE{*dLN$qd%C9FuaMAyYK3$~iwMH&e z>5ZOZb_Q~Pl32g|DFsh^oA#&dHVPUbl}#yqLY_G(r=AD5ktOkEO9qmE^mpw%74;k6 zvYwDsr3G8sN=Px-N0YqIt&=E#3}pk;mi`a&A?{aSPhxyhNWiM~MS5}mC!!bnsJqeS zs`5o~sj+Hp!A8~q|L^p!#Hw4>rwN<0ckC~idtu?&XEC>oqgmvC39FRl{XuY!B>{_f@L*O zkk{eY`_L`URn@7h$)4TUmR7qV@6~rGI`yX1_od`;)kLp06qvd=So?B}+6fPqF^n*M z7XT=-vd^RMPOsK3y1bm5?x1@l4nK|#N2SZ|f}j|BdtvAogUqaMK%4&$%aI{U{R3)B zz~yJed6kUM9)spZ=zMrraP_8NY1M_LDx9L{jV25Uq%YS3&offB=)#+q3%5&hxY1-GpyK`&s?jLfUF84T1JYZ7d~I(4io1c# 
z+Hc5~63R?eILYXf&A+9$r=yj-kP-5rP7#OtE!SZ*xF2TGsC^Iyp2^_`z4{_Ylx~(B zDn;&0YMzO%+YmfN-}?|GuApo3`(*Nzh_ts|Mq$Pgl22UHh6>mO0EaKCiBK-ZzbJ}< z5PH4}=|Joardyv*`mu8ts-QrwyV7&=Z2vLT@C<|fQ}9C!&jt-k53Wy}bb-twDS+j! zAX@iZ4Ijxvf|dUZT=c7X$t}zDzCsDjK8(YyZ9i;ZSC_n!h5{L@XLWH*D|$zIeZA4l zcuQORCZsqeKO1Ug6-ilJm#!viuD}oPeE@Ze+?rNzJRAA110-oq#1-9XY>AGBzzlCj z!N9eygcs2l;4;`vK^ouWntraVcZrQ>@Ak$>i6UGR$D#P*mUVzS{toDgy^*@EM>FH> zgej6Wevw(v#Al<|cR^2P)}gow8zixzVNt<UYwrF)6bA7s z@(w1I5W`5-Tmac*6l_9Je>!zA>%LYXP(^ELB}Rm23bmHn#o`e!fJ|FBlNi+QA&nN# zEHekndg&sFWL;7a>)m-tL988JiT<`$8f2;*Ny?$hE6R1(f}lDkiK&r92<2b7FG4N> zYH!cL_uDafIT0-|>b{H)qNK^$4D=LdTsfLaC&BRG`mYsM4_`qf!9XzcxZ~v*o$oETb%s; zp}UK8n4+$}3}v<~$*FdamMmb0AlHj@)b4_+V92#l?b)iqV=&^RU?F!QKlhD-SOfXu zG|=OaiHQPwXriUJ4}@;KZ`zz(Kq$$Z?T4;G(kts$8O}%}bFV_>EFvz`6o`ZigQafW zfyzQ|^zCaH8jq{*me8ltf! z^Ibbmh)q5Fiajj6+eaVhNk$5X%f`7>qQ99r;vOhZgUe0$r=W1M-l~-idNL1k& zV-gIJVv~dqa(jli@%;${_6tsbbk5KOivl8QNC`rv7K{1WH!xew?C+^Aolsh;b0m}9 zT)l-BYhICc%x^&?U}g45?k-trF-3>#!Z9JblV}9c%$7NVi&!8tasGT|TbneRpF3M- zidDY5aL9i1Wj+One=AHhDh*D=#xKf9+}7cZUSl;`U=1!hqQy)CNE9(WX2_l}$e7c! 
zKg(J{2xNA5UM@HSx`273h$HW>ZO>?|j!Ayirb)?X6+^Y&Et6j!Xf?&q5Gs#8{=msB zbpKJ5v4ZDxP%3&1E+wAk3(`$xn4Uy9cxr2DvGvW+SWNK$6>UnRXw1i?hvFNZEq3M>3O*1Evl zBX93P6-8ku<7*cNs1wXK;PY@Z=bb*eE1?w>=Yd24CDoU?9|_6t6)E-RG0_ctm+r3B z4~%RpU}F3r0o}I?x7AP+0|Q=uHVf^|^WrW^CR-8`*Uf*qG@mQ@fcwEp19XRRlyI{S zjE?6zX=w3JBtZHVwNAI06!rkj%A!YO4&|A5yo>~{qXkXy*rTMmi)G4W|nYCap3l}_GH%X>qNHesn-cx`w{NsoEn z<268mFs035!s2$)S;oG?G{_(e=v}PqVTPANc@MhxB7aTvK2||9KTuC3tB+&OrNHDI zs~`LIBDef({?_<}w@2 z?Ic>+29Np|L}H6PR1iO|=42vktdH?h@OkV6f6|qoHJ-UP7h=c za#cZcUFK~i^h{lNOdVqo@jpO~n4rtxjs8vVBi<|wk)~sSIv-vvC_p8~#{Oe`rUOz6 z(`AbT5|2HUSX~!|d9Y48QpTTz1vrvt`}gPQUOsVfmuCNamr@a4W;=LV_7Dx4yO%#M2b2m#F6dV2Cq4Eu4|g!+S!!-+=M`&OiUm3 zm<8Vfo!KI71x@qDcp_Z;gFV@6DgZR&(J_c)AfilgC3R7Gkl3gg-={%Acl&;K1mFZu zM(oRn?}EojZu7r1s8zm@I3rlUut`@@1;V}B&4cRp_0NDprI!h7I~&qaZ!4QjfX~cQ z>S-?CjzC3Ip2<)lie=_|-*_fQZJXx1y3Rk8+6CAtngVaUJmc%b+ti4_*`7xM-l9ld zVFJK0=)VB9g||fQUDf_HO>Z$J!(@S?D_Y~cj@Jhmo_%B^*?-|m9f&%o58`lugkC*o*F2w0snl*V;4FQrR`UR6fAVM;YejL$M{@=Nu`tpZVWx2nze~{ zS_|(8I(43m^V6qs#{7IY%bd}&aQj~a5Pi*hZE^;9Z?1yR)T;KH{V0$^JloiaA5h_= z#7*WyDd+APWm9Be)AJfX;c1`kG0T{tAE|XBRG{=wp!o92k}GlhBEHZI4u5P?aDerO zKPFa(*jphtVL#|#+t%MYcm6Au%0ZhFb_Zli6(l&cEEvRkQs+gDJ26-;)H6)WeZYC49cn`2(M5`N)l7= zRQM=`9K@okgVN5yu z!;B3TH9^C9H?U~a0B2;@U3}*4fhF-fLEx4Ezw`sCoS}M4U|f4rCQs&%1y8oC7}x-` zk#_q$$W`}yi*IOoD=+7gc0Y7q-9vRrq=ZV5*$GeuwLV0wh-C@eod6WDIT3k8ok#_M z0oei&yoNeLv#zgZ`bHDEi*!3X25`nENU=8SsG|4CpJ@|G3WZEFechnKz zT}*70YwGrOx!ZdJ{#WU32p;b?9WhihGOS)+0PgJQUy3^x4Km(eGEmvS2oc;_he&L6 zN_H*N!dDJLuqNes!q&Gu7~&&FY1$~#Zq0DeZTKEk^eJ zcrDd7`O2JO$2e^?9-CL*5J6`FrHfPr=z@<>Uunx;up`dY7G%XrA_w|7z;hRx<9Kr# zmh6VANX126A(pEX@UP$}63L0-EA|aY1s-POS&HUvophgPJC>+%22qpJVv=;Pc=tnf zRIW$D$Xcs|bb-kf4h`xC8`1$rvV-=tyw7WMekCR7d}9*jAfA2SL8L0HDzx3VGYUpB zL(#+!59K{mm4OOtTW687YhTh3K`j!+v6>h_cPSQ;zywdRG(lb_;UCYjb`+b=OBOkef>2Ap})GZ^vcF| zCED5IS3z-aQ{rJCxiR_Tt}_c1mbko(a$B{;I!{G3IWAdR#b2lmO$o|W);M9&_X4yqu{L0dv@(Hj6R{q~t{LpG5OElv(h z>D!@2dph@HTMyenxM;mnEcu-J|J&zCt}W)5Hw5&5tqoW!Xt(~?Cm#PHuRpr6FH%ux 
z1Nplca2v=#{(F@@p06O*#2fDY@?(2+bD9#*-uXnlyvj|=cxHyG?#IkPP#zpM5@3`< z=#jeJzl;Hzj`Zad`uGT%?c2iAzh<`^r6*1%KH@<|E4b2!KZ18!I_6`ua|OaiAvQeP zt!Zx<;X&_PE$VT}LzKR)#(n5*U|CrWV(bA76L(k-?BdTD!+I_~uQf88k=b4>j2kDNY`ddhFB^{tznOpdi%_Rc>cD>aImH05`bw*Lgp^wV=r$KnbXz4u9KmAtKK z?>ckYdK7Q{^%_U9^Q$3eJvNz(ElfkpkX4Ir+O}6aqZ7H7S?^pG@=DF*mGUUG$KX@y zj1bOU*QV3A;LL;JZxn|p>ha=t^Q{Sx+?^M3iQIbu!=I8*uIB%?W=)=QByau6+@r$u zh4$}!AYyzT+91J!tBx3Q)!9MeRxeBv)AyCd;oqAE@B9=pN83pL50;H&foQaK7q)5w zr>gj3UfDEn8;>=|OE){dcL(bA-mUh^HoTr${;VoDT~q0)9rUQ-QFjTPA6qH~d+? zB+mj0H!1hhU=kTo3OSJND7@PyG4GchUbCwFmy}6f zz4wea5|QgcK(U+>3!|R2`;V=$U*c<+ys4qxnrp;)%=bnCn&}t?t6^Vh#o|k>y}HBA zplY$L&-VsZ-XFRfd1O~S@|#tAi}mZ}yCZpJaaVWL&nVkuJ25cL`Fe8l@C{Kt5ta|} z1W}Bo-b?!3wY5_$ZT-Kt@z7c&j>Wose`1MFealY}w|%pXS~kELj`-NeT|>wki~c|V zIN2fp=O4EpG(4^lf}t>CG}`6@gc!_OrlPMB+s}k{i_g4^Cy0;0q_z~Qb=2cu7K9nf zLTu?h_U#tT#wW>l>7!`0n@I>6Y!eqp%<5a4wZqV#CoB64v4vomqH=$?c z>HipKz8=bTHvpue#<_Xwa}ToXuGl;#=LpQ z&6*JD254*DwF6Tu=d8bZKD7Ji71AUd5B%#G8iyax94l=ZvJ`n0a;DBM1LA`ad<|^X zY)OV!K$pz;yGB-LGHX%HsGyXFNkNz0vuT;nGuWa6vjNRG`O5;$0TbGS z_1D%;x13W}_tkad39WndnVtq!4l_;Dhb+RumzVh92Kt;}ixH%tBMz{p#bR^sNFf_?ZDL@Qjr8=6 z?o&JD8i^i)e~l#-twQ%U?pFY+69sm`Q<;zHlhNQr@{-|JFeVYTv6eC&y!{rMY?NES zOv0Jy*L{I|MCw_xG+^LK1bacuLs^}6z(5^@OiWakqegg6=Tv?d&9no~NaZ)zJW&+; zoH^i_DENfe(@y2VtU;EF`W=P z?J3@)W{I*b>y8spN_G#b(6!H|qiy4LFO{-%{seRSl-7A_u}_9rym%`7byDKVy9x&Q zMe-{mul}%tj;GukuiXY7viTb1LT3b|2b$OP4L;rSn&?xST@ze~w{GHGEyQ9%rxo5JG@I;^$b8l+|EX&u7n^=7{=tj}47 zM`=f24%EuMl;cke6Ur9PEUUvYjQ2FCIvv{Gvg+MwL){oF5w!5C zlmeA(TUzW39Q=JtVPRX!^z&pUJ0YxoUeP>4>@4TXjUqSEtSn%zelQ10U}U-UkFxH(mHr0@UhS(3WRXT zR1OZCDqa;kq}2jDgy;LU&wZQ-4vC9XkC#69*!7X{{AoO1;V8m}BpkGl&pY``y~{rj zTI)FvfsZ#D*+3_FwUUaZonG^J1)m9p`S40!CYs~l61CW0L%UxjnrH3c#=RJpm`O(Y z`LNlMI@3dIJI6Ddl}#U53m4n+JVwXMU)a|(1F~lHPN!cjsc)wbR2bjop6 zkm2P#FoW**(z^!9SzC7JucHdqv_YmS`A(omSEDw&_qNyTpH%yOmM%LeVz@DH3Jz;4 zdqnl^dVTr!s13}$zrai4N@%j(+~qfos?Fb~QP7p|C0d){crj+-9B^J49} zANR@n&Tqv5526=}V}!k@CRwvz=E+j-2u*OYdBIxUhAI~da-Vu%o@Qkaii9@n)qtR< 
zd_zN?w(Iyg?42{2`8H!^_aHz+7`0VTAXvq2FxcnW+Mq3e^4iZR72naf7V_B$+PPEv z`@QK?+Uycv`?Uyld?s1iui*aT)_<|fN?0%Je~G*6ZxLd7vSnU!x@x}DqH*VvVUd}I zBu;hwd{fA$@u%nZwa)8Xjq}jyL|oRKcPBB1L%5SWxC4rHe7H1bcor&Zthi6l+ig$F z0yIO8@vfzpK7WCJ=Su_Pm=C=ttrD;cxOj}CT>5^!-K_c5{x6i7YoD4O9B=C3Pk((| za;Ry2zuCChkW4F(+=+pH?7YZH_fro2pxzd(ErLJGX?^p(%*~{Z_sk~S@ln`*CJM1I z3v7%^Uz)XF$$T8gFz;pWZD;l6nd1h(ZpK=6T^XrUAf>n!GlK^5HfUUB_n+&Ad(`;?}Ch(&hg4&xPph(?Da+pt~^_`*gOu%K7cw6t(3M4T4Ko{Q3_F z>(s=+q)ei_oxqzqFi^a`;yHP_V7cg9v{}pWsySNP z-X3Up(1Jk=)IXTNRXHJIV774iB%6r4f|@Tlib6AtdJKR8c3T>atngF5y18xQcqRmp zinvD4_D!6& zC>^ros%_7qGTl>jhwkQ}MR$YTcISBXQjPbverh%@KJ6$sYq1T_k4Rn!23iH}R>JSq z#Cz-FXKk&iAFdm|M=*Yd%TLQ~~3Jg>{&)1!q;;dv+Um z3iuMqvHFMdCt1#M${oo>foTF=)zOw$Ha#IEb2%4n*R`GtPQCV9pqk(y4oIS9|GIbu znXu1UT4s+$b=C37k8F7Qij7!y<1P93xLIaLJOR;L`4C3Wm09^s3aDp2ekTh$vc981 z{Z;E&d^_p!^-X^a&m z4S=GVEUlmGKauq~D~P4RG*d`<#1<6;c=Vz!Zd!-;wOK))osEs3cx2#+R96t6?#a3> zc=7`+_R&fnWIC2>EiYhyj~+v_1GJ`F0R>>8DAdi9^+L~mYbY&yMZej`#fP*;@%;MD zSo6habPicSn{pz34k}{mUZ=IJgqZKFi_bdql>4Iiq$MehycBx;CXT;}(9&)Ncjr82 z(Viw zY4}uQBR+LX1bu=BKm+ImtqoRY;@~9$0XjjcVVqG7%GqBAmBPZ|828jaH1Ir1{#4eU zpGoTjn+yJ(Esk5Xd5i@(xI;AUg@Vr>yrqVrz>DoU&2kQE?C`u&>Z;6ac#Khy2-FDD z`?_+TBKC|@)QQqP^O)Q1$`(szy$fhz&CG#L(Na!G)-*bjuWqS&Uq|e?5Q`u)oHI(l zP7#|0Lyi8g3p@ZHy3c(e_1}Jv)}_JrF4SjoSDL%wm-)UKH(jndqm4=!b*Dxc=QKOpDC;g@@s~S zIF>uuQbZG2vy}Gk0r+-n12Lg`ptx05PDcbvpaLyVHB@rNalsO zW2R838gFI4>>Fhl4NC#~My*;kp7ave#;WL66#lkfDev26Wu_r82+Hvd=i<7jT1H$D z+(tiv<7n(;7L@;Yj|t_0$05%XfAj;Ts23`{W;F}L%vnL<&Ldy!@`uXs#DQX23n-bYbXRxjLI|Uxl+UGQ z@9#J3*&uI(D!@QgwJmDUG{+BBwBUCYax20yqS%mEe;gmEe*~s4tyi)K%ETjindo)u ziRwDmHx|>Z6HwwmH||IAhPu8jJuQE#Jtw;t{N2M&(3*i)ke82*f0t~wMGE3r5c$a8 zsOIt?9XTH{&9TR<_4j9@&8;-%No;5glOwIRLOXBDBCe4V7rfwc@uL8d-&_KLsv}yL zr`fQ3ORjMjlRxzWulcwEr-lw-V943$ucigs5~rFO4KjxM2da$~>q!#?SPrap1)oJW zJn%|Ykzy~MY8kbX>j3&w@y4CBT5Bk@oJuQJzTlrbjrql#pFJmAv9o60$$znc`Z#3i zyRjc-C;xd_r40gA{eZ(_1m${8qj+vT!;`sPCVA1E%jE=11=`<$b%{eTPE4#eUnoPW;+MFQ<5RA76YtnNFm 
z^@`@k+ex5%oEB||Q@2TTxxZAjunSHvBq1WnIg<|-1D@i^c={a7a^#1$FU%lj*>fIw zL*(lvd%`mDVPW;B@dbUZzxUt1pRf;km0vUKswD~cImwHHJg1S1B?WXm^wsJ7o**O;QxXze1}ec=~hv(IX4q8USZ$feozw{y{&6SZwU7 z4BRFJ3qjKfP9d2EgR$|atCZni(=8Z3&;osN_FvK;Ax!2pNh^0oRwKN1gh&wa#;U8) z&gsR^DZ3G_V?RREY30KGs0hN591gn(%J_fS`}S}s^Z)%(+H5DJv?>%yCu~U)W2j_X zm92I=5pAcb6xAp(j9GLPHnk>V#wMvxp%M(j^L(Yn&Q5Kc{XIz}-XV;72&YRWsJ>lXQcVlZ zZqGh|SA7!Ezet!=^P_)OBcwlY#Zvb3v1r1p%r_Xp4XAh{= zmN3?VS^}p5bmgad?k+Qw)7sbLkj#Z!rO+E=sC z`FYKGa5NAG*>GUSmh$gOQROb<`YZVZ6fK`t(*e$u{+F^^+02U3B&&mgq}L9dxXNA! z4h^+q>QDggHVenRrn~t$eWj^fEq?O^vqFcM6_d!m&5a(i-W&2?Xr1OGre|TiNewS5 zP>kYqbZ4F;^|}h)iB8}&)q`!}lg_CmZCF31C*NpGHw7L!A|Oz?~_H$N?G zvj(GzR>Rh9b@OGzB1#2W8wL?G1gI@+fK)dajo^o2G4^l0V-dh_<#^yO{f!noc(Q)8 zd%1j^n+%4Nbez@5zYR4)ccbZJfgUE24s}e3hT=y=s-?ix6lF3-wx`-WTLxUvv5WoC zmeq&uXU371Y3PWq;oQpZ@Kj>M{nUqKbC$=?U`7Pe_x%i&Umn7ri$+>78An%bKashZ zvF;tIw?Ap#$U41t+9JiG)%z7SPx9w_X$|yacY8NuU)JHhlPt4k)SwZf-TMje^P)o_ z=OT`ZV!;58lrH7yi>UnHdO!0xF}Ye%!?3Q!rf72UyQSULhG4t--6C)QFm`t{c+hL}BnO3^9zCA0wT@s1b*%p*87tQg~y0r4e ziZ<`Ulvln=?8KX2UKJ}X3m@}qUXq8{ey0$@%dcs7W_V3sFG+nmsX>yHTXuIh#AjqC3&^cW$lbt-fII>Eg>=w^I-JGAH|nq-Mmpq5|yF8qH7FC z4bn*QpEl<1QG@dY&LQ$5k|u#8L@uzOW2?o{kYX$N@>iD_VN3=@^SCp6Zzr5ADy>o} zybB64)KsR|KQ1zOC^RGfLD{5HwpqW%wtxb1{A4TT>s4(QDr8FXagt9!DntHV zZ|iOVSJ{&01RQ*9v6x*5A~a&Vb@_A+8feaxBQKKmkWV=PxuTa}6p`+nN*THDPbkLz z4sKZ7bYw-S?x?|yTw!1AC+GB?l$-L@++oWR`#K3k15Us$m-tjG?FL}jQftmANW|F- z7+*w3ePZk1*J?5wdN%gP!vy1W$poCjBv8A2@uuX{?(3}Cpa^muSQHARKQI@F?3^Q^ zx&ps*8$k`n>^DOt&m-6Y|F8z>MAjy4{QI|D*e)K3hoeL4d<%;b=X1%YYj*hc%H!KX zv>`yj++tGS(Et(w;@h(E6YSa!xIM^cS`TKl-dMJae>*XD@%3_KXTbS#{q=Q7(vFmSryBxngJnfU@-!Nnv{Wl>picI8lhb0JyUo)(e;V|BD z{Sl^9ZNx}QUcJVGLg#QUH){&ytGSpu~bIapBhPhhJnH%%hr93t%`9{gs>XS?#RWhlQruq2fZ^ecjUqozOe}j6%0{ZN}_rKU~ zPLGQgcs0f+0Nffi93C))LziPvBP;AAo^x2kGr&We=CR8_V}{ZLEvM`m4lMb=f&>F* zf~uA6!grgl^7B3dz2aTvMMgv;33E&|rkJ{${n=Nk1ZbC;Il7qo_TJ~Nf#*A-Sh!~p z%u3vLdg)lB%)_liQjTWubG&{P8Pm4PaW?X6SVKqA1Z^M|!_Kj3ip3sfSLBZ&E?H{$ 
z@quP^hJ9>OaaUR3Xa26gl5c)a)YoMFkB?9++Pso)fxjO#kmv1)`v=>>{UE;N-Jo$* zsAVnLem6|sw__RZOa#ux+z0aQ;Ll|#2Gv5Bh za`I{@7Fu?tukh@7%hLAcNVr#9QNK;1eq0A48MbWFw$D8rJfOwNou8QbPSg;`nUHOc zPec-~7x5+c`G!r_>LszJdbYl&`SmHKPIO6uF+U)iSXGffef}ezg#O?d*#P?oujj`k z!~=%c{tRD>uK7?$>lcl6?q1&A)m32Ge%3x98z|;aigbp*)!oeQn!sv7Ws;p6H<+P} ztCueOc&{9`8{`}!dMGD5gWC1S{KWs{=DmO3!p#BUtvxb5YC!tL$+y3wtA8)wB_MF2GSF=+QLc@)N zi4s`iOTI0Ca+Um5S`+?*3w=6Xw&B8vP4cQZsE;uuOo29uwBs)wX`%{r*v@vUJajAZ$>Kv-^6J1uWn*X-#QPx_2>Jn z&A(CiOdLbKRM1lCEX{Qe5WHOh0kf-HJB57$8KavcxTNgcom0Ja4ZW+)-0rN>?7Jf}W%Hm(;F0LON61yTyf&wr#sTwKY?`uKPTd?YXzOsfyd*Szhho*;z)N zQuzD%e>eR=IP5)rO!m7!=7(5z+_X0dBHayJ-XiJODYnSpjCZe?e@g%L_eae(`aqBH%BfLJn8zDlOOW%+!1>@wYO?JcJ+4ED_ zhVR-4Qo>@FM6UXPnXp)w$Y18^eCQ53@B`lCaW^pL%fZU2LpH!|m_n;3bspU=ac~_p zBK9>9s-Atas1w8j)8PF$eOlsV4FI8SK6FzjvVuKRpLd+KV&D}l< ze8QYIOOV?{(I+QO%pB-#{84K0%?05G1tl4QjRQs7T2c zzfm74a=MTLOj)}}L41c#96Q{>_?s$p2fnH8p^u4rB>Bls7 zHt67U^ZcD;GAN&RARjjX;%0iQ*)HvVk!_Ao{92#jFiy2OG&Z?O&fe|0beZ&na(lr? zweiWJRc+G8)7%2u#cwcw9MN-T4kU8$LGhitv+i%#Es0tt4&C(Q;liZ61D19TGzTAn zovQ&eIIDFkf3?>aMe9Z7jpxlcToXKVfzOqPDx3sNigheFTZ6A9y(Iykn z)>o>_MeSYTecST{@w<;FxKtmx_QSaC;-=ps_=_f!ZA&H~3EEf&;a>x<;1?^O6w@9H zFL{TB_%!Y6>g1UOvuPh&fbZkrMbGppJx{;(=5eL(@~f-tp4qsLiR-F~WjT$Z2;&hpUnz-msQ5)Vns^JBqe=n)?twQIk3<}5#?mXyDx*aGV2Fh8J0)ha0p zILz;+S_2B}Vrhp2>STG8j5s(x4!#y`yLTe!p|BzjTjAkWy}1#=yav!RIGIbXyoL-g zWxX+d?JZLJY$ex@ChMoveeF#qBep9@)WGEekl|P9v;CZ=QuJ*>@^8NI%dY*GJlz#_U(A%oBA;i0Xb+^SnP_8i8p`_kzMDBLT4u8+~aDS?0 ze}nyLg(HUBi&oK%%9i=5Qv@Dkfo*JEYs1q^>ftya-H-p=3fbQ@n;Ed3d+BS*Nypld zh6R}is>xPTx4$m0Nm8zlG#1)caUkU zn#zjrpU~${3f|kbL+UBM;VDgL6|6E&-)9*LC^Zy$^1KYD~?@YcX|m( z9eLC=_s8EWYK|&m)e|@be0>~{Uz)nv*~jF(SJ$jjk7H*pPek#IcNxf5)(2#`cW!v) zaJ1!81g*roGsXA-D#xfw(JT#Lp|Q6B1y*F8de9NZ7`yNe9b5PNF%sEyr(uw9s(VA< zDvn1(Q>@_4vvdQUIv3%Krn4^;iwtUZDlDcQ0;C8i+^5tty!1(n0b3q4!-l6ogvSF& z%T~Ju23yK5rJ%dSd#C^_gpy9+u4$<@N)~g-w_YZ>@U}LVjo9}X08EyJGim$vx8d=; z*Nd2jUtYGW5yTq=McC~H^UpI97OqZdR=VfGTR^62^+zyo6G<0NVmmOpxNg>tjuXLK_QGV#p`&XSuNQhn|7-Wh^q;JRnf`j 
zXFJ21oXb_QV#RmI04)7hohtc#@%K(HclVIU$EpC!e0}Fc;Wt}JK3eqg(RLINn*jz6 zd*C021i5GP2Q?@T*2}7$O=mILSRyS-l$U)DSCZ7vo?VG%VkB18e{%M~$>NG&s*zKK$=Uz-ElvScCg4m^%MsLZyx!;LQb~UcYp|(=tS14z!T}Y z8r<8eCRjVb@GKg!FJb>=ZKL8o;}5%$(@^TUx&nm-aiTPvGms>~Mg)5P=`{%p%NHqX z9c*uUmUOizOtGlWmCRh1U{mB@vc3fq{L^Qoq6BLeb2hZFg}GClGrRb{@4NB-^r2kr zVOR9wvsxvuv2*7HFR!g=Ow6uYyeuIIW2(G;Z1iaAm%rLQtTfqcms4$a_r&prt;OCT z|3QbELvF-6WueyX*;%Rl!cOv^es z$oL<2^!k-gIMO=(9IinD(V%0G3I5TZgZr0SyG$5k$SJ()0^wmr%>;EDDG-AKo#obi(674qY6q+W2LdRTV;7IHGl ze~QzI>MDxtLvo2WgasXG5PQe$To7&Un&8fa>J8Z6omvh;2I$KprI)wsDpu8$#Zu$j zxuxI%1edh;`Wsi&tV*Nig1dR}X^{3hJ@}{(lVR%6tovyb+jpSH))^Mx!#5||2-lQe zx_D&~fso3s5u&RPQA?G6N+srji+8IDl(Vc!+g;}cxN5YFA*^bH8u@}=^}=z{Gy-cv zEPPCr_g2e}@k8^@YAg{w00{hKp2MiobNv_{FE{7%$Abnjin}*ZxJu|~e(SA=(?$8a zc9cx;=$g7fd0P~!;E9Ih9t=@Iv4>_9tr^Y)gYd(;vRae)meY8et0!selaU1{hyjQ% z-oJ+OvRmfaDCU&^0=1MN(XWdB$A& zn$k6nz~c^04zE>(*vc-(s!G5&nSPw7T2tdNmMBenb6Zss4sbeV^WQVtKgAL~oA(oK zlBIqF|Jj;k!Zhy+kBZ2?Rv#J4*GQKX)V(&d<5cEA{HnIq`&4VjtQlEEl2P;){(_0{ z?lOIVc1w*97>2L?R#y~Zx)2sdWyic`*Wc6yG!T*+~9BIIX_uZPu zH_DbCU7BMCFe-n-^@lZ~!g2Mh9_djXd!0-K9$$Azck|48G-xp)E)U$|V@^Fx1%P?| zHx~Mgb=Z-GbYV;Sq{i8L4_aQB*)0gVe9?}Y+}m>Db<$~{(&csP?MDNRy|&IjkkB(8 zx3*h&q>%kvsIg)0GuA-AcdH&tNAw`_%~HxP*J$tN^c1#deR!KEoiH+qgJR?Qqgh>Z zP@gc@Pix%557YN!&_nVizmtf0H5cDX%$f>Uc_z`78R{eXCZ?8JFDyMytZVLCyUhoH z@OwO{qSCG%PFu3PY9opJ{eDvBF;Y}=?>N*a?QWo^t5=VSE?-%glKtTO^Nu@Ry)AC| zw6sN{wqnBwkyk0OWAI^9l)QqCQCTYS^_eRJ58Yt!c}MUNk|R1rh>%ROEH2g)HZbG( z!Tl;0U1qyt`GuUMy1ofvj8@`J%7}`MFSM4|Iw(iGjCg|@m8Z}6#i)>4(6gXa$5Fxk z$_>w_ZaC*qDm+^L*;!1jpzGm z7>*By>otw+l9^gBe&OA^P{EyRm;F=&l{pnNPE7wwaFn!t8VL&J>)^xXx0!A3%Ww=l z3<|`n^|9I3YZbRovlP}_ntGL-a4QtY)12m$(HLO{&o=PLEdh1!KQK6wfwdsajoo7f^d->M4+3J9Vn z;_KtX+BH9p(%)d~#_cE>nV8jkV5#roxor$_p=nNQHxYKF-yob2X1@wW01||M__zT| z?}e?>sKd&~9xJ%(fUkj>Wq6W*w!7!}2bj8JGqrgI-)}_!^vYyHRdFr%1Ha%$j5~kflU$#}^zv0xl_2KU zDS`mdm%c=TYUxKjIz!iP_EMANW7iaQlsYTdR_odwNi~(T8;<>lr_WfuP(s8bY&pFz z;hDW5j6{m-L@a&r`2hayAzO#|{a;On7*8D34hHEe 
z74|iprbw$}h!~l&_nVR0N<#GLmJc*RpIlqLq1)mH+#$aM?H!#6v1?m&_fxtbD4$BA zzhn9Ku#_#kOblW|?f4<=^@}n*eA}n^sxny0KB5(Yd&@Wk30}1?W22?CTKP0}VKbP) zeA*l=A$V53U|d!gx0T#Y7heeRo)uYnks=dQd?6X*+1n78lXM_c%rmbxCJsrPBzjk8 zx!Y;M+cGL=`{I1~PDJZSTfnZt{R_%@e`4uFqD~Hgy}#nx_q6{ZaLj}u`~0tZ6Vig2 zgp>3?1mDBdtn9%ZK1hi<^1EBMhZis{5#K-<6;#V&RV&J3>FoIII7s4?p9J^)q}PlkVlA>}F%_4F!qG`Wc|2 zhA;JjAai(It%9_Oa;Bj-DAyf|8xIfvFMtCG?AIXg<`>bAmo9OdzX*kzs3Z=S7f&{$ zs^>#6cX-C(lSU(UEGNJqk`14UqUW1GtRj(?pIp7t`g;t}@Tqj8cs_I8x9pcU8-*++XOU`5p2mtfMJJ^h2pE0#IWn^g(&sGM{xqtcAfh|dsI^Uew==$#p(|~9nxEZ!G))*L zE%PvxS!VZ!JCEXNcM%MSA+vf8P>(-~oPr3|i2SiNPSdz#xxUyo4V z&Oh7&=sVg8fr)eBDb~kHR^jTNGsZJxLHDD_iE@eSH<1?_6a&rJn0 zC8pWpjy7|nj&Ocwe_>WnbH3`)7AS_SNy~l=mV**qf;c0Or?uR-|BCb2?5;PZzO|+P zWZy}=2@Ks=wAn;79Hr`sKK+GV6#jgflylOU`VKTCFX=Lk7rjDR%`a`z*y|64zd4Ni zH4gs>r18h(FSACekd@VYvaNMhorh&tIY^Rr&-2zQw^QoRHR^wGkuQ<;IxeN1C3!*i z{r&dOoQhIVRsh$<=6-Q{ddiAgGrn_ehCSJ%xz!>qmvnFQZpfFrNi}IMK7yOn`jT;> zAXtdOHzx)VlG^8b`1+1}vy`0YYlYO*RJ{JR9ZXHY@wB%`9}DotgUIx8(b;{KtyOg1 zmfq1}#aSiSEp7ApZG=W#RT(UZ8hOAr%WXQv_9yYu?_4$EEZx*&)~^9g4hpVrrls#b zK7M_-#bi@uG0M@w88Oe5ZN9liiKtGK{Yf?C(0yAXRVBqEMFdq-(%7A%BT{LZGyOzK zPI$-F+9T5GqjJTv6^hpKmG^Es_u&b7 zJzqu(HBR$6SI%6?UK&xkC;$ayc^n&(&qppK$%F^;58ZN$+Xi1AE?67kr6P|D=Ly6Fl_`Ww4lBNaw`XH*F~O zt8h_m#SA$fpyDOyy@i)g(o(fr2VV=RmfuiRw{F zWV@Lj=j|@-zE@l>Eht7<0hb`9s?B?~7KMK{Lj0X)kg2uvyG}ZPvem|2XRgs9^fK;3 zGAZIW&|r8LIZiY#ZE@H+ox*94=h4Qkr>!n>OcI8|gPOd+l+wz*;fXoam(-ZqBVv8xy?Rg9q3loILP3XLu`ECQ% zeX^~|@5-~q4%JD)u4k^fnGof>SYu$rNF^94-VViQ6mAq7Gh2R~UNOPCIZ`Ec@A2T+ z_V&;@CmNDyv7B8`lUUxuxnu|*0iCQIEal=X5%wL}S*NEe0Zl%{bhwMxvBjQ{UPeUO z|8;jN%T7TO*}Cn0T1c@{2Z2mRFwp&}o0KYbnS0Gm+EJ9`2lj+g@33b|gOS+Z=r zKEeh`+5PFWcRKx^x@n-Eip=e)pXEPx&4KfaEY~jH(W0jG`p zgPPt-(HsbUjE&YU-pcB{+uh`EMjd~o6%;5p=kz>7zGK$LzHRC1HN5drF*L*GsA)3S znMfo0x^*`dV__G{q@g!SJsZ|IzP~NvJVqxUZ~V=@aVetwmZdvuZ{9Hzph%cKCML1j zzJ6s9t47#f^;ZOxa+brgNTR~^cGXqpXv^59^4jgADB*kMpu|PS&n7K%FOlb}IgdYuSRh$1w#a~gd;@l>~ipMWag%Opr8{H_2v89)rFwPN^$e$jaQXqJAQ?^Rf+&R 
zxDr-_^+MA+#2;PkJrt`Xy+}jebNU;@@Swd%i=`>#bdyeg<&_sL&hOG=KpWu&&H%_N zY!oi&Z(<=a*kAhlCIsfbVbeRG_3v`=;a9|-Fg2b0&S}iam@@KLzEu_nw0v9il8VU%;g0Z1k$5AWU!sRBJ+wXb!vC_TCgaC_<2-eln5>)7s z;}PUGW)8eI)q2c>07vo}mYbZS1w=IN`G>bE5+(YbBZx)t%$OOx^V6JD z*IDsvd<16e#XG=uHp^<=0o^*NcRV8!Qs+x2TpCp*e5S)lI0nTkv4&yP!pYXQ$VPg? z$_6P5So5?wDMv3aU?zCna`WMR8J)g6#LeGw7eZT>8>QYHf}`SIi1BD*Lur~xRHv^b zX{uQ{m&|i?ZC7td2Mh&s;bgu+l@8Z-@P?>d2YJA0fyfx?hJvRg6IVTEuues6 z66;}AC@tVjn}NZf*Mb53<%iwUzub)f*-K~Y<2uD58RVEsr}rR|qy z#M%i=do%5*M*Ux~c!yMHI-AEK)jJzpLsP~^hr8sW&T+HseWyednrb0ILC8{|0i^2+dr6 zh|EVpGJ9KAde3>cpIj%S*q)%nYgvI@p6jnb3(a*`x8iReDnYlpnS>nJYEmY$nHA+a zidvW0FaInv5#H`NsIYh!tQ$MF5!3MbyMj&BLmE~r#R9$d){?MQg?dS-z2DEYm^1!@ zi=e{d?hmA>^L$k+wX+Yaoe2L#9gHk?B<{3_!qbXH1u3Uhyih(n6)ZtD#4nzMLPE`o zZ;HH!@TEj9uCM89 z-Ai70#nlC}h;*+$6O1^|e^H7yih3vXxZn+_=MY zY2w>(hXY6ogP8LLaqL(s;(zKWl8w{(WPz?U%;oK(VseB+WU(yNSom6=66(i5BBRyM z^(U_^iqCmtvbF`X7#p7@Z9A3wAvZbiK6{!KE2|%tB&B#6 za~-H&v4b4SPqifI!|p2m>PBW}shiu+9sjEl*+uD>m@}RSr<;^E?f+3)!Ja8ux{obx z8cjcrJO=uoAl#8N*0GY$|B47F4)LS)fMYaS?6IE-T@A{B<9J*J>bs<|SNCHF7F<5V zHVQu%%KFN-h{iS!js^$Ty%bP4fyT=T{TZIF#izKCq9Pj%im(*wZjG%XW_ipK=D@ov z&fb}BO{u=Z)Q3G!SlLrK3%i^7g_K)Bg$E(bJ@YWbJB#FX;iI!q2h?pz0et^Z74%!! 
z%(fkYR2Dm>6Kp(n2qCV!t|Qu=uX69$SDCB!Rn!!iSe9=2cNtx1Fmm$Q4}~s2(i*(& zkctIGUg^VnZ@1ORaXQ$a~7_}wA9A7^GI<|;1j2otuK{|XCqRFSeK6s^I;;ag=!{QKZF#~PG0 zM4C#y`uA>66o7#rCJe+4Vj&CIh@n8x88Gae{y1~JFH28haXJF^7a?^Lee4o_ZR6J` zf{M~|!(F@+etVza&_xm|sNU6t=_*g3lKFtQosgFnKm{#zPj_V9LK;-@cDorUxNm(F zIgNKi;7j?#$Gc=@!Ija4>s2%xRIT+*)*m&%QYRossp}~NYep>;=cssjism!fcS}k4 zI&aSms9FIs6DE7nqhiih72{F>c$8ru&GrK$81YSg0XXAs&)&ec!+Kl~-n}ePae@$Y9wnY~bF($%Mw@bHFnB2E+IZgcx_Kwg)!dED5-AkGf$W$>)3tu zLdQgsSCMXj7JL2C&OZ8aKo)$iGZZ9C*Rhl0UbLN8r{0|q;=;=~380o5gk%Ydv}I-U z{93T5fjw$vsn~LnVnb^K65LpqRzLcVcSpLXv zdBR;@5;C$uT@^|F*H{zRX72`$Ko^Hj=~UPXx8$@vJs+JLb4&<)T|V`1SccP`6T#i* zU#b-35Sjj1IMwAdcm@K3R$I2BJMy;b0y$e?{>!XUy|)!GDU^J^kH?uYkC`=zY#X*J zpSGeY{Zw~%yJwKD$c)+oHX&@*7iao0q>=My5)2X#O612p1~>|~J?Mv%&qsUiY*oNU zh~r&9>`?*DzX4P+&pW4E>8~jA_Na0@WjwN7?2U1D4{~Z3*NR6K6@CN^_--EJ^iYYG z@B7aVp9nT!Z}%#hDxhOiMUl1r{IkD0*k*j#`V-pIAOGht1mI_P7=cdg%r6e7|8R){ zrobq5F)X%=E<5Y7v;U;hn`HuW!)FdGjNK$Xet38c$kMjxF6ZfrTsnzmIYKlm1TvH^ z1op0P+eB;du2pUwJEDFRco`|I$d}77!9n-TUJ5Q+e!?UEYOq6vpNlRX7}ql`I)=iD z$<=_B4J0B6NwUD57J@x(`+??eGf!DFUncIjv6V!2{%G2p@H*8+@~zxt`J3TA!Jl&S z9(Wro|GuTcJj5g)EeFSX!?#QO1uJu)XM`vg%+_b4TcOOYzuwz%>MW!2mrwV@MgUtJ z;eIvE^sCw>wP^c7blNLVel}g3C8y%C2$3C2rUrwT;*Y8dZ(l*Tvn~BGT(&(RD6>wa z^5K$o3K5@JJ;}PudtjnL72F*5@oT15{Fgd@1AjWaVHOP7>+qU+90R02|?jYDqL@G&Y; zAohCwNYMj>(?u>INL=ca5ERZ@S%f7BxI>EgT~EBO2qunu~)=BvBW~w zQgCltqu?IWZ8q1l4;CalaVVp~L}(pAjk22bVzW}-fu5++S;T0$b5n9EO?Ymedv7I` z3}{~b>y&a`SgyMg)~_wVB(OrA-Lm^TxvhOrJ3d9TotT>kI-_l{h~f69#;JHUFU7Gz zlW%y586BopiJBCB&$P%}XD=WmpjR!KZm_)WwQ=V}YpB&BqLI>OmMW)13j#yVRh8cA zf{S<3u#6qYToLjvK^au)ZJO;waG#dHb|LlGo$pzgB-d=;4q%(huKGw1K3Frn`$XlkW*!Bt8GjU2j?lIsWJXZ2Ld)G4^ zC)XpZjL0$dTmU1VDfNQ-)X`k1bfi6gR8eiW=iw5i-U18+5k-MBubtvILRO3=)e31@$1ttxP^EuteKPY;St9TN?9?~BN^JY9O2y(l=6U?&^= z&IPo^HNu}#trAy|3PaJIbV*z1jeQv`&)FWQndKKSMbaeP}FPPprpy2>~#kr)U-(;Z!ScY#&3C`WhDmd7@U_f z8i^|!&l}Hsg_w{7h)HczyW~;Vk6kOk?fc5E*^G0ftxBfyvg)2yJ&549ye%0a67{jG z&7IF(PPBQnQBH#D8ZUN;EPt0|Lp3ln8XDA_f*BDY+j2IKnH`}8+Kkm}Nl{I6zgD*r 
zc+{cG`yYYKoz#ysK-%3gIN+MWp#`!au%ntk6q=K9DowXC8h=47u8*8N>J^YpdB%1? z${Pl;<6xugDJ0!nb%<06y4iTlo}8+T{t+TAG!7mZen;47o>FQ0I{6FQ4c!ZF zZK3_~mW^AQlY}kJ)nTx&24h;^uk;2HDo@L*>&)>O>g~x^tjNmVjIM42he7aL{Ec*p z>6!YA>8kFHKYJb!q_lxQxTy#u#dR;+vA>W@(9Iqe* zCf)l(#i|t!30N)H5l2#pOQM2%p_4qH=S2hp$FcYXmI*n$JTE|oo68Bea9B(TK{1Zz znv=PDe2s#jGj_Z*3HLx&>yAHro<`E%8P~7UF7*>^vmJlZ4J2V#e?Ec7OrOc*b@3`` zb4%8K>sNV`7Pe*TLVXr*Uq>aN&B}cVC!f5>&jIWto`zeqcGg6YcLAGpF4m~Oucyk+ z5MWQU2S*r>E3A56(_F~n0^@3NXNx*`$L9I1yMDFKyA_6T-3CSSS36-2!d#(=G>B$k zG>yiy@=l52${mD$oD5a035kf7a?- ztq9DOrXw5~JZVDdswc$V=8b2@`3dyCwY0s(zk@9>j1lIdiosd?!m)4Kn%pOPA8xP} zNHEBmdKmbDNjJ(dFj}}2{Ie(8bEmZSiy>NGNeOUkzfG_#vPt(cv;gzG$4wat;%p}{ z8=}ctJgthO-H`_;%Xz0kO$xgT09sFJ=3;Lxj6UHnBkC&WDCE1H3BCx1y=UTg{qagy z<5DmI)gK&f6yIF}Y+0aEZ($eFwdTUu2z z2wLZNTN7;3Aj+^_94gqcUvc|XM5Z}cC{(Z#@0&aefFzhid=NW=I}zw-e-|{gH!IBT zr6ih4R$*=jEuZ5L2B6YZ#EysqIQUCq5m0^uttTwEkBSZcm$)mV@gYVHlXs<# zIlPYH)aO$0ga7Ics&9#LVLk;RG@n);9{~h`t(8ESx?%r%iL^h*<0+T1y zg(;hqxW^T_13jr~R*7hGT^M*XX$P^^@t&GyOeb?WZ7=bsYaS%-9^#eD;ry{t*kxcep@3bxxvi26b345_lDJ<`kD4#t$lJui zYPxOr?M@M!)7XR*PI!&5;`^^0r_Rx~Sv`)sNt!XbbVMCn;InR=PrYmbuVve%3S?6< zBn2kyW2;w3TbO$7W!tGr%D_IJ@|9QM7SBVqz)t%Ju#22vle~Nf#dtd)3j#|El?64z zS1J@Y4NKuRn&NmHTW+FK(D+ZtzsV25-Sf{%%HjFdgneUN!BImCq*nc}!xR zx>(pE!qk$Q!a}NdpEx?#A)2+a0C7e<4(~1qi8n>W3&iJC)C+;>*xXjpXVwVOQ6y}( z)EXzB`(<>FrvVQ!t8&Z)k99to-q+z&Qk4nv(Dtn(L_b8~2>F}c28d!G_%siV5WNaj zx<@4@AuE4{2yr)gImLFP5RMB@0fNgPanWsg{CK{@)R{)(Z#XEvx}c|k=})LYSP*ya zJ5~Y@1l*?C1BDqh-+t48mo8f;e(i>V84Pq3_0p2Zg*mWIwoLpQ!>NVf0p87YihQE{ z3crKlVx3m8h_P-PvD-+VGdG)_xrqea$2R9KxSBeE)`%K6bVr`yxPeR+ zdtNMG6S1W}#iG;CY7T*a@GTpGJ4E|IHws2he`DOa+p#y&;kUj0Ug9`aO7*jqSdph6#)1a|a6cF~`ZL|^eJ@=`!SO5Zkf zq!^NwSf$vMXYO40?ehB9KDKxYaJ`sV;3UPY(^C+a!PrAi5*HCZ#x#Ps6nD{d$?QRl zY~07$n~4zQvT{b{F3(@4X`#c&f+3yXVpC*XQ-d4;w(=5iC%4Zga&Sixs@b5mfHIEr zTq9+D2g1k}40X%A5~W`*rtl1c!ZAbrfksWu>f0^5NDIroZ!%L%WJ&;RIiMrjm~KZr&dKwYHone2RI%s3|6y#H-AjDnktt5?Op4LBqUt& z{ho^b-Nue*&Li*(^JbuKChe%fUc;#Dy=NjyH%!%6UN7DjcB-JX(|HUhZou2Z*u-?n 
zWbF_^QD%wO&J{N@!%Bc$M)I3_z5c*?-Fg=&l8}|Rje62EEjEr9X`XtjYB0xEE3^+N z>Jq+QnO*yH&+By7Lt!4>~%n=0~IRZ9Kl#{MFs?XLuX zv}Sr$Yytgy>F*Fe58%AW2X>fF$B$(({Mo><@%QkKYA{6|J8D>++sP8lnM%=FUzUQ|{0-1u8bnAQz zUTwV#5j_Y4k#1fg3XwRp_23qjAOa z4H4P{L`<~9-oBXnhYdY=P^6Hl_X*&YZRDFTeudM}3MAmA&I@TwU0*H&Pbn8%hvS>f1KmwUEe9vDyHq87m$_P=6MkRNcM_SK*E|j$Esn| zazNdLPRS&n>yF97CxiSh`>MCW+xy}J;I_B>Jv4O-Z z9IVzXM_n#^^?aiw9855}K+%ToBXDd4s#W_v7vv83;SF-d&eLwz5{IVkLyRq@$Qrru zrhbzxc6#IeUZ;15IdQoNOhbNd>m}t)gR0=f|(8t zIA}EAb666ho%V?<;7gGT7V3yTZss6FcMv!zQaj7I#g$8Wet7dCR3jmeGsf3br?;?k zP@UG!t&v30irM6k z`eB>-`(gmzO@ydEe}_S`pRbEOcsBHZ>ZL(SjbTklmgHy;v=Q_eF{3Tpeo_t)qfrZn z5a+j_Lq@{zuYO~y161$-8tL>K>dtPhqk+!#7s&jzr8|~p=mVka+w+Fz{Cv^8bcs}? zp$9DO6o7QAJ$i~xTPE)d>+c!p%GMqbn!d$D51&wfUggtOIXe68MtkyP-orO^L0^@B zwyAc{6WfPn7Q7{t;Z8b*?|sMzi9`+@zBaJuqx8GW_> zz3+vZtm3sEptf$y2l5C$%*Ud(E6Kq1eU1U*;#o%0`+<($^nb%~G1TbqVr=D6hX0g= z;^SBN-vD;edd7n6^TZl>|0RmYYX^U~Qat(3uZZlFd?1T6cuI(B0XD%zQN5t=UA&u5;{)I0+OqDvoUOz# zy_r7Xk==jRmeLvTZ1_$fs|0}m6^}~kE0dQG#boz+$XvUH4m|}_OK~@?pPSv)Md*;d zp9=Sb)U34Mv(LG%SSy*Xy1X?fvHx`$s&Tqk zc1NaPx8xg19~uSFxApdxW@}lRH?(^bE%|*@wfM4%mpS6*cYwu%tw=^vO}M*TJFI232fs|F7s7)LC+ zukXS}njO&6xIt?1Y90j9D9T-62+!C~-5?idtnY#?``T zt_!?1bKp4N|M69ktVh-WFA*jQbujBh` zyD9zl_LN??tak5iW!gXn3V%g$mhR^XJSpSPn{f}xIAXdUdMYgb8o3D;>7G5&4CgTs zM!9)hz{a|Jo>4{p>h*OX?vMbAB&djV3}gu-=gK#(yBHuHk!2@j#}IEG+-jK510ey_ z?Up?tjgeLRdi56D`^>1y&_{m2u9~(^ar_ztw_z=n1AS7E`GX=?lFqtXXR^EwUZ%eu z6)xLF{D3-Cxgb2Vqgjs-mFw$nqEe=<%1ckt5s?uS9-O;}56E(4{kn1RL+3!Sf}*^# z=)NAJGZU>6wXI5FheIMm6dubDd{|d9^BrV6%)p%Rf<2#jd1m=+X4ZCxl|-WD;`=F< ztd+<2eTSZM4aRWUjU*JJMins;+z0IyobiQ60%v$8ggxkZtt#o3x*rc|TecP6=hdXH zudMiI;cpNV?Y4_syVy#x(jI~xyrjDba=gF$rAPdQ3X4?+n_o%tYCy1$EErL_=&qCT zsx0@;ts_y~8`5>HOcWWm@OSyPeK)nV+jE9rOPsAD7ugL$MFvk;Cp;+co)VDmE zKx~9_t48<)ki7nc=-tJw^7jDWc|=8oszkL$cx`YSU;2wgdFO_@>HRArV$g@+n%J*l zZA*Ro=w+XTXWkkh^VH*8B=IN(-hIufIl}RTHE~$rSlm+_zL67CXYAdTAsEm9#uDUV zr|1b99E~XBj*4rYWiH76=)ApFC%s9lzY%mbyT94(v{29GWXVSU`G@sLT@?$T{IW8K 
zwNseBZB;QCLh||&n$32SqBx#FBAn&P#Z3I_<}lU?oqD@sjL}5qo=$-8W##EtHu;1m zF>@x=t0}Jk{kv78ou8zIeBu7$t0ec?D&MI*)p^65bM@-9RM&uT1&`jXFM?KQl@v$( zdXz1r{VSs};a`kw*R(hH8}8jIRi-RcvvzX{QtDmMPMZiqB|L?7fx+}_^`#gNETz?B zc5EPVCy`l7zjm2UA=_)Hu7I)!e;ec{NM0|@7v$#guP%J&NaA)@#*7s0yOZf&3jBy; zfa4;aQ^6HIUoCCbw897U>=a=0{LrwbFnxW=nG3ArW1M#BPn8#xyuK$@s?Fn9 zf|^EiJXA*jDC@djuBy|Q%mwn4Fm-}{1 zNGsngR08ogBR3jlM-St@tf-u3#UJ%YEV)={cm4SJ;GgaKuJY}}%Z(>5EPa#*G%&3z zHIsH|jm2b}kn>S?Ez_-l^1WuVDq;MUQAKW?DTD=dAK~7cFDF`aO#XVI-fA`EkT!EcwMVrn5eopmtL~7KI%rQq%qLx_0sQg>GAJL+mopC zb~yDlK^R%~W3|YNfZbU>r+YUSjK=QM8w2UQUaBn=m@%tzL`qvp+|12#!>(&4^SaI2 zSB|wkF2%NLgR3A;{6J8J8vBH=tZY40o=-Glt?XQ5Q>u$8MAofo-2LakHvy5Cme+-m zmU+27`?>M%idH zLc>YOcc0OGq0!zUylu*K{qpCFVV6A%NnW>e~&{ihH;6UeMPx1ch+?>tlVW6Qb$I4S{=d1H6@J%TA~t*#y4(E}sjz8+*Q+k*2z^?zMb>eo-S)z@w{}Gy1Mg^e zJ>)5+vnstyznys$oPPRD24LRlbkcUOkZl;6i#M6+OBPoFdbsgiNYXa}6>1BeXEK8W z_+=3a?sbDak0tud>zfZrBctZ}1rKgvzlQ0Ex@0{ew+Jex%^-`=r>RPGttE`k?q2jF zqY(-?6MoLh}lkNwwz^aM`40sElr*4&IrC5GKoTo81rjtx1Sj2aG0oPJt4`i zyGMRbOb;YQXcc#xncFF94z8J;pfl0WjcbI7(+NrgUD29xpnP=i7jH~JhVTM*-eZm? 
zOs&b5RDwbQJNjwf;5WTs)pC0R(QjfV5R35#3~toS5AFCtvJ%-R4^2p(Tvc_SeaeYL zx;M}RG{I)@Qf?j>ZHpLXIN)4Ut?db*x%t2Lowv|&0_cuwcj!ju-UY;6lU1#rUo~a$ zg0-w|2dDFa4T}9@S$|xv_UcfAwT97%{Isrf08zn;S5_=aL<%wisKQ*nNmp1+HDI?P z->#=^fP9mdRmd3DsG?)7gVvZBoQ6NBQG|EW>&u8r_d-ou)xi$A)!W8r_%87q_{F!U zg|8!W9*|`V`i}IZXW(2xjSbbrI$6n!JVMkfjB^D2b8|$wDa44E89$q<;BJ4$vGE*+ zH|y4@B2_A(Xg4EWQW4=5BQggo7qC0>!>heRw$vFQtt#7DjNnMK10TlXRv(1kbVY8` zMi1wzzSTW4=f`{sk^6y_iX(AFV07vFF&7|1h*moQbR#_eW%Mp8JrV;%sPlh$ysFXABh5#h0+?MhsTk3et6uKExi*Zm&|>8dWxSYdBKRfGUivC zqQ+OX^N}e{1$aZv%6AVGOW(3HklgAg=3mt*c?()>fYV`P8twY)UT0Tfyf+i`p4^f- zAHcZx!_lY$2u)JT2vM;IXnBe;sWv`1>hff8!)lHMK9zz~98<^O!_-;pqhT%M=jCdD zr*)PiFqy{;KJ_H#;T{Fa@l{_GrDS}|*NazmpS#@YHC9|Hz!rpgq7;{a z9Y1(nGz;D8{7dJ8<^%l^w8nH*FC2bjoKa4pZO$~zeex7JJD@aC6GCTgxZ}8C8c1{y zTG-)W8v++M%|A+aw>}Wt*`CBYox1Xcu1;Nv=4N2Vc;o|>#Z#^HV{}A9 za=gOv#X4G(bt$b|mpR`vJu3X32rzY-Oh$rf<~wm#tY*x^#tmoiJEVe%6Q*z30W7;J zo_ag&xI}5$UC~ie3i@<_EuZ~ zlO;{B%(2mUq&zAhS9drft2^1Mv?@iCg~kaV3U{xRI%eE5^{bjsWG)Z2E}CL(J7TYV z?`o>ur<&aDX^#9g3iy3CWeSU(RNV&;fTor!zCFeHZvAb#?X;5+3n$V9qYD1dZeE$U z6L?>Cu21V3PtCMGyLcl}L-k*ywA^}G7kB0t*M3;O{5$D$ z42$em;JO>&l7xGyz^rlXG}!7J=L13M%nZ0mxI3xjPX=&m&l;Z3=LF=l+!a#n?*Jyx z-!C{9E(W&0q#fNKKV8js;iXTugly$|;F+wKq#f6nrsqz$BdzRy=(#(nF}_noWj*lT zY+!cdnq0mAQH-#>ylTTc&iR{xiC9w5F(14LcCr z#bP@@8y4JCZtznQ{POwt6sAWWTlR=3ya%>MIOciJV}3TRI!gJ4?&0S?40r$OLBhUo ztut`WtgKH282nwZ;OCC$T6i6J7GF}v#m|Okk6u`O=y|%gBG3sns-_pe1aaII%3hzT z=>x1o=bvRgl3hDJ4wQ2UJRQCmJjl3X+g2x_YwDIMy#UVE=V&$E10IM4=`Aoc zFhlREGDr>K(528(ICf#t^WMGVQ0r!cSFyl=5qiSJPZtXA|aU$YzDA!T?cg!lwT$abqs{RVKvkx5N<;(%oz~= z8!gy59AJJzIMe|US%%$RP}?E=8=-Wxde!{t)BiL75qvD68o~@Zxt+n&)z4*}Q$iB} D%a;-n literal 0 HcmV?d00001 diff --git a/images/Terminal_Type.png b/images/Terminal_Type.png new file mode 100644 index 0000000000000000000000000000000000000000..e83ac484e4257eacad1c7d033811d2ece59a444c GIT binary patch literal 69402 zcmaf53p|wR_kT$>B{E%zTozr{mQ+$PjTuqe6m8ndy;3`hOf@bU#>`rw6x9|bbFsU! 
zZ6PTdm(iwDiJ4L+xlA`^f1Sv?>R_+`R z-%mQQ&WnN|3)iFn(}<*oZA1`N_O~uhTMqkoD)$QPNK`@8lNf1&B=u$XA9K#`d!>Eh zM8T3>3ZFgcr%qIV_f?3e?oZh%oT_U&8)h!vn9M0?-Gf>4NY5`rW7jg<$N0@Z72NUP z!PV!MJ==GIv8$kZxwAsP*jau(x}FsC?fMD@g|&eHpiEZV^;VkFKfb61Cw)Zu<775v z*pmK?JhZ=}YeM~ps*&errL8MQ?uphpS9A-99T@%|y!ZK`ykYW4r%A<#%+aQqkY1ru zs77iC%0$)sU-+f(eH-l4^X#7~76$U|dle%sA~*i9lCM&RsqDqI?}iH9BPue2y@Ljr zzR49sk*d%^W~OX>!29vvlY0HO{Sn1VDUi^8H0;|TA^ zC5sf~;oOR|Ewao+Q|j;UuJAe*8GblUj+I@g3$7F?FVJnvFNSpwy1DaJ6ICzGntF0Y zgSo}As(~jq!%yUq7NBATVNf2-z;~db-Jj8!L_;jUCQc^&;i0m{$R!m?4pp zi*uC92!v)rVv_mDW5S?oxkW-v-fW9n(`cGVwjnA-Z)bQ!sLSsSBb32f2k{eHEz?VO z-1)lFJ#UT6tA@xY^DTs(nYCQmB0t<(5zV_L@EC?$C{Dh$tdWq;6H9F04%9Lm24!o) zn9cL2XN5%EGrA`3ixICmLmS*+zPcrZu1!;Px|Ch;OFw$j0r#tI+Z9Jji{6|=k_OH+EvoVJYgT0SpqT7eo!gC5e;%_(DGbv)flI4{40n^z2SMrh#@lP8JnM+7# z2Wzb>-&L7Lch*Y@nn7wgaMp6Q1Xunrjy!#!fKvhc=py!>G|RhG1#jl{t8misyjyfndXNh_?xCrXX9)( zOgdx3^qXx`tY;o|dC2uGlg8tYlh}37{xdRy?-}^Im1i5QDM`9rSh#xQ)tK(G#eI+J zWRCrRP8~V+VWGw{^=1UCKW_%#dx0(^|G+u)^`rye#EGTKKLg74JU|?NNuq!782NVJ ziGF)IzV4~`=v4jW7`<(`d+7T)W?7aBT>)Ip=zI@d5>2nx@&+IZ&_5NsrCL)4UGa&1dKz+PIcfEc1!2Chz=>KUBsaC+W=zP!_MCw@HMG|)UX^9%dC%#(QC|WJTTi&nfE7p4)9>HJS%A<+b&dTf}%+%;T(~%f-Fckwf z*D_dNJei|`8XKeSqZ`C98y_b8j4&}Qm6$HAsMvE9afnMwjd^PsdBuc~$Rk}aYQ5L^ z-kaGNx%K%IivP8@<$V^K%1+#Qqi^`(q4Rh3bt-eU1U^wbF57aoqEl(PR^X#rWRU=R zS%B)rpaArerV(~I+GQ4tNy+V@BpqHg^oTM+KvQJeGE#2RQli&BVlr{d7O|+qdNf7S zVf#k*Q2{P*;C&(?#?8Pf^51llPHo*Z?d29NzI14!syl72rUWy`KdSXCu1JsQdgT

    =ZQ1iwX zzXKM(3CkcU45vG}t{Z-w8Y@p*bTNhAmR$HR7Nsa2sTZx!4p}@&5G4|+B3DRNi7JIp zn=Z#Pq5qWGz$w@U#Ghr?#21W^;#HA(Bwuflc;z9) zui`lP!N_&DR<*t;u()2^Z8wkMi2pVD_v%@LJym8Ey+f&pk=i@`{@o&tWtYE63~+ki zv=xmPWl}SxD6~bDIx=icnJB=nj_C|ED-B$r5vFC-AlrB%*@uM_+AhkKQjF~xDJz3g zq`UJdIP7+km^_!!qIOA_7q8Ilu;+`CdD5K=emto4!Y7s|P#n*#e2d9Vzv+>LIKavo zQSW+v^steAhWj3Y*df@BiG4fR%$!}ULtWd^!(lX?nAn}UG=9-fLH`mJtZ9!8Xjsdb z5i#A7n6FS((&;i1p`Xz7VSGUUk@l9Y7RUMv%!UeZ7A-Y>HrARaItMIf)bve#_MwT( zI=Gj{w*2L3hzjdsysRp6T0vAqXnaA#NI{z;{SRWf%e(fsk5DX{Mjo3DKMpwh`x~~x;UE4sPF!^D z$2r9L;BFNjBUNf!y4Zh|-G3B^2C=KB@S>+TZ=y%y%@d1b4;-sbjgxSBKPRPJqxg)! zMPU=3ZDrYLDmv}G$3LX7D<)z~NuocuYQD)O1msBVeG(sFuh<-9>wOm)k)Nd@d)kq% ze=w@AEEm;Di25ivtnTS#c?Q0*64&-_IFGqi82wC3kfr^{8@5;JEN?_dZARBOe>5XG zn9*@9iIPiKs%{R*2y(Y#3}?K+4NDZwWv~*F!RH$CwAZiU^{xm_=&vc^R*F*)>bYMh z2$&zN>wEb5{}2HhazZeZ1V>jmss_?kIj9Wi$|}0DZ9e_OeChM`aD3MG#dyAXod#<> z?v|`KZ^+nHmm?l@x42SUyMHFar;Tg@SB5Dl6bH@x8osMvgRp2Lxmx#p=)BEZf`-B( z-JjD-Jd&P&G)=dpb-3tz_KyrLUGJ33@s=gE-S&3NS%*SJH#~oRyWvhfrRB1A*7(1N zcr>Wo4N%@aen!Vk!+}1+Rz9gwy>3;B(X-UEoSRBlzKIN(%Sa*OqIs7N9;~K_srRzi zV1A~hSWXmBX!EgKN#df(4Dpx=K5w90pL_##uhj;c|;S?%iT^a9PfHP3f}j3qee z)~epYxh}sk@qv3gFI*U^&9q&cax%I-8ciW7`w)C<6NXb5EXHusON4=`_=paL+1O#) z^sBFC(AMvDE8-;FT$}zl8$>emPZ_;0OQo<5=ucSjTeQ#7bFY79E8m)6r4Z8e$)(j` zPH<>8S@W44hUxlCV*2bY*4Olz{plsN?^HH~r^}hM zHQ*y~lG1sk_?8B|B~sfr)ebiFXJN2tq^~$=!R;Ac@xdmi3COU9K|ml#az*sbf}aIqH{}%=7QYSza-5a)qp5TM!mfyIMob{Db_Y zyuxUA9ZpW>?4GcQ{XvFKie}O~{8-4j<@+(rA3wn^wB_2&cG0|&YZIqWoNCG7!I_G} zaAd)GN05^8Y)#44JLQlOWj(Xc?Hw=hd?Wsf;cvsxrQCgxZ7(%vpk*|Iw0)G?tIY_L z=~My7?P}4l;{`O*1PysJ-<%PuVxq@bxKR6e@eBj<@W4_{NhUJrs3BkMI+gR&STqUB z9W$rfTyXL%*)@$hw_Z37Jn(3V8LVTw=Z@}Ec;@C!>7z}QZe#w432ph(24lb*lFSBGP0AywN>%8sS{5v_{yu1#v9P?Jc{s@Ld)R5aU)N29K&Vj`7sxT8Q8EpAf^+O zjt|wY{C>K1@nN2I(n*}!YYy>#zH}-y{uw3H$NROZryMUjO$aTb+_DA}pr!O`mU0wV)ev^;;Tt1qexatk}aMZeUtL$RCr*OB;nr24oIfN#s z%*ghVq?E$1qC$?ZT7H$aFXU!fFXG@kIlYog$?>PV8xSL3vmZDcxM+`Aa@t2rPGUDY zm~7CDw;nIp8KmX+YoE@ObPLH6ErAS6oLcCDF8*F%$S@chCnv*p6}XN043-Up^~?9l 
zCPO19VUP(?D6t@EgvHkAxlI&!KE21!0+-Rl&+Xx_=%N2)MpyRuRH#0lAfvyS-M5Tf zcx@i0WI03E(^69Rl|9Y_!JmK-557pSbSWI({niW?&Y@*JVjY`Tn240vRuvYduyK?% zg1~X`k+?vT+LlXEA!8dWNL1U(y zMwlLPoZ2Vmo^eBJ{&@Imn;xIm+guFwL_hcGH}R}#g!Fp}0-E_Xqy!Hh5M-3K zW;Zx^9~{GfY0M(#kg?k;PyJcsxv{!FG+z050n_SW&$J%OrDwCx1-}t*t|^qSsnL`u zGtAp2b8w!kC0qFdE6a}=?&6$}#dd?~yFEXB%W&6s$~Pg95+*#(jT1Yjua;PA-M_jf zFz}6irTEeF#oXl|x=KBwTCz!A3#g7~@nY{D%KJWX;H+uRHYRws?XuOO*i{Oyd|&59O4r^nav)s34qO5hQIsBX2%q!gInuiUD-R zS(dQk=|o|*<{Rs~cYWxN4C{}|wEn2f`+b{iW;BOi3YMA#Ky3kK4wh+_Wosv>%6h}>-}{)lJWi7%KVdES^mqz zw2&Q9u&k5~A05!g@NC{l*~pVrSG;W#DtjZF%DM_{JXB>jLL?aE)(2Rr@%fLDIBP5t zuPzdM$2^wypT3}aPgm7m2|6bTlqW6)=vF+)x?J1Gv9e(Yx=(;%WA7R1}zUi8#vIwQ|=LK91U=KBk zL&ui~44e%(-;FDQ$Zk{i{&vl_n7iA4qZgeKv*{ykE#=YeY*Y*fnm?-%Fo8Cegj^Qb z+i$-cH9mwfcM_*qYC!KXs5?xtk{+(YJrVEn%YneJMH7Apa3-4o2qte|F2>=txZBW0 zT^&?;jwJ);jfL}l0OUi}iy+%iBUAuDGT$Y$aTb!s6g^^k4~5i@DS;fr0tIs!fmWYv zq9Hl=F5etATR3RnBIPvAAood}DsCV6JxEE`2)vX{SSQqN8M}TVjF2UvR$Z7 zVfis_wg{5gb7#6o)8McxQjQ;cOdB%jt86-ti*tlHJGSc27FQVVbAs)x?^k1$kU{A6 zB-lMPh<#;nk#x|%CekEEgmj=b?rO;P#t2V|kmmCX;`9mVk~V$wc^}uT ztN7;0d2KVCYZjK^T~GStw3X`e$c$b%MvPyvL^2!6aLmD8@3)Wa(g@pbIrfMHe|)lL z(&ZvkLgK=b49_Wcg_1sFr%+~w)S;iJQ4f9rNm+}~n1jFUU1IAR7LPbAG;~T<>-GNU z;~(VuVUbas&V2Ks~w+sZqV0G(P=A=JcG|cPa`86-}gKa zIu=-Bz(kE@O_9GL4v#)AY-P^po&8Z9TPZCYcxFbGKO@@TZngUH0}Gz5VbXrAJa`?yX8++pB{h8x1#gn z+957`h%3){|9wF3_dhE<4tD$RTz@6z4&reAIOi@O)`v{_2r8L3+O7$>stg_(yt%`^ zZzQ9lr&fL?U`W`)eSn^+Dtl{?$+6VJXjbW3wX2g&BL?JTkCV8)l3gm`AGF$TI@`*_ z-lopLgssiC4csNMhu(7B zd~~#oGJm#1?Own^pC**kUVV5j_I*cjpNmRkhc+sWO9(ZjKW3jqmZ_a)bN3;nA_YB^ zoGGbQ3HgkhaGXnTd#ZPqcw zlMX$RsqV1*r?-z34A!dZ$;0(ws`}4XxFOssZsfI1O}=(?3hELv>%iuo9_HRPREHx8 zs$w@Sv|Ot_N&4f&>B@5m^^c^~oI8A`IX0{VzFC3|yDXk%wAc*K(6t-0(dR$&-Vbcq zk7+iT$NobnE0I;vt5@>Lz;cdIuwn6!ytK*{W70RAP*uLKmjBDk0{%Cj&7vDA`Ce(9 zXT=EJld2ImYnoGWY=Swq5E--!vEBBrLlKrVJ1toxdbZ2RATxoFv1xiZz$Pb3-|t)aYFQjFVbq>Y>=?Y=xl*Viue4Y3R36RBw*dpTK-*DxMTqr@CTx80}m~_gRWA(Cf2IW9pWQ9%PF{IIlU6PYIYQ|?=@_z*BsBqtOW6O@8n8ra=a2S#XbAAp%CDiU1F 
zw2w!qnczak)_nAfHD@QQO}F*x!AU+-IYYNwp*Q;EwCE^ewEbr!BTGwAIf=8^38^sx zzXY2Paf5RMwGI2A4mb%3cMiieZ+UEY5}JB4tS_D5*xcnaU#uH|?%yRmJ-LVSIh*v_ z9M2a{BP1Srels3FL14#UIhWDBu%{Wr`O$6?$8bG@&qOlxp=a!b(8{$0RnRB!09+Oz zhwArSONQwW`#xqCKIUH@4%2sMEAPz>v1f!Djst+VvPlFTg@0eXrbGr9iuFt)A4{01 zA>ZTtTAbg1EdOx0RK_Q;N20zR)#+W-ok0CL+iL0wHZG7a+NU)BoQ(axO?P(s)n`y& zZ})#au;}g+iX(td|9GJ+dZ3gQ(_2a%xqD&{6T|fp`X~_M+ms0KmzCdd^*Uz<# zLyfm@caRny#5m=|nW}qyW|o+2AFKO0#y?$iy zHY4~g0qYhrE{@k&=F^uYjkSJ_2P?g-&GLXI>FLoI0^SQ7b>j;RmQK_NtH}8D){8dr zc{*&5-XjlhK#=3ficK9O|Gv)~2o-u&dmnu51*ZHhQnLwAA}zrq_^&iDgXzS=@n8`* zNGawFUPp$eBuk-BOkDweTeU?Xvp#1_iBz?XP39Cs-*hh9?Y|y3s-xVQ#8hrRSgrm? ztG%W7cY#1}>RCUxaLYN!BkJo7oC1!W8-l1HgM3MJBLXqm$KI*UuiZn#h6N~_+LIw8 zxNgBPbw_ISuj_IuGr$85=HbT+a=RCR5kq}3nzkPJoV`r#eYo?@G-&sc@qlv$z5H5B zFa`DrjZfoN)4+b09hd<& zpoexdehAeJDjGdO*5@oy*g;Z9S>y+yyTe;ka-h9Md>d_Tod5XDul}(>O$i<~%Kunk zJzxVgg?;2Omoy!On}ZBKvCEQ{nD+1Wyc44d1UJ-vYf7VbWOHfvo?V#P{ORbzaJ{V5ood@Z^yykIk_LUSQxbSJF+ZQL8 zPQ#}@Uh2Y|fmcNB$S&5`=5*X8NHg#8O|WvBk2Xi_ps@51iyf6OZ&C`+acy?+W;n7l zY~xL5uVNQ&nNpsOP}j+uNb9HITY1)*3DxNsPU`+{b*_k+C!ATLu$br0A>I5kK;aS;a)nH3!MTD_9+{w4q-g$eweG~KLXJ)-nGWud#i zW4eo=G0a*+{=Aa6c%Gm{W`Z7kt9u<-AywP187vwge<&qE_i=3qcfJ(?W+OS}6$lH9G@+X zXDx{7-k`(rf$%aH;3@gx9=^TT`%J78ncp%_V)crW^F1>BY6AbXJsg8=F{|dq^NNs? 
zAz4*L>jm=g2s9&f-2nW`(4#&o1Uq~Yj5HfgU!9(HXrmSl%Cj?A`4dR7Kb8$&RLaif zb$`BXf}%K5VfLY65x%2o$u-1?;|EE%|B9!+@JDO3@R3U6i$L{&WO1`PvFaCRE{v$f zaEeK(5P-$21ao4$H=N*zkFhJUC>HnzvUt1kf>t&J*Y?dA{5D2SEK@rx-+Gh(Y8OE; zY%*!AfT{Y3Kbto?mxrvh&jHlH(;905NbBG@NnM5i+J`iaLlJ%oA>Yp+VXZ)1E6KzK z-|zJtCo%7lWk*k$bS4-I6%rd4DWF*EnQL>ZlIgV%c8QN8)abk=j71_n{M#$;`?VlZ zHJ!bL-d`$ZM|Ek)PfGfI(PrM08(IQLCc)GZO#e~}*dChOIqps4z}Hj1W>F0y~0^q{&Bf$HWH=KAUV3Li@VjmP=w{^+c) zU(5@I7UR6w?nhXs;uLn}G>tIXId7!g9<`OuOsMJku$%PT;?1J-b{g^iQ3E99nY zs?kcKin{vkViG(kZ@TKr&*S-Xdt}S~oYCdzT8lKB4Tw@}AVE!38;P$tt#1agV)#{J z=zoKmxx*-}FsQ*k`lQZ$F<=N(575KVR1R8sZGCk}WbgS}e6osab>8|b15PswC+7V0fD3rl8bs%+6e(E)0MB4w8@FY|xX6^4$iqIb+uBUUb7!Pk9hB6nx&9dks(WDWW(GDx|MMPv^*SHRKiLww^9st>5AaVjSrez5<}DiXK^ z*ofvku%N{@@HAlIF>@0I;Dy}KO${6U6FBy8gNELoKfC6}H96Rc!r#-z0hq-Da78{% zOCU%<28%Kf2dJ2EKF6Wq51IKW)yncSthIM<9#XPOv89M^g{E7Yl2)h<6M*qBpAqVT z=U)JGNKF5?4o(0MTq}-$Ao0Y$OE;iHJFh5t@@*m_eeMUECuNlPHsU=WMLgnN< zTT{{q&hAyE$PbuP_5Zn&_GRl9sT5Dz!)e!o`44K(dH(0afNz0M02>A*NUfe**t4IC z&+BK_JP_|Dg(Y;FTqC)+Ud19Q>kt|n?RG4?4#`;V_egVtC&n60C`Og5*3QwXvG0HS zX}55xySmIwbN}hB`8CSzllZKr$i3v?kIxZ3hLlxEK80ie;wP$yv|+=ftn_1G0Dlqg zVEUeeloOpHwF?ph%p2>ToSZcf#y?4GV5i^f+<(bVFrtMT={`$Mia71QstZU7k*S!tmmhvIEmk4RYtB2JskadX}U zxS9z>kk1a-VK>5DQ%$+wU9(?Z>((E#3SFcsJJaRoW*?%(u!O*7;=NYjY4L-G9AJ3k z6JRuUnv&XVJ!&_G+NDkt#8aRz_EFLun>51ot!&Jr_5D3{D^?egtetPS)#1-~mSmqn zSM^ISw3Pb%14HZ}LZ?@rV+_2ofc6k-nEPuGhfK77Wazq;jrT@qbQH2g{{?0FCQjlM zE{^)P!L~q!lu$}m{Oq@)7}%PK>l{XG&C{f)JQNd{4ts@jLF=LD5d$EFhv)=)0An5X z#@=5G%{3(p*ZDj5g}820uWqBA1|_QIQiv+^;Z94$_%S#lK;6)lWoXE`YL^si3XL!n z9uAi^K<1a0XzINjvl}AJeyCj(e$bV()@X@4QER_F8bl!&)55yuF6cJ$~Y3^{{h(d)>Rtg3(QwYDyMw(`tU*#RqQ0&UH%T zdkNLT86b_0Of@FI{~ctW&oP6WmBtXiOw7R-fU>0xO+b*I-z^^wH&g@cg$X(9J2D&71tUaqHToK!@wyK zmi^r!gE^$=eSxi6S-mG^mJ#mxxtLQ!zqE0Ut<&^jJ-{J>C~MYrlx=yaTl?31KzAs$ zNjnC7`|Y{k4C+TUm$|Nd#2yscQU8y>v3=409!%6geI}0-Shn$ zqlRRV%Xp$SC-naV)d(O8!7LDdKVIOqBNM#0bA)t5MAvs`&{p1gXZ`u4Yr$>}cC>39 z;;p)~HC`t5=@TQ{Fj+Z(IOt>Ldz+d-DHN1b(gjd>>Lv6?nP_Tp@FFBiQ#Ha!DSA|> 
zO6!&ayJuAUK3nrD#|s+5DGyn%wnQ|Ky~QJxm6Ks5XyE9Y`tA)efEMm<~v_ z*YhTDw2x-4*5(xNT?hDG3(pEPxE#!VBPcz3GROHcuu0xSfBVZfT?U78wI5zH?HbAc ztY4&-+W($yuUROMh@YJv9}R3O#M%59_9sZGMeQHhr{7k@NGWP|s}^tQHYo-#6rPDkIqqvU`6 zxqa&J11WdFO;uCj_UkGnt;d?)M{ZK4xWRXrBn%htjh}eSp{19ldfNu`P&RHV9=cB=6IH6o zf9qZ{K(6zOE97)c5A7goIexeJPDJ zs34{^B`04rqkk{}cl*&4tBhN0tn1NZtae_Q?WYb_K0*>#UrsU+^dyvmRyNJ}p+>dC5zyXs55s#whH-DKAF_sD{M~n`<@bJb6OoXsFxUOr&z=u_}>o%xYa9cRic#wU*6~a*H@!} zOn|CaHEcsqyAaUxzOrI&^F&nWC@BV{<_y);!d0P=3e9g6%`Y8!p^pEfR36ailN%ER z7V3Plh^UqY)FhGD`fsj{h?rMeJHjeT!BB^PKPB9RO$8S^b1C)wvJ;nm8(aEy03^yTpzSy+Y1kLr0$}QK2sP zkE>hZ96PntrV;ZOCNqb23#ERe(fFgP{DJ|&oUjiNfDQ^UYc7k1PWIyHV(A8cP`fy` zTkK!a*xGyFGW9nSa_fm>?sH3 zeJTu!iPE|QXSWP@2bA@)g+8Le>~>!EB#t6C`7|KDiTw@i;f8H&5a}&e%!i-ZF}X{wmWhJ4Rn0 zX}y|9RK5|ukhD=e6XnS*W};m~Lya&V>FY7YkT>Y#Ang`i_|k9rR?t<`D+b%zNU_g$ zlj$AeY;ol*NaT~*sX!O9(F|&Xek`M13#yj;zzxScPvZP&iN?8~u07|1HL*V1D`T$m zxk7d60$aI&eo=U#<$Z#@#4NW=m$B1-fF)h08hH^V`Ef}HH1Y$Rjp5uqGj{Ht*csnPJF(IqW`k4G<01d z|8rfqlj?O9f03(wT9-tdld88&m}J(vMj&hOMESsSEK!<_WN_io?{Lt7f)*I|G5k$x zp9qsxM;AtP|Lj-s{pU!z16Z+){nLnMiO{j@|IQ!5v%zhoC$TF=I#f38M@L(f}Fw`KdC>ZKc4D&?k-Q~+wO14TS zB=j`*P8_*qwl04s9)%Hl2k8!x!fmmw+b#+pwbrpmHW_rg}oa^@{R3wFuOGFDSB93 zM-Pks=h(miEL5Wi7!JmolAhOzQCRt^KJcAAed4Ug6n0XU2mjo(bpgj%Lr?DkC%x_c z`^xz2iMrqN&YcH9x zTESd0kK<@yUNtyep;F`sd)smOtHe+6)!`e><%HoJmAuy@Ji7diiFBQU6bqiCt@rYW z-r7i>ZFwHwLnZg9h;8lqenJg&G3)F|82lV+w-44iQb_OJMh~aPE(iviJ^CxV`0mwW z`}(Ih7i)7$E0%|K;p0<8ao3*Rt)bEV^Xh)L*JgWcyR0|385Z0&C5wNA-|)~voCXk% zgiFrFIzvEDs< z8IHp;o=nw17$W6MZ?Z5{=r!$CHW{o;AM$n;^hon^I-WE#`7i-6i%5Sxoqf2M)m$=T zIXmIW4Rbk5Q&QJI;M?TOGokXaVM_gFVN6~Rx2#Az!{d#Zx01RKp&2=ElwvOFrqgh5wTP>D_0j}-31=iKk%g9Q4U6|sROHf72W)%jBDerprh4H}hE7qkT7udAXo!lI+6aufhbEdi1V$WWk1 zUH^1d{W%P^S_3Ngfu|?+s1F2(Hvm}GQnB<0)7eWsxJ79QRe2#3=O=)!)gdPC@|29i zuN*vQJ59x_G8e`3+EzHR-`$o_c2cfn9h3<;+|nO5Ak8`2XR9U%avoA1_D4y$ei+(D za$I+qll&ZXrVN&*>To>5%_%OA?7pK!)@-9a+m=mt+-gXGV`S@vq3zcQGbE(yoUhiS zzH8*T`OPZ-;j8TvP~z2QgVoaEjRI!BpCpV4q`jpq*|LJ{G5k+C2O>x 
zBLNoenPVPiu*yS+Qx(t8sh^Ba@`Xc9)N*NI1PT$@ec@)U#@y^rNQt|0CEbWP5i4I7 z4W5=dBm-b>mJt!!UN~n92@sYRFNYw%cCiBeJZCjq?C^Jp za|G(y!!ww{Wen3p_u4OPp0E9uwuk9>1Xp$GtAf{iAo-LmR*kzS;b*lq#co&uT+Jh22j~(YI^B`|jk6?)1x-ch(9{}1eqp5MP2fq|Qx8D7$%2~zRJ<3jGY_252ulWx%OS317vcx_WUvl2@%cL?7uarg z>R$mxFBDFF4_UNb5~406(X2MGEwZ9iRh9*;V~e;@6GA}jPqJ1@ z8&x|j;1S$UG5T}U=>&G&k;h_-HuLDd(3BlfmjF-4`*rz1G$$rnmX9x)!h!h?BOmA& zW`>`@%Ikr{2KOzfScbvt%bH#v-wcEq=&vn}?S|gBFt$s0F*@OeH{v&&5SceOE=3ey zaw8il0a^@@x_kg4tOFJ#;>R22?mzJGjxZ9| z+h;_y8R!$6lS3A5#7j5dim~$TzV0x8%F(PLXvSW~3~mXSfnT;eFWkzPH^Z8}FTCZX zh_I5|Ac2R%0e3F|6tWUoyO%7XF9ZkSkFS(>4Ki9Eu7xXy}l^`X)z;@VK z4ekQk3E~^NXp_`@1aF&$IEZY&2Vnjf8|SA51}lFGsHzWwUlyxtN2cVBG71&}T@Yyo z9B?6k643r+0&J6i$MnAxnRC>v9k>sm(fweHw;S1DL2xq)3s<=3y77yh+b1%}DcWom!ygP2tdCM115Kf5o=nRz-pXW4Qwp?9@2PTy)5<#Xv(I)ofVM z(wi5V9ewX6__RE*MxYfF$(~Bn8No1i50nGa2AIB6Hd$FpGSn)9$q0aC(Zt}($qhz! zr!`~bv9vT=w4%J{13oza^4aG6W#aP0{^v);xZYc{Mdw(y>8UhPv%t;@S1MjR#_TykX*WOSA7Ex%4DdQ2xg zfq4N}EkBg&u$fHH1I`?cBm_SQP3UA>{&+PLH6AGeg4+gsY3qdx;{|UNo#&AfC`{l8 z?q4uYlKpY>dz$SFU>eFz50doaZ(^q&o6D$Gw;lgwcOm9L37s#TaMvy2etP#a@sHzo zNecdy*;DQa0)G-y+LE#o*Yas?DJeOQB0Wz+WwwONpSDE){N+Gq+7dZH(hE_V8jR$s zw}6h_`jIX}tCdXGwLA4;6O4Sl=;zw@Hu2D?Zk%V%VA%r8wK)(y_14g+kTzWALgPOZ z^*|1=71t@~Sf;ZQxBK$YWR7?~!}KEVvae4{mKeCj;FtYw`OXFpRcr;@x&f(dH#~h# z9Qx2OtXzPUB(no+42e}4Fs<+}%CzLpfSyQl)!I7dC2ZJ!poiz_!SOWqo`@2e1>Z1O zmfwV$K<42XxsP4GJjT&(8wO6dE~Q+DYEyd`WfwvJtNnH2ARPH5PF151Ql4H+NG#Bh z8BG4B!!aClAU4uv$hR$@dF@#2tICS{9 zwWVYmuNa1*?GK^MX;eL6VFrY!Y_xfgZ_yto2|heT5z9@3%Z9nOtufHl6SpAcS{m|I zP*FA40R;GWLoA(W5vAJ)g#D7)}*x(TQMdb!#3`uHX})@!SWrztW)_ z+aWzmJ;CC`X^43(GVy5!(l`O8!kuqRvLUj-(k{P_7wIke{Xs%Grax9;<=Dn5>oLMQ_^xKplr|rMgMke zW335s>ZBhN?^M8K#+kw#mj{6?1>82Z=28BX!}qMP@{|2hA$ODy6-@+BggKE9AsgiY zB$;)1jFWs7XG#iIA9$dJyb^1pIA-|ct@wb^`&#S*VP=%oLR0Dw9pZ18xabWxW31xyPH?~- zxu&H1=n;ijVlkFbKWbRD{$M+36M8BkK+W(LX@s#C9d|TXGPR*_8>%V{2NGYH^0<_7 z(jJO-M;&%mLS0UY9t2%>e`M8G1(1q8kn-boXrFz|xJ*#v%Cnv*5cQuBof{c2g>(k3 zFVv%)W7B`PPWev$3wJ4VXo_6{G?z~#9nML(#pHc;GaSM9m2oX3JW6m}E6hzaN4*x? 
zPeRXV<59EDQB{OOU^oqq>kCytk6KI6b*6`MIoj6~Wxsf;ueXTpk*1)OR~8$gdieoy zg0}(Pu{Y8jhsMe?V*ru*hNZDt$Canu%qaoen+8kFH7M0+2hh747px8UzJNEIK`1VA zx$ZlAp-s5I`pPLxi49;H#o*fX38Hz7(D|`f2`8Y73~X7L%T7z>3~9E)*K`se5fL)&pqX}^6MQj+qo zqNbS3xN44>MH?55iY)#6q0Lwz4XQ&Vdm%_wtR>igH?RmO!Wm;_*79*FN*vd^c$If3#eL;^7Y#%jB^ymitY^NI0!#(Ca{zK~VC*lyOC}T)%^)ViaaaQSNO;NRQ%N&K+1u&+%^Ch3 zU3{o3!bm|=@NdhYHJu|p4$1l8FJedWE(s5^i088geQz4q9ucJHbsg6Mo=%{|N(@Xp zTJ88p*8GRyikXz24*)96VdM#qJS_0O`;d}(9;F^t@6F9Aced8qCqmv;_tE)xz zg@a@BzAx8l3G(;$_O*ep*p7>L1d2oO6eug};4X?vF2(_qzk6AxaQFfZ(t5Dj5CE&;GT=X*|N=Q(BfUz<3nya!2R99{rO0>HUAw2hO%~s z00hs=-2)6tRCm1(QM(>2S{j>zx_U4wP!8~|A@-T{3HsODUUwZNuA`QfrSwr&N@b5QIFw6}kH(N4Gqqg*%( z<@nxMEpgKb6KAZJ1W)CNsYOpVYlOLCYBqzZCz(mXe_6&klDiOU7kX-@a1txNrLxrA z8h~XJfhj2?pYmTG*AlHh^lGs|J*htWg4as0SRVi zs1PkcO(Hv9fX1xRayKR1S|cn-O-IEAquh$p*OBb)KG4~MbOHui01SfU*@fS%@`u)5 z8h{VC#i5X;z#uE-GpkSCy}NYj7?ENTFqV#%T0}(B8z4?EAH~_yj|{PEngNo zb5XI<{hrZmHL2cpN=@e2j?wWMM>3?gr{TyPRnNBUHE{xJPo*iOG267@8jo(6Pla;O zr0p154m%;t3)2?c!0l{sbw$Cw7E5imrGMU}^Ocb8Z4b1RER0E!aECg8tO(SbD_IJD z%V5(tzWPM=wuDE{aM41)ecn(zd!&7vN*oLc85A?+3!0-J!0WdI$7B!8y6iydq`;xR zJa2{)y+KNtpO<_N9PVAIV?hy2j9CFg0|Lz&cze7chfM2^6T>wf=Zu`#3H*Y#W9Uf1 z7sJ-0bOz(2z*Ks%!Y&mEl5ThJ!8&V=mdJg7Bv%U#UnbS$&SU7p&r)V()Pr1(n!Q(- zaC{$IxGSuY?NKvxH4LwzAO#F}Js)M!Il~PNO{jWfRJS!9?_a|)ook!2I4b&<{OC1LIcs{*`*dM;Csa)w&^o4TanB*U$e zx=Xvm{3icWdSLL0k&z2YBdj36=5cAHHO&vZHwY9790&b6%Nyp|#z91#^ z5HH=uo1vW+?IApw#MNfmz#3m*N^B!Tp!kVwi{)`ETk5lV1Tt@CT|*!Y>8LdVV_&F7)WNvB_%1@N zQQvNXPA5G}5Cno6o@3DYa?l@IlR-8+8&C3vdoLctaPF%Q+#TFjFXxzM;WJl(=PzQI zd%9L&e&}+sBc$B+FnoGtVHz6lK*HR!=xrA}FF*77wHiac@1S#fyrBA>cIHt1-mhu7 zAXb8QMq(a}6OcNdo%X|7`-Ytvg zUBC7_prFm@oANG^V@Ao)k1l=5BnJ*niVGC_XMhXYYc7w+EHB(5z%5^<>&(iP!aZy( zlnXC=r~u!biclSU#*L`Ku3;FwDIV+-I|AKkE0nhByCg3p14ii088A(9@1DT5SFVCA%HtDCY8P%=uOrFc@mlr7eSKrT0zHApjdb>ReXgRfmi$pVvJ@xttEN$&}rl=LKy+pp~WrDf$a42ol&4_!V<{o_*BhZ|5S-J9^(A_zLV ze4E(&22N88v!RJLzwml=X|IAr>#DDo;|F+_8RVWFPF=mKzo$c9X7GC!C0-_k)1W|2e#IHy+z~> 
zkMWnC${{i7wYvABDLH@NVwJ&?e_vq5K*>rOMcNQ7D`qEzF(iJ?e!0KVsc`m1{a^Gr2eQoj;2$NtsS zdaq3wJ1#x*5Y*J8GJ`!4fa)?NmFhs@$CiO$ztf@6>Y%5sV_9ZO^9gFMx%=Y^RAf?F9=xZ! zcIH%5O)G+inQ)4haB)Fc>?OQcPkq&qrbtqccNQJ1=hzz-kK7CN?R-qmASio;weeF4 z$eOkyC?cuasU!t({hL_UI@&={y0D2gOBit`JYj+O7K+hHc?Rz$u7?5bDqZPOzVv{L}m3=(1w+ox$(!0@ySQ}^OL!%{OU zKIK2BsgGu#J*cA%S&&DyV0rJIzY zmmHxq_N=wIbI|+yS_3kIs;)Z?@Yu;vqYA$sK&8M3h)`WTfYKZUMGX=q z54!QcaEbuJdX%eQwCXwDxiGvuY~#Lo^EV41m$TU=eb_L8SWb@h7vOK7$De{K1i6057UM3``5M z{jYV5ss&+*R~Ijn?Ws~p(jYwb*!-(IRh9ys6BE@m%JHY4y-ytoNDhF;-v#P!hy}C~ zAbpDW7KZ!{Iid{?I+cNh6zu^9^99ur`S#D}!#eB%vLTr4kPg-73iS-iItBCar(ZJ| z*hkg?%l`0eary~b_*eLj29b{suYp*M1ExQ{<*raS4N)NUb@CTHO*Hd)KTYyk*<{61 zD{6xP^OSWJM+Wp7DyaBu0BaqOHpM+=e-HdqKo&95gf>BWKm%@D1m=>ljFtkv`{mB< zhKN=7gc7(!=a0C^%To@k8;3<4R;N`@8F9l-nJqh^Z9B>z@GxkG7=@CpVL+J@{8z>g zuK*;J*SRR)= zg^U!AEBEbKy}xogjP0uK+|uhyTTsy-X6;A7_l1wzn{G4mxfD%=x(W#W3B4fb36)ls z5PTZ(sO9a3Y!?c`GokJYD(pbDfiU{X zP%i!fT)1=5RZiR|W0CmBq1-|) z{&=Kz=shTo7M71OXoQr99`nN-pfFg%&f;Rkh_$&4q3zsJhk)yZSg6t09O)K8M23;o zJAnL`XyRqBV~2wprAY90U!Aukwggsp6!w9{BjP@2og4~*dX!wf!C2{6 zmo2ODyABvqS*-nQmd`3x#B>;10C1GdxA@=os!5-T?u`p_H3kC_xQc=EAZ9KKwE?qU zJD@9*kF)1|b>hFz!=Q<+fI4m&AMn?d z%6T;mk_n&^Ks-EdHFrL8$#pbccvG?_3c zTN%Gh!60Fjywq`^;ur0JICAVb^j@EbO)mOEh>LAUwT~gCOw;4#&IoWLxhKNp*zoTZ z&&?fS*()x?G@rvXeL)^gyjbl0(;ceRN_?TCg9gpFgJ1%_DIEBE3KX5fz`?bYjY~=L ztzec87$(kfREyc$Ll>cx1TuGA4>bhGk_C73_sfdPu0suY3NVd-sku`>?G|v8oc_#Ptp;G}fR<=l>(uX85)WttTg<zjP=W#>{r}MtW8PsS@O$yiyT57WA6d zSNG`>S7cy9XJ{=Y0EA$utiJfiw3|?6K@>hu6y1-zz7)q=6pxW|Ur* zp*_1X7Mowfo;GmL4SeN!_6MsIR1pw6ABtT(iRNAp`Nw*|c~5F`ldAwvAkP4(e4g$o ztf17~Qm^;TSc22EWY27+iMI8|0p3Y<0z;rNu99B0k!6y!s?1IXbJ z69EK~HE6%OqpE($J)D3Hnr!=kHzMaG^k(4(PyOg($~+(OASDfaDBJ#Cfu*Z$=c-WF zt04r@MKw&;YN)#(%lY6CI;qA2hw=2e0f-%6OmZPRl@E?2AAlH>V#7-;IfyuN;0CW; zSk*BKI4>K^irv)1-WTe4=Y@z{Q~WyWa^oqPht0kTeSah6fLHL_<3d4lsPMUS1^(Wj zgT)6AKq4)4r#M7YMC&ohwQXwwiUUS&G=O;D{xs;WAy=U)Z4^X?1$4H3ZVAJlI@K>Y zf7;68S$7z73GstusUVnHtLrgDsap9{IHoVv%6Wy5SOyM(frtyTy?v`x0`khNG{Rl9 
z#^b8Ij_Rv)o3m}tdF4(Y!uicp!K6KhDpA|+h~GMAfK|RgG{pf5f9N`>G7>y>*?q58nN$+%qdEqO!7 zd~(E1<YyKA@15)=hv~g}k~ccEU|`?;A#bj5=nrQ?fn}XVN&ZoY zD_=x>t};dK3qd6y!w2XJFfr|mh66jO*JnFICBRk#$)vTaEk4f>XDo2*)~6#X>L`YT zHy$%*Rk=Uqv*%}ZqT?sde#j(l&J0pu^98KAYubq zD`rs*_0bFTLN;F($Mf6d4H3E3s7!z8;<$GUAc}L@_L6;}9Y3;d1?xf&%mB7Q_1kpw z4ZEu1fMxDHA{_?ni0KUskdEtb4$Tr2+5)9_+`<4$HYOh0q$kz}No=1Xi{S|?1AnF@hbA5HWmVG=I z1%C!C>c2A};I!E3Zc?4-XC4u*0e1B5&+(u5<^FT9g^CUT4i*mLwxxt6h!$ZI?*(w zvruQ?+Ol*COqVGfbU}e-=vHz@J$sih>@)=Nj&MD+flO1>CQB%qzFm4!u6zMbuwuf# z3$}9dn^3ZF9a?B0V|Nc`Tm6?kS6ar+4S9D~SWl!7yQ+mr+8L4y3ddq1VlkK7 zy>`zvO!Lmv_gBI-6O8BAGli$LP3)kR&ahrokt6AgjC_9bLeL)3y7PZ(4091N_R zO#zi5oVkJ7FXg+Y`^~R6Nv>naHW?s00|Ok4V9` z*WdGUfO%z@=zO`g)>4cS5EIJu(cK@@>PDBtP4lD--DRuuldn0x*6|KABgSk{tEoP0 z>^c5UqfIp5BsXw<(+A(y{@R#t5H0rXIv|TUK~td_%a2tcmf?2FqBAw`t8fJxfG5AT;$ z;1(4Ma8>uF`Td?RT=u?ul8<+wR99g^fZMG@@MhNW=a}!~=*6d0$IGw}>v-WLe*(&@YCY7?HadW@eKviLBxAz{i` z)`KQaM8y-|E67ly2F*pbg;QN^%@#()Ob%&f0JYp*$1n;nDQ?G!rSqP7nTMJv2*H~? zycyCYMRKwCtxySQwS!bc;hW_TIoFQVBbD50o@5^MW)^hM;=F);{qR00!LuOZP`8Ch zgkSX~WkNfE4?4#*0lDs2`wzbhU&k4I!|EVD-Q;R_fl~GC8vonAuqeilu?iK47$wSu zcBs?tjNlq|7?%MoM*Rd@0UOaN%asM)4bee;p^H@C@k+HL@}yQ>JFC)eC>Pc$tR+A$ zC?U-WOFy*R!d|r=Hr^>BbyedC{k+Q7kYx3njqNWp*>hC_0}EJ5ou8&D?2HmIf0YSv z%H8oh_N4moCa0vZ(DdLo{Noa0IO#oFr)2yinAoSq1cNOdHC5c7!sc*sRjH{X4hkv? zj{afvu|2?V=YiJe&U(y{)@$({wKpeR@L{)7sV9STZ7p|FalAxYqdAqb@mWXiuod}v z#{e`i=v4BAQDF@NXhsL=aoBe&8!MzKJ6yfDN#C7wz0UPI6v~1URhfwc7M8;4 zKimR9zV;HwLMdJF@C>f4LbRn-KSMBVZYagY+it4}k^Km5Yo|;B8-9x4g&5p3)+_@X zy6hVGqWXW*EQhMR8zC*TE>S@!gkn64xjFO%uXM=J#VEy(KaOenokHlyPIR6dGS9B9 z63*0F%5(111=MxynT9ru5x)@`d=($qeggn|+~Yc4YuMJGgcYtSRk0V6J%?<>FP2Q^ z=^}N@%%6G|7$>bJSlQHrN9t~9Q!v7ZptlY|)N#%42-=@-$3D|qQEatKeU4v548y*d zNcY`)flV1=^Zpvngcq1+ONH%^-LKHw(l$m6ClYNy6Z{-nXAugPSOuRpm9sgpPJ77R2k$_6=aCmWkqurbQUM2KmdR`1 zL#+gLchamtWw2I%`%n|okuyZFYUZd^s=u^%#gn9)! 
z=V7DgraH|=Yo%t0<421zq&c`Z1y~QUxV700RMn@iG7% z4Ou>iFQ_b)DBOr|wgc#W9PMo=#2i%R#?@e#>A@`G{mj?kVFLj@zUT?c3&%qo&K%gc zc3ldEN-wneK!8YnbGC`6Cif-ApsS;q0Rg< zm7Df56Np2BF#?DrKW`yDJW{}q!@&MOcmgONC21Ni#TDsEA#p6nS>toO5rG2rj-Y&$ z9~9!3T;sOpg}mjy7d%giwDSw@e$YhiF?0a2$wm_0Rrsm@#&ZnJXsyCIv?0Fze z%FYXW+f(1y9QmRxZp7?!))uHfWEVbkES)(GLdV6V7%M6PxQkPEO@JtQ{po`e)#l5W z58_x*XiiVsZXMCr@|ZgjUsUb`Q0q)HVdR2(iLqwXZP(KiDeQ#B5r{I`ZAk>F$27d= zrYNk}J-=t9j!|zRN|OoeI}3o`Jrh!?;cI(6_JUYXq@}18ry_V8es>4d;C6fLowI_W z4#l0|Kjx&yb>dOSF+jpWd3Y|u%?{}RML5ARi1R^9uTTj6-eA}N);V*#UC9kVuR*<} zQvkp;D26~mI~`HbI+mc&J;V;$ew$+-w0%r@Y6Even+!Vqbgq zvTwi*Oa+)WK*xCrCPdb2w%I$0jn4N4OFG5)z0klA@>_q}&Oc%w)O7|QsrFQcg%u>4 zJ}%V@5qDStY6b$Jk92l4^MFk8W5lsSAH(L4rP?aOYq&5;0wJ&0Q~K*6++JDWL$2i% z(HcKs$iA7MFrk0p%??2?P5pxhe|thdH8F*qEew;T6#{_pS;s+?J#Cij#0!!niaX&| z_VR2e3~><&cAq+Rg|JeV#aI+-zhGtaC3ur&cu*ftwWTjB>t#)1%5qT{DyfFXWKh18 zjS;tNVgm$!`^45M28$K<62)xMUizrEn6aO~rza8ebAIXR<<22AmqQJ-zin69bH~RC zo6<}_IXweXWoT(|m78vX>&=6jGf#JD7{M`*f%Fd7pyf3h*=&N`+LkBWZRV_1#dw&j z1Tdj`x5PiB#SJRTSkT1{F$M(NR=LyM69DIqFOU^6312lr1S|ihsY!lOcQ$_65t7}u zxqIa?C?y`|#gTttFjX5co_=dEo`n7zR49x>K&Q=W#6{$^iE&)03vo&1s6zWjDpVsG zLTJhqnD`v1l&=am5ME-k0tbY9k%;m!gk^_X<1V@bDT&u1sBb4hMhS_^nxKurWhacC zupLsaL-&UQ2n$g@`nQXrq-X9$vy~nD0K^x{f&O-J948dT&cC(mHm{vs0tEJl0YC@1 zrpSCLq)xuns6p2LFsR)yC_IPW)eHQ_n;dtk++|=HkoLT3x?~a{>#ONdI?!+!R&WLgA6N0|5SjqbH!lLv4<%-hPPdRVl+G|0tmK<9p`<3JsW^8@NsC zG&~%9)*t!-9P|AUf1Qc~z_Qc!MY>z2f~GofMbN0BE+}0Ws1>I;h-LwBzd7b|u|3fJ z4hC{>Dx{WV)MIIzy)n}Jv$z_OAPH#&*I#WM)e`p{4-7h z0U(h~kExaSFrgAFe;T&RJ_Qg6#GO7zTkfE|=1@pe7K>K|84d3UWJ5#W3&ug#G7rca zQAz87g;4QXVWrGMU|R9r2!Wi#mz#pjETEAh1g@FV5I~4Sqfb;(#6&ciApoc@hI*)$ zgM%8tf5Km7*yH$t0vlf64aZ$g4gaHPL&P!6h^6k`3vg> zKi#%1PVl+Xgzv;>-5bzB5$xnemp`5%fSDr}U_T0jYHe*>#R9E>i&IvHcgM5COHS!$ zf+98fJ}v5;YbMlnyR0fl*aPWDE5_9({fK;ELjKBz$Wo^5P;2EqKG@w?8rS5HI=1y7Z{2?r~?mWgF zf#iwnX{rm}whj1e_d!Q6fL1K=r@+sFvlvVW$lMr{yiTn?J-P-cDSh@#X8sHde`Q!2m;iKsf%u8lWbuyi4Cz?6ckuixPIX}XVVWz-$e)%C5TKA=L!iTJj)1PU4 
z<*<=5;2M7awWgZGoPdAGy*C9(`GWRU(U2SJLHXrnEoaXBn(6$7!~gFG6+CmL(BHp% zs1TM{juQPVH2<50CQjHd7KZ!d1?B~N{}@{@>}jpWV0c63wK7Z$^aN;^xgArf-H*#~ z_JRbPxxsIK55QvXdwtEANBRNCq+1R`FCe%ldC4OwAN0~rF>2bvqj56=#+-SuBKnB&Ze3rp$K-R%uRB#=Z~Xh)6bBWr z?OU%UE!W=fTrhv_pQJ0{tkzrjyNB&LM0*03kk0I4b2G_0MQ?jH0?7k#1BGb7iU$r~#ei?+edk4*UOnMch70hwe5VqC+JYnv3ztJ7#hO0Ne`UHB%KR#h1tHi%Qb6CV0!1=!!+_QP7d1Z+SkeB=g zye)a7p`q{M;Z1{zj=<@r1t<4KgjQdJUKT*)fZDPA;}ha#kfZir{SOnC70SFhHN5Eu zAzHdQulKu*neTVS$6)+_R&Duh6>NI-KMl14{*p`|gf3!?AH!X=we#c6C%gZO_q0~M zzi}Kbr36ea7|PH8G?ajPRaiLaB`G_;d%m``XlIto57k%JWG$k$lE= zfM$F!R&U)O?YrhEkD5C;YG7-V4T*L<%LIC|929s|XWyNwychOh>5j)TajC2v~5VEnb_t zW?nQ^7FvW6_76}Pq?QBj_mA2c?1#3>6KacsjRH{AV0ioSVKFoSImb9p{PudZl9Z$P zxy@Qr*MN$etuUbqXz_8v)+uny`St_@o9LQ`rmbhoel*Pm z6?_#6LK}@iom~&9mlE|GB-<>$k zr(NboN^2oJWB|K+LcY7n-;r5I8uhFD=4Sx%ia9}fo=n*hGLRp#_z5&R(|9M_I+sT{ zut@3E0kZ~m+wcW*qHQU3qTuh&U>fgY+tPV9U;F||G&I#2l4!Ko)%g}X+4eVilKr2* z9?9h%g^?SOSr6;-_i?soaH*TSr1z#NNnFkxB`#xJIf&r|Px_q5H?O{}%LW4Sy51PH%b8RqPH zk_{X0;XWHnl|G!fuHj7H3CPqs1~w<`+_V#i_6HXHvpr3Bfy;|0 zt6K*(1uCU=REt54&MpERJy^QbUF_2tcKDsR&*XS1T>1*;I6KwSY?=d6_X?iuh9h5f#?Iy|A3_o;UxT_`80ODiy_q}Nv0qH)Z<+HdKkg~&v}mJx5YbfN z#e{R_{9g}fGz^GwjhEp2%-#_coS_X~L5g5iWivpR04(ossx1Z@!EnB`GjOTKlGz|~ zF}zYm@qi8|^X0<X{qwEqK=v+oLkpy9gj;XB93u<@uycgyrq7bL^_V{*2|jh|v5Gk4}O| z#dSY*B{V-_?fd6PHQ~|Tx|LmS&Cgi-|M5}azK=HQ^MpHd?@k=!yq#d=RVe{U)5b93 z?1G|rGm$?3LoUm(ZujH|vJDu57HItL4xQM3?n;Wwz|H+Zeka1C?h2RHQ%uge(GT#u zxcAQ6(LrjQr^uB!26G=U#qa}YcA#<46S^FgsJ>lyFUYuB;QovfGEc6+;R!{t@*G`0 z+jXQ*DjQ$Qzl3O+qDid0S<3dM^5B%MLU^gd9RFYY;QiP3;((&M!Y{I2L|?cR17ff7a-rCpYFkaoG9)!pr+FHfF~{vdJo# zUX|R|`)u(IJE6xDxoyO)KN}40`~6RHOXRcF-&17JvJx6``O865pL_UM+_eH$&wxjU zielgn{cIEdlP#0eH$x>Hpqf9Ep)s2vD+(i#XP+BB8_8dJDj+A9MwG%;_(m?WlmB5D zm)tw)Ty88OH_i7om?6Lo+JLOjU7L0Kij02h-$=>mW zN`hpa@kmM!exYEQDl1g@I|Z}~%tjfW9!qx0<_yGNQkI9-^#<{P~iw zbvh$?&*Nybp4b~!j2wvoVtGuDdItO1{*l~jAi=xL6D%>2%?8bq`|177bo*|}5I@b2 zh5!TfsmATzCiA_~Bdp>2zsXfisxAB2KUL#d-$+cHW!V7~Yb6U5y)()}!Yj9(9PUDy 
z&w<|H@1>~r&3CeGqIr_pE57yxD5Sp5OTe8aQkk;lL{7+?JVo1X@lLkgQC=?3SUv|? zb3}L$xjLSU2wgLGa(21OmH>Tyxv7HB*D^YTtLNEe*CmU`eK~lqg^~%*al}Zw5x#Ev ztzh~ zUA%!%G5R%EapFX?YQHJF2qR4_|02&4f@TDnrbBs#`yq>Fh0izZLz2C!qF+ zi3#wV`_&^Jo45ISU9Wvx3v#z^tq)C>&V{Fy6Ylx$&_~0#M*a8x&B!& z4t}GQR{w0)CAs|k#J*ffpM;N%xmrS{R&UM!ctI$ULX%TjQkg6#1%%**@&XWhURUME z!LJKVNoL8|#E*6v{H-$gB1<7VsM?~uH9FAWR1=298m2IWkdLx15;ZOM?pZ75r^&X9 zHC#INl?UsBIP3fCraM$TW0Sh*&e;AQHubFty7g`wrvLhKuU&dXw0?fOgkR9#axLC# zEtkC-!=zsRkiAJFVUPPyVU6<1zyi{UzB#T&&`&P7DR+v;{o0<~+@9%Cxhat3Hyqe@Hj{(8j5)5<&B z`ymn=){N(Te5hgiC+=nU15MKRJu~sUV>=#cV%HUvK;eJZp155ji&D5(NYjPl%O7OI z{i(_u>s0&w=5g62qLQ5gqoBozC>9uBfSp#XuObA04+sFRM-2Uc{)q3z80nM0l{MVJ z&Ligozi6OZd8Mq23;ch*{9;Kf0V?JaQpKQd|0~nU--v7v+d2pMm4JFp%K>{+vVMEG zE6|+Ch-Ec_jY{ce!DVm)2Cg((X1q74XW9d`l3foDw*9>gnla)g()^xY@qwOWs(Y;6 zz99ltc{G0!`FfGts22AO2VnO|0x|@}S@7Ym=@_`h?*6a-is8^MrF0$x@n)#b0_6x*;YEM?=2i8x7vKZ3ewt;;~)mPgh%Y0S{ zlHA_Q^^Wy^$WH1TvY(N1ubx0@vr#UfgI!|tyE4*7uA1zy$#L-Iq2Y4#&P2-r?w?}VSm2Ye=iQV?HC~aHL5EeJg(V$9LlUIrg*p>X({Ri;7Q*x%y z#ceMvw_tbNj5=R@gP|q+ENVYq%6E01*CT!?WvZ7vFcC5phJq(hQM`=bGGe16C z{9E^IW>0fsYrTGx0pnPOb7#>kzdxyh`h7DU{4VR&20rgNKbf&R@Z`15&Og*cTj^J` zsXkBXRq6rJ06@Pw19}1C`s$s>(;D+H%?SBHjX95G!h#lsr_ZH?QLaK=6BOy~@6Ek% z858;}C^aE@$8TYI^PmF_kUbYYg+T9~)XAaHA6W{J{p4&dVJar4nO=IDRy+`)g4+Yc zY&gR8`>SV#zS{QA=>>hEIYDcpuG8`Ny5RgreM?$_kyh%e1l+`Ae|zM>Cmt(_UEWab ztCpkM0`T?yz(ZzsXuTS9DGY7~P|ct7BVLDef(38_Zs^U;yb2B4RNq}vhbBHArBQ#x z!&od33f{ta9MBm;?>e^m0&@OzS!50kNihg@>ut1m?+)F`&P_*_)!6Lht8ee^Er-IA zPU>kW0@&S1)tvcy>xA~y>T@^jQVIp!J})2kzBys)G-z(d(4d)DyEziv!@5{Wj9FA; z^4#b@A}Jf`DNxdq{FF8GwT(qzog=ic34cZG;^meloDR3&Y2+|>{3YM6oqO3cYb-6+ z%soks?20XXkqTXVgr90po-TgaoW`K zK+bQ2YsKk>_UW48PiI0s<2NDit6fB*)K7L-F9%!dYJ^0te7mnpT}-uHT_$BScP>ke zidfljLrb75i62wBX~(o z_si=nx(bI3XX&BH6Q77p!Z>PaP(yEK)c22`4C5vLfrFFCR5Up`&%Vp-u&+=v#gI%MQ2s-a-ChiQBne{payp( zbpMm#rpN@X;gkCIHEW84&ip;B{6`lvp==zVre`0Nrt6^Tl;wPpIp4;Ym$!wTW|dNq z(q)rAbCCuu{VhoE7Pd(S)2Xfd-m96=VO7utoga%zaGJD8`?l?L-?8OB+&QQYGv57p 
zB{Y0LJlldyk<0{?fKMm>D|s{j{kZWoPH^LCp-t@zYoLt6noYA2dFqo)nvdoBmcBan zh{5aRW#cl#?qM#utCeet0%L$tZPL+L-JdmRH8nP5C|U`+PO1J_jYk!O8SvEh!eW=Z z<8MRbKRl<;W9U3-Hw>q^JZjujTP`uM&Z&sBauY+f<>oPVvXy>^F5TGi4Qno&LfBG* z)1YNHQAV}wFgjw=h1y9WvjA$Qv6ja%mketp?d-x=)S*NvZ~ky_`cY=GKH22?!RRNy z{sv6)V1?MtZb&n$S)$er}E@fA81g9tO8?l2YdZ<1M-0%Fze@$ zDb1NxUW5-K?9ko2TAs)CC4dt)*SVaqaK%AwU1#r}XIn3=R?kGV_--&2)@CTbc?(m) zKYF{UE4Ztcp3wu#zY1=5pbkqmhRmP*gZkv=C(vT}D{FK+j=>?>oIY&>cV^#!-pPGj z3wp=4?#F4w1auZ7zLBjJaA!Ak*^?P!qsyh)i`2?z z17WP9dpHOK9`ow^`723H7q-4&8bdW~A^WfxU5-6pzSYg`45369{?eM7u20r>5OJ?h zcEAIj?Qe>f=OFBNPdixQu0D-_d9Zd9+;h+d{O(uXH(t ziK;C?S`eEE6-hCH3R~<1y}UNxg|@q5$W6M2b>OKkIGgl*@%y6%{XU< z{`d0lem!(AXrd$HQO&-|938sh1T2xR{s-l+xdttcfF^s%&`%5)_X!6eQwD^Iq#@Md zK84Fa71?^R`Qn%9*B>0@wlg>zL}@poX)eNADfYN#32+Ru(zI0h=zyelGOM758v4uOCfF*b8pe+5OkPhDS7Cf#TDEs;loc{flRptDA33N38M6`Rmy`ku9DTIZ&bO6LmnOS& z@AlS1e$D&Q3-a?dfK2JIBlK_Nn~&^y9v$4>mgw*K0%2z7BiG+KkM-%y9&gvq&x zTF!m}Zn#}p0zK81`?DT$uk^YlJncFJHQxJo7dJ;%CYf{byCW!Wwbr;+I>R3{e7}wN zTZzfI6Ol-7GHb!8muY;M^c)MaNi!4*0?Tf39dwETG7;9?6?`c)Q69Q!I-G`2>Y#Vf zo&AAd59q@hwzlz(H^5~xcO=;O+7lLwv>k99(RfbC!X5e3wPFj|$1$PVnJmA9 z2TIs+$(ir+PGLeNn4E38209kF%2gG~ce=Cx0x&wS21N6+IqD3P5+MVoqdP?b_A+LB z2xtikv@M^JTy}^A9X;uVFJ=Tk?b|}lOz*1RBs%bx0_Ffz+ZU>b8W|wF32tV_i9iNi zxJh;5Tu`ny&;uRLrF&$lkL18{C==BeJV*A6<5r?ZfZ2l8Cfw4~@gl&dS;_DQXaoxP zELg4(`6xI=<%9{!0wUm;F{tgQju+x76$!dpY%8A@!pkrY>u*8yBJShhpuNI_i@Q?~ zzxuHodbAm6L_t66!kMZqSwZh9m$xx4PUXcU(;vS$kv+ru>dPQ@C8ch3He=;G?ao8d z=JztTRC)+z!Mhz-94e#aTj3@YzNp4hbwtGV zlig0C@5b0csJ=c`T|8bT;NapTl!mn`0V%zl{AvAPbzOW>5bmA-?+}QGHFVcib0u!z z)N+fsMm@mN$H@if75@p7^HUWtETbAr!w4cGwq!-3x+>0o>jRDk%lk* zlwD016MXx^_w3V}uR)8?hSm7%G|l&6lOc>_o4kxwZdGC`v!LF64CBfh?YDSNxp`Ia zt20(+!V9W(%uj|bH~a4S{Q9BRp~tBCak+0{fwTW&hx(7Xu2Gj`b-%&aq%O>M>aMjE zmRe9Oc5a~Y`nxXPi-1XwE{9zWjolWqUpiyBhFR{(sQiTt7IGu3(c)sZZReM{l)=)P zM??q#ZJjGEf)J_svzjXzJ#<9p^n$;2r=Svnf$ra_ZLFb&45@epZ|QA*SZB0%m=MZ0 zY#UMdjLW{RR_`r&UG;+BjY3j{zYv&;+&CG_0nQ$D#uSfvgyaxgMTqTL&_A!vF`0z% 
zJOR4z7k9_Eczsg7-jMO2fDi&qLNfnqj5`3GnxZD2V<0ohktc zPfk9(ZXDdsq9LzZY528xj8rzxZMzp9>6q#?>V9TeZ)b69Kc(O}mqIX+uJhL{Qr+`b zAeD!9hB1Cwrcm{?U3V;Jlu8GJ`1n)R%5TRS0p8#%2r_@~XS4pGFYkW;Y*yOnthSu# zDRo6=7PvDzs#SXFR@AB=v|G+S3%zH#kC)>5vZ(HCp}9$F_b2 zv*%y%ERlv3q|A(a5!_w6wzjm&+Yk+!QBTN60 zwId!v%mrNPzqGyi{1hzkYZmk`!1g>1=vqVic#di-$oBhf=7kA%SV9qm2_>De;Yp-; zSK|9k(YviEdeRo>)^L)Ky|mRe1!vX)H29=uMi_9xy>M#@1HN~5FG0{NV5iWXKR(jy z`;fpUmLM`#%I3zg*zO1KaHR=$Ibg4SFEugE?{2u_7B^_r-iI)+%F?kii1(OZqTJsC zlL`?KXotb`f<78Rs{)l;U&N8gP0&7N&!l;>obCN7s(TC+!q0xrvWVwEIGQ*Z=KRG9 z45mw?WK{v9e)NrggJR7?LEtKg-EI*j-tD6dL^RcDdQ%+uLwRbYW!&!OUu(S#dMK30 z;?XVJP(1q52JJQ85OQpQpu&xVuQ1{`qchqLrqEYq2p?>;0GtT?Hv>uJz5nf~w9@L( zR%4fWEnWiO247(yx-Zpo;0j=MzKqkypHQ6s z{Z2V!f4RXYb(abInVWiFB^F%hsICZFZO=~}+nbk4I=FLj1ITv1b1_b)(h^{Q7uO zVS#&LZogD7g}_>)a6JEuIomj5q(}88MixKE_H)}-J@ElULswrIIhERPy6W%P)5qDw zt>Z2i^gm}*@I{FQJ&CQh5=p&g{)IkVmuYyL_jQ4@u+BRylJmO7JcvE(4`NIrL3d0( zdQH8)a2s&Q?NGPVwijhixLjb@n2qlZY}`gL3hT%{q%KY66YZod7h-Gb{&2b+2 zE3P8-gw*#*IWD-4YVTeNw!eulko3QA54e8q&WCLNT*nslFYol(D`R0&EBExZQ^PH! 
zL56~2e(%Q6BD3+_^e5( z*xL7SNln%3zq#GDT>JQr8=b)L&wo%2hQn)_Gmfo{rPA+*JS&|7{Wry(c|g}Ox`^#_P{44U9rhA}k1E2- z4_1O#@ohffP$ngRD&yNu_@n9+O)~J-Y@mF%m3CWcB&C$@n_mRJn)vbztPaGy2LI4w zGnUkMG+esxO>bsF@N)VVwKYbG_9R^oQCbjLs&OV0C3_D}dO55|#NcP%~=n~2Fd{lx2}s=lB^bDNi`AC^~q z7m*ec=)L|=TWvd>jOwb*3aaoIhq5YG@5b>X}%r)u~mqqnFMWuhpHjOAx zg}3gdOkG;V%RY&fye{Zz3fE5UeXiI4tN0eZ&7hSqk>=BD3rDbUawvq*F-#A+uTCq9 zQxR&5AMDp@-!Sf=@9vasx_YKjw0L`)OA@O%Tf%7v#@_~H`;*9JZsrsJ#8^W{=WP6{ za+d~f!z~rzi{&BDxK^QXqmw!+iwfLlj;E!BRb8rH6jxO2VgL<@@wfv?ww9zApb29@ zzZKW@;4#fw!5L5Ug>)16;3O018vFWUNGuG@Q_tKR5w}rep}Rapj9mUoqZUhKkA`Bl-9O0Q43hg^<}&3&5v|6 zaHrT6_gf`0ojrR!u5i1tIJ%2*6ufF{jLap(dg~wYnzDrBi+sbP_vG5?M(!02S{r9>-LfNIx@H_7HE6Dr4C#jVe&K0R7vp)U(>c9BIk_cF zAc%u@SdYU#spn|WxR7Epfi#!{L;WJZXMOo5aH%(s;<|&+?*&=J9QDu=jCFpG8-}?~ zQI7tn^GY*ns-C4z4%*w2lx%)CXm4+Bq!Br$|CWST4JUY3f7p|4WZP*Vl<-fRu{UjE*H&p zU<5}CUPAl`Y0DI=?U*_P4=-~Rg+E+~5ij%$2k=616xGjnrh_b*p$S2+x&~twd`bGo z4I}*#8|-CHKA>glZADHaO7NB6fzYBpq8t_BA0R)6)#@}Uqsu?ZBc&LN4(Jv!Hp^u< zJz-&DLFR((KK+$Ur}n=2R6_J}c;$IaC=(OPOS(oSw)FTuF13J;;E_k(-N2dFaTu>e zry?5a_87gJGyXWe+OQ?OeujkzD?%wS-SJV7kMZSs>;C8(brymg8}Dt!lo>m!2$N{p zNf20Pok~tTUJ3zntGA%F+zP_#kI=weOxEs%oBo_n@QKJW+tBRI;5S3Y>m^q&eYp>CLHu~*%nEjG+uo;&=XCY@3#&d#GIj*1O zJc;I)Y<4&NJ=d?ZuIlbC3^am>qqeK72)8|UpU_^A>1XQ030hlO>ju1LQ@_#4#NGPD zGd5_>L;<=?i2L}~Aq5n1iQ&MGx<0yGGxG#B+Jt=I;45R%Y)G!gyXQHH*FbAE_<8e5 zfHjEMbqWA4auC9z9?(Srlp2JPbc}UWTaqxL+iv=27rOtMGQ}abW(m3DO4CY=^cJMr zou*5;NI%tx#KE%ty3KsRsF23 z-TBTFA?SFKZ$@UZZdoy2S{2sAC43^}KyPl!Wqu;D{u4(jq&#fB{wsx4c%b0C&{ z5hLE72BAA^KF|$70EZZ^B7}?vciM0gQ03x({P-rdYw$kM8lSSpVr8Q?BsBkfc}eQ_Ml1I`U{9@*iqZ`OEAX2OKmhRsfucC`TU z?O|bURV*#VHeGLp-XfC8_R-~Vx6fV#nQ}_x-4hpVwrbFF+2!x?AUEhuR?t1C5X{c4 zkjsfqawqt%SPCn4z$kQX#+C_nu?Wwx0ehscZXAKN^R(r%^iSF?CjKTdQyhxfOS`J? 
zm5*kD#XDpB*z&>(hC}}fF&6~0=C6sgex7f*IM|z$gzw|A1?JY_y)5y@dzhTl7Ng(= zq%i0;ctb%cG;hHWF93_>g2$dd_jak*RyS7T*?W8TsVrqcFwygrQ+XjpKcMaV;M3(< zg$cf4Y4+$Ce3o`)LUT_KFi&wZ7HX|dEV=Zz2 zmA}!T<&|GJQ0N9$0$2#!&*D#rEMpfvZAW^R4mcIf(M9gSEYn+CZFnSq`f?2#BM3&j zN6RFdT8sidF6}*C+(qH`isln||BsZC&C57{U@o;lpPz}#hAlE@J0lLv$TXnq1Rb-+`1g6^$|2-?Es!DS$}e>tX{=sB4wOuC{0b}aXHVGXsoVK)Halm((Ld#$>1c6N(w zP1azfk1N%4=uQ*rtS5LGE za^Uc*7iV%>oY}E#aca-T&NwZh7jzE#3Bz37>%p7jU}BdP%{qF!JvO8$;|T)|&_T!LVUxwm_w%_g^FL*SX_wRA%uVH} zc?$AOj@WPSt$lC49vVq_R4%#H0mz0A+3AIeiB~TPwueW|e+B3-3oW6R%m$8T8nhTU za(AR75nUmIKl_$!vV9deu-VOdCgiknR7uV+A*UG*uCO+qYQ)U$?B$LayZIy&l<3l3 zmJVzk#s=UW=Lz!+0srJ#d91X73t4#8mhGT>c%Ql;W$I}v4?lmu6?+U8ikduYlIxvf zI+t>xY(R{$i!~1UxfvbePNs18 zssw~d@Fj3Dg-kTuL$QX2Ysl#kyLndjc8938JhFlWXut}=dBJ0L1v<+X_fAsvsHSAE z^>Tavmo(n`a$GEpi{L7(iIFLPBXzdDk&q5;nE_sK&@IX4&%<|XtS1~GdOdF_wXokx zMaB{}@rn~TP}>3TfgW36FUn?LqbCo8z($wj4x%JwG|j!3T?nYTgXh)7&yXb&QU*%s1JdxIIe-jC(g@1?SND?rgAAOm?UdOE#i%ZD#6x ztpIJT4m5bYGw%rW6Sl%3K40Iw(U(#{krYU~+W!iHC5DeO#1rsWnn8i@_yIXfnYcMm zpF>`DbvbnEP0<1ih`ZeejGa|wfE$$q)yTTANy4PbL8q%NR=$w|Lo5K|2~DzrP}W4_ zu%Svo-DetI7J;N}Ku}|VfEgX}G9;PofQ2D-1ZW92Lr#?&(1EaDsG;yd-$Hib(e7>? z9Ik&~X>CvbfpBPmTe0K_sJ9@11?icQI)XGxu&(O2O_C#gf2#gEvNI<~(JZs|t+ zZf17R+x$z&7%7?9h`JrxfuXIJC+#5vtB+GTT58DS?%0r3%h^g`X(hK#EZ>wF+)*I; zr4P>)Kf;Qu03+}X*~^mLXW8HJw8uA;TX)#tU1?mWpb@7|+d5rtGC=b1fKBdd^*ki? 
znMEQmAfkqq5bSzzAZ1t9SL9(OrtGW?yz2^)>UjF-a$qQNt*}1g)zAV8b+;J+{w;!H zUx7zK%JZ0I4#5fbWCu9H&^ob)Y1m?nP!M5@_w{v|sx~g76DB+O`E5|E3va_mQ6>Fh zKS2CADqiA8>}VccE@}ik1(FU!a(zWDEd&}aUz}r);C|`>3E2uSrNP6Mr!8s zQS(+-@(G=8JR^>!IWE z$^7pKFtV7Ia$_m2$Wx_a$pilFVEk_gEO~Qq|#EYM2UNUZ2ge(!Jt z1t3*eVGeLe?DE;R2&BQ_1d(5Hh3vw10m4eMKj5pGMk3r2uVq0`dJ{B_N+M3n%V}Y8 z*a`dY%>@$04Qtm^UO@Ay3pns}6qCS|IYz@dC=T#fsn)YG8M04=0m>tsZR8}Z?J$z< z(&LK=Li1>3a+$&$VNc#Ue9r;+Qjs{sCF-eb`foFX#w^!|5fp4C99!A8(m%7fG|PC_K1D$H$I;X0xX_+XJ9ff#gh}1!8w#0UFJ-S zJWLUGs*}yRlR`Xn6f4m6J6yM@o5WdI4vH{1N-#`ig0DRum6l}=mI;$b75r{_%V9fh zc@b!hQ_)1bQQGjUPH^kU)1;f?Xa)4K0K8$b z`gYvG9OF#GxjrHZ<7)2$g#9TFkm^}n-_D^@BbMnwM6$VqWjXd?Do_Yp{`$24FR45V z^r=OK#7MAj7`9=Pa;l!6>^r~rkvq_0W!QDH`w6yX-4MD;_+Zc*q_gEo)8fZDisDks zFpC>-I4s7z4$%qrhV=Z6p8QcL0Es;rOV=g(=7C|YB|P38R9(q3H3{(|d( zejNWXRR|cNB43Xm12z?P1}R*cu*a7fx!UwD1Rt*kd!GauHj>v7o#-&5s`a#ABn%d< zZOOMngp#4RAeJXht|{ux&)Gmu>;#?#_j2VBIIjfZK)+P1PWgf@G<7%~YXdB&QwI=S zC~EM_Cen(D%GevK8=sNkpGKnrY(?`1LP16&VZrfFC-NrNQSH-{An7@_X{_@ ztE1<#LY&$%N9DT~);1i@ykt1HS~V2kz2cX(Iq7VniA#2Hmk98ci$O3eXi42`P{777 zk)Q&D1Vd|%nRcU|#2Sp1Z;V|ie1dqpPanbH_pFsS%PzzwbYl%?p2bt{dPD5A+(F+sZ3aie}A1^u}Iw zYbl?#^+P6l6AbP zV|5gYD0^EcGL6HBic43L@H7t*7sdUKbnv06tG<@>*BUtSzJDliv|*rYZksQ7O>h=Z zuz!kDc1a5WBiX@ffKiBJLb#vMwd>tJIJ$fkt*N)3pFT8of+_2~gKhZgXm*A4VsCvU zTtJtS`-X@CA5YFR8z(^!KY?a5A|Yn0VF20~q&<_oT)o$V8#HFFjd}!{Sv-4dcS>3N z##3Dc%Jx~;W0qXsX57o!d3!DoOC;z`00|vILWPmw_|SZ=#q+zP`Z&aBujxcbV(E{2 ziCL2hO&!@Ch32~AK#auG1>AZ%$)Ui$wTs}yP3!M$@o0EuF+^mwt3z>Qj}!>nd>x{C zxeZ>{Q+kx|YY$E9-*+olzpecX9RT@9ho8MyZjXJN&z2`^gCj=5x?ZOu0B>fHKJ`oSo3(%_^)QHxO_0vWZqaO zKPzs0P^mcl&V#^~GDJ!-1uRBX{Vjq%7h8Kpsno{%9|B(P!ZT*i;rd$e0XL*uN7d6K z;+q#gblKv+MJG5MOi_TzI79Q-A{xUWJaK(kTDjem7P%Jaa~2F4Mhf)4A>tP8z8%lx zS&`zye>%E!Xn*X!N>e`QpnDW5UuR%On3F*Y0;hDI+sjXLDrE|=BH?t-EDA;fbyoXdzBR^L!D#y~s+=Rd) z?j?wF0Q)Ln?zHef;hl(h_vn|zR;@q1KJBbA^eS+D?9V8+)8*MH4l%F+;UJIm$G5@m;c_i8)ozN}!hVg?P zC_Tz0&kwJ@?Gv#_YPMxp=D{RDF0-Q`g@ZcW9*L#r~;ZDTvz9*8XWm_0`YU)gu-|rHmGa>CbjO%UQcvbu~YNTwJlh$k9N;hkArYqXGy>Zvtw%nFFJ 
zWugPm7atIb6+}Un_I$j4sySu{j@Ic3^meK)bw=~ua&Mf0^*X^SKjg`-EfK^fnzdgX z9-f$N(Ea1mb%j0K9uAgn%n@8Wbdi8ri!ZFbw5zQau>o~$JogN#W$aIZQ)Sa$1ZP@D zSG-+=$yj zva<3J#^OV5Q*c7W@k_3QW_xj1QWmn0{<=QPMx3d}09tdSc6MFcoey;Kk$rT}!|3VU zAU2|bQI5P-YeS43qu>7aNg7mzA2_T!)@qlddFON>+_2$c5K{wdGTSs886a&ukb8(^9>HJVR&JBaWM@FlF3aW0xP~z8&&oJ@dWH4-nh3Pq@lz#98X& z5uz8IvF0-WnR|Wd02iF(e`gJMt5_wy%r|o~c9{}9oEKXKFnOsiLvdF+nwg;PeGFgV zRCQ-rd#ddTz-qadOdLbb$iEfrp#s%!A--J0g61&Gh$;nMckql}fydYpwCc`|GP86| zc)t;89;~4M#u%s1-zVyZ3bHqI&!n7PB%m;Rjc}|A;7fo(aIApofrNL(XP)DkZk|sg z+`Ck$Ik06KEs}g?OLm0l)nRRHhGmi#rN?;U^`c|VraA$>=?wzF*?Wr5p_|V;7ugqA zFCe#3=RE?A8mpojIn1ra8(M(|nmKz+3#<1aSATb!M;)=efLOtbku`AQU;EycUNCb? z@U)0(yOY;@0%mTdk@HATi->rCUBplhnw=%IFr<`Fq&Y~o65`Msc#xA$_S7;NVGRDl z+eJcQZ@oFWZBD=x#p)NFW3adlQZG`KBD3RI?Dg&I4LE-nL~#0>&6az8Q6YbNv@_ep zl-+Un6@7yKnQA>A$_>PsE*npn=$~O1O$-}>FRDTV%cXi&VGbvJc#In#|7D$XWB-1Mr{L=tX+G;B1_^zn+}K`e*tc(YG!KT0H4^YYl{Xq3x2Sm44au zg5tST5^kn3G^GkpLH>dHrq6fV5Ptnw_ z?#DMLaiXQ{yWKsx&no^SnV((fL2}F(TIusgMC`z=p|oBfd5N(R30Eb}7d{rWku?tJ zQ<|bb;#nM^w1hcr`d{4~rE?Uy zvnC||DnqNaJd}S9v|?65RKJ|ZY((3cMGHoPp8nOqB%#pR+RRaM7eAJRnIHOKN}GJoWb4>Nuu&(HxyG?1J1aigh!0ltBRq5&m6 zK>Z%(_u&z-niAC;2L@!_Fj8#zA8Be>_f-t60LM(LJrJ@$P|-p$iv2*Y@~mT5DhW+^ z%D+{Nh|qF{s&jYul65!mZ~GkJ^i*LS z^#a!gd=%|ArhYf|f`9$~vmm~U8UKboD%vyDbf{7`nv?Y-qg}8^|P#V^1wCYM~7TL%=eCL_OBR4E``4eIlo^la1fm` zxJucD_<$Db;o{6)#coK#Y0X3JIgO%ZcmJcTh{l%+8%}>(h;Nvp+hG@O&@u`#8BENn zr~D?WC&*-;#$$YrS7W-i8agIKHlN||ZV`NG3^`o?hqSP^)2Cy-J35ciDH(;E2-w|p zi{e1OaZC8~aw%^rL@Am-T5Om~kN#LJ$s&*D@PzdV$lCQcQ$7NWWq1+bwt)kR#GE5Askp70Oo-wI<9HbbSAmq5xF8w(_PDX zwRN%Xh~e4?P}9O>HLC&|7*o-8oWo1pcR$mmiNM21=U!lZPZ{0r<+R9>t^<4#&}!|{ z&YO_XD2>+aR2ti&W|n!RopJUYm6IH_$&nTbcnmOG{_GvGlS6+!@!Pv+2EiKD&Mu=D zyRuh9Er?|XiU$F{$qHLj{YX3c$f>aq9}e8|kPZ$yF@bslF9`WdUqF6BKz~6d-dQH* zBU2)u(3Dj*YR0q$!a*-j%+}6lSIFa@eDgi^LL&o34bm;zU62?8S^$*TP!>)m4E4$L zrq_dbhXiPu(2!J=;1D1Z_0zrRdmI8(YoS$!SN^Dc;CJWPJ-a^lQ#p4{0c+kqeH@SmkZ)%0f@-ku4*?EU19l8{o>)c|wscroa@|9pxiG0e39gp#s 
zUi4c0$W4#m*B8`khhDm(LZI5~rQl-UU_KO{448S@*S>(9ULXRf0F<%=&v&I4fF5ZXxL@d*vrgwxi%03FrpSGIgA*$i?f9Sa3^P{0#eo$X)E8+Th)>LsB~~ z6xe284H8%RzjbD^woPhcX|0@Q!dwXCN4!T8yonONE$ z!H+t;oCfA#26y8UFuEUjx`9uvoy@gIdqNAQ5k|G?uru8&k^KhXk^cx+92GW`ds%I& z=I##Snl6t!5`)|&O9YZOm`Khqv!&Y%9YrH}Y2X30a@=KwPzf31;1+PC^E;q0LEw8p z0SL^8^r}yf=Fsnbdb5E>`>g-#MNZ$|;7}XxN?gtz(L@79m)9bY9{#)?q2_f-SjSbY zanN@+Q=`ZFy2rF4j)L^;41zOQi`@q|0pJeHU}3})7e6Q!)5#COpCRc}%IT~6jQZ@E z6$oG*q65(?To#6`P$w%httlHY60vHKFHr5Nsgg zJyjzM`s*n@<*W}2B`*d5r#;9hN$HEq-Mg-quP9$QZT;ms`>~^K;!dW6p}f-LAV0l$ zxt;#e#GzvMu69F5j~eD!XOEg2KhhX4lc({1*wlV(ckgQ5BDA1Z)P5oI9{;5z5P~k< zq*TqyrOA%v1BcZv+!ExSefI@Is(SCHQV0Zek+?5ujFTVefDPI+Wmq>110kO3mb-V; zC7@;hJ!K`RMx+>>WqB~&9uxD;wgZKquWNgpnhk9^i&TQ*0*XD+p-A4I-@LrseS;`j zercM4obib`eAC1!`g}HWZqz29!aaZY*p<*j^imNKKEQ~4)6P~)+GZe8h@>Rr=g=(; zFUylTUb^>OTkWhGTF$!eUAtcT7}`BN@5jkGWfPLqPLWO6edt0=s=e)6-_FI&`CbeQ z{X;{0t*JJi z1iL>yy7*>ePw#1)^yrSAr8tShkxrQZ^JXt(407gx@AOOTsT|~<1c!d9j`(r5(O4vd z!5t?g5C|UuD7_qaBC+^*=&!3q`7jrc7d`Y;tqrdl?ZOMVSJ{(iHb=6cy&PP3IvZzj zCHU)PO_WXMZanx*PkH#yHSt9KPSTg_R??ZNdGpAxuj^q#f0ag^Q!!N*j)bm-u6S$u zljpqHi>xQbxuHucumfYRA{`@o23Ss63w+<(&Kt%`RE$FTU8;pI%TZS0!rDdc3dhg6 z8=<Ao+BFT$h<}b|s5I9@u6~N_VAhsiPDEpR$XW#2`9Z@S`~i-(L|V~QD&t}lby5+d-X1hJ^d3j71HTM zqG?7vm8c5-{6M{E5<41xmuP`M*!x3DVYRBRf5!K|5r#xPYN+tJx$JM^m&$6HjY~Q> zbH+NeJBCVghKlDt;>@80MIKO3INwNTj(a(#h7~v94_+xSG6+sGU@{4R<0QcCJ7T2n zJ$Pn2k0a+S1a%5?Sv@PFEt$-Vxy(A)0TRiNaqN%eC+;uLrDkb6u=~fqlCM4d{%a3T zBVs{kkTHedSYW>2mq!azsz&K3T5C|qdL02Rau5v=g8&T$#EnMY#3!}mkh_M}`_=Vv z;YLfc${!<84}h(=^VXfHJt8Z>P-Ck8i01WA`}&A0#)vf$nXf_vH(5K_~`^zl17K3{?Yb`~2A!Y@h1-<%MPp`gT&#k5!y6b@Z|Mam$ z&2+FG@1(KOJoi&?&{dc8nDtBjdN(?RWa02>q7|^0jKBMw)OePH&7H}x z$~17a8FnPSz#k<8p&)lk=LdkXFyd%V{#>iLv+b~^2a^U)I65ith%7(IlfJQY|MS_@ zb>9cMi8XRf+xKSb2ZBy!v?)`Q*o_h!xzeZ$;KFI;OKGMhC&h`8s()E+=4us04Ia7%Wpfk-Ls_Zdu#GER0FZI zkhKOeXJDRv81Z~fgIRj?Pood=%(f7Z;)wS?q%d2r4d5yMBqG-$4!brLhVnS1@ zQ^CpowK*A#(#J(fJL{8++y^%s8SMI2c=~FHET8jpgXPl8cC16l9)d76FSY(5_*PR; 
zR5J_P2ZKk=@a$pySC^nuCLH(vYF{deN>nYbG2K~6T)!FI`qi-lWKSQX)a4l+GHf0- z#!phS!Ei%}*KfC~%Lcr))$se_z2r!R!w!MW21;iZgmNzoJJKhuTafnM?v#D^2&+SD zG5y?#sMhlu)G{Soy@9ppU|rT*aF^7Fuy2pYId7#`c81_C)roh)Oj+ zPJX)^G|sSiDk7{3xdzR!!W1i*)1;{~t@~;!l!U%RnESg%MQ@M}Ln?|sc5Ig6mm$p# zf>BI%z|8g5S5JnvMQof2{>+}HQ;bMx6WQ082)Y2Vf*lY%JGiQHci7CSK_*DuNUh@1 z%eqGHDTEi_MHI+3R9d5-uSOU+Ikqmtwgp58m*?BLCq*kSY4q~xo!>$A_J9yIjlP9^&L&doroNUE%;oR)5UyuVqBKBJ_cY-Gu;PL2{g64 z3WaGsq22&PaIOh6QEN&8pa0lWKc4#GW^SgTvp%nzJ{EV&3<1#!OL@SZp_%@og*>@a zGErQw-Y5NCUbC9H2PyY9g}IFJr`Pr2gI_e?6`z#`yvE>D6{I9P-=GCEZ|c0qU{enb zz^|XYM~ZjP8UVxIUy=3)DVxCq(6&2LI~r>y{r~QZ$d_T@{|BhWDExQuba=rehEg;5 Y=hbon~uiJDDchqbtDQe?GY{I1UEez90I38J=+ZYZy#l{P(?IAkT!!wdfXX z5JjS1vu||y6VfM-?)+^2Ipx)_SI%B9&BBIIaQpvo)%UVIAv^siZ1em7ebD4PUd*l+ z?(HX*zxOBcCw70k@ZfIW&*mrPV^``oC0bqh=ui8@-BUlFqaKL)0k-V>!+%`1eD4=J zPT!WQU`F7aswv0_$AZGx*V6}^W?%ST}4@&6| zvbdcwbJ^E%(RVP|3-KKo=dY&Mmo0vt>STJ|?BVhqi=V=NZCpNe>`*F1wS_-jhf>2< znf$u&5%_)2dXrz%jxJlbf`T}XcMwWI}PksPq zx~ZE8SVD@7gF4~sTCQq({MC6yin`8$%{(PLrJiw~&c6QL)7y^aavtv6CEHDM1RLC}5y9j`Y(sa3)iuR5-LTNyT;uJ2;S zlkW&n)F+~Z&gYL@&d(&jCjL1S2*lXRwQqVrzjOqerK7wWIfNTSzL^!Jd2=B`qJfxV z=XQma#On-1Q8)UcKDVoDkMx8%j0#NxGH2v*w87;3Z~ueE9j^-CAS?^SzOI?uRffrr z#$OJtkE2UG8?whBzLO3v622qR@xOdtI8(N*C_+h1j^S1dl5K~h>LTSY^S_#No#6cV z7@Vi+A0WN-_9v2I(s+qS^bO?u35MckNh5Lka@l{;Jq0>m z-l@>Y0syhS=epFTxVFjvh}1>>ufl8@JN?a@0&(4;$ITQQhSXV#&ATC$VTpXZ5%Aj~ z)ZKsNRbZ4@>mtp|f3t@?fsI~6BMZLGC73i;5ZYgfqUQ9TMAo}HUP4rBSDG!HY6D}t zTTod>_p|P2zVXWK$0jK}!aL_AgsSXQyf8th)9{TuWX(wmu^4@DPW>@l`c$Sgt+r0pOnMm9p3^_#&N5b$rdOghc%AS(Nh8q(S?ZpUhHdneah(43;RMxtzn{HMd+)*WpSeD# zOS0sS6)D^Rk-B=Q4mFozj+PF94eQC%FEL(81N>j=HFkx@%-bwZp79Fd5H~*uPpuTc9pdFF-8R1-9rU z+L3x{+#Nw~Y=cHQ+EuB0vC~5LD{WRN)AhdWw9pm1=_6NH>ZFwjZKwWa(RqCrN!Ip| zJEoW`J?}PMgx<;P)IaYQ2Pwaq%X!GtFFK5DXll`4mb5aFrrFBI^|48HZ%wVF zy#bY^?#tGb6|UN+w3*R|k(qk(d3_^SvlYlGcMcL>!CV)R=I??jDc^kCHES(7j=Ls8 zYaVu>>*1T^9Q!tLKK#pMh|mWhPb|(MdV(li&-^i1@*F&>YaszH1C@SooBZnlPwVSlacLD 
zs>C^)xI5}J89U)mb>qZFTjIg95K@){u#`(~<#2^Wdea6Xa$f_FwV05zJ9+bAA1z)`di)7>U+a4u~cgM<4%7KlnAU0bh~I*7b) z^1_KqXm54>Db*#yZblI2Fp}Y5zF=!H7md=#pO$EatCXwmH_K^cYI`uFMz-RF6488+ z)q26;izU|=DgN8R?J7U~{oFJrD-L(kgcjdC7v_J@lK$zKep}KrsQ=U2SPHG;x(T7$GyoXv4c8)G^Zni?m3G3)U znMZz7w~45?qQf&C2)Fn+f+`)&A9-$dFZ+S7yiX8kZ& z%gKL!&&&xf)tbvS5^a%~{@xxIDW>`$JSZX72nJf+bce!-ESn7keReptj8HGY~^jtoBy>-i*Cy_866(tD5(vtl5-6s|rLozVV~@LHI-!?QGz;ZHlhC0^?5A*j;$ zMif*Pm;9B#fHb$yhxk<^efIL{NS$}JE>_1WsEAuOu(9Am&bKC`m}Mc!0&8**G8PDR z#v9qWo>BjhBJ%Y?`V$gSk}vtYe?U#=6{%So+jj5BPf*c=3$)ge)m7zDr55hWKx>i{ zO1D$*?X%?%*ZDg0OLQ^n+tu?lGR#jgh8mgp*NEzt%Y?$5r_J3nrV&!}o(HlG92!6N z1~#xr6R~6U0t&5)_(tT1pbV8Fb?MdGSY0*l$D9?{zn*huA_cb2vj*N;({5k`tVs-` zhGX)aS>r&5YEYZ9u9nH~E~-;SS3ZJ%Hk6W%9RAa&)xjeoY8 z@apnT3vtpU-^|n}_U|=B&8i_*K6!?Pq{}PrNKgS>48(cupB!0!8q`05T(!=utU5Hf zx(#SYSK)eLNMQl}=F|jdhQm}~gm^fC+m=M34RbTn!xxb5r>|{4Ls=!#5Oqa>Zbi~# zb;d{~Pi%-(cH7L#2E9o)&!goVs)PUM@~Sg+?tKd zv~)|E9>sHQ;%sd2cKAx#t1$iM69IJr2F+={K~p#OMAVs}TybLd6oTwl2N|@HSr-B= z%GIb|Zr5zfZ8{|Cv1KX3g^-ddLU0a879@{G20E>LyMX&y?S65T9Q`dv4g`KdZOqJCOr;|UskK@Un}aue;iMv0A~pGV=(sQT++GojdN7ffj|W#}T^oH8`%8)r?ZOTUffMUJI+*H z%Y^z8U1ut+gshJTWEi%UsiR{0;EEdB2(Uw&u6`PIi>##X$1K>w=Q$w>Dx~F$&a7t@L;-cQNSxUyk!wA`#Pn-hWyx>KDWY4xmTsaZA@$g@mFho8s5j~=Id4rd@h5$ai71SI@BD z!$3Cz)3Kcj4%*xl#S}_@}oQ zBtd>?PsYAnk_XwqV1I-iu1MdR6MIp5Hv%wVXtc{s=|)U?|4poiuL49eVdH!hQ93); z63K^UtTjk5JDgZpn}6dJD$+b2KZ4fwRpu{$ld&1M-?Q3JO})4z3vqIL)%u%s@s}7> zwQ9yyQdewIS{F&0y6pDt+sQ$W;BP?dElm?V_NzNe8M=e?X+FW_1yK)gDFOkQn{F$q z|Km5}rYJM{_3RvGxKt}zy6BVNEaj%zb~m@B2jG6OA;q!#MGxgrYGa)C>7r~6>~KhL z=qzM7hS@(Sy_Yba8fVDqEN#^_AIQ9VoQavEToW90_0o8keego{uH0j`$`~C4nqG(< zM}KdJztt>o*LYj6G&KKdCM`&~KwW`fKj*1|7aog$9uunBJ_KL>`AU$8@F!`?mSi41 zt@RNT&`KUSjN5*Sm!VVMYX#CjV9(V~Y_`U)9qn=sg2pvXgP&Diq6e7*z&vak#EKeVD$r+M}J zj;?oLkcHXu`##kSyFF$RP4|G8$@pEG78V8!`@r}8GFki1KN~jv_T&Ce1KXABC-W6a zvePLN#cVR{9}}eM39=czg{K=r&oS%|E({}Ju={E4y3z;0C(#Z@gQFWy=Gz7bjo&~{~Zdl<1@75?C$T-VZcGdA`Islqu`tX{=N zZhX5k@Y~p?DpDB~nbyMQJoN*Xh>c2+N#p4?Bqh%TiH#@CRqB;y^o6MkkneV$?*zBs 
zqyNrrY$fb({rU-RO8*M$I1^o?WO}{CFMiQ9_v0e;$fF?d5Wj3DJ7qBYfal!x<*5^- zIZ6G*a&pZI(qycT|GR0{&v&X=G2o_ptb5KkZb+h2K^sDiv2`61!gstyGr<~esC-Pg zLCt`8L%_3@_ST}O&!SKmet_5+>Gc2|g$|6_K^@nP1ko*W zNzEelC!pbH?*eh7+wGZ+rSQb-jIyI3se$ZBP3InxrdM-|hZd3Sq@Z2*O`kd$bm(D%ojdm~%$E6~IeEc*5wv3U%?nXi$0Y>Pgz8R`!)1A|Fg!4UWC!zWv0Rf;TmZAxtZ~RK{cfwld1v8 z8D2DktWf2%_MO$G(X6*~FX{rj=Q)3zey3w!Sd{*@9!^FOI$7Cw85D=6+cHz8#>7Mg zLOWIBmi$FxV8mvBX2fC^(1;4bn|@>Vt%=_SHiXVZ@QD^V~5r z-GhgomdR6N{n_XbK1CF>TMn`oZwdD6P2-+eUCU1g6Lj{;6S2GNCun+6rCw36`GoR^ z+Dpv4hjs3_s^|6Y)JL0^)YbMB7&qRs-{jW>P0frMX*$v&_eRRW$`N4@I=^;WP8RjA zig#K}6j%We`1iqi3Bbmy%S9e6)#yaOmc9( z^U-B97y4yi&Rq{{M(dhM!FD#5rFo^J3t}3>w5Hc%JF3jCXNIKMK~vPL#l%xzXg=hL zh7K%QmioX5%^HZlG{@vuPdhiWArP(Is?i?4^T)+yJlL4Sye^Kk-DAno9;REid(Dhz zlD3;&sl3tS-n(Y;z%Rn)S$+8UvY8K?1!rfz;9OIX^s}edXmY|xbMc8mi#wzef1YOx zep&5yxIT+C?8D1BRchSr14d=T%$NvAYufxe(i(4#_s1`hk9#}krH*KNeb`}=G|uMI zLcVW!JrM_3W8cS7i?5nIK?n*ER|U9#)1B1$m?Z6Xmx@1}D$b^vD1RW_?F=J+ZD)b2 zA8%ZbR$FrZ6xw$7}9`AG0_O0$`eoF~SwF^sD z3eU48DJWG`z-U?P^49&fuBbBHdrMrCBcb#AgPUGAsrq9w-@qA*!J2t z8GMMqoFZPR(EL8fVOBWwx9tI^8C?0VWn<>#@8x~Wp?eZm^$G{+yptw*apxBf#`}b2=oRt!iMC>{v@Y-a}X4==?Z?PH8&x&w>>Lz}s|L z4KY3rm7+baa|}sc;zQp`S#(@)_X&F*deU+rna{xb%cPqbp-%IrW<=Iit|{jCwMCw-P0-Pz*b zOx@IOt`f!FY<8JNF!@!YX~kOHKA<0|d+W4DxVoHZw6xpUR^CaJP_cRrLF z*!=Sv$5(+Zhf*`zF<9=eJp|XQV9uM688uK+Yo1e@Hx7}<1$GALqebL7$puShPA$J1 zvSs59?5)v(6$6D;15w%6ZE4-me!C4O6_OVk&73Dcb#B-C`_1oM%4QH%ZyF4Y(D7~o zqKW!tVF^FZd=dHx1dLAcYie7p;lf}TfE6vW$f?kIBKL!;h{NEjq4Kr-L#d;-wwc=< zJENQ|eckAcPraHUBV?H}%ueyrZE)Z3Gl&^Cf1dF!-4PrCoF;PX&#i~~Z6?1)+;HZZ zaR?N!29aSCfC6sybE@vuZo2VI%aoaCw1(@uZK|$(8tnLo`%Cds2Bm_bf@HvLPzv!; z2AAO+;b4bPnS_ESM%OkdwlIAsE&mB<8F&lX;K?sVrfi1HVBNiPIjDqy$m0I7CDiUA=M z8PT;9@TChvL(F}>{4OuLSZZcnw;UYr9xKYi3N;W0pml3NY#QQihfK)y%KOX&j57LN z2q^I_4g+5FK%@15(MIA+FF7V#W* z=8c|T+!MCI^Izgu8vH&o!*&nj?CUQvxO=aIiAy6Un9(-`xs>0b%jUoO--%Fa(pH>^ zKX8ND*|@|!!(fKkhJs36K(<9k%NXJ~&n>^3gQm;hUn2P+tAYSFl@|cqi>kcCbh)W# ztLyS53t&OF7J{OXy#S1!_ z3CRiMZJJU*p|s)m882gt*UL}eFr|26up)TSQs?J4>V~G?YJYl4FtCBlHT@S8X_wzE 
zrexa+kCztg@2~uBiDH1kdaQuu?P*^=wJJgbcJ6ij-2}t~uae#mP%ri%d%k!>^F#E5 z^-@4|&Heb@D>=mm-YSF$(tY?ZcpYjF$PX=|Xz9T=N+`Jw;v$rq^jxYa<{7wskN&uE zgm?1}MB#s_(jG3=u6g$oKx~--EivU^Fy+DWyVs#T(*6YzZo_&{4nC4ydN+w~!#3WV znuz;W`n#`>*qdJeLv~oo(j*2T^#8|5Q;?5+SBe^BVc)49{_lXt{{Ljr2ni~dqa6EX zrR8p|9OpLMWrj`~hjJ`o?OSO{6Jv4_>-<)qf+q`6ZqQNt+j)c1Msz+a@#+6^qUu_I zBxR{qsWxJa1(Va`$7_B}itb0>2vjdFUBLEO8Xy~l0=y(S$nI~IsiYN4l;x521};zS zwoIAsGx2(CNz#sY3U2j1^Knb=vs2VhX4J!%9R6d5l;1!i@8zA)T`9vA?C{YKwOo(T zhQNh&c&HQb?;ygEB6?Rtg(Gv)R zynT;gQa7t0)t%I8WHh(kgsFW}yFLghg5GnM7wGF$mfu$F#z6S^B5ZwQkuD*p6OeU5 zy8JU`GD_Yct6ch2(NR!BFQ5R3#75g)E@jKKQArOQR_g5vyV?p_MSukN5f^oFzQ(S9 za44kRpeNYs4v-Cl_kO5%-Yty#a~op5c|)IMjbhMguY^TUxH}-HzZbsO?JM7r6Kx{j zk$s(?!<)r?iGA6puIN(vW_R_)2;zp%CQU`^7ZP`5-4;^iehkiE7E&hbpbG-%+eeLv zal);MN>{h!nW?_?9;N6%|n+XJkwe; zBGf$YM?T7)EAV8B@bFA;*W|~-XCEp(s*2oko{p)s%dyW5a~kLY9Ep2d+emhoj9vG5 zOu;`E-ugxI*f==qrQ7)ug57`BdKzi)aHrPgC8GD-0MS&KVVb{Dn3_I-+h(NR#=&(s ztf)>i>+bkSW=rN?8?f_@&a}dcH_cjT>n=oezEBj=8>sD6%218tP+Wj@=IkQ_45_iA zYtrmMyP2?c;3-YGh$Fo=`U8N*-w;QcT{!2fUR)HhppXz!C^k~~TusdQMp{4axB_=u z@r&T1aTACJwx6NOZ~R!}+0wJWxA+=BYkc7$NYV{v_Rph8dQ@hPKV80=UWbG*HEXShOB*iav=vD|wq7sluAHWFYOF2i z+uN3L{StkYn9@Y|YiSD!X$R_7uU&o7$hytCI3RW(783Uw*^u;*TeMdE%v$~VQ?cOC ztT1nWLvwB)v5zQl<#Q1Sj1-T=RA0c(bX6G$TV)|}>2mWwok_2LyapbdmAqOPy0^+} z690`a2VM#Xn7-!GKKL;g<5^v2fgntcU8QOFc3dCPcr~hKbo2xdo1~5k{O#n1d71C4 zA&c9@b@CCuMl_VjGN0>xotHvxoWz=R5)Df>aHY|`S^0o z;kfAH(d$svmepQwMQ5!QMwS*(ICHiUF*lf(hN_U6g@nw)Rsa<#81hFdvf4L(Z0v5~ zo^JqrSeLQ=G=Q*$Gq%DaXdKVWtpVUcA2dfunWry@%)|Yju+ahpKwf4>uw%{&Fp3w7 zHAZn9>)Ee3QCznH(}=qZ*Ul3)K{49LX_-z>I7j=)*erxC_IZe4+A*eJ!3*`#?5^l&$OCL>lU?noCL!A(0Gn}0p7v2ZMc z2;p0rw3mN@Zhn>rC}c_f?4jQz)?en)p8h3*8&a?1(E#ne&qA*!xf&`Tv}(cOPur#E`_EBTlFP6+-+UN^NMs z2?~-y&Kr?DgOrXHB1lco!z7l%))Qw1waO2w1?qX#wVVCarHh!&nLB~%>s>_b$(|vX z1|bmzcl{y1Pd0zV>;WvZEUY@H(xJxt<|B28HR+mI#F;T=$&}A0+o>FalmF%L3R?tu zBw&tHD{Ox5>>G1aBoie@U@KFuiEB6naudeEb08n5__+U)VIb<71aU`AghyXwL+p?F zuS%21e2k%ejM5?Dz_n&xE7O@ae|(Ip)b3vh>g}7v=@kA#nH=p!)MuO1+FHlrjP@j? 
zu|r&uoP9(gRhS8xDwW`fLC#g5ACuZL6~A!P=m~$f>G?0_%pCUKdBKHmjplZkpq(B! zX%;*(xDc66H|=8tF|M~wpBl<>m{u0mJy4LrcYWU z#3wP-)!3x>Tps?WSob?U%cVAbY~`%%;k8aAW$I@>lvCSkM28#8or6{N11({8WuF#l zj(PbUoh=kc*&Xl5E2qjt!ENO<>*At1Wb>=|$C|lS?p0phajUzBXN$TjP<|rK)ny9Q?VVJ0jz!#56f^OtGmf^y&Nf3jjB74k^wJM&#;@n50jx z`C}-hD8NGUd@?(IL^|r6$gZ=`CD%0-crNFop(ZhU{Yprvu;xpc84cmum zlr_5Ql&Yo-2dRK+dI|)1r4>s*En`E-HJ}bX58_Z|!1jZ@$0eq(?eRcJ4HBoYs-cd3 z)wt6X9s}pO-Z}1*^;E~Lk+18wtruQvOS%J~u!HNHJgQr3PSYG548YY-J!U+Z2sn;^ zrN)1YzmmzBxgcVN2-F2Puyovhr|i!_v-LHN8*-HzqYAj`(8ltvc%(XH-CZO_Le>w0 z%T^W>2<>?WjdFvzA6Rh*)f9n<6+uV1KCsnDjDwE!qqdkC91*q_V-$&ya|$-H!Ch%)JzO;b z$b^C#L&OQLB(ntpy?#(W=q+8$QAAYoZ`8B~hPb(_+wBL=4_r^=@Hoe|tkXaFERRr8 zpL}b`Ji_D6`a|H-=jNA&nsPpS+&TuW?_=v+$H)&_jm2KhIiRpOcmFsy{@Bv!b*3@@ zHrIiQ-MDENer>1LtC1EBt~U`K-TT)9V+Y&@-_rV$83%03PfcQ3j(VLlH(MD$8=pq& zgTTTn`5>EN+sfl*{-Tgmzna$W##vU`D?+m5aN+4nwDdwMiY3MTD z9C!jK$c+h(5Vr-^JiZ_fCB`>n;hHB;@&c=s+I?2IuBa`Q@)60g*WQ4+9g~!K0bz-} z8!*kQx>+SbLeKOTrpQ*~?c@0LIE$JT-jPRj!1Kbba|1MVrwC~|g@e00gA#DSDG#Uc zu@kn8me2p7w~%+L5-V@g!FE3uG9U>Dyn(VNm2`1>1q&%DZfOvdI}@i}o>!u@t9$^| z6ClaBBhkLe*-Tz?J0b+hEhIoxGgyBh18W?=5~ut3arPvfnCK3m_`k8cO1a$;oLN($gjFR` zVdNuhZTWr-u{|%iJu%E)hEq*kf=n{xaz>>MDMa0*1l8OWT?dSQdmd!%NyQUc=5Rx< z(~yF)aKvqi$A&kq^5VO6S*!oLrq{77*a=q})nR1vHqm5MtDmTVGgriviSmTa8eq+~ zp=Ou}sF9!}rF&-=x7f3(^^UU_1{gtir`R|O56jPidr4iX-bdpCuT56@vZQ-eVzy91 z09P3hB4I$^u;Y88&6;9L{osgAhS#Pf!oU&vV_ z>WZ8tAoZKePnpGM8r%@WPM{A9FXdgj4!Dg4IYIO&2q52pYfR?}YCVTI;EVnJR?y)@ zz_G{_3Y1>s`e_Qx;nr@$9|qQ(XDSa=+j{{$0@MQf>3%%*my`l9=RpxTcLpyWa&@>r z+Twi?R@fFKoKUUmZm@vzcn~|-mNE#m4kA#`$Q#Zf`wBRJOtt~$(@}rr4sO;ZJW@$5 z8)-^^eJQN5Lmj?}NXR^*h~pD4=SiBofYYD>Uc}T;R!fpVb(a7)j|FJW6h`)>GFa=Z zDvzpg^{+xuvktd4Kc6Y^4);-rMEGY!PYyu)Yi3%!+ZbjE2kcRorHF1xPFB;$uG##u z8EVg-R3j#QKl|(BM5i9XmBr7At6mOfLsZ$N3m&4c2QzJ-e_|u**j;?Zv z;anYI5uDL9YR$EP(a(OE+s+ZRH$NxXWH;SCh9^Aw!0t}rRf=4+Y|5-*aKQ>E@bD(nM z=DNHnzN?U9Wnj(dWKeJ4ZE;Uee&eH6A7 zQV`Wc%A|_!TA9sO=8U7&>C!agw}dovcr_DeQ&uw69|d*x4I4*X8XESE1H!MKAt-s$ 
z5G1X$xtqYBu0%y0@I{!zv3VDhEp_?fkwAfCF<^ZiEy2N5IGkE$Ai(H8*60zLJiqGk z69l0qtan{g2&7F!Jz0VGz6zAyGPJivwLX7(_1pHt_Hh~f^zZ=CN(`SZP}MWC%sJ&1 zo;91UmU=MvSG!$hQM1a+P*$SIL&_AC0y9gP<9JtwVd6Yl>U*kC)jG`vPE=)#l38s5 zT<@DfmCPEe%i>?8?1ONYszvm)uDE2gi%qV7l!B6y_mMGqMPrKzUUnZ@{@0A_Na*)r z$r_RV?6Fd9TOl~a6|#n#do}c%dqwn{=~6~TGH&NYnBkI0S=g;0+R>?1xq_+m*Q^@T zZb@zyf8?lOO!lr`I7zb@4|mwg^KiuMkT~qZ?TEds@<>Ga$H#s`{Im$aj{g-)aYt!~ zil6Q26tdP-yXAal1NR@^ZTK_2BAHm#S|vyhMfgXx`fJi{;UNfBO(g?&iT`PtY}=65 zRA})2z!O$g>uspv`lGY#*5?mtiqe8IfWC*if-FE}Y0|lzwW6oFgH7P-*|6_ifDMi| zTT8h49N^#yDitj&d8>{S9!Ztia*`@tk4ekRTc}iT;6>#)=(axb28cM3`mSCErEiQ;DidEjyl8v>;d zUskCms>M8{=t*~xg*0$j~a-&na31;rP@m#ijA1)uMO96*dWC8_n2hV{!{im zu8|_PMeqjl+UhYx1o^SXm{eBRyk0(4LQAn$9E=9Ap0gxPVik99Au+17s=zY@Q6aPS zb%kP!h)9X(a(%Yphy|lEPZ85wE%;`lf~Z@VpGlw81)OfAgbXMCEPO@~f7lOgwz684 z-`k>gi0l~1%c|#lAS-v~usw)HNzjT|&*QAhI@j~RYOePL*Xxrigz!z>G!XSs_~5YJ za8OYzs|XI*Dfc&j?9TnhHyMGN3CK*AMpjrn0_R9#2*isHn$Na$h9Fp_a5)?usZ4*~ z&CNz|-GPgLoTdq3cnTeoNRBR@%;KHTt~Pz_lzjs@mBw%G)R+p*fPM*Va}oJPB4P*o zqY*qzOCF}Q=F*U@0~KWXfV6SnCo0IJZo{r9>ES}zfL0_@ukqzP0zN-*#%x*9f)laf z(%mjLhTEFEtcjIaPLXXKxO^quNLG3fiJGw9UDj!`>IQaJgCQu;w#kzNYl9jG@&Pd- zFM>O0W=8Q4mJ5!`_9A1$y(&k{N2)w#Jtr^a-@%*4%%>!2UUqx3S?wFk+cpoN4gm}t zdJ~yvX}gQ$RiWJdbm$vI?8S*{C!XLExY$sarGDD18VcMJ2`}JfuFKzBf@SlL4spFA61P+nmE1*bmTK+B7I| z|065VXE@R88oSCjB{2N$(W}O;6<+;p*YO?ba3ZsYlBLU%2ez5Ns{=pDo`n@T7h(DS zarMxV_jG3^eQnNH92M~Vz?0ntEQQ^qE=laC*>rV0%BK><&>qaQhT{kBlA2M|*~KMH z&Ke<#9#uTye(jE9fB+j(Jtea6=?AxxAGbb-<8HR(CpAL-Ma>0$rAb*$XR2{OE1nY6 zQ}N9>hWD0ZvnV(L+-^l*p4vzrxx)&XG&Uc+19yn?V5+wecA_Fwnr$wgzTkG{H@TL{7k>e(eDUmh=`1mneD=7P~P(D9sIeGAaMJn zqk*Twl{)|D>|^vC&ukO?kN%<%FK~^u9_ZsDZ8?f5i+e8K?zoj%!oEFs3EcMtDTFln z;3H?FuI%(R&E`(OOv^SUOF*K6nmI+e9viO_)k@L--6nb$7v?pl2a`N94qWTIr2m|8 z1XfZnPg=Hsb+H)1U_O~2P?QLYC2V=!Bm&e!M29hxR12D9KVR&X4}uuzdyPo0q3{AL z^1$6k@xG2ODDOe$D_e5l{)z+DG0-2t(B4K?^Q6|f0@Y#v(?KX!QY%loiDS;G-I-JA zF%y-r+Oe*rGjr}*6PLKeF{L||1>0D^8X61KM8nT>tTnF505cqJf8-{D-uDbj4} zzRIrx@5BgaUuu00EJEg8LWlrWn1sw7^po}EP!*Vt^5D2jS;1DKV>@C;29+MT%To3_ 
zXWS>Kbm{m@m-UIUMafp82U4~wsu-(qBsA`;?s^Kpig1fNKc5d`Z+*7&$_Ujtk|NHJ z;*jrnFp1CO9B`MzO(0I7PNNMj&H6$9Ah@}mo=lMv{HuPjuv ziaQ7?9}~smI2MZ#ZKQ0BZ%g6W#mJ*DgmeY9G1S6w$1Bo>dh3*0MyX zq$lbSAfE2LqEt;)Kc}|I!;&dp`9E|+mux@`_i&|U(%93&Q^8?&UMVuPW(WPsOmPS_>;#I(VSv zuoRKaF6#$-bNH`qBy4gH;^7l>h*_j-Swy1`nr4f5v`=EN-oa2}r7TMxW`&CenG|!+ zmMOv{F&<>!2KjfCfsMe`wn}!YJD<~gbx;5rM)_a^x5Oz5@pmEe-=)e_SS7Hq!*Eq( zvpUgA#0m!J<=P+euTmYs86tX2s$*U!Uk)b*QmabXxYMc=;&p@kaJWJ6nTitv;=252 z&7O`5dO5o!$efBtnhK(mW37?_wJU~973JwkAAqfg^$r7v3b+Ve)abb$t7NO-SRe3T zLZxeSaqBP9(5;~Dd(i0M{iRJe$8h<5E2FRU! z4j`r?&bnYmkHCFFz~sTqLc~>1WlU`#2YAeQGEwfh4Zf3wyW~-vf~j;BrR3&PdtZHs zJ0pTe-^#A=nDlHCKa?V!=CjmUBdnURYTR(SEh1PXLY?B2gI5MTQHgq5qA0ZBQh6X% z#35k7n4h>DPXH0%daCnImq@Q$*&|J@yzs!2e4+==($|I80w=Pdn5r;p%T>p?jy9|~ zURqv(6(0@n0BY|kyI2--E4fpyaq4WwmOFq`AQ+Ih)ssi6psV6YPfX=e@+`mh@sm8r ztw!|*MXEPOWVR(61D zN)?X9IS@l%uIjNc+WB}yigvoVUnX$VYx|n6Zgt3%frOR!X;pexK{`)4L>82m^N`f^ zEBT~PRj!@&L5dF=!Zdk;GLYNAf+Q%XoLSsyNNw0(&6G|NDh!q$9zk@l?qmeHmN2dT zF%J6^bj8nS56>RZ*|jATs=`Q=d||E`M!t$X4w!w-+h>Q(Gc>4>WbibAnQ&$6gz<4g zN0ct6v)1#oNefOeqyc+}{bXlUhvO3}Z-F=ht0)?6XE0jCc}&O$DB}iM-ifCu)(>8e zqINf{hin^Se`XITp~QE>k2F-N+|(+LC<;S76b$g2fP6nG5I;$;N$^Mrw3je4pFk`- zqDZ`4=YbQ@OTqrqTLv&h<%8C5Fhp~7WzsF4)WHJ_Dn57cdEaIR$EkiwTJ;clkcoc0&2}nt(p2AKf#9ZS`FL`@3wLVUs zKa7%D1y}!I`3^~DUZigGNBNv8>98TqeMvCmOy&be^DGZb5B3b{`+!{Q`)$TQajL;vDdb4$eWtf>gf%D&G^z>TW}F^z*(-~w&=BTFryDB}5<_ht zN??BE3(s(xwO~o*pUr-d5H`Qh04Ov0c^6MT;2ZZo8RXdwFR$6(568Cn0nh{kDqe$Z zEl6=xW39^6Cm20Nern)N8k)^NHPpI&!ySYjQG1mZ(PZK_lw!xz?4~Y2%x^!1j~>N= z49of2^DkK`2QYBJtE?s@wH)GA?}_iV47@5BITeO*4RiIOkRa zTQ}S7)uFXbU?9C)h_NE9sKc$zFO#p5QFWDyrZUM%$H5Rj)g z6)H*_42c?`gQF?}oIZjQ?RSu^DxyzYiRB!x3}N6AE_&J~oH{6OZd0?L`7o`W5+pgHYOjhCC`vn_NHeyP_19f8AR!~?)cK8E%YdXk! 
zpB#V$>Vlk=e>4tM?))a-Co7S(V#3ElP#fDgl7qj{+6(Q)KM|5XL+vti3VXMJ=md}v zOr%foMD~tJI{;^if4xy294xU~A&1HocD~Alev;l$T$7GASc0s2~(%9lMcS6-6s%f>IZvzCyhY zRAfz^6s6i?er;wx3*WIy=c)q}**e9RmhZe`Y5a0CzqUEuIpJK6kPstk`4jYhq1tO{BIf4UH~h8nQH}MijUZ8)putdCpc`8w=Qh#0PIM#w&1;!46II<<7{$& z*EH)d#!BX8)FZS53|8Nwn|mV3f-rX{lcBCv{j&|lGWnUUu-kUCE;*8Cuo!;11f9?- z4u*9+pha(+hxQe?CeMtqXmfH@eYk7u)2k=g)oPI4IufT8>Pf2A7dG?ujsq=6C88)9!;>)^K5O4dJ$rM+-v251P| z@Pp(bEQUNY2e=GsY_MTpQZ;xH%-lTc@KtsO4M9|mfs${iyK}d=zZ?LV0U+;SFzlY= zkh&X;8)GRsUe41+jTTEGtUlwVTk`{^+v?zm54se7;6*l%U#RU*JQ>|_Dyd0mYOv>RvoIP^X`e z`+-W!CdCrCX{zD%n7_Lh@UI}l;Hp5%#OtPq5Jpz14^UbOSqz}&ntHN+ht-C4-D-f% zL9eY4|20uCnjEB~P2hqpCi51YuXlvUN|L!RDae8(T={@i>ywyrk+de~clhkT#`CpIKbyuNt&M;)jy zZJB)MYb`KfhQH4CzbxDxq>0?-^&AS9yzog7L^kF3X#Lb`#`ZmKM3ubw5xByPsT}OZ zOE}Yr;MB#JLjWNB786#X!}R0DiJsWXQf;I_R{+q%7h76^57_!b2B5WG$Pa$wThA69 z8}?#mdU7S~{%(~bK$o)O`WW#TA}Baj<@Lxt*eOsH=?CPR2lbB0pK%8!6GCB_ht9AT zK}xHL{gR6%gqS9(YjcF%T+B$~rJ-*w66PU1_XXh1q6Vam5n>DO6Apd4H zV}2>3^Q8h=Y8c#hoc7Qkk^MhbpEd#$7eMBgJtZzj{Kvc1=| zVu1O#!=~BI%UL-A1&l{{ME*8b3oj;z^m?8 z7pUwfMwopuX7p5ri)u9fJ@uG*P(qx^ubA;*C2=!umgjee7vO{f_kCWEXfQDlBGsj6 zt?O`y%X3%B`UPlTMW{+u6nz$7w-74Y630+X2P&fZ)2rU1em|xuAjlfPA{#<7;BeCA zfH($RO{`OV3`CS_T;nQ%pvNHy!l1Y-Ex*X$_*+DHy)|RLC%`!auNwmElot~KVhQ>H z0G;`m6bP-smrWb0oQIATOV|3^&0jGvZxb9u*4;&@Z$pU8-rBXU*#RH5lM~Y-k#-C zF5q%TwBS6rw^0QpfzAd8i~s}m_epfxEx9?k)xJkufR_HBz%cWhLC(bh761^DZ^k(k zo@FsUp|atTJQ1PV9gnRn-~%5SKvFQo01~L}Lx|bjZ&Zm11vw%HXc`0R|EH)eYX~xt zi@P%f*o{@5O6U+D-r^tb;Q1sk5)M*MC{B9*RetHr8YZqQPcRP*Hd>MFYMF&$A1L+% z852JS&avS-#Gz-1B<>;-sz)uv&FO*@Z{XK|nQS@p*dQqX7H4~pnG4*Jsev{x5x6YF zc4obH1DNnw8u{$tYS-x(XAcLLRZm*~A5;W6SNKy{tiz*y2M}x~Yq#VTx8aXO?CBT) z(2P7lLD=tNWv4~(PBc7!>i@ifzB9M^CxH8uCME>#u+PoiOcWdY*+mcMC(-nT%9eWX zn@>R1L*#(#YDqurfjzJV{_IJj)9~cH;(T%;J`@kq^i2=zMBQ888{->CiVj!Vbs z)so7EDCI6r-FdacLXmyA8!o1yJn(00l*S1&*-m zaTNfaf@F~2-L8!pD$F))v|!e>R&z3t#)fC)LEuz^mwQ_|TDZ-#DsNZ-PV zvHYKo;|^If_9p*^8@BzQ+`S82Q)jmK-;SrX9j%UoN*M)bq>2TUN)!qqFf&zXl|m_U zi3rFgi2_13BoV@;j@BCp2N4hqPkrDmUqN|C}L=3L7h 
zdAdIa(B-YjaaOuzaon|zYs4tL*P&ZT26;)+0!LU9;|1XxBW^4jw!a*8g?)4E=PRwqexW+N0G4d&nJOPZ{&t5MUHx!;7*&26m;o@fGfB-ii zIMzyHpuG0wBWvXzs5gTz1UWbvH-J|wVDtniI5Jm9YwO~SBLt@h_p@zsS}d5;y!Y)o z$ng!6^`spOEd)@5D4BH=P{Cf4t4z5O^u3N9o)?};BQ|dUp0cPNUp-Nw@|y%!ApqRk z5*Pt;EYLMAZTCaTwU3DwTFtG5BcO%=`zD|io%18$2PufP$cXZQSh7fJzjxBVRXvU9 zy~dLWH-3V0beOtla`5~yJ}m2VQ4}J_8D=Vg^>H;8bBF@U27of=HG4(aDl8@=S3|;& z3Aj`oDUFfm$jK92hptB@jz5CBfP1E9&rsNbyyuVt1f`Q9ap0CR-2}KB% zM?M*JgQt+M@~EU?Z#D=4gRe7nM1uTX!0Z&iaOld~9`be#Ui6D+5aau;%DcVsSs$!S z{+%HT9Mq{Z1H=;bz#4?zBYjse8v*T1v|;9lE5jep0JFzkOu4xBkCF z-!{9B;3kodJMS8Buhle&_^e9k-c)fcm8e#=e7WE#$KnC#l)1gqV}{_^yXU&IkP_ZF z2L5QQ?+}>NY{}uZf9{V_8QbxW&(G>f?(W1Z-r9Y1=*~yWu1^X3^2Rj(j-b>UGu%ZB zkcsb4Kc^lh{7E3$so8lf0pcd8jAg*ox>#!o@{SQvXnh*6nH2%z9?Dadl*_4p;o7AO zbp}{8WR!_;P(U+3*BDQR?ZN2L-IW1SI}|?DM~_Q4HjD#8G!7o@pcOlbk(NRO$s> zYgxo_HNhhgm2*V|0K*+gvzf%Nl@nGw{3;SFrcf&J@n=J6>Xt8XUXiHYlhjrd;WO5$ z|5IpOkN{@V?&`Lb7DJE#B5k~D4O=-lrk!7<(L5~)5Kzc>$QxM)OFbj0{$c-THDnmZ zy*FpIgo<)AGVE~aC9xP*oU&td1@gr*lluw*E3Qa`xV059BgTin!H1H@ub}h0I0MOB z2}UlWNo$p~?Ne_!OHJTv4ompI$!>=*;w1|dgD4A~#@Xc-5O^OOZ+wArvxVv{PCVokB-xZ%eleXr zfV-w%Zu$F1Yo7=9!7^cM-6u8OvE$);E~SR;0lNXL+1_3T-Bfcmt9sJAXk;LE(#Tm- zwFqXgX9Y&GJ`>&YM7N*23S%=&sbH4@;JLMs>a6WP6m^NW(@XEn$+C_uZguT^sx{_VupFcd)LCH#R;>Nw5!2td{4 z!Q8s{S%iOxfVu$Y8VCx)e2j2vw=<1_eMkbLFvc3-?c_lUTZI9uUvlWfH8OioTSG7rZPUUuzTdD_gi2Z=+Tr!2ZgvlBF4E&2)SaqFEd>7o>eZQC#)`7mt zZdKHe#I>^9d5V~}2+IR=(i(aMrZta3t+RG_joE96RClb&;mI3$yj?MfcHR9Fxz1$a z_CSaZvh9x-#t3yO=gCt*WcLg6AhZTY>Mf^N#ETb@+z;ezV zJY~uSI~$-~Q(DebiG^Iwy(-a5D@SpaJ{tC{AEj+j(^;2QDaVfF-^vfdt|f4a^@ntk z71NU=UN%`ll*z#<;1oJ=A>u=ol7(}zt5+w8?*sdz|81<*0ssoDszsu3)lO6JLZaG4 zoNcu=Z5{+}NUt-X-^6Y}YbFTBVF+SjjhGt`b9-A}8W0SB&$h{w9)g(CSC!fewDVqB zuWD?paW&K3eZXf&tm<#kLII$K0Ar+82x=rKYzgZNY?anV9Rg|9UnM}e!310mC$rX` zO`5Q>k?h~NEvXe2L*F=1G=HMi7QX1Ynv@FOiBZYNqtvQ5F8Fb8Ta69P{2X)btlSHb zk)IzILZ|Puw*SK(`gf2vcd*1xi3W+n0%8O7P<<7JgpQaA~1IFQYf}bQKE#fpQD|=X*Svjq=#D@rT+XHg+=$u$}@?MFNs6Ty35m6#j6{^?o 
zp7dSFP=nbO4E!#@(amvx6!G7(h)*HvrPiun6KtIJi15erMDN;bSN*x^*y?EXponnECP0 z2VE8T8vIDq3=xHSy@cj79k&usy_0OzrM4^NB7z&PkGQVd4>Pp>gO>=l8Ct1g9$G`$ z0rnHuaoI&W5XlEX95N{Xqx03XR1mS1>9OvA2~G!zB?>m1sUF zE#&CKy79f%W-v-BJ%|!r%81C5HhRrcw=SmNqZ{j@HUDNufs{LQiJx+J0&o2{^-jqqI zQO39|tNlgRRm!#~KQL{){k2&!(Yn{OQF>jo>{Z>#3)w5qob!-{K)J#AhbVSx7|wqr z*AJ%^v|ZU5?uOE_0q?CK2$VzR?K9pBsHsCh9K}>~Tg2dMMQgp}85(Vyu7nmY7Gwaoe%K9YVU*u&>TLc9p4K`xKnQ%6`)g^ zMf#)>!ZqsC)Ut9=a8}$Bx3v96RQIQp(0!m4+GH) z#JjwsEV&JDo;2&MWPnA%l-AN~VnI4Vb20K^ijir46WI&xsWf$M5d3y@vY(S4$Yr=n z-jSmylN>9d{@FhcLfV$o-~2F9`TMd6XJX`aYU@VYXy8fReCsnlkR7ycN6*ebPC^KO z6Zf@Bap^=k52XQ92Q1`elCRF;oGZR6ZZn4Mie6SqrZO$Bkkh)DFJuZPPxVeO;4d@A zx+ve>F)H+~?I?#-RL{068LS2TSFh9g(A{`JeXDc*C@^8QB)k4 zdEkfTR?H&NB&X}p+!h&An)|9=IPn1K>LOUxSIsQ&OhA1x-;85UPPS+QXOW=@>sCV= zHa<$yW`rxP;MVBYz`l8qCQ26lCVTruK~cC8XFq+m*MpoV%jqifu`wj!P)=QnjrKaQ zMLV@COu5dS?#za*gq~w#e6oaB8(>*Y*7d4TknxG`#AQp9lW4I(3VC&hx95WO@%e31*T0G2qgrJNth zg1p(<0B>1`JUyG$qNbh0{gn=jv)fjy4b6Kz*XD&M&idqa{fQQol_~tw(Rk%_Pw^P| zk;$Nh$Q!xR*jC7;bm~I)UH7Ssb0c}d?s=EgGke*{g&NCv0`O@ZEG+|UaLwW&QU=(c z#a%fq$Ft*h6!(N*Kmgfg4|%#rZGd3HF2gI^Jl%t%wYsZR^)eI&{g7w`EoK9YV71H9C^w}U?=OVmN5e!U2m7+!b%}nWr{;8e` zFQ+y+jhNl$p*E)U0g+B1D{qOzL~WrP!G?h>rgXdWf=AWk?f8Clax75BD1|ymg|d>B zu1fJ3sm#^=nFEIJ0yNlr<;Vbzn)2_p@~hAJyY$0d^;?bdJ!#><2d55(z9Ms5nD7K!~$d>217}LwM_S#Hc z&1@FvrS?s_k^n?7Q}#w}j#4?xV238C)425^piRm7uAFlhwx$Vu;gba%&x<1(Res_e zt*E=Thfen}WC`uSzP!tep9?%)vtQV0jt9-t@4R|uNGAWI;o{``Vooil>9QzU(9fxs zaB$Wrq=8xgm?VMaPR*x14WC4hcVDRlyB+b<(2=meV~*vQYE6rPDOYoF0hT5&u-3_l zkHwtVSoj!OHjW|pEO&l_eE3-8=|e=7+kl`}O)J5i2J|hUED#Y{Jc`*(u$nvqNv;|s z{NZYvzbU;s8=e~ient}gjcdxi*+*C6Lp0@tPQvQ^Nve4c_}b2+EisR5{sh?OHYU=* z09W}RNf_0PdHzB3fZcDs?5_#8YGV=CCAxHXAb;ku*-8j3i3ICxfVhbk zvp}mIS@VARlWQ*sS?Xj^wEJ-s;r-7?kzGhC*@09<6fel1C^IYK&!aUy<{n3c2Ef2b zmF0TOnW4UlYS;`M;fh#<5&I`kGG&x0f&~=jEH4x_G=o;k`lCznIAj(B(T;^8T>Pd? 
z(3u&cXGkCD2Kpyd&AR%+H0(4|C7O&AORl{ljQOX$#yzG0&&LoEUhCj_Wc6(=1#})- zEkgUTk609=C{1!v)O*0aD3An)2bilNAoRZpBjP{6Pt&3CCX$I%O13HDUyH@ zEg*Xn;LdljXNHTo#Jwa_fe|8JH6`^|W34;O3;n0#BuW=W*`$LXk3-DSB0|L|EE7{( zLQpS8QlmWY?kf~-W~*~74pQSHl%6q|dXS6X!xiTnv#w5f+3>z$jg{0t7(GIeQ~`^C zY5e)c&Q_>gIFwSZp9B|CiAdbr1hug<3fz5&P+}mAB+5A>wXtQ!5!hkzLtO%V1?t7n z9k!^~BPFRLP+8H6pgx=S8xT1nER6 z^(R?J5E*UVY3V)*dwhOYrrfP4LFO#j&M0~X(LK~cD)%Lvx0CvW0V2Yu5Gq7&4)BXX+12%Z74^EKa2~6Qra=P z4f&gK&!?Z`!Q~~bzsga5ny_#h`XiD{3iBSVCE-jFEWg-PC6~7^k#7ZY_Wi%V?g(jk zS+VCQtO#XslP`6Xbg;?Ql9(CmrEmQHk>*nbCi*U$CyXUrh|?UgNpCc)H+aK$U1RZ^ z2d`&0`cTBbs9G@>5!mx!*mR_mSw3y`aTZeS|4iP%S@Y3N4IF}vwiu@9 zIm=Ja=h6FX;xxQxxvY4zdc!w-mp10Vd9Z=l=tmK~S!FU87uY`mTUbmP=Ij;RpBoi< z5l<-}_yBMEcq(vc=F6p^jXbFj0#+G*zJ63PpW=PZQhekwUm7?T(HZ!sflN z->%s=V^-F;_vP&}zab@>a5~rD=)VdNm2yiH_{L@7TtGDtQ&CP|@02;oGh2VK_WgPV zykbO?RqPv-s~Bv(W|m^XA6oD0DP610MxQI zvQp63zxMNcIogb)QKvRca+-Raeg^TxGdmrCjCz=4)o^?`&t-mZ*WW+V5ujtYVQo1x z32{G7hRHZUgw3<^g^dR)CVrOoJp$ukxPA?Rlw4eV&xOhs|sF`AJ`1+Wn(QE;4|Bg0d z7zQ~eu-2qR4@<%%p8MSQb}qst;~9x&Kyq%Pys#{*ea8Q(wCx*C>-s_I6=@%03cG;7 zyBEwKSpB*ft$$LZ=T+?HygP|2+$RtG5%f|QK+CA-j62M__uBQAuhV!7=$)f}B4AAQ zEPv-esnDyIEv8-wJFOn<87Kod<{#?U?iD{HdQ*oP;XirPHxg{X%XTu+aFM)%9+9_e zzIIVg_ss=T@a@D6d$F7U9N~IaAHA+XL;Q(;Cn}fI@#6!6Udgai_&!f0t+4mp_^-QO z7mj>y&hh+6&S$K{C3${@jy|xZ<3a*8lHy&qV`ZMoWC@e^U?rUSg^ zDUt~;TaN>I;YAHjP^$4mT8EIbhW=5XH6=%09sd$xlQ1%wmf*3hJZ#FkbOnTCVed}_ zQo4BRK>-!4lP_d@zhsyr=<36Lx?gwtn`dYcjsgFm?4H0~q9;ghzfdPuKz<<1fdH_& zh)e%g&~$E_ss7xHg5IBlvMZkC!K9a_OSsotIk+ess-DmO?k%QM$e7Qf66bGpwORTw zr{ei?kBVk}icYc``1G-(ciQ|y7pm{UjQ{?98rKomy_0<$P&Qr2zxWL=Ok(vprd(-M z@r3vj%!DFn3(c-LybX*FL+IU}66cY?qRLV!T?_=|{0wj#CRt;16_tO_&JtsQ2>& znBp)d4*p(v-5E{K>pbe1Qqjcx30WPK&y1U7d(6G~wxJYA*G}=I3TN}xmE3&7Crifm| z{bcn2KTvP3k_hU3rtM}a7s80lX-XAFP`|EG4|(3dBF-k@KoHwIJ$)<{6U9geDYig;h`iNN;(~ zZW(cywDb&T699C(W{0Rnl#A-8`VsfXuu}wIlr3a#V^O-R6T~o(gvUTVNG<<2TFup1 zxcdUFYE7O50=C8_id-290vOJelKNQsjWnDKCyhwuIE#}wi*r74vXmlDg84a0C`_DK!hfw;tc*2^!;& 
z+237mSGKVA1QrV`a*{7nsTK&jUc`+-vuAtyON4K9R5!ZT3G%+K+XlN~3_G^(MvSC*!}4=OKy6WMuTe`!W~ zy5)8_tbZ0xeH!ImnQ*Ed=Qq_&eTi)nRiZ1@(HUk{pj;)mQZRw#@}S8VQ)CB+B& zBmOcz%X-=oqy`>p4lV$&pn{)An5}{Ab^lg#y@&n~;�j=5Jta_fGEWTn=hOKq{L< z%KO&*>OkHu9K#A({z9U-6>_Aa2Ro9Bt@o~;Sa zkg{A+o*0QK4GTf;Y-_^l;#h|+%2YX`g1`FxKGBET7}$8_Pe>bp+nuE)#pt5-k(#pb;@~J-{GT+Dff+t1ro*&qX?|5Rs^PMq*-GxfQcDQ8yCzre1_VQf4d6XXhi7K z<0H0#h^T2S$iB&Ha(jT#^8B#`AW)_Vmss{Q zKjdnxL03Sx0()x!OgPm&x=CPSeFwIUm|)J~A3cGXC9#z`O+d--m_JyOY?P5tOYLO7 zl$5|W1h_V1xpzSx7*ogX#&GLS%7_OQ+`3_;F)e`6Vxf57Ozxfq+ZxK~Dj?baNjf(Q zHVcJ}Vk{!GV@x);LbgA?2t;5^VN7RF60!kKG6e+~ZGfpt$7;nqCSYC-G=xPC67 zLg{*MU5j~5{|G>%YZFrl7Jg-teMj8MANV}8u4ym>5R^&RDV`}kDvLk>))5DSx=2;U zyD&zgzHVTT89siWuMdS-q=&2L137X|WC#&kGNhy?m}ou z-9_P0NRlJ~tXf0MA##*|xEsWCb5Fz@B5hO<11QKqP9i&73v|g_MyXza zA}c@D<;nD&%s{~v-lmS$HJDB=@$O<#EXwBY-Zge>UI#@8rgYS713AbdQ+Iy}1bQKK$`0Er7V z&-AgF#_d)c5R1rzPF%TrGDc93spL|21U_5%t6$q?b_T)E0{~mthh%MbHUag1;ze+Ns&z zbYBJHMPO@JPi}#31bbBg{rhZ%P2)FLva2a#q_MhHMwI0SGkMSYf4R7vIx+v&;r~g3 zxF`TNvsPB?Xy`GE4V~VwP+5&)?soa-uFb)g1LU@k*G;?4Fk8BlgoUIkjs1ruFkSalPB!a{;GlOBeoX*>>F5iu-_K z97m=EoVk^IW-{_1#YLKOg7hSt6~pR~8ex|-MI zcLfaM1}ynh)ESNG?Yd9Qa_nk_T+TYYBJ%NL$G$Z|if=DwN0Jh|N*FgdW+S_Bqu$*k zbZ(rD3y;I8)HZMl+f&4d)KZ4*DSmolWZYjwf7cb+4AU%EhcwVn4a zc3e7st|~J0Y7#5bkT$JwIEkk-1Q$Ss!urK8Zp8gUxA$o9l)OOeLsn|f_B_pF7C4CO z)YtEaY2-BJ-b~TpwTpL??@stD;uSMrjP0Z5xizNygY2NSHqWI#Mc-#IaI~Am*zL{i zAAiiWt6rv$X2&lPI!jlS<+w=4&eLT^9U{$btvNc7HSZOF#^#KZgyLH5fE4 z$4i&bIpo7oKpb|JUZ?`Oz319EGHviOTVkdrd&kzC#Lkx-Or$j**10JK>h+0vlJbaz zKM|H-znI8|m8TiPaTQn-@A8e!=&s+5-VwK6k8t#&E+Lmm^V-f4@QUF(DN(BhSs6x- zn%ji4B(u!aDq{m7RPsCW?fa}fFWn_(DJIG4IRl~7KqtXl=g@vD&QPwmkWQaPC%Rj1 z2*Pj9)a+NBAtVi0xM{8(#Gl?nuFIJ9Dpt0)=3O%omHQEw@%?3vKUYU_%5RubN1H^` zdhXqcs7?X(QYsm{W=j%x8-Fxiov=dfQ)QgiwM}Q4N%7^KA4@{tnz%9k%@?F+ZylQ2 z5%qlFeOBY&m-=;W00$aF8SymDybfPC^=)s-paUzxjBjoqLK-Bb9yf;e(fX)dIDWod z*Pv+89}6=aAGWVf9b?zUoKjprl*QVvm|5Xab{-v=GHl?tPIF`KOnkB_B*<*ki~5Nf z{-ERSx+Ow4OQ(I--FqmvBT>I$`6isT{y0P%c8~j@DdjGlo_@<1&vYy8Ya#zixH6+{ 
zn?qCZ)VCMeRUN~GienPb?E8`Z)YzyS5~etsV?ejW^AzTNY!0OuuhumZpGA6;pVu$b ze^h?bx4G%XS=z5gL#25i3dz*W;e*qTw{jetOPJ;An1Yj2+iEn1RgAPCre*gh<&T2m z@7S;5S0KZacVtl$T zsS2Y<15aZN$AhM&GZ!46ioNx?T~mh!&)K)Po#%W!%HcbPZ;tf54t zzP-ly^G*HnoM%R8)?J0_}pLL@{fd0pdOC_5TXyRhKsQ|Yz`)bONdL;Brq{&!x z#rXBe1TR&t8+~YSps}B*6400|_19BfV)GiNk3QLrzJECPl*$KTtT4>2 zXnW{pxlE(ZmhqcZa|(^K1Qq*Qh3%E?_^Ghs(Ps%!8yx#j;LGUKhiE-vQN3e{{Gr+M zwt5xs`xw5;n?a)P69sQ;dhy97ZH4lQ%v}+LCX2|q9-7j}Z1k3Kl(f2I@_Vs(Jko?# zml3fEr)Zrk_0$|9p@F}x4D%mF>2$)?Z6M0P#xcE$Wk!0 zHH1$nK4$o6?V;ju8a=4yVt>0`ak_EcRNxs+_yCJmS0>w&z!7*w=-*}G>}B8PG+w2q zZdoU~9GT!dyqpz)rpnPvtt|8Gl5|CvUHDGF7N0)p;oZ|v1uY>p7XxW8mj773q@F!X z$8?E*c$VS~6P(;FFK4V3rnG9K86#OUZJf@Nag&hYubAl^i`F(?*EZP8+WhEa z(Kv-d;V=?yAzK*cpQq53o(%WQ)W0ub#~?9TpyH9&l8-2v4I|GQSTD-umyLXt(QOIU46^p0BUmHqb1*5xw+WWo1bWw~fBH^|HC0g*%EdzgW zb1Q1X&U9UQG@j*0Eb7pN`s}9kT_$?0r9?Q&a(_WH72kxrA!@ zvYfk2200OATF;*1(mLj*EK>U($~E0b3~N6U!_AU!E;klk4l{x4nul>;^^3&@_##{N z{;|*bv&Az5ghFmqTclkKxyB;L{c;i~!?ml$pU0{nZSgO2Q+~@WOW$oF$Pfokjq<%E z%?C0ojtj4|B8&L^9G9+bAX1C9=Wlp@nofc}6HRVBPFvXa2sH@yDJGHnbT-0)Eo zN2&G0SqRD6lyrXHS0fF@y7JrFLcA0e$QJ1Kmq^^HoZPz6k-V^#RAQ!Vm~76s$o6;7 zefsv_6#O}v+vx$$R4CO_XzjVFX`kNdEsZuy-wHd5q>E&hIfa&fvXgj@CHKMIl%9&v z@;#2d=`;^C$3D6fVb*j^7RRy6BxRRe3@V+@|C4@d6fXlxvV}}+{3U}lU9f!opb1%e zk(t=8Hj1%Qw@T=(R_`DFvtrKkNnYa7o*}gX&c=mU#5IM&dsBa0W*GwS`FYoY-4vfq z)Md)2cL;7{vZE6#z7xucExWv9r7f`;ZCj5wH`ck(8(8i(wj{W0mn_2Z`#eiwd9+1L zUk<5wk6EkCjnGM}KJeE`%bvt~Q~gmhRsoT@SU9 zM76?sS@XU(W@y-TOPJ;1@c1O}x(tW$3!JXLFIqn8n$33u!oVp0&HavDfpZU%#Q`i? 
z%v$%6?fhT4dW76wGJ|WlGnRh`C^b83=u<{4oPIY4ZJUV>HSj zbJw1$MVAook;n4h#jS~GzR6h=(eruKcT0lxPpjs>Xo>#50lVjNc?jF2sSYzOPmqjq zs+n$0+)}9tx|?83q`zj*P91w6O<+Q`&gWt6nus%9qW;tRl7slSGeuQ|ye{R}3hYkr z$REmvx!2E3ZtVtRu+5Eea29OUs)#l6Grp9A#H7iM5zTid!1H@hR?70P31_>V8XLMc ze*3`b$yKArefzn+>#F2;!Ex0C+LHc@uOER;)3@oamia-yYt_SzzS6YV4aupA6rJR}k z6%=t|aLk=!(#AI>6Vk<_kxVV2RN4R)-_7!E!0MRsy=+x?b=_3)_X6~IVBNyQe?9xo zsGFrGX!gE%MK{T}r|HG=(OCNQ5?D}<-;@B4Bn;PVQnnoxSygTh5(SIZn;hJ5tQ`zVV;kEAAn{>OBhrpwr_{it3&J}%; z+^zpnXP&X`_}6$?bFt&zlJ8{KpP7t;2e!=N;y1U2`fgyudqMH2$Ah7z1+kSkIhi7v zvF9cnGGPei`cu5V5L6u7f{jnpio+s@LYKBNeOIE(&Y*HcpENbRzDIu%Q%XX&KVsWq zHEkY|A4FI=3X1|5jjhIIis~k16IRYiRpa|a?{!%YT9+rvTbOF~&FjsB>XdhJ#(chS zevtplQ5Sk${;t36$;8XME!VZsk4p%bdA=rWx3}apd0>9j!+5EnUe#q+O5RRkJ2cg; ziO9Me3#|q%NZq>n{@QZ=yx5oZa~H20zbR$@E;GNhcFlM4;F!&$u@Z}_B;W3)7Y0Hm zr$Iln*&&@ZMyR~GEm&4u|Ip<4nQb*I+2Wg=wWD6__-w`8yT_N_;Xv2*e3EdMWQzLb z8Z-Lx@0PO5@5Y{`*?TjBQiE9LHBkp;=MC_0_nq|Z4i=c#NcImeuc4slhq|~M>!t9w z*??*fU&`Np#@D7v_uKB4Ts4Co&zl`EE};5Q){?iv>Tsc7vEx3FKP2!z>bXV#U3a$c z+-GK*W5;?*a~L-jXQAno9pqbf=fL48!7_fjKKEAnUDVrxV@}g7q}R_4m}P1X;+JMt zbMo}-y{tXWlB&-5>OPFBTgK7<5w({Sqzbo&^xe`=ph3-^8)&bvylsMBe!!$S+EEgg zNvo4E$Oh#1yIZOV2Esx&-Ft2dL%n+3{+6;?*}@#Zb~@Uxu%izma5y_8Yq zucV&JUQrVsJaujWdkPCT_49vwH5n%iWEGt~`)StPh1ILjPmJMg|KY|mEhSa9XU=Dc zb@pt1PJ(;V*_qSnjT-myU#IDxXUx}3zBQBuW5IyuY^&Aht5&70n9+O8JD)#`er5>h z4&eim_^aBnXKuT8DVHU>jOuTkz0bf;PMg5<{yL2@_4-X7T8X0O?|!&(AWbS$#pD^4Q^~zw@8LD{m6YvT_ z(#A7OgbPn{KVCiWyw2CV!B3_6T11`-EMS;!oG?){CT?;zZk#ZtCu+Db=xm(GP8e%E z`tn|(YQv#B6B{??pPo(dyQ>qbI`XUQ*G%(k&yN{mC1f!hwLe?h|BZQ#CX3D3!LC1h zw(r~iCU~|Jvkm#aj>m$}oULCu9mNcuiYkB{`Q(N-w&thIZZ!7w-u1Ynsy_<$1DuL` zfy?CGZp`6J6VOf5cn{_?*PKe+0Mpjf{!Q=#_l;l7T5OzH_e4(!mbFC&?oz%xFL$<_ zKJsj_?AOh}X3eXn64ZLRc}?K1$7^-WLu=$vQ1CPHnRLD)gq=5IE3n0GKmDwczxxBN zu+ePs@`Yl?%KFED;uf0)c8|aP$8oWM(Ea}ICx6kog|&dc{l|TAF!}L|e*EoD@nQ-6 z?bo5tKXfe2H*5dy`{2OS;bnidDzy52(%*g^`uzOA_ch~A&d-2$>dAp0Uu4%~-0=L? 
z-#^w2H#)6K>wj{jQS($A*P;4#8uc0TkEJhbt%IHT_1l@Of`}c{&hClc4yi%L?wuFN zyxkNRZXl`u++F+p$2pEdxx;MaQ-{+u2M^mTinT0b(XMBVM;o4-sQG8Hb8%1rX@S3< z)f60}`$f}t7?hlyTd|wl7_8aBR<9-%%FNjLO~EX)UHZlIr_KOk>Yo3*7r3N_8>F2d z(tNRlzttXpg&l!>e;`Epinp2ir%8!Pq%;{MtNHEv4<89gaMf(oSjHI#M<+!TyGm~bSjTaQAx9MQy z9EAGAzh{|)?8R9NzCHxz23&;XCMP?Gxj9o(1%}!Fa>F3JL%CCxWiYEWhhMbCaIq-Y zwD*5|NVkKTC5|r$*fITqj_O;k1rgN6qMfhO485lr-m3rZupTFNlp8Eu#+hgQV4zO8 zC!;V6u(whl)@@6qj8SuUh>2wh+8Y zcQrD)kQHC?ac%e-{*or}iGpciTg5f%-d+AkHpY}Hk=?huys2t@t9k}mP_Phy;{B*R z9R23*Comk{zVQ3nQDQlTori4ukcIgiI~ne%=-auw_9y+Qc&u*;zh;BTQL)a^PtIZR&FR5vMEhg|>0suNTkUWD(;AGfUC% zT=^u@NAop7zA#?k>{OopPm7i7SkIS#*ED`^p^|6uW~k*YuCh!c zmIkx#pEDjBy#8V9Zmzt5TbTXn?q`fGR|rQ`CerHJL%Lz*liDXMbT1p8=Gn5ufd9DQ z8qH(P+RMXuiNC~b|czjOf#{#R9DmIQAvlEQ*LsxM@S(9A5R_pJ~T3OLA9QvB+WUU{E%AMNR;$`h# zrT@L9lNTqW4S7KJwXj-UyCO1$gPxl@PE1S15e+|dHjJ3G$s!iVzkRzB&lJcy=;J3) zSIHlLFMpZlpk{G2oN#M=qN4qP`(ee)!6ZRpRi9;$zltFxEOcXCSpnnMqEh;_+4sik zpZ3gjLS+xio;=Dfm@Z9=+Q;(ilXNG+>9@5wBEQa{vYX}IV# zlqS`69k^RU@G_G$`L{QOjb9pxVXbGTl=34vDtW$Xmun68qLF!Rgd?doB9au7%gp}v zqHxgoJBwZ)k=M*!leXTt|J!B7c70ZEtv$FDjpISSJ;8z#pl+c@;}-F5$@MW8T-lX% zoO0V{RYbroSltI6ja?cnJ2y}!ZMY@;o(geyR&$;g*+l(Y!BZ%NU{i@a5tCJp!7OyS<7tTYV3#Owik-`G?6@^@TbN_LkCui{w@0|t;BpGx zY&u|13AOA93x}88hjM0V!KMhQn+m#$i;iVQgwiA(gd*_!vApd+Deh?@yN!IjW5~CA zVgkMgTx;xjPl&ayDXxPtI+9EAD{ne(948(`;k{RVf_RTkfRP`qsIwNTXt||um*9{$ zyJNSR?)w#|X{G2{*P?(VdhW3_%`0nt#A-f`O?f1F?vS(HQi1}`*g7t0(%en`@w>=R zIg*=C8e5;JlNzrb{ir-gHXLHh=^#PPlb=vh6-O@;mL}d)$&!O$?T7i241|8=K1YL_ zc}>x#x&E`aI@honNkaOt8RLaXU!b6!EsU>yIs1B^|dSHf^x0E*9c@ zm<^`#Y|$#@E+0QP(VpBgAzL4mIZ#OSp%{`aV#quvRZ0lyW4o~Oya+{QIzW(YxO|kAV!gwjc0nXdjRJRu9T;vr3@7O_l-Ez4t zGH=pEKS=Tpxj@LbOv(>JexIhgbkW0wV#*hAH`oLM*`|?}) z1bTD2@>pv66Py?F72?YAvh{OnHr}$E?W_6FOvB|a;Tv^VN@XV$??5M`umkR%GoFZl zWsuAcI9VUBF%!yI#609v!_22xUnj2I>Aja#c{E_rXVVt8fw_hsAIkU~wSBiB6_F?& zZ<|;SQ)L{o=DCpDOOp#j0FI5GtAlc#b&5g)ceZ~&1U`LRvQw5QUSHT?)pDe*w z(d97sE1@^5w_|`cO6XrTmP~$!dLKp?_sr(nEF+GqP5wt`3UXOA?1BiW#a!vrtxx=q 
zb8;2g16#o*g@tReN4yPwypmoSiSKR~pXJG8sQRJog`R3>hy3cBS;US zchn?PU&e%yS5wYI{x!_U0;a=e{-_~a{{wPSytF1Z^Oqq@0t|#)>2{gF`*9(`EhK*# zs$!!W)rV2F%^yCGfp5cZf;lmRV^)qj1r2N~?>HK4CQl=dg;=2&v_pBmY)r^_@S~q` z{{Hte-HI+*+&wZr+b#0G^~e1!yXbYc-^b5Q1hBMROR+~LR~tsYKbY25#Ltfv+uj<O2l4C2BW)RDnNq-Upi%WU6<(mFFRb=H}yg6;)R-FIVn4Y;=X zcEJP?d3O)*!7V^G(pU{+^cNP!MVzI$wZg&-@fR&2aBit5)4l*#C@Vcg*^%>6NQ_&WE=vMIcA6HYBD&K_GLov!M3*BfzP#+iU8MI-ZX>3@U^ZD`SE%KLFrtZ`ENzcW~ z`byQw`|6H6U#hPCP$;YIYMHZ+bC*rGlDqfZ6TU|ji!0nRZ#D1=FB(NewjQHL(m3u- ze8FT&ae17x`p=dWm3vH+vD+=l#rPu2lT<68mgBX*gHmom&y&Hda&x(nF529eM#|3E zMlT4k+3;0rRz>JWy#G|dOK;P;CmSv|K6lw{-)P^YlC^=!wH~G+U`E>9^qIDOCDb#P zN!l&8q1h0MYc*ntD=bgWM!0X{E|XNL_EBIkCK7ER6pMJb+hz{Y7Sex(W*k~l6MzTPUn zGu0-|-WI^+Ty32~jdLZ*voSEL_{*|o z^%nA;Z1`-~>J0AkUA~OgW9U-qB}&8@s8#f9vl|1V`Z?cES84u&-gG(C8VbmHW#BBb zfv|wbo|s+w7JKGMyKyN_ zQ_I@!;do`chs_lLR&Ml8(^>k9q@&}cBFik1`%vDAsXfK%m4gma2_1r8PnI*?XoCb4 zf_8!boPnmu2S#Z{zi^yCtUkwSPaYm5CXM-DHA0^^%xSV2K`jusBjhpUd)6dTsKg9m}E& zBxktYtq^z*tZ&;@`|Xu=_OiflG>23Knm!b)tJ2zU)2r(gmZ>Ou24IhczI?{+AcA~& z4+)l;iG|~WLUh|!n2li#ieRpgJbtLjFku1JkekEyv|f8`}veuD$?{ z#UYdLaJ3$o5JX*EA_4zKk7h9bA$0-Gxnhw?@?p4}rOIH+#A*b)ljoB@^tt}44E_BD zX3wQM5SBOZXjg&x^>)52yQk#ib_gLX5Pd1%7_A&7dA}$>DN(nLesoz|rF>7VMiH~+ zj$B+N^*8t>=#pXSSmm``V#_`+Ive{#6WN%)GB_q_a&u-ALItJew{>ORNo)Qm#T_k);dBR9ov{T9`gM@^&2XU^%z)RXB?r zoCWJ$KGaUc+lb5)ugB@8PAS^uVDKNcxl_*tw+5^Z*d$i0g4J@$(!Ug6b+ zE30*-rGgcv&F;$fVG~YfaRCA3l+VUEayc>o#&2JNZ(jTw}I|ll>{@b*2#lj`s z2(|f_v3vxJ#1$+zGiHb1IJq(!rw0JiC7x$@j7%gzLt?`lFuu zw-n`w>SjQ9#o%Suw%Y_~XJ{oEC0#SndEJwFT6`VBdhKpKBVd8~(8R+)@&(LIvuH{` zUQf%4%YkF%!F&cyG89^zA^z-m!oZ3Jd!kBS@(j4I7XRW<#_igyKd{s*9Ey*51N;hw z-jWAgX`Vf?bBxA;-OX)yz(^yL!Bl2~VSa8F#y8~i^!90Dc8nzImXcV6FlVR*EO}~nb|95Do5)p-MUZWl~ovS$NC9l_8LkYUM!5(0kZlJOSfH}Q;{(4 zR+(N+uQTa08WRr|JJ3?%*1ilK0=Jh6`)Ic=&zBn+3`y${Y6^UyHFMwy( z`M3Glv*KQB;c<}^*Gu|5+-1^Y;r=Ghimw?w@_ON_Z%1ruETp<2j<0b=e@`oZG}?!o zF%!xxla3qMQ=&Pa*e32(nJgCW10#EZn+^3?&xaX!6l~^~QM;eW-?+aiu 
z6>c^c$65xxGbMx9(l6}O0h5hx-W@q_xC_X}K|+ZnpEI_L{PEQ9Y^0&9+0K5EZ)dE>?sV`MqWY9U?1Q_v8JGxEgQoapDwp zDB51;2WGB;ef|=zWxl8a*v4{z4Sz%;b6V&Wt!JLSi(}GDW*%TuST;=GL!fYowq>wB z!p7<0Ib5CctKwGUj|^Uxtalw+G%U`wk!|H$l5jTp5n`~o4e~xN9No&Wr=>!R_2ic_ zF*HxvJLdkKSucs|p*uBVcUj2sw4J|Ut5~Vw#O8->pqg>!=_ zU?>Z?2H;2gK%ODO)j**03z0YV6L8Y6`Txj!^MIzWwC}stsk9YQhbpBMoNAR;5LB!b zLZDMordC58b`jZ$3xZ`y#IR_s0%BoW1Vu=w+8Ws*sUUYrsoz{}3Q{!NmUwRYDj>V~e@ zDZ-Hxgl!_D6#s>Y`9lDn*Vx;I`|;2eRn(xV2Jc%(G6fdBVMVn$TWI2W=^Mn?eIqV` zGRc~-eIw4t z049dwv(+i6taCmd(LLyn5JrFC zh4KO9y9zkrZB%~Ug4V(DiWpEw1-j~k>$3^@sNTCeW$!0Cp2AjGv5XZ0d4}s{fYv#( z7XzP$J`dXW&~$SER>i&$nK<}xngJv&O1nB&(k#4kOn9uog+sov4Sp+Jj{)6#8<4<6 zIa=LZjkv>;njB@ZZUw6$N&53NUxHSwV}ZEbel`}dV!+51tv!R5D1rGwiwd#An=CWV z#q~BWOMA6pSrGR3cC@mM%Aq_-VXx%76_s^r+-|Q%yT0K# z!33?!`vy~@)u29$nn7R{a52|&YMuqxTlFXhA(vi?dC9)0%nBVG?ADH>WEC+{!xaRA^BF{*0U#(4FZ5SKET>aJL`>+sWVao{*kUv{f@T~2(yY*jT!V&M{A2i0A2=A>Ne$1Xyrbw%F=FgVH{k3Xxt3KLx#|eAc9GMW%==9 zKoJ3&#T^GK+VU{)l4sX86ma53(f5k4G=khGLOQnn&?-VJZTC^PDi$pN5ua$dO@oqIUW)ac3C$-OF}^@WG9OMX zQl1pRm%V#%p%=iiiv~loB{zTxaq#Z|wsVHgbMQ~UaxWuOa&wo~G*>$>S?mG#O%>S? 
z?aVwWhRx1yNzLy-Rgg0nr*%1d#hHP2JU;Kf_m7sh-PonpdK@(h>C;QOz`6&t&!MhJ zzT%wsjhN*e2oINy8i`1#g96ui&@q#Pb~$ht$HebDH{Chh9&8W$OhwxD8{KZlE7?Hl znU9f{dM;FM3c%stq{aBA55Z3TCY`yW^(Ks%%AhF+E#Ya3B1H7kUcFlQ#qc6HMs5z- znungzq#aai?Izls?N|NAZ^Y-E%O6!~j9E@kQ8PvoPQ}E$$&6Yt3wx6ej0>QtVrt1@ zeW2bhbu=s=${wW!Xzt25)W!u z7vun!={=t?T%InvFU`jw-bhE$p~><2cSUh^ZvIjxtlf9p38MClBzy-SYPn3>iD*0Y zCuo_XqJCa-lC)`z;K6p8JkuuxyiiR+x5&?qx$P>R)I*%hW&o)wzK$L5*ZNU#2G%4d z(*J^p<6D#0Q=E6t2I$zyMP5;+@C5wkfj>`eH_%jYzPc%!STBoTm8`WG+R$mv)|N{Y z6Q2}>cU5UD4qGPckRjq{QUo9O%UYNaremDBda_gdpP=*7-;RpHm7L98^d}Vus#rs< zlU?x6Ty9M0s$`q&;V)1h;k@T+f3YryPXPeB*5cyh6M8#u%9PYv_#b6agU;FdQ`@E6 z(dKsSi?TWSmn{|gKMg!J{)}zB#ujm*vE5e3Vdvnl`J2BU(MHhkyQ9g4yH~|EN#R!a zBQK}r${x+Xizb4^?}Yo4UioflyTnz_@R1elPhOdy?6Xw4!`PHXJ-JCllR=-{{lN=fG|LmERCP z$pVqlTyE|w-y|V?&bf%ahc5TL+ZypY2SAuC*gr303m2zPMjZ+8plUl)Ub(Fl=U@-* zss@Bgc>a}}N&ZIPQD}UH?m2IAx>#}-&J7(z4vsA|gp+&bPx=>AfpeBe*-lNw#@u#) zUKkbqWQHAl6fp_=crI6iBgm!G3ptz6*-#x>ztE8IKgHeMY~Sl8_!t_fY`v_aZ<^c2 zNT$h@g^KtQhUKOa^3vI~8AcEl?-WTB6oHJySjnQgcT>}yWna`syuggPv*hQTWg93) zO7!j;Yv|O2$T=m)#NrmgUJe1BD*)e)R}G=!en($F|77pF?8mVs)5wd6P~hB~7~Vh* zOJ#PfVN~bG>kn*EqDL{40poK!jzwrGpSudS?9i7Nrrh3{{V`=)qKHLYTbLk6{83>B zKp$8jr`fb->QC|!?F&}y7dSA4H3^tkBcj@(hSRo%MDMxPY~eM=w5G=T5duWx)=D4~ zIK^nsvc%?;m{7|Fid~vqZbU-FoWmaOdU~ko#I=C^fI`LXtCYwBo3dB3l8nOOGer@} zfkPvlo5SxmJ++=D=h&7{b)&c)-wF9gq}NrT7yFc$qyz+k6oC`At-1K5&=(qQ(#e?`ThjTWT#2F;G+8xy=JW# zeKho~8q(x0%qng`r=LBcqP75l9+3{LpXIjCp%toN-XZi+gdCCVru+nzb69PLVX84p ze;#i&+TK@1zNSl2?d)|M(`TBOlFC2pxKHGS}p;X!>)uNp=qjz|L+Uiq}^IDaH;QIl(WP zz1b?-N8%|`R;yNrPP=&aVcQ!%K$yHwyQrByXNLCk`xPv7Sg+T**)Zx$=u-sV%zYl; zo9qYl!+;NTi=swFC;=X{Jx%Bz3A>sguI(xE>4j=D)WjJc(xc9iuzqpyOi|tsPn(Ur z!cKRy%~%4M1nM563wHm31`8!C3ge(=RkAR6qiVk|SJK44&}o>nWYQ{oZ{TAKpX;R! 
z#^Yi1*~g;0e7>~1id;2wC=h++ zZ2O7R4xvMxOCdQ*=3#Uvx2w7IPU~Z0f`|TqLvXUVIh`6p@oef*(ak?Ce@=L(4zBGB zMJ2E4;d0zxx z-P9$P6FRjlAE!+9KfYL7v|L%T25F@&`v$QtlE zhb-F6RTqCVwa z?@T~aP;)Xl#GEd=27F{Dpm5V9>1>`8FAZ&jgC7PX>aCjSa<$Ak8E$CVUd;Vs(G7%m za%a%Y9CK);`je7+{In6aV^4u@TFi?E`G^}R zU3ciux)_}jA67!CYm5_A+gtSo;t<%VK4?#GK1MJ^J*04Tq_?ctj|Y;9AHLRVh0%aU z;qZs5UfuOY|I1j%0B~_g7^HUKC3E%#fD^!mZRg1A3t5}mU6v*<9lX!5C3no7JjG{g zR}LXsk&75lb=)5s0k>&KYC*dlsiPOVH)R^n*wvDJbjzw`ncb0Ur<>B7-JLNUqktrQCuikR5r$0(Jtt^Q66Z* zI-zDWIRj2GsId?!YS7;b(ETw_p?+Dv5%VEA(*q7oXh39{!Kwh&Iolh3vUW0Ajbf7R zUI6GLbhK(cRk$Pp_+F+hMi}y_A6beMs?x33rteV6~=x2h<{*3+6QB#Yi3qJG;p+RUoE1 z@GIX3(#+CfeeKq~mh10BqZ{Ct!pW`C84~!(<(GTfnPI?6=9nuH)>*rC$4V0qjW9FC z_)mS|m(EqgOKZ4cd;+jMOP?88U}kfvY9$X=8*z&?oJ3*~OUO(s=VlNIPc$5qr!OK1 zcApqcf0E3}r&*)5Rn*Cgu}Q=uN(Ew9w0wmdA!=iF6NRI3%`{l*!>7K706%mbZ915P zlSF4lx%$wssh`jP>rEN%;6V!k@NR%R_HU<}&9Smw0-F(*lHWxe821V5XSW>>H7ALD z2K&DwxTbqp`EpvH~W08W{Y(**<+{bDC_9}b{#B-;Pa5cYl`vi zGV@-tOWH-C^%`scR<|N)VkxCGV5N5`OI?OINjs6*A;-{-=>-DPr9>T;I*+;@TBBiM zC$a3J|1BRgf>nw(YwQRO*QlE16#MnZ>Kxeqk$>%eztlFFUNWZNDQpMLPsx4+G8Tk8 zgxbQG%haA%iRnj4=AD$Y1(_~^@xqfy0Y4wpj_Y1Qh-ej$IGJzrUFc_+Y-2z_|3fuA z*MTVuu_kXZv9I$jp=FLi_tK?+TSGzg-9RO+On}!#e*`SIa(9Xd8N42ojbJ|V>+5Ou zzd_sqH5qzstpCRJ)y0^36r#oYxzZ!Rbh6dC4(q>9(xa5_UKyNPR_WextZr4Q_i6F; zD{WRQ)`+aj`~x6=e6^d!iaRZBr@KW^T8`2#XNn?xP4=&R+^Cy7q*aY?g*8=KFtCc20P_-c#DwACqYhfWZgzXvISZxw`D z(TOflv;(~a)yXu_L6Niz)opD1+cQ$5T3V&iEDogyILO^Qgx`aNW3M1=npy;TTi(3=Y&lDv0ZK_5%q`1 zBjN z_{6=&ci2<^7+;)595!Ea_2Pxr(V=H;JOTTTf2NNGK^S;kT2+2#AbSXudhD?3X>?Ud z$IUN=Qh{whL{~mKRhJVGIY0^MjdJOBb6D@7PYrYAIrrPQwd{A`UEg+E2UQ;?9HvD+ z02%-x>5|o2H}t7|J@7Rlte_?dr#iCm;yJeCVFP-_32Gsty&!`8Q*ZG2kb~dpz5t%5 zibl_mw914K=|9X4gsyv*MWI6cIgfMKb4(A0G&u-NXF(#RIi+2tC5h)6T)nv!@b9=zO z0loPwH-F1)KV(1DRkZQG0y>1^qKApg+Dlt+u^T2F>j?;UA-d@G?#O-AWmF}Dnj`Y) zVe8=1LsC=W)cpvg2`i>gQJz`C2MS@OAdZ+*&>-GIl2p0@C-^%*SWbW`Y~~OH1G2bW z0j9ytfRcxBqQtHeUXSA}Wfg3Q98!cvo2IAm_Es%|&}cx$2?Kyl^22-6{L3FiXGW`r ziZ=JYn(`;c+RGMR5p5COcs5&004O%b4z)3u|BJdAXSVZqX3m;}OcqxFByafnV9 
z=?K9k1+}&HA_K%Q}W;WC1#5$eGy*97jN%i}ZJ?SSU@d1&SDVb)r%noevfJ1M^ z?rPLLgj9XYr_Yz;v=Wloo0c7*=UHUX;!ECUS}~WO=>-q$o;)ti9h&sl!xSX=jF1mR z1N%T(u;i3<>WXq7;RW&w>b&OCb5FBJ^1ad1eGXRh7>TQ?EN!GkL=6DWk(Yf8MInbd z=41*>yUjsJCfw$g=~5W#(6m9I|g=|{9#UQBx(&{)(?y%W%B zb_lJ-;bt`GI^eC>_3zL>)X@8BXhIUvHe@Xn8YfHI5WMdYBKGnr`fVq zB4A?yeE=QSZ0Xx^VXptO)QhezX8R$%={emHbX?#q>@_ZZN^%qO7tKJ$kH7`_{6j+e zT#kdktq+snk?u*yje@sHPBK^-Mjit$^EubOYc`cP!#hqZ^+?SH&Xx`0SJB-w1TrG? zjM^HK#4IQYZx>qJ2yps^%Y$p;5=J830UjxZ#d6nCp@64?9Pbq%CJX}It|90#beP#W z2Gf3+#*1rVWhp=nIS_qGVzZAqn!LpbsnoETtMVdJNCo6PFAfY>xvj^0MYm<0DamJs zQCC_&+3iU z=;1V%2R;Z>YfL*1;!nI72xthh5Rx6uH7V>!kiTv#VpR;>C+rj;WnrknemMAAt9$w! z9eFJga&U3RIntI&3XqGXgd6|&$_=~Jr>|r{@}WG!Uu~W?>_monZ$n5s_OmAX=2w(3 zTg9(HhgGt^V01qT-N(NxXspo?TD0`<6oT?DlV?U3-L2zBRj_%qL7BN=&TtkTwtlWe zn=bn0p4nUcytWVUjdmG@kfd_&0hcOYe>qWjwAY@8q(4pseZepwttmpwlax;X=%R&u zFJ-#|Z1wP;ANxKX(`c)qu*dS^+WEk!N6u#y(E>t2bcW4802_AK~m&28$*3L zD0y(E>^(Vzg3$j1I7zBsQNjhcPg^_1RlVCz-_%ZZg_FEL)O`2Sbm=uZC68WXb%FW| zI50aXLv6?-my})ikbhg2O_H^iA_R4=BtwLHMwqEnp>%m;jbA%!KQpyoGJ6Bxp#Cz< zD4~I~5zMJMbfX3V_1C*`R@uo^4gppP-01wHE_r}!pCso>RfmiLiHO|IP3nUGMq+Z8l6=!V=r+6mlhM28=# zPYY=9g&Wf3_5BFiKocg`6&P~2C8^s9r`{>$ySE!~jseL?W}{YX--VLftXdd7p*F?U zV|#3yHe++ACY!DfU2HWy&dI))ww}n4M7`bKnqMBNCf3nU(|zGT>yk*J5BACeqCyioyNE0|Bq!uB%&Tt%3j~6+{+_@}MLxkd zI9X7)4)r5sr`Hi2se;{19T!1rH@mo6W-k0-5IHX?&1GnZ6lg+gf)@?AoIcSFaBAWr z!tyLPQ9-~)>19bq&*XdU!^V>L=n-DGV7eMYfj%?KhkC}T=rb0hc05rg;vpGV;Rqs- z0Luv@7^K{W#s`AR z>l>8P!mo;3?L=-D6h1IL_YU@t2R72O#SVbcOC@CIy2E347&;>1Xq6PC2 zrv~hwIqGA0HE;PRWbuP;5H(4b%1DToB=J4+UFT%L0ed%KS2TLJ*7op(OicxDS#K_{ z4TxI8065ZFIDX(zx=M!l*hRdcIS0KH44fHsOQ=D1c$K}`dL0@wnv{#dv)DQ%?-uDq zUgif4{HoPF)#tyQDZ0K*tCiZ|-tmDsHPo*cGnsfL44S-@4bdA_WxFDJIo{J9bL22Q z$K)U`L8A|h77l;4Ai96;r3KPGBUBZI1w<7aSted1KieDu&G2Y!@U9A@#H<%Uby?7- z)(%Kr+YQTQ7(#_P9TlkwE&8k64*sJe!(7pfbp=F*5NH3tPh{w1PxUgo0iF0R*+rQg zE$H7vtHjgt1@%FSJ;n3Vzauo+R%Lnd=3y9XqdknYh)jCP!hcv`$eMPJAr z1te_@hv@=qMl_ognF4^?!RMxbRnm%Xh*r$yr4#)O#@x?SfdUG6PF%A*sHvtSOP?Bh 
zo7Pt$u9$(Qw^q~wZ8oVjskl5%U`{e7rN4As8R{*+H%7q#=JAS)qqLpYU8n3SoW7n0 zCnt(#?hT#B!4sc@8L+}Njw~S6NuovH%RQ$K*cuwkr+ZzQ-W3nk zl9wzV+n_WJTBc8)>dtv0?J)xDApGgd8L9Anqq8G=b)n5TwFX$vpyE1VW{ z{V0m%L!@UN8@*h@PD~F^=T2PE`abYxWChSQo2>&pE214$6zB5vZznp;mk!oMHPAGX z<1X;RRD{8D?Fq)AWvXxaa$ZmM*2w+$yFb&{0(^-4=CA)u|KRVwB6{HO`kEaN0Y2nU zd6>=GjAMa8`>($C+SJGw)_=C&_B?9-qA}WYsV!`&$+st^izcL?)nq&)0_8aEvV8QO zGX=?dbcho!FK7xQlOpt&4u<~__}nmX_%|4vW_`zZQ4g`sOQ|%3Ul94lNJX4dIJZp+ zN=%+CYk*Y=nBb&WMQX%_G8PG^?Q6?qy8Xn66McL`l@oFGg^Eoo+H0uhLPcx+se-Xs z-}HBc`sZf3!zNbJ@?w2$y?d88!7&`X03)PIPqnt<_$Nxk;47SeV;^>mxU~25Q{!f% zh^sDC=s-M#N5^Ir69-#={D;0MtrtXS6K~L#DOtvU>YG3yh%IC^*8$l#7OQk6PJUCi zu=22efyv^*a0nn?mFzyVPxkd{IoywXB{jL1Kn-gpkFBz;aGkB3G%*|0?08x*>=FkY&fe=O4-0|4k(KHDIuD)xT{zJcsYX+OgD!6h(5MQk9|1LRiHd6bO^1pvx&VJjiuP>BT~ zH6Q9R@phK4Lck$%6Uz5y$-N-)2)LP&;`zv`jNvw9kRqcNEY;)yoM6`fN^9Zrbi?Ny z-CzA#v!$60tEppI1V8!nc}%N|(^qj|ibz=W_g(uFm)Tyzu?MN`^%6IV;t{Qq^MaNy z%{HPs{cvE&Ba??SZo79=fk4AuQfRPmg9_uarIbjF572M%U=9P*1d=FU#7ineW<$NE zilcO0gam?%Os83(nvOhuhmwx;n%JHRLd;dk-=#GlRRI}FnUPAg_4jTzdSoYma5l>} zQR05cc(%cCTUX-dsG^H5jR8}CC_PqYvjJ&7^>5${1MQxUWKo!6g7oQx_KYrBJRXF| zqJJA4(8nMpH&MZI1E*4muL3ZF6qdRqHVEps#2vYrCG-Dv9H8o){I3bXfBnIAcHb(9 zQ&3l$2Mt>btgPlh;&M*UYLYnP#_$Cb_AZq(&%=1YF?b0;XLO5&)Xj>_V!O=V&>)dn zrlh2X5f~*)BFlt5l!_BW^005A&9LKWtB2^zVT-aqke3R+I`E8#dZh@2eIS?GX>E;D#HE?1%sYUrc?E?W{- zQXN7GWi@Oi_bF`vZX$nKEt*?w9By)}%!m*$5E@vkLk*RAOhc8Yd@qobQq%}BN&_3Z z1b}F7F$%Ck_Z8?k*#(JP!|DR{JkQjf>DFR%&5MhP)?;4?vgcoiDfyvmekCt6W+e2U;uC<2bOgDfFuL!puyN*$O&gg86Ee{pat84 z-RfQs|JW!c%0q@L(XL77xgdORZFV(k ze~lYu$O;u{N76IC0VgK3G}iPWYb5qT+M0!>({-Ec@*{6T2FDiVTY+}+vzg)vX)0*o z8k_hsFX3M3uP=4#9fvtKGz%lg9vNh2SYbeQ-FU!5bga z;B?E=QIG}Jed3;g?==aFT20YGj?XICsrV6UtE;4bF$nQKKv>>@=qU;WLK;I*7Z!Cm z5Elj`@t%FnT7$BEYJmKi0MHhy03lup`Uxc(^+3!*;lO={k#rYYLioxQDFf+kpU|6K z1UqVeL>3iqbG%cJN^c}~i+F~C1(v8|7`5eM=x%NSsgY;Y8gZwegQ+j*_}hkh1kmcV zuk+MffyMJVFpT(YT777v47g(m%Mp=>`@NN#!itzO7z|=I5cea1xe#kd3&MM<;=wbZ zvQW<8fVe4s>rxYKBz#0k5%>e?HAUS^l?1&W$-w>DaQ#7pUkp>)Iv39$3`uLQp&+Lm 
z7rKh!dypxLZdGl;tNDfRwncS4P*zQ%12HSmf% zDl{LTXw+@UaMP!R&kxs<(e25@+NlNbbwYyuH+G_R|7cQ=^~O|N0MrnzCqXGlN2#B| zhj%&Ay&4w`c0<9SuDvr)dfiX6ecc_(^eZvk2%^EF)r1vEMvTS22UOa^3iS>SKx{Br$N zE=x4_6&#Tk-D~8Z&awJZ216rW*)A0usBcmvAlOT;o9ot-Hl`l5NrosSB z0LFwEBxQ=E^|QGC`^L=pzVQL|bTZ1$!Xa7q5UeKz4GNQ|J7ed2%K)(TA|N%{L>%VG zu%mYl`y*2?vh1F35b6CoE3sILN5Fs&RGZC<_nQ~6OXfljnRDHG{qM0ODIcB5kX-HwsHBaL zx=}(cZ7@=aoQW>00E%qeE+lwFzR~J(g-r&}-cxv!V0DVq^O@5UP`@|-SD=1S%CaE1 z4R+3P6#=+;{{W4?@sTJS4SCiF#_U?3x)9LfPnf7d!4PdA9s6$SwL zw#_u^Z3g~XD|blG1?vy{FUt&yx6bp#`K%c5TcUq>Mvhpp_OWMK?bh^AjPcD2)@$P6 zx#~P6B}nonFXdZFPb(2_@JQ#Nebte%2Op#1C|CfMqe8*iV(gGJ_iL|Rxw+3MNq zJ%mbDG-mLR0>q=^((lD=-7JhW-R|2;AGAj}w*)ZrewluHej zhrU8n`CE`lb>E)Nn9b)ytrRh4PNTXl?<>*0zls^W?T9Z^<{A42`+>9{&`mU6MSer? zTbAkhsV@Bj4dwH}a;D1^5W?iFU95#YvDKPutdg!-e=&ps*6;KyowhO!^v4RtbB=m+TQQca| zl&biq3>L{?exfSH0t0p$H8V zGf{Fd7)meV2xG3q8{JC6mPnx1!?+=VYK~6mER&@YI^(3zRV=}ZXJDzoe&MCSlYzz4 z_ooe6V|HaN~G^{jVBGi2qJp0=yYI;kx%y|}arp9o%2@jNx&+l;So)AF$G)gE= z97SNT?%HLZLX7{d*50q&SE%39%33=O#t4sH`jHRt{0}t`2XB zqOgC2f?CD&Mb4A}&i9Ra!TTRb%7w({B2gQKaFE}?ft!eCS!xo_pH?44<24w8q5c#y zT!Yel&6~?@pIqX9J)nn9ArDAmQM@5?^J#|QKqV|lx+&2Bl_3<=w>Rf`(f*6@>7b?M zv=gY)4W(}@&m}}jiXfR94tOV|_mmIEKZngar`f>*t>;Q^s1@gzSHHX?I#f`uU!5kIx7r&|r&%)Wf&Vpg&9QcOL%Ho368UOZrVUqH0;=h^fjy5$@X3?)Ht^MyJ zko#wdemk^;u^7UoD|;!otiwqvx{iCr zHnUf%aKv;s2|2eV18G;z<#ZX85Dq{jINhZ`FT`SI8#5ZnX>Cpdhnn${KHVI+c^A^5_tz|vY6_+w4 z3?xW&g{Wwyoidn(x3S>3i*b6Ler#aPZapLhAST*1{nmaTK4T=rplT**%HI(0^!c?E-{H4{cNC&|$Y5b2l~}yLv4((_8Tb5LbZ^WcD&oPw z82s`jfdZf>kuLjk%gtSON7S!Y+n z>fIDt{Z&nyW7+;c030bu5>~=G!7vNwEQ5I+wD?2pjflpQa!|RcY3t@c^i55q2Aog>OtKf3Oa>@6cfi-NC_0V9IUzJ9JYkwy- z!H9^Ov9V!bwqCJHg8hWhL7+ki@n}O8l`Kc8xgW|>pmQ4;AEzCUII<608o{Oq4n+Lf zm}ytl{$V?#?+Xj^a5n=r;qUc|rr;6L~H=uyLi0j#b0;j=E1Mn%WQul9V3DKd^TQAM0`ayD_3%5t4u@G!F(WvMNI+End z$pwd!A>$W6N^MP*?m+V?NRvhq26w+A=+}wQ&uy%O0QM!w{;1cKfE3=)OobMZZ739k zYU(wSogc7Um!U0b_6JAssTkrFg4g}BW-F=P8=CNm=mVVX zZx-G}anjCK5CZkP-TuGK+6cY&5NLB^)*e5KTG>_8s}rGchPO@*;WK6=yOz|i9rbsPl85@%~{H|>c8)X 
zgc|;5ze!TQ?cM-M#*`ucR2MiH6B?!t3{Lld znNM)P0G>|gVMW3W&?xPnA}QPqq9g|NKqlfDcRwTStYB{L%IlNg4J3^ACeSFrHFDA> z16`-@E=wkZ%tz;Mg||xV@7RJtukWe#Q*>FYY#+krQv^~u>WrN<++CXB|} z9b>~*2udTP; z2y8w!LDEAC#gCsyjHEu{M|AVk;S#mvdw$5^3)#wz{}iA&wBR+f!t$6>&B>zgu<>tVJ(A`eYpuA77`VU3 z+-NuvSlMwjs_D6j*5aPn4o%fIoH6{({>OBFdLE1 zKhq1EB=(6 zj%JoG2^ap0wBN)!%1?!5DubLcMgki5@y3V?1Z$I9kZEGIvgFIzxRheP%u;=jj={XW z&AsQGcSk;W?0&@0eV1h{eGGe4A?@`cJrV4Gu;`0!U6_p5F%#NA#d`OW zRxK!Gdq4vf)-!4#yVHUlMlT#0kg!Zwpl|e&1EXH29a2`lZjg}EA_`TJ(mqqtD*fh( zQ*6FWX)P2$3iITEWxehN2)pD=Bq9mpZ0VnXbI4h)jJlT1Pv6lLAp|uTMZ!cHQc%Hm@bP=0?zGDrJFQQEEC!z=|FxWMd~mz8Z#%X41s&b* z5I&J{X{QnPu{mC8wVH!NcxMr}498-wyKo)nf$1{{xsJ$t22NYwW=3l>aoX?quh@uy zE#rVmRrWG)jYoY0J$nJi|IEK*uc!^rPI{^|xRPB0E*PA^35>x<Xl^kN?IO&tZc68X`Jv`cO#?$3tJXNQ7UNVM9yFKk4IdP3}f>+`!f5-|cNZJ`wSYCEN(^IwvA?VXw8 zPhM#ff)PiSfA!?}A1qAFi_T?zdWHEmKaN)$q%!XvDZA`i=aAN77dm9ownUuOL&{)0 zzzW5|)B5~#nPkUc?Z5>2__v81C#rxkVRjE?4c`gj-N>%9`M;YBUY;=Z`p7j(`z^`W z-yoU!zTRcp$hqj9(&ysab6o=wP?bpZ20V3+Pq2!gbRO8=Eo7r{|F-6$2QL@6uX`?t|G0HTDT?{RSEU`V@;0C#|_9!GB`Vf7BmD{@7tBjde$_L8P&$+@2 z)>2l$_GR2W3^>tk`5370I!3zaqsU4OyHK4Wu6YT#Xj?hsyaPjvCYJ`W&MnouZw4$A zxM`y9V!P%r$CsE7r^HVte9(@84i~igq@SFCy9H4Ha{|gaPpjgc!jGWkqWHetdUeD& zIw&F5zXIn7Y$InP7pV*Z7h@v1m;(W64FROeYjy8m3(AB|xO(wP0*ZbHvfLu9=_{f% zmf#@07aAnJV+0v^MXOQJ>+<0=ecoY0xE8~ZO!Wh;v1)w`e=HQooJpYwB;cd$1|Q`i z=(B?>$(yZlF+D)dB*3PMP)$i;$@Y5 zfBewr%6I`2eabeain5`(_w#?!Ryam_{(8vuL~0WnTq-lS%QpQ^HMn5zI_2RT035I<}QbZX>Dq8dLF8UI>qLQpTl7-hq8WkO3sz39Z9Fi7OIM1ChT zO%umY2I~r33-0BFC}GDy4jpMHMJUA?MR^UO^iIH3Pga;4nDZgjol7B*#DBgTOqJe2 zG4rdy*14wj3Yb0%{fhvwvW=GBC{IKfr^&`VzY_uR5QPk}WMg-0(rzma%mHj_a~W!p zf1&|Irx`bY*Gf%9I1Dt#1A7W=T@qN0qdIAk+aYkTYqo(7Ur`I++9+@2^;KiCc4T3~4 z58Br`giu3=9dXL6oxb9<1k54KvWdu#IQz*MKy1_nkzsl=JsJ^>R8feNFZmJj4ld=b z$Oe{&jI)^q$IMQIB6((*GyjeATs74R zY$|j8a4flrzpY=<)PW}vD0&;9%M&1$yfo023qDJX7oz0kkO7UWWa@)>9ubnHZIB1X zfdDKaxR)KebAJcLDLF;tl%!J|1>d7!UMM^OLkSVpmD}{W5Fj%#AGFsW8>9#SJbdO! 
zd3q4p_8*FCHaaxKL(5dJD;B=(HP(88U{7ujCU`?Cr?*lCG@T3lsExY~ZN$68xvFw` ze!BQ_59Oh@EN!5chO!3%c2icn0iDw=KO>|z2sC{W>_tN%#P`Kp!qN&LG2HTJ8Z%(` zr{M|hoRJ>v3)B$V5JVtZx(KRK*_!02tq^^*FP9y3Yb*0n)9$Pw z4ZjPJxdd@dc{9cx@F9VeY^N|Fxw`mF3aS_+(Htk=2c66UsXykabbY_Rg(DI_x`XuK zQCa$y)jVwjVUk7zAaW~2I}MFHp!0TmXM9sMvEq+FeDx3$4(UpZ^&tvd8H1gx%+>^+ zdq;o`mHNtTO&Iz$?31?QRNzo~$k5sEP;KU7fGK__t2g-g=lM=y2}+${h&_^f9#fK3 z2s@;o)GR~cvsgCheD-PnZoi-T=Fe(*r5eY~77hxp@#0ZS9R?AZ1t$0<@xsZsE!f|P zQI0`bX9$lCFcOqXzyY_lJXKKV0PRlI*|Kt%5fTI#2iqoBhn>#)6nYcOV1%1g3m$KT z1-{X)*eBM_;&(8DfoS&;-J&FP^t1ONt$q&mAi>_kVwme zgEs+OoN6g*qu1qte9-~{i>jSWfBybe{Tn6UL%Mv4+kKFrsY69>W%~ACxBcs~gxml5 z`oXfdq@j-@lYq+cEd0SHt4v+uCJ>4phmpe7GZ~a}#|2v>`+A}_kL-5zdsaGM4$KwG zl~eXW;8G`?2cVWtEl@yVYY0%`82V36W0wEaK&_Rb(;ouumt5s=DjEp%9G|byWfO`{ljr(`*|kr? zv?QL+Y6bDAzWJx7ntY-iFa(AT*JTmkm_~qEYGyYWdq|g8%AA!I+L+MQ6e{=tb@}@O zZvanZV*hHQjYgSzA3=idhaS^QO42`oOM>nFSmf<;IU^kXf?}yg$!ccYCQ+|hRw`$p z-_vu$^?%(78IwcH&i09ZHKopkT+`)0_@a1PfGG{r`(>Q5^DP-Y7E0o0*41npJ`~o- zgZ61j`tMK#z?4CR-&3P{ICnGX2b*+1U^Z4pQnic`N%Hijdv4$t% zRV;in@B=KT2q4@s>V2~;t&Z@-6O<{5lU8jcjKMO0JUL4v!z9~ zCVx~$WT0axR*`^wKL9R6n_mrW<%KiJzZ zG(e&|A%F3VIdcm;u<&^?p^Vhj(tr3M`4>WJ`p|u|Ar~mJf1@wWR%$b&d@it^{9rtC z_9H2gyp}suFZzke8{5z*|HjN1#lPwZ1Px-Z@_YSVS-<*BJJDdNRGxMn#RFiw<3pMt z!Zfhe1d(E!OcN+2j$|#Eb?efD5urI1`mEfs-G#{k_)ovp^(H0#uGvLk3ddYgkA!Nu z3nO9WU_DU{&HkJ8yM?H@= zAgKYw82E8Y$u@FFPIg=HFuxPrc1Y3)5$(yKdrN}tyTtdRz|?i34U#nWi$hEV6933M zcD=XBkpDZMQOZUOXv$F0@^x$=XPbeEf^BIFIYYUoZ5csowbs)VW>NqXRLvpJvC2-z z!~kib5f~*H|5l&ZcbH2j2_N5Qbl%S$(8JEaB8oy2QFOab6VJnnDss(RiMPuuIoa*Z zmg=?|QM^7cKa=cb#3c#G*EG^R;_(0jEsdbG;<{|f&VG9bYF-x$#^CCWdOak+ED8YlFFlWGr|%O6Y*G3o`Tenn{(CIDpW*KQWzAwGZ{ zqhLVNmm*E)g{1bay?M|zl-28P#C7~N*Lt58;t5j%e5ar*kUF3TeszE&U0uKrY0g29 zwo0jY>iA*7)ancxv?adHPmp*Z@8U(()F&`#AUBJaNv!pC@nfcDh;n@>#IH|;15-SG zheQ|Wls?f*U4X{YB;j?S&DbsgloztploK}G4VV=Ei~isR62&3p)=M$M2UHd{R+3o9 zid-VOBipRqc6~i0H@|P!Z_Qaf0dqnHAz;6G=#GD`+G;KbpBLwYFt8;)UH zx_4)>NaW9hCY{cRW;)A^E9yB50Jd8bzehKVUmtkn-!^O+iwFJr$VyObHvAGy{oK13aD>P&k2;_<+N#k-l@74 
zqNX26zh{N>u1pkrLNmwLkg2>4u`0Mv73U^HTL|8_1GPNx^BwioPQ5}~Rco(O>>+wA z2E`0RIPXyfeoqr5Fh)@(0M@?YKBlWr-F`aWJp@@m@zH(~i<< z2j~n(QXF!m&86U91&kG$$p=WP$l3S?Wi3H?6tFAE+Ja_xFFGviHVvbk+&5c~DoHaZ z7Gy>1^N$#r&J$cN>QM81NB-2whRFN zg)%oD!v|i0a=&ofF31aET^;`#NNkZ@Ly{WUK8^n0h6d8O#8t?nh}(Hph#wWNb-{pY z9|+xxKpdlAz_&;C!KXiIQm^ZBIFx+a+G&|1ye1Pc&+C*n%8(v`kPii0NW@0ZK8iBO z;1C`v`~6*qtm(HL0^!sO-j}VZ^+hX+kjZ<=w9V6tS^(JE_8})K(C|7?1CsbA};q2d=m41d2mwGgX%0$bh*$MCI5STCGA^=qJ>c{`DUf z+m>)R&Qp7tl4AfxiSy<8&KioPJ)oNg1I8zD0QUiRg?8;=0Yd(^RZENz)rhuDZn*L9 zF2s!~$6cyeIA-`580qjXESeY{TiPBiJBjB(TZ*Ae5>k7Dd}xUQ2O>RM^LCo4EE?AkgI5qS&1x?7gU^TZyU|20 z{KM8xV$oX2WBZ^f1)SRY(x9LRnW@xkLi`InIEGEBNVQ!ypOXmVscc-1Q3g@x#&$yl zr3V3287u~}I6x^jKiXOLabU@O79>yO0#OdB9P@lQqq06n6~^4H$9&k^ugp%K%u6fL zf)?UX=%6c0l(!uNdj{hDG|m>mFJ?-DpkU>}DeZmJMAsj~f=35AeIZ^w zv0}yYzjRlwccL! z>CKPd4YZHRj#+;4^IGP+$3DForuWtR^|zO7KJ`t^g+IOi=*`ui#=Smo$7gRVRCp>7wtp*Ve6kgGdoaR5-#M7)apv#TD+sIYySp! zX6&AQL1mFOW)bAYJKUa?ZGUs&XEyH!@hx84Znv_-=h>D`*49<>Eto#L(DEd>g%vv1 zLhVl>gxMZ)kAdI%fV})1Yge9UanGV15zi*JTUxxP>v@|v{Mv@?Ns29%raH@)?_53- zx#!27N`h_m!iX&hM*X7;~M{j#!=TrGFKIeNs zH{1|dbC<~ainw@j()9Wp6OnCYoP|vvo6YhiL%3zt=fSt5jCZWT@97TjIre6yFX*_%We8vf*#JFjz3SKZUeM!L{hl2QKWjMf^cC*_U()Hs zfAQPb!}0pkuU|d)Zc-!Mia01eo@}t))s>4j$(mT1V)+`UbA^S;D^8v#M*q_0 z+(vY{+;nf>mA2DYA9Q|B+;mrVUisc3CUU)}mfMfXK4}R0dGG%HW*6RV3H}Q==~7OP z@(D4nf#+L3*sG`yia34NZ1a%iRr%-K?`ni+j_(WJG`RILdEebo?aRrRw^+R9rmc76 z(#M+uW0qzX{jzh7sm&($tX&KLOtjpYr~Ghhl4!QOE|a(Rgsi!u{k;9o=%V|NFOKdj zzW2+OEAKB(*D+0$UoX>AeKFX=zkJvtjr(QS-j0`Y1@IO#)1-xZG2q^n@Ao_xMGk0`{SI;{& ziihp<*O<{IeJkhV^Nfr?KY+6rt~;-V_#T22Hj}S&zKPl0v!`!=(%=V|t8E4w z@g^$%IY=4{zdDD%dmks+>~~+J`;(hAi!;4jI`;ofew}_%xfwqQ$UDAo)7|&yHlFy= zY+mN}9bSG;dyMC0CVgdVxFPSuU)1I{?z^i#ztQ!w<&tv-=k?|`KL0Ute&d-Fvh#m7 z3ID3%=)UN0Wj;T)EZR{0*_D;&iF@v<&uzT*`S1oaKj%+)p9ibk?>~Qd!MV-`=CVs7Ow|_+IXnK|P=JR@Gr05_e9rqoZs@tY7h-sZObAj<>&n$=v3 zb?rG(x1e46Ge7j{l#*^4)Fm(GxKprSavxV?vK0$OzT`?!cdm`M_?Oj&z4Xt8AGj5>yu;DD0qY8l}%Zl;SfrcOGP}u1n3zoroNV97RH*m 
z+1yTiQQtJXORRq<*PILeV*=<|z9ZlbY~nv3#(lC23*+b^w5rAm_K!9ugzl3zMzv&OW|r@oc6oeopG@hjX##seqB<{X1Gq7)C9Xwm@?z!lFftU-9%sjB3%`>{;84?VVbbCAt?j-iC|pb=1~C!O9kN3(8Pnom`R4 z2p@XX5T2#8%XqN~ZXZ%i9^XdvaOy1#-qwajF^rE7# zL=RIrcZSg2ca$t7UH3lAr}PW9s*&w8Rv&KO(XyUQ0G zk%kR%{{DKkR-@~7h4M+)nUS--<9Mvn)x7_2}jB6=%AUG0o z@Ta%BK5BW?^1g(=I!i^a@WR%fABH_;+_Vdwwn|$&rnMN@dTnpJD!_Rvn8^;fxMTY5 zjO%e~xCs;8!Jtv|gSH2a&fxN$-7PJ6C5SCigThNon#k)8l^V+mPgM;)c;*hVR;$?j zlY?7`6<$Y=P?DdsJ=cK#NNIkSW>SEW(Aepqsz=X=EINcQjaFBel{ z>0iEKx@E-QTcxn`I^mgrI&Z4~*8pPb#Pcs+)j~iuI+CoZ(>BN>mSVDASd|a&<(1CG z8Dx)hy9|4LN3&maf3g`zZ*a74aw^g!S(}_oNHgww#pT>3jF3!K6Ha5V$(!x59lg_D zi`|S)cRedxSIT}EF?f;~#3_`FuyQ^BTJ?Bx>jrCaSytjv3@fsr(DROa>BGEOlVGV> z2j~7BeOBfyjV>&orO;_N`w|~zWt4}}dy^Aa78jpr(i%xSL!S+Qj~Z{{tj@TQc{1qA zyKC6QqBBRM;cYFY7lYBg68#^SR;g?Cu;Ddglv?Mk&qJTP$}H9qnK|F{T$~K^R}rHj z;ge&3c}A&v-rK(&koDezoQF+RR-gb*M)sg2dNkLSdci&%xRR@;ORKo&BiZkW9L2D3 z@1OvJl9@Dk8Xt|(b5SZ^+p64nW6`h0y#~N zX=yn(OtNb<*tEyjr9j|nYd$M%a zaJ_J3j2Fi{qF#mKCC-V&8!uOXf_zYDAo~DSUsqe%o(^`EL)>-&Bp);D_=0Ch?G}L? 
zzqLO*HthDj0D>dMb1_E)pZml@MOxo;2@*oLL(=uAnu+XsBoVR6shGl4qmBuJwips^ z7Bb6xxUfE9zc{UbS8frPtqMu(^L^ZDQ7fQWX{z`Q_12%INPWo-8h;&2kH^v%$2*s3 zjIyx9A&H&teSh-_rIGyo{$8TRdeiMyNs!bjx5%Te7xlUUUN%&*y>tn6S^2_rF^(qw zG^81YR=n`dg)F>X1WlD2ABCx~>gknB%TXXYa>6U7Y*XQHU0yxC*i5b-vhv2Qk6VQ2 zZ^(EH#&7vtiL$&~WJo?37X4Hfp4Wg=yv|MO_7krPsh(a5`>>I7q4`z(62&5Ay*$fI zk}j9xa|oArM2(NFwtrHE!+gms$4g)ILd~$4L&MX>IQn?kkh}tX58iD=o|TC0)ULJ{ zmOu;^*-Mup3r>LTHy9`h_X^+com$NK0iVB&D?_!=)n3hMN?E@@+3rh*8a`Ky6WeJu zL6knNx6Px~gyQXwY%@51Zi9XKAF^%A;D!nk-$w~D6` z?6zf0+1Y11*P{>OYD$x_YR6p{bHbXOgWfPx?8G@FO2s;i(@IgektkL!Y*atuSTRvU zBkQ^k*WtAWyVEF%hSKYKQ+HMLdVFh=PEa6$ zlNT?%sc1G(znm%S5){ZmlR`ntG+gmW@*~EJk+nj%IvPbGL0KK|O!1O#QK8&~_rhv^ zTeW-%LoqjooZ86C>fd51Pia4@K=PqgR2g2jHEumX-I?m@m-UQ$Jn8E!(wO-{(WhxD zMf&K^iTB?a4vjqZe^?*xzB((qY<%4v?%hMWv^#E$%c^}=da+y|?TRXmk4tKC&?x7b z2F_$4fPPXtbB1+&oHlDy8FU=&?2=!FBITf$XDH zJAWsaGG1-P$`+jfE8ayUo88jV>o1U-d67jvW2CvbCy>3KYVMENa!b|nCrPw@uPWXn z2`~}y2tj>u$4U#fn2q=gz*|N*2F)#FX_tzpHkcVrp3&5LZ5Ds;6z_D(@&xOYm1EW8}_aNgjRCvD4KT`u?n+edILZSFwacoW)`rkSn}Ptntj?{_3T?MV~4 zd_$IKL$%{Pb$r(fr{@h<&547pl+KiDCR zfVVvunr@i<<{kG@OB8$f*c)<6q8y*fOmC$m$_=>CDEGc~sxaiakSX^RozlNex5%R~ z%iPVW^^9^8LhQlrjK-;@{Wi7)A9uZ(pRH;eNmym-STgra@V9*PK`EkohEOhF?z|C2 z;s&1y85bu6pOnyGX&PT197j&M`E~ojsDi!_ywcM&3-rrm2Hr!4rcU;{9Q1g0c{fiy z)_k0hT;zM|5LT+TR2Rnfbr#I)nO3T?tAfTGg%e^Fs~ar%WTUOgxL6GnF9YW0yE^Lf z>FFw^LFI@3;%y_HHQvnw)2Ve!n1Q3_r))!7EHk-z&!)jt5{hXTwq|-8dN@4+fPq!HV=0kR0FD3D7Q*qNZiUT=ERf&&Vj#Aq3-B)rI6*9S7ga>v4_DC_UIldCm4-5p33Rb8`t zpXs)rKxfiihx^E{yH=lq6PSt?n{wPP5Sg0jiRKGvdcAICIpfdwK_I8kc znhlr8+1y7ch;mM&U{9N=yA!^@CWu63cSD52$^pjx&TzI|LWFWQG z@Gc8VqK|){Jzf{vyE_ZX=W8aUSns`j!h|kd#InU@Jcey88Io7nq2_E_8P>!(qjdwa zv#<(N3N+i3#J_w?y)c3Z7K-w+%-&1SYw=Q`S%BZ#ParG1*@|-9(^}W8B}pF+^{Hb_ zc$qxpRlzN6|6M!CeavK~Nu-LK>o5X!NPaa&RW%0&&(%msw}?MYljc@ebtdMEgu7#@ z`REn)JN(Q?xZ#NCKgs{lGp*bgui6;0E>q2c<46cWjxwwfEq$Nu;`n0}0fc?Zdho%d zw5~{H*pOJT9gE}8mncUmg*4KEfaH#FPHESLc_%-V@|sW7uhdQ(95~c1Cd$z;A>iEV zM_=!-=B4roZuOA`Fvv@pcMO=yNAkQ|PF!of-EWhIu806{x9x~wv+Vr}Q=wMIuqDhM 
zYZVqlI*D%B{3CgT*GcMBi)Qe5sXT+ssZ8#r#3qlmhnLJ1-S)ViS(~L|WPRVTo?r5q z)$84w(Cp$tE4qMS8J`v&mbS zxe5s~ruaNEDR!))FJ5jSRd;N;8C>~oGPx0NjM5lxBv=)DH~Ko4AVu2EGm1Q^GWSsy zPt{^;{^cQtt#geR%)3)!f+&nrjM6U~KYG_&`g5Bk6$nIVC1lLn!xITxj0#^oM?orFp zyF1gR&N&CLQv{F_rGhunHcaV?RmM#Gv<;)LOZ%kM0TwT`x?$K~pS8aIejq0}f+%gv zUfv`-XiD)k{AD;8y@>~MG>}}()ICXFXhX&74X=mfP&WPXwjw<|jS46C5`joGA+;6!$u9;@RD}@Bn3+^0f z;=j)kFb4FRZ6xaI)-JGho6WVbR`j|wH|LY{jt|S~OYXz{N#Rzm4|xw;CsM~VS}vsJ zHpvZm+w=~;(;mLfd}6cBBPN~L)7gJdrPQeglj+95kNOjs;%4?eyHHM-*)w?zu2gEHIKMR z)f%P|PHM5=x*O3K<=Q=&aM8A^Ph-}?U|OTbOINhP{i|vGwZ?Isi%?R7n{D&>YdMM4 zvsLb_$3DHyt-}#x+IjD|@1^)noD<5?y9}GbMVky8B0ndSY##Fp+BFWDmbqI&r> zowOpw)1ipxL2p>n@SXkYEn23L$mxtGUJKu$nAW;KH`c*x{l^7IXYI_q9@kKw*58hc z#o)$MTUBgprwlT=;cA39W(G^Y{@{%y2!Z2|TOcP$BgAfFX>)ey4>q~wnb7jE^!>H> zOlaz&q>1=+yI}MDexE`qr2zyzNOY$UM^!{wStQ#@V@wk^)(-4w(b|WZtC$n&G<8l7 z!7(gJz!kzp@>0$ZTr1@aNH^o32{#Y=K*ADblKomuo@UWnHJ;j1HE(0fOM8$mnU8o& zhSR9Cvy>e%?D013kd!@PZjc*prSfQe$GpW=bxLW-l2J|eFnmH2!N@zo>PNRnTHQad zz*kJ$nHpnB_QE0I$54T;N>kM^&umYN;8PpO21#qOSbv_VO)v}=@>a9_ejBKVBoySa zlAgwz404(`td9TdiLOSE{8rae_lkBrHdv;0|B=d@4!ljoLh!Z*nauGU&1srx8D>fm zvyS$1qG5J4CaSno%l`D%r#R?T#ZF7s&92(8Eu1uymQs0@J~?XLgnuOmso3@K2`QhGb71 zLXH$9wq#*ilw}&B3NM@{L=E2Tk8h5)vG4G=9>^B$PeZmIU=tJZeeFaR<_`I;vf0(~ z*P@qiY&30Vtt77Op9}g~P|zr(k@^HJDP{O>w?Vv8)*T1J^=A{faN&BAJw?03^GPm= zc-U?!^Ko4`l)CoE+RfaV9<>ljx^xrtAYGnchI2Jj z;fLXKIply=kFgkQ=fKgR#M_XH1SPJ?`k@T=`jb+)ho$R#+YGuR{|<4)mPqERk_;}j zb>KVcF2W_q<)l+9Dz>auz@cGOr_8C}UbxmoYqgM8%lOj6)p5v?;MS>vx!sUhp({KK zA2N_VWYXJ;+%2L#?L)sYHNDd5*h4;_BqPM>D21TKmq%!9mSE!J=bp<<>3&LRt7_>hv3Jw!u_ULEH=W7d zRTZHz6Kaw41X8i5Zo{`5mLp+A6@NNJvY>JK5-xyUM#*-!D1aWL$lQ#d$%6~C3qC)< zYm1q@(DttKmr&>1xhVb!U;)22+S!~*+9mrfZ0r*N78z3I7sUg(_azrlHE|AlacR#h z)F@+bx4ex&MdA3QHy$-*D_dMs+Sj?5pVWA>*Nxxn4`z-xb8|v!GP$s|o!?hOxdN(e zv#}NZPl)P5a*$|vGV!qQVmaO>XLQbvDVAs@B8_=ZEY^CU{@3G1i#f6QFeq2yjYBE` zaazPUixw6;wLF5pF$MTQ;x1GwMJZs!VP79)T7aiKYF)2D3(Dc|C8Grly@R3ACd6te z23c4kRV*96Vg2h}*t;iB^*MAdbb_6c+odrOL0g^03~+*&9L`lXL-GNzf@A(j1k~R~wWShb9f&8AkZPIc 
zqWFGYGFo?q8w`4AnOcOR?p#<;e5@7_M{*5&&FmXti zyPwM9|FpbKkzyrX96OVsd^F;PIV18nIU6E?l0%B6a*H!XAj2#ZFO)~ZiTi8;7~sD> z1>4PBDT1$E(tjb?Qm{R}CWvtqYp_e0E7H>G`-dygdXcEvph2c_X7uso00J2g`Cpm| zE#gfieh3d+F{-c3+3iq|C8k7pj^774b1Qsrv`I2zWAQ9$lRSDK6n9fBE2`uyBmc#? zi>JfY4_Vo;aK}YW__;glBTXIPF@%hS_f>v&tg?gNHei6b@|bmU^U|6Lt#H!XRZRb$ z4t2S?l;C9o5CWP`*YNc#^dqi*W)XAI7s;fRtC)rrdSmYKmweplhpInvXH4hZ^0oD zxe8zXX($V)CV~-O4Rv|NiZzE9dnEz-1;xzGSz?0jBE0tD({lz5?p`TsX1GsXV}{w1 z9%k=oFwZp05s#JqIKpqyW(p8}w)yAZ-}m)qdwGCfo)#xx;%vEJEN5nDCOf;wCEtdI zUt%vsxjbu{?#_!O{%8)N@ZQ-Dp>w$Rj<7Cm4ai@k?mn|R-y-Z3t3{t%Bzl(C;j(RZ zyvUmim(NSUehgXvs$knJJEh5NHa$Y#=CXcX27{!h&PR|hfj_r0XE8YBZB0A=G)m-4 zkUyrg=rH=86Z7^Pn&}^zqs*jVoGDhL?|JguyysBkt)3m6(O2`hE-}B%Gcw}V%;nIb zZQdNebjFq_sYhwy=PL{1ZRj8MZ~jjjj`6_QcQmqNk2^9J!^fXbIsJLhS1Uuqdi;PK z_4UNfO1{))-S=919p|9Wjb3ViC!{e6r5Fi;RlnC_&bubB8NB^WW~cW{U2W4H8N^IR zBi}qPI7Lc2cb2T+qV@TbGLH@?8y)@Ue4F%r5T*YaPiSu3tFxr96p4Q_?sc70o2V)P zTFi_aPCszJK0?hZxyFA>cUo_iOmDfMMA|V<|Ac*msm<=wcA9rQkPGe+Twk(&s;^V) ze}~+n>c$7O5MlYqoN*=i9csrG+DOYe9SXlEHcW4H!;$ze{k=%KMBqHt0?)uqWG;qs zJu4&rF~j94KkoC>Ci;5J((*s&m#Cjf4ulv77jIjrR{S-FO?mX@M2>b4PeTMaIuhb@ z*K)zVV>Ncg_D>jxMOp3cVs{&cW~fe>vLB52)Db%YFVbF=Uo@Y+6J`9`yYBNd7BoJQ zv;98<-Ur->t6B#91rVfDnZ(lnn&tu2$~l73|FHSpt$t@I&SzCvm&tX;O0u?oq&pmP z8{Dorz&oj{4Y|HEl$n*yd!{A(q&mqirC5Wy#Lb@4o!*rGAhSv-b=rtZu$+x-=+!wi zk=*_3@%)bbel|6T_z9P%>sGpVi-!tXSd$;*-7$YzYBR?F;tU9U%wzsDnh0rv;}bTp za+h^$>3=qMd?@ee!Fqk(e(KII8G8|^c0F~cISYgCq#6D}{WsPCv5`F|zOSv?qXxAh zS^n}ROe-pOZM^e)LSOGz`=wT5^LPU*5x^YsOs4y_kHjbCpbiy^*AokQY5fT(LH>fI zbO?0|>P)y!>}P?xO=F*L-VdIk@GISDiSyp$h#>a+XK{QR#9(9RyltrRUo!5@@^Y4O z56oT|P#)cu7Jw;Z^X-73uo~eTSVDaOr4&9oE0jY(Nk}qBnNO+?+X0f=xErCQ&F32+ z^?Qdt4myh1u%VI77m9L7#Q!$Wz-KO7(qM0hDKDC&`RtCuA>~F}sxbnjc0&T)7Z)Bm zher>x`}4^N%I%-heH}Y>*P=N?eDB!u8TvqYWcjtf`2gW=zJBzkOKr4 zE`Q4yk@&EAUJOmlDrc^eUwVik1cv3}2n}z4vTxg%4@ zV=$cE_vUNb8(H~(4CFOKYY=O7o+*INA=_X|PKWsJ~rB=rBGM+X>eJ zV7a*M9W4DzLV%9P=yX7e?;qea6sP;*WGJ3MlKm`k7M&tRRXtE5| zZ!zu7S3@{|r~T9_z>H>Ifb#-(?F`rZz#hWqB2{ei{%h-Fb`dW(k^$^SYv91pt5L0V 
zb3TBuV1QbRV(S_J#J?~l*r%~C9B#IAyUtpJ+_m8opl~otJkSYH_THHaZEk z0^ToPCN64+>mz3CV7IwQJ*qc5G!x3>a#!X!o|`9}G#+n=`JczB@j9Pp?OEZ;ByzHQ z@b$Z4P>+M^vw^L^!P{ZbAnTae9S5ES0vm5nRwkZ*efcmUPtj?kWu(c~Q4@oqh#Fzb zfL&VMVrK0T?Csr3i` zs(60nfNzmIMLm>ZfDy)YI#_Td6Mx&!q&?nDt-`X9IY^lyFAyHOkv3>S zT2Qf3vA{Ei;jjpo245BE#da%e@8=}8cpc!C!L4}q>ec*p3oj^j==LY*!7RKzi=F7w zcH}d;;aa4)QU<-P@U8B2r{Ff|_tVT1Emw-vIL`zrznz`0E~N7yr2EuQ}XH(EO5InToM z@3MmD1v2FFdo#Mx*>V|EOpb*`rmx?cp*t4yuP!m*HB-E1OQ9JYEn~p=$RV9-P&I|} z+>xvcnQmRTl}QLQsF#m-OM!Rd;-!DFu0aeJy}(Tdm2iZvKzGpI)chu)Zly>)c1WGFEvYWV*d6!APU zZN&EVgeu_~Zbc@G0+clAT>U4dNPCd8ZT>D`7q_B-WQ$YqXz6m=3MC7gNQ&|9cMqYC zLtcJIZyU4}WzNp{5U^@i=63VH-%LDAk9l&E>Yn~Buc8_O(;#{g1a$|7qPeQt3f%d0 zlNyu_B$U!Wt^}|j3b$4ZpYpp~c2@1b0o8P%w*1-k5%$*&bUk$&+FO75Mzob{8gC9k z70o$p5t1YgaeUdvDiJ&K&6K*VUVB@3`G!dFuH$Om ztF6Ermg{m6v>ZxIgI-+jdC=l0iphk0^Q9 z(!y*=AvOgL1Jz~t+-R$jrChaniRTQAN@*y=RhI@Mq;Heh3T17__1^h+2A`Ax1Y_mI zeGQy(z>6R8YPE8ABBY32K~}t2qO=0!Wyqdh3k?iub;yt7`4(=jZWk_3+;JO{VI}7E z3EHJlv=c!Xp)q1ZEo%$QuDt&8P^3G>3aEJd>)yo0R|FmPj|mk?S(YDu1^O?6B~baa zwLW~%)SsgZv&Ye>kFR9zbr=x)Vc5Bj9LRglRw}N~1wxwH1~q+1J2%^Uoe0a~1 zX+C2K*8pW2^;L5-_qrp-pjeqXK0?s9;LUtB-pv`LPj~80UryuhT)p}|8fO5^ydEcY z+MJsFNf4-CyZRV6+B>(Z(_yj4~`4C^yV-_&k!yJo%t zUJp^%zJ9vFRAnHDkQ*m8bS{^L{yPGGahX+h3NlWvxO4nlL-J81_snyHcsq{OU^a{W zaNL>_{|uwYva%=FC9dwIhtkB7O@t97Qp2HWM`<^`WDHFD1u|)ij=0YEsnZsvx*kg)@~7XVNNcUp+>Geiv18SEnH?Xh~8>ppPw?zhO_5zOoWU}m~P zxOqSm%TkOA`eW!;o9zqw9NM@x=T`Zf1|ID}+O1~4Y-=u?48)X0SYbpmbxE!^@?9R=zji4-xAlG$dM} zM{fzU#y2(x*u^MZ8V$vr?SO@Apz}If^{x2 zfgCd4@1({^Wj_l1t+_bIwO(@@Ai$X>w>>oF;`*dxfPl}!i#UkLG3CKJ~_Y(>`)uA+#C&hSkXLS4cvGvSyLL@h^jxwu+dtR$%qI$roCTZkqI0$GeFqYW^sZgrcU zhWIX!*!X?ks&;Q61R9w>(%cTX&3}bQN|@BM5)%t9QKcrk)1T^TF9=;B!CG#MJM-}) zZgoVAg!zPR5&tYNL_oy>02O0>LfM1rt3+j3#A}Jo9sq3y_H>v@i0zTz!@9l!S=t)S z?G+~hYI;Wx|9xJ}L4hyk%4tuLRfm(_3l8psZpgAo;)+$w;GQ1!FQtAy^mCq!CKCDG zdJ5k^{sG{e0AN&_89tnM08ktY@KtTVSAVgKL;%Am(;k)-K=@Xhgl%V=+ZLM(zo&hPD4_|wqVy=u_eZU&w7C*HcpT?z`YL%Nu1RQ)e;{40u; 
z%#5R;!QDHB4mD1BWRy5phecU(o8u+M1+CD4-&~cinl^8wGsv7{jn2Yw{9IqZM1&6` z9DjO$kMHKJ$jR;)ywYLi-3x|(;W?$WCqnlzwDvpw(b^vq_(@y>WX|Y;&KLsDS*@7v z4!bl9WVwhHYA$55p~V4(q^GcZ6et86(tSFfl3?@iVz0zk4 z=zN%9U&pODX(w!F?NsrsF^m_#_xvkHxA(}@2drlNhY!DtSNw{3``f|<*8OQNLj?Qw z7xq>ANhtiZIcsWm|KzwU^g*1(TcN+I+1pYCwhNK_QjjwrQ%qWHYj}vKVvN6rLa%K9 z2iMQNdnB&u>eIU+!S+I1nx%7+d;ACP{b>u84W|nak3Flx7ELq;V_PO)8~YI~j(wND zescQ9<=^<^slr-2o&@-KJ_0JQ%ZgYtD_H>O=CR&h=@0K~WiaP|QVu>{)Rqpt1Qktr zkAFO<_@Eq#o%?g-gnz{c8be#HP5x8{XY4Dv9lwmbU=Hg?M>NW zo?>!%kD#@9XGWr%0SO)|ZeC}2NsjFI-DA3p z=wnC2*$buL?-^?f^F81Hr_#<*Pv6Z?FGw;1C_46V+jfQzT9j^TC81r zo>pjF?-#GK^DD~Qe8w0rqzp<1ZAY{c7QPFy77!edwINH*uVvw_01uRvS`SC^PkC47*!?yKG|}D zltScpdARO`jrh}Z;z52HiylA4@41h8of>Jr!iDuUY4?IDs=oHpJo~3Ny@%WK>f=n` z;RlVq{qwC)@ZcZUHKGC^ATiwwI zUbb3OFYIsk{RE44bmK!SVP|96YR(Ob^R^ItvQ{AxY2ss)+i)uo7WjB6HT%+g1G`~Y}y(as@0s%M`VynF$xy1K?N5xoD z$Hwp2Uw^)NLtjjn1_YoZF}m!%QMpC!OGG#VRZY+&jjS4JR(G*in=i6}3LKyh02AVW ziBUi><$mE3B^EwKPHCH+f0p3hDgxQ+%(b8j^HQmcUOt6(Gv8sojeisD4?+%UMZ_DC zl#$#|1WFFu$&I`->&?)aIyrW4Ks&$fc%;$u2YCvfjs8+Ck{6yGW6)T39d=!BRf`uk zvVX)_c`uH&aXV1H?6J*7#F>1ROhmlMQ366u9&L~{(NQD9Fbp~e&r)v6xXfuCl-=+S zBa|4^-rP>DV7UmJW}EeIPd8jvc;y{hB+Z!BkU;y9FLfleCr1{2+P&Y{Nq?Wxo!A%; zJK!$B$qX@M{pZbp;y(fOchlZOv}xbXRt@_lxHSFDU~ci5?(uX@4L>-x9oZX|k##ps z7JO~vy|`zh-lN_2JND|&*ZjXpWB4aB8B#7qGj2ZqKhnxD6-XJ&9s2%7fD_BsBQUsx z?SyRfZf>532Knlg)2e`}-%Ebl^1a980(Fjy~ zY;yDo1i??w3mS|-wJFd^m}SWM(Enpi5FqfTWRm?b+h0hW#QwCPeIaA#jDt004ls;S zn#eUmN$Y&|iY6sOnLc!}6(>&;N{Vgyn}nUS`~J3%K~HacZ%#oy;X=o0&7XcE4i9@a zc<;2IsI_R{b$BPTuKg;07+6;%j{Wt9CSw(!AS@9hTTzxAs$qi41W^wezW3^%FVtaJ z93YICFSCF?=fE9m*o&%H4BmcNrx1Z^{hZkkWdl>;wA<0&HJyx_LrDs2kY#--et#V-YZ&b9 zA?0 zL{IjOp^dfZnYblRU!14?rPQ`W`_IDN|9!LzG^EU9j|mHQ zTMU2=uwG6gk^<#;`w&Srw+N&uLSZnN&;_k+VY?~=fziF;X@)(IAQ<25a&wkY0osJ& zfAb0KL*25o?R3%}K`8M7Eyw(Vs93D#s1mMZ=apr(ks7pL>{SxjGd2#N4R#1l8Gy6g zGde8gm)luA8{-#g0C$Lo4t+Y2HLzN<2S*rMqUJ?i@ z?5FDcJ8dF+tx|756-V5Nz$HP|6$}H1MK5~E0jb39Np9qQUXxR*n{z2X3@9{ba$(v# zWdYg(`hp{Y{nPzE4Jh@YQXuHF=Rh|D9u$=u@Dxh;W0? 
z55h&a9wMpxsrysBx7XKpIdq&ZKCN8Z7s1QxzgoKSXYBju{x8~b-%r2q?f8p`I9Ka* z6GIgtK+?d7W!ENtQ4oxOV%La;IwQKQvo9#QhAiNfH}0O*1Peyx4`LQ~iT@$21Zqv& zJz@f&)(6#3oLk+mot8)q&s41#TIc20#yh51p#n@$fz{<=fk9%+yXa-o7kAky0I`P* zq8fvx>ZH0t@AbE&1zs+5_b;USJ{CzV_rAnvtHS3Q8yO_6$_k&rY9Q^H(5ExYgHZ|O zbo!hMT~$kTV3!|9i|^4USF3#$%m&z`bF6UUqd(I^K(OT7^I^J}Eyy#i>!5pmAb^nD z;uZTwUyX37a>cG3OTQ`Zi!YSQa=Scw=ycMA!x@p39$`2CcUthsCG{lave#d@9_2CfJOn*kOkfHGGs5q%wxt1-ZM0d0D|s>WLZx5 zQ`4-q(C&^=)d2VwX?Oqg+o1h80n3+%?l0x2VuwM8)Ov$lk3d0H2s7J4Q$*^gO}pcM z@FnJzI+2D;2Z}sYjq8RMjEh&i{i@``Nn*|&y|F5t={tWmHE`8KCld#SIlBU)$N_-N zC?L&f;?PNK0WCL|zA0eE#UI5uPn@^BZD<@~Lb6pItOlhfq^A?ne!%j#ozO$N!nMGh zzyWd)I&zL187S2nD$pFMWIv4v{wWS$sb6bGt0CGm7m%F*L}=karuTKOZOV;1g1XmF zJJPD;FPNX&xY@gWe2pj+O6J+J#PvK#a~XBuPC*wm(t@ZIJk#?3u`#{&UjE~ypvizq z&662)^fKW%4*DGTbJ06xEA1xgk@QzolU<(ng6@PU%nVzBj5foo(j6ZmH-X)7X%7m% zyR~jIpf?e@6Wo9QNacBZTG!AS52D9!f4&MHiAX+eOZDBVAZ#R@SkZ5r$u%b!zZS?{ zEwxb9xPx?y-_?t~xh{=e=Fl3hVU@HB;gKju-_s`#n{pEI05R_J94ThR{%jSsu0sJfPYwoKmb2H>ick8jE(nGDLc4eD}{Hsn7lh3a3? 
z=%puxujY{oJ|Wp4`!TnU=uS?pKa{J9@Pm9z1f~R}=jX59lE{~1<+NZFxOdILj{{12 zaAe&vU)|xJNk-3a3oXBPZqVkR8gaTA*YSnR8jpLu8(#?IZZtH7OnimZ8Rpl-dT>fWA#HD8EcT*!W(Cu4NzN9 zt&(j1i`s&xxo+|aB{Q8U8{w-R&X=Stlb15Am!8*wm;(2IMAC4(S#s9k?zRD4uBuzZ zB@nRsGgHTqm%fyN^UO4xrw5Vq2U3sp&c{bouw;bLh8UkmN7|-WwE+3E8A0geC6;|s z3(tIEbm-s85OA!nrrZR<1rQv2cJrB5@+@{;*5?yOMAV1KgP)CcYqi(23)e(ajFwL%?wWjC z*>Pk&Y1wC2pN83PP#f=$n+h0r-YcyKBFM@!n+m9)JrN`us?_VJjy2tQKSZD5@5xWi z-^J#izdCPYUfvOVGNIX}eGM3Ao{ptLtPhN_7Z0GgCS37Zm zK#u}8zy?#t=q%eT8$R`kBtm^LV3tAtuP|o7e5zj8PmK54Xhn}1Lr8OoRv)RATV^T% zoffhex$ofV3tN+);0IUsL)Y-f3OSmgAu0-yVdWt3#MizC+>-o$Hwi*g=4HCd-Fgeq z-BF;$V@W37C&&kl4aoCV6h=`lWkuC0%c~rk%^_T_ng+^=q|G^BdQl!b#d|#UkTL5!V zsX#Cugv63q6E8k9E=^YDf79*{UF}-M zYmp?iP7)()b`@c5R&>WnSRAD9ly6Z?Lu5+AWmy@EAj^ue4kjFiu6WR!lfKf~@XXr{ zCBH1=Rz(!(julE^7BUgZ0GxwYp(SNH;$3;qR9>6g6Qvgq6geZoI+Mq|GlW&0VtYCO zLf8;*dI*z-ftywWq^Iu~+9RF~PF?P-Kw(tTGl#3uBe7&&TL$LMua)svRVqr7s*mb^ zTA1LI2+M~^!+5vhz4fD@P1A}-#fPpYb2-vxe`QmMN+qSl7Updp%VsfMlx z>4_V%XfVDi8cMJ+KN!lKkpl>bql-T3x>}TsuVQZqA5!GSF+R z(_TK_;Cf1m_iOM2oCR%1Fb%QLhlq^g3T`fYyxG`(!hUQ` z?)~vJmE68=C>!roA-^_EaK?_^U>w>t)RF@b(AhHzyB8$MpDBBt&c33U=TfiD?~ER~ ztXBX1ZOmAmT7v!LOL4{E_a7z)bc&-x(mr>_jieKC zj(u@~PMeVG1}b2T1bAZZB5sWN6YTavpuo&%7Xz)`mZ(8x%@;FpJxP008OS@4(yrR~ zED{u$%%xqV|4?oKvpNt?fQG~E!bIUKH5~6TqRYKOwraaNDx{2S&WLq!xecDQac{2# zg&qrDeb!5PFf8xt`9mE=c7{wNgEU!x)PZ=3aDY@%))#tlS8aq+1_)P*aLe(@C^-kw z2GVe@kr3q|9W|We_YFY;0Gdva#mqh?7J*QEh!0y*Bh+w6g~Jxa5i0e0?^A%ryDsge zlB?&k-Di)i8H@B|7=M8sZ^R|oPjM`U>dUK;TaM3&cMq}qZIEV; zeO@u0?feN??R{MhwH!K5(2{;i=nh8Y6M!v-XfdEYEb<@$LA6KJERZ@JGM+2Qt^~dd z^a(*z&vp-xl9*}!LFx0jRv|K4RL}yhj~d6J2?r6uENQG*Zni7P#yf%}sOMC61usKc zS(Pjdquz-k)6#L5chDa>LftSG$%$bqlOVM6Tnf#}_s((mHFirf|RnaK`jG zaf#TRXngLs>XrPGupAGH7f_wSmK(guv7KV=u_N!E0b^Kk5E7uhbeJY+N+1fWP#rTz zdI1nsEmopos{z$~P5cW)SfCtF5AXs(gEZjTJ?x4N;N(6lQ|>zjJa1ikMU`W<-4dpt zgf90Isn8>!+E3537VUcTD~?nx6RaL<@9SCKyW?DqTx?E`!v@g?hOyLt{WvO)rwL2r20+)j|guMRG~n@b~Bw+EB-}ZAEL1{o5qEubfc6pC9~vz zG1T$7iY*{8g)W3#8|aEsS_;KM>K`pL!73?m6;Sa6kUA~K)6KHXBgm_>HiAa$*%~tQ 
zo@P{-+`xEJw12s@hcV>w$LRqxZwUhX31s@)4H3oM_U zx`r>Db+w%ouNnr=dh4B#{;`?KXAFMV zN}1BcbTxm!TmLmNzxS1IQJ(<~dw7c?!tgprGea`Ko z`600M3$f2K=sDpBIs4la@SWIFJ9$KK@e)-T$W{t-t=rK)W4#+T0Z!htL1DYI&4soT z40eL+3NfqD{sYEdCZwhx#A&;7FgAd_BmSsVeofyaL%2_5)3ZOzUR;&Xq_{D ze2)?tKEWO*$5w2(4%Gwrsv7r#li1=yC0O z@ZLJRk81?Mc`x^@+ST1OoYVb&YQ(Q+a!lug+Py^#bL-R0L)B~k?)yk$V9^xT@rBIQ ztGHzP9^gdE-8%V_|Gg0a)5verKz^$V5QyS?6CXEvVk6Lx6Ye-N-b zl8j{Qd_Rn;d{}KKHVR~b!lAmq`D)e1WWS@-o0l%tJ|FyURbUEh30k(@bW4yja(Wqa z4HLc_Rqb_uCE5hS75hy6?Rv^|$4&~~YXFVcAxxY1vq(-*(F8j|6GgXejTG}BAhEy6 z|LKQ_D=`wz6C+P@jWoxbE>_QP*>K}MjmDzKWh1OyP_w;J5isu?-Ggt?k| zX?6r6K8{;%mFniZ+EOO=6>i78d{fYHrqzn5^wV#I{RyXwEJ^Ge&PC<%XA;jnUjHC@ z5&7u>c^cCblpNMC_xm)N0aC$NAL_0G0fc&Ha!LaI%`WM1t3zfw!~ME0dlPC0m6bBGvOb8{EFRg_x$uLVL!6d z%M&IZBlHw@Ve=uVRI=lF;rD!eVRW>6iqmF_do%%aR3jn zC4CU~-1n?=lxp5RfN3~s2UHP_4^urKx*&78?}8*Y7dFf4=h)H9CID$|lO)xtGOU4; z<@8~S?|K1%N(6*HeWCqi20PD#P9(u0ywi5R+2}(L%cX#^(|V$8=-0{>47h_mXODcC z9!WqhrVK?uaM|#4>j~`|>FKw@7ruHNckV@?_)dK(&%WBhs$MC6Ni@Gpsp#3Qwx4=K z6LxjJSfTjgU81M7@5?pG&+b9J@(dPYNxn}qL+SnQD8{|hm{pGdu-nnk-D&WFlL|6GCO=TDT&PxWWqNG8=|irpT3C9ZB^bMDd0WJk7L zoc|v05jwwCCJWrl2H(@=M}f{|zGCAIoMrO#ct!$}u-npZf-*!xuV66WRr&tpXmsU; z58!$VM(@4n=KFOa^ubdkc7c%Y0Ak);)MN#p}q&OpP~pWHv9w zHVlY=N*F^Gs%D5;f``BaTlLfaAGZQ2v8ndKF1-&&fB(XT^y7z;-fKj=cULTOBy-es zL>njfJ=JJEx^UQZhXy-CVHm1^jOLrXLbj;uPdE}MGb2y(=1s@AK`p?SELP}s!SCe? 
z=BGv-$u`fGeC5^A(Ou+rJtydB`xmdq>UX*J|0Y~!_x56R@^hkZMDfIdTQ2vjomol+w6>8x7#o`SUo76P#*j4@`1^R z@9+FRslS^oKbnwyfoP}SM3=lcky-X(-1tL_Wm&EYUF zGg54MC(6it&NXBY{)(^=C#$?fB+u>{gNzme7*|c%CtvC7XmP%{ohJG7H1JRx5&v-39ut?$J3XEA8p1rTd~;=J zdouApY=wA)%gTV-{jTo|)JbvUT_e68q#+usXqVN6IN@2KFXm4MFJ3$Ex@VjxJP`LJYNwv_{A$E*<;X0$gV(Mg!5cQQ#_=04P0Jya#=`T z?pl;v+PGIl1Ib9r2VXcW5lOKJw$_8GsI%br!t#4=Qn{s06(eBkkLt>Y@zFqeD(W-imcI@1fMAAP_>=2mh+K15QLwXku-?}HE+hTjhrHn$(i zSi=^+g2SeB@21u1Umq-DJh7i1WoKpQ5%O>O)J-iI|3ySHIfS8Fq^;|Ly);>5CHwXd(BIdEo3= z@;VbW7np*|NwpzAVX@z`e8+8d#sQO%u65fTZi8~rs|H_Q!CC3F8}93iL*s7}5&G&F z%WsPHYw@=UXUa~FrKylJySP5R;K7MS?F7Z2#L}H|X{`j5#rf1w<#c{t{nm-8pWqfy znEk$TG8l{OsAGN#C`(81{eyvtyYo4spXfBE!?8Fk0q@Zh1!<#Pl6KVq6 zH0WMl-Z9^Z)QQ(aa?=Ms+}ZtNiSf}1=TXDQHe!1a56<{D;nctQhuitE;1|S~ucz&1 zbx-q(H3#$WW8Y*qNf2|I?hU)=6FqeDzZbqKu$Zn?Ip1Yk+#h3&M4sc`Y1|)bM)b!b znY5D0(I*KA>HF8>R0QxDS`j3QZCL;9-%4MdzwEz4Yw`3~A66p4@Pc?qB1HZvrtbm) z*!w^M__#{CaTjF$#nF|fX%3L}Fpj-fUBOzYaI{~5fK@pVzr7&ok|%~rGSb8ih>Z5U>$Hm91su? zRAdl^6ag8ND5xldL8izU5EI5Ekc5QH|FvJRt>^UI`~U8}=ljmn=R8lf;=8A{*Iw&4 zJT02FV%W;-{om}2)!JO+Vb?SSNk90e@C7=a!}vmOb zo_q95skMF}oW{$lZ@U5EY9J(*<_hSFr$xWyn{fSk3buyq&njyBx?$&=dAb`2fKx(u z!0J;Ze-HJUD>}x-zthH^-1J6JmPg|qx^?2XV`J-`kiKcsJ&=|DtBzGvIc+iEGZ*<6Mm0?$ z`F;t*$wen-rG}HlwoNzgy$RM5a z5%m!?g{>1y!?O~+xfWUI@lv=nxV6s>&=2AgrA;5~E#u^#O;YD5M*YbEbv&X;&!w*f0S%;zHE%L0<1_ocyuEzuWA_r4-mcB9^Xw)X ze&*Y^dj7+~>k%Pesy)pAUhqaJzSj70b2WeY%(Xq2M3 z9p-XU8oVDcDsZ5%bCctoP-YJI>l$(x9X)f%@4_8dz9)$9z&Z!-C$HFXI z?qPX}^B&{Z;qgDF+nkftxS5%zY6|@6s=J{z{Hg1Gc*CS5K3!1Ke@9f_IS32U2 z6jvaZ7kC~MVXTekCO7~Q1eZEc-(r{uMXx|(k&6BZ|&I#8sunGe~a z`tUlfqq=x}=WJ%EBEAUGNqDZW_5CfhNWf??NAuWGXqyj{)rt{60!!DMh#{4K@M~n; zSpRCxZ#pXu3XZ4-Gki&p3RFB_J*vl{%8qIvdzA6+%r-s`?N)SAG)z89rAZD4?)`4q zit)V>Qxy9J>8HupDF)Z&w;lUx|8~FaEhVLU;1&(hHaACbN3q>Ql%3l*u)T9s2S<&> z7C@&*=NNI1!oLRm7s0>m=0V+$((?+{s2QW}9HMIXN7_lM{fPb7ACsBR)VYCVj9TW^ z`(=EF>vXO=5h3R%`{HHwe!ia>cF7{?We>CJe{z1#cwu}LfUPbTb#Ioy8@82Tqop37#?r-q!d&0tV+46dnOt_rtILDhmXYu)UTBMSRi&uMR)W<8bQK?+ST4#w{4$!=KYJI89KlEk9;Y*$MyIl`fSsx 
zPAl8|82AzVyw4JMD&a=)8Y!4GA>YUqpR^MI68?|>6G(LYG}vX-4nmt=2SLpt0OEPu zmwM}AyN~G}t7kQZ@4J)_2~A$$H-yR#Eit@=^;mOD*7ZFuO>D=_hKY;1pOW}C;)54n z;g}6Aq31N?)-wj;yYPI@JS3jgw+fqD;MilFj2WL)7-8W#+HQm0?<=!)MDJ)^s{EFOV8M9?fECBRE95=GV)8h zf~?eK307Lxk^fcO!2kPGc5ee zy3EGO`{#UvXWcK}j6nKt|Kin$fr`H!g&JofD7`EnY}=;)21r#*Ndz?8zO4fZZ%`T@ z1^Hh8G2)-B>PE2i`KoEi>1urXNm(zaa|sxH{Cp6IE}tvWq2y>peD9z3`QIAbJ^hy5 z^vUE7S=0QXh_~{ST^^F|71!JLrO=NbRtTHswV zY5@mbmveFqo8+2iy!q;Ei$8?beB4G|>?VLMQmrx33@i(53`qjbt)*E#DRCu?-;H^pV|5P(K3f73wXN(J?Yk)2lv*3r zFN$`lX}fJ@Qa`qu00h?dIkuoHR9)k<&p6KcDw0mkV-vwAXs5f#N-Vhf0;9Eya6R1bo zUH;!}cxSkVtY~JJFw$@(G1_*~)6la0%&bL)Uq8HE0BVf{F1FtefT{v*ZtB+iX4{C* z#u`++oUGpm>$ViF8-i=|Tn)@C?eEi}C^JU7HgdtRgFN$`T1X}i^(n*+p5~k><5c2}>1&vBx z=wXU|MpFxoK{EpDK7*?#xIFWhPaFZIkHEoEkM#V?JnzWAk?$jX;be)aSz>*<@m1GG zgy$nhCI0q+aK_4Nw-5|tj>%V!ZWuSf3X=_Uj}&VREr-n%RB5qiFZP@~s@c*BtJ zBRy>OtF@5u-$s~$^9si&$xvw?d^I-?6)644hXQGRuGwtOB1rL~hr)eBl4!M^FB$4S zMhR{5bLFoR_p25uwf0^NhyXv4?+f4s)`~h!@F(?1-L}oGlh6V}8Cc1b^eV7C4x)0A6{3{0UAzUXZz^7Md7F1_tGo!^ZJH}syE=n>F~)q4g~ka zJx0mrt&{r~fSe3qjngM`{JWoQ-e&*MX_F+3`=U;~h~>w+_&0y-j%ARqyd@DZNMHnV zDdY0>b>MIKb#FvCT9N_d9=m2Jw*RCnGASPqxw84IG2IKO)XRX}%*L7!lg(TFswv|s zJy55Fm|N1A+zKhMFXB_@7X5Mpr8j-RPIDusmR&@Tvnt4^$4Y;yLF{UClp_^gRv%N# zy3A2|t-~q#w&owcX+M9EcHRp!XmTS-;V?25V2APXe@^5ObbR%L?(AVSMD|ViWO%l2 z;oj-x&fs7CdT1jU9B&LlcBqwr?)iCXHm1%u+r(^9IE}vRupX%^MH~j`oMhelLU8XxVB5K#(Y{;Jbof!mW>rLbrG3ixie)e7Z6rZ$ z$e};B$~)*H%P(BkBe8usje(1R;<2Q+NIS7hHCYA+6bgs`34O+8c4Q-$zeUlrx0Iuj z8CWEf4#25+an1jLCy;J}mOk})yRG$%jlYNQb_$YlJkS>f`c+uBd7SWE?s@&6kK+F7 zTZMi^1~OMWPWXf5kQsr*#Qv?-)gONZb9`F2IN1%EssI-HQcrmJVkp}V7%QO^%KbLwgHjB41rG`x*4BDXB8_!m znf6^{AT!+>n{d!)EBDzD`VXL1Wn+>%HX7t50z`MUtq*b*8^(>jfI4s^G9DFfk|(z` z&pM!pwYYz7Z5~6#llbV(g=)<=E%r;fL$bN-;mt+C1;e1g(R3uQHNa0VIi4)<;q`jB z18ajKs~|jeU_;B1>6JF};@u|9KH6={zjmCLS`oh>cpHm(rx*vSLJ#WBe?<0%Py4s> z>OaOW_5w9x06qOPs1}KBVgh^T7cjotqiA~Y&o+93g)Ar7{JkCk_L>9u*cB>d>Xuyh z_ExdBh2Z9n8b|N`akLa$8Gh7E{4w!Y?-R&gdC!tdTH1+f8{nT 
zYJNPaLYNjrUzM7C>EC?}(i!g+Mg30#wgi1B(J?-vyRQ2#S2r+!tc&?8DA(8QtkBI@bdKzp=A z75O`AWaE_`dd;^g$^i9SdnD`SOh4mA{WNJCuiV4^5Y*X;zU;*Nm0dwLw$`Ya^Z%HA zaBC4`=8C<9q&jQw&w=8{D{{f*ZB(rjOheA~UT*PAw!+xXyESeG7Edtx$S54eAcx1+2(5_ofh`a zH~M?lYN^hf7ms;s$a`r_K2|h#^VKxfGmcLYuK{Zw2FA?4oIKUtamzhu{RaU=YIADM~eqvVRe)M&T*6EvDHYf;wSa@<3$Qj z5p`Jz&zsmbGl_!z(Uo}&-=qh@0asp->MZbT#HW!VCx*#eiLmJs&hfxC&0Y)1v;+z(bH_;H0C|h#UUh2N5ZzXA1*9x z+E?*%hVFViXQ{nokluxhsCHNdX-}aLHzq&ql==H#&TtWi(69&+TG`}5bYK2Iw?KRt zUl1~XH`Vq}n^JVJZaHV>{xOyQZ$QHq;KMGXOM0=`c*Wj`6Fph~dQj(xUjFnS(ld4c z09cAMl%@DDvjaEF(1YT9VJ3RL`Kf9Ql^s@()~^YAc8WKVwwuDWS7`VGwGRhh{CYOh(!tu1LvY^r>wS%p{|vUB zDw&>8ow;sjOrns)B(_cG8<~r6Wsz5*8Hl*7K0B?4>H<=G5Snv!;_!>bKV=TyJ)`Rs zlpvXC!>pL~A;@jdpU#G1NO^Z_2B9!gacg(qLa(}&kL;-t@wZo5=DKYIhCo88^j+s2V zNcox29e>ye!LVJv2qRM>NKk-&=h+~mmH3`%Jo|MBK5#W;7xdW6K(54Z=T<<=mfpor z4B5J?`Eon)DvRi()VQfcn9b7UvvB3k2rU zTW8W1B{ht#W`giX93ak8T0Q1NMQ66{XxHLfuE^(5f#e@uFDkcX1q(_zCu3#;e`q6PUCrIv0LTfvmEXz!=1ZREskLcXi>%0YpYx+apj z|AN`>`rlJytY6Q`z6H&F@0{p(Gk~`1OuF5^l}_%C;~n@7e4{SiKa*t^n;TExRgCR}3{cQ)S{* zd@DbFb=%YDx>PmltPb3ZgMYcJ?mP{+>XJAI?8=>e=BIye_S5#s z(<~`Vj^o|=hB?}ZapD66=zC2u7RNUu@mND*2+hPSGbZ@x&NPQ@NWFdG-*a)Yq#ya; zH`_1K?~PdGb&h+;?D+WfV_--b_CkCxd!x!^@S#2FEA_dMLkMO9$wdzq{PLlb+%`AU zwFWx#@_?b>66GERMH)y6d_;=1eV43zm~~1^4%%{(a>}>Ac}5=K45jW zuW3q?K}y=jPYc0d!7W?1GEcoSZK|($1^Qz;EM}E5Y#9bZ>K(=)H+<7P&2WU~pu_U% zX7fYWXm*uAafcJ&brLS8ktO`(;PA2KOgtxU_MW7kn#IJ8Bonjb*Vb2aBdX;{oA)uY zfB;if%n0I#=(d<(3-i6+FU1EVylL3)V-yamjpxpZCd9dyiu8}-@5Mi|3wI6^?gmj^ zASR`-zp8)n)&%)_x^pAj`9~j@)h&+m8Ai=VeKLdGXjtItyb7Ir`w!!;!LY+ch&MS5U-V@CU%$o&FvujQ>OG zyN2H(z+DOBn()kA`KS-v(^m+fjGxHz6;4HwygW8thJaE0yYyjzWHE$$u2tFl z!!w`KWD|=&Y^g?Bk@?lN&b}>WF9L+4lCff3F^JzjB`c~3MhY) zw6cEZ*F6ZA*AZtv5**obbGJFoPxsj zc+Ez74&?dwpu%$kRd|xAeJ`flpY4k}0mbjUN5+=0u&T3*t;R^APGBfA(`~O}v{zKw zlMUnp+2LN+eU5(i$wvTz%q!=7fU<%28j$SFC;Kc;3^`tO*g6ZtV{v~aFB2k4LS_v9 zFu*a8(xYX%F#w9y7+mLnK&TkX>6!a`p_|=dx`SIKC!+qnFnxU)Zs+W6ho3LqH?(8MJiglYQNsEkn64^M*SL^%^>Aoka`Ut*(CL!_1%a{5Ql95VjRd4;H=K6{k4s_wy 
z`2jvi`6(7qAXxy+J_W@ZU1?l10@wt+RPHJr)6JssPx5?vC)qGlHCRoKHR4_L<8i&? zPl5N71_I#V6FU=JR5OShh(bhC)=!6K$V}vHfk_w7Ot(Mn307HO_lm4nU{chh#;i@3 zKl1I6Y7KDUG9%@YPFp2g)1V{ioCbW?QHR$kQe;%$I<&gS{iL1zH;APb4g95Ko8*RT z;9yJG_U0t#6TNDxBgkF=OKa?irn22EB55oC<+`HSPhJli~(iJJw8A?GPI8?0eOca*R0H8SS)Rr z;r(Ir8{i1Nve8mq@v=&4-AV(5rx;4v=M)xZ<2CVTfbS7<=eyX{cy_ae*VL)I%=ZwD zGJ_bOGQ)J)IRGRcTrN~y2RqtJ7ZHYRz_`mXS0&`sD8)94ohg8cERTgWxQs z4Qz22(`}|#Ib;e1;H!Hfjq$M9pn}v;=F+EVn$pzeFn!82x-LQ70+6B+#MTn>oqT1;lv!D|u%5&>vPd*9H_wkm&JUUsB0 zdGIo?Tvl3q{=S)iGjOJxzwWz&BzetS0(XMGEvd)}zFbR;0Pz9ZC!u(5URF2K5&`vY zDX4dA2k%$|a@0Haum#63&FpXH?5v``Iqo|xJ!8xtoUO}=|H9ci@Bs=|Cd!K88Ux7w zpIZJBsSD2o>c^ATkYk+pj`|${EIU6Ww&GU3veF{Gn(eh3l7-)L7ae)>=DVufUJ0O) zI%^$Q?lf<%^O3A^E#4DRE53Q3s0@Q)R33#|vA4rx(@Mt2(~>*29>T%y({;p3E zdp?Mm!La{gRUE$b`|ZU5s60oRO=tpc1q1Tk8tjl4W+KHTRPk}iK8sX+dF7Mly3Y@# zf4F2A3j(RS4Yz*LWdK5+;OpRZWvU%s%ladwe=sY8^w`ymf~jbFb0tVf)HUnvDd5{o zklty=1b1W1&)aCxP{kEXmxEPs%&Sa>Mp2rdIU9>GJL9<^>iB8_!-oXavDNfghP`*& z+)oD3V`UrHChKqd=Khos{qao|)216kfxUz9BHB~E_0X%8qoc`Y+RV*#C^rO@e1OOK z@GPh!4L!6!XO>)d>K_4zvVaJSvh@j4rxMgi%Z$Z?WUzO>rw^)Rq5TDvgRy+eyhI?D z@h%0qcaN=3l-H#;-AbJrlE9v~hcfac`$~OVAGZ81GNLW`j;((ea<6N>!$F&=V7{9w z8^wo?Fk}uKNTYmc8bX}0KhDER*Rpy3lU%5gsMU{6_`Fb?kJoH}p+_RXr(xf!SK-hI zZu4cR+TH9{vfUSzC?kK$ahte^a_QvN_ec$o?r#Y%)#k57udcBp?1O7=0?`0{;r}SX%Ak62&+dOGE5T&uXsHpb$P07?@h}R3${S# zgRv+@J<`uY3@iBGKY2jEIBW6kS@k#c93TfJ6+otrkaegv;LuwL3?KbxL))O-H@Ruw z>{aKL>Qg{^Yiwmnqx)$H6CHF_=v!QJ=#sEZ=kY0;q6`+O*Eo~U@~)PMae%~om#SHf1F zx8`gI zAer%Sln-LUNN!8r`_bHs5+Wo{8y~_&Ehsj1i6~z>l>6~tzhKU~Z?P{Bogo{)A2kgA z^aU&O+rL0x09`A$cYpbUp5*9~+-;5}_WJ$P{B5+3Vz@4~Nd2lG;a{r;E19_fd8gL1cK#NS1kcf#&#%>snc&uCd6@@HUtfzI_Y*kM=y++oWm&JW4bK7g)J*yf2! 
z>qMRURG6@GptYl4dbjGuD}IqcSF?&d0oe8dpMW_q2N>3Q8Rx;xorhu8X9*RZ(-Vg` zWyAZv#N`kdzt#dol$suSUV(#U%^O<{)$-Xcm&@gd9Yr1|ggDmOE()2Kj6vp*-NW7+ z#P)MxiD+|0ZBVz_Z_NF~z(CWKGbzjDGK@+m}>u$c!xP~hM z-o6GJ_@Cwo)wKOGcGHEos5yqFzg`(sArA>p1urkmEcqzx*{bjLU2~r3z4{#{wbn+9 z1pMKW+_ZQErScJw)ex8F!HJ=ML^&7bC$L4?`xrC`jv z1qb)yV3#2bM9yC0kPe-W=l?*TUvMehM9&g?XQFoo|LiUpa3NzF%S>z^Y9o2?-h6BJ zs#kLLa6=w_ocH;GoK))!kf3}GTk#N}zCPZtgGGC*jURq4;dj9dzJlv%V4DrUK8E!l#IRC(1sQ%N|id_r}<3Sf+L*(MeH9(X6LBd-6&_~t-(-jtGn2xT$G3( zUAtunKUSSs58;aQonG$yM2VL!Op*(7lG(ILJyLeL);LZVp+oaJ5@Ew9Ou#&9c(`j75ceRyM=Iw?TX%ocbXtO_eS7e-3-8U3D z6qW>&`mjSthpol2KOMzz*fjLXAQn3TKdCS7Kd~0ufme5bc)c*>cHCp17IG?XrhWpXuLoG|xSabt9 zrXiyrIB0qMHOt@2qbaiJ=9QW`VL8$^BKwtpf+*hEL^JiAcOpqr@U?$+t%v%^+g13- z?O=y;(!Xxk7BkcyB&Jrt&sUsbie%Wk5u198H)D;sp>Ug{Yt1N*s8uLfa1dtwipq9< zB*>7U)BeLnHW-Br{8I8`<@0fyZ)3T|r;E-5Ia0t5P3ur}qIW9-7QElnWDR6Tj8+m@ zf-*5{)!1qT$LoL`bnr6ah=T{B8Au@5U70rGf5Z7!FMBl14{C`k|FHLYaNc4M;jJ>n=4I zc3|EefBSd7FJ+#$7(PIzoC(HO4y|sLU>r}N6QmXF37Qnxug=~Lyjo_rU&)Vc&^3Ju z+YNp^pIUb^17RI#(<o*u4G=OQ{BmWYE_zN93n|BOY*Hd@-vX%l&BTVQRV& zv90T!+hg)Kiw+l>f;xrU}k>LjXT?WeU%+7^tkbDu9kXAkBA1S2%MEW-}yG#~!| zzY|VeM3ta{(2^g4>q|5P*rsjK+|5_9-RQ~!BZ$5EZcDA&gPYTMicT;KIm!prOR2gB z6pU0{M0o;&4R44Q_5X>VJ&!Cb@e*H-U@^D90EtI;xX@l-~%i#?4()F|VUqCe* zAG@Iz@+H%ehY*^t#aOk#_A6Uc+-53W-W%W`x{LvCQC&s}qB0~AJo*ZQAO~f83bD*n zSO|(Csn5;q3L8x){9467yj9Ikn;u`0Gdy++9~@iVnh^3 zn!F+ZLZ@a3BII?;(FlpA@4($NSIssO39C*-fXcPjL3$*Kbsqj2kvO@*GSGtVHo5pfQmzV>>^Y% z1dFHbaFqdqe__1&StR8HFCEy!Vk_PCiQqZmSG60R%m9zgZ~tk!QqwD-$Pg zOFREV`p`A^3`P2BqOVJuS2_?2!&@{l)ooF~e8pmZHoxZTUaI;lgWa>v$d(qcKyIxl z`%u|AeYf|Rpf~GKj9xpM4D_9;iR5)I*{elidveQ;mCer++IQjyFWMhbLy@D1wAvSa zIg&#JJtD=k+qNqS+M6{Hrh>q?fE@wN_oXg*XSZ1Q?t)ef(lH1In*Qtgqv3uJUW@i| z>|PRF!pTqAzPTiJeIYfgK>AX9XeZnNsn->_=}pb?_lUpTZOZ!WiWY|>pc(AtpDXQu zqrEQgeru@3FPFCsWRdjt3MdWB72F}iAmlgOA)u8YQG3cD?t$qolRYoO2m(* z603nfwP&r>UQMI^0S07KECPLzRPT4R6&a(+HleA0U8;iFx%#%gI5E!etg!(RJy zeNh3ZZE2adxYGVb8$t|`rSIJ%o-QM7?@_pewo;gX*Z`tv!Ja|`34y+|CT3!W^Cg+y 
zvsvgxT>8ArVxd=l;vNX@$K${=?u6g#^|@yu9ZF6gQe-rc!e7#GCe`r|LAGu6agF;% zVw~X#uy&md`u%^oq_57B57=JG$ZIRK=r}vQX@0|e^j;Q0(R#L975M9Yr)>JH>pPgkJJPDf}8tf3UW&WZoQ;#G(c_;?aD~HFPiY9(-zsA>43P z`o2d<`yOKKAb@o4i z`;ghCm2(l^5)(6B%jZLQ;7S=9Qqel>HgiEk`})(+*1^ywEl7*@0V{>2`zockoo_l< z#-@2rY*AeMsw#dOY?aM!Vdr|~vkrtWT`%3R(b^r#|F=6oNm;%A8ZH{mIK8EHzd*bP z-@?5W_ll9yP1T+RVI&e@`Are-q1IaFfUYWA7Zz-*@hpQ90AH_aTF?zfPyw9O!X(lH z%meGKmB~v%1cv`qPdQFsV`K!==>{k$P|SRmYqyHBl7eJL0F4dAma97G_8Pt5-x&P* zP^Gnc-SP(;OuJ`76n%JrUe*%Za-d<8nSWQ?nHAM7u~r%x)7MQcjMm`crgitd)itKu z=i|wr(=sC#G{k&y0{3m5>348Zf~Lr5X~N|gXyast%-R$-t zY~~%12pT@l%>FL@wy9KJwBFy_U3YecHM$KM`x3P87F}oUzgK8;{%7}P)wGpA^4@>G zmtP?UdK71#qju9}^`>Uf4f=(ncg|oaZ3|W}CH9mmSBGMGHOvIkan6PV z`g|ztfVxM9^lW)o%U{41`a~Fy*r6Q;8*Rk*^m1?Hy;;o9ZN*23!%N05@^)1sK2G13BLJ7uUK5P7-2CwG9jnU}K}l2id1mxP&S z7`h&*VJ%5*(p#T&X5-T8?GTuBti29I#_q|Ep%s}GynKA|Ft?m0flV@JQ0v5O;kxB} zS!Gkaw^z;JgFWqEA`|~TB(c6j(Zji>_=jR&HP<&56n4@qsGOIoyu(Tqq+)9$JDqmJiL52PPLOp+WU0 z!n1U;`B~P1JCLj%bwEhn;dYVLyj-lnGcpkXpRFj}J+1s)Z%+fXTIN{W?~Y7xNOF?8 z^wGEZQ=7lyEV7^mKzZzZv996XTJwm*X*QoFCk3U-O&rv<13i|#2t4nNJ!()M76*(_ z%FnVRzv|whgOlk&c-(hAp67C^cTZwdKTstMweOg-LqwiXs|(HE)&yd9%|&fc^#Qg` zW*8#BS|G7xYq z7f_$J9Jk94e4*U3$`+EpGPsD>rVRX1z>GhP%$&E+riUECwH|q+efOPcVf_?S?>&8F z?^cJcd^3QPoOgD%1e%?#TiDm>J#kyi_ToN^A=41o*&H&6@02>Ea;VgoB8!FZP-b3g zedUdEbtw>Pk5xSaurwFs&3_f~d?`fE?~-x)F4YKgTZITDNPF!t9W)CvmHT@@$m>1k zLmT$%yJDM4`xDIBdo=2mgy}gAQ!4G>d%b`<2~z+!0ODr)>De{~_hrN(R#6oq?DG!~ zV={mt?PGBVAx5XJhJv_Bi`Q#F%D)eALK`Lr>kBuH^o7w?o(?zoy5$}R&78on-3~s# z(;8phG(J6JYbCjLG{}B%qt@p={a@{x@~L!Y?ApxcwH=jH#Z z4kDs`qq>5jw99XsQh9IfW*3w(>8*#*P08dE%2sOLVeWGwpzY=h(FdPvS&8GGEGu@H zfBf|VL(TEcnSBh%cA7JP1gF1sNQV(gTa1WK9Z%=MIF@)I$DN7{=k#Q#6!lo_s&zZ? 
zqQ5@ljgjY$y{7v7TqlxuDm+pvuCex?J1pn!np_X7_LoQEUda6>SCtw_Rt`tYBg-y> zou|YH$>s8WyG!j*leFCigi3eC13}M^wg5Hjq2drf6>5A5T@c2?aOAZuWii;x9?3t7 zKvV0K2Zg&>ssVq*qbT>}H{s9=(jRip)d2G}j)N@jP%>MYBh!c|YWr-c)vrN~DR0S1 zBoWxU0WF(&CF^vi@Ai-c`^BTL4iNbX*WEhXB-g}jX1XhqtnDoLv6;Nugm}{X_+`iG z^_8cIcYb0wA%gHKxC!sY(u4M9f4ipu-p`-jB4SOpRFKu6a&~>MnLgv|?+0swy*DkN zkv5^55K>g(xR-(w9AE9mFu=J=zFQWxGcs$!;9C|s3UE- z7;r?L`sxxwjGJgCxz}Fx&{wcA$zmd8G0_pq*AXGI{rLVcnz9|IJVT(fq;fZ7c8?_~HAi}+meiWQe7!VU zDl_qs*)USe=?>#U7q(V>aiVC->WU{f-?c2SaB~z)^LHT|^i&xz0;R(?`DH|>-381w z{yF7l*_HLV%wtc@iHm05Wll{jJ7SnEwW_eX+@BlsYa`2ZjQqJ{=yyh@fv0Yk`PFhiYm!vg5mkt7_Qsk)h`lswAG&*I$S&K8RwDdPowyZhKv$>F#O}#V%iuU#nBC zr8O|mXCFmquYMCP5W3RsmB^naaG@p@NOgoK;4oD#8F-O`-k)>zr_1JgUL#VgLji4*I{ zz0}lY)*MG$rc>Q@f$9GARwMEAxlcGWCYW6Nh1N6ELJ!qDynGA6W?(FSrD_A7l)@TJ zM6pD2tCcYGu9R5CN$$aK_LPbr)4MOzyLpEG+pU|r%2=dU7A@p;SjZ2-D(7MN@0kqR z^!g|`OoY&cUT=oPn?M&xRrDTG7GFWp2sDy?R*dEA1%?9AvAmE(_8t%Rqc5Ia|M0=T zNU2OB*F|iTPPn?}iPj9_X2lbc;bF8&0Z$iORy>g|S2sFnlz%hI!%bbQVU(->Zj`a~ z@kgTXE*7yO9GP=;ssp+2^zU|>Tf*X1`nXci;g1%mTNy-oDY@5Xa5{dFXBfO4#SgRS z=oEI&a3{GX!cNdPo5XP-Lu}#31ixTLFlF(~tyGE*PO={t(5(sWyk7o=*njzY`=Mx! zkU*B*)sx>9xBV=!CBN1SQhb$wCJ;hFj5D1nc=$xHZ%q3S(%-eLdtB9x*=y_K7{3Kr z1Vwo)PAYJWq6PAZT!S`k=_z}=Wzt5^t3MTZMitzU$SPqs`go5iblsMDK?SEzV98v? z{Dx@soueez$Y?#2V7`-J>q)X(zMg%1i)*M4I(%=@Gj*|f$jzl@R38w{#^ zx{Aomt~4zu_ul0gn&6=_!d7qQ-OjnIV%fjL0zP(oMDK-vI7xVUQY}uN&3Cuqy1S{_ z8mWe)*k-T?X1yqklT3vQ6j?@Q3lzyx>GBlKFDFk}_jZ+u8#(_z@%?1PKH| zmzj4Hy~hNn_t-pv;kZ-!BS?z!JFs{Wuv=~ZvRk?5qK?R9(``DY*UUdd^t(0gzO#4q zEidPS43T48QLKa{iWS@;7Ditp(pW?q)r}{L;Z@mu_1FEGkFRBgdfQbuW*%DO1@Zi? 
zRm~SC6q}DQ8V|knC2MK5zoU7QYRyTt6%b$W7OLm?0>KcC&WcNZnS^4C^hvSUT*GCD z$g1^J)nbFh=VF{^MCKN^--yudyV+J|v|Y37n0!ZFuL^>qY$gOn8bd0TPcHZ#ukh0J z=TVzv@Eh-iHq;`HLYc5k;r6xG)zwBhl}((k2q>i5ODFM9d!&2tYJ^9OvIEv!zHR8u zw#;5mJm24Rb2-tTKSUM`(SviC`=6RCeptRfaiL8_<4>M~pr{V@cEZ@51RYPE>YG8) zIjz=Jo;$Ez=@{z64n6iRXc{}`_Hg(VEiJ42tW3CXeJQe|3|<~BBu^k|tr@4)=IcaO zKcWm#=_JloSawBgt+0lzZl)+W3RuRtwyuQ=w4WKuGn4_nHu_g(^$U%klSHs>E4Gk7 zZb^|I*MZ+qKZmWF0#{7DPW2wuAYNTYpqEL94SHxovSx^)A}~lV-@;8<7A*~`GGcd0 z_clt?wfr6&dg!sn7;m5QGCD$$A7K|b8O_5B&nBMs1HR){A<(PaLW{)?%_4eFT{6( z&W`JrgzK{-qvFSCwUhYC^bdyg4^`$-jnlL=40r-DKnjVZI6NN(kD|O!+c+&ko+fs~ zy$jUv*(#2)`Xysn(ui}8pnqwuDtq7W=FPfDVy{w6t z2)p=x#<=bVa1Y`Kel&F?oEz6o>T*18svvOu)r@0jz z)$RO;DU0&iGD}KVMHt)`4=S{n-DmK_X9zZmY`lWw%ouWJ)3rPEr(?9eBQf^*m=ZZ7%}jw}(5tgk%9H(1t#SEZ&(mHi`u z{LzWHCt4)8phV-Mq0e@B%DCvBJk<_ZV4Q3r<<~-AKfUF9>T@b8Aq9wdK*H_2W7d`6 zA=NlF@C_4Mvn6ydVs=Ej*O;WNF<*jiFw)aw4;JdntX*wQbU>#g2-Fm8f(E<^PZXYaQ9t<{8vta9!6um7eaAXN> zT>|l(Ka)CToQlio8l9U6)%%DYfx7$+E)NQ&i|(@;R+u?>WE<>KxTUXke?_ZEDD`$K zq@H3*(`j9eZT$-$typhtgr_{$4|yJVYIz^^cP*OW&y;n|rwy`}bZ0Gvx0ZL~ts=R8 z(Lof{r|RGB;&d(7gDL7w=)0`D$g?;Lt{C_?=X zA1u?;lxha5G)tBdb(tS-+VtFXqjSCKT;8MOMX+Pygf^5!R`4IWNmPI~mk=2>)N;mf zxnAqdfPx^qHDjBYMXadSJw&E7g~o>ACp}t@hOcRWqsIQ&T$Zf{#aV0chK2ifzP|x! 
zOfc2irSq#NtOe!4ZhKr#T(x^EVukzr^jLX*H}kHb@)o2eaYx9VJA5}{spWjvdvy6D zhMd!-Do&INT^W6WJM_mJJlCcSXfu@e3F6CbRxDN;L1swt2qt(9ml!Kc@*J!gHl3z6 z3u$4dN^?yWMI=d=I%_@0Y;`kLSRji{(L5T5@AFbU zjy6`+7?TvsTIfD9*>IWqJ&s7M?=lkj-y%`J(%Mok(T0RM<;XjWn?og!RV_v4jn?d& zz1Cg@Tcb$hopq|cE10!G9%Bk=o9o4M?=?F*T(?eKo>yj+d2C!`-xLpP>chY>>ndKv zkL$J+UC^nnje7_AuvT&HtzMj_lcE$hCaM~ZQ7RU-VyeqKG3B_w+$a(|6cR`JP8A9Y zwF=9oobEx25~nF^ew09nkQYlD%*mbRbmb+&pkdqc>0Zn(1Cj=gAAvkXRo5G9J|-$} z<}p763~d)AQ8jWZMTSm}HI0$&!eiez?Q7J$l@rv_8jdGpoCN9-6>PVmbwiLyco^eF zUg}D73c1lg_)DDh7)~mjOX>6tk!fq%amAztssn|~RNfXFM66K-k$EgKuldn3fv78E zBv-0Yhe8mJG_u;_sP}6V^EvT)YoGPQ7T-8Xn!K`}YSD6)RuU_Lt5eJnnefnlZr!C-eL%acH}q4LD@5I8_jIV z#-6kyMwF(AA{Ed{o29!FuZg+;lzn5^*C@O-HO8S+$( zIFYX8vXyZJGKx1n{zck&ZkaMB8b6?|epZneQju3o>tj9KqEfy->@-Q$fmgzlE+|JJ zvu32SLmKN$X{;x)QC~3Kep4H3Dl|e~eWoKpdXgX&dXuZDK{B+NO-W=2p)0*2Fi3U` z&o86N^15@?)L$Og8>^BTt7(EH7n%2k>Lk@}~S8Ng>q0 zZY*R=LK?UURW)CTa~S%##Fw0|H?v_=2}L;}t#1o%^aptqSo<6cH+=0Dwxlm;NBy|& z7{?07P`YWvQ!Q)SL?iTC4LmUh@q>%i zFQVC!W^!|@v9&UpA&z#l;q0;Da9}!cF*eataf9%Wu!7uZObmiHpF*-MkIGO&s$IcU z72u`SU1mlfm1P>(u|hL~$jp~|yj6}R;z}%bahYoLHm~g>+SO7{$R{&8%!`%2nr4ke zqAN%;7!oafbf`lq)u`DTp+H5{w2j=lQk%n0!;1G1*`t}mS0KK^`&uL7eZB9oS1{FM z2(A})kO+T#osbegh`n91UDS}wu!D)W%F^Irt8R*8cWd6^RPTC3bo1J8y2Wg7jR>CU z6w%lmiiD|dZ1y_N7}D2#AcTm_Ug!;v!T*D_b0L?y087+)`|6y667gF`o_tXO6Cf_E zsbIP|Dr7hh8SzN~gV(s+Goc>VCPFiaS99QXq0@G(VFe^(>>4YT@J?FQ_KM1*o^K*) zLSJ&iOV-gSjwJu3S>`{8^Y1_IG8PfY-?3PEcI0v_jPXmsNlMA@E0}Ued|#Ds777)bK@Mzp%Akawxv%!X zhnyLTFm*L9Sk})}_nTK-N|YQ*ln5`A-x&vsR3Dg%AiN;FM}t>IaSmni9aQOrA)zxF z3Nj*GCw#vOCGtU*Jn~^V-dsIW7eM(dUA7~nR;Z+iajGrGjCOtHZ{|8-iOM&L%2#5I z<{V={pYHf$FeLRN&(o?1HN4Wf%oD+h)`xcpSQ2Z=&DyQ;+T z6g5*L^R)>~VLmz}^Oheif@_j zJLL&}<%d93CVNPTVraBRsZmfg0*0)}Pd?{>d|q5zL@jG#?QxYjke#!{ z`nFnAt2h=+B{z7H8sgYOE0WDl&6&p(WrSw1`Ox79#Y|k@$V1!P^2?cbad8~4%*QIr zU!pt&TLYl(G@E%4N*~jr?`Ctn8eX!7WnNfwD%`#x8-nm7%CQP-m|=gy&UBC)U<*#B-aTc$_z@^)-eP zO1CymM($*5q#BjE4!QQ5SK_!djR1<0Wa7SLgCu%S9KENC5+6I0mZI4?a4~WCqA`9L 
z?g;Zl?Z&RMSQH$)Dk6rb;y-ZUK1qb;{13{D5L((o$jg!=*U?inS#QNsDfZu0-Qc6` zCF3O7^d`B{n*$X-J?wW2Q#ee>e)K4^2lny@=5`E*nx&DOQ}XN?hm=^(-F9(z*Da~6 zbFYN9Olw|Gy5*Omn(E~hzrPilIoi(jSPNCa^&u9 zOX2^hj(_~Yde=kGSGD!scr0kl)u~QI9X-#9@+f#L1|~}-OFs0$i#oOi-l2N@UGPjd zzvJtsrS;40k)N||=}_!Q8g&Vl#^Ch)FWL_;ceHtmDo{aXt9ahSl}u%&xUGF^IE1`L z3vA!QeFi&q-sq=nd&qi>)ogdSEQpa#=%1qHu-fRMXM{fql&Ojsr5lAAj-lM!a98?8 zGt(24(<>hD+!XQRJ4I5F$drIRzn*O9e?xz+h1m zAqf;AL9YJSegle~Ip_Jl=lS!@W1aYBzk9E}_S$Q$-)}8`I)vWuFe;_m9!i(fU~md&0YYm6dO11_DnU~tO{LwQAJ%*L_7E@ zh5gmiJf+mdT;RWbJmB*6A7Ke4uQRPn4=r2F8TOA;f9l}CFL?XeJG2$+92}gAmxOlW zy~0ct_?{rNqntbcwgM8&uTss2mL94YgH={`Kg}2fuM}NZSXdqU7zv@V5=v zHo_nGr3721m&1l5M-~U_f)tb{hnVaQK9~}_T<8au9nTPSJJzEj526~q$t;hcFSR>JLrZI`Md*LUhPVR$85`5 zhYR)fNW5_DUg==VD;?x~rk%R;uO=f1f3;7OT`kT(IpyMQ`QiFEN-9YSubzvkdV%+F zDEY64E|=RjemOi|^Pa=04qT>X?b>3a;5`rrQi5F=B+B9%2(cw|MFJo@3#Q?GX?wlnS|N)H@~>S2wx;F z$#l=60^S>P`gp(r*fN>6cSKy+*kl{^Bp;Y16kLhqR5e*Qnj#;81sHrYLzvmprpBWhO=tcZ%P-IVZvM zZq~csuB9OToq2dgXd3;mC0Q&^$8l)$#{=${PyX!MUoWG?T=q2Pz;PCyak8c*3$>8g zaS0r9ad5cx)iAwrn0|Yh-ZV^a)(U#r=L39nJ;e5DsJHxw_GEUvMzMX zwVDe{TiD#|?a?%( zp>7AI zgcW~hS}3sS1TTexl`fbXL+XBg6MNQ>eBUMTy0qD8Y*oT(TDG@`GEEw6Xy=EizuDTX z{2~B}*QKr1{lZ++A71a+4%2KLH(mOlq$XqQ$)O7$5BNS>*8;sC<~9m{-WZZ@QD!*N z6!_lNW9gw8`#6)$r~12E4Bah$42aJl)!A`=@cu5MZI6>>dc!|V01rPmZgiy zT6AK|pmWeF!M1_mx!8c235#4yyVWDTTl&`M;NW~y_S8n9TnjG@jWd&0~nksx_W?Z|VzfNLKQ%dw7YSZhD%pVInX4^F1X%o($ z!^I2q>C*HIPpzj94S@BzaHecZFx@D4t@sbSr#e(^ZwltPfOqTkoy^A5?Nauw(<`H; zV|Z2>Gu-Ai?&L9buRwb4MM=CV{oyzGlBaoP>s8*fMahuuJ`cIInis!_b2Bxb3t?>z z8D}+zXKdupiz~N3wvLo)ijAMeXTN{)d{nMZt0@hw4FZt*F%a$hv2 zP8$1pBGnfa?58mc#+TFCS*N|24gVJt+3*khAtBg)5odd_y4`rxYCQI zjoon70$HE(q_~q7Mri+f%CsvzbKEPD8Kte+%~j;sYPCgc zRh~k}(BN6lv$Mjv{JfStzAIfcM7(dP5)uKPH^~}Uvj+pagdyE^0Wx8zdItR)6|X&f z_4<|408)3Gs51Y^UM-k}WKTWYvs)#cE2>?vW$f)=zjQFco3%{c&`ycjV$EfWc5x0O zKI6ll0%FlhXU;*YfHL3L9(VD8~C!Lihz0|?tfPA2-u?k8-tRBU_G9lZ{ z8?-5fjEmM98MR*6vYT7mNmU5vyt-iucW{emRW$hvbb|J4 z65K>m>TWeDPjSS#*f}BpDULW6uRRe%tSc=z!d8>#YrUMENx!5qWHUHo%Kc)&1N)B9 
zgJ;+*ow=x{wXe7=XVObXZSUZ=Ig*46Vd{VzhAVu&AH)$4J)8EqnUW~eFCOG?cd0U5 z$>V!lNdoOv7ijzQl&o)nfS1OPDnmrdxA8labXNlJb%_jb^a;?(*wf zx~N{V84VfdMuhbSRnd~=7oJ=qOGAYVyT|r_R-EfBIAY9M$0H6O!HqxmwUqc0vpu`T zOrIoGe$B~z3Vf4F3@os2md%hrI0yOyjL+tD>FfqqTf@8-O)p;C>`S-*)ti{@X4<&b zM`9u7TlWdgMU1;%-87*-Ox;9IdAX^0h01=L2JV~tTcwe)w|{MrzDLYT}Fwk~#;pZkbR8O-9=i-tN#IwG$oWiKU|dnAgj8{jhTRx%cf7{T|ckI}yov$;od zCu@!vt(}Iw4L-E!=7k>oCrK>_$8zA5H1`=UExX3CQ*z5i*GLjNMu5apj?gIc#B07i z1xJjNod!i!W2J5t(?!UG3A+~KJc?Ugi&s(byqc4t;n{MVK{P>>N0D(?{MHBQg?`mn zEC~fxf&am~n~HH)cHj&R!U8||#=Wi?H4=7DM)eUwT`c3qF7YZH?Rid`=F(4m^jBE>Nv1SlepY16Co zIcPlG6(5np>nSz>;TX|vM3#J3q^42`@t68{e0{HCh(#p}kD( z67wp6ofTfC_1b2V*+xnq8ZNP_8siJow~DyM;^~^}`DZ<*_fRpyM!bv21HW}uuHi~z zAhz`UBwT1?<+h+BY+=hDw&IJ5eF8aSu2$A%fY!m8Uh(`WW2e zsON=Y0)BcSqnOyWOFF%~zrY&hBT8&2Dz=ipb6oUoU>^>meUS8#6b+4H^HoT*?5I{K zgyyUH)#NLDhNsOYe`Xm*((=GT7clMa@D}%g8&p^!68h2}7}NtGZ;&s3dJkx(Xp^!^ zUIPzl30bC{Rb)?IrF)Yrd{%AfABaiaFC+Mf_-1aZTHVZ_%g9QlXfCiuPk3(g_XI13 zo>=B1TQlAx-3t&Dr7-E*-L52=oFyc&Xi*b|VbwjWUi}>eX_7MtDQ? 
ziR9v93OpPw`=l?UnAE*b*<~ByJ*0A!(G0~oV<_)%cwj(Tn>;3&AiE2HLaxupXK^&& zL4#UYz|Zi7ZJHvxv!83HH&Nk?=;yd2HGbQGWw)m;pJyezCuLP%cPI4)h!iQ-NKxNF z4O(GLakN6g0x`+>8L5G1dE$X5HC{Uxo!IvavwscBV!kmMHPILQVUOtQGEwU@KGgO1 zYl;(aKGnJdid}ttq|ebz(Vmw4;l2RDwuF_mWJN8Ak|4gG57xsAc4VSzdXL+==^EOi zkhis7JGoCTuts8EgIP*)4I#zQz z!F0nwEzEGmnPkb1yr^OA4pk)UAk}+q6sti=+L~Y?f|y`(xs}RfrpPPc#uM~$tNMU_ z&GC26VBN}`UA>dfUnJTN-F>xN;i^sBVclEx2Dnlc&+&%;ylPgN_2PH5`l zWc6@94=5-`d@!}0|KQ~LnAAO3Oa9p4D+%pf{CW=F4Z%F&th^tAl-y>EmDSM`|3W5C_ltgo=h9 zTeSIkd|6ER&Ti!;hLurDI~X3GKqc?9%K;4@W*#2y@4-#gFzWk6C-`Ua_{>lb(LuK^ z&}Y#59)5dBlX`@8#;cA+xdG!Px-?w2WFbN@}z-r}Z zZ(yPGK(;?#o*CLer5Lq`B2?4+-Ecj)vzdK*nAfM_p2}xDBx!CWOQOX6D{=RLxI;ap zBqI+TOczlwtD%h-%1q(Oawa$YCjO*FXFuejjeD*o9w(R^cX`LLf2mS4tZiWP&gdIK zS9_P@n~Hc*aLXn9e(TKZFLiX*JP_TNoZ#DcIjlmE@BaLJyk`JsktVDE@aNoW!=pz?p z(Yu`g#^MOQ!03D$BEfAz_BqQB59ZgVYl*hcQn{tkI3CHCI~f){d*z;EVap!s?uwJ9 zog(|NwXoqUksia+IwtTu*Frt0*QcudJfaJpG$gNRl*q}!v$m>uNj`Wb4aY$J5BTmd ztq~s&DxYfZ(*PNDr4;807JMm0g5*_h#uoO~5i2q~G|#m#S9~xn$(mG3YXF;Ysja0# z?@5eAJ^QP7G!=E5hm*w2p^owKbGR9`61K3mwARr= z&z0YpjP&7w6Awamv@9yCT9Vp8txu7uo6TNquYhhPt#9C}d%!UAuhebng*?1gYd<%^ zvoRrxBUU-ybA6IkY$Q_K9C6>}*}aU}$%V&XyO*J<4k6Vj{JEbSfOXjNN9EDT!HILlfXNm6HS?HbbFO3OhV#q=RGJ zn(Y0u`YQZZNuedGG)_UWO*ZI=@GMMY$jq}0*cgh#33SXB$ZD+ZB6Ei9@ZeQTT!B?( zBZt%~a%lA6SuR7-jg1%~-^lwXQ!tjNsC$6BzVw}3xsuheVe4B?JS^yzsyc&yj>j+C z2(_1@dS^@O8Wg#47^&2&0*hmzLhrMqyeC@bbz?Hwz7b9&X8)p|4w+LIbi_yn<+TZ5 zu?{wb_|b*jvo!5=jUu;;I@L)fkVS6j&RE4vO0_UPq3E@J@hPUPp=sDeh*-%|p1uXH z$s@#HHINGf$I^G$;ed&$KH+JSkXb+ zDhcsx@+gijR1`=0)bjZi40wF~%@4=S=cc~x8l??qz&>8Il7;$bIsas7ANDBCd2f8t zcSlNf3NlBc6D^x;QS5%cD=~$GpYV_t%Hu;PE`fYWtevYwQepbyuGBIFBj&`-qU(}A zWlX-cLD;gAreYWykaEVjFg@^vVbt0!e2OuYHKSv27V2I&4H*YTPjf|wM#**h zp9ReKPXc910iw4@@kSI8Z5vU1BeI9)9pab2&*`G7su~Y3-D7kvmCC6tZSNR*T)W5c z={T#8ZZmLxC3wi32bVx}wWkSPB-$N=kDqCgos|yFrRAD$hRKsCq}SG=kbha*wJ%sy z*g@(WAP!kQry>*tB;Cuo)8?D_p=L;mWVz{kJEw_5CrHFBaCRcWRS#WKUY26rbbRXG 
zfD}5_mn5r)1L0>7b=cOGHuhl`nRZ9QFK1b7nkDCh?q~~mX|-fq%)55!xQnUw4u(74MlzooW@-@5)wsW_vZwN$Gz;JS-lG^_ zZMag)Piwg-%dsULp)~+{J^}LbJ$W%rI^xTo2sx)2ZOmF`dl{u;M<>mYJDiR`x{v{~ zb;WB5uXPuKQuQ$re{O&B@HoNuBsXoo3m=IBf$Q#6V}|WPJWbO%pA4SR;2)Y#*FqL` zAB0ptxXhZaos9PdKMdrWs-$r?etz7r%;K6XiVglLs+v71uFSvK)>%4g%{$qQ-Et=$ zIH`6IY)PVYky`14*H+=E`+|)ZY~o-;B+G**k4Zr8SmbO~BQZSd!{SoT864YiVcGIs zw>A}y{m?HEoyclPn-{YyA&EA9kCB@7?zmIKX!&b_il&_Znwag{tHaz?a0tSBJM~|NISiJZ#voUa9KWkQS4h!yOHM>zgrz?>TJ7T^qru8v+ zJh5RuZLgg0JY-#CDvB(uw)$z?;|Rz}s5|i~n5X3(IaUrq!#``Otg|K&eEJc~kqJvj zh$C-3n>*96sfn^>m7>(E=sz z1~}gegh4GzGE9)iTMX>%VTOG>(Kz?~(W|h~c^xm>0_tWTw+h^&0*F{F=V6<{2U$K9A0steL*QWq}J2Lnw>osYR;*vrMi|u`u>lY=LtG zkAsZwO@aFBwE39Kz$=9K6on5-b`M)*AWX(fwgvq*zP#`$Njgo0vx*W|!@RX3IvlO2 zVK6i|uV3GS1+8$X*auq*pN8bhb0NwDuik$68OU9P#8{5<#$-E$9RhT)cz(TE&6^gjOH+9TFt{l7GPT|g{;QT9!NvSm>uUrR2Rk) z^72dsB;Ay7xkQ)&PWOqRalYo5D6yr8?oA0XM6tnQ_$Hvhe@Ww@a14SXbXFh`-CLpw z5b5ATz?@l3@@{>x_j@6N#$q^jI~`2$rtW=020YzU5a+{s2%$^M#UmDrEHHkRs1WRP zv18-tr>dfRZ$D^Asg~qJREbWQxR3+EEuDE$xoLeHrN_WG_XgLkTIz{ zaaZoLOebhI$TjFQcBz^#$902zmt?p&@x?Q6A9C*DuB%Ye#?wYdnz6XFzJYkli7Q?~ zGGT$&daJ=5Egt#8;27|=;9J6EyJ~t>jCQD2!*Zuuzuqy8s^n?Cp|~nB8c7?++BEI{g}? 
zm`K{)DJ`1LITX_-g!`S##7mPBDkSbrZps^SEJ=itUPvQq%{A`qi+b=HLfTD}@dAm& zJpoc96Qp1&cA&!U8$9s^_gZA1ri7>NE{Wjv8Z>ZcsP+6kW8PIpKGFb`eiGF(z!*aA z%6FSRVb)Us_5d6UVmFLN*AYn*EHK7)t-y({3yRKT%R(kvepOLRac57U6emPW7yey9 zk}iCzB3~7_H&Mw-Ofw+bv!qaUJm1Ip|r$UZ5|fV?53 zvif4HE&U@FMf7A#kmvMLzT%T4$Rj}%39~1_tqi9an{r9T)NSc>#P|-W#sq2y*{e5I zgF$hZPJ2s5y15wGG+lphKP1pvQDPUZl|B&)x_-Ynp;xIZ(i8{`7ru?JJ|=Zx2|;j3 zP{?_05K388iMD)Vbsur7gqs9eK`p_LFL-*zE;KYgq{UG3Oo9@cMFR2_KP=HyKdtB2 zPKe=%4${dWlB?IRzZp^z?JMD%i?-8_5L{P@r@>s9uPUZ&jM6QXD{xZ(OoQqsfZ3)3 zMx6iBRRReXn34K9F*%wnj7YRXCKURodqQ$NDmmbwsgyN=9+vs!o(5jE`N7q2rQK3xEd94eYq?l;D}YRDOYLxhr8il0^owMJ8Z9V?U0R^6bc|Ky&Gh*%~4^kQpy2% z006V#UTmZSEub_~h*n$9H`jp3t3h0lNhfO~TqZ(TZvSLUsv!R63Jg_{x{T#2;HB%W zWLlDH{~#R_;l#9HjE8c(to-HH2St$4xC(A6n=%oMH07*t0T3nMkf~@gGng!@7N$q2 z0rOY|1K{ynNrehtBomKP5MW7i;`fnyC_|L&hmfLb4zwD}4iWg6ZD&}M&EgR#~xAy07@a#FAzQ50q{WkNz` zUV$s_>h<3$EqRJW%m!bs?lOQYR)8J`AWS4?up8{zzTkSMrznJVr6xPn?>gu#3}fvw zK=xrDww`$HKoujN!}uA5UAW#}4b@ZVUyH#(h1MjQ;S=dJp?}b&c-wE;f;t%$ARL5T zErx^0l~`Q*hEfxXUxPg2X_G8%4mN!VQcZohEmRE{KIHF%AeS#3sYft|Re_8|Uhoos znUEy(FBeY3pAjip&?>volnWKk`YMM0D!93mZ4A$DNG>Q@6QM$X7Z&xdnb4vjfH-Ps zp?)-gjfLqhLM@09pxrKmSl9wz^y-eLOuz$N+bZ&SV(e_8IluBo#Mj zl5rMCqHAr8-QnSK0)@$%l+#6cw8OaW=XQ{;`#H=CB*T6B9dINO%_Q@}LU%S@53ZP= ze;E*Aoj*HQ?lgS%;7iE$bUWf8PsApJn*sRoH4Cx8Dq&3&z>|adRuwqSC>&*PI9j{? 
zP^h|flSx&tP2DHRBZ54eHg(^1qzAKNj^2fVBK=wmDUWjioQEvp!9ne6f%vNQ`t(iT zq^#Y=Nk;O)tWfn}*9<_q03S9KCaG{qDm&UdIoW200R-k9fZY;OvV3|Dtfm3_=|h`B z{{VEla2+22;ryEH1|$W0jE!+igdNIa_sZv1ijakZ;`r*m+VzDeej_US9VU@|WOwZ9zjtSO&tlxwtFH+r~LssuPVJ98Eow z3uz?3e&vyNbCFlS@*G0h7gr1Jxs!`#-7v3R?@IJjo`H)9Hnu?s*%1RkdcZ*d!Hx}F z9vnF$q9Z6C5b!c_BLd8o@mgOWGzv&Sh^&X9( z96NHzersd88)zUJY-g%ZQj1k&?RJ+{qFS$@?&oML)DqZlIHX?hEyCM%0j4c4#k^3C zz;T!d3EBm*NE%G#Op=F62wcGqCgI{!PGqA&r!9J*>3SX-JQK`vyD2DqiUS}3S)8fu z&tr}fJdvimh4Nm!IFm<|;FatP(pl%L?GhW4+yYcNlVQp_8*4nU zTpYHO)N504ea6SV(STrD0B?n}BrK$>H1*O5Zh}vbL~_0GDTf<(lKb>h4tSfB=QN#U zJSgattzMV`d7U0xFtO#g#lFODF)b8xcB%I}2T;zux9d3^HJhz1qaFr?Z(}P8`a^21 zj8KOkySMpHvWyy;qdYQu#S1`tu-n*)-2&-+NZ$dgJ(kd%rccA*5tI5{O>`ifr7m$ulA`<-3=#D>>v#`saQHqOihSl0X&bp*=BmuIzE3YQ^ZK*6J0f*pE%C(wxiQZRBfQTK*Ov^2irIGPnzh z>l_?nmgaZn<;QK=zxvOyXKR3h`YHNqS!?xw84k3*ApPlpO%Ju{Ge6dM82?ab^gXLh zeD4m?$vSo=BojFBQ2C8`v8T%0U%o#Sbmq=1qQ~;vjXUnlPfUGypWI#dZI?daqp}y` zvx4w6YT#O~SfJ=J9Z;+o=qg{xd~*SRdExxg6|Z+yPtY{hWVE~$`{&m36{j@!v&Ob} z3%~Cas{Rb+h^bmry=(DXUxb3xdft%Q%AstBvqG<(w@Hzy4@nc#|K{LuhihYZ5V?O1 z;buC834u)%?PhxCmN;adJseTC*5Mz&n*PBhS;r$e3Voi&32 z{|#I8es4>11oxdZZ_dnA@ACIK{C*UOia&F(|J>Q?Wv95F3IyWh-o;NHELP#J@(H3@mi)=TYj?5K~RND4MsIKDI4>{78$WyEMl4x0gP4T>kMF zK~BjlWn9{^eQ6zwQb_TF6_@J=SH^6h8o#Ocb4r{%_tGT>)_|4dnD zE?WF|hk@49&<6mzB@i3HbU?k$KhPs*mOm2P{YU67bG{4wEVp7Hq)4d1j!yg4@}<|A zp1=F8_RC`zt&wCAlzwrM7@|{`FL-M#YsC&y*aacqNh|-r)VLa|BJT7d()NnQxuos9 z6n9FwCcnIf_K9CW7b_V}<%_ow^>1mmJL^I>!&Ew9Dj})4ADH}$foS|Fez{P-->_NY zQ0o2b5Ta_OkiLtk;-R@rAw^reILs*{^9HnT8Qq<4uxoSoFIc z5o#uMSb#r&g62!VwQRFaX`vgCXxC$y#(~uL;E@l}Bi^OUmh@}p6q3u)i`IcZq;7#n zHlatf%ccAE)4H`^iQ0!iBo$^p$PU{?+;YKu)$tHk{cbxnyICh>1EtCZ)j3=^woJUI z0M9?!HR=s3`V*|>0ixh9sjEZCt^T*4?QZyw4X}3?A0D`9-Ovt_{noThBn}sjYoI>_ z<0i-?`8Q3MU`_cT+OLL|KchSbt0`do1D;V{eE3HgM*-t}y04FJ#!B|^Y=K| zMJ3EVH2y{0{y6SpNsz`XyithmrR(1a`dV?a}(^$Yye z_eR!xQ+F=gRCNHl$oTQf1M|!q2JEemB4U$Qu<|i|cGCKSelXU2)lp*o<|~o{Iz2($ zu$wD=XDYSKG9Nj1r2akDhIyxeV`}Y?H2%A+OJvn|=5v~|HujmRf0)wRg3iAvaT-py 
zrnPCl%G?JRgAhX4(L9l}c6EY?eQPl7JwdwjB>klsIGecg%#!LE<9-EpJnxj>dh(;m zO{7oY{I1lRL0R8^_uE6tna{tiHGtRQQM|4D`P3u*D04C^c=oG~dt)LOTB*0*w0iVp zH}ED`B|p6vm;fEs>EEAG9XY0+*g+pHpSV5FU=2<{O68mRss$o}i7oj14>m)K3>h*5 z*C7@Hb!`99(thGHHr;St*D-%%!TRW5akY1v&M5^%#6;Nz*1Ev2w}Bp9^K=syaEL-W&Mu0 zvX3(!HmC0VGad>8r30r&%07RkM_?-p7$5rsI}pf)HAc19M;w3C>5nBv{aq{wOW~t` z{XTZIFHiq`$Yc;db~LA}#)h8;M!x)cwLd1$ zv|qh<6D_aM{k4T(k|)64q5aN5eCL4u0YP2QMP9Ac+EzSCs9vS=u-P8|A$6X}O%UE+ z>ztxu)hN!;B8scKsE}WX@J9fmJ)eU7v`ZVb&_xOh5=$x5z#;y?1OazBMN>+KY)L|a;xHu> z8ry*Jh4N4c_^`0DzzdLGx|rEeW8H#B13?f#0B@Vv5bbvoxRZre(2jU^-92!Hvv!M5 zdar`Xovb$(!O|d+*CV=kmLs9zZOR+AXl8B07A)AuqEz^vogodJ=_9jh<&{KmDd5Mc z{Tnbg7vc$lEPdY6MwRqu4Y=5pK%NabY^>Av2N6vvDsGLnO>|D0Ndv`10$n-3rnL})RH=~o`d@;9ZFd$idLsv}Tr-iUnyyMaAVb#OhDgfn zdfej_ERDKCs3izFUb02;P;LO${IOLWEEFOe!>yZIeo&qsR;5K+eTD=3c); zT*Q#hM{f_7PP6lae z4bqwnPPb~+qQ&qwQaMo+srC?CjPtZ9KU*$48B~MVIk=K5r7IU%#&!k3wl@lr7t3hE zeuoIazI4(ytGzwqw=}7|Zw4JuNO&*CfgyQ@kqmkY>u0md8T;zD*{Qk>=3$Ps9OtGj ziO|kL7D**Ea~VqT6l!JEFvNTg_B0`RSjiCCHsshOkhB&pepuN_wLb;l01j0W1Y3}q zPjbs@t|%3U_(5g|5t&iG(9Kl4{ouZeAV7->{1924l8KTjSfUES=|Ip%nWxt}=KWF7 zyGsTYiHDpye)GkP%2a)i+|H~;;X70b)Q|miTt}b!4}wgWxjw+?TZ!Yd0U+++R7eaBO>18K{B;pbU#{Q9pPz# z%e(ANRmxgOQpZL@mj8q93)>GSQ-WP_N0^})>o^+AL}K)0z>NA@yr7KWl~kaa5kyFM zLyhxK9?OFBdq5HRw-F2nm+{0u>@Nae*D|iRgeRXI?C*RMHXc z7LaThdy?#_OD6#f7&34eF0wL2UMH_4qnvUPh-V2hz-ucd0zf_%uiUROnx_4-SH-b1 z6#E0gdgeYW#dw9ln_VshL7e4=A=s)Eyv89gGG;F@fI+}$lMv)+1b?82JcWarheXxA zMq)}Mtky_^whBCXM2KZ zO2(|sYe@`Wx*qLpJ}0L7y+6kvf^FwrPMV<8Tv)Vrz&;SUSc$Nwdxr<(%QdwaED9i* z9WO-|82DNQl^R34yd8OV1lkU#@*tTm@{IS?59;s)gd)o$nc8&fRT_ZtKz0NP01gJ9 zdObyK1dJRS30QTdd%)T0#Bl2IeHe?P0P1XX+p3H)TS+pNngx7az=je1Gk8*&GrcD~ zC946hPvHdQ5P(qM)r=m$1dMq@)ElpSR)fg0819gKf8~TH&V#qxbya(Y`k%07dghWb zg^qQIM~Zw-R&FDmRx38^v}ekV3YO8fWFJyEX$rjwqZ9^HF%kk0f!q-hXmPGU4(zO1 zf|n5+^lCm21sm?;3*6D0h|E_@2F@5jWJ_X!YJJL;LUl*Y5ZX*h3%H(YfS3>~f z;lWBiy`AA2L^2`vnH>{%ycd82)J{6BwaXH!ZqAAt3|2A;ST%)2M7&DDbkYUS+kX8d 
z##T9i(BXkf7eh3_$^c^n@hk>Fb}l>##9|(hLzbKxET4h-F_SS&jq-{(7T6P_`B)R-v3x3F#Eg&JL&-*+(*bj_JvB;O6Nym76Ir2qb2vAw38Q#qEi307p zsY?$J`2uT+Bnz`s748v9nV_qytcLPEV-cVnFsFE_s6V5^_|tMt&14iJ<tXwMT-*;KTdkV(C&i~bjeK~vOpB3{NQVD$PM2rGb0tDcBBZef`9D8FzyffpeI z6nHnv$yX1_fs3uj5~(+AQA-gJmhC|y?eCxY8tY0sMCRrqZFwoUS-G|+qKUBZ@NmC% zmVTFb(bR4DHkFg7MVuE7No8lJ+O-;JuSu$u2#aqB1J6kW#Al#3c>5V3be z5mKnnlUG*o#>ZXSv@R3<3y65gFr4W!d*P}L@g!B|U9%HSB?n$_Ig54|?mq=*2b(J3 z6Y*L#Ur(SA5WR2lpoF_AhfAXn85k>m*fw%zy3nyO>>85bJ`Y~}QR>*h|9X;(m>M5C z4bO8CYtAE99O5l68hfYlA}N@AAoJ$P^*|+bh8^HH0)-wi1VH`IF&~>np89WAa`_=C z|KnQqVX1*bIXuz!*)!9YW{B=K`ns>8r6>g0Qf=gFmHA7flYbMY!Wezbyn5_&Qy@!NiCZ## z7tMVp2d?MnlC8{R=Nq-@p4dHf4o9_(G@j-&=;0~VKFddRUuB$BRw14>2KKKPpUeV$ z-6Zyf0+u#DZ}EUSTA*vBgTtv1d!84??7;&3wr;VVNyyD3B8Hm!)!icvhMNbgNU&q`@=- zxFaAwzn%jB#C_LjsmKbf?_<`&wc`nz+t@x}hMJQ?K1Nn$TlIfL4|KN?=@TBjFyO9L zHJULp^Kk;AqeY<#rEQ6-Ildlx00VOk$Qs%aQUD37qlu-bgoi_S5ct`-va4CTW1X-ikL^qb3A`od=2o zFy?x;^v(Y(X^G*v z9_V)WFcu9}I(;sW2rCB;Gs^FZ__I6zU}Yh8@UV>Y#J;QBt9_6+ex-p{Ex;lWWrGko z7O1SuY@RDhB~aqA>Oie_8pvW>4Aq$=ZCVTWJoXsw3Y=9A;9Thcd9Q^cwKig5Ix45z zmJm^@0sd0~f9})G2w6lmZV+#vI80yw-b4!9BNJmqVUc1AE)k8ff2&wU!s9loa|6%m1mg(_X|7XaPHZ$uW1&xEs;a>VGZs6mzMD_um@b~*=e zK76;`z&JMGZef)f*{Hy46O`0%?xymp_{5~t-vP0!3RsokV{B5MgCI1quFvLeg~$!B ztpG054eIhBw676`NauC@ik5OX0noBStWQkduee}<)Fs5A2^L;`W~kI2N3n9`bl}lK zaVQ?wZ{80ja8aT~vX{h90{;)=%eaGz@~4wh0siab`$;aM@Dze0x+oM2`jW)b(0RnG zp)I-H%S^VD07?W>#M60vuiolXA{N+-pumw4f4?A?H3J-0!C@L=Lg+IDd0IOu<1AJJ z{ekI5m%;izuMeR70#YPf;iP3=j%2O|N>>IHK%>wbe7yYa)zN+_>H?U=YHi8k6qI40 z;O+Sjb#X$z0_s8lafBhUGFix)?e^6iIv#HZ-U(FiLtf%)k*N)N{#ZnUTB513dEw7saXOB_1bj-B6Gx`Vr@u<^uy~| z6SWPkkeYs#3|@>(>w|v2&Kd(%DRx=|IAjG@C>zJ-76z<{dX_LEJOL=uQI-LyPp4l6 z=8M@*JAlU^dxeVaftLY<3n^YwnI^R@40X_latPt65S2F2_NoUl5+%4SmKu8*{kFlx zSq;?9yFl_3l26+TtklI9LjG#0rXI3BG*tYqglmm$XxC%)?cgi|;RBDn7_mDx2$s7F z@+byA0OSWZnVfxnnlOflojD4`o`1r3C31PGX49QN2 z;E7Y!bTXmB2r$V>;qmNosA2(F+DnV8wt$(+PsCy)qVPjc=*%E5y4OMoa0hn61#_+t zG>e{30X6{~+%6X6u@7rc(1=-(7KCgIad1<20~8K!*t$gtB;0chwO6+!nB`#?P_ObV 
z_I2)4&fnu;5DY3G6C2khr9kdBl8w|goU?buLQ(l4t@O$GIVjDM(dEVMs zmVy~6hcCSBXsT10>(88Ikk(=<9Lq>Y4f@6qCF{8A`Hptu^fOgq|9H!n{o~U=v45$< zUhn$dIPvd~Mv!feqGe=l^v5Z89Mrq8xtiYvszS2s1};qfYGB`7#)Q8e?{S#5mwJEy z!0Xd5qELsNl_A+#?dc)%ovh_w`x|_XtFp(X4%-)DeU9STkE79=229UXH(iYF#1J|A zxa#|ucH@gqRYCuA{+In{%TfZ3_8C5U-;U4k4D4PxfZ~&fF0l$T2*p=zA2#Vx&3<|Iori?iiA*P{5MkX-?YyHEwfBswO^M!%nYY=%rm6~$?)A6j);g-5b5ecFf%}(RfD|YV4=GgDVJB^E|v+~$7 zMA_j#>~&UeeNhhpx|@Am2ZxWKV;)8ia9NIAI@s)%V#WI>mVuQH+t1m_4o@QA?WMg8J!`ySnS{KiCL;kSUGiG2V5a#YtLdU4jV8U#r#N$F9<;u|%~ z!CQy4PZo!sUrq*aNFy(PoOyd0?Cg-XX6fnWXtcv3Yx2W8rZ=BXU3}WI93^-7K5OaY zH-z1}xO5|G8@EJ1aQIqbSDWqn<%>B5zsLoVUhZstW92#UYf?~~v%AZIaEI-Gvy185 zH+JCEp~d(0BYj*V0yu2v*dPA;8{&B6XV;HYZ%`&29xZYvPED*W6TP1E5@+HO@r`-h za<)(Ud*EKZW8K7F+EjTNQYw_$RgSnc&5 z=h1SNy~DKc#TbBOc+7^qBC9xDwo87hJchU>wte}&I{08So6M{vsNbDhPJD52AdRN3 zn28%>X}VFH=gFt|v2>N|-l;(RX>;R7=;vfI-KcRMy$6*$o0oLnA@bo<=1gp1qMBE)gO^8RS*NcPvaL&7Oj|1EB7zYVm# zN+qPlQ7^5)D>l{dx}IaskaO;2jX%q2zS%bR#G1yiXg((zcmvO^hXt!C(|~Uixg4mM z21ZSoRtmk7`Y0;qZnbKjbw8*T<3gdeO&RLed^L#LhQ8YtJ=LBt*Pg;B*-Ao)r$&1h z9F9f89R6{l?XS1bG48@0?HhdpjOc8oE~pHs1>m^B{~(m>&J(@=*qZGw=r}}gUDNFY zgeXA>JNE~VDDUfOo~A~j*&oQiU$i<|Rbz`=bcCvN$;qZ^fT`n)5!6z_- zJ15#Gx1lUIOe`qsM>Z$pxTS1wl%HpTl@Mj1@Ty#Barf{$Uus$wr0EJx`FzZ8y1(XV zUX&`c@VaSeO#=X76z>wH3Lti_&1uXU$myBLkSGNnfhSdCJDjZP_MI3gZi+dzfSR9< zeZ74yy=*Ey#i=!anBQ;Qis$2vU=KJF+aJ65=Q3l%W3TqdRGMVL5P@w*RJn|wSjuG3 zzyizTL2nGQNX~Vk#y-S1|3r{E&z>j}bm%nyw&5H4+&-Gr$pT~xRJa56chC-n%JUA9 z-}-CyfH8t^nWi@XMDs)iEd}A{k>6<6ikWBa-;r*$PrxnIT;aUm7Fp6^ZF9Xn^4qw@ ziVcK}O=u?u)m*yQ4I6>UtA*s?xi{AvViUjB@qArtucM%Y&+F4ep>H3gd5^Z` zkZ%1q-f39NPjPGciE3^~Y;MptY_+HQVTll^bPugJ>DPY6_9*TUx4nHk&-FHcAu;uL zCsW(mkLI}xJF7hA?TL30Ieg5e@wDbUu^>Aoe!_aVHMP^n{Ma|^Ixe0Ay$qH;RvY&; zkn^0O+O{g;&$O(Kb$83wWAD6^^x3Jv|9txyWjyzU>fLYgP|KhwQRzAi9g>{MNU^B= z4a|M$`M|Ru#i?=<{iw+ibWH*a97Y1_^#fGXP2Hm>+I&554N6U$zG7hyRmRuLbv39x 
zSru|ify;%(B0x-)?+-_=CN!@ia-q(?1aNB^6qG7FbXkL0o?|G-Pf+)TMT#$4Lo0En3&k^%Yzv~-iOoax$cF+YoI#2DLTBui0@>7`K)pRfxDXabOz5M7l>5YEpg)`(T+dT7>CnTg~=ENA|gjG@ZVvNMV=Xb&p~R zv^K*+=?Ha!j0IxBj%o9!357V`a8A@;(#Dy{u5i4;8E11I{qT?PfByqCl%EVG{Ls zn#a%5={kDpQEA>Z5(N(-{0PF{jFlGD2+;5M+U;BX6!ILtYq4@k*R=rf#=E@SjL>~R z^YhXnadj3YW0HFid>5}wS2;QTPZkLF70&{Ve#OxCpG76tdImIIW=5G)H(t5-O{6hIbImyZG@6u zL?OVMSrz5l%Z;mHC_*aAR6$w}aS+7Q75qK+`{C5Q*GFq>mMt8-Ay~M(OVA_eJA}26 zvH)0*rBQRYAKO~zgZiXus|Ji$(KwKO0xbUuYI@{szfE?Tu3SJMq1&)`v6J>vD!CTo~TMkR|?H% z_>CAOfp`E=9C&cy86>zl{Aon#M_Ds_eihspbYg)OQ4|CO0?4=`T8LfIzXk{TOR?$h zY!UHP{F=ef;P>o3JV1QOn0%+CuLbK(^g0tg)cN#Q1rf^UPoJxpWk7xEg98?mBD#&o z2AHBcg#D19K7fIWhv3j&&zD8c9vtml>d%C#rbD5fN7%I<2}be?8J?fI;l4iCMuEC7 zNc-B;%z$jejj*?*(GQyJpq5wiJQ1s2lSIKs;YFw<(7Xw9XBd|O%e!}5?m;_DR60es zH@m?01QMbm@Bk2eALe^$sZ9>**koVz1J#d?|3i1;ai#)(3(@(M8cqDE(Hk{E>*iO` zV7WJRxMf&MTi(0Jjw-H+3V#xfXSELW`zvJ8nj*Ng!{ts5muAj=2&0vT-q5ps-V82Rys39NVG3c&>pE#824NVQO zdKs*}85H%^z(`At%78lpO%!gX+>qoiGhBlKc3(@p27E1($~?WoNEp)*1>2#wF>1SE zCo-Ioh90A7o=MwVyooOX4u-Y_-d*)EEo-eJ$5|`l&HQRRn7xb41nv4T}~#W&l7xSb#Y&wKpbdi(4CVWDKGl)QYX( z$Siy-k5#u?R0yCpa3Dr_&RF*W#EO|hcs6#~+AR$RJRzJhAL?{S%Z~4=&#;p#xQU(I ze0)-nxN>{dP0^}30K&=vjTE83Vpa8s=n_In<)X^XH&5(h53XevxePEM zbPHL3bUfU;@b_b5?`^-tdJN6t&Y(^_P9w# zJd2~@rWDmreI`|j`C^d^LaI@ZJXi4G;a-L2QAsMby{D9dFs7a^+LKZY4Od6|{SFDlR*9L0xXLj2}0tfYe=X<|<{N6S;+dULGt?agR2moF$*xS4F;T1La z`B0|y33iv|A1Up_s{6wmhB}F~v;TU*`@v?FHmTPF7s{6eqtQL2NmM0;v$57VcpU>_ z7rSgR0I$ZHu0cOE@Mtg#4Ofuzmt!|sWpq59#;#|Udj(ygQ2|Ma08Cn2P0PY66V*sx zosOnbvd|Uu&w8wWrN@v^TGV4n4A`c7&in7S==D%AiEq##fcF(YW9Uopvf@+nAND?Z zhQKp&49$vs0_b#ktgX4$kxpBEvz2ytzpCdkv~6i!W|E6czB^80J(uVW(6&z#$wFj? 
zkY{h2V;^J?!!c<6EOM2gx7w34zztL@Jq~88aw_;09nd)G=#vvrR?Bd8jsotFlx65J z$3T}ZWb(&W(xMGhWE#4?QRjq2fcsDj!!2hs>C(a__cHd_irb2kB+Kpb7OFiC@kWMf z;goRd^hGv{mXtj^zS7P;dA0Iu6}Lpy(s;^m1kk_~;L$zTP6Q)N9;i6#4O>&_ch%|b zpJtnR(9q+8pHDV+9Rk4{io(O82w(o70GgF7onniBLRZfJa1o(+Hw@k1z(n#ts8>TM zb}nz>0d9)IDSetlN7d~ALajr25mFJfsCn}c+q=`*^YwSe-*l>Wup#okHph1+j}DiL zanLIx@p$qpZ$pDTD*qdsA4eSnQYIoPDxaaV3LqR;ij&Mpl#(19EfW~t>v;*FT0VU( zmjitI!Sw*Rx_?K)ez@!budfZ;nb7_Jb$8}rO`d6^PusEQ>#<*3>!^bYP6w415J#g> z2mz;6Xe&w?S&VEmngs=niUcG;m`y9P47(u`9MgibWlRKw5aP5VLP#PaU|1~*0TM(A zB!uf6vRucEnQqenib@h$ zOS$8!mRc15Y6y@tk5RaGG3Lg&xUpK~*Winy7;lbh40!1_W87qX4k>?^Z>?zPL+%o) z{s$XgFk)Om%p&p$Mf)_rvkag0>Y^*rSpl$XL0l3%T5*sOmV)_j2&tZ|&i44&i)o=o za4GVi1LTDA7z5jE0pJFCb+&Lw(#c`yt%EDDkTBO$Y=Z$u4DSdp#J@rJ0X{*Bc}-Ad z-1!Rr++Ay6nz_eROvtYocyOI)8qk;rLVHJ}PENFWP56X)7ST?LamIdbeq%EwL?c-@ zV))$vx94V_kk*#o6FTt%{7PW!yTefwvq4>C0MhJbBPzp*hkP?{*uy*Ah$(M;!0(@5 z7zM^a!7RX^yf8yPneHM5wnrtbQOFo?G};5i7WgF8kKxQ%)Wgc}nDzCf*WN%7EU;i3 znh}l{tf*d`U^RwNA-WAz5W!g!upjE7Jn|eU3koVF4#CZYodY&;L@+Kdy;tdmHlGyu4KrrTB2Se!>kxyM=Au zd_c$+8H|3@c21&#&^yxet$(XNr3gV5cM>NL_R&Ic^!SWJp@uWO?V0HmE25G_2S3#Y zuZSYIvLgVEDTi|*IPW3=0~gHg;5rV-4)D=v?F!rK*Wi~lJM#n)V5bsY7ZzwA_aUL>| zB4x)TnSj}dU}9f$fX;v@eRvH1K!-qnYfer>445s~xyy@wRdv|N8rV%5#SXJzLkC1O zB6#;^hXD<~8|>e}q>DW;L-)quMi7>bm4S%&poK#I@))dvZ_R<&FZd@2EC9U&ga9-S z_*^4~!u?MMyPiUb4A{{vO+z116nqc{0sKaG04ZTb?;URJ#!HPoM+d0ImCGHelG}i&jU#J?XIEZ4|d&Ie6I+V$;su@`LzrX1_aG2=S)CKFvuA z0H5cNVatqS=z*Y)7!&>n4O7v-!~!$~I376>4@VG!uPEj+kQ#9(|Mo#&gqB~6Cn=YZ5mF<5RsoBTPz!E!W1r6y@C zhSN(h6%#-&u)e%r=QSgj+aR2($h3*8f7`T)v&4=e5{I!%-^YJDeHb$x-Lnst;9k8y z%QP;JXIvq}uyKdmA;fk2E0dTnF5MbEkC}ES#`eAsK2VYvdY_F`kKPTvk7_f(*kjM*+KqX~Q|o=gwWaMe}2dJ?Hx9 zZzVW%uq+4lO~F6m&Kx2xFzUj}jjc+E=$?Cf0um6aY9T-GXjLXM8GwN~u5VLu)LYe+ z0yF(EiS_pwFcVy(tUiNtF4XkCH|_kqeHSjkf3`Aor1zhTPQnSD)r*FZ-anq5^72sZ zV9I&iaAO(UXFeI;Ds@T6dT^$UHht{fg!(lLb485<#vY$xMwR2{%A&+JfflvZJEw0z z4#_To%6XU#-6HAc5A1Ee_MGP{OvpHEbjcFClry*TDKYjG5{Z4r*%g-71S0knb}2P}Wdq%j z6Mq7nq)qO}B_^Ff? 
z(!~Y+1zqdFu0fn$}uUY+QrDTc`cU4~l9no?|%{E-8 zFE^Fr${_ppfo?Ul_YnOq%|D@bxvKy!8q5$ohmoE=G=@h61i}i->^&Y;xsC7~X}OVzhMS=)OQ-k!^H zj{=-CC{Q*`b|87_!A!Dl!*#`SdpxcNS>9u0P`Pgz23NcaoFjVt7(Cvpt9NK4 z?%bEZEbJbKdy3pIf=*yNg|A~zba%edb6g~nOPg^K0h^h{`ZvV-s}oSb=_;i2VEwZ9 zD8C#Rx7-N^w=X|`A|w8>l%>m`qwSOoIm{I}6f7)Q#XYtH!}<+>82;XEy@~V^IA8ZJ zLpk68C6Ti*9z4&_&ZkKd0qmgv#Y0EoPb)iGACi-AB^h&=cSASoKsP~| zI|wZ<`QL67;^y|rG0hw6Q0xiwsyE)(vjE7Z_>hEknSPmGB7S3>;ZDPP@{XfXW{Fb| zHiC^}#VdQ|1o#0|Ak`?ujx2cq;c$HLE_%lij-IeRvlvS*uK<~y8a0|@ z(O6HI0l$eqZ7#=z%Ab003L9`l;~a=?IFpZRFfL|W*7P22*#vGp-NuyyUB~HB4N4KF ziMnYxgq(_u8g+E>*)jAq5$j;;3jY8gyn@{ z^RE4FUU61pPHEcbA6$=bur!%vI;F&d`54NDMHB z%{TlSpkL9O#jGBp(tShlmGX98EZ*`P;i=wOA!ynzts1liL-fw}<{4F2M%?+a^1G_`=l@d=X+({`doxE9 zd<%VNE=un^WUmT&={1V-nl;z8_d(Kln=VV!eYnvRZyhra8sUT;j6ETSkAzXXU8kF< zj018KieFDXUXTUgu?pZ6G+?Q>q~j2bOD$!MEM@Pqg(O#%rQQEw28p|1U0xtC3SLl}IvmCczyI;hG0;PXvs)#K2pJxjO&$(W7Fwy=X5EO7M=1ek^SChErwk z72Q&9oF;J90MpQLS2O6xWao_eFtC{4?{k-$9(^8taG`el_&zoD9`H_}p6>%~ozGSM z6Vw{unf=;wXUuQY#Zkafx_8)=Aje<1rd7$SLLAEOMu-K0P~_2uC5JS~+4hev_1hE2 z4(ZLX$(eXE=OY?|L1JG2p;}8Ki3*2HV1Bw$F;xZ)@fHEksqxBVod?7vAKAmW$_&2F zBoG|R)A8Yp#MO`dBc<>$42Ap_6rH z5M7TBZ&rkVU$pJoEuji%)Y6=ZT=U+!P_b&TJdrQ882R|bb~OYd8>#Ax#g1IjHQfYH z6NuSZOjP0z#xkal3PmM=CJs#3&!77+XO_x5icM~I`|5jW#Zz6yg1HUi}j zBq5EuJyXgxLt5>&j64Kr#S%X(m1srXjeOA$XHJ@WkNf9H`&@Of-hBOa)(C$NtS0;r zn98$hMH)k8ku(YUr2HQjo(&vafg`(Q$ze00t1z~)ux8e8p_MfW>uMpa!>n;D*bCjj zZ@Av>e56hlNIKqy%nJ~kcC8tV9}lT?C7uUX2xQ7bMuPe_2w&t6&!z=1upp=05^$f- zYKU19N_62C?TPp%X<=MP;T)Vah8A3RI71cJ+m9s#X~SAw7Kqa1#kc9Nq5+^?9FOcd z0N(Q_`m1<-T|Am+k=7kDYaunyE)J3LSAV}E>7IfE{eK+Tky-O zNUkFfu<;&a9pZMr=`9-Lza}(aN6w>o4^Y_^$V&6ff7Te5B#uS21fGSg>S!GekNN2k zy#~Y`dCT?y#5oz2ok9(e%S~;BA&BoQsS=uG5Jo{ZsZ^nA>7&NtfF>c=bjDEf=i*N~ zxwK-PLE*N3y8=_4;**0!9UDMf2H0R@!WPKRLMWDRX`q1ZL;*>K`29d$-m@5T11-IW zT&!=>jwSSrd$fvv>nI?TkRciH&?54S4rIoLqA=TX0EABYv>H&*(2=1b+2%Azt+&+G zXjjZRvU-4R&78M9m~B+A#Rx+Ct~J&G)E}`yjsJ#)(Fom5QUlX>Qz-=|2KwvoG|K42 zSuZ=c_PCWIBhuD09+@mw2Rv9)93&ORh5cL1U326h+jbCVIoP{$bf|@c#jgPMZMI 
z94&z^ElooYOCQzNG63NdUa!0TjNJuhuZn=!ZORBII31lRmtF)qOv5^B(Wr7zn*xwO z*7pdbg47x%Mnql)q98&LiG}ICV8-TRzRZo}g6~+`wY_>D7M4$$huz_<5vc!G8Midz zaRL7_g01{AiIu>#iub52(!Aa?;rw8lCo3Zee_l1HiXc<8uOBP}MMi@_#rVbQM|X;{ zCYRO)TmXa$XHXO)|AEp5n|{bM+A|xgV%csHGP${sbX^!P%XDV~4GxXQDk1B*>BnvU z0YG0}#HxOxf5ICjJlZj;1PIpBPcit9_1A6K_;NjSU=CrrOuuJl#ICo`Z-d+md|K}n zlpopj<2D0gMo(5e?)izW|0Bvgz4%L@O?mplJvZ`Kq#};EmSp0NNG= z^|&_f;A4s1UpMdXFc*}tAVvhLLg~g$Kj^Fms^aXDjx)Q5oq+sex&qOJmXf!o_f^)w ze(v=@a+|*d#2K(SS(*Mpij@NW8H8U8)j!_BKe@%6y(lEDvv5HyHSkbX0^rk9Zp$VK7P4I+v7p!RaT3CfXTRuep zI~4P3vt)i+EoqSdDR)Zp!>7{m?)mx!!@@0F>0I2jezt*W8d?#y`+7m@Wm6Y_xOey3 z-7graqUc6uz#7-TB$n`gqb_<_O(l1WY!^-`BQ%0p2WihAbNV4mj9GbBzhidXMY!@= zetd;zkLfUY6*M0pAN+Gp_S>DbDr-@FpmRU5?7RGb-7FdX%HGvlJ<fa)TFM`98~TMgx~dCx%pp7vjYkIKk2QQhb!VpV>(z2X)SVWc}SMg*El- z#_(6uBB=tU8A+cOR{I-uwYA9eK-=Vwm%17rclLDY-M^kPiwx2^`jP`j$A@Uk%RIRL z`rOPsYh>sAk-jYub=7_x|`qsxWGI5XTIQdSLa8RyT{vA&u8Nr0Q% zI3fsm!mSiWeU!f-A6-Kn3Z&L4u1`DO!Dm%9$3Btk2HXWAMuC&xt`0)(LMFla`MR+= z?sN*_;LY~?3jVZN1s-BqUFP1;uQ7UTgCYpj3m+QV=lfi@&Hs^<5TDE-l?7fURL5@^ zGmK7uP+BNfBu$wm#eZNav{ZUT*5*nB*u>o}j(F{RQPB@<`eule=26+-s_GP{C$ys7 zqQ=7fpELBPNqFFB!u%%-9l^7O_gx`qj^FH`F4F!cFjhx!SxB?0ZMnIkXu`S3+P$3) z1pVRO`B=X?J#(1w()J(nEBW0`s;J2MB6>B8VFaS#aIv6daD5Rxb7diO>Jpr4@Gq;s z+K|uG8H6c^vSpiP_tjoa*Bm*Q0T18HIr%pTXeVMNFIMcKH(IjT4&>QLs=`V z5(+IxJRd%&YU_kUKnuG2- zlr27fU?I$%umLP7Hnb>`?qPF4jYZv}0|f4s8Jsj8N^~0*J{$WG)Sk2elJCs-Q(Zho zkKCex%Cq0Za%ll>a##&xD`P*{hrlw_89Fm*=Ep38SZsC>5V24BD5w=ya*9x_Z3}T} zVN8(CML<@grjoVpWpcV8B`{k-7J6mV`6KP_#S%q2r6KiW*_ zXdY0IwWaIu?gFJRC)G9oBy(|UO3=e@?#uad$&}}1)=`LKdEO}KD7;Uzht??q_?6!%+`R=)tHO&@gu*qqRj1lZ8s|<>50n)`vRNm4 z62!;N(_1c*R`I1w7g0em^ad zrkHdc_tc~n!zceNg*39ZPn+TrMX5+vQk1f=(>WA}*060klp8_zTz3-VL1NE0 z?%P^g)SnYJtzniiWJO)B)Rc4K1D`saD8S1%%gWijt}+NmXILr&2^lh<-6C62gTODz zF6%*TCL{u@TBJ(Zh^#wi%)5oTou%>$c*agjhvdJe{bY^rg}18BUkK-4U2>iMnxT_n zU%w){f4sxyzRLW5v5(k&DTlO&-FkrJqF#T%abCMuk!Yj|KX9(7(zOBn!y6fn~m(2$6_#Cd2! 
z1Ss?fTW$+NpEQY6$H!ur6+edPvu2eZz<+}O?yY1Vs7)vR@%%sb3>4!z&>i4wxoO>f zC%v1xik?`N z=a%#JiTNx%N63aulUvyAlXWRJ#C1abt)@BVk1$p$}pn4Ahqn|Ly{U>%`sVj`f*6UHLjYXq|KoTZXY z)6-EfUSjWpko~kQQSrIbX$h)FQ3Z@|p@j19e04xS2|&baPIX9jExU zwlXWUpf3EmRsY=_O&=0L1F}J$rShgjJ0X~Ip1o%v*iq=nArJ_7!iLhpn|IIUH$rx7 z&>=LCsVdVqtu`*kRPl++BZ*V7Ld8=?lao4JA)gok84PTDs1|+GM5vbyVNC>L_!NkokuIe?`o$i z9}c8)Bm36hZfm$idSO;%($kkT|p z)FWu`124Q46MWaivIdb%LG0=4V^~7xRNxHD{Pk`S_o{UHJ`7j6{P2Uj3nOV!tnfb+ zY-C#6;IF5DTE1~b@m9<0S$?!`v{v#A;Jqwk3+ zQkkVdZq<>K5nTTL?C^XtCd~xS^cB{$$aSv%?!weg)?>F=JII)7 zo4YffCV)L~;f1>NaT}*pl$L*c! zFom$`yKnrq9ck7D40R;w?$0U%(4LW7Ld&3_Ar@URmDli%Qp~0H|2W=Q21q_6c!%+! ze$}DKX4`=nQgmU|LSXGoJid0!%+KYBW*8i&Xvf|8w5^mr@1n=pBg>c%^T4YWt1PqsV(Cm+@ZN-2nBq-$cQ4u$=^1 zb@pY^(@hj5Z1c!Mnj6i=AZ${SsCy)#s*Ze`On1L33|!z?WvAe;NLW*5efsVE%;VW;kpRKaQ9CcNsJdlFA1)whsGFL-`eq`V%VYD@ zJSwv)p_o_nA!XnF)GnjH3f)5-ry(O8qyvT?_1+J8AQXo zx4WZZ2YE?5(s-KYNYrhvP5h2|Z4b)zvV&&eIS=EA}QZiS!FfW?WG*#NU z(y7@^a;h9HMWT&y^}JZvWcuvW@Q_5iRfP1B^M zr2wf4`o9svg5)0d8IBxB!lqp0=^Q_h7A z#_x3JKX&T527msP0f=!H24!} ziM>D69MRdj{;p!Sra>@j)Z9EaFS4A*ZjL>man6vPhCr3uLTfdN@*)Vlh>xscx>4dp!Y-v!c>&%Y`rKc6 zjLj?xy>7YCe;@egTIsl!N^K}{S{vams46^aD8mz%$@-xoNm#41O{_9=4;-6LYo294 z^+}5iLse?O;)D`>ujrksgGXZlFnk}aP?6ceXS1(S|;w!O%u2kO8DwPDI?lbx}Z`+_~hb!GJrwJ9N zXf2~x44czpV(?;ZNMr?Iat*~QUJ>{R*TI+MvX&4N2tcs$m(2X`o!r1?9haS_9-5@8 z#*Wnl5z&F?8M{w!Gx~SpPe**#U$bHR35^rzyULHxpTC592WvwGjnZCuY%*nqKwGt5 zfX^&DekXKKzS+=CGCX_qY;SXlFxFXgV+4+Z?JeqL6|-QjYMmk#i&}Q})CEdyD2iA6 zEw9Kdm43$c_|Pxdhb5y^(JaxBa zQQ)q2sV<3;OioQY?+w$KP08KSkb07fsdKj`D%d)eud{0-bPmSLh<*n@9a|o9UoQ`C zBOgz6XXiV#QiR`>MOh@1N3EAnR2ripVS_Gk9#Frl_e*X3wb(4*gn{lK}C4`R|pjHv%=MwCw z4~y{fg{UmIvqb*D0|q0f!3U`wBxVg34NeSFtrc*@rmY2KhtS$wRKm}amB$#OgR&-* zj<%K;75a=MR|VB#cKByig1Y{70wz(`Sl6Ew2+n6n3RfT7<(W57{gnaRIdMgrIrxJO zM@kkpTM2g^NsHVsRN|FknX)i}25kzIzV=bu)xS!MbR!jvJT0#ZgoJL>43f){ywC_Z zdw3O3`%c~|67Qt|~#xuN~exbe0-nWJEkV2h?CEAx-Hs?sLI z1YuM%HPT*sKh*0QVRvrtePDxIu-OVKroXt|Ab;Lf@^pharEk)=$!DBcJnc4cq9UuG 
zRyrKm3dr12>WQ)(bk2CD6dV`;$X&P8r38yZhsh5ieUIiOZzVQn`yq zkB=5hsLfqGVElSf&m6mV@#L~^QDMGRTEfHszmF}Lym6jQ`PmwoYvKmRY$$EdLY literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml index 581b86a54e..7d018b8b79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ + "packaging", "tyro", "transformers>=4.42.3", "datasets>=2.16.0", @@ -184,6 +185,7 @@ colab-ampere-torch220 = [ "flash-attn", ] colab-new = [ + "packaging", "tyro", "transformers>=4.42.3", "datasets>=2.16.0", @@ -198,7 +200,7 @@ colab-no-deps = [ "accelerate>=0.26.1", "trl>=0.7.9", "peft>=0.7.1", - "xformers", + "xformers<0.0.27", "bitsandbytes", "protobuf<4.0.0", ] diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 9eaded31a1..e543287435 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -43,6 +43,7 @@ platform_system = platform_system() import numpy as np import warnings, subprocess, re, inspect, psutil, os, math +from packaging.version import Version # ============================================= # Disable some warnings which can get annoying @@ -126,6 +127,23 @@ import xformers.ops.fmha as xformers xformers_attention = xformers.memory_efficient_attention from xformers import __version__ as xformers_version +# Temporarily disable 0.0.27 and higher - inference issues +if Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + f"Unsloth: Your xformers version of {xformers_version} is too new.\n"\ + 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' + ) +pass + +# Check TRL version +from trl import __version__ as trl_version +if Version(xformers_version) >= Version("0.9.0"): + raise ImportError( + f"Unsloth: Your TRL version of {trl_version} is too new.\n"\ + 'Please downgrade TRL via `pip install --force-reinstall "trl<0.9.0"' + ) +pass + # ============================================= # ============================================= 
@@ -696,12 +714,14 @@ def patch_linear_scaling( def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're working on it! + output = np.array([0,]) try: output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) + output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) + output = np.array([int(x.decode('utf-8'))/1024 for x in output]) except: - raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") - output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) - output = np.array([int(x.decode('utf-8'))/1024 for x in output]) + if not torch.cuda.is_available(): + raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") return output pass PRE_CHECK = check_nvidia() diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index c47e65e87b..bc70b993ae 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -15,15 +15,29 @@ from .llama import * from ._utils import __version__ -from transformers.models.gemma.modeling_gemma import ( - GemmaAttention, - GemmaDecoderLayer, - GemmaModel, - GemmaForCausalLM, - GemmaRotaryEmbedding, - apply_rotary_pos_emb, - repeat_kv, -) +try: + from transformers.models.gemma.modeling_gemma import ( + GemmaAttention, + GemmaDecoderLayer, + GemmaModel, + GemmaForCausalLM, + GemmaRotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, + ) +except: + from packaging.version import Version + transformers_version = Version(transformers_version) + if not transformers_version >= Version("4.38"): + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"\ + f"The minimum required version is 4.38.\n"\ + f'Try `pip install --upgrade "transformers>=4.38"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + pass +pass + from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask_for_sdpa, ) diff 
--git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index fda78534d8..cf1936ded0 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -19,15 +19,29 @@ GemmaFixedLinearScalingRotaryEmbedding, fast_geglu_inference, ) -from transformers.models.gemma2.modeling_gemma2 import ( - Gemma2Attention, - Gemma2DecoderLayer, - Gemma2Model, - Gemma2ForCausalLM, - Gemma2RotaryEmbedding, - apply_rotary_pos_emb, - repeat_kv, -) +try: + from transformers.models.gemma2.modeling_gemma2 import ( + Gemma2Attention, + Gemma2DecoderLayer, + Gemma2Model, + Gemma2ForCausalLM, + Gemma2RotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, + ) +except: + from packaging.version import Version + transformers_version = Version(transformers_version) + if not transformers_version >= Version("4.42"): + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ + f"The minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + pass +pass + from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask_for_sdpa, ) @@ -46,7 +60,7 @@ # [TODO] We must randomnly use torch.compile? # I checked the gradients and formulas and I'm sure it's correct. 
# I'm stumped :( -@torch.compile(fullgraph = True, dynamic = True)#, options = torch_compile_options) +@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): old_dtype = X.dtype X = X.float() @@ -70,7 +84,11 @@ def gemma2_attention(Q, K, V, causal_mask, self, bsz, q_len): K = K.reshape(bsz, n_heads, q_len, head_dim) V = V.reshape(bsz, n_heads, q_len, head_dim) - s = self.config.hidden_size // self.config.num_attention_heads + # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below + # We default to using the config file itself + # s = self.config.hidden_size // self.config.num_attention_heads + s = self.config.query_pre_attn_scalar t = self.config.attn_logit_softcapping Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly @@ -260,7 +278,13 @@ def Gemma2Attention_fast_forward_inference( # Only for Gemma2 self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") - self.scalar = 1.0 / math_sqrt(self.config.hidden_size // self.config.num_attention_heads) + + # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below + # We default to using the config file itself + # s = self.config.hidden_size // self.config.num_attention_heads + self.scalar = 1.0 / math_sqrt(self.config.query_pre_attn_scalar) + # self.scalar = 1.0 / math_sqrt(self.config.hidden_size // self.config.num_attention_heads) self.half_head_dim = head_dim // 2 self. 
t = self.config.attn_logit_softcapping self.reciprocal_t = 1.0 / self.config.attn_logit_softcapping diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index de7f7bc49b..2d888b8c0e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1276,12 +1276,14 @@ def from_pretrained( f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) import subprocess, re, gc, numpy as np + a = np.array([0,]) try: a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) + a = np.array([int(x.decode('utf-8'))/1024 for x in a]) except: - raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') - a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) - a = np.array([int(x.decode('utf-8'))/1024 for x in a]) + if not torch.cuda.is_available(): + raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') if ((a - PRE_CHECK) >= 1).sum() > 1: raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') for _ in range(3): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 82d177e466..0f170597b1 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -22,16 +22,16 @@ import os # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! 
-major, minor = transformers_version.split(".")[:2] -major, minor = int(major), int(minor) -SUPPORTS_FOURBIT = (major > 4) or (major == 4 and minor >= 37) -SUPPORTS_GEMMA = (major > 4) or (major == 4 and minor >= 38) -SUPPORTS_GEMMA2 = (major > 4) or (major == 4 and minor >= 42) +from packaging.version import Version +transformers_version = Version(transformers_version) +SUPPORTS_FOURBIT = transformers_version >= Version("4.37") +SUPPORTS_GEMMA = transformers_version >= Version("4.38") +SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: from .gemma2 import FastGemma2Model -del major, minor +pass def _get_model_name(model_name, load_in_4bit = True): @@ -134,7 +134,7 @@ def from_pretrained( elif model_type == "mistral": dispatch_model = FastMistralModel elif model_type == "gemma": if not SUPPORTS_GEMMA: - raise RuntimeError( + raise ImportError( f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"\ f"The minimum required version is 4.38.\n"\ f'Try `pip install --upgrade "transformers>=4.38"`\n'\ @@ -143,10 +143,10 @@ def from_pretrained( dispatch_model = FastGemmaModel elif model_type == "gemma2": if not SUPPORTS_GEMMA2: - raise RuntimeError( + raise ImportError( f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ - f"The minimum required version is 4.43.\n"\ - f'Try `pip install --upgrade "transformers>=4.43"`\n'\ + f"The minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) dispatch_model = FastGemma2Model diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 440a53c1f0..dc0c7da854 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -910,12 +910,14 @@ def add_new_tokens( def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're working on 
it! + output = np.array([0,]) try: output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) + output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) + output = np.array([int(x.decode('utf-8'))/1024 for x in output]) except: - raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") - output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) - output = np.array([int(x.decode('utf-8'))/1024 for x in output]) + if not torch.cuda.is_available(): + raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") return output pass PRE_CHECK = check_nvidia() @@ -972,12 +974,14 @@ def patch_sft_trainer_tokenizer(): " )\n"\ "pass\n"\ "import subprocess, re, gc, numpy as np\n"\ + "a = np.array([0,])\n"\ "try:\n"\ " a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ + " a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a)\n"\ + " a = np.array([int(x.decode('utf-8'))/1024 for x in a])\n"\ "except:\n"\ - " raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!')\n"\ - "a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a)\n"\ - "a = np.array([int(x.decode('utf-8'))/1024 for x in a])\n"\ + " if not torch.cuda.is_available():\n"\ + " raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!')\n"\ "if ((a - PRE_CHECK) >= 1).sum() > 1:\n"\ " raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!')\n"\ "for _ in range(3):\n"\ From e32fc240884435527660bb79a5664a94e27a7576 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 14 Jul 2024 22:41:04 -0700 Subject: [PATCH 0260/1088] Train on responses only (#770) * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update 
_utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Check exec, eval * Update _utils.py * Update _utils.py * Images * Bug fixes * Update pyproject.toml * Bug fixes * Update _utils.py * Update _utils.py * Deprecation fix * Update chat_templates.py * Now permitting use of pre-installed llama.cpp (#763) * Now permitting use of pre-installed llama.cpp * Update save.py --------- Co-authored-by: Giuseppe Strafforello Co-authored-by: Daniel Han * Update save.py * Deprecation & compile * typo * Update chat_templates.py * Update chat_templates.py * train_on_responses_only * Update llama.py * Update llama.py * Update save.py * Update gemma2.py --------- Co-authored-by: pepistrafforello Co-authored-by: Giuseppe Strafforello --- unsloth/chat_templates.py | 166 ++++++++++++++++++++++++++++++----- unsloth/kernels/fast_lora.py | 14 +-- unsloth/kernels/utils.py | 13 ++- unsloth/models/_utils.py | 60 +++++++++---- unsloth/models/gemma2.py | 11 +++ 
unsloth/save.py | 97 ++++++++++++-------- 6 files changed, 280 insertions(+), 81 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 596548df3f..d31f4c7564 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -21,6 +21,7 @@ "to_sharegpt", "standardize_sharegpt", "apply_chat_template", + "train_on_responses_only", "test_construct_chat_template", ] @@ -1063,7 +1064,6 @@ def construct_chat_template( \ "Below are some instructions that describe some tasks. Write responses that appropriately complete each request.", extra_eos_tokens = None, - ): """ Creates a Ollama modelfile and a HF Jinja template from a custom @@ -1072,6 +1072,9 @@ def construct_chat_template( \ You must use {INPUT}, {OUTPUT} twice, and {SYSTEM} is optional. """ + # Strip only the left + chat_template = chat_template.lstrip() + assert(tokenizer is not None) if extra_eos_tokens is None: extra_eos_tokens = [] @@ -1128,19 +1131,47 @@ def construct_chat_template( \ chat_template = re.sub(r"{OUTPUT}", r"{OUTPUT}" + eos, chat_template) pass - # O(N^2) search finding 2 repeatted pieces of text - j = len(chat_template)-1 - at_least_one = False - while j > 0: - found = chat_template.rfind(chat_template[j:], 0, j) - if found == -1: break - j -= 1 - at_least_one = True - pass - if j > 0: j += 1 - else: raise RuntimeError(error_msg) + # This forces you to provide 2 input and outputs + final_combined_check = False + + try: + # O(N^2) search finding 2 repeatted pieces of text + j = len(chat_template)-1 + at_least_one = False + while j > 0: + found = chat_template.rfind(chat_template[j:], 0, j) + if found == -1: break + j -= 1 + at_least_one = True + pass + if j > 0: j += 1 + else: raise RuntimeError(error_msg) + + if not at_least_one: raise RuntimeError(error_msg) - if not at_least_one: raise RuntimeError(error_msg) + # Must be equivalent to left + final_combined_check = True + except: + # Simple 1 singular input and output + system_count = 
chat_template.count("{SYSTEM}") + input_count = chat_template.count("{INPUT}") + output_count = chat_template.count("{OUTPUT}") + if system_count > 1: + raise RuntimeError("You must only provide 1 {SYSTEM} in the chat template") + if input_count > 1: + raise RuntimeError("You must only provide 1 {INPUT} in the chat template") + if output_count > 1: + raise RuntimeError("You must only provide 1 {OUTPUT} in the chat template") + + if system_count != 0: + j = next(re.finditer(r"\{SYSTEM\}[\s]{0,}", chat_template)).span(0)[1] + else: + j = 0 + pass + + # Must be equivalent to the original text + final_combined_check = False + pass # Repeatted text instruction_response = chat_template[j:] @@ -1153,6 +1184,8 @@ def construct_chat_template( \ # 2nd Instruction, Output pair right = chat_template[j:] + final_combined_check = left if final_combined_check else chat_template + # Isolate input extra_eos_tokens_regex = "|".join(f"(?:{re.escape(x)})" for x in extra_eos_tokens) if len(extra_eos_tokens_regex) != 0: @@ -1170,13 +1203,14 @@ def construct_chat_template( \ output_part = right[input_end:] # Isolate system - system_part = left[:left.find(input_part)] + where_system = left.find(input_part) + system_part = left[:where_system if where_system != -1 else len(left)] # Check if the user provided a correct prompt combined = system_part + input_part + output_part - if combined != left: - combined_changed = combined.replace('\n', '\\n') - left_changed = left .replace('\n', '\\n') + if combined != final_combined_check: + combined_changed = combined .replace('\n', '\\n') + left_changed = final_combined_check.replace('\n', '\\n') raise RuntimeError( "Unsloth: The prompt template you provided isn't correct. 
You gave:\n"\ f"{combined_changed}\n\n"\ @@ -1285,6 +1319,15 @@ def process(part, which, content = "message['content']"): jinja_template = "{{ bos_token }}" + jinja_template pass + # Fix missing loop_messages + if "{% set loop_messages = messages %}" not in jinja_template: + jinja_template = jinja_template.replace( + "{% for message in loop_messages %}", + "{% for message in messages %}", + 1, # Only replace the first one + ) + pass + # Check if system part is the same! jinja_template = re.sub( r"\{\% if messages\[0\]\['role'\] \=\= 'system' \%\}\{\{ '(.+?)' \}\}"\ @@ -1300,8 +1343,11 @@ def process(part, which, content = "message['content']"): if not jinja_template.startswith("{{ bos_token }}"): jinja_template = "{{ bos_token }}" + jinja_template pass - - return modelfile, jinja_template + + # Get instruction and output parts for train_on_inputs = False + input_part = input_part [:input_part .find("{INPUT}")] + output_part = output_part[:output_part.find("{OUTPUT}")] + return modelfile, jinja_template, input_part, output_part pass @@ -1327,7 +1373,7 @@ def test_construct_chat_template(): extra_eos_tokens = None - modelfile, jinja_template = construct_chat_template( + modelfile, jinja_template, _, _ = construct_chat_template( tokenizer = tokenizer, chat_template = chat_template, extra_eos_tokens = extra_eos_tokens, @@ -1380,7 +1426,7 @@ def apply_chat_template( \ You must use {INPUT}, {OUTPUT} twice, and {SYSTEM} is optional. 
""" - modelfile, jinja_template = construct_chat_template( + modelfile, jinja_template, input_part, output_part = construct_chat_template( tokenizer = tokenizer, chat_template = chat_template, default_system_message = default_system_message, @@ -1391,12 +1437,90 @@ def formatting_prompts_func(examples): texts = [tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = False) for convo in convos] return { "text" : texts, } pass + tokenizer.chat_template = jinja_template tokenizer._ollama_modelfile = modelfile + tokenizer._unsloth_input_part = input_part + tokenizer._unsloth_output_part = output_part + return dataset.map(formatting_prompts_func, batched = True,) pass +def train_on_responses_only( + trainer, + instruction_part = None, + response_part = None, +): + """ + Trains only on responses and not on the instruction by masking out + the labels with -100 for the instruction part. + """ + tokenizer = trainer.tokenizer + + if not hasattr(tokenizer, "_unsloth_input_part") or \ + not hasattr(tokenizer, "_unsloth_output_part"): + + if instruction_part is None or response_part is None: + raise ValueError("Unsloth: instruction_part and response_part must be given!") + pass + elif (instruction_part is not None or response_part is not None) and \ + (hasattr(tokenizer, "_unsloth_input_part") or hasattr(tokenizer, "_unsloth_output_part")): + + raise ValueError("Unsloth: Your tokenizer already has instruction and response parts set - do not give custom ones!") + else: + instruction_part = tokenizer._unsloth_input_part + response_part = tokenizer._unsloth_output_part + pass + + instruction_ids = tokenizer(instruction_part, add_special_tokens = False).input_ids + response_ids = tokenizer(response_part, add_special_tokens = False).input_ids + + instruction_length = len(instruction_ids) + response_length = len(response_ids) + max_length = max(instruction_length, response_length) + + def _train_on_responses_only(examples): + input_ids_ = examples["input_ids"] 
+ all_labels = [] + + for input_ids in input_ids_: + + labels = [-100] * len(input_ids) + m = len(input_ids) - max_length + first_response = response_ids[0] + first_instruction = instruction_ids[0] + j = 0 + while j < m: + if input_ids[j] == first_response: + if input_ids[j : j+response_length] == response_ids: + j = j + response_length + start = j + while j < m: + if input_ids[j] == first_instruction and input_ids[j : j+instruction_length] == instruction_ids: + j = j + instruction_length + labels[start : j] = input_ids[start : j] + break + elif j == (m-1): + j = m + labels[start:] = input_ids[start:] + break + pass + j += 1 + pass + pass + pass + j += 1 + pass + all_labels.append(labels) + pass + return { "labels" : all_labels } + pass + trainer.train_dataset = trainer.train_dataset.map(_train_on_responses_only, batched = True) + return trainer +pass + + def create_stopping_criteria(tokenizer, stop_word = "eos_token"): class StoppingCriteriaSub(StoppingCriteria): __slots__ = "stop_token", "single_match", "length", diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 8f7aea585b..8f4101799c 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -19,6 +19,8 @@ get_lora_parameters, get_lora_parameters_bias, matmul_lora, + torch_amp_custom_fwd, + torch_amp_custom_bwd, ) @@ -61,7 +63,7 @@ class LoRA_MLP(torch.autograd.Function): Don't forget to see our blog post for more details! 
""" @staticmethod - @torch.cuda.amp.custom_fwd + @torch_amp_custom_fwd def forward(ctx, X : torch.Tensor, gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, @@ -87,7 +89,7 @@ def forward(ctx, X : torch.Tensor, @staticmethod - @torch.cuda.amp.custom_bwd + @torch_amp_custom_bwd def backward(ctx, dY : torch.Tensor): gateW, gateW_quant, gateS, upW, upW_quant, upS, downW, downW_quant, downS, \ _backward_function = ctx.custom_saved_tensors @@ -223,7 +225,7 @@ class LoRA_QKV(torch.autograd.Function): dC/dBv = A.T @ X.T @ D(Wv) """ @staticmethod - @torch.cuda.amp.custom_fwd + @torch_amp_custom_fwd def forward(ctx, X : torch.Tensor, QW, QW_quant, QA, QB, QS, KW, KW_quant, KA, KB, KS, @@ -244,7 +246,7 @@ def forward(ctx, X : torch.Tensor, pass @staticmethod - @torch.cuda.amp.custom_bwd + @torch_amp_custom_bwd def backward(ctx, dQ, dK, dV): QW, QW_quant, QS, KW, KW_quant, KS, VW, VW_quant, VS = \ ctx.custom_saved_tensors @@ -352,7 +354,7 @@ class LoRA_W(torch.autograd.Function): dC/dBv = A.T @ X.T @ D(Wv) """ @staticmethod - @torch.cuda.amp.custom_fwd + @torch_amp_custom_fwd def forward(ctx, X : torch.Tensor, W, W_quant, A, B, S): dtype = X.dtype @@ -363,7 +365,7 @@ def forward(ctx, X : torch.Tensor, pass @staticmethod - @torch.cuda.amp.custom_bwd + @torch_amp_custom_bwd def backward(ctx, dY : torch.Tensor): W, W_quant, S = ctx.custom_saved_tensors A, B, X = ctx.saved_tensors diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 935f1d430d..4b789001f5 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -16,6 +16,18 @@ MAX_FUSED_SIZE = 65536 next_power_of_2 = triton.next_power_of_2 +# torch.cuda.amp.custom_fwd is deprecated >= 2.4 +import torch +from packaging.version import Version +if Version(torch.__version__) < Version("2.4.0"): + torch_amp_custom_fwd = torch.cuda.amp.custom_fwd + torch_amp_custom_bwd = torch.cuda.amp.custom_bwd +else: + torch_amp_custom_fwd = torch.amp.custom_fwd(device_type = "cuda") + 
torch_amp_custom_bwd = torch.amp.custom_bwd(device_type = "cuda") +pass + + def calculate_settings(n): BLOCK_SIZE = next_power_of_2(n) if BLOCK_SIZE > MAX_FUSED_SIZE: @@ -32,7 +44,6 @@ def calculate_settings(n): import bitsandbytes as bnb get_ptr = bnb.functional.get_ptr import ctypes -import torch cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 cdequantize_blockwise_fp16_nf4 = bnb.functional.lib.cdequantize_blockwise_fp16_nf4 cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e543287435..b224e85a89 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -35,6 +35,8 @@ "patch_linear_scaling", "check_nvidia", "create_boolean_mask", + "torch_amp_custom_fwd", + "torch_amp_custom_bwd", ] import torch @@ -92,6 +94,19 @@ pass # ============================================= +# ============================================= +# torch.cuda.amp.custom_fwd is deprecated >= 2.4 +import torch +from packaging.version import Version +if Version(torch.__version__) < Version("2.4.0"): + torch_amp_custom_fwd = torch.cuda.amp.custom_fwd + torch_amp_custom_bwd = torch.cuda.amp.custom_bwd +else: + torch_amp_custom_fwd = torch.amp.custom_fwd(device_type = "cuda") + torch_amp_custom_bwd = torch.amp.custom_bwd(device_type = "cuda") +pass +# ============================================= + # ============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb @@ -176,11 +191,22 @@ def is_big_gpu(index): "config.cuda.use_fast_math = True", "config.cuda.compile_opt_level = '-O2'", ] +# Torch dynamo arguments +torch_dynamo_arguments = [ + "config.accumulated_cache_size_limit = 512", # Bump up a bit from 256 + "config.suppress_errors = True", # Supress errors for now + "config.do_not_emit_runtime_asserts = True", +] import torch._inductor.config as config for _try_compile_argument in 
torch_compile_arguments: try: exec(_try_compile_argument) except: pass pass +import torch._dynamo.config as config +for _try_dynamo_argument in torch_dynamo_arguments: + try: exec(_try_dynamo_argument) + except: pass +pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, @@ -358,15 +384,13 @@ def patch_tokenizer(model, tokenizer): pass # ============================================= - -def _get_statistics(statistics = None): +import psutil +def _get_statistics(statistics = None, force_download = True): # We log some basic stats about which environment is being used. # We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. # You can disable this by commenting the below out try: - from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled - import psutil n_cpus = psutil.cpu_count(logical = False) keynames = "\n" + "\n".join(os.environ.keys()) @@ -382,21 +406,12 @@ def _get_statistics(statistics = None): else: statistics = "other" if statistics is not None: - disabled = False - if not are_progress_bars_disabled(): - disable_progress_bars() - disabled = True - pass - from transformers import AutoModelForCausalLM stats_model = AutoModelForCausalLM.from_pretrained( f"unslothai/{statistics}", - force_download = True, + force_download = force_download, ) del stats_model - if disabled: - enable_progress_bars() - pass pass except: pass @@ -408,7 +423,14 @@ def get_statistics(): # We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. 
# You can disable this by commenting the below out + from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + disabled = False + if not are_progress_bars_disabled(): + disable_progress_bars() + disabled = True + pass _get_statistics(None) + _get_statistics("repeat", force_download = False) try: vram = torch.cuda.get_device_properties(0).total_memory / 1024 / 1024 / 1024 if vram <= 8 : vram = 8 @@ -423,6 +445,12 @@ def get_statistics(): except: pass pass + try: + devices = torch.cuda.device_count() + _get_statistics(f"{devices if devices <= 8 else 9}") + except: + pass + if disabled: enable_progress_bars() pass @@ -517,7 +545,7 @@ class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): Tiny hit to performance, since we mask the movement via non blocking calls. """ @staticmethod - @torch.cuda.amp.custom_fwd + @torch_amp_custom_fwd def forward(ctx, forward_function, hidden_states, *args): saved_hidden_states = hidden_states.to("cpu", non_blocking = True) with torch.no_grad(): @@ -529,7 +557,7 @@ def forward(ctx, forward_function, hidden_states, *args): pass @staticmethod - @torch.cuda.amp.custom_bwd + @torch_amp_custom_bwd def backward(ctx, dY): (hidden_states,) = ctx.saved_tensors hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index cf1936ded0..9c055ff09e 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -70,6 +70,17 @@ def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): pass +# Flex Attention in torch 2.5 and higher +# try: +# from torch.nn.attention._flex_attention import _flex_attention +# from functools import lru_cache +# @lru_cache +# def create_block_mask_from_score_mod(score_mod, B, H, M, N): +# SPARSE_BLOCK = 128 +# block_mask = _create_block_mask(score_mod, B, H, M, N, device = "cuda:0") +# return block_mask + + # Logit softcapping @torch.compile(fullgraph = True, 
dynamic = True, options = torch_compile_options) def gemma2_attention(Q, K, V, causal_mask, self, bsz, q_len): diff --git a/unsloth/save.py b/unsloth/save.py index 293e430606..7075fecc0d 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -840,6 +840,21 @@ def install_llama_cpp_blocking(use_cuda = False): pass +def get_executable(executables): + # Get system locations (System Path).split(system separator) + system_directories = os.environ.get("PATH").split(os.pathsep) + + for directory in system_directories: + for executable in executables: + path = os.path.join(directory, executable) + # Check if the executable exists and is executable + if os.path.exists(path) and os.access(path, os.X_OK): return path + pass + pass + return None +pass + + def save_to_gguf( model_type : str, model_dtype : str, @@ -932,48 +947,56 @@ def save_to_gguf( ) pass - print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") - if _run_installer is not None: - error = _run_installer.wait() + # Determine whether the system already has llama.cpp installed and the scripts are executable + quantize_location = get_executable(["llama-quantize", "quantize"]) + convert_location = get_executable(["convert-hf-to-gguf.py", "convert_hf_to_gguf.py"]) + + if quantize_location is not None and convert_location is not None: + print("Unsloth: llama.cpp found in the system. We shall skip installation.") else: - error = 0 - install_llama_cpp_blocking() - pass + print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") + if _run_installer is not None: + error = _run_installer.wait() + else: + error = 0 + install_llama_cpp_blocking() + pass - # Check if successful. If not install 10th latest release + # Check if successful. 
If not install 10th latest release - # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize - # and llama.cpp/main changed to llama.cpp/llama-cli - # See https://github.com/ggerganov/llama.cpp/pull/7809 - quantize_location = None - if os.path.exists("llama.cpp/quantize"): - quantize_location = "llama.cpp/quantize" - elif os.path.exists("llama.cpp/llama-quantize"): - quantize_location = "llama.cpp/llama-quantize" - else: - raise RuntimeError( - "Unsloth: The file 'llama.cpp/llama-quantize' or 'llama.cpp/quantize' does not exist.\n"\ - "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" - ) - pass + # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize + # and llama.cpp/main changed to llama.cpp/llama-cli + # See https://github.com/ggerganov/llama.cpp/pull/7809 + quantize_location = None + if os.path.exists("llama.cpp/quantize"): + quantize_location = "llama.cpp/quantize" + elif os.path.exists("llama.cpp/llama-quantize"): + quantize_location = "llama.cpp/llama-quantize" + else: + raise RuntimeError( + "Unsloth: The file 'llama.cpp/llama-quantize' or 'llama.cpp/quantize' does not exist.\n"\ + "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" + ) + pass - # See https://github.com/unslothai/unsloth/pull/730 - # Filenames changed again! - convert_location = None - if os.path.exists("llama.cpp/convert-hf-to-gguf.py"): - convert_location = "llama.cpp/convert-hf-to-gguf.py" - elif os.path.exists("llama.cpp/convert_hf_to_gguf.py"): - convert_location = "llama.cpp/convert_hf_to_gguf.py" - else: - raise RuntimeError( - "Unsloth: The file 'llama.cpp/convert-hf-to-gguf.py' or 'llama.cpp/convert_hf_to_gguf.py' does not exist.\n"\ - "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" - ) - pass + # See https://github.com/unslothai/unsloth/pull/730 + # Filenames changed again! 
+ convert_location = None + if os.path.exists("llama.cpp/convert-hf-to-gguf.py"): + convert_location = "llama.cpp/convert-hf-to-gguf.py" + elif os.path.exists("llama.cpp/convert_hf_to_gguf.py"): + convert_location = "llama.cpp/convert_hf_to_gguf.py" + else: + raise RuntimeError( + "Unsloth: The file 'llama.cpp/convert-hf-to-gguf.py' or 'llama.cpp/convert_hf_to_gguf.py' does not exist.\n"\ + "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" + ) + pass - if error != 0 or quantize_location is None or convert_location is None: - print(f"Unsloth: llama.cpp error code = {error}.") - install_llama_cpp_old(-10) + if error != 0 or quantize_location is None or convert_location is None: + print(f"Unsloth: llama.cpp error code = {error}.") + install_llama_cpp_old(-10) + pass pass # Determine maximum first_conversion state From 0f2e484f3931d1a558dc3a5967c8da665a2e7252 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 15 Jul 2024 14:36:44 -0700 Subject: [PATCH 0261/1088] Chat templates --- unsloth/chat_templates.py | 134 ++++++++++++++++++++------------------ unsloth/save.py | 2 +- 2 files changed, 71 insertions(+), 65 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index d31f4c7564..5bd66bae09 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1125,11 +1125,6 @@ def construct_chat_template( \ for eos in extra_eos_tokens: count_eos += len(re.findall(r"{OUTPUT}" + re.escape(eos), chat_template)) pass - if count_eos == 0: - logger.warning("Unsloth: We automatically added an EOS token to stop endless generations.") - eos = extra_eos_tokens[0] - chat_template = re.sub(r"{OUTPUT}", r"{OUTPUT}" + eos, chat_template) - pass # This forces you to provide 2 input and outputs final_combined_check = False @@ -1151,72 +1146,83 @@ def construct_chat_template( \ # Must be equivalent to left final_combined_check = True - except: - # Simple 1 singular input and output - system_count = 
chat_template.count("{SYSTEM}") - input_count = chat_template.count("{INPUT}") - output_count = chat_template.count("{OUTPUT}") - if system_count > 1: - raise RuntimeError("You must only provide 1 {SYSTEM} in the chat template") - if input_count > 1: - raise RuntimeError("You must only provide 1 {INPUT} in the chat template") - if output_count > 1: - raise RuntimeError("You must only provide 1 {OUTPUT} in the chat template") - - if system_count != 0: - j = next(re.finditer(r"\{SYSTEM\}[\s]{0,}", chat_template)).span(0)[1] + + # Repeatted text + instruction_response = chat_template[j:] + if instruction_response.count("{INPUT}") != 1 or instruction_response.count("{OUTPUT}") != 1: + raise RuntimeError(error_msg) + pass + + # 1st System, Instruction, Output pair + left = chat_template[:j] + # 2nd Instruction, Output pair + right = chat_template[j:] + + final_combined_check = left if final_combined_check else chat_template + + # Isolate input + extra_eos_tokens_regex = "|".join(f"(?:{re.escape(x)})" for x in extra_eos_tokens) + if len(extra_eos_tokens_regex) != 0: + find_end = f"(?:{extra_eos_tokens_regex})?" else: - j = 0 + find_end = "" + find_end = r"\{INPUT\}[\s\n]{0,}" + find_end + input_end = list(re.finditer(find_end, right)) + assert(len(input_end) == 1) + input_end = input_end[0] + input_end = input_end.span(0)[1] + input_part = right[:input_end] + + # Isolate output + output_part = right[input_end:] + + # Isolate system + where_system = left.find(input_part) + system_part = left[:where_system if where_system != -1 else len(left)] + + # Check if the user provided a correct prompt + combined = system_part + input_part + output_part + if combined != final_combined_check: + combined_changed = combined .replace('\n', '\\n') + left_changed = final_combined_check.replace('\n', '\\n') + raise RuntimeError( + "Unsloth: The prompt template you provided isn't correct. 
You gave:\n"\ + f"{combined_changed}\n\n"\ + "But we require the following:\n"\ + f"{left_changed}" + ) pass + except: + ending = chat_template[chat_template.find("{OUTPUT}") + len("{OUTPUT}"):] - # Must be equivalent to the original text - final_combined_check = False - pass + ending = re.escape(ending) + find_text = "{INPUT}" + ending + "(.+?{OUTPUT}" + ending + ")" + response_part = re.findall(find_text, chat_template, flags = re.DOTALL | re.MULTILINE) + response_part = response_part[0] - # Repeatted text - instruction_response = chat_template[j:] - if instruction_response.count("{INPUT}") != 1 or instruction_response.count("{OUTPUT}") != 1: - raise RuntimeError(error_msg) - pass + for j in range(1, len(response_part)): + try_find = re.escape(response_part[:j]) + try: found = next(re.finditer("(" + try_find + ").+?\{INPUT\}", chat_template, flags = re.DOTALL | re.MULTILINE)) + except: break + pass + separator = found.group(1) - # 1st System, Instruction, Output pair - left = chat_template[:j] - # 2nd Instruction, Output pair - right = chat_template[j:] + response_start = chat_template.find(response_part) + start_instruction = chat_template[:response_start].rfind(separator) + if start_instruction == -1: start_instruction = 0 + instruction_part = chat_template[start_instruction:response_start] - final_combined_check = left if final_combined_check else chat_template + combined = instruction_part + response_part + where = chat_template.find(combined) + system_part = chat_template[:where] - # Isolate input - extra_eos_tokens_regex = "|".join(f"(?:{re.escape(x)})" for x in extra_eos_tokens) - if len(extra_eos_tokens_regex) != 0: - find_end = f"(?:{extra_eos_tokens_regex})?" 
- else: - find_end = "" - find_end = r"\{INPUT\}[\s\n]{0,}" + find_end - input_end = list(re.finditer(find_end, right)) - assert(len(input_end) == 1) - input_end = input_end[0] - input_end = input_end.span(0)[1] - input_part = right[:input_end] - - # Isolate output - output_part = right[input_end:] - - # Isolate system - where_system = left.find(input_part) - system_part = left[:where_system if where_system != -1 else len(left)] - - # Check if the user provided a correct prompt - combined = system_part + input_part + output_part - if combined != final_combined_check: - combined_changed = combined .replace('\n', '\\n') - left_changed = final_combined_check.replace('\n', '\\n') - raise RuntimeError( - "Unsloth: The prompt template you provided isn't correct. You gave:\n"\ - f"{combined_changed}\n\n"\ - "But we require the following:\n"\ - f"{left_changed}" - ) + system_part, input_part, output_part = system_part, instruction_part, response_part + pass + + if count_eos == 0: + logger.warning("Unsloth: We automatically added an EOS token to stop endless generations.") + eos = extra_eos_tokens[0] + output_part = output_part + eos pass # Ollama modelfile parts diff --git a/unsloth/save.py b/unsloth/save.py index 7075fecc0d..c8e791c703 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -931,7 +931,7 @@ def save_to_gguf( print_info = \ f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ - f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ + f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits will take 3 minutes.\n"\ f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 10 minutes each.\n"\ f' "-____-" In total, you will have to wait at least 16 minutes.\n' print(print_info) From ccd0c8ea9b9b3044d62331750c1b5b1fe79baecc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 10:51:10 -0700 Subject: [PATCH 0262/1088] Mistral Nemo 12b (#777) MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update gemma2.py * Update llama.py * Update llama.py * Update gemma2.py * init * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * All RoPE Scaling support * cleanup * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * exec * exec * Attention_Module * attention_module * imports * exec * Update llama.py * Update llama.py * boolean mask * revert masking * Update llama.py * Update save.py * Update llama.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update utils.py * retry * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update _utils.py * Update _utils.py * Update gemma2.py * Update chat_templates.py * Gemma 2 Ollama support * Update llama.py * Update llama.py * error handling * Update _utils.py * Update _utils.py * Stats for debugging * Update _utils.py * Update _utils.py * Debugging * Update tokenizer_utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Check exec, eval * Update _utils.py * Update _utils.py * Images * Bug fixes * Update pyproject.toml * Bug fixes * Update _utils.py * Update _utils.py * Deprecation fix * Update chat_templates.py * Now permitting use of pre-installed llama.cpp (#763) * Now permitting use of pre-installed llama.cpp * Update save.py --------- Co-authored-by: Giuseppe Strafforello Co-authored-by: Daniel Han * Update save.py * Deprecation & compile * typo * Update chat_templates.py * Update chat_templates.py * train_on_responses_only * Update llama.py * Update llama.py * Update save.py * Update gemma2.py * Flex Attention * typos 
* Update _utils.py * Update llama.py * Update __init__.py * Update flex_attention.py * Update llama.py * Update llama.py * emulation * Update __init__.py * Update rope_embedding.py * Update flex_attention.py * Update flex_attention.py * Update rope_embedding.py * libdevice * triton_tanh * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * score * Update llama.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update llama.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Flex Attention removal * upload tensorboard training stats to hub if available (#773) * causal_mask * Update llama.py * Update llama.py * Update flex_attention.py * Update _utils.py * Update mapper.py * Update _utils.py --------- Co-authored-by: pepistrafforello Co-authored-by: Giuseppe Strafforello Co-authored-by: Sébastien De Greef --- unsloth/__init__.py | 19 ++++--- unsloth/kernels/__init__.py | 34 +++++++----- unsloth/kernels/cross_entropy_loss.py | 12 ++--- unsloth/kernels/flex_attention.py | 77 +++++++++++++++++++++++++++ unsloth/kernels/geglu.py | 6 +-- unsloth/kernels/rope_embedding.py | 2 +- unsloth/kernels/utils.py | 12 +++++ unsloth/models/_utils.py | 20 ++++++- unsloth/models/gemma2.py | 50 +---------------- unsloth/models/llama.py | 6 +++ unsloth/models/mapper.py | 8 +++ unsloth/save.py | 14 +++++ 12 files changed, 181 insertions(+), 79 deletions(-) create mode 100644 unsloth/kernels/flex_attention.py diff --git a/unsloth/__init__.py b/unsloth/__init__.py index feb550be5f..0490484cb1 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -60,12 +60,6 @@ "We have some installation instructions on our Github page.") pass -# Fix up is_bf16_supported https://github.com/unslothai/unsloth/issues/504 -major_version, minor_version = 
torch.cuda.get_device_capability() -SUPPORTS_BFLOAT16 = (major_version >= 8) -def is_bf16_supported(): return SUPPORTS_BFLOAT16 -torch.cuda.is_bf16_supported = is_bf16_supported - # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") @@ -79,6 +73,19 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 del os.environ["PYTORCH_CUDA_ALLOC_CONF"] pass +# Torch 2.5 has including_emulation +major_version, minor_version = torch.cuda.get_device_capability() +SUPPORTS_BFLOAT16 = (major_version >= 8) + +if (major_torch == 2) and (minor_torch >= 5): + old_is_bf16_supported = torch.cuda.is_bf16_supported + def is_bf16_supported(including_emulation = False): + return old_is_bf16_supported(including_emulation) + torch.cuda.is_bf16_supported = is_bf16_supported +else: + def is_bf16_supported(): SUPPORTS_BFLOAT16 + torch.cuda.is_bf16_supported = is_bf16_supported +pass # Try loading bitsandbytes and triton import bitsandbytes as bnb diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index ebea02afd7..c2de979a6f 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -17,24 +17,32 @@ from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel from .geglu import ( - geglu_exact_forward_kernel, - geglu_exact_backward_kernel, - geglu_approx_forward_kernel, - geglu_approx_backward_kernel, + geglu_exact_forward_kernel, + geglu_exact_backward_kernel, + geglu_approx_forward_kernel, + geglu_approx_backward_kernel, ) from .fast_lora import ( - get_lora_parameters, - get_lora_parameters_bias, - apply_lora_mlp_swiglu, - apply_lora_mlp_geglu_exact, - apply_lora_mlp_geglu_approx, - apply_lora_qkv, - apply_lora_o, + get_lora_parameters, + get_lora_parameters_bias, + apply_lora_mlp_swiglu, + apply_lora_mlp_geglu_exact, + apply_lora_mlp_geglu_approx, + apply_lora_qkv, + apply_lora_o, ) from .utils import 
fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora +from .flex_attention import HAS_FLEX_ATTENTION, slow_attention_softcapping + +if HAS_FLEX_ATTENTION: + from .flex_attention import ( + FLEX_ATTENTION_PADDING, + ) +pass + try: - print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") + print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") except: - print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") + print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") pass diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index dc1ad269fe..6074a51538 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings, MAX_FUSED_SIZE +from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh from transformers.models.llama.modeling_llama import logger @@ -63,7 +63,7 @@ def _cross_entropy_forward( label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * tl.math.tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -72,7 +72,7 @@ def _cross_entropy_forward( if label_idx != -100: x = tl.load(logits_ptr + label_idx) # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: x = SOFTCAP * tl.math.tanh(x / SOFTCAP) + if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) loss = logsumexp - x.to(tl.float32) else: loss = 0.0 @@ -131,7 +131,7 @@ def _chunked_cross_entropy_forward( label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) # 
Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * tl.math.tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -143,7 +143,7 @@ def _chunked_cross_entropy_forward( if label_idx != -100: x = tl.load(logits_ptr + label_idx).to(tl.float32) # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: x = SOFTCAP * tl.math.tanh(x / SOFTCAP) + if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) loss = -1.0 * x.to(tl.float32) else: loss = 0.0 @@ -198,7 +198,7 @@ def _cross_entropy_backward( # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) - partial = tl.math.tanh(x / SOFTCAP) + partial = triton_tanh(x / SOFTCAP) x = SOFTCAP * partial pass diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py new file mode 100644 index 0000000000..1eb2486998 --- /dev/null +++ b/unsloth/kernels/flex_attention.py @@ -0,0 +1,77 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from functools import lru_cache +from transformers.models.llama.modeling_llama import logger + +torch_compile_options = { + "epilogue_fusion" : True, + "max_autotune" : True, + "shape_padding" : True, + "trace.enabled" : False, # Output Triton kernel outputs! 
+ "triton.cudagraphs" : False, +} + +# Flex Attention supported from torch 2.5 onwards only +import torch.nn.attention +if hasattr(torch.nn.attention, "flex_attention"): + import torch.nn.attention.flex_attention + from torch.nn.attention.flex_attention import flex_attention + from torch.nn.attention.flex_attention import create_block_mask + FLEX_ATTENTION_PADDING = getattr( + torch.nn.attention.flex_attention, + "_DEFAULT_SPARSE_BLOCK_SIZE", + 1, + ) + flex_attention = torch.compile(flex_attention, dynamic = False) + HAS_FLEX_ATTENTION = True +else: + HAS_FLEX_ATTENTION = False +pass + +# Logit softcapping +@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) +def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): + n_heads = self.num_heads + head_dim = self.head_dim + n_kv_heads = self.num_key_value_heads + n_groups = self.num_key_value_groups + + # Grouped query attention + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + + # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below + # We default to using the config file itself + # s = self.config.hidden_size // self.config.num_attention_heads + s = self.config.query_pre_attn_scalar + t = self.config.attn_logit_softcapping + + Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly + A = torch.matmul(Q, K.transpose(2, 3)) + A = t * torch.tanh(A / t) # Logit softcapping + A += causal_mask[:q_len, :q_len] + # Much slower in torch compile! 
+ # A.masked_fill_(causal_mask[:q_len, :q_len], -float("inf")) + A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) + A = torch.matmul(A, V) + A = A.transpose(1, 2).contiguous() + A = A.reshape(bsz, q_len, n_heads*head_dim) + return A +pass + diff --git a/unsloth/kernels/geglu.py b/unsloth/kernels/geglu.py index 006e8c0f34..9fedae769e 100644 --- a/unsloth/kernels/geglu.py +++ b/unsloth/kernels/geglu.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings +from .utils import calculate_settings, triton_tanh @triton.jit @@ -119,7 +119,7 @@ def _approx_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32) f_row = 0.5 * e_row * ( - tl.math.tanh(s * e_row * (1.0 + 0.044715 * e_row * e_row)) \ + triton_tanh(s * e_row * (1.0 + 0.044715 * e_row * e_row)) \ + 1.0 ) f_row = f_row.to(g_row.dtype) # Exact copy from HF @@ -168,7 +168,7 @@ def _approx_backward_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): s = 0.7978845608028654 # math.sqrt(2 / math.pi) a = s * e_row # a = sqrt(2 / pi) * x b = a * 0.044715 * e_row * e_row # b = a * 0.044715 * x^2 - T = 1.0 + tl.math.tanh(a + b) + T = 1.0 + triton_tanh(a + b) T2 = 0.5 * T # Q = 0.5 * -T * (T - 2.0) * (a + 3.0 * b) Q2 = -T2 * (T - 2.0) * (a + 3.0 * b) diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index b32d75ebf3..2934ac41c9 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -16,7 +16,6 @@ import triton.language as tl import torch from .utils import calculate_settings - ROPE_GROUP_SIZE = 4 @triton.heuristics({"BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],}) @@ -36,6 +35,7 @@ def _rope_embedding( RoPE is Q * cos + rotate_half(Q) * sin See our blog post for more info """ + ROPE_GROUP_SIZE = 4 row_position = tl.program_id(0) group_head_position = tl.program_id(1) col_offsets = 
tl.arange(0, BLOCK_SIZE) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 4b789001f5..23be372217 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -28,6 +28,18 @@ pass +# tl.math.tanh now is libdevice.tanh +from packaging.version import Version +import triton +if Version(triton.__version__) >= Version("3.0.0"): + from triton.language.extra import libdevice + triton_tanh = libdevice.tanh +else: + import triton.language as tl + triton_tanh = tl.math.tanh +pass + + def calculate_settings(n): BLOCK_SIZE = next_power_of_2(n) if BLOCK_SIZE > MAX_FUSED_SIZE: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b224e85a89..025daec136 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -145,7 +145,15 @@ # Temporarily disable 0.0.27 and higher - inference issues if Version(xformers_version) >= Version("0.0.27"): raise ImportError( - f"Unsloth: Your xformers version of {xformers_version} is too new.\n"\ + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + "%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' ) pass @@ -154,7 +162,15 @@ from trl import __version__ as trl_version if Version(xformers_version) >= Version("0.9.0"): raise ImportError( - f"Unsloth: Your TRL version of {trl_version} is too new.\n"\ + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + 
"%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your TRL version of {trl_version} is too new.\n"\ 'Please downgrade TRL via `pip install --force-reinstall "trl<0.9.0"' ) pass diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 9c055ff09e..0d21c47b00 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -70,52 +70,6 @@ def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): pass -# Flex Attention in torch 2.5 and higher -# try: -# from torch.nn.attention._flex_attention import _flex_attention -# from functools import lru_cache -# @lru_cache -# def create_block_mask_from_score_mod(score_mod, B, H, M, N): -# SPARSE_BLOCK = 128 -# block_mask = _create_block_mask(score_mod, B, H, M, N, device = "cuda:0") -# return block_mask - - -# Logit softcapping -@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) -def gemma2_attention(Q, K, V, causal_mask, self, bsz, q_len): - n_heads = self.num_heads - head_dim = self.head_dim - n_kv_heads = self.num_key_value_heads - n_groups = self.num_key_value_groups - - # Grouped query attention - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - K = K.reshape(bsz, n_heads, q_len, head_dim) - V = V.reshape(bsz, n_heads, q_len, head_dim) - - # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e - # Gemma 9b should use 256 and not 224 (hs / nah). 
27b uses the below - # We default to using the config file itself - # s = self.config.hidden_size // self.config.num_attention_heads - s = self.config.query_pre_attn_scalar - t = self.config.attn_logit_softcapping - - Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly - A = torch.matmul(Q, K.transpose(2, 3)) - A = t * torch.tanh(A / t) # Logit softcapping - A += causal_mask[:q_len, :q_len] - # Much slower in torch compile! - # A.masked_fill_(causal_mask[:q_len, :q_len], -float("inf")) - A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) - A = torch.matmul(A, V) - A = A.transpose(1, 2).contiguous() - A = A.reshape(bsz, q_len, n_heads*head_dim) - return A -pass - - # Logit softcapping def Gemma2Attention_fast_forward( self, @@ -172,8 +126,8 @@ def Gemma2Attention_fast_forward( V = torch.cat([past_key_value[1], V], dim = 2) pass past_key_value = (K, V) if use_cache else None - - A = gemma2_attention(Q, K, V, causal_mask, self, bsz, kv_seq_len) + + A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) A = self.apply_o(self, A) return A, None, past_key_value pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2d888b8c0e..9bea364ca4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -662,6 +662,12 @@ def LlamaModel_fast_forward( offloaded_gradient_checkpointing = True pass + # Check for Flex Attention + # if IS_GEMMA2 and HAS_FLEX_ATTENTION: + # if not (seq_length % FLEX_ATTENTION_PADDING == 0): + # USE_FLEX_ATTENTION = True + + # Gemma2 has alternating SWA and global attn if IS_GEMMA2 and not hasattr(self, "SWA_mask"): n = self.config.max_position_embeddings diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 31b3ab6df6..38cbdbe992 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -210,6 +210,14 @@ "unsloth/Phi-3-mini-4k-instruct-v0-bnb-4bit" : ( # Old Phi pre July "unsloth/Phi-3-mini-4k-instruct-v0", ), + 
"unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit" : ( # New 12b Mistral models + "unsloth/Mistral-Nemo-Instruct-2407", + "mistralai/Mistral-Nemo-Instruct-2407", + ), + "unsloth/Mistral-Nemo-Base-2407-bnb-4bit" : ( # New 12b Mistral models + "unsloth/Mistral-Nemo-Base-2407", + "mistralai/Mistral-Nemo-Base-2407", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/save.py b/unsloth/save.py index c8e791c703..a5904efc1b 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1359,6 +1359,20 @@ def upload_to_huggingface( uploaded_location = file_location pass + # find ftevent file from tensorboard and upload it + import glob + ftevent_files = glob.glob("*out.tfevents*", recursive = True) + if len(ftevent_files) > 0: + print("Unsloth: Uploading tensorboard files... Please wait...", file_location + "*out.tfevents*") + for ftevent_file in ftevent_files: + hf_api.upload_file( + path_or_fileobj = ftevent_file, + path_in_repo = ftevent_file.replace(file_location, ""), + repo_id = save_directory, + repo_type = "model", + commit_message = "(Trained with Unsloth)", + ) + hf_api.upload_file( path_or_fileobj = file_location, path_in_repo = uploaded_location, From 2cf7f0e7cc6a7277f4b35b3d2325b2cec8faab0f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 10:58:12 -0700 Subject: [PATCH 0263/1088] Update __init__.py --- unsloth/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 0490484cb1..6a2d999b41 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -60,6 +60,12 @@ "We have some installation instructions on our Github page.") pass +# Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) +keynames = "\n" + "\n".join(os.environ.keys()) +if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +pass + # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") From 
aed740060a9cf9c9bd7f879699c8daab66fd79dc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 10:59:09 -0700 Subject: [PATCH 0264/1088] Update pyproject.toml --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 7d018b8b79..29b35577e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ huggingface = [ "trl>=0.7.9,<0.9.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", + "huggingface_hub[hf_transfer]", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -195,6 +196,7 @@ colab-new = [ "wheel>=0.42.0", "numpy", "protobuf<4.0.0", + "huggingface_hub[hf_transfer]", ] colab-no-deps = [ "accelerate>=0.26.1", From 7eb34655a5d335db22a67aeb340d0a522aeef7e6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 11:07:32 -0700 Subject: [PATCH 0265/1088] Update __init__.py --- unsloth/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 6a2d999b41..ea2fe76858 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -61,10 +61,10 @@ pass # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) -keynames = "\n" + "\n".join(os.environ.keys()) -if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: - os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -pass +# keynames = "\n" + "\n".join(os.environ.keys()) +# if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: +# os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +# pass # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 From 54dfb1a9e163dfb2e11c7c46ac182bb22849e6a3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 12:06:38 -0700 Subject: [PATCH 0266/1088] dynamic RoPE --- unsloth/__init__.py | 8 ++++---- unsloth/models/llama.py | 28 ++++++++++++++++++++-------- 2 files changed, 24 insertions(+), 12 
deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index ea2fe76858..6a2d999b41 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -61,10 +61,10 @@ pass # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) -# keynames = "\n" + "\n".join(os.environ.keys()) -# if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: -# os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -# pass +keynames = "\n" + "\n".join(os.environ.keys()) +if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +pass # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9bea364ca4..0fcfe2a270 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -335,6 +335,9 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + # Extend RoPE dynamically to fit in VRAM + self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -971,19 +974,21 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) # Build here to make `torch.jit.trace` work. - self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. 
- self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -994,14 +999,21 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.max_seq_len_cached: + if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( - self.cos_cached[:seq_len].to(dtype=x.dtype), - self.sin_cached[:seq_len].to(dtype=x.dtype), + self.cos_cached[:seq_len].to(dtype = x.dtype), + self.sin_cached[:seq_len].to(dtype = x.dtype), ) pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass pass @@ -1016,11 +1028,11 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s pass def _set_cos_sin_cache(self, seq_len, device, dtype): - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() t = t / self.scaling_factor freqs = torch.outer(t, inv_freq) From 6c8618c75443a08c4ec0304cc54acfae74b2ddcd Mon Sep 17 00:00:00 2001 From: Daniel Han 
Date: Thu, 18 Jul 2024 12:08:49 -0700 Subject: [PATCH 0267/1088] Update mistral.py --- unsloth/models/mistral.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 6eb3fccfab..d7376d9521 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -78,6 +78,9 @@ def MistralAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + # Extend RoPE dynamically to fit in VRAM + self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached From a56b2d45c9fcaba1aefb3dd09dacf904362bff59 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 12:33:22 -0700 Subject: [PATCH 0268/1088] Update llama.py --- unsloth/models/llama.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0fcfe2a270..ee261b0f70 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1152,6 +1152,12 @@ def from_pretrained( f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) + + # Warn about fast transfers + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": + logger.warning_once("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + pass + model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking From 40aeb2629e90615a359c2b8870112a826ef5baa5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 13:25:30 -0700 Subject: [PATCH 0269/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index dc0c7da854..060c1ccae4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -38,6 +38,17 @@ IGNORED_TOKENIZER_CHECKING = frozenset(( "CodeLlamaTokenizerFast", "CodeLlamaTokenizer", + "" +)) + + +IGNORED_TOKENIZER_NAMES = frozenset(( + "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", + "unsloth/Mistral-Nemo-Instruct-2407", + "mistralai/Mistral-Nemo-Instruct-2407", + "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", + "unsloth/Mistral-Nemo-Base-2407", + "mistralai/Mistral-Nemo-Base-2407", )) # Check environments @@ -488,7 +499,7 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) - if slow_tokenizer is not None: + if tokenizer_name not in IGNORED_TOKENIZER_NAMES and slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token if hasattr(fast_tokenizer, "add_eos_token") and hasattr(slow_tokenizer, "add_eos_token"): From fbf6cc747ef52812f5593a2daf96e55b5c200514 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 13:33:13 -0700 Subject: [PATCH 0270/1088] Update mistral.py --- unsloth/models/mistral.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index d7376d9521..b2531056a0 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -161,7 +161,7 @@ def MistralAttention_fast_forward( A = A.transpose(1, 2).contiguous() pass - attn_output = A.reshape(bsz, q_len, self.hidden_size) + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) attn_output = self.apply_o(self, attn_output) attn_weights = None return attn_output, attn_weights, past_key_value From 983c2b601aa3418cac25011317e08b454bde2c31 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 14:31:35 -0700 Subject: [PATCH 0271/1088] Update llama.py --- unsloth/models/llama.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ee261b0f70..ba45bbbfbb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -158,6 +158,14 @@ def LlamaAttention_fast_forward_inference( self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + + # Mistral Nemo 12b has weird dimensions + if attention_size != self.hidden_size: + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + else: + self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] + pass + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 @@ -239,7 +247,7 @@ def LlamaAttention_fast_forward_inference( pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) - A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) return A, 
(Kn, Vn) pass @@ -1152,7 +1160,7 @@ def from_pretrained( f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) - + # Warn about fast transfers if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": logger.warning_once("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") From ed56977a8c4b850d984517bc2da29319d20cc4c3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 14:43:03 -0700 Subject: [PATCH 0272/1088] Update __init__.py --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 6a2d999b41..4640681543 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -89,7 +89,7 @@ def is_bf16_supported(including_emulation = False): return old_is_bf16_supported(including_emulation) torch.cuda.is_bf16_supported = is_bf16_supported else: - def is_bf16_supported(): SUPPORTS_BFLOAT16 + def is_bf16_supported(): return SUPPORTS_BFLOAT16 torch.cuda.is_bf16_supported = is_bf16_supported pass From 2a251ec5948ea44d7332ab8b053b83276ae237ca Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 18:18:09 -0700 Subject: [PATCH 0273/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 1eb2486998..a992a02382 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -25,18 +25,23 @@ } # Flex Attention supported from torch 2.5 onwards only -import torch.nn.attention -if hasattr(torch.nn.attention, "flex_attention"): - import torch.nn.attention.flex_attention - from torch.nn.attention.flex_attention import flex_attention - from torch.nn.attention.flex_attention import 
create_block_mask - FLEX_ATTENTION_PADDING = getattr( - torch.nn.attention.flex_attention, - "_DEFAULT_SPARSE_BLOCK_SIZE", - 1, - ) - flex_attention = torch.compile(flex_attention, dynamic = False) - HAS_FLEX_ATTENTION = True +import torch.nn +if hasattr(torch.nn, "attention"): + import torch.nn.attention + if hasattr(torch.nn.attention, "flex_attention"): + import torch.nn.attention.flex_attention + from torch.nn.attention.flex_attention import flex_attention + from torch.nn.attention.flex_attention import create_block_mask + FLEX_ATTENTION_PADDING = getattr( + torch.nn.attention.flex_attention, + "_DEFAULT_SPARSE_BLOCK_SIZE", + 1, + ) + flex_attention = torch.compile(flex_attention, dynamic = False) + HAS_FLEX_ATTENTION = True + else: + HAS_FLEX_ATTENTION = False + pass else: HAS_FLEX_ATTENTION = False pass From e4598d01b2d1bb3e9f8954938ec9854cbb8ed697 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 18:19:24 -0700 Subject: [PATCH 0274/1088] Fix bugs (#779) * Update __init__.py * dynamic RoPE * Update mistral.py * Update llama.py * Update tokenizer_utils.py * Update mistral.py * Update llama.py * Update __init__.py * Update flex_attention.py --- unsloth/__init__.py | 2 +- unsloth/kernels/flex_attention.py | 29 +++++++++++--------- unsloth/models/llama.py | 44 ++++++++++++++++++++++++------- unsloth/models/mistral.py | 5 +++- unsloth/tokenizer_utils.py | 13 ++++++++- 5 files changed, 69 insertions(+), 24 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 6a2d999b41..4640681543 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -89,7 +89,7 @@ def is_bf16_supported(including_emulation = False): return old_is_bf16_supported(including_emulation) torch.cuda.is_bf16_supported = is_bf16_supported else: - def is_bf16_supported(): SUPPORTS_BFLOAT16 + def is_bf16_supported(): return SUPPORTS_BFLOAT16 torch.cuda.is_bf16_supported = is_bf16_supported pass diff --git a/unsloth/kernels/flex_attention.py 
b/unsloth/kernels/flex_attention.py index 1eb2486998..a992a02382 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -25,18 +25,23 @@ } # Flex Attention supported from torch 2.5 onwards only -import torch.nn.attention -if hasattr(torch.nn.attention, "flex_attention"): - import torch.nn.attention.flex_attention - from torch.nn.attention.flex_attention import flex_attention - from torch.nn.attention.flex_attention import create_block_mask - FLEX_ATTENTION_PADDING = getattr( - torch.nn.attention.flex_attention, - "_DEFAULT_SPARSE_BLOCK_SIZE", - 1, - ) - flex_attention = torch.compile(flex_attention, dynamic = False) - HAS_FLEX_ATTENTION = True +import torch.nn +if hasattr(torch.nn, "attention"): + import torch.nn.attention + if hasattr(torch.nn.attention, "flex_attention"): + import torch.nn.attention.flex_attention + from torch.nn.attention.flex_attention import flex_attention + from torch.nn.attention.flex_attention import create_block_mask + FLEX_ATTENTION_PADDING = getattr( + torch.nn.attention.flex_attention, + "_DEFAULT_SPARSE_BLOCK_SIZE", + 1, + ) + flex_attention = torch.compile(flex_attention, dynamic = False) + HAS_FLEX_ATTENTION = True + else: + HAS_FLEX_ATTENTION = False + pass else: HAS_FLEX_ATTENTION = False pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9bea364ca4..ba45bbbfbb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -158,6 +158,14 @@ def LlamaAttention_fast_forward_inference( self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + + # Mistral Nemo 12b has weird dimensions + if attention_size != self.hidden_size: + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + else: + self.temp_O = 
self.temp_QA[1][:,:,:self.hidden_size] + pass + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 @@ -239,7 +247,7 @@ def LlamaAttention_fast_forward_inference( pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) - A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) return A, (Kn, Vn) pass @@ -335,6 +343,9 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + # Extend RoPE dynamically to fit in VRAM + self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -971,19 +982,21 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) # Build here to make `torch.jit.trace` work. - self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. 
- self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -994,14 +1007,21 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.max_seq_len_cached: + if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( - self.cos_cached[:seq_len].to(dtype=x.dtype), - self.sin_cached[:seq_len].to(dtype=x.dtype), + self.cos_cached[:seq_len].to(dtype = x.dtype), + self.sin_cached[:seq_len].to(dtype = x.dtype), ) pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass pass @@ -1016,11 +1036,11 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s pass def _set_cos_sin_cache(self, seq_len, device, dtype): - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() t = t / self.scaling_factor freqs = torch.outer(t, inv_freq) @@ -1140,6 +1160,12 @@ def from_pretrained( f"\ / Bfloat16 = 
{str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) + + # Warn about fast transfers + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": + logger.warning_once("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + pass + model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 6eb3fccfab..b2531056a0 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -78,6 +78,9 @@ def MistralAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + # Extend RoPE dynamically to fit in VRAM + self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -158,7 +161,7 @@ def MistralAttention_fast_forward( A = A.transpose(1, 2).contiguous() pass - attn_output = A.reshape(bsz, q_len, self.hidden_size) + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) attn_output = self.apply_o(self, attn_output) attn_weights = None return attn_output, attn_weights, past_key_value diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index dc0c7da854..060c1ccae4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -38,6 +38,17 @@ IGNORED_TOKENIZER_CHECKING = frozenset(( "CodeLlamaTokenizerFast", "CodeLlamaTokenizer", + "" +)) + + +IGNORED_TOKENIZER_NAMES = frozenset(( + "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", + "unsloth/Mistral-Nemo-Instruct-2407", + "mistralai/Mistral-Nemo-Instruct-2407", + "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", + "unsloth/Mistral-Nemo-Base-2407", + "mistralai/Mistral-Nemo-Base-2407", )) # Check environments @@ -488,7 +499,7 @@ def load_correct_tokenizer( 
cache_dir = cache_dir, ) - if slow_tokenizer is not None: + if tokenizer_name not in IGNORED_TOKENIZER_NAMES and slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token if hasattr(fast_tokenizer, "add_eos_token") and hasattr(slow_tokenizer, "add_eos_token"): From 477793753f6aa4100b785a1a3557f34c3223cbcd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 21:57:33 -0700 Subject: [PATCH 0275/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ba45bbbfbb..212767b393 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1163,7 +1163,7 @@ def from_pretrained( # Warn about fast transfers if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": - logger.warning_once("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") pass model_patcher.pre_patch() From 152450462475b7621164be944fbe2945a26cddd0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 22:07:23 -0700 Subject: [PATCH 0276/1088] Update llama.py --- unsloth/models/llama.py | 53 ++++++++--------------------------------- 1 file changed, 10 insertions(+), 43 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 212767b393..1ac96a4f21 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -158,14 +158,6 @@ def LlamaAttention_fast_forward_inference( self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") - - # Mistral Nemo 12b has weird dimensions - if 
attention_size != self.hidden_size: - self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") - else: - self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] - pass - self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 @@ -247,7 +239,7 @@ def LlamaAttention_fast_forward_inference( pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) - A = fast_linear_forward(self.o_proj, A, out = self.temp_O) + A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) return A, (Kn, Vn) pass @@ -343,9 +335,6 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] - # Extend RoPE dynamically to fit in VRAM - self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) - if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -673,12 +662,6 @@ def LlamaModel_fast_forward( offloaded_gradient_checkpointing = True pass - # Check for Flex Attention - # if IS_GEMMA2 and HAS_FLEX_ATTENTION: - # if not (seq_length % FLEX_ATTENTION_PADDING == 0): - # USE_FLEX_ATTENTION = True - - # Gemma2 has alternating SWA and global attn if IS_GEMMA2 and not hasattr(self, "SWA_mask"): n = self.config.max_position_embeddings @@ -982,21 +965,19 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base - # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this - self.current_rope_size = min(4 * 8192, self.max_position_embeddings) # Build here to make `torch.jit.trace` work. 
- self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) + self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. - self.current_rope_size = seq_len + self.max_seq_len_cached = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -1007,21 +988,14 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.current_rope_size: + if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( - self.cos_cached[:seq_len].to(dtype = x.dtype), - self.sin_cached[:seq_len].to(dtype = x.dtype), + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), ) pass - - def extend_rope_embedding(self, x, seq_len): - if seq_len <= self.current_rope_size: return - # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 - self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) - pass pass @@ -1036,11 +1010,11 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s pass def _set_cos_sin_cache(self, seq_len, device, dtype): - self.current_rope_size = seq_len + self.max_seq_len_cached = 
seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() t = t / self.scaling_factor freqs = torch.outer(t, inv_freq) @@ -1160,12 +1134,6 @@ def from_pretrained( f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) - - # Warn about fast transfers - if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": - print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") - pass - model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking @@ -2113,5 +2081,4 @@ def for_training(model, use_gradient_checkpointing = True): internal_model._saved_temp_tokenizer.padding_side = "right" pass pass -pass - +pass \ No newline at end of file From c1d349370411f2c7861d5967c0c4a2ca59935670 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 18 Jul 2024 22:53:23 -0700 Subject: [PATCH 0277/1088] Mistral Nemo --- unsloth/models/_utils.py | 31 ++++++++++++++++++--- unsloth/models/llama.py | 58 ++++++++++++++++++++++++++++++++------- unsloth/models/mistral.py | 22 ++++++++++++++- 3 files changed, 96 insertions(+), 15 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 025daec136..466a5fee70 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -65,8 +65,26 @@ # ============================================= # Edits all Config files to enable RoPE Scaling for all models -from transformers import PretrainedConfig +# Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now. 
+def patch_mistral_nemo_config(config): + if "head_dim (" not in config: + add_head_dim = "If it is not specified, will default to `8`.\n"\ + " head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):\n"\ + " The attention head dimension." + config = config.replace("If it is not specified, will default to `8`.", add_head_dim) + + add_head_dim = "num_key_value_heads=8,\n head_dim=None," + config = config.replace("num_key_value_heads=8,", add_head_dim) + + add_head_dim = "self.sliding_window = sliding_window\n self.head_dim = head_dim or hidden_size // num_attention_heads\n" + config = config.replace("self.sliding_window = sliding_window", add_head_dim) + pass + return config +pass + +from transformers import __version__ as transformers_version +from transformers import PretrainedConfig model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2",] for model_name in model_architectures: @@ -87,8 +105,14 @@ r"\n self.rope_scaling = rope_scaling\n", config, ) - exec(config, globals()) + # Just for Mistral Nemo + if model_name == "mistral": + if Version(transformers_version) <= Version("4.42.4"): + config = patch_mistral_nemo_config(config) + pass + + exec(config, globals()) exec(f"import {config_filepath}", globals()) exec(f"{config_filepath}.{config_filename} = {config_filename}", globals()) pass @@ -97,7 +121,6 @@ # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch -from packaging.version import Version if Version(torch.__version__) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd torch_amp_custom_bwd = torch.cuda.amp.custom_bwd @@ -748,7 +771,7 @@ def patch_linear_scaling( "self.rotary_emb = .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) - if len(rotary_emb) == 0: return + if len(rotary_emb) == 0: return None, function rotary_emb = rotary_emb[0] function = function.replace(rotary_emb, fix_rope_function, 1) function = exec_code + "\n\n" + function diff 
--git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1ac96a4f21..ca4e651598 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -158,6 +158,14 @@ def LlamaAttention_fast_forward_inference( self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + + # Mistral Nemo 12b has weird dimensions + if attention_size != self.hidden_size: + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + else: + self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] + pass + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 @@ -239,7 +247,7 @@ def LlamaAttention_fast_forward_inference( pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) - A = fast_linear_forward(self.o_proj, A, out = self.temp_QA[1][:,:,:self.hidden_size]) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) return A, (Kn, Vn) pass @@ -335,6 +343,9 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + # Extend RoPE dynamically to fit in VRAM + self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -662,6 +673,12 @@ def LlamaModel_fast_forward( offloaded_gradient_checkpointing = True pass + # Check for Flex Attention + # if IS_GEMMA2 and HAS_FLEX_ATTENTION: + # if not (seq_length % FLEX_ATTENTION_PADDING == 0): + # USE_FLEX_ATTENTION = True + + # Gemma2 has alternating SWA and global attn if IS_GEMMA2 and not hasattr(self, "SWA_mask"): n = self.config.max_position_embeddings @@ -965,19 +982,21 @@ def __init__(self, dim, 
max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) # Build here to make `torch.jit.trace` work. - self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() freqs = torch.outer(t, inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -988,14 +1007,21 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.max_seq_len_cached: + if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( - self.cos_cached[:seq_len].to(dtype=x.dtype), - self.sin_cached[:seq_len].to(dtype=x.dtype), + self.cos_cached[:seq_len].to(dtype = x.dtype), + self.sin_cached[:seq_len].to(dtype = x.dtype), ) pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = 
int(round(seq_len / 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass pass @@ -1010,11 +1036,11 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s pass def _set_cos_sin_cache(self, seq_len, device, dtype): - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len inv_freq = 1.0 / ( self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) ) - t = torch.arange(self.max_seq_len_cached, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() t = t / self.scaling_factor freqs = torch.outer(t, inv_freq) @@ -1134,6 +1160,15 @@ def from_pretrained( f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) + + # Warn about fast transfers + old_hf_transfer = os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": + print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + pass + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking @@ -1215,6 +1250,8 @@ def from_pretrained( attn_implementation = "eager", **kwargs, ) + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
post_check = check_nvidia() @@ -2081,4 +2118,5 @@ def for_training(model, use_gradient_checkpointing = True): internal_model._saved_temp_tokenizer.padding_side = "right" pass pass -pass \ No newline at end of file +pass + diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index b2531056a0..e0e034fc5c 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -270,6 +270,24 @@ def MistralForCausalLM_fast_forward( pass +# Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now. +def patch_mistral_nemo_attention(function): + function = function.replace( + "(self.head_dim * self.num_heads) != self.hidden_size", + "False", + ) + function = function.replace( + "self.head_dim = self.hidden_size // self.num_heads", + "self.head_dim = config.head_dim", + ) + function = function.replace( + "self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)", + "self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)", + ) + return function +pass + + class FastMistralModel(FastLlamaModel): @staticmethod @@ -280,7 +298,9 @@ def pre_patch(): scaled_rope_module = LlamaLinearScalingRotaryEmbedding, attention_module = MistralAttention, ) - if init_name is not None: + # Just for Mistral Nemo models! 
+ function = patch_mistral_nemo_attention(function) + if True:#init_name is not None: exec(function, globals()) MistralAttention.__init__ = eval(init_name) pass From 6c3573990c6fd86d1cc56f8936b086da0286ef23 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 00:14:24 -0700 Subject: [PATCH 0278/1088] Mistral Nemo (#782) * Update __init__.py * dynamic RoPE * Update mistral.py * Update llama.py * Update tokenizer_utils.py * Update mistral.py * Update llama.py * Update __init__.py * Update flex_attention.py * Update llama.py * Update llama.py * Mistral Nemo --- unsloth/models/_utils.py | 31 +++++++++++++++++++++++++++---- unsloth/models/llama.py | 7 ++++++- unsloth/models/mistral.py | 22 +++++++++++++++++++++- 3 files changed, 54 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 025daec136..466a5fee70 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -65,8 +65,26 @@ # ============================================= # Edits all Config files to enable RoPE Scaling for all models -from transformers import PretrainedConfig +# Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now. +def patch_mistral_nemo_config(config): + if "head_dim (" not in config: + add_head_dim = "If it is not specified, will default to `8`.\n"\ + " head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):\n"\ + " The attention head dimension." 
+ config = config.replace("If it is not specified, will default to `8`.", add_head_dim) + + add_head_dim = "num_key_value_heads=8,\n head_dim=None," + config = config.replace("num_key_value_heads=8,", add_head_dim) + + add_head_dim = "self.sliding_window = sliding_window\n self.head_dim = head_dim or hidden_size // num_attention_heads\n" + config = config.replace("self.sliding_window = sliding_window", add_head_dim) + pass + return config +pass + +from transformers import __version__ as transformers_version +from transformers import PretrainedConfig model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2",] for model_name in model_architectures: @@ -87,8 +105,14 @@ r"\n self.rope_scaling = rope_scaling\n", config, ) - exec(config, globals()) + # Just for Mistral Nemo + if model_name == "mistral": + if Version(transformers_version) <= Version("4.42.4"): + config = patch_mistral_nemo_config(config) + pass + + exec(config, globals()) exec(f"import {config_filepath}", globals()) exec(f"{config_filepath}.{config_filename} = {config_filename}", globals()) pass @@ -97,7 +121,6 @@ # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch -from packaging.version import Version if Version(torch.__version__) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd torch_amp_custom_bwd = torch.cuda.amp.custom_bwd @@ -748,7 +771,7 @@ def patch_linear_scaling( "self.rotary_emb = .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) - if len(rotary_emb) == 0: return + if len(rotary_emb) == 0: return None, function rotary_emb = rotary_emb[0] function = function.replace(rotary_emb, fix_rope_function, 1) function = exec_code + "\n\n" + function diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ba45bbbfbb..ca4e651598 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1162,9 +1162,12 @@ def from_pretrained( print(statistics) # Warn about fast transfers + old_hf_transfer 
= os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": - logger.warning_once("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") pass + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking @@ -1247,6 +1250,8 @@ def from_pretrained( attn_implementation = "eager", **kwargs, ) + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! post_check = check_nvidia() diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index b2531056a0..e0e034fc5c 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -270,6 +270,24 @@ def MistralForCausalLM_fast_forward( pass +# Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now. +def patch_mistral_nemo_attention(function): + function = function.replace( + "(self.head_dim * self.num_heads) != self.hidden_size", + "False", + ) + function = function.replace( + "self.head_dim = self.hidden_size // self.num_heads", + "self.head_dim = config.head_dim", + ) + function = function.replace( + "self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)", + "self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)", + ) + return function +pass + + class FastMistralModel(FastLlamaModel): @staticmethod @@ -280,7 +298,9 @@ def pre_patch(): scaled_rope_module = LlamaLinearScalingRotaryEmbedding, attention_module = MistralAttention, ) - if init_name is not None: + # Just for Mistral Nemo models! 
+ function = patch_mistral_nemo_attention(function) + if True:#init_name is not None: exec(function, globals()) MistralAttention.__init__ = eval(init_name) pass From 009de398004156cf051c22c32fada628b4e38f33 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 00:41:35 -0700 Subject: [PATCH 0279/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 060c1ccae4..e0c89e4516 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -38,7 +38,6 @@ IGNORED_TOKENIZER_CHECKING = frozenset(( "CodeLlamaTokenizerFast", "CodeLlamaTokenizer", - "" )) @@ -499,7 +498,8 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) - if tokenizer_name not in IGNORED_TOKENIZER_NAMES and slow_tokenizer is not None: + if tokenizer_name in IGNORED_TOKENIZER_NAMES: pass + elif slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token if hasattr(fast_tokenizer, "add_eos_token") and hasattr(slow_tokenizer, "add_eos_token"): From 10c13545c346990c78717b529af5cdac6d1856d1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 01:03:51 -0700 Subject: [PATCH 0280/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index e0c89e4516..7b88b0932f 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -498,7 +498,8 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) - if tokenizer_name in IGNORED_TOKENIZER_NAMES: pass + if tokenizer_name in IGNORED_TOKENIZER_NAMES: + return fast_tokenizer elif slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = 
slow_tokenizer.add_bos_token From ad3d38ad4dde514b842688d6fa184e085eaf5320 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 01:06:41 -0700 Subject: [PATCH 0281/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 7b88b0932f..07cd874125 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -41,14 +41,17 @@ )) -IGNORED_TOKENIZER_NAMES = frozenset(( +IGNORED_TOKENIZER_NAMES = [ "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", "unsloth/Mistral-Nemo-Instruct-2407", "mistralai/Mistral-Nemo-Instruct-2407", "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", -)) +] +IGNORED_TOKENIZER_NAMES = frozenset( + [x.lower() for x in IGNORED_TOKENIZER_NAMES] +) # Check environments keynames = "\n" + "\n".join(os.environ.keys()) From 81e1bedf0a41b323e0433ce6a2691ab7b8ea2bfb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 01:24:46 -0700 Subject: [PATCH 0282/1088] Nightly (#783) * Update __init__.py * dynamic RoPE * Update mistral.py * Update llama.py * Update tokenizer_utils.py * Update mistral.py * Update llama.py * Update __init__.py * Update flex_attention.py * Update llama.py * Update llama.py * Mistral Nemo * Update tokenizer_utils.py * Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index e0c89e4516..07cd874125 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -41,14 +41,17 @@ )) -IGNORED_TOKENIZER_NAMES = frozenset(( +IGNORED_TOKENIZER_NAMES = [ "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", "unsloth/Mistral-Nemo-Instruct-2407", "mistralai/Mistral-Nemo-Instruct-2407", "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", "unsloth/Mistral-Nemo-Base-2407", 
"mistralai/Mistral-Nemo-Base-2407", -)) +] +IGNORED_TOKENIZER_NAMES = frozenset( + [x.lower() for x in IGNORED_TOKENIZER_NAMES] +) # Check environments keynames = "\n" + "\n".join(os.environ.keys()) @@ -498,7 +501,8 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) - if tokenizer_name in IGNORED_TOKENIZER_NAMES: pass + if tokenizer_name in IGNORED_TOKENIZER_NAMES: + return fast_tokenizer elif slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token From 8ee997cac28e3bc3ff205252ca543ab46ade3d25 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 01:29:52 -0700 Subject: [PATCH 0283/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 07cd874125..3f75d16863 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -682,6 +682,11 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): embedding_matrix = model.get_input_embeddings ().weight lm_head_matrix = model.get_output_embeddings().weight + # Ignore some model checks for now + if model.config._name_or_path in IGNORED_TOKENIZER_NAMES: + return + pass + # Get untrained tokens indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps where_untrained = torch.where(indicator_untrained)[0] From dd201f57b1fe88fdb22bbbc367893788fd4b3c1c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 01:39:08 -0700 Subject: [PATCH 0284/1088] Nightly (#784) * Update __init__.py * dynamic RoPE * Update mistral.py * Update llama.py * Update tokenizer_utils.py * Update mistral.py * Update llama.py * Update __init__.py * Update flex_attention.py * Update llama.py * Update llama.py * Mistral Nemo * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 5 +++++ 1 
file changed, 5 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 07cd874125..3f75d16863 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -682,6 +682,11 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): embedding_matrix = model.get_input_embeddings ().weight lm_head_matrix = model.get_output_embeddings().weight + # Ignore some model checks for now + if model.config._name_or_path in IGNORED_TOKENIZER_NAMES: + return + pass + # Get untrained tokens indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps where_untrained = torch.where(indicator_untrained)[0] From 39885a4ddc03d0cbfd2b6759adf4521fa1a1c5d5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 03:03:50 -0700 Subject: [PATCH 0285/1088] Update README.md --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f3bc0608bf..85e4d14611 100644 --- a/README.md +++ b/README.md @@ -23,11 +23,11 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | -| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | +| **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | -| **Phi-3 (medium)** | [▶️ Start for free](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) | 2x faster | 50% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | +| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | @@ -39,6 +39,7 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! From 568bfddfc33df6cbc641e0945b33ef13900cff01 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 03:05:15 -0700 Subject: [PATCH 0286/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 85e4d14611..05977bad73 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported -- 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated +- 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. - 📣 NEW! 
Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! - 📣 NEW! Qwen2 now works - 📣 [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct] From 565a5a389460bc8e4d0f56cc5fb6276bbb658065 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 09:27:18 -0700 Subject: [PATCH 0287/1088] Fix Gemma --- unsloth/models/gemma.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index bc70b993ae..ce89ad3be6 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -210,22 +210,24 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) # Build here to make `torch.jit.trace` work. - self._set_cos_sin_cache(seq_len=max_position_embeddings, device=device, dtype=torch.get_default_dtype()) + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) pass def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len # The difference is we do division explicity instead of t * (1/x) ie we do t/x. 
freq_exponents = (2.0 / self.dim) * ( torch.arange(self.dim // 2, dtype = torch.int64, device = "cpu").float() ) timescale = self.base**freq_exponents - positions = torch.arange(self.max_seq_len_cached, device = "cpu", dtype = torch.int64).float() + positions = torch.arange(self.current_rope_size, device = "cpu", dtype = torch.int64).float() radians_new = positions[..., None] / timescale[None, None, :] radians_new = radians_new.squeeze(0) @@ -239,7 +241,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - if seq_len > self.max_seq_len_cached: + if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( @@ -247,6 +249,13 @@ def forward(self, x, position_ids=None, seq_len=None): self.sin_cached[:seq_len].to(dtype=x.dtype), ) pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass pass @@ -263,14 +272,14 @@ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, s def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. - self.max_seq_len_cached = seq_len + self.current_rope_size = seq_len # The difference is we do division explicity instead of t * (1/x) ie we do t/x. 
freq_exponents = (2.0 / self.dim) * ( torch.arange(self.dim // 2, dtype = torch.int64, device = "cpu").float() ) timescale = self.base**freq_exponents - positions = torch.arange(self.max_seq_len_cached, device = "cpu", dtype = torch.int64).float() + positions = torch.arange(self.current_rope_size, device = "cpu", dtype = torch.int64).float() positions = positions / self.scaling_factor radians_new = positions[..., None] / timescale[None, None, :] radians_new = radians_new.squeeze(0) From 182ab7e0cb28b21c0b3b119668ec3cd9aceb15de Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 19 Jul 2024 09:32:27 -0700 Subject: [PATCH 0288/1088] Update mistral.py --- unsloth/models/mistral.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e0e034fc5c..ed6207bb06 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -299,8 +299,9 @@ def pre_patch(): attention_module = MistralAttention, ) # Just for Mistral Nemo models! 
- function = patch_mistral_nemo_attention(function) - if True:#init_name is not None: + if function is not None: + function = patch_mistral_nemo_attention(function) + # if True:#init_name is not None: exec(function, globals()) MistralAttention.__init__ = eval(init_name) pass From 72e1b03544c3d23a0c28f883f242fa0f96e8091b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jul 2024 11:53:32 -0700 Subject: [PATCH 0289/1088] Update llama.py --- unsloth/models/llama.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ca4e651598..32610bbfda 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1068,6 +1068,8 @@ def _fast_generate(*args, **kwargs): # For newer HF kwargs["cache_implementation"] = "dynamic" + print(kwargs) + # Set pad token # old_pad_token_id = getattr(model.config, "pad_token_id", None) # old_eos_token_id = getattr(model.config, "eos_token_id", None) From ba515ec92dbc85c03c65d3f31e10166cc73ef323 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jul 2024 12:47:36 -0700 Subject: [PATCH 0290/1088] Update llama.py --- unsloth/models/llama.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 32610bbfda..ff51b90b84 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1068,7 +1068,14 @@ def _fast_generate(*args, **kwargs): # For newer HF kwargs["cache_implementation"] = "dynamic" - print(kwargs) + # Remove token_type_ids + kwargs.pop("token_type_ids", None) + + # Check pad_token + kwargs["pad_token_id"] = kwargs.pop( + "pad_token_id", + getattr(model.config, "eos_token_id", None), + ) # Set pad token # old_pad_token_id = getattr(model.config, "pad_token_id", None) From 5f496efdb4db75371aa17d5b1b393f96cd55a2bd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jul 2024 13:22:36 -0700 Subject: [PATCH 0291/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 7 ++++++- 
1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 3f75d16863..0469f4d61b 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -688,7 +688,12 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): pass # Get untrained tokens - indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps + indicator_untrained1 = torch.amax(embedding_matrix, axis = 1) <= eps + # Check lm_head as well + indicator_untrained2 = torch.amax(lm_head_matrix, axis = 1) <= eps + # Combine both checks + indicator_untrained = indicator_untrained1 & indicator_untrained2 + where_untrained = torch.where(indicator_untrained)[0] n_untrained = where_untrained.shape[0] n_trained = embedding_matrix.shape[0] - n_untrained From e41cc4093c70095e4aef390c8afae85c38aa4eb3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 20 Jul 2024 13:25:59 -0700 Subject: [PATCH 0292/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 0469f4d61b..8474c2c6b7 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -42,12 +42,12 @@ IGNORED_TOKENIZER_NAMES = [ - "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", - "unsloth/Mistral-Nemo-Instruct-2407", - "mistralai/Mistral-Nemo-Instruct-2407", - "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", - "unsloth/Mistral-Nemo-Base-2407", - "mistralai/Mistral-Nemo-Base-2407", + # "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", + # "unsloth/Mistral-Nemo-Instruct-2407", + # "mistralai/Mistral-Nemo-Instruct-2407", + # "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", + # "unsloth/Mistral-Nemo-Base-2407", + # "mistralai/Mistral-Nemo-Base-2407", ] IGNORED_TOKENIZER_NAMES = frozenset( [x.lower() for x in IGNORED_TOKENIZER_NAMES] From c553b175d239f023882562ac727a92e6fcc95417 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: 
Mon, 22 Jul 2024 22:58:02 -0700 Subject: [PATCH 0293/1088] Llama 3.1 --- unsloth/models/_utils.py | 93 +++++++++++++++++++++++++++++++++++++++- unsloth/models/llama.py | 73 +++++++++++++++++++++++++++++++ unsloth/models/mapper.py | 14 ++++++ 3 files changed, 179 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 466a5fee70..c7c779a231 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -33,6 +33,7 @@ "unsloth_offloaded_gradient_checkpoint", "torch_compile_options", "patch_linear_scaling", + "patch_llama_rope_scaling", "check_nvidia", "create_boolean_mask", "torch_amp_custom_fwd", @@ -332,7 +333,13 @@ def patch_tokenizer(model, tokenizer): Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! Fixes https://github.com/unslothai/unsloth/issues/5 """ - possible_reserved_tokens = ("<|reserved", "<|placeholder", "[control") + possible_reserved_tokens = ( + "<|reserved", # Llama-3 + "<|placeholder", # Phi-3 + "[control", # Forgot where lol + "", # Mistral Nemo + "<|finetune_right_pad_id|>", # Llama-3.1 + ) if model is not None: model.config.update({"unsloth_version" : __version__}) @@ -779,6 +786,90 @@ def patch_linear_scaling( pass +# Patches for Llama-3 LlamaExtendedRotaryEmbedding +def patch_llama_rope_scaling( + model_name = "llama", + rope_module = None, + scaled_rope_module = None, + extended_rope_module = None, + attention_module = None, +): + assert(\ + rope_module is not None and \ + scaled_rope_module is not None and \ + extended_rope_module is not None + ) + assert(attention_module is not None) + + rope_name = rope_module.__name__ + scaled_rope_name = scaled_rope_module.__name__ + model_filepath = f"transformers.models.{model_name}.modeling_{model_name}" + exec_code = \ + f"import torch.nn as nn\n"\ + f"from typing import Union, Optional, List, Any, Callable, Tuple\n"\ + f"from {model_filepath} import logger, "\ + f"{model_name.title()}Attention, 
{model_name.title()}Config" + + try: + function = inspect.getsource(attention_module.__init__) + except: + # Most likely already patched! + return None, None + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + init_name = f"{model_name.title()}Attention__init__" + function = function.replace("def __init__", f"def {init_name}") + function = function.replace( + "super().__init__()", + f"super({model_name.title()}Attention, self).__init__()", + ) + fix_rope_function = """ + if getattr(self.config, "rope_scaling", None) is None: + # Hack + if self.config.max_position_embeddings == 131072 + self.rotary_emb = {rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling.get("factor") + if scaling_type == "linear": + self.rotary_emb = {scaled_rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "extended": + self.rotary_emb = {extended_rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {{scaling_type}}") + pass + """ + fix_rope_function = fix_rope_function.format( + rope_function = rope_module.__name__, + scaled_rope_function = scaled_rope_module.__name__, + extended_rope_function = extended_rope_module.__name__, + ) + rotary_emb = re.findall( + "self.rotary_emb = .+?\)", function, + flags = re.DOTALL | re.MULTILINE, + ) + if len(rotary_emb) == 0: return None, function + rotary_emb = rotary_emb[0] + function = function.replace(rotary_emb, fix_rope_function, 1) + function = exec_code + "\n\n" + function + return init_name, function +pass + + def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're 
working on it! output = np.array([0,]) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ff51b90b84..2d224b3cad 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1052,6 +1052,68 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass +# See https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/rotary_embedding.py#L736 +# For Llama 3.1 +class LlamaExtendedRotaryEmbedding(LlamaRotaryEmbedding): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) + + # Normal Llama-3 RoPE + inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) + ) + inv_freq = self.apply_scaling(inv_freq) + self.register_buffer("inv_freq", inv_freq, persistent = False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) + pass + + def _set_cos_sin_cache(self, seq_len, device, dtype): + # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and + # in FP32. They are applied (multiplied) in FP32 as well. 
+ self.current_rope_size = seq_len + + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + pass + + def apply_scaling(self, freqs: torch.Tensor): + scale_factor = 8 + low_freq_factor = 1 + high_freq_factor = 4 + old_context_len = 8192 + + low_freq_wavelen = old_context_len / low_freq_factor + high_freq_wavelen = old_context_len / high_freq_factor + new_freqs = [] + for freq in freqs: + wavelen = 2 * math.pi / freq + if wavelen < high_freq_wavelen: + new_freqs.append(freq) + elif wavelen > low_freq_wavelen: + new_freqs.append(freq / scale_factor) + else: + assert low_freq_wavelen != high_freq_wavelen + smooth = (old_context_len / wavelen - low_freq_factor) / ( + high_freq_factor - low_freq_factor) + new_freqs.append((1 - smooth) * freq / scale_factor + + smooth * freq) + return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device) + pass +pass + + def _wrap_fast_inference(generate, device_type, dtype, model): # Wraps inference with bfloat16 / float16 @torch.inference_mode @@ -1108,6 +1170,17 @@ class FastLlamaModel: @staticmethod def pre_patch(): + init_name, function = patch_llama_rope_scaling( + model_name = "llama", + rope_module = LlamaRotaryEmbedding, + scaled_rope_module = LlamaLinearScalingRotaryEmbedding, + extended_rope_module = LlamaExtendedRotaryEmbedding, + attention_module = LlamaAttention, + ) + if init_name is not None: + exec(function, globals()) + LlamaAttention.__init__ = eval(init_name) + pass LlamaAttention .forward = LlamaAttention_fast_forward LlamaSdpaAttention .forward = 
LlamaAttention_fast_forward LlamaFlashAttention2.forward = LlamaAttention_fast_forward diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 38cbdbe992..462c85f2a1 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,6 +218,20 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), + "unsloth/llama-3.1-8b-bnb-4bit" : ( + "unsloth/llama-3.1-8b", + "meta-llama/Meta-Llama-3.1-8B", + ), + "unsloth/llama-3.1-8b-Instruct-bnb-4bit" : ( + "unsloth/llama-3.1-8b-Instruct", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + ), + "unsloth/llama-3.1-70b-bnb-4bit" : ( + "meta-llama/Meta-Llama-3.1-70B", + ), + "unsloth/llama-3.1-70b-Instruct-bnb-4bit" : ( + "meta-llama/Meta-Llama-3.1-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From 00ad7992f69ea086ee4b8e9229d6c901ace494c5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 22 Jul 2024 23:01:18 -0700 Subject: [PATCH 0294/1088] Update _utils.py --- unsloth/models/_utils.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c7c779a231..27eb226f27 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -826,13 +826,19 @@ def patch_llama_rope_scaling( ) fix_rope_function = """ if getattr(self.config, "rope_scaling", None) is None: - # Hack - if self.config.max_position_embeddings == 131072 - self.rotary_emb = {rope_function}( - self.head_dim, - max_position_embeddings=self.max_position_embeddings, - base=self.rope_theta, - ) + # Hack to check for Llama-3.1 + if 'llama-3.1' in str(self.config.config._name_or_path).lower(): + self.rotary_emb = {extended_rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + self.rotary_emb = {rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) else: scaling_type = self.config.rope_scaling["type"] 
scaling_factor = self.config.rope_scaling.get("factor") From ae2d1b6cacf8ce46c5aed68ef44921f6a498d8e2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:27:36 -0700 Subject: [PATCH 0295/1088] Llama 3.1 --- README.md | 5 +++-- unsloth/models/_utils.py | 2 ++ unsloth/models/llama.py | 12 +++++++----- unsloth/models/mapper.py | 14 ++++++++------ 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 05977bad73..c666f2d9c8 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | +| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | | **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | @@ -32,13 +32,14 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -- **Kaggle Notebooks** for [Llama 3 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! 
[Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 27eb226f27..394213b9f0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -868,9 +868,11 @@ def patch_llama_rope_scaling( "self.rotary_emb = .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) + print(rotary_emb) if len(rotary_emb) == 0: return None, function rotary_emb = rotary_emb[0] function = function.replace(rotary_emb, fix_rope_function, 1) + print(function) function = exec_code + "\n\n" + function return init_name, function pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2d224b3cad..58fcc92764 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1088,11 +1088,13 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) pass - def apply_scaling(self, freqs: torch.Tensor): + # From https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/api/model.py#L41 + def apply_scaling(freqs: torch.Tensor): + # Values obtained from grid search scale_factor = 8 low_freq_factor = 1 high_freq_factor = 4 - old_context_len = 8192 + old_context_len = 8192 # original llama3 length low_freq_wavelen = old_context_len / low_freq_factor high_freq_wavelen = old_context_len / high_freq_factor @@ -1106,9 +1108,9 @@ def apply_scaling(self, freqs: torch.Tensor): else: assert low_freq_wavelen != high_freq_wavelen smooth = (old_context_len / wavelen - low_freq_factor) / ( - high_freq_factor - low_freq_factor) - new_freqs.append((1 - smooth) * freq / 
scale_factor + - smooth * freq) + high_freq_factor - low_freq_factor + ) + new_freqs.append((1 - smooth) * freq / scale_factor + smooth * freq) return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device) pass pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 462c85f2a1..fc13c94e84 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,18 +218,20 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), - "unsloth/llama-3.1-8b-bnb-4bit" : ( - "unsloth/llama-3.1-8b", + "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B", "meta-llama/Meta-Llama-3.1-8B", ), - "unsloth/llama-3.1-8b-Instruct-bnb-4bit" : ( - "unsloth/llama-3.1-8b-Instruct", + "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", ), - "unsloth/llama-3.1-70b-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B", "meta-llama/Meta-Llama-3.1-70B", ), - "unsloth/llama-3.1-70b-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct", ), } From 77c502cc0c97a84cf9230308919ad56aed9ef4f9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:33:07 -0700 Subject: [PATCH 0296/1088] Update _utils.py --- unsloth/models/_utils.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 394213b9f0..3ea38eab1d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -826,22 +826,17 @@ def patch_llama_rope_scaling( ) fix_rope_function = """ if getattr(self.config, "rope_scaling", None) is None: - # Hack to check for Llama-3.1 - if 'llama-3.1' in str(self.config.config._name_or_path).lower(): - self.rotary_emb = {extended_rope_function}( - self.head_dim, - 
max_position_embeddings=self.max_position_embeddings, - base=self.rope_theta, - ) - else: - self.rotary_emb = {rope_function}( - self.head_dim, - max_position_embeddings=self.max_position_embeddings, - base=self.rope_theta, - ) + self.rotary_emb = {rope_function}( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) else: - scaling_type = self.config.rope_scaling["type"] + scaling_type1 = self.config.rope_scaling.get("type", None) + scaling_type2 = self.config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 scaling_factor = self.config.rope_scaling.get("factor") + if scaling_type == "linear": self.rotary_emb = {scaled_rope_function}( self.head_dim, @@ -849,7 +844,7 @@ def patch_llama_rope_scaling( scaling_factor=scaling_factor, base=self.rope_theta, ) - elif scaling_type == "extended": + elif scaling_type == "llama3": self.rotary_emb = {extended_rope_function}( self.head_dim, max_position_embeddings=self.max_position_embeddings, From 41ee26ce655e05769172c209ea6dd3f8174baefc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:35:03 -0700 Subject: [PATCH 0297/1088] Update llama.py --- unsloth/models/llama.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 58fcc92764..403a7130b7 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1054,7 +1054,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # See https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/rotary_embedding.py#L736 # For Llama 3.1 -class LlamaExtendedRotaryEmbedding(LlamaRotaryEmbedding): +class LlamaExtendedRotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim @@ -1113,6 +1113,24 @@ def apply_scaling(freqs: torch.Tensor): new_freqs.append((1 - 
smooth) * freq / scale_factor + smooth * freq) return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device) pass + + def forward(self, x, position_ids=None, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.current_rope_size: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype = x.dtype), + self.sin_cached[:seq_len].to(dtype = x.dtype), + ) + pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass pass From 3dabf84ab67164c5a4c42c1ea598bdfbee320c6f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:36:06 -0700 Subject: [PATCH 0298/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 403a7130b7..830b345d79 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1089,7 +1089,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass # From https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/api/model.py#L41 - def apply_scaling(freqs: torch.Tensor): + def apply_scaling(self, freqs: torch.Tensor): # Values obtained from grid search scale_factor = 8 low_freq_factor = 1 From 07634b920399ffd0546be7a460bc27c48cc60b34 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:43:36 -0700 Subject: [PATCH 0299/1088] hack for rotary --- unsloth/models/gemma.py | 8 ++++++-- unsloth/models/llama.py | 12 +++++++++--- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index ce89ad3be6..6c9a57abf7 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -205,7 +205,9 @@ 
class GemmaFixedRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings @@ -264,7 +266,9 @@ class GemmaFixedLinearScalingRotaryEmbedding(GemmaFixedRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + config = None, # [TODO] Hack to pass in config - need to remove later + ): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 830b345d79..929c324966 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -977,7 +977,9 @@ class LlamaRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. 
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings @@ -1030,7 +1032,9 @@ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + config = None, # [TODO] Hack to pass in config - need to remove later + ): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) pass @@ -1055,7 +1059,9 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # See https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/rotary_embedding.py#L736 # For Llama 3.1 class LlamaExtendedRotaryEmbedding(torch.nn.Module): - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings From 4a46220131efa70892b48468406dc3bcaaf569bc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:48:45 -0700 Subject: [PATCH 0300/1088] patch RoPE --- unsloth/models/_utils.py | 12 ++++++------ unsloth/models/gemma.py | 6 ++++-- unsloth/models/llama.py | 9 ++++++--- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3ea38eab1d..2b8410032f 100644 
--- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -752,7 +752,7 @@ def patch_linear_scaling( fix_rope_function = """ if getattr(self.config, "rope_scaling", None) is None: self.rotary_emb = {rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) @@ -761,7 +761,7 @@ def patch_linear_scaling( scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": self.rotary_emb = {scaled_rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, @@ -827,7 +827,7 @@ def patch_llama_rope_scaling( fix_rope_function = """ if getattr(self.config, "rope_scaling", None) is None: self.rotary_emb = {rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) @@ -836,17 +836,17 @@ def patch_llama_rope_scaling( scaling_type2 = self.config.rope_scaling.get("rope_type", None) scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 scaling_factor = self.config.rope_scaling.get("factor") - + if scaling_type == "linear": self.rotary_emb = {scaled_rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, ) elif scaling_type == "llama3": self.rotary_emb = {extended_rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 6c9a57abf7..3dccf63ae4 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -205,9 +205,10 @@ class GemmaFixedRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, 
so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): + if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings @@ -266,9 +267,10 @@ class GemmaFixedLinearScalingRotaryEmbedding(GemmaFixedRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, config = None, # [TODO] Hack to pass in config - need to remove later ): + if config is not None: return # [TODO] Hack to pass in config - need to remove later self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 929c324966..d043f03d1f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -977,9 +977,10 @@ class LlamaRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. 
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): + if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings @@ -1032,9 +1033,10 @@ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, config = None, # [TODO] Hack to pass in config - need to remove later ): + if config is not None: return # [TODO] Hack to pass in config - need to remove later self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) pass @@ -1059,9 +1061,10 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # See https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/rotary_embedding.py#L736 # For Llama 3.1 class LlamaExtendedRotaryEmbedding(torch.nn.Module): - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): + if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings From 2d9f189cbe977c4d5bafc9629e9aa0558e373e96 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:53:31 -0700 Subject: [PATCH 0301/1088] refix rope --- 
unsloth/models/gemma.py | 5 ++--- unsloth/models/llama.py | 9 +++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 3dccf63ae4..e3f1e615db 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -208,8 +208,8 @@ class GemmaFixedRotaryEmbedding(torch.nn.Module): def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): - if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() + if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base @@ -270,9 +270,8 @@ class GemmaFixedLinearScalingRotaryEmbedding(GemmaFixedRotaryEmbedding): def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, config = None, # [TODO] Hack to pass in config - need to remove later ): - if config is not None: return # [TODO] Hack to pass in config - need to remove later self.scaling_factor = scaling_factor - super().__init__(dim, max_position_embeddings, base, device) + super().__init__(dim = dim, max_position_embeddings = max_position_embeddings, base = base, device = device, config = config) pass def _set_cos_sin_cache(self, seq_len, device, dtype): diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d043f03d1f..a4a6527ff2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -980,8 +980,9 @@ class LlamaRotaryEmbedding(torch.nn.Module): def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): - if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() + if config is not None: return # [TODO] Hack to pass in config - need to 
remove later + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base @@ -1036,9 +1037,8 @@ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, config = None, # [TODO] Hack to pass in config - need to remove later ): - if config is not None: return # [TODO] Hack to pass in config - need to remove later self.scaling_factor = scaling_factor - super().__init__(dim, max_position_embeddings, base, device) + super().__init__(dim = dim, max_position_embeddings = max_position_embeddings, base = base, device = device, config = config) pass def _set_cos_sin_cache(self, seq_len, device, dtype): @@ -1064,8 +1064,9 @@ class LlamaExtendedRotaryEmbedding(torch.nn.Module): def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, config = None, # [TODO] Hack to pass in config - need to remove later ): - if config is not None: return # [TODO] Hack to pass in config - need to remove later super().__init__() + if config is not None: return # [TODO] Hack to pass in config - need to remove later + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base From 80d62c3fa6ae248623c974b2926b61c3dba62da3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:54:54 -0700 Subject: [PATCH 0302/1088] Update _utils.py --- unsloth/models/_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2b8410032f..b021e89e9d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -863,11 +863,9 @@ def patch_llama_rope_scaling( "self.rotary_emb = .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) - print(rotary_emb) if len(rotary_emb) == 0: return None, function rotary_emb = rotary_emb[0] function = function.replace(rotary_emb, fix_rope_function, 1) - print(function) function = exec_code + "\n\n" + function 
return init_name, function pass From 7d7a5f77655b373c0c50b8df7a2a43ee950dc852 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 10:58:31 -0700 Subject: [PATCH 0303/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a4a6527ff2..a4b6552160 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1065,7 +1065,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - if config is not None: return # [TODO] Hack to pass in config - need to remove later + # if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = dim self.max_position_embeddings = max_position_embeddings From 2f9bd5bcb61f1530a48ee08bbdd5adbd4ec39a33 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:09:24 -0700 Subject: [PATCH 0304/1088] Llama 3.1 check --- pyproject.toml | 4 ++-- unsloth/models/loader.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 29b35577e9..829b35ad3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.42.3", + "transformers>=4.43.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -188,7 +188,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.42.3", + "transformers>=4.43.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0f170597b1..ece8af2821 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -27,6 +27,7 @@ SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version 
>= Version("4.42") +SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.1") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -130,7 +131,19 @@ def from_pretrained( model_type = model_config.model_type - if model_type == "llama": dispatch_model = FastLlamaModel + if model_type == "llama": + scaling_type1 = model_config.rope_scaling.get("type", None) + scaling_type2 = model_config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + + if scaling_type == "llama3" and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ + f"The minimum required version is 4.43.1\n"\ + f'Try `pip install --upgrade "transformers>=4.43.1"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastLlamaModel elif model_type == "mistral": dispatch_model = FastMistralModel elif model_type == "gemma": if not SUPPORTS_GEMMA: From 740979b1b9af32d39af7904973a71aaadf009984 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:12:58 -0700 Subject: [PATCH 0305/1088] Update llama.py --- unsloth/models/llama.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a4b6552160..295d92f629 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1010,6 +1010,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass def forward(self, x, position_ids=None, seq_len=None): + print(x, position_ids, seq_len) # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) From 47d230b3cd043306463e2b76bd8023f867427ea2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:13:15 -0700 Subject: [PATCH 0306/1088] Update llama.py --- unsloth/models/llama.py | 3 ++- 1 file changed, 2 insertions(+), 
1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 295d92f629..ff4d19c543 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1010,7 +1010,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass def forward(self, x, position_ids=None, seq_len=None): - print(x, position_ids, seq_len) + print(__LINE__, x, position_ids, seq_len) # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) @@ -1127,6 +1127,7 @@ def apply_scaling(self, freqs: torch.Tensor): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] + print(__LINE__, x, position_ids, seq_len) if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) From f849b8b61f387b672d74de4a4372d03fdebcf809 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:15:35 -0700 Subject: [PATCH 0307/1088] Update llama.py --- unsloth/models/llama.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ff4d19c543..d2bbb5a3a5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1010,7 +1010,6 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass def forward(self, x, position_ids=None, seq_len=None): - print(__LINE__, x, position_ids, seq_len) # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) @@ -1066,6 +1065,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() + print(__FILE__, __LINE__) # if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = dim @@ -1080,6 +1080,7 @@ def __init__(self, 
dim = None, max_position_embeddings=2048, base=10000, device= ) inv_freq = self.apply_scaling(inv_freq) self.register_buffer("inv_freq", inv_freq, persistent = False) + print(__FILE__, __LINE__) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) @@ -1089,6 +1090,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len + print(__FILE__, __LINE__) t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() @@ -1127,7 +1129,6 @@ def apply_scaling(self, freqs: torch.Tensor): def forward(self, x, position_ids=None, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] - print(__LINE__, x, position_ids, seq_len) if seq_len > self.current_rope_size: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) From 6157cef3d1a37bb432389686ba35038d751b6ba6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:16:31 -0700 Subject: [PATCH 0308/1088] Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d2bbb5a3a5..de9eb80dae 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1080,7 +1080,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= ) inv_freq = self.apply_scaling(inv_freq) self.register_buffer("inv_freq", inv_freq, persistent = False) - print(__FILE__, __LINE__) + print(__LINE__) # Build here to make `torch.jit.trace` work. 
self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) @@ -1090,7 +1090,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len - print(__FILE__, __LINE__) + print(__LINE__) t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() From 5da00a946e2af9ebfd1aaf1f3885e94b628745a3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:16:40 -0700 Subject: [PATCH 0309/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index de9eb80dae..3f358fe675 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1065,7 +1065,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - print(__FILE__, __LINE__) + print(__LINE__) # if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = dim From 2ff7d8368c44c78db1e8cd10326b3c88055d8832 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:18:12 -0700 Subject: [PATCH 0310/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3f358fe675..3085ccd0b0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1065,7 +1065,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - print(__LINE__) + print(1068) # if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = 
dim @@ -1080,7 +1080,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= ) inv_freq = self.apply_scaling(inv_freq) self.register_buffer("inv_freq", inv_freq, persistent = False) - print(__LINE__) + print(1083) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) @@ -1090,7 +1090,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len - print(__LINE__) + print(1093) t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() From 7c441f3480b217e1909d7b7eb53eb77a6481c7fc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:21:29 -0700 Subject: [PATCH 0311/1088] Update llama.py --- unsloth/models/llama.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3085ccd0b0..f9981f56ea 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1065,9 +1065,15 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - print(1068) - # if config is not None: return # [TODO] Hack to pass in config - need to remove later - + if config is not None: + # [TODO] Hack to pass in config - need to remove later + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base @@ -1080,7 +1086,6 @@ def 
__init__(self, dim = None, max_position_embeddings=2048, base=10000, device= ) inv_freq = self.apply_scaling(inv_freq) self.register_buffer("inv_freq", inv_freq, persistent = False) - print(1083) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) @@ -1090,7 +1095,6 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len - print(1093) t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() From 5d9245660fd3a739d992f4a0e717ee8c85bdb635 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:22:27 -0700 Subject: [PATCH 0312/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f9981f56ea..8fc480d149 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1073,7 +1073,7 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= device = "cuda" max_position_embeddings = config.max_position_embeddings pass - + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base From 4a3fddd055333f2eeb4ba58cdbf374e449ce3c3a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:23:00 -0700 Subject: [PATCH 0313/1088] Update llama.py --- unsloth/models/llama.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8fc480d149..474dad329e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1067,13 +1067,14 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= super().__init__() if config is not None: # [TODO] Hack to pass in config - need to 
remove later + print(1) base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 dim = int((config.hidden_size // config.num_attention_heads)) device = "cuda" max_position_embeddings = config.max_position_embeddings pass - + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base From ca3a1b7315c54ccffafe60b5c7abe6869cd7be6a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:23:18 -0700 Subject: [PATCH 0314/1088] Update llama.py --- unsloth/models/llama.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 474dad329e..aef04d604c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -981,8 +981,16 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - if config is not None: return # [TODO] Hack to pass in config - need to remove later - + if config is not None: + # [TODO] Hack to pass in config - need to remove later + print(2) + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base From b93a75778e2ef0b7c9b83b1cc329c6e2f7649b73 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:24:58 -0700 Subject: [PATCH 0315/1088] Update llama.py --- unsloth/models/llama.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index aef04d604c..338ae0a7ce 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -983,7 +983,6 @@ def __init__(self, dim = 
None, max_position_embeddings=2048, base=10000, device= super().__init__() if config is not None: # [TODO] Hack to pass in config - need to remove later - print(2) base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 dim = int((config.hidden_size // config.num_attention_heads)) @@ -1075,7 +1074,6 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= super().__init__() if config is not None: # [TODO] Hack to pass in config - need to remove later - print(1) base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 dim = int((config.hidden_size // config.num_attention_heads)) From c86b13d46512c5e7a8b2221e885a3a00eb0ad59a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:40:49 -0700 Subject: [PATCH 0316/1088] Llama 3.1 (#797) * Llama 3.1 * Update _utils.py * Llama 3.1 * Update _utils.py * Update llama.py * Update llama.py * hack for rotary * patch RoPE * refix rope * Update _utils.py * Update llama.py * Llama 3.1 check * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --- README.md | 5 +- pyproject.toml | 4 +- unsloth/models/_utils.py | 98 ++++++++++++++++++++++++++++++- unsloth/models/gemma.py | 11 +++- unsloth/models/llama.py | 123 ++++++++++++++++++++++++++++++++++++++- unsloth/models/loader.py | 15 ++++- unsloth/models/mapper.py | 16 +++++ 7 files changed, 258 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 05977bad73..c666f2d9c8 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | +| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | | **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | @@ -32,13 +32,14 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -- **Kaggle Notebooks** for [Llama 3 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run [Llama 3 conversational 
notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. 
diff --git a/pyproject.toml b/pyproject.toml index 29b35577e9..829b35ad3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.42.3", + "transformers>=4.43.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -188,7 +188,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.42.3", + "transformers>=4.43.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 466a5fee70..b021e89e9d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -33,6 +33,7 @@ "unsloth_offloaded_gradient_checkpoint", "torch_compile_options", "patch_linear_scaling", + "patch_llama_rope_scaling", "check_nvidia", "create_boolean_mask", "torch_amp_custom_fwd", @@ -332,7 +333,13 @@ def patch_tokenizer(model, tokenizer): Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! Fixes https://github.com/unslothai/unsloth/issues/5 """ - possible_reserved_tokens = ("<|reserved", "<|placeholder", "[control") + possible_reserved_tokens = ( + "<|reserved", # Llama-3 + "<|placeholder", # Phi-3 + "[control", # Forgot where lol + "", # Mistral Nemo + "<|finetune_right_pad_id|>", # Llama-3.1 + ) if model is not None: model.config.update({"unsloth_version" : __version__}) @@ -745,7 +752,7 @@ def patch_linear_scaling( fix_rope_function = """ if getattr(self.config, "rope_scaling", None) is None: self.rotary_emb = {rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) @@ -754,7 +761,7 @@ def patch_linear_scaling( scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": self.rotary_emb = {scaled_rope_function}( - self.head_dim, + dim = self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, @@ -779,6 +786,91 
@@ def patch_linear_scaling( pass +# Patches for Llama-3 LlamaExtendedRotaryEmbedding +def patch_llama_rope_scaling( + model_name = "llama", + rope_module = None, + scaled_rope_module = None, + extended_rope_module = None, + attention_module = None, +): + assert(\ + rope_module is not None and \ + scaled_rope_module is not None and \ + extended_rope_module is not None + ) + assert(attention_module is not None) + + rope_name = rope_module.__name__ + scaled_rope_name = scaled_rope_module.__name__ + model_filepath = f"transformers.models.{model_name}.modeling_{model_name}" + exec_code = \ + f"import torch.nn as nn\n"\ + f"from typing import Union, Optional, List, Any, Callable, Tuple\n"\ + f"from {model_filepath} import logger, "\ + f"{model_name.title()}Attention, {model_name.title()}Config" + + try: + function = inspect.getsource(attention_module.__init__) + except: + # Most likely already patched! + return None, None + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + init_name = f"{model_name.title()}Attention__init__" + function = function.replace("def __init__", f"def {init_name}") + function = function.replace( + "super().__init__()", + f"super({model_name.title()}Attention, self).__init__()", + ) + fix_rope_function = """ + if getattr(self.config, "rope_scaling", None) is None: + self.rotary_emb = {rope_function}( + dim = self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type1 = self.config.rope_scaling.get("type", None) + scaling_type2 = self.config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + scaling_factor = self.config.rope_scaling.get("factor") + + if scaling_type == "linear": + self.rotary_emb = {scaled_rope_function}( + dim = self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, 
+ ) + elif scaling_type == "llama3": + self.rotary_emb = {extended_rope_function}( + dim = self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {{scaling_type}}") + pass + """ + fix_rope_function = fix_rope_function.format( + rope_function = rope_module.__name__, + scaled_rope_function = scaled_rope_module.__name__, + extended_rope_function = extended_rope_module.__name__, + ) + rotary_emb = re.findall( + "self.rotary_emb = .+?\)", function, + flags = re.DOTALL | re.MULTILINE, + ) + if len(rotary_emb) == 0: return None, function + rotary_emb = rotary_emb[0] + function = function.replace(rotary_emb, fix_rope_function, 1) + function = exec_code + "\n\n" + function + return init_name, function +pass + + def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're working on it! output = np.array([0,]) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index ce89ad3be6..e3f1e615db 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -205,8 +205,11 @@ class GemmaFixedRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. 
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): super().__init__() + if config is not None: return # [TODO] Hack to pass in config - need to remove later self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base @@ -264,9 +267,11 @@ class GemmaFixedLinearScalingRotaryEmbedding(GemmaFixedRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + config = None, # [TODO] Hack to pass in config - need to remove later + ): self.scaling_factor = scaling_factor - super().__init__(dim, max_position_embeddings, base, device) + super().__init__(dim = dim, max_position_embeddings = max_position_embeddings, base = base, device = device, config = config) pass def _set_cos_sin_cache(self, seq_len, device, dtype): diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ff51b90b84..338ae0a7ce 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -977,8 +977,19 @@ class LlamaRotaryEmbedding(torch.nn.Module): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. 
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): super().__init__() + if config is not None: + # [TODO] Hack to pass in config - need to remove later + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass + self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base @@ -1030,9 +1041,11 @@ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): # Fixes https://github.com/huggingface/transformers/pull/28837 # https://github.com/microsoft/DeepSpeed/issues/4932 # The precision of RoPE buffers is not correct, so we cast to int64. - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, + config = None, # [TODO] Hack to pass in config - need to remove later + ): self.scaling_factor = scaling_factor - super().__init__(dim, max_position_embeddings, base, device) + super().__init__(dim = dim, max_position_embeddings = max_position_embeddings, base = base, device = device, config = config) pass def _set_cos_sin_cache(self, seq_len, device, dtype): @@ -1052,6 +1065,99 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): pass +# See https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/rotary_embedding.py#L736 +# For Llama 3.1 +class LlamaExtendedRotaryEmbedding(torch.nn.Module): + def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device=None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): + 
super().__init__() + if config is not None: + # [TODO] Hack to pass in config - need to remove later + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(4 * 8192, self.max_position_embeddings) + + # Normal Llama-3 RoPE + inv_freq = 1.0 / ( + self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim) + ) + inv_freq = self.apply_scaling(inv_freq) + self.register_buffer("inv_freq", inv_freq, persistent = False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) + pass + + def _set_cos_sin_cache(self, seq_len, device, dtype): + # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and + # in FP32. They are applied (multiplied) in FP32 as well. 
+ self.current_rope_size = seq_len + + t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype=dtype, device=device, non_blocking=True), persistent=False) + pass + + # From https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/api/model.py#L41 + def apply_scaling(self, freqs: torch.Tensor): + # Values obtained from grid search + scale_factor = 8 + low_freq_factor = 1 + high_freq_factor = 4 + old_context_len = 8192 # original llama3 length + + low_freq_wavelen = old_context_len / low_freq_factor + high_freq_wavelen = old_context_len / high_freq_factor + new_freqs = [] + for freq in freqs: + wavelen = 2 * math.pi / freq + if wavelen < high_freq_wavelen: + new_freqs.append(freq) + elif wavelen > low_freq_wavelen: + new_freqs.append(freq / scale_factor) + else: + assert low_freq_wavelen != high_freq_wavelen + smooth = (old_context_len / wavelen - low_freq_factor) / ( + high_freq_factor - low_freq_factor + ) + new_freqs.append((1 - smooth) * freq / scale_factor + smooth * freq) + return torch.tensor(new_freqs, dtype=freqs.dtype, device=freqs.device) + pass + + def forward(self, x, position_ids=None, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.current_rope_size: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype = x.dtype), + self.sin_cached[:seq_len].to(dtype = x.dtype), + ) + pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = int(round(seq_len 
/ 8192)) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass +pass + + def _wrap_fast_inference(generate, device_type, dtype, model): # Wraps inference with bfloat16 / float16 @torch.inference_mode @@ -1108,6 +1214,17 @@ class FastLlamaModel: @staticmethod def pre_patch(): + init_name, function = patch_llama_rope_scaling( + model_name = "llama", + rope_module = LlamaRotaryEmbedding, + scaled_rope_module = LlamaLinearScalingRotaryEmbedding, + extended_rope_module = LlamaExtendedRotaryEmbedding, + attention_module = LlamaAttention, + ) + if init_name is not None: + exec(function, globals()) + LlamaAttention.__init__ = eval(init_name) + pass LlamaAttention .forward = LlamaAttention_fast_forward LlamaSdpaAttention .forward = LlamaAttention_fast_forward LlamaFlashAttention2.forward = LlamaAttention_fast_forward diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0f170597b1..ece8af2821 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -27,6 +27,7 @@ SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") +SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.1") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -130,7 +131,19 @@ def from_pretrained( model_type = model_config.model_type - if model_type == "llama": dispatch_model = FastLlamaModel + if model_type == "llama": + scaling_type1 = model_config.rope_scaling.get("type", None) + scaling_type2 = model_config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + + if scaling_type == "llama3" and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ + f"The minimum required version is 4.43.1\n"\ + f'Try `pip install --upgrade 
"transformers>=4.43.1"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastLlamaModel elif model_type == "mistral": dispatch_model = FastMistralModel elif model_type == "gemma": if not SUPPORTS_GEMMA: diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 38cbdbe992..fc13c94e84 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,6 +218,22 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), + "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B", + "meta-llama/Meta-Llama-3.1-8B", + ), + "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B-Instruct", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + ), + "unsloth/Meta-Llama-3.1-70B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B", + "meta-llama/Meta-Llama-3.1-70B", + ), + "unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B-Instruct", + "meta-llama/Meta-Llama-3.1-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From 22968a2134f3fb265a6158610a8ef173ba9547aa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 11:51:08 -0700 Subject: [PATCH 0317/1088] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c666f2d9c8..e7ef854cf2 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ unsloth logo - + @@ -22,7 +22,7 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2x faster | 60% less | +| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | | **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | @@ -39,7 +39,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) both Base and Instruct now supported +- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. 
From 824511e265ff9c45b2448d4c89c93d0306c42741 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 12:07:27 -0700 Subject: [PATCH 0318/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e7ef854cf2..1c98c43f17 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | | **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | -- **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language From 777453967fc8476a846983e9c5eeab3382b88543 Mon Sep 17 00:00:00 2001 From: 
Daniel Han Date: Tue, 23 Jul 2024 12:12:29 -0700 Subject: [PATCH 0319/1088] Update loader.py --- unsloth/models/loader.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index ece8af2821..85416b81bd 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -132,9 +132,12 @@ def from_pretrained( model_type = model_config.model_type if model_type == "llama": - scaling_type1 = model_config.rope_scaling.get("type", None) - scaling_type2 = model_config.rope_scaling.get("rope_type", None) - scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + scaling_type = None + if getattr(model_config, "rope_scaling", None) is not None: + scaling_type1 = model_config.rope_scaling.get("type", None) + scaling_type2 = model_config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + pass if scaling_type == "llama3" and not SUPPORTS_LLAMA31: raise ImportError( From caa402828715d428b5426955df8fecc8e3fe1c80 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 12:25:24 -0700 Subject: [PATCH 0320/1088] Update _utils.py --- unsloth/models/_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b021e89e9d..5a2e85997f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -393,7 +393,10 @@ def patch_tokenizer(model, tokenizer): tokenizer.pad_token = possible_pad_token if model is not None: config = model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - pass + else: + if model is not None: + if model.config.pad_token_id is None: + config = model.config.update({"pad_token_id" : tokenizer.pad_token_id}) return model, tokenizer pass From 4dd4ad2104ae9865a029f0408df89c7121f353e9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 12:27:46 -0700 Subject: [PATCH 0321/1088] Update llama.py 
--- unsloth/models/llama.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 338ae0a7ce..719aee5374 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1178,10 +1178,11 @@ def _fast_generate(*args, **kwargs): kwargs.pop("token_type_ids", None) # Check pad_token - kwargs["pad_token_id"] = kwargs.pop( - "pad_token_id", - getattr(model.config, "eos_token_id", None), - ) + model_eos_token_id = getattr(model.config, "eos_token_id", None) + if hasattr(model_eos_token_id, "__iter__"): + model_eos_token_id = model_eos_token_id[0] + + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) # Set pad token # old_pad_token_id = getattr(model.config, "pad_token_id", None) From cc11b7886138e45690a459019f57c53675a70623 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 12:28:12 -0700 Subject: [PATCH 0322/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 719aee5374..ba4362b3cd 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1179,7 +1179,7 @@ def _fast_generate(*args, **kwargs): # Check pad_token model_eos_token_id = getattr(model.config, "eos_token_id", None) - if hasattr(model_eos_token_id, "__iter__"): + if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): model_eos_token_id = model_eos_token_id[0] kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) From d1f3b6c1c4f69cd09ebdcab014bd72ac1217ee71 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 13:14:21 -0700 Subject: [PATCH 0323/1088] Create Run.png --- images/Run.png | Bin 0 -> 11471 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Run.png diff --git a/images/Run.png b/images/Run.png new file mode 100644 index 
0000000000000000000000000000000000000000..fd737aa4d6e3684a0ae3405565f95d52e521b785 GIT binary patch literal 11471 zcmXw9bwHHQ)29*X1}Om%NkJr}5hNv)?hffZjxGrS2`Q-qM7lw`r6o?fkE0uoZg`*b z`+NU5j@#$ishypj&+L3rQ<1}e{NynT3JSi0yv#=w6f_9%fBz#a;IEYUvKsJ(<1DZ1 zhJu1iiu|CWq-9W{pl}8$$Vh2`GxisKd^OH8AC4$ZpK8lL#X&VA;a)a-L>UUtBksW+ zdu&$gVLDnoIT>u{X=hlfH?OG`FnP9lU%ZQNr>`+Fr}&|h-6;DzI~vpHK$71qGTe+f zaX!J!xA#JLZ{NSNwi-!wvpV({r$0gYmg*+{=Dii{L~QBg3Y0G4NNyG--jTgpPiyit zZf=7D_Zd2GAokX}WF`|6(`Y$80!=PRFPpsZliNIQB6@So62Tia>xmN~LrF4u>|WaT zuj2RC+_KqZ#Kgp#nWB{mw#U4+lGJK=_QKmx^r_^j+81D~Ui4_p5zGTOOQcm5JW8CMpq^)BO|j=N{MJ)732xtTnc3ASPH7hWE;$E+FoJmXlU3J z>5xE|KpD&6k^|oSA<2)RgtNdIs3-+JJ{kIT^O0WiIy3hsZ2}T8X(^!lyWVOMh+^N; zn?r0;$X4A^3APA^P+;9)l7-l}1rZSuGaT5=Zi(g=anw|)Kb5u>JNbwk@L%+N_vHP` zeaXNLP>I=ziI++?vG_L6^GY%0FL|&v76PS`28>6wV*duoCNN%szN`NQp@~IuA;oYf z9~~HD24?~P)T53TuwXpstf!w{gtuC$sb+nK^+|}eakMJFNMIKF(MU2gFx^a1R2DRg zDk@J#koHa;vG?UDtCnt6UHV(iIv`&J1`AFDF7x;;Z3IAXS4IhOE8dH-C>Ka(tprdf zu9A5yTQ?gS@4;mB6B}v(r!(QT{eoPNQ8sMm-4^N36@^qqo#W_3=#fEe1k*@)NeI+= zs-dDx?!?*hco1teyTI_jq0+#EvD$8Pi)?F6+78P~l-^35LLT5k|d}D~zRUV@ehgsTE#8BR6flBlx z+7v3L3MG;SNzh}S>WOzMlw-;B!JEn*^O8;Dp~x9BmG05+*NbcV9C09vf!MqoEL+7M zfVlC0y?Cl8L#T8IT^t`lsydk9+y%HuJ-)p4M18p&92Jv60BH@Z57Z@?W<)X=wlcFQ zmtl=^eg!_qgkf{LS7IT_l&`!sc@wv%fvSQDb&vjA#4-I2(Chc60UZ75@Rv*xV$pG4 zf4B?5o<1=AvqXecCkWl}^KUxQgrIiRD^pknQb=EBhTO%EI;zd#eBkFM?``QAdi{`Q zkiQw}?zh5Oo-=I-wpN8JM|)u+m0cm&G_&{xS)S1q3bq!6Uu^9lH6j*lB4RF?g;|Kx zcAiL?fT+%q&coE1`Za#m-qI71fqwgY!w<<$e#<{7e$n3C8j*o_JJq`%g&eE}T-3R) zk^^jo8i3|K^UDuOvI>ID++{gi$q1LyzKpm6^Bq2%?&u$Ai?E zX%-{;y)|1&$R>ZsR|+R}U`Xcop=`9hH8n@bCXu~-??1`Ev+}-zu#`TAECx{q!6+P5 zk?&gG&Z;B$o@GINSS|hJ&k@(6(@<@t>P+3ImT)iL*nt3abJUBsL!x3ss2qBZ{@Ad`4of!_3v;% zP5P(8=w1O8^=P7G@TTZfHn=O2*fTvAGYc8@AuK3r$3pfW*bC&Cr)i)=YxJYL>KA@5Tv<67Fhyb-0(D?$1}#@Ef}^;fG}SIw#|Osh9S=9pW-dgf=_DNVA?&Aly%zsg}!s;)(U7)BDR1}KFyqZ-sF(j3wb-$t!N zljh`96z1K#ZOb@=rsAc2U(vxQr`ge|u_4TA*WTB`N6P9_ht3>Cuz7qXVl9FnI#NmH zrz!BrB-ujv+jQ8@cQY4sV=4$t~@1f(fwxJaQECJYp`d 
z4`=hlGaWj`D^TKf7(8Z&`B`9}Y0CX?!ZuZJ|K`Yf+pKP33k-U}BH#5=-g0E+h?ra$ z^*I0MNkY#mH9!C2_D6tvcV-i+!~r6mE*cC%2cgcMPfU0yH<~{pwYC;ElfYx^gL?6 zb-ouxqOvuEn@LPCe)mBrd8YdSU=h{bCI+w=S-YZ%x~gs%*>(+n^@HbYX^X}Dg}V0Q z+wDGndn2QF@8Z)=qawtv31V`cwPKcT-kD$S{E=wJy(7A$c0OHuS?5{YN<>8!6Zyl? z+mT7IO^Dl8xWuAQf{iQ#xJE0q+ab4?S5;uMW)<^5K+u>8DIAp)V&dxKZ;jsm;(U~A@o}cmPyT>2 zh8xbFAEI1I~)E1Y8H4x@oE$i-f6(Qn(A=;JL^I&x^Ay7rr z6$v@HtP!=ZsZ+7zFBROHq9aytvp(gnWFlR*o4ufP-rp6mN07k))B6$5DiFqSyChmL z=$v#YA@uFiV)z12#y&WF@L?@>TUuf%u7daeCK7 zDLNiBo>9b7ZFqVZlF7};lp*06H_p=V_T2|DJL4tuowi2IA^~k6E|X9cm|6^zcDhBo z)ykAhk*aUG=^4zNYd6-$R}&J3IRt1a?#)Z@C&lPrAsly#_1`aQ`-#1id8Qa0<`lfC zgN^$|hY}|6!v1EiZs#dfYiE}$&Fli;fqMv=Hkrk_US-0 ztF`@OmoM~VZX~FMC5}1gk{q>3TzgAXlC#Cq8cqZUUB!nw7f@v;fzgvPb9}Y7BOagn z3H7GaIDQG8xG~wD1d$>_HuRN#D%)QL;*FWQJ;{0~?Dc}qU}Giuz8=N4rW-;frZ>z7 zi4TLt$Fh1)S~VPsmb{@)g}E*~1cM&5Z%FK%{Cu-z3t{AQ5$=<9x=H>8}93 z0rk3%rTb5guiN|Ipd>ta5HY`Wy%7BqPnmZ#g>DJ#c>XZe-LA=#lRMa9`^e?rYCj{M zm+8NC-(apSk{C#q>qEjSH7f7C5w0E=A`;2?P45gpOJPI^PKvP+2(G zXOE?lR;45uySlz}++5vJ!{_@TB4`0Birs%OnZf6%!TM%-QHnGfL{m|bLxr=GL&gV} z8Kw!>t^?5Uzk>6byMC;v+nvAxce36qT%hEVIIF(Jb#{Ow@5loJ32z!p6n1sYroukC zkd6GwF_C>fn8j(n`h%T@9WQoy{F>)%$u&fn0N4#cw5xVpp@-d1#;9EIUgdZChsgMt zk&3RVjyw%;#oC1}bQQCL<|#IYFr4{(p|OBX$yyEQlTn`hIiVJEiRQ4HtlsN%h>4y9 zEH1KUe(0m3HB`L5n!-UQnXCQkZy!3IYeQT_f?!W;ZsfccoyB}{i0@{fZic6l00hE& z&p>9C%dLEP+*uP)-Z=S*P)jJ_8p@RqllY7?&L<610b<&=XS4<4Qk=7~bt3rB@4@0# z9#ikVADGp8dWxAR=#GxgX)Y&xd?Xyb6b&yUoi+ zM@zKkW=8cpWS|xr&FGLQ0$os$$Lc}Z38W7^5HqJDV$^;zec-fS>y>pfmO;Xxxc44T&4d~eHIEWy3iMV3#vZNkO2n$n20ic z86%Q_{9r!GiL`?ud-*=;3LIqy@ftzCEW_@Bn* ztuKc_*IR_f$K9tM_IjX>hfIfL`p!OHD&YKVbo^jaFb(oA#yMa9$irMS*xooxC#(un zt(00S|6t4qggogdbcAx1_3Au*ZN)9|kzBQ)m5ET48Qwf`hIyi%6&JBHj#CudikPYrJy<4o$ zbX~kFPnr(TLG+eJw9FI&EdJ2p)54ruatcmkna$m#-*POoSeN@KA@x-(_9PRtgSp=UPKi;2= z?|H1z+)S)9kRm;iZYQ4@>!?P>yOL7f7FPS4NDWInyBXstM44uGF#gR|0-p|~irK9E zCztMr*-u$RrN51QH|2Cu8N$zju%I$fT}*Jn4`gd2sY#aJPZaEfN6`lC1C{y_JeH0o z`}u%1UfeZ=4G)&gwWh(QiP~%YEBDzll1TpsW9WvqqCvRF=GH97Z<6Sy2xO^fMcW9w 
zMigt$PW_(t!R&WScZP>c7h2N!aW7o2ZkO{Rxup5lAaBuUWJ8$V|y$M(>9VQ-BzSpBT!xY>(UQVi#JI&ht2) zwM5`pQZM_$9=YR{XM>AOgn9%C@x6^J7Ecry8ww-8Mit8g z*4Uo1pWpCMY2u%saPe~LJP$OkPh)RwaXq0Jho(!$G&gA3;t|bm#&mJA@n55+Z3%HMR z`0Uj4mlX@TB#7|vN!9&zLP4kO;st|?Sl|FtS9byqTl4D>Rl9_g(0T`+yW8_m#iz&T z6`)E)9X={F>y*_&qoqhv|J2pyPI_3}3CPAhl6=;bBBV+vFrTHNt6vq=-rmpQb2bOi z$ffPIUbkAm1*`7MMXswRZ)Uw*5 z3-0i+#R=wyn*Rky*>s)g9O zv9{9i$wMZ%^6fD4yL|L#j;j3jeVhKcyzcUhR6Y@&znBxt3is`dofYKg;Z||YRhzEh zHuUbFkY5YR{L091xYDzsSGOLmynT9OZe5U`W~_7S+GuJ081SEy<*1GP=WE7OM_cni zhLSoLmQ*Wka^~4PR8;{)#qFk@d5PopG9bYaUZkIGl6yd8_QcSMZMfr@5@#)hNMW+o zIXvi03q|{v0kKropi!$}TEuj6N10{tb(+C-)dL%L)t4MqR@>rJEO*;>W^~z0LaH)l zqbbf`=hE{421ygPT-0?wkQjRS9z$1)PFwou4#|8;&tm`Ekd-kupBx8Rw({pzW~EI+ zg zbiIVDrj#kjUSW^jPZi(aBSG^UUh~m@DP@uJWuvZL!n8((NR`ok5JfwX&^Vs@SKH5@ zOsKSlo#*h78hDmgSu3g?t)TmdSOa;@uMbE2{(-MgQ2WaNu@X6BMEvRS>* zF4UXBC$|D6bt@V*pJS8U@KmQXw)4uHdYs&@+Co-?#obJ`_j45coi7dp{pFbFrgAWAmm9F*E_7*W6`sKfH-TS#ROeL^4ZA&=DWC)i}q{89%>HBFTF|HLXD}Ra71N~X8 z{h+CA`pvL8X&1eq4Yf!IJ+naJf|7`5wKu^&jIQZHSFH^jsiOws0T^Eh-qS@Sug*+W zW7ns)9Zm!cdffE~N2FuY;4MD=W=Dx5Wrz274LHNs;0OI}R6niFJh@xlUP$9uqsR2Ji&x8)r#& z4h5;CygfVLx@)PiS~eOl7=8X8s-Y?z4|>gTEz+`%mG4go zvo}DfLX~C2hw-^!Z&?-G380kHu(gDp;Fs^CCAWCt6*A5;lV3D8IN#2-x^s1vfAG*M z5_a@{)H}U|jw{Y4gZidJ;|0*f55&L}{YIyp7!5UtY<=w^|^-%j8}s)0|JY5k5^Cx11?<-)8*7U2)`t z$xp?lfXwIyy!+@}c4;!?3z6Y>aB>+4)OSXs1UaGE0O)qvf7NJJzU3soH@QMdQW(Mu z+{xHar~TDltDOH_g3WVrhEtB4iL##6^l+)m7<5ro8-PlDtK}k_;@A z?xj9;;{5DNhswkFpD@TS?7FAc)-2FZMVt6j9jE6et481+ zUqa({9^ZFwZSH^dlLEu6_FdSjb)MuDHgtmxc0l0L*GGF&zwJ_d#krJjkogQ4i7|UO_*-tz9Y*EP#h25c{rcyRqh zj%PRE#L!Q#wQSC>Ov8RF4cD^F+pK;Y1_^?=O8n~c<+}&?W8Y!{aLIHdSoE=VsCXn3 zNufu;D^KdTKx9Nd6+2D^tOEcyKDxQ7_T~2SADis&9R{^;X5E4ht3*Mdro0@|Xv@Ii z7DwgpkWT~DNi~~zqDR6BNhw44xV^g7hAv*V<9wM z7Y|VRjW*sO-tp^Dhb676n}p_!|4LH#_kD)>hsHe2tIS#`DI?l-@tV|LiKYqF|pG&dIF2nVoNaFfoT3qAwilfUY1^Z-@>iW2pl zZ`5%eWWryeEb&>8^1vGPnACMgrFIBubF|wLr5*?QSco9oqLYs;r-48{(iVXCy?#z8 zx?cAW4CbVwK>QV|SZL{)s1s~8rvCd0frRcu{XfmzxPVh)rn#qq2q^kXXAOa`H=ncF 
zN7|Kon+bM775F_m38|Q!fz~FP9U-lZm={P)60MK(&UtiV1uoxghy*G(mxw5k@BJrs&Vn)U4cKQ)eyaedF|TPTwB)0ty$=nVF8oou^F_`?%nHOTcLsf;BMTyR=FI;Oi(B$;N}_jRLn(MaP zE^Bc*fHL(==Ub+^d`oO09n<@Fg&ek}Z+@qx3}4hlTeS%?*5nTj^Ly;D$BoA`#TNqc zI?{;S^HR@fL{bw_jJ@|>qO8N}0$|YizJtsjfCIHR*PJWuwmG4 zi@>2RP4jM7-hFs!8I zDl2b-L1@LDy1W*N1dLmJ3VKqFDE?B4mx=C|6Jxui0|3Ty*@4EMMyj;Lae2~5N$0Av zmQq$g(;LX3W{y7$Ah8ZipJak|LNbY4teaH3yw(F|3!s}HDTBH~`V}G{ek{VT;?YB8 zlFB%2XDVe17*Xr|)w+78JvfEOHMZBgD2LmxFseCC02J8XyipUk*>z=fo7HI63aIFT zgp?bRXD#(6&6W(me4$r;+`4wf+SZl8!IcgGs7i~|8jE)Tho6?A5`xnK z)g`z;<>;vj7TOJNQj3UYG`1}V_VdWT7H0#shd^(fDb0GdTSpcN(2dMy$Tav0w^XN@!ztA!*j0>A@~$H>-Yo zSlaag>fOTmr5$&+u^40>lBa@_?QT_mM5s1X!7exD&Jy4LVT*|T-T5#vDNgtZWETY} zL({^Re#5xVd>iXv{HA@{o(~Jp%t%%J2;VerILA0*u>%z=Y3RPR!ME_}oc&nKBqvck z_;uDh&MLg`8#i+Ea4#B!P6J1zY*EO{)qvgVHpTex_Q&5ljk8%*PD&x;mdMLtky|qG z!bR+NfhAv#7T}&I&2aek=(>E8{w(he%VT7tjBB-ewdFj#k&dVRQJK3fG~fFkyeJ#v zPj|OCZk(q7J7!YY5O|G=HPO^!kaP}9HLyLjAh~>ba77rBc}1-p-sajwrtdp>%O7w# ziUsm5{R>=a0{a zwEs0i+!D=L2XGlGNuLq>2keIxZl5$-dXYnK5$XhfpC0Dm08tW`bJUVK|EgBF7ujXa z{<`}Ir9+POGh@ef`SEF$6q3YTV}{i->_0lbo0LR%>e|2s(=(xf-u}b_o0nj;7F#DS z!+9X;fv<~IXRm|&2EWdl0@nq|pSll}_z^%FE?+Q5*KI%FchWr_WEj}klfz=e{Jg-m zbAfiuZLxW5v@_C5OZ>w~vS;!CE>N8WdA@^WMVpOh9>QYi6Cb~?u6ohAb#b+wbusEb zqqLL(UP* z`BGchKCMBfQxb!gjI3vD&#+_EwA|yk21{nH22AHKSb>=zKK;8LKBW|GkG=L5=C*$O zstc;hVrSs=O7_`ktK3$|-@X|7Tvt10V~XhyO~+50r|1tzI5@umqL#H0a&B*>-v<|9 zzvvO#=jM(}oJ%;(!TE`=knN-*I)HAbpt!GKP#q3Hx@}?_>3YXSAJI)P!u%ich$c5XQ;;>%ss{xI-1Pm*NF90-7cW%q@CVQgI;o9SJ#F{~8lM>?%YL z&9L!xlEMRmHsb!*hA%RS0WGS734la)>xf7A|OY zNGp#J4J>yAZX1c?okU~eiIs7lfzd?KCNTfM8N#C@0Q)#lGGtQ<9)!a1UpK@iYP7wL zUUnyG^!R>rFgOFSS%gfuAqFod-*@;d+5&y|DsY7^^Gx^_TlP7gJP#mUMaS_|1`ebe z@pf+zDJJw(_6UrIq^IA?|4lBXfE$PSIlt(v!E1Q=xewXhB5%=1I76`)B8>wv`K3Ka zv?73{*?lKqV$q%*`b4y~L%Cy`(Gl4(CbO_>7d^CzZ^@hk>{!(gPDrieZO&1EzLPKH zounq>h^lw(-^KR%+F?<`d9Ce3l4uWJaR6)?a z7HXg+OdM*K;q7Vhy9CKTv9Pya11fCTKd?X{iV8#T&E@!KkKif-W~J6mcSeUsUNG7o z)(ASpt%(TGFaFUnt!&ubzB}W-iyZ>P9Bogz!$UH&s}?DAgD#z{RTB=Sfy@N4Ahv4O 
zW)L3_Mb*FEpyTTSSk9rNU#9(eFxEN~8~C~u{V|XCM}DBI4k^44KhT|M&A9qObD2QtOU(g>ZfhDS!&V&EV_?a{`h`7m{h_tsd!-Eb6NL{A>((XlC+^ zhfrwG9(fosB@5v#e2I6V5Ydn1{{hIuySdyRltU%`7yrNBM3O|TXwC#&(f4GA<0Q(R z^Am5owqYfkTEs6?&Hfx62>v~Q?BTq80=~QC!fl{$z*!wfw$((?fzY9-0VwRW z2>acCF#%HJ6o^q-%RdCUT>CN;H#s(u0_dLVC1U$%ezmVX znIQU&KaJll)Zppt7c{_YyGd%Pa#MOb@)Pc(=jhXyv*FZD?C`tQ`h+u7EYs5L#1NOx zkGARhoN=zV_f?X?`;$O-kh_?&BgZTYLnn1mudYjQ&|m literal 0 HcmV?d00001 From a96d16e44ba2a07f5b6cdc10919286c23a984fe1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 23 Jul 2024 15:08:09 -0700 Subject: [PATCH 0324/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1c98c43f17..4c1271396e 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3, Mistral, Phi-3 & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3.1, Mistral, Phi-3 & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) From bd180c13579f199516ac285ad724f99d11c562c3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 24 Jul 2024 14:05:31 -0700 Subject: [PATCH 0325/1088] Mistral --- unsloth/models/_utils.py | 4 ++-- unsloth/models/mapper.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5a2e85997f..213cb5b0ae 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.7" +__version__ = "2024.8" __all__ = [ "prepare_model_for_kbit_training", @@ -336,7 +336,7 @@ def patch_tokenizer(model, tokenizer): possible_reserved_tokens = ( "<|reserved", # Llama-3 "<|placeholder", # Phi-3 - "[control", # Forgot where lol + "[control", # Mistral type models "", # Mistral Nemo "<|finetune_right_pad_id|>", # Llama-3.1 ) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index fc13c94e84..462555f317 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -227,13 +227,20 @@ "meta-llama/Meta-Llama-3.1-8B-Instruct", ), "unsloth/Meta-Llama-3.1-70B-bnb-4bit" : ( - "unsloth/Meta-Llama-3.1-70B", "meta-llama/Meta-Llama-3.1-70B", ), + "unsloth/Meta-Llama-3.1-405B-bnb-4bit" : ( + "meta-llama/Meta-Llama-3.1-405B", + ), + "unsloth/Meta-Llama-3.1-405B-Instruct-bnb-4bit" : ( + "meta-llama/Meta-Llama-3.1-405B-Instruct", + ), "unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit" : ( - "unsloth/Meta-Llama-3.1-70B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct", ), + "unsloth/Mistral-Large-Instruct-2407-bnb-4bit" : ( + "mistralai/Mistral-Large-Instruct-2407", + ), } INT_TO_FLOAT_MAPPER = {} From 6e30a7a006d51dc5692f4687a5b38a19c7e48596 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 24 Jul 2024 23:45:39 -0700 Subject: [PATCH 0326/1088] Patch PEFT --- unsloth/models/llama.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ba4362b3cd..96eb5035e8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2063,9 +2063,9 @@ def patch_peft_model( (getattr(gate_proj, "base_layer", gate_proj).bias is None) and \ (getattr( up_proj, "base_layer", up_proj).bias is None) and \ (getattr(down_proj, "base_layer", down_proj).bias is None) and \ - (getattr(gate_proj, "lora_magnitude_vector", None) is None) and \ - (getattr( up_proj, "lora_magnitude_vector", None) is None) and \ - (getattr(down_proj, "lora_magnitude_vector", 
None) is None): + (len(getattr(gate_proj, "lora_magnitude_vector", [])) == 0) and \ + (len(getattr( up_proj, "lora_magnitude_vector", [])) == 0) and \ + (len(getattr(down_proj, "lora_magnitude_vector", [])) == 0): # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) @@ -2085,11 +2085,11 @@ def patch_peft_model( hasattr(k_proj, "lora_A") and \ hasattr(v_proj, "lora_A") and \ (getattr(q_proj, "base_layer", q_proj).bias is None) and \ - (getattr(q_proj, "base_layer", k_proj).bias is None) and \ - (getattr(q_proj, "base_layer", v_proj).bias is None) and \ - (getattr(q_proj, "lora_magnitude_vector", None) is None) and \ - (getattr(k_proj, "lora_magnitude_vector", None) is None) and \ - (getattr(v_proj, "lora_magnitude_vector", None) is None): + (getattr(k_proj, "base_layer", k_proj).bias is None) and \ + (getattr(v_proj, "base_layer", v_proj).bias is None) and \ + (len(getattr(q_proj, "lora_magnitude_vector", [])) == 0) and \ + (len(getattr(k_proj, "lora_magnitude_vector", [])) == 0) and \ + (len(getattr(v_proj, "lora_magnitude_vector", [])) == 0): layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 @@ -2106,7 +2106,7 @@ def patch_peft_model( o_proj = layer.self_attn.o_proj if hasattr(o_proj, "lora_A") and \ (getattr(o_proj, "base_layer", o_proj).bias is None) and \ - (getattr(o_proj, "lora_magnitude_vector", None) is None): + (len(getattr(o_proj, "lora_magnitude_vector", [])) == 0): layer.self_attn.apply_o = apply_lora_o n_o += 1 From 08d3ef4bb3a1da4de67c9e4135e4ea4838895164 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 00:17:19 -0700 Subject: [PATCH 0327/1088] Fix PEFT --- unsloth/models/_utils.py | 54 +++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 213cb5b0ae..23297cc4a8 100644 --- a/unsloth/models/_utils.py +++ 
b/unsloth/models/_utils.py @@ -404,32 +404,36 @@ def patch_tokenizer(model, tokenizer): # ============================================= # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. -from peft.tuners.lora.layer import LoraLayer -import inspect, re -try: - source = inspect.getsource(LoraLayer.update_layer) - text = "if weight is not None:\n" - start = source.find(text) + len(text) - end = source.find("self.to(weight.device)", start) - spaces = re.findall(r"^([ ]{1,})break", source, flags = re.MULTILINE)[0] - source = source.replace(source[start : end], spaces) - spaces = len(re.match(r"[\s]{1,}", source).group(0)) - lines = source.split("\n") - source = "\n".join(x[spaces:] for x in lines) - source = re.sub("([^\.])nn\.", r"\1torch.nn.", source) - source = source.replace("def update_layer", "def LoraLayer_update_layer") - exec(source, globals()) - - # Fix up incorrect downcasting of LoRA weights +from packaging import Version +from peft import __version__ +if Version(__version__) < Version("0.12.0"): from peft.tuners.lora.layer import LoraLayer - LoraLayer.update_layer = LoraLayer_update_layer - from peft.tuners.lora import LoraLayer - LoraLayer.update_layer = LoraLayer_update_layer -except: - logger.warning_once( - "Unsloth unsuccessfully patched LoraLayer.update_layer. Please file a bug report.\n"\ - "Luckily, your training run will still work in the meantime!" 
- ) + import inspect, re + try: + source = inspect.getsource(LoraLayer.update_layer) + text = "if weight is not None:\n" + start = source.find(text) + len(text) + end = source.find("self.to(weight.device)", start) + spaces = re.findall(r"^([ ]{1,})break", source, flags = re.MULTILINE)[0] + source = source.replace(source[start : end], spaces) + spaces = len(re.match(r"[\s]{1,}", source).group(0)) + lines = source.split("\n") + source = "\n".join(x[spaces:] for x in lines) + source = re.sub("([^\.])nn\.", r"\1torch.nn.", source) + source = source.replace("def update_layer", "def LoraLayer_update_layer") + exec(source, globals()) + + # Fix up incorrect downcasting of LoRA weights + from peft.tuners.lora.layer import LoraLayer + LoraLayer.update_layer = LoraLayer_update_layer + from peft.tuners.lora import LoraLayer + LoraLayer.update_layer = LoraLayer_update_layer + except: + logger.warning_once( + "Unsloth unsuccessfully patched LoraLayer.update_layer. Please file a bug report.\n"\ + "Luckily, your training run will still work in the meantime!" 
+ ) + pass pass # ============================================= From 66e0453ea85a33132c2e9b6c616726cc4bc0b0f1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 00:19:32 -0700 Subject: [PATCH 0328/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 96eb5035e8..5bc2983a22 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -96,7 +96,7 @@ def fix_prepare_inputs_for_generation(module): pass pass - +torch_matmul = torch.matmul def LlamaAttention_fast_forward_inference( self, hidden_states: torch.Tensor, @@ -238,10 +238,10 @@ def LlamaAttention_fast_forward_inference( if bsz == 1: Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows - A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + A = torch_matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) - A = torch.matmul(A, Vnn, out = Qn) + A = torch_matmul(A, Vnn, out = Qn) else: A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) pass From 7fccd21d9f1388ca51063455c4ffae8e7c06720c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 00:28:20 -0700 Subject: [PATCH 0329/1088] Update loader.py --- unsloth/models/loader.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 85416b81bd..6b83b8e739 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -95,6 +95,9 @@ def from_pretrained( model_name = _get_model_name(model_name, load_in_4bit) # First check if it's a normal model via 
AutoConfig + from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + was_disabled = are_progress_bars_disabled() + disable_progress_bars() try: model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) is_model = True @@ -129,6 +132,8 @@ def from_pretrained( model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) pass + if not was_disabled: enable_progress_bars() + model_type = model_config.model_type if model_type == "llama": From 9e1ad7c319e4b6d7412d5f9a104abceef29a7247 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 00:29:12 -0700 Subject: [PATCH 0330/1088] Update _utils.py --- unsloth/models/_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 23297cc4a8..9dc82f1e52 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -404,7 +404,6 @@ def patch_tokenizer(model, tokenizer): # ============================================= # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. -from packaging import Version from peft import __version__ if Version(__version__) < Version("0.12.0"): from peft.tuners.lora.layer import LoraLayer From 8e5054bbea23cb91628cfe8923696806ca4a6274 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 00:33:38 -0700 Subject: [PATCH 0331/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 9dc82f1e52..5a267a459c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -404,8 +404,8 @@ def patch_tokenizer(model, tokenizer): # ============================================= # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. 
-from peft import __version__ -if Version(__version__) < Version("0.12.0"): +from peft import __version__ as peft_version +if Version(peft_version) < Version("0.12.0"): from peft.tuners.lora.layer import LoraLayer import inspect, re try: From fd753fed99ed5f10ef8a9b7139588d9de9ddecfb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 25 Jul 2024 08:53:21 -0700 Subject: [PATCH 0332/1088] Update llama.py --- unsloth/models/llama.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5bc2983a22..bc434ecf19 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2063,9 +2063,9 @@ def patch_peft_model( (getattr(gate_proj, "base_layer", gate_proj).bias is None) and \ (getattr( up_proj, "base_layer", up_proj).bias is None) and \ (getattr(down_proj, "base_layer", down_proj).bias is None) and \ - (len(getattr(gate_proj, "lora_magnitude_vector", [])) == 0) and \ - (len(getattr( up_proj, "lora_magnitude_vector", [])) == 0) and \ - (len(getattr(down_proj, "lora_magnitude_vector", [])) == 0): + (len(getattr(gate_proj, "lora_magnitude_vector", []) or []) == 0) and \ + (len(getattr( up_proj, "lora_magnitude_vector", []) or []) == 0) and \ + (len(getattr(down_proj, "lora_magnitude_vector", []) or []) == 0): # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) @@ -2087,9 +2087,9 @@ def patch_peft_model( (getattr(q_proj, "base_layer", q_proj).bias is None) and \ (getattr(k_proj, "base_layer", k_proj).bias is None) and \ (getattr(v_proj, "base_layer", v_proj).bias is None) and \ - (len(getattr(q_proj, "lora_magnitude_vector", [])) == 0) and \ - (len(getattr(k_proj, "lora_magnitude_vector", [])) == 0) and \ - (len(getattr(v_proj, "lora_magnitude_vector", [])) == 0): + (len(getattr(q_proj, "lora_magnitude_vector", []) or []) == 0) and \ + (len(getattr(k_proj, 
"lora_magnitude_vector", []) or []) == 0) and \ + (len(getattr(v_proj, "lora_magnitude_vector", []) or []) == 0): layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 @@ -2106,7 +2106,7 @@ def patch_peft_model( o_proj = layer.self_attn.o_proj if hasattr(o_proj, "lora_A") and \ (getattr(o_proj, "base_layer", o_proj).bias is None) and \ - (len(getattr(o_proj, "lora_magnitude_vector", [])) == 0): + (len(getattr(o_proj, "lora_magnitude_vector", []) or []) == 0): layer.self_attn.apply_o = apply_lora_o n_o += 1 From 01c35f9e17cf455e97f7ce6cf55ecd653363433f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 26 Jul 2024 16:31:40 -0700 Subject: [PATCH 0333/1088] Update __init__.py --- unsloth/__init__.py | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 4640681543..265d08c90d 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -60,6 +60,37 @@ "We have some installation instructions on our Github page.") pass +# ============================================= +# Check if Unsloth's model list has been updated +import os, requests, inspect, re +import numpy as np +import subprocess + +try: + file_location = inspect.getfile(torch) + package, _ = os.path.split(file_location) + dist_packages, package = os.path.split(package) + old_mapper = os.path.join(dist_packages, "unsloth", "models", "mapper.py") + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with open(old_mapper, "r") as old_mapper: old_mapper = old_mapper.read() + with requests.get(new_mapper) as new_mapper: new_mapper = new_mapper.text + old_mapper = re.findall(r'\"unsloth\/([^\"]{1,})\-bnb\-4bit\" \: \(', old_mapper) + new_mapper = re.findall(r'\"unsloth\/([^\"]{1,})\-bnb\-4bit\" \: \(', new_mapper) + new_models = list(frozenset(new_mapper) - frozenset(old_mapper)) + + print(1) + if len(new_models) != 0: + warnings.warn( + f"Unsloth: Some new models 
including {new_models} have dropped!\n"\ + "If you want to try them out, please update Unsloth via:\n\n" + 'pip install --upgrade --force-reinstall --no-cache-dir \\\n "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' + ) + pass + del new_models, old_mapper, dist_packages, package, file_location +except: + pass +# ============================================= + # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) keynames = "\n" + "\n".join(os.environ.keys()) if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: @@ -103,11 +134,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 except: pass else: from triton.common.build import libcuda_dirs -import os -import re -import numpy as np -import subprocess - try: cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() From 08379f8a9cc0448b13897ffbf0897ad01f7549dc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 20:30:32 -0700 Subject: [PATCH 0334/1088] Edits --- pyproject.toml | 4 ++-- unsloth/__init__.py | 29 +---------------------------- unsloth/models/llama.py | 3 ++- unsloth/models/loader.py | 6 +++--- 4 files changed, 8 insertions(+), 34 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 829b35ad3b..3335a75933 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.43.1", + "transformers>=4.43.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -188,7 +188,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.43.1", + "transformers>=4.43.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 265d08c90d..db54c9a169 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -60,37 +60,10 @@ "We have some installation instructions on our Github page.") pass -# ============================================= -# Check if 
Unsloth's model list has been updated -import os, requests, inspect, re +import os, re import numpy as np import subprocess -try: - file_location = inspect.getfile(torch) - package, _ = os.path.split(file_location) - dist_packages, package = os.path.split(package) - old_mapper = os.path.join(dist_packages, "unsloth", "models", "mapper.py") - new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" - with open(old_mapper, "r") as old_mapper: old_mapper = old_mapper.read() - with requests.get(new_mapper) as new_mapper: new_mapper = new_mapper.text - old_mapper = re.findall(r'\"unsloth\/([^\"]{1,})\-bnb\-4bit\" \: \(', old_mapper) - new_mapper = re.findall(r'\"unsloth\/([^\"]{1,})\-bnb\-4bit\" \: \(', new_mapper) - new_models = list(frozenset(new_mapper) - frozenset(old_mapper)) - - print(1) - if len(new_models) != 0: - warnings.warn( - f"Unsloth: Some new models including {new_models} have dropped!\n"\ - "If you want to try them out, please update Unsloth via:\n\n" - 'pip install --upgrade --force-reinstall --no-cache-dir \\\n "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' - ) - pass - del new_models, old_mapper, dist_packages, package, file_location -except: - pass -# ============================================= - # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) keynames = "\n" + "\n".join(os.environ.keys()) if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bc434ecf19..d363539997 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -18,6 +18,7 @@ from ._utils import * from ._utils import __version__ from torch.nn.functional import scaled_dot_product_attention +from transformers import __version__ as transformers_version from transformers.models.llama.modeling_llama import ( logger, BaseModelOutputWithPast, @@ -1281,7 +1282,7 @@ def from_pretrained( max_memory = 
round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release {__version__}\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 6b83b8e739..ecf871d5d0 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -27,7 +27,7 @@ SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") -SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.1") +SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -147,8 +147,8 @@ def from_pretrained( if scaling_type == "llama3" and not SUPPORTS_LLAMA31: raise ImportError( f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ - f"The minimum required version is 4.43.1\n"\ - f'Try `pip install --upgrade "transformers>=4.43.1"`\n'\ + f"The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) dispatch_model = FastLlamaModel From f6c2b4aa7d99b16e43bc165fd75b10970e766af1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:16:33 -0700 Subject: [PATCH 0335/1088] Checks --- pyproject.toml | 6 ++- unsloth/models/_utils.py | 22 ++++++++++- unsloth/models/llama.py | 2 +- 
unsloth/models/loader.py | 81 +++++++++++++++++++++++++++++++++------- 4 files changed, 92 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3335a75933..6777f7c26e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,8 @@ huggingface = [ "trl>=0.7.9,<0.9.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", - "huggingface_hub[hf_transfer]", + "huggingface_hub", + "hf-transfer", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -196,7 +197,8 @@ colab-new = [ "wheel>=0.42.0", "numpy", "protobuf<4.0.0", - "huggingface_hub[hf_transfer]", + "huggingface_hub", + "hf-transfer", ] colab-no-deps = [ "accelerate>=0.26.1", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5a267a459c..a3263f85a9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -122,7 +122,8 @@ def patch_mistral_nemo_config(config): # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch -if Version(torch.__version__) < Version("2.4.0"): +torch_version = torch.__version__ +if Version(torch_version) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd torch_amp_custom_bwd = torch.cuda.amp.custom_bwd else: @@ -184,7 +185,7 @@ def patch_mistral_nemo_config(config): # Check TRL version from trl import __version__ as trl_version -if Version(xformers_version) >= Version("0.9.0"): +if Version(trl_version) >= Version("0.9.0"): raise ImportError( "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ "then press Disconnect Runtime and then Restart it.\n"\ @@ -199,7 +200,24 @@ def patch_mistral_nemo_config(config): ) pass +# Confirm versions # ============================================= +if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): + raise ImportError( + 
f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.24 for torch = {torch_version}." + ) +elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.26 for torch = {torch_version}." + ) +elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.27 for torch = {torch_version}." + ) +pass # ============================================= # Torch compile settings diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d363539997..6b16a4cc61 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1282,7 +1282,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index ecf871d5d0..2a7fa75fe4 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -35,9 +35,14 @@ pass -def _get_model_name(model_name, load_in_4bit = True): +def __get_model_name( + model_name, + load_in_4bit = True, + INT_TO_FLOAT_MAPPER = None, + FLOAT_TO_INT_MAPPER = None, +): - if not SUPPORTS_FOURBIT and model_name in INT_TO_FLOAT_MAPPER: + if not SUPPORTS_FOURBIT and model_name.lower() in INT_TO_FLOAT_MAPPER: model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] logger.warning_once( f"Unsloth: Your transformers version of {transformers_version} does not support native "\ @@ -46,25 +51,71 @@ def _get_model_name(model_name, load_in_4bit = True): f"to obtain the latest transformers build, then restart this session.\n"\ f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." ) + return model_name - elif not load_in_4bit and model_name in INT_TO_FLOAT_MAPPER: + elif not load_in_4bit and model_name.lower() in INT_TO_FLOAT_MAPPER: new_model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." # ) - model_name = new_model_name + return new_model_name - elif load_in_4bit and SUPPORTS_FOURBIT and model_name in FLOAT_TO_INT_MAPPER: + elif load_in_4bit and SUPPORTS_FOURBIT and model_name.lower() in FLOAT_TO_INT_MAPPER: new_model_name = FLOAT_TO_INT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ # f"We shall load `{new_model_name}` for 4x faster loading." 
# ) - model_name = new_model_name + return new_model_name pass - return model_name + return None +pass + + +def _get_new_mapper(): + try: + import requests + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") + exec(new_mapper, locals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER + except: + return {}, {} + pass +pass + + +def _get_model_name(model_name, load_in_4bit = True): + new_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + ) + if new_model_name is None and \ + model_name.count("/") == 1 and \ + model_name[0].isalnum(): + # Try checking if a new Unsloth version allows it! + NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() + upgraded_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + ) + if upgraded_model_name is not None: + raise NotImplementedError( + f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ + 'pip install --upgrade --force-reinstall --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' + ) + pass + pass + return new_model_name if new_model_name is not None else model_name pass @@ -98,16 +149,22 @@ def from_pretrained( from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() disable_progress_bars() + + autoconfig_error = None + peft_error = None try: model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) is_model = True - except: + except Exception as autoconfig_error: + autoconfig_error = str(autoconfig_error) is_model = False try: peft_config = PeftConfig .from_pretrained(model_name, token = token, revision = revision) is_peft = True - except: + except Exception as peft_error: + peft_error = str(peft_error) is_peft = False + pass # Cannot be both! if is_model and is_peft: @@ -118,11 +175,7 @@ def from_pretrained( "Please separate the LoRA and base models to 2 repos." ) elif not is_model and not is_peft: - raise RuntimeError( - f"Unsloth: `{model_name}` is not a base model or a PEFT model.\n"\ - "We could not locate a `config.json` or `adapter_config.json` file.\n"\ - "Are you certain the model name is correct? Does it actually exist?" 
- ) + raise RuntimeError(autoconfig_error or peft_error) pass # Get base model for PEFT: From 78fa9d058db4f039183da7a56e81a5cc4dfe289f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:30:48 -0700 Subject: [PATCH 0336/1088] Update _utils.py --- unsloth/models/_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a3263f85a9..04ffd2062b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -135,35 +135,36 @@ def patch_mistral_nemo_config(config): # ============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb -from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer +from transformers.utils.import_utils import _is_package_available major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = False if major_version >= 8: SUPPORTS_BFLOAT16 = True - try: - from flash_attn import flash_attn_func + if _is_package_available("flash_attn"): # Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl" try: from flash_attn.flash_attn_interface import flash_attn_cuda HAS_FLASH_ATTENTION = True except: - logger.warning_once( + print( "Unsloth: Your Flash Attention 2 installation seems to be broken?\n"\ "A possible explanation is you have a new CUDA version which isn't\n"\ "yet compatible with FA2? Please file a ticket to Unsloth or FA2.\n"\ - "We shall now use Xformers instead, which gets a 0.01% performance hit.\n"\ + "We shall now use Xformers instead, which does not have any performance hits!\n"\ "We found this negligible impact by benchmarking on 1x A100." ) HAS_FLASH_ATTENTION = False - except: + else: HAS_FLASH_ATTENTION = False else: # Tri Dao's benchmark shows xformers is faster for now. 
HAS_FLASH_ATTENTION = False pass + +from transformers.models.llama.modeling_llama import logger import xformers.ops.fmha as xformers xformers_attention = xformers.memory_efficient_attention from xformers import __version__ as xformers_version From 4ea5789db6d601e452a7a701fe5ac53669876856 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:32:39 -0700 Subject: [PATCH 0337/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 04ffd2062b..74dfe69878 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -156,7 +156,15 @@ def patch_mistral_nemo_config(config): "We shall now use Xformers instead, which does not have any performance hits!\n"\ "We found this negligible impact by benchmarking on 1x A100." ) + + # Stop Flash Attention from importing! + import transformers.utils.import_utils + transformers.utils.import_utils.is_flash_attn_2_available = lambda *args, **kwargs: False + import transformers.utils + transformers.utils.is_flash_attn_2_available = lambda *args, **kwargs: False + HAS_FLASH_ATTENTION = False + pass else: HAS_FLASH_ATTENTION = False else: From aab503ac80e2032805854c2ff211cdc2a4b23b3f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:35:24 -0700 Subject: [PATCH 0338/1088] Update loader.py --- unsloth/models/loader.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 2a7fa75fe4..615b8286fd 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -111,7 +111,8 @@ def _get_model_name(model_name, load_in_4bit = True): if upgraded_model_name is not None: raise NotImplementedError( f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ - 'pip install --upgrade --force-reinstall --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' + 'pip uninstall unsloth -y\n'\ + 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' ) pass pass From b310f5e06a6b00e18d66324c53a686626d0b9040 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:48:52 -0700 Subject: [PATCH 0339/1088] Update _utils.py --- unsloth/models/_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 74dfe69878..8435c834f4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -173,6 +173,22 @@ def patch_mistral_nemo_config(config): pass from transformers.models.llama.modeling_llama import logger + +# ============================================= +# Get Xformers +from xformers._cpp_lib import _register_extensions +try: + _register_extensions() # Check if C++ modules are loaded correctly +except Exception as error: + raise ImportError( + "Unsloth: Xformers was not installed correctly. "\ + "Please install xformers separately first. 
"\ + "Then confirm if it's correctly installed by running:\n"\ + "python -m xformers.info\n\n" + "Longer error message:\n" + str(error) + ) +pass + import xformers.ops.fmha as xformers xformers_attention = xformers.memory_efficient_attention from xformers import __version__ as xformers_version From 1f6705673883efbed5373d9bc845d56f40fd6dc9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:50:38 -0700 Subject: [PATCH 0340/1088] Update mapper.py --- unsloth/models/mapper.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 462555f317..a3191392ce 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,10 +218,10 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), - "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( - "unsloth/Meta-Llama-3.1-8B", - "meta-llama/Meta-Llama-3.1-8B", - ), + # "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( + # "unsloth/Meta-Llama-3.1-8B", + # "meta-llama/Meta-Llama-3.1-8B", + # ), "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" : ( "unsloth/Meta-Llama-3.1-8B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", From a8e7556fc6aaac537f67d418b2c862ac349e66cd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:52:06 -0700 Subject: [PATCH 0341/1088] Update loader.py --- unsloth/models/loader.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 615b8286fd..fc356c3c16 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -97,6 +97,7 @@ def _get_model_name(model_name, load_in_4bit = True): INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, ) + print(new_model_name) if new_model_name is None and \ model_name.count("/") == 1 and \ model_name[0].isalnum(): @@ -108,6 +109,7 @@ def _get_model_name(model_name, load_in_4bit = True): INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER = 
NEW_FLOAT_TO_INT_MAPPER, ) + print(upgraded_model_name) if upgraded_model_name is not None: raise NotImplementedError( f"Unsloth: {model_name} is not supported in your current Unsloth version! Please update Unsloth via:\n\n"\ From 38b5c77635f7a0febb694a45d41489c6c827f564 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:54:49 -0700 Subject: [PATCH 0342/1088] Update loader.py --- unsloth/models/loader.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index fc356c3c16..f06515e624 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -98,11 +98,11 @@ def _get_model_name(model_name, load_in_4bit = True): FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, ) print(new_model_name) - if new_model_name is None and \ - model_name.count("/") == 1 and \ - model_name[0].isalnum(): + if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): # Try checking if a new Unsloth version allows it! 
NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() + print(NEW_INT_TO_FLOAT_MAPPER) + print(NEW_FLOAT_TO_INT_MAPPER) upgraded_model_name = __get_model_name( model_name = model_name, load_in_4bit = load_in_4bit, From 4a8c9e88249ceabfd955c79c9eacef221e0ef175 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:56:59 -0700 Subject: [PATCH 0343/1088] Update _utils.py --- unsloth/models/_utils.py | 66 +++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8435c834f4..994f97ab73 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -176,21 +176,6 @@ def patch_mistral_nemo_config(config): # ============================================= # Get Xformers -from xformers._cpp_lib import _register_extensions -try: - _register_extensions() # Check if C++ modules are loaded correctly -except Exception as error: - raise ImportError( - "Unsloth: Xformers was not installed correctly. "\ - "Please install xformers separately first. 
"\ - "Then confirm if it's correctly installed by running:\n"\ - "python -m xformers.info\n\n" - "Longer error message:\n" + str(error) - ) -pass - -import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention from xformers import __version__ as xformers_version # Temporarily disable 0.0.27 and higher - inference issues if Version(xformers_version) >= Version("0.0.27"): @@ -208,25 +193,6 @@ def patch_mistral_nemo_config(config): ) pass -# Check TRL version -from trl import __version__ as trl_version -if Version(trl_version) >= Version("0.9.0"): - raise ImportError( - "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ - "then press Disconnect Runtime and then Restart it.\n"\ - "\n"\ - "%%capture\n" - "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" - '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ - '\n'\ - f"Otherwise in local machines, your TRL version of {trl_version} is too new.\n"\ - 'Please downgrade TRL via `pip install --force-reinstall "trl<0.9.0"' - ) -pass - -# Confirm versions -# ============================================= if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): raise ImportError( f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ @@ -244,6 +210,38 @@ def patch_mistral_nemo_config(config): ) pass +from xformers._cpp_lib import _register_extensions +try: + _register_extensions() # Check if C++ modules are loaded correctly +except Exception as error: + raise ImportError( + "Unsloth: Xformers was not installed correctly.\n"\ + "Please install xformers separately first.\n"\ + "Then confirm if it's correctly installed by running:\n"\ + "python -m xformers.info\n\n" + "Longer error message:\n" + str(error) + ) +pass +import 
xformers.ops.fmha as xformers +xformers_attention = xformers.memory_efficient_attention + +# Check TRL version +from trl import __version__ as trl_version +if Version(trl_version) >= Version("0.9.0"): + raise ImportError( + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + "%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your TRL version of {trl_version} is too new.\n"\ + 'Please downgrade TRL via `pip install --force-reinstall "trl<0.9.0"' + ) +pass + # ============================================= # Torch compile settings From c03fd228a240eb97d6ee386668c30b8352779935 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:58:15 -0700 Subject: [PATCH 0344/1088] Update loader.py --- unsloth/models/loader.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f06515e624..21e58bb3dc 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -75,17 +75,17 @@ def __get_model_name( def _get_new_mapper(): - try: - import requests - new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" - with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text - new_mapper = new_mapper\ - .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ - .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") - exec(new_mapper, locals()) - return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER - except: - return {}, {} + # try: + import requests + new_mapper = 
"https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") + exec(new_mapper, locals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER + # except: + # return {}, {} pass pass From 9d5195296f7dcaac51519659fda13ee4027b2b1a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:59:16 -0700 Subject: [PATCH 0345/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 21e58bb3dc..074e837398 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -82,7 +82,7 @@ def _get_new_mapper(): new_mapper = new_mapper\ .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") - exec(new_mapper, locals()) + exec(new_mapper) return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER # except: # return {}, {} From ddc2dde97c2d234e75db032e41c7fafe2b6f384f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 22:59:47 -0700 Subject: [PATCH 0346/1088] Update loader.py --- unsloth/models/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 074e837398..9f2f53aa82 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -79,6 +79,7 @@ def _get_new_mapper(): import requests new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] new_mapper = new_mapper\ .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ 
.replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") From b6ef70963011888c679ceaa6a929ce2cb32be1c7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 23:00:47 -0700 Subject: [PATCH 0347/1088] Update loader.py --- unsloth/models/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 9f2f53aa82..6bfbf0f288 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -84,6 +84,7 @@ def _get_new_mapper(): .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") exec(new_mapper) + print(new_mapper) return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER # except: # return {}, {} From ed8bc007c6beef621941a4ee5882c6c55c0f1df5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 23:02:10 -0700 Subject: [PATCH 0348/1088] Update loader.py --- unsloth/models/loader.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 6bfbf0f288..f321f23ed0 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -83,8 +83,7 @@ def _get_new_mapper(): new_mapper = new_mapper\ .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") - exec(new_mapper) - print(new_mapper) + exec(new_mapper, globals()) return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER # except: # return {}, {} From e1d61ce5d9f4c88af7d8c0c5d4ad9ca65b9e9327 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 23:03:13 -0700 Subject: [PATCH 0349/1088] Update loader.py --- unsloth/models/loader.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f321f23ed0..15d3f952bb 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -75,18 +75,18 @@ def __get_model_name( def _get_new_mapper(): 
- # try: - import requests - new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" - with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text - new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] - new_mapper = new_mapper\ - .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ - .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") - exec(new_mapper, globals()) - return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER - # except: - # return {}, {} + try: + import requests + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") + exec(new_mapper, globals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER + except: + return {}, {} pass pass @@ -102,15 +102,12 @@ def _get_model_name(model_name, load_in_4bit = True): if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): # Try checking if a new Unsloth version allows it! NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() - print(NEW_INT_TO_FLOAT_MAPPER) - print(NEW_FLOAT_TO_INT_MAPPER) upgraded_model_name = __get_model_name( model_name = model_name, load_in_4bit = load_in_4bit, INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, ) - print(upgraded_model_name) if upgraded_model_name is not None: raise NotImplementedError( f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ From 858e1a2a8c23dc08e6bbd17c841e5a7b133cee06 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 23:03:29 -0700 Subject: [PATCH 0350/1088] Update mapper.py --- unsloth/models/mapper.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index a3191392ce..462555f317 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,10 +218,10 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), - # "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( - # "unsloth/Meta-Llama-3.1-8B", - # "meta-llama/Meta-Llama-3.1-8B", - # ), + "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B", + "meta-llama/Meta-Llama-3.1-8B", + ), "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" : ( "unsloth/Meta-Llama-3.1-8B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", From ea0a49448dca64808734cff797e352a69236343c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 27 Jul 2024 23:04:23 -0700 Subject: [PATCH 0351/1088] Update loader.py --- unsloth/models/loader.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 15d3f952bb..e2bfe1d639 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -98,7 +98,6 @@ def _get_model_name(model_name, load_in_4bit = True): INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, ) - print(new_model_name) if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): # Try checking if a new Unsloth version allows it! 
NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() From a7bfbe7927ea75f959e1d7c84e7bf50945d405ff Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 28 Jul 2024 00:10:02 -0700 Subject: [PATCH 0352/1088] Better debugging (#826) * Update __init__.py * Edits * Checks * Update _utils.py * Update _utils.py * Update loader.py * Update _utils.py * Update mapper.py * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update mapper.py * Update loader.py --- pyproject.toml | 10 +++-- unsloth/__init__.py | 9 ++--- unsloth/models/_utils.py | 65 ++++++++++++++++++++++++------ unsloth/models/llama.py | 3 +- unsloth/models/loader.py | 87 ++++++++++++++++++++++++++++++++-------- 5 files changed, 135 insertions(+), 39 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 829b35ad3b..6777f7c26e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.43.1", + "transformers>=4.43.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -46,7 +46,8 @@ huggingface = [ "trl>=0.7.9,<0.9.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", - "huggingface_hub[hf_transfer]", + "huggingface_hub", + "hf-transfer", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -188,7 +189,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.43.1", + "transformers>=4.43.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -196,7 +197,8 @@ colab-new = [ "wheel>=0.42.0", "numpy", "protobuf<4.0.0", - "huggingface_hub[hf_transfer]", + "huggingface_hub", + "hf-transfer", ] colab-no-deps = [ "accelerate>=0.26.1", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 4640681543..db54c9a169 100644 --- a/unsloth/__init__.py +++ 
b/unsloth/__init__.py @@ -60,6 +60,10 @@ "We have some installation instructions on our Github page.") pass +import os, re +import numpy as np +import subprocess + # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) keynames = "\n" + "\n".join(os.environ.keys()) if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: @@ -103,11 +107,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 except: pass else: from triton.common.build import libcuda_dirs -import os -import re -import numpy as np -import subprocess - try: cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5a267a459c..994f97ab73 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -122,7 +122,8 @@ def patch_mistral_nemo_config(config): # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch -if Version(torch.__version__) < Version("2.4.0"): +torch_version = torch.__version__ +if Version(torch_version) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd torch_amp_custom_bwd = torch.cuda.amp.custom_bwd else: @@ -134,37 +135,47 @@ def patch_mistral_nemo_config(config): # ============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb -from transformers.models.llama.modeling_llama import logger from transformers import AutoTokenizer +from transformers.utils.import_utils import _is_package_available major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = False if major_version >= 8: SUPPORTS_BFLOAT16 = True - try: - from flash_attn import flash_attn_func + if _is_package_available("flash_attn"): # Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl" try: from flash_attn.flash_attn_interface import flash_attn_cuda HAS_FLASH_ATTENTION = True except: - logger.warning_once( + print( 
"Unsloth: Your Flash Attention 2 installation seems to be broken?\n"\ "A possible explanation is you have a new CUDA version which isn't\n"\ "yet compatible with FA2? Please file a ticket to Unsloth or FA2.\n"\ - "We shall now use Xformers instead, which gets a 0.01% performance hit.\n"\ + "We shall now use Xformers instead, which does not have any performance hits!\n"\ "We found this negligible impact by benchmarking on 1x A100." ) + + # Stop Flash Attention from importing! + import transformers.utils.import_utils + transformers.utils.import_utils.is_flash_attn_2_available = lambda *args, **kwargs: False + import transformers.utils + transformers.utils.is_flash_attn_2_available = lambda *args, **kwargs: False + HAS_FLASH_ATTENTION = False - except: + pass + else: HAS_FLASH_ATTENTION = False else: # Tri Dao's benchmark shows xformers is faster for now. HAS_FLASH_ATTENTION = False pass -import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention + +from transformers.models.llama.modeling_llama import logger + +# ============================================= +# Get Xformers from xformers import __version__ as xformers_version # Temporarily disable 0.0.27 and higher - inference issues if Version(xformers_version) >= Version("0.0.27"): @@ -182,9 +193,41 @@ def patch_mistral_nemo_config(config): ) pass +if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.24 for torch = {torch_version}." + ) +elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.26 for torch = {torch_version}." 
+ ) +elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.27 for torch = {torch_version}." + ) +pass + +from xformers._cpp_lib import _register_extensions +try: + _register_extensions() # Check if C++ modules are loaded correctly +except Exception as error: + raise ImportError( + "Unsloth: Xformers was not installed correctly.\n"\ + "Please install xformers separately first.\n"\ + "Then confirm if it's correctly installed by running:\n"\ + "python -m xformers.info\n\n" + "Longer error message:\n" + str(error) + ) +pass +import xformers.ops.fmha as xformers +xformers_attention = xformers.memory_efficient_attention + # Check TRL version from trl import __version__ as trl_version -if Version(xformers_version) >= Version("0.9.0"): +if Version(trl_version) >= Version("0.9.0"): raise ImportError( "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ "then press Disconnect Runtime and then Restart it.\n"\ @@ -199,8 +242,6 @@ def patch_mistral_nemo_config(config): ) pass -# ============================================= - # ============================================= # Torch compile settings diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bc434ecf19..6b16a4cc61 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -18,6 +18,7 @@ from ._utils import * from ._utils import __version__ from torch.nn.functional import scaled_dot_product_attention +from transformers import __version__ as transformers_version from transformers.models.llama.modeling_llama import ( logger, BaseModelOutputWithPast, @@ -1281,7 +1282,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth: Fast {model_patcher.__name__[4:-5]} patching release 
{__version__}\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 6b83b8e739..e2bfe1d639 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -27,7 +27,7 @@ SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") -SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.1") +SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -35,9 +35,14 @@ pass -def _get_model_name(model_name, load_in_4bit = True): +def __get_model_name( + model_name, + load_in_4bit = True, + INT_TO_FLOAT_MAPPER = None, + FLOAT_TO_INT_MAPPER = None, +): - if not SUPPORTS_FOURBIT and model_name in INT_TO_FLOAT_MAPPER: + if not SUPPORTS_FOURBIT and model_name.lower() in INT_TO_FLOAT_MAPPER: model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] logger.warning_once( f"Unsloth: Your transformers version of {transformers_version} does not support native "\ @@ -46,25 +51,71 @@ def _get_model_name(model_name, load_in_4bit = True): f"to obtain the latest transformers build, then restart this session.\n"\ f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." 
) + return model_name - elif not load_in_4bit and model_name in INT_TO_FLOAT_MAPPER: + elif not load_in_4bit and model_name.lower() in INT_TO_FLOAT_MAPPER: new_model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." # ) - model_name = new_model_name + return new_model_name - elif load_in_4bit and SUPPORTS_FOURBIT and model_name in FLOAT_TO_INT_MAPPER: + elif load_in_4bit and SUPPORTS_FOURBIT and model_name.lower() in FLOAT_TO_INT_MAPPER: new_model_name = FLOAT_TO_INT_MAPPER[model_name.lower()] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ # f"We shall load `{new_model_name}` for 4x faster loading." # ) - model_name = new_model_name + return new_model_name pass - return model_name + return None +pass + + +def _get_new_mapper(): + try: + import requests + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") + exec(new_mapper, globals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER + except: + return {}, {} + pass +pass + + +def _get_model_name(model_name, load_in_4bit = True): + new_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + ) + if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): + # Try checking if a new Unsloth version allows it! 
+ NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() + upgraded_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + ) + if upgraded_model_name is not None: + raise NotImplementedError( + f"Unsloth: {model_name} is not supported in your current Unsloth version! Please update Unsloth via:\n\n"\ + 'pip uninstall unsloth -y\n'\ + 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' + ) + pass + pass + return new_model_name if new_model_name is not None else model_name pass @@ -98,16 +149,22 @@ def from_pretrained( from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() disable_progress_bars() + + autoconfig_error = None + peft_error = None try: model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) is_model = True - except: + except Exception as autoconfig_error: + autoconfig_error = str(autoconfig_error) is_model = False try: peft_config = PeftConfig .from_pretrained(model_name, token = token, revision = revision) is_peft = True - except: + except Exception as peft_error: + peft_error = str(peft_error) is_peft = False + pass # Cannot be both! if is_model and is_peft: @@ -118,11 +175,7 @@ def from_pretrained( "Please separate the LoRA and base models to 2 repos." ) elif not is_model and not is_peft: - raise RuntimeError( - f"Unsloth: `{model_name}` is not a base model or a PEFT model.\n"\ - "We could not locate a `config.json` or `adapter_config.json` file.\n"\ - "Are you certain the model name is correct? Does it actually exist?" 
- ) + raise RuntimeError(autoconfig_error or peft_error) pass # Get base model for PEFT: @@ -147,8 +200,8 @@ def from_pretrained( if scaling_type == "llama3" and not SUPPORTS_LLAMA31: raise ImportError( f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ - f"The minimum required version is 4.43.1\n"\ - f'Try `pip install --upgrade "transformers>=4.43.1"`\n'\ + f"The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) dispatch_model = FastLlamaModel From 18900721c2a3ac7f95d228d8fb41b2c3bfb6f869 Mon Sep 17 00:00:00 2001 From: XiaoYang Date: Wed, 31 Jul 2024 01:15:09 +0800 Subject: [PATCH 0353/1088] fix UnboundLocalError (#834) * When an exception has been assigned using as target, it is cleared at the end of the except clause.(https://docs.python.org/3/reference/compound_stmts.html#the-try-statement) * Update loader.py --------- Co-authored-by: xiaoyang Co-authored-by: Daniel Han --- unsloth/models/loader.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index e2bfe1d639..34deb8f9bd 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -155,14 +155,14 @@ def from_pretrained( try: model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) is_model = True - except Exception as autoconfig_error: - autoconfig_error = str(autoconfig_error) + except Exception as error: + autoconfig_error = str(error) is_model = False try: peft_config = PeftConfig .from_pretrained(model_name, token = token, revision = revision) is_peft = True - except Exception as peft_error: - peft_error = str(peft_error) + except Exception as error: + peft_error = str(error) is_peft = False pass From be0930d1f6d9a742e6971ba8e9206c04e87d16d6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 10:18:51 -0700 
Subject: [PATCH 0354/1088] Update loader.py --- unsloth/models/loader.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 34deb8f9bd..f22e81efad 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -175,6 +175,15 @@ def from_pretrained( "Please separate the LoRA and base models to 2 repos." ) elif not is_model and not is_peft: + error = autoconfig_error or peft_error + # Old transformers version + if "rope_scaling" in error.lower() and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support new RoPE scaling methods.\n"\ + f"This includes Llama 3.1. The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) raise RuntimeError(autoconfig_error or peft_error) pass From 4285d1b479d665b5f94136353ba2d8c3a73a789f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 10:29:54 -0700 Subject: [PATCH 0355/1088] Update llama.py --- unsloth/models/llama.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6b16a4cc61..496a37e7a3 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2012,7 +2012,8 @@ def patch_peft_model( model.peft_config[active_adapter].base_model_name_or_path = name pass # Add revision to enable future fast inference paths - model.peft_config[active_adapter].revision = f"unsloth" + # [TODO] Bugs out!see https://github.com/unslothai/unsloth/issues/492 + # model.peft_config[active_adapter].revision = f"unsloth" pass from transformers.trainer import Trainer From 42e09d192fb3d8a6e7b96563c0047fdd19585219 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 19:56:36 -0700 Subject: [PATCH 0356/1088] bugs --- unsloth/models/_utils.py | 28 ++++++++++++++-------------- 
unsloth/models/loader.py | 1 + unsloth/models/mapper.py | 1 + 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 994f97ab73..8677879aad 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -178,20 +178,20 @@ def patch_mistral_nemo_config(config): # Get Xformers from xformers import __version__ as xformers_version # Temporarily disable 0.0.27 and higher - inference issues -if Version(xformers_version) >= Version("0.0.27"): - raise ImportError( - "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ - "then press Disconnect Runtime and then Restart it.\n"\ - "\n"\ - "%%capture\n" - "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" - '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ - '\n'\ - f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ - 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' - ) -pass +# if Version(xformers_version) >= Version("0.0.27"): +# raise ImportError( +# "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ +# "then press Disconnect Runtime and then Restart it.\n"\ +# "\n"\ +# "%%capture\n" +# "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" +# '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' +# '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ +# '\n'\ +# f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ +# 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' +# ) +# pass if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): 
raise ImportError( diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f22e81efad..fb6d5c5018 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -42,6 +42,7 @@ def __get_model_name( FLOAT_TO_INT_MAPPER = None, ): + model_name = str(model_name) if not SUPPORTS_FOURBIT and model_name.lower() in INT_TO_FLOAT_MAPPER: model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] logger.warning_once( diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 462555f317..254a68a420 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -67,6 +67,7 @@ "codellama/CodeLlama-7b-hf", ), "unsloth/codellama-13b-bnb-4bit" : ( + "unsloth/codellama-13b", "codellama/CodeLlama-13b-hf", ), "unsloth/yi-6b-bnb-4bit" : ( From 79ef745c7b9f369644d9a740a2b8be29e9dad860 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 19:57:53 -0700 Subject: [PATCH 0357/1088] Update _utils.py --- unsloth/models/_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8677879aad..f4e4257b2b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -203,12 +203,12 @@ def patch_mistral_nemo_config(config): f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ f"Please install xformers < 0.0.26 for torch = {torch_version}." ) -elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.27 for torch = {torch_version}." - ) -pass +# elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): +# raise ImportError( +# f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ +# f"Please install xformers < 0.0.27 for torch = {torch_version}." 
+# ) +# pass from xformers._cpp_lib import _register_extensions try: From 9617ecbbb8bef7864961096f925f03e40ffa7f99 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 22:48:53 -0700 Subject: [PATCH 0358/1088] flash-attn softcapping --- pyproject.toml | 22 ++++++------- unsloth/models/_utils.py | 14 +++++++++ unsloth/models/gemma2.py | 68 +++++++++++++++++++++++++++++----------- unsloth/models/llama.py | 39 +++++++++++++---------- unsloth/models/loader.py | 16 ++++++++++ unsloth/models/mapper.py | 1 - 6 files changed, 113 insertions(+), 47 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6777f7c26e..e711325be9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,7 +171,7 @@ colab-ampere-torch211 = [ "unsloth[cu121onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] colab-torch220 = [ "unsloth[huggingface]", @@ -184,7 +184,7 @@ colab-ampere-torch220 = [ "unsloth[cu121onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] colab-new = [ "packaging", @@ -215,7 +215,7 @@ colab-ampere = [ "unsloth[colab-ampere-torch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere = [ "unsloth[huggingface]", @@ -223,7 +223,7 @@ cu118-ampere = [ "unsloth[cu118only]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere = [ "unsloth[huggingface]", @@ -231,7 +231,7 @@ cu121-ampere = [ "unsloth[cu121only]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch211 = [ "unsloth[huggingface]", @@ -239,7 +239,7 @@ cu118-ampere-torch211 = [ "unsloth[cu118onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere-torch211 = [ "unsloth[huggingface]", @@ -247,7 +247,7 @@ cu121-ampere-torch211 = [ "unsloth[cu121onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch220 = [ "unsloth[huggingface]", @@ -255,7 +255,7 @@ cu118-ampere-torch220 = [ 
"unsloth[cu118onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere-torch220 = [ "unsloth[huggingface]", @@ -263,7 +263,7 @@ cu121-ampere-torch220 = [ "unsloth[cu121onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch230 = [ "unsloth[huggingface]", @@ -271,7 +271,7 @@ cu118-ampere-torch230 = [ "unsloth[cu118onlytorch230]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere-torch230 = [ "unsloth[huggingface]", @@ -279,7 +279,7 @@ cu121-ampere-torch230 = [ "unsloth[cu121onlytorch230]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] [project.urls] diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f4e4257b2b..c9bc6065f9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -21,6 +21,7 @@ "xformers_version", "__version__", "HAS_FLASH_ATTENTION", + "HAS_FLASH_ATTENTION_SOFTCAPPING", "PRE_CHECK", "platform_system", "patch_tokenizer", @@ -140,6 +141,8 @@ def patch_mistral_nemo_config(config): major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = False +HAS_FLASH_ATTENTION = False +HAS_FLASH_ATTENTION_SOFTCAPPING = False if major_version >= 8: SUPPORTS_BFLOAT16 = True @@ -148,6 +151,17 @@ def patch_mistral_nemo_config(config): try: from flash_attn.flash_attn_interface import flash_attn_cuda HAS_FLASH_ATTENTION = True + + # Also check for softcapping + from flash_attn import __version__ as flash_attn_version + HAS_FLASH_ATTENTION_SOFTCAPPING = Version(flash_attn_version) >= Version("2.6.3") + if not HAS_FLASH_ATTENTION_SOFTCAPPING: + print( + "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ + "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ + "To update flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) except: print( "Unsloth: Your Flash 
Attention 2 installation seems to be broken?\n"\ diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 0d21c47b00..ecd45fbce2 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -56,6 +56,8 @@ Gemma2FlashAttention2 = Gemma2Attention pass +if HAS_FLASH_ATTENTION_SOFTCAPPING: + from flash_attn import flash_attn_func # [TODO] We must randomnly use torch.compile? # I checked the gradients and formulas and I'm sure it's correct. @@ -126,8 +128,31 @@ def Gemma2Attention_fast_forward( V = torch.cat([past_key_value[1], V], dim = 2) pass past_key_value = (K, V) if use_cache else None - - A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) + + # Only enable if the attention_mask is True + has_sliding_window = type(attention_mask) is bool and attention_mask is True + if HAS_FLASH_ATTENTION_SOFTCAPPING and type(attention_mask) is bool: + window = (-1, -1) + if has_sliding_window: + sw = getattr(self.config, "sliding_window", None) + sw = kv_seq_len if (sw is None or sw == "null") else sw + window = (-1, -1) if (kv_seq_len <= sw) else (sw, sw) + pass + + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + A = flash_attn_func( + Q, K, V, + causal = True, + softcap = self.config.attn_logit_softcapping, + softmax_scale = self.config.query_pre_attn_scalar, + window_size = window, + ) + A = A.reshape(bsz, q_len, n_heads*head_dim) + else: + A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) + pass A = self.apply_o(self, A) return A, None, past_key_value pass @@ -205,6 +230,8 @@ def Gemma2DecoderLayer_fast_forward( from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax +torch_matmul = torch.matmul +torch_tanh = torch.tanh def Gemma2Attention_fast_forward_inference( self, @@ -322,13 +349,13 @@ def Gemma2Attention_fast_forward_inference( # if bsz == 1: Qn *= self.scalar # See 
https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows - A = torch.matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + A = torch_matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched - A *= self.reciprocal_t; torch.tanh(A, out = A); A *= self.t; # Logit softcapping + A *= self.reciprocal_t; torch_tanh(A, out = A); A *= self.t; # Logit softcapping A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) - A = torch.matmul(A, Vnn, out = Qn) + A = torch_matmul(A, Vnn, out = Qn) # else: # A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) # pass @@ -359,19 +386,24 @@ def Gemma2Model_fast_forward_inference( bsz, q_len, hd = hidden_states.shape seq_len = past_key_values[0][0].shape[-2] if bsz != 1: - SWA = _prepare_4d_causal_attention_mask_for_sdpa( - attention_mask, - (bsz, q_len), - hidden_states, - seq_len, - sliding_window = self.config.sliding_window, - ) - GA = _prepare_4d_causal_attention_mask_for_sdpa( - attention_mask, - (bsz, q_len), - hidden_states, - seq_len, - ) + if HAS_FLASH_ATTENTION_SOFTCAPPING: + SWA = True + GA = False + else: + SWA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = self.config.sliding_window, + ) + GA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) + pass else: SWA = attention_mask GA = attention_mask diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 496a37e7a3..b5244ed4ee 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -682,23 +682,28 @@ def LlamaModel_fast_forward( # Gemma2 has alternating SWA and global attn if IS_GEMMA2 and not hasattr(self, 
"SWA_mask"): - n = self.config.max_position_embeddings - # masked_fill is making stuff slower! - # self. GA_mask = create_boolean_mask(n = n, sliding_window = 0) - # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) - from transformers.modeling_attn_mask_utils import AttentionMaskConverter - self.SWA_mask = AttentionMaskConverter( - is_causal = True, - sliding_window = self.config.sliding_window, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) - - self.GA_mask = AttentionMaskConverter( - is_causal = True, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) + if HAS_FLASH_ATTENTION_SOFTCAPPING: + self.SWA_mask = True + self.GA_mask = False + else: + n = self.config.max_position_embeddings + # masked_fill is making stuff slower! + # self. GA_mask = create_boolean_mask(n = n, sliding_window = 0) + # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) + from transformers.modeling_attn_mask_utils import AttentionMaskConverter + self.SWA_mask = AttentionMaskConverter( + is_causal = True, + sliding_window = self.config.sliding_window, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + + self.GA_mask = AttentionMaskConverter( + is_causal = True, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + pass pass # Go through every layer! diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index fb6d5c5018..47152d6764 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING from .llama import FastLlamaModel, logger from .mistral import FastMistralModel from .qwen2 import FastQwen2Model @@ -233,6 +234,21 @@ def from_pretrained( f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) + # Also check for softcapping support in flash-attn which is faster! + if is_bfloat16_supported() and not HAS_FLASH_ATTENTION: + print( + "Unsloth: If you want to finetune Gemma 2, install flash-attn to make it faster!\n"\ + "To install flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + elif HAS_FLASH_ATTENTION and not HAS_FLASH_ATTENTION_SOFTCAPPING: + print( + "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ + "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ + "To update flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + dispatch_model = FastGemma2Model elif model_type == "qwen2": dispatch_model = FastQwen2Model diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 254a68a420..462555f317 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -67,7 +67,6 @@ "codellama/CodeLlama-7b-hf", ), "unsloth/codellama-13b-bnb-4bit" : ( - "unsloth/codellama-13b", "codellama/CodeLlama-13b-hf", ), "unsloth/yi-6b-bnb-4bit" : ( From d326c988585d3c764bacefec0f92432c8a50e85a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 23:04:00 -0700 Subject: [PATCH 0359/1088] Update gemma2.py --- unsloth/models/gemma2.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index ecd45fbce2..a0880daefe 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -131,6 +131,9 @@ def Gemma2Attention_fast_forward( # Only enable 
if the attention_mask is True has_sliding_window = type(attention_mask) is bool and attention_mask is True + print(HAS_FLASH_ATTENTION_SOFTCAPPING) + print(has_sliding_window) + print(attention_mask) if HAS_FLASH_ATTENTION_SOFTCAPPING and type(attention_mask) is bool: window = (-1, -1) if has_sliding_window: From 86b71c4ef5f90379b075d2ab97827b3c2537d501 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 23:07:47 -0700 Subject: [PATCH 0360/1088] Update gemma2.py --- unsloth/models/gemma2.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index a0880daefe..2191a99c07 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -130,11 +130,8 @@ def Gemma2Attention_fast_forward( past_key_value = (K, V) if use_cache else None # Only enable if the attention_mask is True - has_sliding_window = type(attention_mask) is bool and attention_mask is True - print(HAS_FLASH_ATTENTION_SOFTCAPPING) - print(has_sliding_window) - print(attention_mask) - if HAS_FLASH_ATTENTION_SOFTCAPPING and type(attention_mask) is bool: + has_sliding_window = type(causal_mask) is bool and causal_mask is True + if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: window = (-1, -1) if has_sliding_window: sw = getattr(self.config, "sliding_window", None) From cf1054c9bcc74bd659739f34444f46d8c79837cf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 23:11:23 -0700 Subject: [PATCH 0361/1088] Update gemma2.py --- unsloth/models/gemma2.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 2191a99c07..d2bfb78992 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -139,6 +139,11 @@ def Gemma2Attention_fast_forward( window = (-1, -1) if (kv_seq_len <= sw) else (sw, sw) pass + # FA uses 1 / sqrt for softmax_scale! 
+ if not hasattr(self, "_flash_attention_softmax_scale"): + self._flash_attention_softmax_scale = 1.0 / self.config.query_pre_attn_scalar**0.5 + pass + Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) @@ -146,7 +151,7 @@ def Gemma2Attention_fast_forward( Q, K, V, causal = True, softcap = self.config.attn_logit_softcapping, - softmax_scale = self.config.query_pre_attn_scalar, + softmax_scale = self._flash_attention_softmax_scale, window_size = window, ) A = A.reshape(bsz, q_len, n_heads*head_dim) From 8db7e809d0dd60fb0262b3d0c4db70d43100cce0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 23:11:35 -0700 Subject: [PATCH 0362/1088] Update gemma2.py --- unsloth/models/gemma2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index d2bfb78992..1cbaf5b169 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -141,7 +141,7 @@ def Gemma2Attention_fast_forward( # FA uses 1 / sqrt for softmax_scale! 
if not hasattr(self, "_flash_attention_softmax_scale"): - self._flash_attention_softmax_scale = 1.0 / self.config.query_pre_attn_scalar**0.5 + self._flash_attention_softmax_scale = 1.0 / (self.config.query_pre_attn_scalar**0.5) pass Q = Q.transpose(1, 2) From 0c932bc0bb79b405af6e4b623088c86bdc51e48e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 30 Jul 2024 23:51:47 -0700 Subject: [PATCH 0363/1088] Update mapper.py --- unsloth/models/mapper.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 462555f317..57ba676585 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -241,6 +241,14 @@ "unsloth/Mistral-Large-Instruct-2407-bnb-4bit" : ( "mistralai/Mistral-Large-Instruct-2407", ), + "unsloth/gemma-2-2b-bnb-4bit" : ( + "unsloth/gemma-2-2b", + "google/gemma-2-2b", + ), + "unsloth/gemma-2-2b-it-bnb-4bit" : ( + "unsloth/gemma-2-2b-it", + "google/gemma-2-2b-it", + ), } INT_TO_FLOAT_MAPPER = {} From 7af632075c201075e8469917169862d002bc8dc5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 31 Jul 2024 08:53:20 -0700 Subject: [PATCH 0364/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 4c1271396e..d843158d23 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported - 📣 NEW! 
[Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported From fdfe1f59f56935f1945269e5beda50969810158a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 31 Jul 2024 08:54:22 -0700 Subject: [PATCH 0365/1088] Update _utils.py --- unsloth/models/_utils.py | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c9bc6065f9..fe3aa90402 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -192,20 +192,20 @@ def patch_mistral_nemo_config(config): # Get Xformers from xformers import __version__ as xformers_version # Temporarily disable 0.0.27 and higher - inference issues -# if Version(xformers_version) >= Version("0.0.27"): -# raise ImportError( -# "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ -# "then press Disconnect Runtime and then Restart it.\n"\ -# "\n"\ -# "%%capture\n" -# "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" -# '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' -# '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ -# '\n'\ -# f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ -# 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' -# ) -# pass +if Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + "%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + 
'!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ + 'Please downgrade xformers via `pip install --force-reinstall "xformers<0.0.27"' + ) +pass if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): raise ImportError( @@ -217,12 +217,12 @@ def patch_mistral_nemo_config(config): f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ f"Please install xformers < 0.0.26 for torch = {torch_version}." ) -# elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): -# raise ImportError( -# f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ -# f"Please install xformers < 0.0.27 for torch = {torch_version}." -# ) -# pass +elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.27 for torch = {torch_version}." 
+ ) +pass from xformers._cpp_lib import _register_extensions try: From b85670de83fd8eb10a9ca61045361918ea35686b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 31 Jul 2024 08:54:58 -0700 Subject: [PATCH 0366/1088] Gemma (#843) * bugs * Update _utils.py * flash-attn softcapping * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update mapper.py * Update README.md * Update _utils.py --- README.md | 1 + pyproject.toml | 22 ++++++------ unsloth/models/_utils.py | 14 ++++++++ unsloth/models/gemma2.py | 73 ++++++++++++++++++++++++++++++---------- unsloth/models/llama.py | 39 +++++++++++---------- unsloth/models/loader.py | 17 ++++++++++ unsloth/models/mapper.py | 8 +++++ 7 files changed, 128 insertions(+), 46 deletions(-) diff --git a/README.md b/README.md index 4c1271396e..d843158d23 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! 
[Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported diff --git a/pyproject.toml b/pyproject.toml index 6777f7c26e..e711325be9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,7 +171,7 @@ colab-ampere-torch211 = [ "unsloth[cu121onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] colab-torch220 = [ "unsloth[huggingface]", @@ -184,7 +184,7 @@ colab-ampere-torch220 = [ "unsloth[cu121onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] colab-new = [ "packaging", @@ -215,7 +215,7 @@ colab-ampere = [ "unsloth[colab-ampere-torch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere = [ "unsloth[huggingface]", @@ -223,7 +223,7 @@ cu118-ampere = [ "unsloth[cu118only]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere = [ "unsloth[huggingface]", @@ -231,7 +231,7 @@ cu121-ampere = [ "unsloth[cu121only]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch211 = [ "unsloth[huggingface]", @@ -239,7 +239,7 @@ cu118-ampere-torch211 = [ "unsloth[cu118onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere-torch211 = [ "unsloth[huggingface]", @@ -247,7 +247,7 @@ cu121-ampere-torch211 = [ "unsloth[cu121onlytorch211]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch220 = [ "unsloth[huggingface]", @@ -255,7 +255,7 @@ cu118-ampere-torch220 = [ "unsloth[cu118onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu121-ampere-torch220 = [ "unsloth[huggingface]", @@ -263,7 +263,7 @@ cu121-ampere-torch220 = [ "unsloth[cu121onlytorch220]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] cu118-ampere-torch230 = [ "unsloth[huggingface]", @@ -271,7 +271,7 @@ cu118-ampere-torch230 = [ "unsloth[cu118onlytorch230]", "packaging", "ninja", - "flash-attn", + 
"flash-attn>=2.6.3", ] cu121-ampere-torch230 = [ "unsloth[huggingface]", @@ -279,7 +279,7 @@ cu121-ampere-torch230 = [ "unsloth[cu121onlytorch230]", "packaging", "ninja", - "flash-attn", + "flash-attn>=2.6.3", ] [project.urls] diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 994f97ab73..fe3aa90402 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -21,6 +21,7 @@ "xformers_version", "__version__", "HAS_FLASH_ATTENTION", + "HAS_FLASH_ATTENTION_SOFTCAPPING", "PRE_CHECK", "platform_system", "patch_tokenizer", @@ -140,6 +141,8 @@ def patch_mistral_nemo_config(config): major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = False +HAS_FLASH_ATTENTION = False +HAS_FLASH_ATTENTION_SOFTCAPPING = False if major_version >= 8: SUPPORTS_BFLOAT16 = True @@ -148,6 +151,17 @@ def patch_mistral_nemo_config(config): try: from flash_attn.flash_attn_interface import flash_attn_cuda HAS_FLASH_ATTENTION = True + + # Also check for softcapping + from flash_attn import __version__ as flash_attn_version + HAS_FLASH_ATTENTION_SOFTCAPPING = Version(flash_attn_version) >= Version("2.6.3") + if not HAS_FLASH_ATTENTION_SOFTCAPPING: + print( + "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ + "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ + "To update flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) except: print( "Unsloth: Your Flash Attention 2 installation seems to be broken?\n"\ diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 0d21c47b00..1cbaf5b169 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -56,6 +56,8 @@ Gemma2FlashAttention2 = Gemma2Attention pass +if HAS_FLASH_ATTENTION_SOFTCAPPING: + from flash_attn import flash_attn_func # [TODO] We must randomnly use torch.compile? 
# I checked the gradients and formulas and I'm sure it's correct. @@ -126,8 +128,36 @@ def Gemma2Attention_fast_forward( V = torch.cat([past_key_value[1], V], dim = 2) pass past_key_value = (K, V) if use_cache else None - - A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) + + # Only enable if the attention_mask is True + has_sliding_window = type(causal_mask) is bool and causal_mask is True + if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: + window = (-1, -1) + if has_sliding_window: + sw = getattr(self.config, "sliding_window", None) + sw = kv_seq_len if (sw is None or sw == "null") else sw + window = (-1, -1) if (kv_seq_len <= sw) else (sw, sw) + pass + + # FA uses 1 / sqrt for softmax_scale! + if not hasattr(self, "_flash_attention_softmax_scale"): + self._flash_attention_softmax_scale = 1.0 / (self.config.query_pre_attn_scalar**0.5) + pass + + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + A = flash_attn_func( + Q, K, V, + causal = True, + softcap = self.config.attn_logit_softcapping, + softmax_scale = self._flash_attention_softmax_scale, + window_size = window, + ) + A = A.reshape(bsz, q_len, n_heads*head_dim) + else: + A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) + pass A = self.apply_o(self, A) return A, None, past_key_value pass @@ -205,6 +235,8 @@ def Gemma2DecoderLayer_fast_forward( from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax +torch_matmul = torch.matmul +torch_tanh = torch.tanh def Gemma2Attention_fast_forward_inference( self, @@ -322,13 +354,13 @@ def Gemma2Attention_fast_forward_inference( # if bsz == 1: Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows - A = torch.matmul(Qn, Knn.transpose(2, 3), out = 
self.attention[:,:,:,:cached_len]) + A = torch_matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched - A *= self.reciprocal_t; torch.tanh(A, out = A); A *= self.t; # Logit softcapping + A *= self.reciprocal_t; torch_tanh(A, out = A); A *= self.t; # Logit softcapping A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) - A = torch.matmul(A, Vnn, out = Qn) + A = torch_matmul(A, Vnn, out = Qn) # else: # A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) # pass @@ -359,19 +391,24 @@ def Gemma2Model_fast_forward_inference( bsz, q_len, hd = hidden_states.shape seq_len = past_key_values[0][0].shape[-2] if bsz != 1: - SWA = _prepare_4d_causal_attention_mask_for_sdpa( - attention_mask, - (bsz, q_len), - hidden_states, - seq_len, - sliding_window = self.config.sliding_window, - ) - GA = _prepare_4d_causal_attention_mask_for_sdpa( - attention_mask, - (bsz, q_len), - hidden_states, - seq_len, - ) + if HAS_FLASH_ATTENTION_SOFTCAPPING: + SWA = True + GA = False + else: + SWA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = self.config.sliding_window, + ) + GA = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) + pass else: SWA = attention_mask GA = attention_mask diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 496a37e7a3..b5244ed4ee 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -682,23 +682,28 @@ def LlamaModel_fast_forward( # Gemma2 has alternating SWA and global attn if IS_GEMMA2 and not hasattr(self, "SWA_mask"): - n = self.config.max_position_embeddings - # masked_fill is making stuff slower! - # self. 
GA_mask = create_boolean_mask(n = n, sliding_window = 0) - # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) - from transformers.modeling_attn_mask_utils import AttentionMaskConverter - self.SWA_mask = AttentionMaskConverter( - is_causal = True, - sliding_window = self.config.sliding_window, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) - - self.GA_mask = AttentionMaskConverter( - is_causal = True, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) + if HAS_FLASH_ATTENTION_SOFTCAPPING: + self.SWA_mask = True + self.GA_mask = False + else: + n = self.config.max_position_embeddings + # masked_fill is making stuff slower! + # self. GA_mask = create_boolean_mask(n = n, sliding_window = 0) + # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) + from transformers.modeling_attn_mask_utils import AttentionMaskConverter + self.SWA_mask = AttentionMaskConverter( + is_causal = True, + sliding_window = self.config.sliding_window, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + + self.GA_mask = AttentionMaskConverter( + is_causal = True, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + pass pass # Go through every layer! diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f22e81efad..47152d6764 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING from .llama import FastLlamaModel, logger from .mistral import FastMistralModel from .qwen2 import FastQwen2Model @@ -42,6 +43,7 @@ def __get_model_name( FLOAT_TO_INT_MAPPER = None, ): + model_name = str(model_name) if not SUPPORTS_FOURBIT and model_name.lower() in INT_TO_FLOAT_MAPPER: model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] logger.warning_once( @@ -232,6 +234,21 @@ def from_pretrained( f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) + # Also check for softcapping support in flash-attn which is faster! + if is_bfloat16_supported() and not HAS_FLASH_ATTENTION: + print( + "Unsloth: If you want to finetune Gemma 2, install flash-attn to make it faster!\n"\ + "To install flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + elif HAS_FLASH_ATTENTION and not HAS_FLASH_ATTENTION_SOFTCAPPING: + print( + "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ + "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ + "To update flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + dispatch_model = FastGemma2Model elif model_type == "qwen2": dispatch_model = FastQwen2Model diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 462555f317..57ba676585 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -241,6 +241,14 @@ "unsloth/Mistral-Large-Instruct-2407-bnb-4bit" : ( "mistralai/Mistral-Large-Instruct-2407", ), + "unsloth/gemma-2-2b-bnb-4bit" : ( + "unsloth/gemma-2-2b", + "google/gemma-2-2b", + ), + "unsloth/gemma-2-2b-it-bnb-4bit" : ( + "unsloth/gemma-2-2b-it", + "google/gemma-2-2b-it", + ), } INT_TO_FLOAT_MAPPER = {} From dfca5516e74e60d52915d4287121d9ff8b80b314 Mon Sep 17 00:00:00 2001 
From: Daniel Han Date: Wed, 31 Jul 2024 09:50:11 -0700 Subject: [PATCH 0367/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d843158d23..9407c452ad 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! +- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! And uploaded [GGUF quants](https://huggingface.co/unsloth/gemma-2-it-GGUF) Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing) for Gemma-2-2b Instruct! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported - 📣 NEW! 
[Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported From 2de142712d2dc8892d216dfca365dc3ba2707c43 Mon Sep 17 00:00:00 2001 From: XiaoYang Date: Thu, 1 Aug 2024 03:05:08 +0800 Subject: [PATCH 0368/1088] Fix ROPE extension issue and device mismatch (#840) * When an exception has been assigned using as target, it is cleared at the end of the except clause.(https://docs.python.org/3/reference/compound_stmts.html#the-try-statement) * Update loader.py * round up to extend rope size * inv_freq.device changed, make sure they are on the same device --------- Co-authored-by: xiaoyang Co-authored-by: Daniel Han --- unsloth/models/llama.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b5244ed4ee..e6c9280bc5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -14,6 +14,7 @@ import torch import gc +import math from typing import Optional, Tuple, List, Union from ._utils import * from ._utils import __version__ @@ -1036,7 +1037,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1109,7 +1110,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # in FP32. They are applied (multiplied) in FP32 as well. 
self.current_rope_size = seq_len - t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device=self.inv_freq.device, dtype=torch.int64).float() freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -1158,7 +1159,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass From d0a7dcec1dd2b9f67c9be97d3b9ac05341b5fc9b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 31 Jul 2024 12:09:33 -0700 Subject: [PATCH 0369/1088] Update gemma.py --- unsloth/models/gemma.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index e3f1e615db..a0894ec7a0 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -14,6 +14,7 @@ from .llama import * from ._utils import __version__ +import math try: from transformers.models.gemma.modeling_gemma import ( @@ -256,7 +257,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass From 4e570be9ae4ced8cdc64e498125708e34942befc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 31 Jul 2024 12:10:33 -0700 Subject: [PATCH 0370/1088] Fix RoPE extension (#846) * bugs * Update _utils.py * flash-attn softcapping * Update 
gemma2.py * Update gemma2.py * Update gemma2.py * Update gemma2.py * Update mapper.py * Update README.md * Update _utils.py * Fix ROPE extension issue and device mismatch (#840) * When an exception has been assigned using as target, it is cleared at the end of the except clause.(https://docs.python.org/3/reference/compound_stmts.html#the-try-statement) * Update loader.py * round up to extend rope size * inv_freq.device changed, make sure they are on the same device --------- Co-authored-by: xiaoyang Co-authored-by: Daniel Han * Update gemma.py --------- Co-authored-by: XiaoYang Co-authored-by: xiaoyang --- unsloth/models/gemma.py | 3 ++- unsloth/models/llama.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index e3f1e615db..a0894ec7a0 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -14,6 +14,7 @@ from .llama import * from ._utils import __version__ +import math try: from transformers.models.gemma.modeling_gemma import ( @@ -256,7 +257,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b5244ed4ee..e6c9280bc5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -14,6 +14,7 @@ import torch import gc +import math from typing import Optional, Tuple, List, Union from ._utils import * from ._utils import __version__ @@ -1036,7 +1037,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - 
self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1109,7 +1110,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len - t = torch.arange(self.current_rope_size, device="cpu", dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device=self.inv_freq.device, dtype=torch.int64).float() freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation @@ -1158,7 +1159,7 @@ def forward(self, x, position_ids=None, seq_len=None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = int(round(seq_len / 8192)) * 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass From f65cc9877c9ee42b9c6719a4fe168b00abceb095 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 4 Aug 2024 11:28:21 -0700 Subject: [PATCH 0371/1088] Update pyproject.toml --- pyproject.toml | 40 ++++++++++------------------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e711325be9..fdc098854b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,54 +50,34 @@ huggingface = [ "hf-transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.22.post7", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.22.post7", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.23", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.23", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + 
"xformers==0.0.23.post1", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.23.post1", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.24", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.24", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.26.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.26.post1", ] cu121onlytorch230 = [ - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.26.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers==0.0.26.post1", ] cu118 = [ From 16b6932c43baaf0097943ab14321a8f3c1bc6415 Mon Sep 17 00:00:00 2001 From: moontidef <53668275+relic-yuexi@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:45:34 +0800 Subject: [PATCH 0372/1088] fix: fix config.torch_dtype bug (#874) fix the bug #404 and the bug https://github.com/hiyouga/LLaMA-Factory/issues/4698#issue-2393500878 --- unsloth/models/llama.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e6c9280bc5..445e5026f0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -570,7 +570,14 @@ def LlamaModel_fast_forward( # Embed positions if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - + + if self.config.torch_dtype == "float32": + self.config.torch_dtype = torch.float32 + elif self.config.torch_dtype == "bfloat16": + self.config.torch_dtype = torch.bfloat16 + elif self.config.torch_dtype == "float16": + self.config.torch_dtype = torch.float16 + inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma From 46b434869847e202e1cf594ab8466819cb398e7a Mon Sep 17 00:00:00 2001 From: emuchogu Date: Mon, 5 Aug 2024 09:45:51 +0300 Subject: [PATCH 0373/1088] pascal support (#870) Co-authored-by: Edward Muchogu --- README.md | 389 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 389 insertions(+) diff --git a/README.md b/README.md index 9407c452ad..4e29a43ec4 100644 --- a/README.md +++ b/README.md @@ -436,6 +436,395 @@ Two Tesla T4s on Kaggle 
![](https://i.ibb.co/sJ7RhGG/image-41.png)
    +## NVIDIA Pascal Support + +Support for NVIDIA Pascal family of cards, specifically the P40 and P100. + +### Setup Guide + +1. Create three files (`Dockerfile`, `unsloth_env_file.yml`, and `docker-compose.yml`) with the contents provided below. +2. Ensure Docker and Docker Compose are installed on your system. +3. Install the NVIDIA Container Toolkit for GPU support if not already done. +4. Place all three files in the same directory. +5. Open a terminal and navigate to the directory containing these files. +6. Run the following command to build and start the container: + + ``` + docker-compose up --build + ``` + +7. Once the container is running, access Jupyter Lab by opening a web browser and navigating to `http://localhost:8888`. + +### Configuration Files + +#### 1. Dockerfile + +```dockerfile +# Stage 1: Base image with system dependencies +FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as base + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + git \ + vim \ + curl \ + wget \ + && rm -rf /var/lib/apt/lists/* + +# Install Miniconda only if it's not already installed +RUN if [ ! 
-d "/opt/conda" ]; then \ + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \ + bash miniconda.sh -b -p /opt/conda && \ + rm miniconda.sh; \ + fi + +# Set path to conda +ENV PATH /opt/conda/bin:$PATH + +# Set path to conda +ENV PATH /opt/conda/bin:$PATH + +# Stage 2: Python environment setup +FROM base as python-env + +COPY unsloth_env_file.yml unsloth_env_file.yml + +RUN conda env create -f unsloth_env_file.yml + +SHELL ["conda", "run", "-n", "unsloth_env", "/bin/bash", "-c"] + +# Stage 3: Final image +FROM python-env as final + +# Install Unsloth (This step is separate because it's likely to change more frequently) +RUN pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" + +ENV PATH /usr/local/cuda/bin:$PATH +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:$LD_LIBRARY_PATH + +# Set the working directory +WORKDIR /workspace + +# Set the default command to run Jupyter Lab +CMD ["conda", "run", "--no-capture-output", "-n", "unsloth_env", "jupyter", "lab", "--ip=0.0.0.0", "--no-browser", "--allow-root", "--NotebookApp.token=''", "--NotebookApp.password=''"] +``` + +#### 2. 
unsloth_env_file.yml + +```yaml +name: unsloth_env +channels: + - xformers + - pytorch + - nvidia + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1=conda_forge + - _openmp_mutex=4.5=2_gnu + - aiohttp=3.9.5=py310h5eee18b_0 + - aiosignal=1.2.0=pyhd3eb1b0_0 + - anyio=4.2.0=py310h06a4308_0 + - argon2-cffi=21.3.0=pyhd3eb1b0_0 + - argon2-cffi-bindings=21.2.0=py310h7f8727e_0 + - arrow-cpp=16.1.0=hc1eb8f0_0 + - async-lru=2.0.4=pyhd8ed1ab_0 + - async-timeout=4.0.3=py310h06a4308_0 + - attrs=23.1.0=py310h06a4308_0 + - aws-c-auth=0.6.19=h5eee18b_0 + - aws-c-cal=0.5.20=hdbd6064_0 + - aws-c-common=0.8.5=h5eee18b_0 + - aws-c-compression=0.2.16=h5eee18b_0 + - aws-c-event-stream=0.2.15=h6a678d5_0 + - aws-c-http=0.6.25=h5eee18b_0 + - aws-c-io=0.13.10=h5eee18b_0 + - aws-c-mqtt=0.7.13=h5eee18b_0 + - aws-c-s3=0.1.51=hdbd6064_0 + - aws-c-sdkutils=0.1.6=h5eee18b_0 + - aws-checksums=0.1.13=h5eee18b_0 + - aws-crt-cpp=0.18.16=h6a678d5_0 + - aws-sdk-cpp=1.10.55=h721c034_0 + - babel=2.14.0=pyhd8ed1ab_0 + - beautifulsoup4=4.12.3=py310h06a4308_0 + - blas=1.0=mkl + - bleach=4.1.0=pyhd3eb1b0_0 + - boost-cpp=1.82.0=hdb19cb5_2 + - bottleneck=1.3.7=py310ha9d4c09_0 + - brotli-python=1.0.9=py310h6a678d5_8 + - bzip2=1.0.8=h5eee18b_6 + - c-ares=1.19.1=h5eee18b_0 + - ca-certificates=2024.7.4=hbcca054_0 + - certifi=2024.7.4=pyhd8ed1ab_0 + - cffi=1.16.0=py310h5eee18b_1 + - charset-normalizer=3.3.2=pyhd3eb1b0_0 + - cuda-cudart=11.8.89=0 + - cuda-cupti=11.8.87=0 + - cuda-libraries=11.8.0=0 + - cuda-nvrtc=11.8.89=0 + - cuda-nvtx=11.8.86=0 + - cuda-runtime=11.8.0=0 + - cuda-version=11.8=hcce14f8_3 + - cudatoolkit=11.8.0=h6a678d5_0 + - datasets=2.19.1=py310h06a4308_0 + - debugpy=1.6.7=py310h6a678d5_0 + - decorator=5.1.1=pyhd3eb1b0_0 + - defusedxml=0.7.1=pyhd3eb1b0_0 + - dill=0.3.8=py310h06a4308_0 + - entrypoints=0.4=py310h06a4308_0 + - ffmpeg=4.3=hf484d3e_0 + - filelock=3.13.1=py310h06a4308_0 + - freetype=2.12.1=h4a9f257_0 + - frozenlist=1.4.0=py310h5eee18b_0 + - fsspec=2024.3.1=py310h06a4308_0 
+ - gflags=2.2.2=h6a678d5_1 + - glog=0.5.0=h6a678d5_1 + - gmp=6.2.1=h295c915_3 + - gmpy2=2.1.2=py310heeb90bb_0 + - gnutls=3.6.15=he1e5248_0 + - h11=0.14.0=pyhd8ed1ab_0 + - h2=4.1.0=pyhd8ed1ab_0 + - hpack=4.0.0=pyh9f0ad1d_0 + - httpcore=1.0.5=pyhd8ed1ab_0 + - httpx=0.27.0=pyhd8ed1ab_0 + - hyperframe=6.0.1=pyhd8ed1ab_0 + - icu=73.1=h6a678d5_0 + - idna=3.7=py310h06a4308_0 + - importlib-metadata=7.0.1=py310h06a4308_0 + - importlib_metadata=7.0.1=hd8ed1ab_0 + - importlib_resources=6.4.0=pyhd8ed1ab_0 + - intel-openmp=2023.1.0=hdb19cb5_46306 + - ipykernel=6.28.0=py310h06a4308_0 + - ipython_genutils=0.2.0=pyhd3eb1b0_1 + - jedi=0.19.1=py310h06a4308_0 + - jinja2=3.1.4=py310h06a4308_0 + - jpeg=9e=h5eee18b_2 + - json5=0.9.25=pyhd8ed1ab_0 + - jsonschema=4.19.2=py310h06a4308_0 + - jsonschema-specifications=2023.7.1=py310h06a4308_0 + - jupyter-lsp=2.2.5=pyhd8ed1ab_0 + - jupyter_client=7.4.9=py310h06a4308_0 + - jupyter_core=5.7.2=py310h06a4308_0 + - jupyter_events=0.10.0=py310h06a4308_0 + - jupyter_server=2.14.1=py310h06a4308_0 + - jupyter_server_terminals=0.4.4=py310h06a4308_1 + - jupyterlab=4.2.4=pyhd8ed1ab_0 + - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1 + - jupyterlab_server=2.27.3=pyhd8ed1ab_0 + - krb5=1.20.1=h143b758_1 + - lame=3.100=h7b6447c_0 + - lcms2=2.12=h3be6417_0 + - ld_impl_linux-64=2.38=h1181459_1 + - lerc=3.0=h295c915_0 + - libabseil=20240116.2=cxx17_h6a678d5_0 + - libboost=1.82.0=h109eef0_2 + - libbrotlicommon=1.0.9=h5eee18b_8 + - libbrotlidec=1.0.9=h5eee18b_8 + - libbrotlienc=1.0.9=h5eee18b_8 + - libcublas=11.11.3.6=0 + - libcufft=10.9.0.58=0 + - libcufile=1.9.1.3=0 + - libcurand=10.3.5.147=0 + - libcurl=8.7.1=h251f7ec_0 + - libcusolver=11.4.1.48=0 + - libcusparse=11.7.5.86=0 + - libdeflate=1.17=h5eee18b_1 + - libedit=3.1.20230828=h5eee18b_0 + - libev=4.33=h7f8727e_1 + - libevent=2.1.12=hdbd6064_1 + - libffi=3.4.4=h6a678d5_1 + - libgcc-ng=14.1.0=h77fa898_0 + - libgomp=14.1.0=h77fa898_0 + - libgrpc=1.62.2=h2d74bed_0 + - libiconv=1.16=h5eee18b_3 + - 
libidn2=2.3.4=h5eee18b_0 + - libjpeg-turbo=2.0.0=h9bf148f_0 + - libnghttp2=1.57.0=h2d74bed_0 + - libnpp=11.8.0.86=0 + - libnvjpeg=11.9.0.86=0 + - libpng=1.6.39=h5eee18b_0 + - libprotobuf=4.25.3=he621ea3_0 + - libsodium=1.0.18=h7b6447c_0 + - libssh2=1.11.0=h251f7ec_0 + - libstdcxx-ng=11.2.0=h1234567_1 + - libtasn1=4.19.0=h5eee18b_0 + - libthrift=0.15.0=h1795dd8_2 + - libtiff=4.5.1=h6a678d5_0 + - libunistring=0.9.10=h27cfd23_0 + - libuuid=1.41.5=h5eee18b_0 + - libwebp-base=1.3.2=h5eee18b_0 + - llvm-openmp=14.0.6=h9e868ea_0 + - lz4-c=1.9.4=h6a678d5_1 + - markupsafe=2.1.3=py310h5eee18b_0 + - mistune=2.0.4=py310h06a4308_0 + - mkl=2023.1.0=h213fc3f_46344 + - mkl-service=2.4.0=py310h5eee18b_1 + - mkl_fft=1.3.8=py310h5eee18b_0 + - mkl_random=1.2.4=py310hdb19cb5_0 + - mpc=1.1.0=h10f8cd9_1 + - mpfr=4.0.2=hb69a4c5_1 + - mpmath=1.3.0=py310h06a4308_0 + - multidict=6.0.4=py310h5eee18b_0 + - multiprocess=0.70.15=py310h06a4308_0 + - nb_conda_kernels=2.3.1=py310h06a4308_0 + - nbclassic=1.1.0=py310h06a4308_0 + - nbclient=0.8.0=py310h06a4308_0 + - nbconvert=7.10.0=py310h06a4308_0 + - nbformat=5.9.2=py310h06a4308_0 + - ncurses=6.4=h6a678d5_0 + - nest-asyncio=1.6.0=py310h06a4308_0 + - nettle=3.7.3=hbbd107a_1 + - networkx=3.3=py310h06a4308_0 + - notebook=6.5.7=py310h06a4308_0 + - notebook-shim=0.2.3=py310h06a4308_0 + - numexpr=2.8.7=py310h85018f9_0 + - numpy=1.26.4=py310h5f9d8c6_0 + - numpy-base=1.26.4=py310hb5e798b_0 + - openh264=2.1.1=h4ff587b_0 + - openjpeg=2.4.0=h9ca470c_2 + - openssl=3.3.1=h4bc722e_2 + - orc=2.0.1=h2d29ad5_0 + - overrides=7.4.0=py310h06a4308_0 + - packaging=24.1=py310h06a4308_0 + - pandas=2.2.2=py310h6a678d5_0 + - pandocfilters=1.5.0=pyhd3eb1b0_0 + - pillow=10.4.0=py310h5eee18b_0 + - pip=24.0=py310h06a4308_0 + - platformdirs=3.10.0=py310h06a4308_0 + - prometheus_client=0.14.1=py310h06a4308_0 + - prompt_toolkit=3.0.43=hd3eb1b0_0 + - psutil=5.9.0=py310h5eee18b_0 + - ptyprocess=0.7.0=pyhd3eb1b0_2 + - pure_eval=0.2.2=pyhd3eb1b0_0 + - pyarrow=16.1.0=py310h1128e8f_0 + - 
pycparser=2.21=pyhd3eb1b0_0 + - pysocks=1.7.1=py310h06a4308_0 + - python=3.10.14=h955ad1f_1 + - python-dateutil=2.9.0post0=py310h06a4308_2 + - python-fastjsonschema=2.16.2=py310h06a4308_0 + - python-json-logger=2.0.7=py310h06a4308_0 + - python-tzdata=2023.3=pyhd3eb1b0_0 + - python-xxhash=2.0.2=py310h5eee18b_1 + - pytorch=2.1.0=py3.10_cuda11.8_cudnn8.7.0_0 + - pytorch-cuda=11.8=h7e8668a_5 + - pytorch-mutex=1.0=cuda + - pytz=2024.1=py310h06a4308_0 + - pyyaml=6.0.1=py310h5eee18b_0 + - pyzmq=24.0.1=py310h5eee18b_0 + - re2=2022.04.01=h295c915_0 + - readline=8.2=h5eee18b_0 + - referencing=0.30.2=py310h06a4308_0 + - regex=2023.10.3=py310h5eee18b_0 + - requests=2.32.3=py310h06a4308_0 + - rfc3339-validator=0.1.4=py310h06a4308_0 + - rfc3986-validator=0.1.1=py310h06a4308_0 + - rpds-py=0.10.6=py310hb02cf49_0 + - s2n=1.3.27=hdbd6064_0 + - safetensors=0.4.2=py310ha89cbab_1 + - send2trash=1.8.2=py310h06a4308_0 + - setuptools=69.5.1=py310h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - snappy=1.1.10=h6a678d5_1 + - sniffio=1.3.0=py310h06a4308_0 + - soupsieve=2.5=py310h06a4308_0 + - sqlite=3.45.3=h5eee18b_0 + - stack_data=0.2.0=pyhd3eb1b0_0 + - sympy=1.12=py310h06a4308_0 + - tbb=2021.8.0=hdb19cb5_0 + - terminado=0.17.1=py310h06a4308_0 + - tinycss2=1.2.1=py310h06a4308_0 + - tk=8.6.14=h39e8969_0 + - tokenizers=0.19.1=py310hff361bb_0 + - tomli=2.0.1=pyhd8ed1ab_0 + - torchaudio=2.1.0=py310_cu118 + - torchtriton=2.1.0=py310 + - torchvision=0.16.0=py310_cu118 + - tornado=6.4.1=py310h5eee18b_0 + - tqdm=4.66.4=py310h2f386ee_0 + - traitlets=5.14.3=py310h06a4308_0 + - typing-extensions=4.11.0=py310h06a4308_0 + - typing_extensions=4.11.0=py310h06a4308_0 + - tzdata=2024a=h04d1e81_0 + - urllib3=2.2.2=py310h06a4308_0 + - utf8proc=2.6.1=h5eee18b_1 + - webencodings=0.5.1=py310h06a4308_1 + - websocket-client=1.8.0=py310h06a4308_0 + - wheel=0.43.0=py310h06a4308_0 + - xformers=0.0.22.post7=py310_cu11.8.0_pyt2.1.0 + - xxhash=0.8.0=h7f8727e_3 + - xz=5.4.6=h5eee18b_1 + - yaml=0.2.5=h7b6447c_0 + - 
yarl=1.9.3=py310h5eee18b_0 + - zeromq=4.3.5=h6a678d5_0 + - zipp=3.17.0=py310h06a4308_0 + - zlib=1.2.13=h5eee18b_1 + - zstd=1.5.5=hc292b87_2 + - pip: + - accelerate==0.33.0 + - asttokens==2.4.1 + - bitsandbytes==0.43.2 + - comm==0.2.2 + - docstring-parser==0.16 + - exceptiongroup==1.2.2 + - executing==2.0.1 + - gguf==0.9.1 + - hf-transfer==0.1.8 + - huggingface-hub==0.24.2 + - iprogress==0.4 + - ipython==8.26.0 + - ipywidgets==8.1.3 + - jupyterlab-widgets==3.0.11 + - markdown-it-py==3.0.0 + - matplotlib-inline==0.1.7 + - mdurl==0.1.2 + - parso==0.8.4 + - peft==0.12.0 + - pexpect==4.9.0 + - prompt-toolkit==3.0.47 + - protobuf==3.20.3 + - pure-eval==0.2.3 + - pygments==2.18.0 + - rich==13.7.1 + - sentencepiece==0.2.0 + - shtab==1.7.1 + - stack-data==0.6.3 + - transformers==4.43.3 + - trl==0.8.6 + - tyro==0.8.5 + - wcwidth==0.2.13 + - widgetsnbextension==4.0.11 + ``` + +#### 3. docker-compose.yml + +```yaml +version: '3.8' + +services: + unsloth-env: + environment: + - NVIDIA_VISIBLE_DEVICES=all + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] + volumes: + - ./cache:/root/.cache + - ./workspace:/workspace + working_dir: /workspace + ports: + - "8888:8888" # For Jupyter Lab + tty: true + stdin_open: true + build: + context: . 
+ dockerfile: Dockerfile +``` + + ### Thank You to - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support From 28dea9ac9550b136d8493d3b3ea57c859f20aab1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 4 Aug 2024 23:49:35 -0700 Subject: [PATCH 0374/1088] Update llama.py --- unsloth/models/llama.py | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 445e5026f0..cec743e59d 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -570,14 +570,7 @@ def LlamaModel_fast_forward( # Embed positions if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - - if self.config.torch_dtype == "float32": - self.config.torch_dtype = torch.float32 - elif self.config.torch_dtype == "bfloat16": - self.config.torch_dtype = torch.bfloat16 - elif self.config.torch_dtype == "float16": - self.config.torch_dtype = torch.float16 - + inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma @@ -1580,6 +1573,30 @@ def from_pretrained( internal_model = internal_model.model pass internal_model._saved_temp_tokenizer = tokenizer + + # Also fix torch_dtype + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = 
torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass return model, tokenizer pass From 291bc6e25495070a9118bb0618ba6172abb11970 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 4 Aug 2024 23:50:40 -0700 Subject: [PATCH 0375/1088] Update README.md --- README.md | 389 ------------------------------------------------------ 1 file changed, 389 deletions(-) diff --git a/README.md b/README.md index 4e29a43ec4..9407c452ad 100644 --- a/README.md +++ b/README.md @@ -436,395 +436,6 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    -## NVIDIA Pascal Support - -Support for NVIDIA Pascal family of cards, specifically the P40 and P100. - -### Setup Guide - -1. Create three files (`Dockerfile`, `unsloth_env_file.yml`, and `docker-compose.yml`) with the contents provided below. -2. Ensure Docker and Docker Compose are installed on your system. -3. Install the NVIDIA Container Toolkit for GPU support if not already done. -4. Place all three files in the same directory. -5. Open a terminal and navigate to the directory containing these files. -6. Run the following command to build and start the container: - - ``` - docker-compose up --build - ``` - -7. Once the container is running, access Jupyter Lab by opening a web browser and navigating to `http://localhost:8888`. - -### Configuration Files - -#### 1. Dockerfile - -```dockerfile -# Stage 1: Base image with system dependencies -FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as base - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - build-essential \ - git \ - vim \ - curl \ - wget \ - && rm -rf /var/lib/apt/lists/* - -# Install Miniconda only if it's not already installed -RUN if [ ! 
-d "/opt/conda" ]; then \ - wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \ - bash miniconda.sh -b -p /opt/conda && \ - rm miniconda.sh; \ - fi - -# Set path to conda -ENV PATH /opt/conda/bin:$PATH - -# Set path to conda -ENV PATH /opt/conda/bin:$PATH - -# Stage 2: Python environment setup -FROM base as python-env - -COPY unsloth_env_file.yml unsloth_env_file.yml - -RUN conda env create -f unsloth_env_file.yml - -SHELL ["conda", "run", "-n", "unsloth_env", "/bin/bash", "-c"] - -# Stage 3: Final image -FROM python-env as final - -# Install Unsloth (This step is separate because it's likely to change more frequently) -RUN pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" - -ENV PATH /usr/local/cuda/bin:$PATH -ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:$LD_LIBRARY_PATH - -# Set the working directory -WORKDIR /workspace - -# Set the default command to run Jupyter Lab -CMD ["conda", "run", "--no-capture-output", "-n", "unsloth_env", "jupyter", "lab", "--ip=0.0.0.0", "--no-browser", "--allow-root", "--NotebookApp.token=''", "--NotebookApp.password=''"] -``` - -#### 2. 
unsloth_env_file.yml - -```yaml -name: unsloth_env -channels: - - xformers - - pytorch - - nvidia - - conda-forge - - defaults -dependencies: - - _libgcc_mutex=0.1=conda_forge - - _openmp_mutex=4.5=2_gnu - - aiohttp=3.9.5=py310h5eee18b_0 - - aiosignal=1.2.0=pyhd3eb1b0_0 - - anyio=4.2.0=py310h06a4308_0 - - argon2-cffi=21.3.0=pyhd3eb1b0_0 - - argon2-cffi-bindings=21.2.0=py310h7f8727e_0 - - arrow-cpp=16.1.0=hc1eb8f0_0 - - async-lru=2.0.4=pyhd8ed1ab_0 - - async-timeout=4.0.3=py310h06a4308_0 - - attrs=23.1.0=py310h06a4308_0 - - aws-c-auth=0.6.19=h5eee18b_0 - - aws-c-cal=0.5.20=hdbd6064_0 - - aws-c-common=0.8.5=h5eee18b_0 - - aws-c-compression=0.2.16=h5eee18b_0 - - aws-c-event-stream=0.2.15=h6a678d5_0 - - aws-c-http=0.6.25=h5eee18b_0 - - aws-c-io=0.13.10=h5eee18b_0 - - aws-c-mqtt=0.7.13=h5eee18b_0 - - aws-c-s3=0.1.51=hdbd6064_0 - - aws-c-sdkutils=0.1.6=h5eee18b_0 - - aws-checksums=0.1.13=h5eee18b_0 - - aws-crt-cpp=0.18.16=h6a678d5_0 - - aws-sdk-cpp=1.10.55=h721c034_0 - - babel=2.14.0=pyhd8ed1ab_0 - - beautifulsoup4=4.12.3=py310h06a4308_0 - - blas=1.0=mkl - - bleach=4.1.0=pyhd3eb1b0_0 - - boost-cpp=1.82.0=hdb19cb5_2 - - bottleneck=1.3.7=py310ha9d4c09_0 - - brotli-python=1.0.9=py310h6a678d5_8 - - bzip2=1.0.8=h5eee18b_6 - - c-ares=1.19.1=h5eee18b_0 - - ca-certificates=2024.7.4=hbcca054_0 - - certifi=2024.7.4=pyhd8ed1ab_0 - - cffi=1.16.0=py310h5eee18b_1 - - charset-normalizer=3.3.2=pyhd3eb1b0_0 - - cuda-cudart=11.8.89=0 - - cuda-cupti=11.8.87=0 - - cuda-libraries=11.8.0=0 - - cuda-nvrtc=11.8.89=0 - - cuda-nvtx=11.8.86=0 - - cuda-runtime=11.8.0=0 - - cuda-version=11.8=hcce14f8_3 - - cudatoolkit=11.8.0=h6a678d5_0 - - datasets=2.19.1=py310h06a4308_0 - - debugpy=1.6.7=py310h6a678d5_0 - - decorator=5.1.1=pyhd3eb1b0_0 - - defusedxml=0.7.1=pyhd3eb1b0_0 - - dill=0.3.8=py310h06a4308_0 - - entrypoints=0.4=py310h06a4308_0 - - ffmpeg=4.3=hf484d3e_0 - - filelock=3.13.1=py310h06a4308_0 - - freetype=2.12.1=h4a9f257_0 - - frozenlist=1.4.0=py310h5eee18b_0 - - fsspec=2024.3.1=py310h06a4308_0 
- - gflags=2.2.2=h6a678d5_1 - - glog=0.5.0=h6a678d5_1 - - gmp=6.2.1=h295c915_3 - - gmpy2=2.1.2=py310heeb90bb_0 - - gnutls=3.6.15=he1e5248_0 - - h11=0.14.0=pyhd8ed1ab_0 - - h2=4.1.0=pyhd8ed1ab_0 - - hpack=4.0.0=pyh9f0ad1d_0 - - httpcore=1.0.5=pyhd8ed1ab_0 - - httpx=0.27.0=pyhd8ed1ab_0 - - hyperframe=6.0.1=pyhd8ed1ab_0 - - icu=73.1=h6a678d5_0 - - idna=3.7=py310h06a4308_0 - - importlib-metadata=7.0.1=py310h06a4308_0 - - importlib_metadata=7.0.1=hd8ed1ab_0 - - importlib_resources=6.4.0=pyhd8ed1ab_0 - - intel-openmp=2023.1.0=hdb19cb5_46306 - - ipykernel=6.28.0=py310h06a4308_0 - - ipython_genutils=0.2.0=pyhd3eb1b0_1 - - jedi=0.19.1=py310h06a4308_0 - - jinja2=3.1.4=py310h06a4308_0 - - jpeg=9e=h5eee18b_2 - - json5=0.9.25=pyhd8ed1ab_0 - - jsonschema=4.19.2=py310h06a4308_0 - - jsonschema-specifications=2023.7.1=py310h06a4308_0 - - jupyter-lsp=2.2.5=pyhd8ed1ab_0 - - jupyter_client=7.4.9=py310h06a4308_0 - - jupyter_core=5.7.2=py310h06a4308_0 - - jupyter_events=0.10.0=py310h06a4308_0 - - jupyter_server=2.14.1=py310h06a4308_0 - - jupyter_server_terminals=0.4.4=py310h06a4308_1 - - jupyterlab=4.2.4=pyhd8ed1ab_0 - - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1 - - jupyterlab_server=2.27.3=pyhd8ed1ab_0 - - krb5=1.20.1=h143b758_1 - - lame=3.100=h7b6447c_0 - - lcms2=2.12=h3be6417_0 - - ld_impl_linux-64=2.38=h1181459_1 - - lerc=3.0=h295c915_0 - - libabseil=20240116.2=cxx17_h6a678d5_0 - - libboost=1.82.0=h109eef0_2 - - libbrotlicommon=1.0.9=h5eee18b_8 - - libbrotlidec=1.0.9=h5eee18b_8 - - libbrotlienc=1.0.9=h5eee18b_8 - - libcublas=11.11.3.6=0 - - libcufft=10.9.0.58=0 - - libcufile=1.9.1.3=0 - - libcurand=10.3.5.147=0 - - libcurl=8.7.1=h251f7ec_0 - - libcusolver=11.4.1.48=0 - - libcusparse=11.7.5.86=0 - - libdeflate=1.17=h5eee18b_1 - - libedit=3.1.20230828=h5eee18b_0 - - libev=4.33=h7f8727e_1 - - libevent=2.1.12=hdbd6064_1 - - libffi=3.4.4=h6a678d5_1 - - libgcc-ng=14.1.0=h77fa898_0 - - libgomp=14.1.0=h77fa898_0 - - libgrpc=1.62.2=h2d74bed_0 - - libiconv=1.16=h5eee18b_3 - - 
libidn2=2.3.4=h5eee18b_0 - - libjpeg-turbo=2.0.0=h9bf148f_0 - - libnghttp2=1.57.0=h2d74bed_0 - - libnpp=11.8.0.86=0 - - libnvjpeg=11.9.0.86=0 - - libpng=1.6.39=h5eee18b_0 - - libprotobuf=4.25.3=he621ea3_0 - - libsodium=1.0.18=h7b6447c_0 - - libssh2=1.11.0=h251f7ec_0 - - libstdcxx-ng=11.2.0=h1234567_1 - - libtasn1=4.19.0=h5eee18b_0 - - libthrift=0.15.0=h1795dd8_2 - - libtiff=4.5.1=h6a678d5_0 - - libunistring=0.9.10=h27cfd23_0 - - libuuid=1.41.5=h5eee18b_0 - - libwebp-base=1.3.2=h5eee18b_0 - - llvm-openmp=14.0.6=h9e868ea_0 - - lz4-c=1.9.4=h6a678d5_1 - - markupsafe=2.1.3=py310h5eee18b_0 - - mistune=2.0.4=py310h06a4308_0 - - mkl=2023.1.0=h213fc3f_46344 - - mkl-service=2.4.0=py310h5eee18b_1 - - mkl_fft=1.3.8=py310h5eee18b_0 - - mkl_random=1.2.4=py310hdb19cb5_0 - - mpc=1.1.0=h10f8cd9_1 - - mpfr=4.0.2=hb69a4c5_1 - - mpmath=1.3.0=py310h06a4308_0 - - multidict=6.0.4=py310h5eee18b_0 - - multiprocess=0.70.15=py310h06a4308_0 - - nb_conda_kernels=2.3.1=py310h06a4308_0 - - nbclassic=1.1.0=py310h06a4308_0 - - nbclient=0.8.0=py310h06a4308_0 - - nbconvert=7.10.0=py310h06a4308_0 - - nbformat=5.9.2=py310h06a4308_0 - - ncurses=6.4=h6a678d5_0 - - nest-asyncio=1.6.0=py310h06a4308_0 - - nettle=3.7.3=hbbd107a_1 - - networkx=3.3=py310h06a4308_0 - - notebook=6.5.7=py310h06a4308_0 - - notebook-shim=0.2.3=py310h06a4308_0 - - numexpr=2.8.7=py310h85018f9_0 - - numpy=1.26.4=py310h5f9d8c6_0 - - numpy-base=1.26.4=py310hb5e798b_0 - - openh264=2.1.1=h4ff587b_0 - - openjpeg=2.4.0=h9ca470c_2 - - openssl=3.3.1=h4bc722e_2 - - orc=2.0.1=h2d29ad5_0 - - overrides=7.4.0=py310h06a4308_0 - - packaging=24.1=py310h06a4308_0 - - pandas=2.2.2=py310h6a678d5_0 - - pandocfilters=1.5.0=pyhd3eb1b0_0 - - pillow=10.4.0=py310h5eee18b_0 - - pip=24.0=py310h06a4308_0 - - platformdirs=3.10.0=py310h06a4308_0 - - prometheus_client=0.14.1=py310h06a4308_0 - - prompt_toolkit=3.0.43=hd3eb1b0_0 - - psutil=5.9.0=py310h5eee18b_0 - - ptyprocess=0.7.0=pyhd3eb1b0_2 - - pure_eval=0.2.2=pyhd3eb1b0_0 - - pyarrow=16.1.0=py310h1128e8f_0 - - 
pycparser=2.21=pyhd3eb1b0_0 - - pysocks=1.7.1=py310h06a4308_0 - - python=3.10.14=h955ad1f_1 - - python-dateutil=2.9.0post0=py310h06a4308_2 - - python-fastjsonschema=2.16.2=py310h06a4308_0 - - python-json-logger=2.0.7=py310h06a4308_0 - - python-tzdata=2023.3=pyhd3eb1b0_0 - - python-xxhash=2.0.2=py310h5eee18b_1 - - pytorch=2.1.0=py3.10_cuda11.8_cudnn8.7.0_0 - - pytorch-cuda=11.8=h7e8668a_5 - - pytorch-mutex=1.0=cuda - - pytz=2024.1=py310h06a4308_0 - - pyyaml=6.0.1=py310h5eee18b_0 - - pyzmq=24.0.1=py310h5eee18b_0 - - re2=2022.04.01=h295c915_0 - - readline=8.2=h5eee18b_0 - - referencing=0.30.2=py310h06a4308_0 - - regex=2023.10.3=py310h5eee18b_0 - - requests=2.32.3=py310h06a4308_0 - - rfc3339-validator=0.1.4=py310h06a4308_0 - - rfc3986-validator=0.1.1=py310h06a4308_0 - - rpds-py=0.10.6=py310hb02cf49_0 - - s2n=1.3.27=hdbd6064_0 - - safetensors=0.4.2=py310ha89cbab_1 - - send2trash=1.8.2=py310h06a4308_0 - - setuptools=69.5.1=py310h06a4308_0 - - six=1.16.0=pyhd3eb1b0_1 - - snappy=1.1.10=h6a678d5_1 - - sniffio=1.3.0=py310h06a4308_0 - - soupsieve=2.5=py310h06a4308_0 - - sqlite=3.45.3=h5eee18b_0 - - stack_data=0.2.0=pyhd3eb1b0_0 - - sympy=1.12=py310h06a4308_0 - - tbb=2021.8.0=hdb19cb5_0 - - terminado=0.17.1=py310h06a4308_0 - - tinycss2=1.2.1=py310h06a4308_0 - - tk=8.6.14=h39e8969_0 - - tokenizers=0.19.1=py310hff361bb_0 - - tomli=2.0.1=pyhd8ed1ab_0 - - torchaudio=2.1.0=py310_cu118 - - torchtriton=2.1.0=py310 - - torchvision=0.16.0=py310_cu118 - - tornado=6.4.1=py310h5eee18b_0 - - tqdm=4.66.4=py310h2f386ee_0 - - traitlets=5.14.3=py310h06a4308_0 - - typing-extensions=4.11.0=py310h06a4308_0 - - typing_extensions=4.11.0=py310h06a4308_0 - - tzdata=2024a=h04d1e81_0 - - urllib3=2.2.2=py310h06a4308_0 - - utf8proc=2.6.1=h5eee18b_1 - - webencodings=0.5.1=py310h06a4308_1 - - websocket-client=1.8.0=py310h06a4308_0 - - wheel=0.43.0=py310h06a4308_0 - - xformers=0.0.22.post7=py310_cu11.8.0_pyt2.1.0 - - xxhash=0.8.0=h7f8727e_3 - - xz=5.4.6=h5eee18b_1 - - yaml=0.2.5=h7b6447c_0 - - 
yarl=1.9.3=py310h5eee18b_0 - - zeromq=4.3.5=h6a678d5_0 - - zipp=3.17.0=py310h06a4308_0 - - zlib=1.2.13=h5eee18b_1 - - zstd=1.5.5=hc292b87_2 - - pip: - - accelerate==0.33.0 - - asttokens==2.4.1 - - bitsandbytes==0.43.2 - - comm==0.2.2 - - docstring-parser==0.16 - - exceptiongroup==1.2.2 - - executing==2.0.1 - - gguf==0.9.1 - - hf-transfer==0.1.8 - - huggingface-hub==0.24.2 - - iprogress==0.4 - - ipython==8.26.0 - - ipywidgets==8.1.3 - - jupyterlab-widgets==3.0.11 - - markdown-it-py==3.0.0 - - matplotlib-inline==0.1.7 - - mdurl==0.1.2 - - parso==0.8.4 - - peft==0.12.0 - - pexpect==4.9.0 - - prompt-toolkit==3.0.47 - - protobuf==3.20.3 - - pure-eval==0.2.3 - - pygments==2.18.0 - - rich==13.7.1 - - sentencepiece==0.2.0 - - shtab==1.7.1 - - stack-data==0.6.3 - - transformers==4.43.3 - - trl==0.8.6 - - tyro==0.8.5 - - wcwidth==0.2.13 - - widgetsnbextension==4.0.11 - ``` - -#### 3. docker-compose.yml - -```yaml -version: '3.8' - -services: - unsloth-env: - environment: - - NVIDIA_VISIBLE_DEVICES=all - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: all - capabilities: [gpu] - volumes: - - ./cache:/root/.cache - - ./workspace:/workspace - working_dir: /workspace - ports: - - "8888:8888" # For Jupyter Lab - tty: true - stdin_open: true - build: - context: . - dockerfile: Dockerfile -``` - - ### Thank You to - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support From b43855fb3635ce06860b27f7c8f9987a16b47ad7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 4 Aug 2024 23:59:57 -0700 Subject: [PATCH 0376/1088] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 9407c452ad..35cbbe6970 100644 --- a/README.md +++ b/README.md @@ -37,8 +37,10 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. +- Install Unsloth with `pip install unsloth[colab-new]` then `pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes` ## 🦥 Unsloth.ai News +- 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! And uploaded [GGUF quants](https://huggingface.co/unsloth/gemma-2-it-GGUF) Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing) for Gemma-2-2b Instruct! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported - 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported From bfe38e6ea8d3d7cf8ce9e37962de03c71c90cbe2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 5 Aug 2024 00:00:53 -0700 Subject: [PATCH 0377/1088] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 35cbbe6970..86c3fbd86e 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,6 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. -- Install Unsloth with `pip install unsloth[colab-new]` then `pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes` ## 🦥 Unsloth.ai News - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. @@ -94,6 +93,9 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ![](https://i.ibb.co/sJ7RhGG/image-41.png) ## 💾 Installation Instructions + +If you have Pytorch 2.3 and CUDA 12.1, install Unsloth with `pip install unsloth[colab-new]` then `pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes` + ### Conda Installation Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs. 
```bash From 8001d30a8f7c179ff7036eaa2a7552ce620176b6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 6 Aug 2024 20:24:44 -0700 Subject: [PATCH 0378/1088] Fix tokenizers (#887) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py --- pyproject.toml | 4 +- unsloth/models/_utils.py | 83 ++++++++++++++++++++------ unsloth/models/llama.py | 1 + unsloth/tokenizer_utils.py | 115 ++++++++++++++++++++++++++++++++++++- 4 files changed, 180 insertions(+), 23 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fdc098854b..2cbe68f4a6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ huggingface = [ "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", - "hf-transfer", + "hf_transfer", ] cu118only = [ "xformers==0.0.22.post7", @@ -178,7 +178,7 @@ colab-new = [ "numpy", "protobuf<4.0.0", "huggingface_hub", - "hf-transfer", + "hf_transfer", ] colab-no-deps = [ "accelerate>=0.26.1", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index fe3aa90402..d5be8d97e7 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -332,7 +332,6 @@ def prepare_model_for_kbit_training( """ # Freeze all parameters except LoRA - import re with torch.no_grad(): for name, param in model.named_parameters(): if ".lora_A." in name or ".lora_B." 
in name or ".lora_magnitude_vector" in name: @@ -389,12 +388,14 @@ def patch_tokenizer(model, tokenizer): Fixes https://github.com/unslothai/unsloth/issues/5 """ possible_reserved_tokens = ( + "<|finetune_right_pad_id|>", # Llama-3.1 + "", # Mistral Nemo "<|reserved", # Llama-3 "<|placeholder", # Phi-3 "[control", # Mistral type models - "", # Mistral Nemo - "<|finetune_right_pad_id|>", # Llama-3.1 ) + joiner = "\1\0=+=\0\1" + number_repetitions = 3 - 1 # Number of reserved tokens needed if model is not None: model.config.update({"unsloth_version" : __version__}) @@ -412,28 +413,69 @@ def patch_tokenizer(model, tokenizer): if bad_pad_token: # Find a better pad token added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] - possible_pad_token = None - n_possible_pad_tokens = 0 - for added_token in added_tokens[::-1]: - if added_token.startswith(possible_reserved_tokens): - if possible_pad_token is None: possible_pad_token = added_token - n_possible_pad_tokens += 1 - # We must see at least 3 of the reserved tokens - if n_possible_pad_tokens >= 3: break + all_added_tokens = joiner.join(added_tokens[::-1]) + all_added_tokens += joiner + + final_pad_token = None + final_good_match = False + + for possible_reserved_token in possible_reserved_tokens: + possible_reserved_token = re.escape(possible_reserved_token) + found = re.finditer(f"{possible_reserved_token}", all_added_tokens) + first_match = None + good_match = False + for j, x in enumerate(found): + if j == 0: first_match = x + if j >= number_repetitions: + good_match = True + break + pass + pass + + if first_match is None: continue + + # If it ends with |> or > etc, then set it as a good pad token! 
+ start = first_match.span(0)[0] + possible_pad_token = first_match.group(0) + end = all_added_tokens.find(joiner, start) + first_match = all_added_tokens[start:end] + + if first_match is not None: + good_match = possible_pad_token.endswith((">", "|>", "]", ")")) + pass + possible_pad_token = first_match + + # Replace current pad token if another exact match is found + if not final_good_match and good_match: + final_good_match = True + final_pad_token = possible_pad_token + break + else: + final_good_match = False + final_pad_token = possible_pad_token pass pass - if n_possible_pad_tokens < 3: possible_pad_token = None + possible_pad_token = final_pad_token - if possible_pad_token is None: - # Try unk_token + # Try unk_token + if possible_pad_token is None and hasattr(tokenizer, "unk_token"): possible_pad_token = tokenizer.unk_token pass + # Check pad token's id must be less than vocab size + if possible_pad_token is not None: + check_pad_token = tokenizer(possible_pad_token, add_special_tokens = False).input_ids + if len(check_pad_token) != 1: + possible_pad_token = None + if check_pad_token[0] >= config.vocab_size: + possible_pad_token = None + pass + if possible_pad_token is None: # Failure to find a good replacement!! We shall manually add one! 
new_pad_token = "<|PAD_TOKEN|>" while new_pad_token in tokenizer.get_vocab(): - new_pad_token += "#" + new_pad_token = f"<{new_pad_token}>" pass possible_pad_token = new_pad_token pass @@ -447,11 +489,16 @@ def patch_tokenizer(model, tokenizer): tokenizer.add_special_tokens({"pad_token" : possible_pad_token}) tokenizer.pad_token = possible_pad_token if model is not None: - config = model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + model.generation_config.update(pad_token_id = tokenizer.pad_token_id) else: if model is not None: if model.config.pad_token_id is None: - config = model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + model.generation_config.update(pad_token_id = tokenizer.pad_token_id) + pass + pass + model.generation_config.update(max_length = model.config.max_position_embeddings) return model, tokenizer pass @@ -462,7 +509,6 @@ def patch_tokenizer(model, tokenizer): from peft import __version__ as peft_version if Version(peft_version) < Version("0.12.0"): from peft.tuners.lora.layer import LoraLayer - import inspect, re try: source = inspect.getsource(LoraLayer.update_layer) text = "if weight is not None:\n" @@ -688,7 +734,6 @@ def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod from inspect import getsource from accelerate.utils.dataclasses import DistributedType -import re BitsAndBytesConfig__init__ = getsource(BitsAndBytesConfig.__init__) BitsAndBytesConfig__init__ = re.sub( r"if[\s]{1,}kwargs\:[\s]{1,}.+?\n", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cec743e59d..e300e07e0c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1397,6 +1397,7 @@ def from_pretrained( padding_side = "right", token = token, trust_remote_code = 
trust_remote_code, + fix_tokenizer = fix_tokenizer, ) model, tokenizer = patch_tokenizer(model, tokenizer) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 8474c2c6b7..c67f82c2ce 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -454,13 +454,14 @@ class SentencePieceTokenTypes(IntEnum): pass -def load_correct_tokenizer( +def _load_correct_tokenizer( tokenizer_name, model_max_length = None, padding_side = "right", token = None, trust_remote_code = False, cache_dir = "huggingface_tokenizers_cache", + fix_tokenizer = True, ): if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT: cache_dir = cache_dir @@ -501,7 +502,10 @@ def load_correct_tokenizer( cache_dir = cache_dir, ) - if tokenizer_name in IGNORED_TOKENIZER_NAMES: + if not fix_tokenizer or tokenizer_name in IGNORED_TOKENIZER_NAMES: + return fast_tokenizer + # Ignore Mistral ones - they're a bit weird to handle! + elif "mistral" in tokenizer_name.lower(): return fast_tokenizer elif slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): @@ -522,6 +526,113 @@ def load_correct_tokenizer( pass +def load_correct_tokenizer( + tokenizer_name, + model_max_length = None, + padding_side = "right", + token = None, + trust_remote_code = False, + cache_dir = "huggingface_tokenizers_cache", + fix_tokenizer = True, +): + tokenizer = _load_correct_tokenizer( + tokenizer_name = tokenizer_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + trust_remote_code = trust_remote_code, + cache_dir = cache_dir, + fix_tokenizer = fix_tokenizer, + ) + + ### 1. 
Fixup tokenizer's chat_template + old_chat_template = getattr(tokenizer, "chat_template", None) + + # Ignore mistral type models since they don't have a add_generation_prompt + if "mistral" in str(getattr(tokenizer, "name_or_path", "")).lower(): + chat_template = old_chat_template + + # Also check Llama-2 old style models + elif old_chat_template is not None and \ + "[/INST]" in old_chat_template and "[INST]" in old_chat_template and \ + "bos_token" in old_chat_template and "eos_token" in old_chat_template: + + chat_template = old_chat_template + + else: + chat_template = fix_chat_template(tokenizer) + if old_chat_template is not None and chat_template is None: + raise RuntimeError( + "Unsloth: Fixing chat template failed - please file a report immediately!" + ) + pass + pass + + tokenizer.chat_template = chat_template + return tokenizer +pass + + +def _fix_chat_template(chat_template): + endfor = "{% endfor %}" + where = chat_template.find(endfor) + if where == -1: return chat_template + + after_endfor = chat_template[where + len(endfor):] + + if "{% if" not in after_endfor and "{% set " not in after_endfor and \ + after_endfor.startswith("{{") and after_endfor.endswith("}}") and \ + after_endfor.count("{{") == 1 and after_endfor.count("}}") == 1: + + after_endfor = "{% if add_generation_prompt %}" + after_endfor + "{% endif %}" + + chat_template = chat_template[:where + len(endfor)] + after_endfor + pass + return chat_template +pass + + +def fix_chat_template(tokenizer): + chat_template = getattr(tokenizer, "chat_template", None) + if chat_template is None: return None + + ### 1. Check if add_generation_prompt works + messages = [ + {"role": "user", "content": "Who are you?"}, + ] + no = tokenizer.apply_chat_template(messages, add_generation_prompt = False, tokenize = False) + yes = tokenizer.apply_chat_template(messages, add_generation_prompt = True, tokenize = False) + + if no == yes: + # SAME?! That's not good! 
We check for add_generation_prompt + if "{% if add_generation_prompt %}" not in chat_template: + # Try fixing it by adding it + new_chat_template = _fix_chat_template(chat_template) + if "{% if add_generation_prompt %}" not in new_chat_template: + raise RuntimeError( + f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"\ + "does not have a {% if add_generation_prompt %} for generation purposes.\n"\ + "Please file a bug report immediately - thanks!" + ) + else: + logger.warning_once( + "Unsloth: We successfully patched the tokenizer to add a {% if add_generation_prompt %} to the chat_template.\n"\ + "This is not a bug, but please notify the Unsloth maintainers - thanks!" + ) + chat_template = new_chat_template + pass + else: + raise RuntimeError( + f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"\ + "has a {% if add_generation_prompt %} for generation purposes, but wasn't provided correctly.\n"\ + "Please file a bug report immediately - thanks!" + ) + pass + pass + return chat_template +pass + + def check_tokenizer( model, tokenizer, From 637ed8c6bd252f981e89e30e1085efc03a06a880 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 7 Aug 2024 01:11:06 -0700 Subject: [PATCH 0379/1088] Update _utils.py --- unsloth/models/_utils.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d5be8d97e7..db27eb8a84 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -490,15 +490,21 @@ def patch_tokenizer(model, tokenizer): tokenizer.pad_token = possible_pad_token if model is not None: model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - model.generation_config.update(pad_token_id = tokenizer.pad_token_id) + if getattr(model, "generation_config") is not None: + model.generation_config.update(pad_token_id = tokenizer.pad_token_id) else: if model is not None: if model.config.pad_token_id is None: model.config.update({"pad_token_id" : tokenizer.pad_token_id}) 
- model.generation_config.update(pad_token_id = tokenizer.pad_token_id) + if getattr(model, "generation_config") is not None: + model.generation_config.update(pad_token_id = tokenizer.pad_token_id) pass pass - model.generation_config.update(max_length = model.config.max_position_embeddings) + + if model is not None: + if getattr(model, "generation_config") is not None: + model.generation_config.update(max_length = model.config.max_position_embeddings) + return model, tokenizer pass From cad1146ff7c60f4afc10b9ab243304befdad7a0f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 7 Aug 2024 10:47:11 -0700 Subject: [PATCH 0380/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index db27eb8a84..3686717b2c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -467,7 +467,7 @@ def patch_tokenizer(model, tokenizer): check_pad_token = tokenizer(possible_pad_token, add_special_tokens = False).input_ids if len(check_pad_token) != 1: possible_pad_token = None - if check_pad_token[0] >= config.vocab_size: + if check_pad_token[0] >= model.config.vocab_size: possible_pad_token = None pass From e4c8ceacb3fca634f78e662873a01c37678fcb3e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 7 Aug 2024 10:48:39 -0700 Subject: [PATCH 0381/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3686717b2c..195fd5bb65 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -467,7 +467,7 @@ def patch_tokenizer(model, tokenizer): check_pad_token = tokenizer(possible_pad_token, add_special_tokens = False).input_ids if len(check_pad_token) != 1: possible_pad_token = None - if check_pad_token[0] >= model.config.vocab_size: + if model is not None and check_pad_token[0] >= model.config.vocab_size: possible_pad_token = None 
pass From 3bc804a9f9d603287f0a42a7169ed8cd40420f6b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 10 Aug 2024 19:59:40 -0700 Subject: [PATCH 0382/1088] Torch 2.4, Xformers>0.0.27, TRL>0.9, Python 3.12 + bug fixes (#902) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * Update save.py --- pyproject.toml | 133 ++++++++++++++++++++++++++++----------- unsloth/models/_utils.py | 60 +++++++++++++++--- unsloth/models/gemma2.py | 2 +- unsloth/models/llama.py | 31 ++++++--- unsloth/models/loader.py | 6 +- unsloth/save.py | 82 +++++++++++++++++++----- 6 files changed, 240 insertions(+), 74 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2cbe68f4a6..b61908a690 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,111 +43,154 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.26.1", - "trl>=0.7.9,<0.9.0", + 
"trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", "hf_transfer", ] cu118only = [ - "xformers==0.0.22.post7", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121only = [ - "xformers==0.0.22.post7", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch211 = [ - "xformers==0.0.23", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch211 = [ - "xformers==0.0.23", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch212 = [ - 
"xformers==0.0.23.post1", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch212 = [ - "xformers==0.0.23.post1", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch220 = [ - "xformers==0.0.24", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch220 = [ - "xformers==0.0.24", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch230 = [ - "xformers==0.0.26.post1", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch230 = [ - "xformers==0.0.26.post1", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", +] +cu118onlytorch240 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", +] +cu121onlytorch240 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] - cu118 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118only]", ] cu121 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121only]", ] cu118-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch211]", ] cu121-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", ] cu118-torch212 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch212]", ] cu121-torch212 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch212]", ] cu118-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch220]", ] cu121-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", ] cu118-torch230 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch230]", ] cu121-torch230 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch230]", ] +cu118-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu118onlytorch240]", +] +cu121-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch240]", +] kaggle = [ "unsloth[huggingface]", ] kaggle-new = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", ] conda = [ "unsloth[huggingface]", ] colab-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + 
"bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", ] colab-ampere-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", "packaging", "ninja", @@ -155,12 +198,12 @@ colab-ampere-torch211 = [ ] colab-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", ] colab-ampere-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", "packaging", "ninja", @@ -182,10 +225,10 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.26.1", - "trl>=0.7.9", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", "xformers<0.0.27", - "bitsandbytes", + "bitsandbytes>=0.43.3", "protobuf<4.0.0", ] colab = [ @@ -199,7 +242,7 @@ colab-ampere = [ ] cu118-ampere = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118only]", "packaging", "ninja", @@ -207,7 +250,7 @@ cu118-ampere = [ ] cu121-ampere = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121only]", "packaging", "ninja", @@ -215,7 +258,7 @@ cu121-ampere = [ ] cu118-ampere-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch211]", "packaging", "ninja", @@ -223,7 +266,7 @@ cu118-ampere-torch211 = [ ] cu121-ampere-torch211 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", "packaging", "ninja", @@ -231,7 +274,7 @@ cu121-ampere-torch211 = [ ] cu118-ampere-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch220]", "packaging", "ninja", @@ -239,7 +282,7 @@ cu118-ampere-torch220 = [ ] cu121-ampere-torch220 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", "packaging", "ninja", @@ -247,7 +290,7 @@ cu121-ampere-torch220 = [ ] cu118-ampere-torch230 = [ "unsloth[huggingface]", - "bitsandbytes", + 
"bitsandbytes>=0.43.3", "unsloth[cu118onlytorch230]", "packaging", "ninja", @@ -255,12 +298,28 @@ cu118-ampere-torch230 = [ ] cu121-ampere-torch230 = [ "unsloth[huggingface]", - "bitsandbytes", + "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch230]", "packaging", "ninja", "flash-attn>=2.6.3", ] +cu118-ampere-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu118onlytorch240]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu121-ampere-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch240]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 195fd5bb65..0c0057496b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -53,7 +53,9 @@ # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "xformers") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate") @@ -133,6 +135,28 @@ def patch_mistral_nemo_config(config): pass # ============================================= +# ============================================= +# Fix KeyError: 'Cache only has 0 layers, attempted to access layer with index 0' +import transformers.cache_utils +if hasattr(transformers.cache_utils, "DynamicCache") and \ + 
transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": + + source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) + start = source.find("def") + spaces = start*" " + source = source.split("\n") + source = "\n".join(x[start:] for x in source) + where = source.find("raise KeyError") + source = source[:where] + \ + f"if len(self) == 0:\n{spaces}{spaces}"\ + " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ + f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] + source = source.replace("__getitem__", "__cache_utils_getitem__", 1) + exec(source) + transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ +pass +# ============================================= + # ============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb @@ -192,7 +216,7 @@ def patch_mistral_nemo_config(config): # Get Xformers from xformers import __version__ as xformers_version # Temporarily disable 0.0.27 and higher - inference issues -if Version(xformers_version) >= Version("0.0.27"): +if False: #Version(xformers_version) >= Version("0.0.27"): raise ImportError( "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ "then press Disconnect Runtime and then Restart it.\n"\ @@ -200,10 +224,10 @@ def patch_mistral_nemo_config(config): "%%capture\n" "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ '\n'\ f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ - 'Please downgrade xformers via `pip install 
--force-reinstall "xformers<0.0.27"' + 'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"' ) pass @@ -217,10 +241,10 @@ def patch_mistral_nemo_config(config): f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ f"Please install xformers < 0.0.26 for torch = {torch_version}." ) -elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) >= Version("0.0.27"): +elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) > Version("0.0.27"): raise ImportError( f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.27 for torch = {torch_version}." + f"Please install xformers <= 0.0.27 for torch = {torch_version}." ) pass @@ -241,7 +265,8 @@ def patch_mistral_nemo_config(config): # Check TRL version from trl import __version__ as trl_version -if Version(trl_version) >= Version("0.9.0"): +# Unsloth now supports all TRL versions! +if False:#Version(trl_version) >= Version("0.9.0"): raise ImportError( "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ "then press Disconnect Runtime and then Restart it.\n"\ @@ -249,13 +274,32 @@ def patch_mistral_nemo_config(config): "%%capture\n" "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes\n'\ + '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ '\n'\ f"Otherwise in local machines, your TRL version of {trl_version} is too new.\n"\ - 'Please downgrade TRL via `pip install --force-reinstall "trl<0.9.0"' + 'Please downgrade TRL via `pip install --force-reinstall trl' ) pass +# ============================================= +# Fix new Xformers versions TypeError: Multiple dispatch failed for 
'torch._ops.aten.to.dtype_layout' +if Version(xformers_version) >= Version("0.0.27"): + import accelerate.utils.operations + if hasattr(accelerate.utils.operations, "send_to_device") and \ + accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": + from accelerate.utils.operations import * + send_to_device = inspect.getsource(accelerate.utils.operations.send_to_device) + send_to_device = re.sub( + r"([ ]{4,})return tensor\.to\(device\)", + r"\1try: return tensor.to(device)\n\1except: return tensor", + send_to_device, + ).replace("def send_to_device", "def _fixed_send_to_device") + exec(send_to_device) + accelerate.utils.operations.send_to_device = _fixed_send_to_device + pass +pass +# ============================================= + # ============================================= # Torch compile settings diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 1cbaf5b169..ea9f53e7db 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -156,6 +156,7 @@ def Gemma2Attention_fast_forward( ) A = A.reshape(bsz, q_len, n_heads*head_dim) else: + mask = causal_mask if attention_mask is None else attention_mask A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) pass A = self.apply_o(self, A) @@ -413,7 +414,6 @@ def Gemma2Model_fast_forward_inference( SWA = attention_mask GA = attention_mask pass - next_decoder_cache = [] for idx, decoder_layer in enumerate(self.model.layers): diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e300e07e0c..2a07da6ce5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -419,7 +419,7 @@ def LlamaAttention_fast_forward( def LlamaDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = 
None, @@ -505,7 +505,7 @@ def LlamaModel_fast_forward( return_dict: Optional[bool] = None, *args, **kwargs, ) -> Union[Tuple, BaseModelOutputWithPast]: - + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions assert(output_attentions is False) output_hidden_states = ( @@ -682,12 +682,27 @@ def LlamaModel_fast_forward( # Gemma2 has alternating SWA and global attn - if IS_GEMMA2 and not hasattr(self, "SWA_mask"): - if HAS_FLASH_ATTENTION_SOFTCAPPING: + if IS_GEMMA2: + if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: self.SWA_mask = True self.GA_mask = False - else: - n = self.config.max_position_embeddings + elif attention_mask is not None: + self.SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window = self.config.sliding_window, + ) + self.GA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window = None, + ) + elif not hasattr(self, "SWA_mask"): + n = self.max_seq_length # self.config.max_position_embeddings # masked_fill is making stuff slower! # self. 
GA_mask = create_boolean_mask(n = n, sliding_window = 0) # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) @@ -870,7 +885,7 @@ def _CausalLM_fast_forward( ) else: causal_mask = xformers.attn_bias.LowerTriangularMask() - + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states @@ -879,7 +894,6 @@ def _CausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None - outputs = self.model( input_ids=input_ids, causal_mask=causal_mask, @@ -893,7 +907,6 @@ def _CausalLM_fast_forward( return_dict=return_dict, ) pass - hidden_states = outputs[0] bsz, q_len, hd = hidden_states.shape lm_head = self.lm_head.weight diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 47152d6764..cce22aebf6 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -93,7 +93,7 @@ def _get_new_mapper(): pass -def _get_model_name(model_name, load_in_4bit = True): +def get_model_name(model_name, load_in_4bit = True): new_model_name = __get_model_name( model_name = model_name, load_in_4bit = load_in_4bit, @@ -145,7 +145,7 @@ def from_pretrained( token = os.environ["HUGGINGFACE_TOKEN"] old_model_name = model_name - model_name = _get_model_name(model_name, load_in_4bit) + model_name = get_model_name(model_name, load_in_4bit) # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled @@ -192,7 +192,7 @@ def from_pretrained( # Get base model for PEFT: if is_peft: # Check base model again for PEFT - model_name = _get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) model_config = 
AutoConfig.from_pretrained(model_name, token = token, revision = revision) pass diff --git a/unsloth/save.py b/unsloth/save.py index a5904efc1b..f45d8062ad 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -28,12 +28,14 @@ import re from transformers.models.llama.modeling_llama import logger from .tokenizer_utils import fix_sentencepiece_gguf +from huggingface_hub import HfApi __all__ = [ "print_quantization_methods", "unsloth_save_model", "save_to_gguf", "patch_saving_functions", + "create_huggingface_repo", ] # Check environments @@ -207,8 +209,9 @@ def unsloth_save_model( ): if token is None and "HF_TOKEN" in os.environ: token = os.environ["HF_TOKEN"] - - if token is None and "HUGGINGFACE_TOKEN" in os.environ: + elif token is None and "hf_token" in os.environ: + token = os.environ["hf_token"] + elif token is None and "HUGGINGFACE_TOKEN" in os.environ: token = os.environ["HUGGINGFACE_TOKEN"] if commit_message is None: commit_message = "" @@ -555,7 +558,8 @@ def unsloth_save_model( logger.warning_once(f"We will save to Disk and not RAM now.") filename = os.path.join(temporary_location, f"{name}.pt") torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) - state_dict[name] = torch.load(filename, map_location = "cpu", mmap = True) + # weights_only = True weirdly fails? + state_dict[name] = torch.load(filename, map_location = "cpu", mmap = True, weights_only = False) pass for item in LLAMA_LAYERNORMS: try: @@ -675,7 +679,6 @@ def unsloth_save_model( # Now manually go through each file and upload them manually! filenames = os.listdir(new_save_directory) - from huggingface_hub import HfApi hf_api = HfApi(token = save_pretrained_settings["token"]) print("Unsloth: Uploading all files... 
Please wait...") @@ -1312,6 +1315,49 @@ def _determine_username(save_directory, old_username, token): pass +def create_huggingface_repo( + model, + save_directory, + token = None, + private = False, +): + if token is None and "HF_TOKEN" in os.environ: + token = os.environ["HF_TOKEN"] + elif token is None and "hf_token" in os.environ: + token = os.environ["hf_token"] + elif token is None and "HUGGINGFACE_TOKEN" in os.environ: + token = os.environ["HUGGINGFACE_TOKEN"] + pass + save_directory, username = _determine_username(save_directory, "", token) + + from huggingface_hub import create_repo + try: + create_repo( + repo_id = save_directory, + token = token, + repo_type = "model", + exist_ok = False, + private = private, + ) + + # Create model card + from huggingface_hub import ModelCard + content = MODEL_CARD.format( + username = username, + base_model = model.config._name_or_path, + model_type = model.config.model_type, + method = "", + extra = "unsloth", + ) + card = ModelCard(content) + card.push_to_hub(save_directory, token = token) + except: + pass + hf_api = HfApi(token = token) + return save_directory, hf_api +pass + + def upload_to_huggingface( model, save_directory, @@ -1321,6 +1367,7 @@ def upload_to_huggingface( file_location = None, old_username = None, private = None, + create_config = True, ): save_directory, username = _determine_username(save_directory, old_username, token) @@ -1350,7 +1397,6 @@ def upload_to_huggingface( if file_location is not None: # Now upload file - from huggingface_hub import HfApi hf_api = HfApi(token = token) if "/" in file_location: @@ -1372,6 +1418,8 @@ def upload_to_huggingface( repo_type = "model", commit_message = "(Trained with Unsloth)", ) + pass + pass hf_api.upload_file( path_or_fileobj = file_location, @@ -1382,18 +1430,20 @@ def upload_to_huggingface( ) # We also upload a config.json file - import json - with open("_temporary_unsloth_config.json", "w") as file: - json.dump({"model_type" : model.config.model_type}, 
file, indent = 4) + if create_config: + import json + with open("_temporary_unsloth_config.json", "w") as file: + json.dump({"model_type" : model.config.model_type}, file, indent = 4) + pass + hf_api.upload_file( + path_or_fileobj = "_temporary_unsloth_config.json", + path_in_repo = "config.json", + repo_id = save_directory, + repo_type = "model", + commit_message = "(Trained with Unsloth)", + ) + os.remove("_temporary_unsloth_config.json") pass - hf_api.upload_file( - path_or_fileobj = "_temporary_unsloth_config.json", - path_in_repo = "config.json", - repo_id = save_directory, - repo_type = "model", - commit_message = "(Trained with Unsloth)", - ) - os.remove("_temporary_unsloth_config.json") pass return username pass From 3781a03903c6a24c929737f49a1f73b25a517ac6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 11 Aug 2024 18:26:20 -0700 Subject: [PATCH 0383/1088] Fix DPO stats (#906) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * 
Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * Update save.py * Update llama.py * Update cross_entropy_loss.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py --- unsloth/kernels/cross_entropy_loss.py | 1 + unsloth/models/dpo.py | 16 +++++++++++++--- unsloth/models/llama.py | 1 + 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 6074a51538..b8473e60c7 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -303,6 +303,7 @@ def backward(ctx, dlosses): pass +@torch._disable_dynamo def fast_cross_entropy_loss(logits, labels, logit_softcapping = 0): """ Arguments: diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index b7c7305bb3..e7074350c3 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+__all__ = [ + "PatchDPOTrainer", +] + try: from transformers.utils.notebook import ( IntervalStrategy, @@ -22,6 +26,12 @@ except: HAS_NOTEBOOK = False pass +import torch +from ._utils import torch_compile_options +import inspect +import torch.nn as nn +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union + DPOTrainer_metrics = [ "rewards/chosen", @@ -37,11 +47,11 @@ def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): - self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" + self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step" self.training_loss = 0 self.last_log = 0 column_names = [self.first_column] + ["Training Loss"] - if args.evaluation_strategy != IntervalStrategy.NO: + if args.eval_strategy != IntervalStrategy.NO: column_names.append("Validation Loss") column_names += [x.replace("/", " / ") for x in DPOTrainer_metrics] self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) @@ -50,7 +60,7 @@ def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwargs): # Only for when there is no evaluation - if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: + if args.eval_strategy == IntervalStrategy.NO and "loss" in logs: values = {"Training Loss": logs["loss"]} for metric in DPOTrainer_metrics: values[metric.replace("/", " / ")] = logs[metric] diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2a07da6ce5..6f1bb62c18 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -961,6 +961,7 @@ def _CausalLM_fast_forward( pass +@torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, input_ids=None, From a64b8f648ad067f9745253161e73a0367bf0ca5a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 13 Aug 2024 17:54:02 -0700 Subject: [PATCH 0384/1088] Fix Chat 
Templates (#916) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * Update save.py * Update llama.py * Update cross_entropy_loss.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Providing more flexibility for users to customize their llama when using LoRA (#910) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * return model * Update tokenizer_utils.py * Update chat_templates.py * Update tokenizer_utils.py --------- 
Co-authored-by: Po-Lung Wang --- unsloth/chat_templates.py | 222 +++++++++++++++++++++++++++++++++++-- unsloth/models/llama.py | 17 ++- unsloth/tokenizer_utils.py | 28 ++++- 3 files changed, 256 insertions(+), 11 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 5bd66bae09..07e79b180b 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -508,6 +508,200 @@ CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) pass +# =========================================== Llama-3.1 +""" +No trimming in Llama 3.1 Instruct! +Also an extra newline for Cutting Knowledge Date +See https://colab.research.google.com/drive/1Xpqq5xpIgO-B00MQ-UccYMwN2J8QFgBM?usp=sharing + +Also should be + +import datetime +tokenizer.apply_chat_template( + messages, + add_generation_prompt = True, + tokenize = False, + date_string = datetime.today().strftime("%d %B %Y")), +) +""" + +llama31_template = \ +"""{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not date_string is defined %} + {%- set date_string = "26 July 2024" %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- This block extracts the system message, so we can slot it into the right place. 
#} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content'] %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} + +{#- System message + builtin tools #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if builtin_tools is defined or tools is not none %} + {{- "Environment: ipython\n" }} +{%- endif %} +{%- if builtin_tools is defined %} + {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n" }} +{{- "Today Date: " + date_string + "\n\n" }} +{%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content'] %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt.\n\n" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' 
}} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {%- if builtin_tools is defined and tool_call.name in builtin_tools %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- "<|python_tag|>" + tool_call.name + ".call(" }} + {%- for arg_name, arg_val in tool_call.arguments | items %} + {{- arg_name + '="' + arg_val + '"' }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- else %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- endif %} + {%- if builtin_tools is defined %} + {#- This means we're in ipython mode #} + {{- "<|eom_id|>" }} + {%- else %} + {{- "<|eot_id|>" }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} +""" +pass + +# Ollama from https://ollama.com/library/llama3.1 (needs updating!) 
+llama31_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{ if .Messages }} +{{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|> +{{- if .System }} + +{{ .System }} +{{- end }} +{{- if .Tools }} + +You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the orginal use question. +{{- end }} +{{- end }}<|eot_id|> +{{- range $i, $_ := .Messages }} +{{- $last := eq (len (slice $.Messages $i)) 1 }} +{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|> +{{- if and $.Tools $last }} + +Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. + +Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables. + +{{ $.Tools }} +{{- end }} + +{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|> + +{{ end }} +{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|> +{{- if .ToolCalls }} + +{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }} +{{- else }} + +{{ .Content }}{{ if not $last }}<|eot_id|>{{ end }} +{{- end }} +{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|> + +{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|> + +{{ end }} +{{- end }} +{{- end }} +{{- else }} +{{- if .System }}<|start_header_id|>system<|end_header_id|> + +{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> + +{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> + +{{ end }}{{ .Response }}{{ if .Response }}<|eot_id|>{{ end }}""" +PARAMETER stop "<|start_header_id|>" +PARAMETER stop "<|end_header_id|>" +PARAMETER stop "<|eot_id|>" +PARAMETER stop "<|eom_id|>" +''' + +llama31_template_eos_token = "eos_token" 
+CHAT_TEMPLATES["llama-3.1"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +CHAT_TEMPLATES["llama-31"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +pass + def get_chat_template( tokenizer, @@ -680,21 +874,33 @@ def get_chat_template( ) pass - # For ShareGPT role -> from and content -> value - chat_template = chat_template\ - .replace("'role'", "'" + mapping["role"] + "'")\ - .replace("'content'", "'" + mapping["content"] + "'")\ - .replace("'user'", "'" + mapping["user"] + "'")\ - .replace("'assistant'", "'" + mapping["assistant"] + "'") - # Careful on Gemma # bos_token is a must or else losses become too high if IS_GEMMA and not chat_template.startswith("{{ bos_token }}"): chat_template = "{{ bos_token }}" + chat_template pass + # For ShareGPT role -> from and content -> value + new_chat_template = chat_template\ + .replace("'role'", "'" + mapping["role"] + "'")\ + .replace("'content'", "'" + mapping["content"] + "'")\ + .replace("'user'", "'" + mapping["user"] + "'")\ + .replace("'assistant'", "'" + mapping["assistant"] + "'") + _, tokenizer = patch_tokenizer(model = None, tokenizer = tokenizer) - tokenizer.padding_side = old_padding_side + tokenizer.padding_side = old_padding_side + + # If not normal HF, we add a check to make old templates work + if mapping != {"role" : "role", "content" : "content", "user" : "user", "assistant" : "assistant"}: + chat_template = \ + "{% if 'role' in messages[0] %}" + \ + chat_template + \ + "{% else %}" + \ + new_chat_template + \ + "{% endif %}" + else: + chat_template = new_chat_template + pass tokenizer.chat_template = chat_template # Also fix up other tokens diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6f1bb62c18..6a111c9344 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1873,8 +1873,17 @@ def get_peft_model( else: modules_to_save.append("embed_tokens") else: - assert(module in accepted_modules) - 
final_modules.append(module) + try: + assert(module in accepted_modules) + final_modules.append(module) + except AssertionError as e: + final_modules.append(module) + print( + "Unsloth: You added custom modules, but Unsloth hasn't optimized for this.\n"\ + "Beware - your finetuning might be noticeably slower!" + ) + pass + pass pass # Check if we added new tokens! @@ -2253,6 +2262,8 @@ def for_inference(model): if hasattr(internal_model, "_saved_temp_tokenizer"): internal_model._saved_temp_tokenizer.padding_side = "left" pass + + return model pass @@ -2291,6 +2302,8 @@ def for_training(model, use_gradient_checkpointing = True): if hasattr(internal_model, "_saved_temp_tokenizer"): internal_model._saved_temp_tokenizer.padding_side = "right" pass + + return model pass pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index c67f82c2ce..9c0bc1c510 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -597,8 +597,34 @@ def fix_chat_template(tokenizer): if chat_template is None: return None ### 1. 
Check if add_generation_prompt works + # Check for ShareGPT style first + is_sharegpt = None + try: + messages = [ + {"role": "user", "content": "Who are you?"}, + ] + tokenizer.apply_chat_template(messages, add_generation_prompt = False, tokenize = False) + is_sharegpt = False + except: + try: + messages = [ + {"from": "human", "value": "Who are you?"}, + ] + tokenizer.apply_chat_template(messages, add_generation_prompt = False, tokenize = False) + is_sharegpt = True + except: + is_sharegpt = None + pass + pass + + # Not ShareGPT or HF style - just return + if is_sharegpt is None: return chat_template + + # Tokenize messages = [ - {"role": "user", "content": "Who are you?"}, + {"role": "user", "content": "Who are you?"} \ + if not is_sharegpt else \ + {"from": "human", "value": "Who are you?"} ] no = tokenizer.apply_chat_template(messages, add_generation_prompt = False, tokenize = False) yes = tokenizer.apply_chat_template(messages, add_generation_prompt = True, tokenize = False) From a4ab920de9282602d587a40df828674bfa9d650e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 14 Aug 2024 00:58:02 -0700 Subject: [PATCH 0385/1088] Fix chat templates (#917) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * 
Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * Update save.py * Update llama.py * Update cross_entropy_loss.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Providing more flexibility for users to customize their llama when using LoRA (#910) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * return model * Update tokenizer_utils.py * Update chat_templates.py * Update tokenizer_utils.py * Train on completions --------- Co-authored-by: Po-Lung Wang --- unsloth/chat_templates.py | 165 +++++++++++++++++++++++++++++++------- 1 file changed, 138 insertions(+), 27 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 07e79b180b..7070524e0f 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1458,9 +1458,10 @@ def construct_chat_template( \ ollama_eos = '\n'.join(f'PARAMETER stop "{eos}"' for eos in ollama_eos) # Ollama modelfile + part = '"""' modelfile = 'FROM {__FILE_LOCATION__}\n\n'\ - 'TEMPLATE """' + system_modelfile + input_modelfile + output_modelfile + \ - '"""\n\n' + ollama_eos + 'TEMPLATE ' + part + system_modelfile + input_modelfile + output_modelfile + \ + part + '\n\n' + ollama_eos # HF Jinja Chat template def process(part, which, content = "message['content']"): 
@@ -1659,6 +1660,70 @@ def formatting_prompts_func(examples): pass +# From https://www.geeksforgeeks.org/longest-common-substring-array-strings/ +# Longest Common Substring in an Array of Strings +def _longest_common_substring(arr): + n = len(arr) + s = arr[0] + l = len(s) + res = "" + for i in range(l): + for j in range(i + 1, l + 1): + stem = s[i:j] + k = 1 + for k in range(1, n): + if stem not in arr[k]: + break + if (k + 1 == n and len(res) < len(stem)): + res = stem + return res +pass + + +def _find_common_token_ids(component, tokenizer): + """ + \n### User:\n\n + \n\n### User:\n\n + etc + we need to find the middle most repeatted part. + Tokenizers can tokenize newlines or spaces as 1 token! + """ + right_text = "" + if component.endswith (" "): right_text = " " + elif component.endswith("\n"): right_text = "\n" + left_text = "" + if component.startswith (" "): left_text = " " + elif component.startswith("\n"): left_text = "\n" + stripped = component.strip() + + # Add current pieces and also newlines + all_input_ids = [] + for left in range(3): + for right in range(3): + x = left*left_text + stripped + right*right_text + x = tokenizer(x, add_special_tokens = False).input_ids + all_input_ids.append(x) + + x = left*"\n" + stripped + right*"\n" + x = tokenizer(x, add_special_tokens = False).input_ids + all_input_ids.append(x) + pass + pass + substring = _longest_common_substring([str(x + [0]) for x in all_input_ids]) + substring = substring.split(", ")[:-1] + substring = [int(x) for x in substring] + + # Also get rest of tokenized string + original = tokenizer(component, add_special_tokens = False).input_ids + # Get optional left and right + for j in range(len(original)): + if original[j : j + len(substring)] == substring: break + optional_left = original[:j] + optional_right = original[j+len(substring):] + return substring, optional_left, optional_right +pass + + def train_on_responses_only( trainer, instruction_part = None, @@ -1685,41 +1750,87 @@ def 
train_on_responses_only( response_part = tokenizer._unsloth_output_part pass - instruction_ids = tokenizer(instruction_part, add_special_tokens = False).input_ids - response_ids = tokenizer(response_part, add_special_tokens = False).input_ids + # Get most common tokens since tokenizers can tokenize stuff differently! + Q_must, Q_left, Q_right = _find_common_token_ids(instruction_part, tokenizer) + A_must, A_left, A_right = _find_common_token_ids(response_part, tokenizer) - instruction_length = len(instruction_ids) - response_length = len(response_ids) - max_length = max(instruction_length, response_length) + # Store some temporary stuff + A_first = A_must[0] + len_A_must = len(A_must) + A_left_reversed = A_left[::-1] + A_right_forward = A_right + + Q_first = Q_must[0] + len_Q_must = len(Q_must) + Q_left_reversed = Q_left[::-1] + Q_right_forward = Q_right def _train_on_responses_only(examples): input_ids_ = examples["input_ids"] all_labels = [] for input_ids in input_ids_: - - labels = [-100] * len(input_ids) - m = len(input_ids) - max_length - first_response = response_ids[0] - first_instruction = instruction_ids[0] + n = len(input_ids) + labels = [-100] * n + n_minus_1 = n - 1 j = 0 - while j < m: - if input_ids[j] == first_response: - if input_ids[j : j+response_length] == response_ids: - j = j + response_length - start = j - while j < m: - if input_ids[j] == first_instruction and input_ids[j : j+instruction_length] == instruction_ids: - j = j + instruction_length - labels[start : j] = input_ids[start : j] - break - elif j == (m-1): - j = m - labels[start:] = input_ids[start:] - break + while j < n: + # Find + if (input_ids[j] == A_first) and \ + (input_ids[j : (k := j + len_A_must)] == A_must): + + # Now backtrack to get previous optional tokens + for optional_left in A_left_reversed: + if j < 1: break + if optional_left == input_ids[j-1]: j -= 1 + else: break + pass + # And forwards look as well + for optional_right in A_right_forward: + if k >= n_minus_1: 
break + if optional_right == input_ids[k+1]: k += 1 + else: break + pass + # assistant_j = j + assistant_k = k + + j = assistant_k + # Given , now find next user + while j < n: + # Find + # Also accept last final item if assistant is the last turn + if (j == n_minus_1) or \ + ((input_ids[j] == Q_first) and \ + (input_ids[j : (k := j + len_Q_must)] == Q_must)): + + # Now backtrack to get previous optional tokens + for optional_left in Q_left_reversed: + if j < 1: break + if optional_left == input_ids[j-1]: j -= 1 + else: break + pass + # And forwards look as well + for optional_right in Q_right_forward: + if k >= n_minus_1: break + if optional_right == input_ids[k+1]: k += 1 + else: break + pass + user_j = j + # Account for last item + if user_j != n_minus_1: + # user_k = k + # j = user_k + j = k + else: + user_j = n + k = n pass - j += 1 + # Now copy input_ids to labels + labels[assistant_k : user_j] = input_ids[assistant_k : user_j] + # print(assistant_j, assistant_k, user_j, user_k) + break pass + j += 1 pass pass j += 1 From 5393e9e00a1e2019144698d90035ae21e03325c7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 15 Aug 2024 00:31:30 -0700 Subject: [PATCH 0386/1088] Bug Fixes (#920) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * Update save.py * Update llama.py * Update cross_entropy_loss.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Providing more flexibility for users to customize their llama when using LoRA (#910) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * return model * Update tokenizer_utils.py * Update chat_templates.py * Update tokenizer_utils.py * Train on completions * load_in_4bit=False broken --------- Co-authored-by: Po-Lung Wang --- unsloth/models/llama.py | 6 +++++- unsloth/models/loader.py | 19 +++++++++++-------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6a111c9344..6139115f67 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1387,11 +1387,15 @@ def from_pretrained( # RoPE Scaling's max_position_embeddings must be updated max_position_embeddings = max(max_seq_length, model_max_seq_length) kwargs.pop("attn_implementation", None); # No need since we auto call it + + # Cannot be None, since HF now checks for the config + if load_in_4bit: kwargs["quantization_config"] = bnb_config + model = AutoModelForCausalLM.from_pretrained( model_name, device_map = 
device_map, torch_dtype = dtype, - quantization_config = bnb_config, + # quantization_config = bnb_config, token = token, max_position_embeddings = max_position_embeddings, trust_remote_code = trust_remote_code, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index cce22aebf6..ad1098edac 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -42,10 +42,11 @@ def __get_model_name( INT_TO_FLOAT_MAPPER = None, FLOAT_TO_INT_MAPPER = None, ): - model_name = str(model_name) - if not SUPPORTS_FOURBIT and model_name.lower() in INT_TO_FLOAT_MAPPER: - model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] + lower_model_name = model_name.lower() + + if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: + model_name = INT_TO_FLOAT_MAPPER[lower_model_name] logger.warning_once( f"Unsloth: Your transformers version of {transformers_version} does not support native "\ f"4bit loading.\nThe minimum required version is 4.37.\n"\ @@ -55,16 +56,18 @@ def __get_model_name( ) return model_name - elif not load_in_4bit and model_name.lower() in INT_TO_FLOAT_MAPPER: - new_model_name = INT_TO_FLOAT_MAPPER[model_name.lower()] + elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: + new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
# ) return new_model_name - - elif load_in_4bit and SUPPORTS_FOURBIT and model_name.lower() in FLOAT_TO_INT_MAPPER: - new_model_name = FLOAT_TO_INT_MAPPER[model_name.lower()] + elif not load_in_4bit and lower_model_name in FLOAT_TO_INT_MAPPER: + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] + return new_model_name + elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ # f"We shall load `{new_model_name}` for 4x faster loading." From 53cd1e778133efa9721731834fb06589dc95b719 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 15 Aug 2024 01:15:35 -0700 Subject: [PATCH 0387/1088] Fix mapping (#921) * Update pyproject.toml * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * fix_tokenizer * Update tokenizer_utils.py * Update tokenizer_utils.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update loader.py * Update pyproject.toml * Update _utils.py * Update gemma2.py * Update gemma2.py * Update _utils.py * gemma 2 mask * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Torch 2.4 Xformers 0.0.27post2 * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Gemma 2 fixes * Update gemma2.py * Update llama.py * Update llama.py * Update save.py * 
Update save.py * Update llama.py * Update cross_entropy_loss.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Update dpo.py * Providing more flexibility for users to customize their llama when using LoRA (#910) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update chat_templates.py * return model * Update tokenizer_utils.py * Update chat_templates.py * Update tokenizer_utils.py * Train on completions * load_in_4bit=False broken * Update llama.py * MAP_TO_UNSLOTH_16bit * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update mapper.py * Update mapper.py * works! --------- Co-authored-by: Po-Lung Wang --- unsloth/models/llama.py | 2 +- unsloth/models/loader.py | 39 +++++++++++++++++++++++++-------------- unsloth/models/mapper.py | 13 +++++++++++-- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6139115f67..6a23335c8c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1390,7 +1390,7 @@ def from_pretrained( # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config - + model = AutoModelForCausalLM.from_pretrained( model_name, device_map = device_map, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index ad1098edac..e260017fb9 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -19,7 +19,7 @@ from transformers import AutoConfig from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel -from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER 
+from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit import os # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! @@ -39,13 +39,15 @@ def __get_model_name( model_name, load_in_4bit = True, - INT_TO_FLOAT_MAPPER = None, - FLOAT_TO_INT_MAPPER = None, + INT_TO_FLOAT_MAPPER = None, + FLOAT_TO_INT_MAPPER = None, + MAP_TO_UNSLOTH_16bit = None, ): model_name = str(model_name) lower_model_name = model_name.lower() if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: + model_name = INT_TO_FLOAT_MAPPER[lower_model_name] logger.warning_once( f"Unsloth: Your transformers version of {transformers_version} does not support native "\ @@ -57,16 +59,21 @@ def __get_model_name( return model_name elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: + new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
# ) return new_model_name - elif not load_in_4bit and lower_model_name in FLOAT_TO_INT_MAPPER: - new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] + + elif not load_in_4bit and lower_model_name in MAP_TO_UNSLOTH_16bit: + + new_model_name = MAP_TO_UNSLOTH_16bit[lower_model_name] return new_model_name + elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ @@ -86,12 +93,14 @@ def _get_new_mapper(): with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] new_mapper = new_mapper\ - .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ - .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER") + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")\ + .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit") + exec(new_mapper, globals()) - return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit except: - return {}, {} + return {}, {}, {} pass pass @@ -100,17 +109,19 @@ def get_model_name(model_name, load_in_4bit = True): new_model_name = __get_model_name( model_name = model_name, load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit, ) if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): # Try checking if a new Unsloth version allows it! 
- NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER = _get_new_mapper() + NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit = _get_new_mapper() upgraded_model_name = __get_model_name( model_name = model_name, load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = NEW_MAP_TO_UNSLOTH_16bit, ) if upgraded_model_name is not None: raise NotImplementedError( diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 57ba676585..b8259a073c 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -251,8 +251,9 @@ ), } -INT_TO_FLOAT_MAPPER = {} -FLOAT_TO_INT_MAPPER = {} +INT_TO_FLOAT_MAPPER = {} +FLOAT_TO_INT_MAPPER = {} +MAP_TO_UNSLOTH_16bit = {} for key, values in __INT_TO_FLOAT_MAPPER.items(): INT_TO_FLOAT_MAPPER[key] = values[0] @@ -261,6 +262,14 @@ FLOAT_TO_INT_MAPPER[value] = key pass + # Map to Unsloth version for 16bit versions + if len(values) == 2: + if values[0].startswith("unsloth"): + MAP_TO_UNSLOTH_16bit[values[1]] = values[0] + MAP_TO_UNSLOTH_16bit[values[1].lower()] = values[0] + pass + pass + # Get lowercased lowered_key = key.lower() INT_TO_FLOAT_MAPPER[lowered_key] = values[0].lower() From 8be73b10860fee8ac3ab84c88548de2392948492 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 15 Aug 2024 15:04:46 -0700 Subject: [PATCH 0388/1088] Bug fixes --- unsloth/__init__.py | 15 ++++++--------- unsloth/models/loader.py | 24 ++++++++++++++++++++---- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index db54c9a169..dd526dc3cc 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -11,10 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import os -import warnings -import importlib -import sys + +import warnings, importlib, sys from packaging.version import Version # # Define a list of modules to check @@ -60,9 +58,8 @@ "We have some installation instructions on our Github page.") pass -import os, re +import os, re, subprocess, inspect import numpy as np -import subprocess # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) keynames = "\n" + "\n".join(os.environ.keys()) @@ -83,12 +80,12 @@ del os.environ["PYTORCH_CUDA_ALLOC_CONF"] pass -# Torch 2.5 has including_emulation +# Torch 2.4 has including_emulation major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = (major_version >= 8) -if (major_torch == 2) and (minor_torch >= 5): - old_is_bf16_supported = torch.cuda.is_bf16_supported +old_is_bf16_supported = torch.cuda.is_bf16_supported +if "including_emulation" in str(inspect.signature(old_is_bf16_supported)): def is_bf16_supported(including_emulation = False): return old_is_bf16_supported(including_emulation) torch.cuda.is_bf16_supported = is_bf16_supported diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index e260017fb9..02ed00f5cd 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -169,13 +169,23 @@ def from_pretrained( autoconfig_error = None peft_error = None try: - model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) is_model = True except Exception as error: autoconfig_error = str(error) is_model = False try: - peft_config = PeftConfig .from_pretrained(model_name, token = token, revision = revision) + peft_config = PeftConfig.from_pretrained( + model_name, + token = token, + revision = revision, + 
trust_remote_code = trust_remote_code, + ) is_peft = True except Exception as error: peft_error = str(error) @@ -207,7 +217,12 @@ def from_pretrained( if is_peft: # Check base model again for PEFT model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) - model_config = AutoConfig.from_pretrained(model_name, token = token, revision = revision) + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) pass if not was_disabled: enable_progress_bars() @@ -340,10 +355,11 @@ def from_pretrained( token = token, revision = revision, is_trainable = True, + trust_remote_code = trust_remote_code, ) # Patch it as well! model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) pass return model, tokenizer pass -pass +pass \ No newline at end of file From 8b80820b8b9f13ab4ecca089ec6ff92c58530bea Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 15 Aug 2024 15:07:42 -0700 Subject: [PATCH 0389/1088] Update __init__.py --- unsloth/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index dd526dc3cc..f6ed999530 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -14,6 +14,8 @@ import warnings, importlib, sys from packaging.version import Version +import os, re, subprocess, inspect +import numpy as np # # Define a list of modules to check # MODULES_TO_CHECK = ["bitsandbytes"] @@ -58,9 +60,6 @@ "We have some installation instructions on our Github page.") pass -import os, re, subprocess, inspect -import numpy as np - # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) keynames = "\n" + "\n".join(os.environ.keys()) if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: From 5e2cf1c51cab723000d0ba33a863cad8c4642a7d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 16 Aug 2024 19:28:43 -0700 Subject: [PATCH 0390/1088] untrained tokens llama 3.1 
base --- unsloth/chat_templates.py | 4 ++-- unsloth/tokenizer_utils.py | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 7070524e0f..82f6aba148 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -876,7 +876,7 @@ def get_chat_template( # Careful on Gemma # bos_token is a must or else losses become too high - if IS_GEMMA and not chat_template.startswith("{{ bos_token }}"): + if IS_GEMMA and not chat_template.startswith(("{{ bos_token }}", "{{- bos_token }}")): chat_template = "{{ bos_token }}" + chat_template pass @@ -1553,7 +1553,7 @@ def process(part, which, content = "message['content']"): # Check jinja tempate for bos if always_bos_token: - if not jinja_template.startswith("{{ bos_token }}"): + if not jinja_template.startswith(("{{ bos_token }}", "{{- bos_token }}")): jinja_template = "{{ bos_token }}" + jinja_template pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 9c0bc1c510..a4f0b33be4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -827,7 +827,27 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # Get untrained tokens indicator_untrained1 = torch.amax(embedding_matrix, axis = 1) <= eps # Check lm_head as well + + # Does NOT work for Llama 3.1!! 
indicator_untrained2 = torch.amax(lm_head_matrix, axis = 1) <= eps + + # We instead check for repeated vectors + lm_head_where = torch.where(indicator_untrained1)[0] + lm_head_bad = lm_head_matrix[lm_head_where] + lm_head_bad = lm_head_bad.cpu().numpy().round(3) + from collections import Counter + counter = Counter() + for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 + counter = Counter({k: c for k, c in counter.items() if c >= 2}) + + lm_head_where = lm_head_where.cpu().numpy() + final_bad_lm_head = [] + for j, row in enumerate(lm_head_bad): + if hash(row.data.tobytes()) in counter: + final_bad_lm_head.append(lm_head_where[j]) + indicator_untrained2 = indicator_untrained2 | torch.zeros_like(indicator_untrained2) + indicator_untrained2[final_bad_lm_head] = True + # Combine both checks indicator_untrained = indicator_untrained1 & indicator_untrained2 From c22162b402a0e8cc8a5580f232e39a005fad02f1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 16 Aug 2024 19:57:19 -0700 Subject: [PATCH 0391/1088] untrained tokens llama 3.1 base (#929) --- unsloth/chat_templates.py | 4 ++-- unsloth/tokenizer_utils.py | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 7070524e0f..82f6aba148 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -876,7 +876,7 @@ def get_chat_template( # Careful on Gemma # bos_token is a must or else losses become too high - if IS_GEMMA and not chat_template.startswith("{{ bos_token }}"): + if IS_GEMMA and not chat_template.startswith(("{{ bos_token }}", "{{- bos_token }}")): chat_template = "{{ bos_token }}" + chat_template pass @@ -1553,7 +1553,7 @@ def process(part, which, content = "message['content']"): # Check jinja tempate for bos if always_bos_token: - if not jinja_template.startswith("{{ bos_token }}"): + if not jinja_template.startswith(("{{ bos_token }}", "{{- bos_token }}")): jinja_template = "{{ bos_token 
}}" + jinja_template pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 9c0bc1c510..a4f0b33be4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -827,7 +827,27 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # Get untrained tokens indicator_untrained1 = torch.amax(embedding_matrix, axis = 1) <= eps # Check lm_head as well + + # Does NOT work for Llama 3.1!! indicator_untrained2 = torch.amax(lm_head_matrix, axis = 1) <= eps + + # We instead check for repeated vectors + lm_head_where = torch.where(indicator_untrained1)[0] + lm_head_bad = lm_head_matrix[lm_head_where] + lm_head_bad = lm_head_bad.cpu().numpy().round(3) + from collections import Counter + counter = Counter() + for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 + counter = Counter({k: c for k, c in counter.items() if c >= 2}) + + lm_head_where = lm_head_where.cpu().numpy() + final_bad_lm_head = [] + for j, row in enumerate(lm_head_bad): + if hash(row.data.tobytes()) in counter: + final_bad_lm_head.append(lm_head_where[j]) + indicator_untrained2 = indicator_untrained2 | torch.zeros_like(indicator_untrained2) + indicator_untrained2[final_bad_lm_head] = True + # Combine both checks indicator_untrained = indicator_untrained1 & indicator_untrained2 From 9cb5c2eca4c7b5ea8f2a3fb3048d0b376589296e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 16 Aug 2024 23:38:02 -0700 Subject: [PATCH 0392/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index a4f0b33be4..38d5949f4b 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -834,7 +834,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # We instead check for repeated vectors lm_head_where = torch.where(indicator_untrained1)[0] lm_head_bad = lm_head_matrix[lm_head_where] - lm_head_bad = 
lm_head_bad.cpu().numpy().round(3) + lm_head_bad = lm_head_bad.cpu().to(torch.float32).numpy().round(3) from collections import Counter counter = Counter() for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 From 487637db7bfd0d162a1932379f9dab176323689d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 16 Aug 2024 23:38:43 -0700 Subject: [PATCH 0393/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 38d5949f4b..7316656b2a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -834,7 +834,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # We instead check for repeated vectors lm_head_where = torch.where(indicator_untrained1)[0] lm_head_bad = lm_head_matrix[lm_head_where] - lm_head_bad = lm_head_bad.cpu().to(torch.float32).numpy().round(3) + lm_head_bad = lm_head_bad.cpu().float().numpy().round(3) from collections import Counter counter = Counter() for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 From 52bc19d1fa4cd3557b785127fd68b5f4d1c34347 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 16 Aug 2024 23:39:44 -0700 Subject: [PATCH 0394/1088] Bug #930 (#931) * untrained tokens llama 3.1 base * Update tokenizer_utils.py * Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index a4f0b33be4..7316656b2a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -834,7 +834,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): # We instead check for repeated vectors lm_head_where = torch.where(indicator_untrained1)[0] lm_head_bad = lm_head_matrix[lm_head_where] - lm_head_bad = lm_head_bad.cpu().numpy().round(3) + lm_head_bad = lm_head_bad.cpu().float().numpy().round(3) from collections 
import Counter counter = Counter() for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 From 9335fa0960c40fd36e2702456415cbdbbcd847dd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 15:04:25 -0700 Subject: [PATCH 0395/1088] Bug fixes --- unsloth/models/_utils.py | 28 ++++++++++++++++++++++++---- unsloth/models/llama.py | 28 +++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0c0057496b..d8904aa12b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -595,7 +595,6 @@ def _get_statistics(statistics = None, force_download = True): # You can disable this by commenting the below out try: n_cpus = psutil.cpu_count(logical = False) - keynames = "\n" + "\n".join(os.environ.keys()) if statistics is not None: pass elif "\nCOLAB_" in keynames and n_cpus == 1: statistics = "colab" @@ -604,10 +603,31 @@ def _get_statistics(statistics = None, force_download = True): elif "\nRUNPOD_" in keynames: statistics = "runpod" elif "\nAWS_" in keynames: statistics = "aws" elif "\nAZURE_" in keynames: statistics = "azure" - elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" + # elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" elif "\nINVOCATION_ID" in keynames: statistics = "lambda" - else: statistics = "other" - + # else: statistics = "other" + else: + def try_vllm_check(): + vendor_files = ( + "/sys/class/dmi/id/product_version", + "/sys/class/dmi/id/bios_vendor", + "/sys/class/dmi/id/product_name", + "/sys/class/dmi/id/chassis_asset_tag", + "/sys/class/dmi/id/sys_vendor", + ) + from pathlib import Path + for vendor_file in vendor_files: + path = Path(vendor_file) + if path.is_file(): + file_content = path.read_text().lower() + if "amazon" in file_content: return "aws" + elif "microsoft corporation" in file_content: return "azure" + elif "google" in file_content: return "gcp" + return "other" + pass + 
try: statistics = try_vllm_check() + except: statistics = "other" + pass if statistics is not None: from transformers import AutoModelForCausalLM stats_model = AutoModelForCausalLM.from_pretrained( diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6a23335c8c..d18dd4ce9f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1628,7 +1628,7 @@ def post_patch(model): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2. - model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) + # model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) model.config.update({"unsloth_version" : __version__}) # We also do this for the lm_head @@ -2234,6 +2234,9 @@ def for_inference(model): internal_model.gradient_checkpointing = False internal_model.training = False pass + if hasattr(internal_model, "training"): + internal_model.training = False + pass # Also check if lm_head / embeddings are trained internal_model = model @@ -2267,6 +2270,16 @@ def for_inference(model): internal_model._saved_temp_tokenizer.padding_side = "left" pass + # Also disable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + return model pass @@ -2288,6 +2301,9 @@ def for_training(model, use_gradient_checkpointing = True): internal_model.gradient_checkpointing = use_gradient_checkpointing internal_model.training = True pass + if hasattr(internal_model, "training"): + internal_model.training = True + pass # Also revert model.generate if hasattr(model, "_unwrapped_old_generate"): @@ -2307,6 +2323,16 @@ def for_training(model, 
use_gradient_checkpointing = True): internal_model._saved_temp_tokenizer.padding_side = "right" pass + # Also re-enable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + return model pass pass From 1bed78c99279f3667379e0798440ee3a94d536b4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 15:08:53 -0700 Subject: [PATCH 0396/1088] Update llama.py --- unsloth/models/llama.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d18dd4ce9f..3f42dee9cf 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -571,6 +571,9 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + print(getattr(self.embed_tokens, "neftune_noise_alpha")) + print(getattr(self.embed_tokens, "_forward_hooks")) + print(getattr(self.embed_tokens, "_forward_pre_hooks")) inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma From 2c4772b666e93404a780301a9166736fe4734c25 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 15:50:16 -0700 Subject: [PATCH 0397/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 7316656b2a..873544007d 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1109,6 +1109,7 @@ def check_nvidia(): import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * from transformers.trainer import * +from trl.trainer.sft_trainer import neftune_post_forward_hook def patch_sft_trainer_tokenizer(): """ @@ -1173,6 +1174,17 @@ def 
patch_sft_trainer_tokenizer(): "\n"\ "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" + # Add NEFTune since it doesn't seem to work?? We need to manually inject it + check_text += \ + "\n\n"\ + "if getattr(self.model.get_input_embeddings(), 'neftune_noise_alpha', None) is not None:\n"\ + " if hasattr(self, 'neftune_hook_handle'):\n"\ + " self.neftune_hook_handle.remove()\n"\ + " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ + "\n"\ + " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n\n"\ + "\n" + check_text = check_text.split("\n") check_text = "\n".join(" "*where + x for x in check_text) From 7fd058fc71f7433bf55cd978feccc580fa26dab8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 15:52:13 -0700 Subject: [PATCH 0398/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 873544007d..a73887061f 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1177,14 +1177,16 @@ def patch_sft_trainer_tokenizer(): # Add NEFTune since it doesn't seem to work?? 
We need to manually inject it check_text += \ "\n\n"\ + "print(1)\n"\ "if getattr(self.model.get_input_embeddings(), 'neftune_noise_alpha', None) is not None:\n"\ + " print(2)\n"\ " if hasattr(self, 'neftune_hook_handle'):\n"\ " self.neftune_hook_handle.remove()\n"\ " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ "\n"\ " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n\n"\ "\n" - + check_text = check_text.split("\n") check_text = "\n".join(" "*where + x for x in check_text) From 6e5ad15cd73388ba694bd532b8bf4d05316b1d9a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 16:03:24 -0700 Subject: [PATCH 0399/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index a73887061f..b677f864a4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1176,15 +1176,15 @@ def patch_sft_trainer_tokenizer(): # Add NEFTune since it doesn't seem to work?? 
We need to manually inject it check_text += \ - "\n\n"\ - "print(1)\n"\ - "if getattr(self.model.get_input_embeddings(), 'neftune_noise_alpha', None) is not None:\n"\ - " print(2)\n"\ - " if hasattr(self, 'neftune_hook_handle'):\n"\ - " self.neftune_hook_handle.remove()\n"\ - " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ "\n"\ - " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n\n"\ + "if hasattr(self, 'neftune_hook_handle'):\n"\ + " self.neftune_hook_handle.remove()\n"\ + " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ + "\n"\ + "if getattr(self, 'neftune_noise_alpha', None) is not None:\n"\ + " self.model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"\ + " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n"\ + "pass\n"\ "\n" check_text = check_text.split("\n") From 7139e57b729253c0ce1d70892dbac4f7f87d28ef Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 16:08:14 -0700 Subject: [PATCH 0400/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3f42dee9cf..461feb3c7e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -571,9 +571,9 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - print(getattr(self.embed_tokens, "neftune_noise_alpha")) - print(getattr(self.embed_tokens, "_forward_hooks")) - print(getattr(self.embed_tokens, "_forward_pre_hooks")) + # print(getattr(self.embed_tokens, "neftune_noise_alpha")) + # print(getattr(self.embed_tokens, "_forward_hooks")) + # print(getattr(self.embed_tokens, "_forward_pre_hooks")) inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma From 9caaa5af78292f29aaaad2ed05d5a55564020a3e Mon Sep 
17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 16:11:09 -0700 Subject: [PATCH 0401/1088] Update llama.py --- unsloth/models/llama.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 461feb3c7e..d18dd4ce9f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -571,9 +571,6 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - # print(getattr(self.embed_tokens, "neftune_noise_alpha")) - # print(getattr(self.embed_tokens, "_forward_hooks")) - # print(getattr(self.embed_tokens, "_forward_pre_hooks")) inputs_embeds = inputs_embeds.to(self.config.torch_dtype) # Normalized from Gemma From be7ed9a1e60224c99fb91f01479b8b654264d8eb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 16:14:01 -0700 Subject: [PATCH 0402/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d18dd4ce9f..048ba69193 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1628,7 +1628,7 @@ def post_patch(model): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2. 
- # model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) + model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) model.config.update({"unsloth_version" : __version__}) # We also do this for the lm_head From 75013ff022523729f13479f7738ec5a0e1d237b0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 16:17:52 -0700 Subject: [PATCH 0403/1088] Fix NEFTune (#937) * untrained tokens llama 3.1 base * Update tokenizer_utils.py * Update tokenizer_utils.py * Bug fixes * Update llama.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update llama.py * Update llama.py * Update llama.py --- unsloth/models/_utils.py | 28 ++++++++++++++++++++++++---- unsloth/models/llama.py | 26 ++++++++++++++++++++++++++ unsloth/tokenizer_utils.py | 14 ++++++++++++++ 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0c0057496b..d8904aa12b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -595,7 +595,6 @@ def _get_statistics(statistics = None, force_download = True): # You can disable this by commenting the below out try: n_cpus = psutil.cpu_count(logical = False) - keynames = "\n" + "\n".join(os.environ.keys()) if statistics is not None: pass elif "\nCOLAB_" in keynames and n_cpus == 1: statistics = "colab" @@ -604,10 +603,31 @@ def _get_statistics(statistics = None, force_download = True): elif "\nRUNPOD_" in keynames: statistics = "runpod" elif "\nAWS_" in keynames: statistics = "aws" elif "\nAZURE_" in keynames: statistics = "azure" - elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" + # elif "\nK_" in keynames or "\nFUNCTION_" in keynames: statistics = "gcp" elif "\nINVOCATION_ID" in keynames: statistics = "lambda" - else: statistics = "other" - + # else: statistics = "other" + else: + def try_vllm_check(): + vendor_files = ( + 
"/sys/class/dmi/id/product_version", + "/sys/class/dmi/id/bios_vendor", + "/sys/class/dmi/id/product_name", + "/sys/class/dmi/id/chassis_asset_tag", + "/sys/class/dmi/id/sys_vendor", + ) + from pathlib import Path + for vendor_file in vendor_files: + path = Path(vendor_file) + if path.is_file(): + file_content = path.read_text().lower() + if "amazon" in file_content: return "aws" + elif "microsoft corporation" in file_content: return "azure" + elif "google" in file_content: return "gcp" + return "other" + pass + try: statistics = try_vllm_check() + except: statistics = "other" + pass if statistics is not None: from transformers import AutoModelForCausalLM stats_model = AutoModelForCausalLM.from_pretrained( diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 6a23335c8c..048ba69193 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2234,6 +2234,9 @@ def for_inference(model): internal_model.gradient_checkpointing = False internal_model.training = False pass + if hasattr(internal_model, "training"): + internal_model.training = False + pass # Also check if lm_head / embeddings are trained internal_model = model @@ -2267,6 +2270,16 @@ def for_inference(model): internal_model._saved_temp_tokenizer.padding_side = "left" pass + # Also disable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + return model pass @@ -2288,6 +2301,9 @@ def for_training(model, use_gradient_checkpointing = True): internal_model.gradient_checkpointing = use_gradient_checkpointing internal_model.training = True pass + if hasattr(internal_model, "training"): + internal_model.training = True + pass # Also revert model.generate if hasattr(model, 
"_unwrapped_old_generate"): @@ -2307,6 +2323,16 @@ def for_training(model, use_gradient_checkpointing = True): internal_model._saved_temp_tokenizer.padding_side = "right" pass + # Also re-enable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + return model pass pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 7316656b2a..b677f864a4 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1109,6 +1109,7 @@ def check_nvidia(): import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * from transformers.trainer import * +from trl.trainer.sft_trainer import neftune_post_forward_hook def patch_sft_trainer_tokenizer(): """ @@ -1173,6 +1174,19 @@ def patch_sft_trainer_tokenizer(): "\n"\ "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" + # Add NEFTune since it doesn't seem to work?? 
We need to manually inject it + check_text += \ + "\n"\ + "if hasattr(self, 'neftune_hook_handle'):\n"\ + " self.neftune_hook_handle.remove()\n"\ + " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ + "\n"\ + "if getattr(self, 'neftune_noise_alpha', None) is not None:\n"\ + " self.model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"\ + " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n"\ + "pass\n"\ + "\n" + check_text = check_text.split("\n") check_text = "\n".join(" "*where + x for x in check_text) From 4cc20f4720ad482a2da04ce79cf5cd622c14e54e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 17:12:32 -0700 Subject: [PATCH 0404/1088] Create _auto_install.py --- unsloth/_auto_install.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 unsloth/_auto_install.py diff --git a/unsloth/_auto_install.py b/unsloth/_auto_install.py new file mode 100644 index 0000000000..2f5b62d4ca --- /dev/null +++ b/unsloth/_auto_install.py @@ -0,0 +1,16 @@ +try: import torch +except: raise ImportError('Install torch via `pip install torch`') +from packaging.version import Version as V +v = V(torch.__version__) +cuda = str(torch.version.cuda) +is_ampere = torch.cuda.get_device_capability()[0] >= 8 +if cuda != "12.1" and cuda != "11.8": raise RuntimeError(f"CUDA = {cuda} not supported!") +if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!") +elif v <= V('2.1.1'): x = 'cu{}{}-torch211' +elif v <= V('2.1.2'): x = 'cu{}{}-torch212' +elif v < V('2.3.0'): x = 'cu{}{}-torch220' +elif v < V('2.4.0'): x = 'cu{}{}-torch230' +elif v < V('2.5.0'): x = 'cu{}{}-torch240' +else: raise RuntimeError(f"Torch = {v} too new!") +x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") +print(f'pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') \ No newline at end of file From 
fbf50a42602bf299da2a0a99fea2f9b18550332d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 17:12:46 -0700 Subject: [PATCH 0405/1088] Update _auto_install.py --- unsloth/_auto_install.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/unsloth/_auto_install.py b/unsloth/_auto_install.py index 2f5b62d4ca..2e6351b8d9 100644 --- a/unsloth/_auto_install.py +++ b/unsloth/_auto_install.py @@ -1,3 +1,17 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + try: import torch except: raise ImportError('Install torch via `pip install torch`') from packaging.version import Version as V From d45ade257b8578d63236b389f25e73b5c22bb862 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 19 Aug 2024 17:18:30 -0700 Subject: [PATCH 0406/1088] Update README.md (#938) --- README.md | 105 +++++++++++++++++++++++------------------------------- 1 file changed, 44 insertions(+), 61 deletions(-) diff --git a/README.md b/README.md index 86c3fbd86e..b23acffcb8 100644 --- a/README.md +++ b/README.md @@ -94,85 +94,68 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and ## 💾 Installation Instructions -If you have Pytorch 2.3 and CUDA 12.1, install Unsloth with `pip install unsloth[colab-new]` then `pip install --no-deps "xformers<0.0.27" "trl<0.9.0" peft accelerate bitsandbytes` - ### Conda Installation -Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs. +`⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. We support `python=3.10,3.11,3.12`. ```bash conda create --name unsloth_env \ - python=3.10 \ - pytorch-cuda=<11.8/12.1> \ + python=3.11 \ + pytorch-cuda=12.1 \ pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers \ -y conda activate unsloth_env pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" - -pip install --no-deps "trl<0.9.0" peft accelerate bitsandbytes +pip install --no-deps trl peft accelerate bitsandbytes ``` +

    + If you're looking to install Conda in a Linux environment, read here, or run the below 🔽 + + ```bash + mkdir -p ~/miniconda3 + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh + bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 + rm -rf ~/miniconda3/miniconda.sh + ~/miniconda3/bin/conda init bash + ~/miniconda3/bin/conda init zsh + ``` +
    + ### Pip Installation -Do **NOT** use this if you have Anaconda. You must use the Conda install method, or else stuff will BREAK. +`⚠️Do **NOT** use this if you have Conda.` Pip is a bit more complex since there are dependency issues. The pip command is different for `torch 2.2,2.3,2.4` and CUDA versions. -1. Find your CUDA version via -```python -import torch; torch.version.cuda -``` -2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. For Pytorch 2.1.1: go to step 3. For Pytorch 2.2.0: go to step 4. -```bash -pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ - --index-url https://download.pytorch.org/whl/cu121 -``` -```bash -pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118-ampere] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-ampere] @ git+https://github.com/unslothai/unsloth.git" -``` -3. For Pytorch 2.1.1: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. -```bash -pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.1 triton \ - --index-url https://download.pytorch.org/whl/cu121 -``` -```bash -pip install "unsloth[cu118-torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118-ampere-torch211] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-ampere-torch211] @ git+https://github.com/unslothai/unsloth.git" -``` -4. For Pytorch 2.2.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. 
-```bash -pip install --upgrade --force-reinstall --no-cache-dir torch==2.2.0 triton \ - --index-url https://download.pytorch.org/whl/cu121 -``` -```bash -pip install "unsloth[cu118-torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118-ampere-torch220] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-ampere-torch220] @ git+https://github.com/unslothai/unsloth.git" -``` -5. If you get errors, try the below first, then go back to step 1: +In general, if you have `torch 2.4` and `CUDA 12.1`, use: ```bash pip install --upgrade pip +pip install "unsloth[cu121-torch240] @ git+https://github.com/unslothai/unsloth.git" ``` -6. For Pytorch 2.2.1: -```bash -# RTX 3090, 4090 Ampere GPUs: -pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes -# Pre Ampere RTX 2080, T4, GTX 1080 GPUs: -pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes -``` -7. For Pytorch 2.3.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. +Or, run the below in a terminal to get the optional pip installation command: ```bash -pip install "unsloth[cu118-torch230] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-torch230] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu118-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[cu121-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" +wget -qO- https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/_auto_install.py | python - ``` -8. To troubleshoot installs try the below (all must succeed). Xformers should mostly all be available. 
+ +Or, run the below manually in a Python REPL: +```python +try: import torch +except: raise ImportError("Install torch via `pip install torch`") +from packaging.version import Version as V +v = V(torch.__version__) +cuda = str(torch.version.cuda) +is_ampere = torch.cuda.get_device_capability()[0] >= 8 +if cuda != "12.1" and cuda != "11.8": raise RuntimeError(f"CUDA = {cuda} not supported!") +if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!") +elif v <= V('2.1.1'): x = 'cu{}{}-torch211' +elif v <= V('2.1.2'): x = 'cu{}{}-torch212' +elif v < V('2.3.0'): x = 'cu{}{}-torch220' +elif v < V('2.4.0'): x = 'cu{}{}-torch230' +elif v < V('2.5.0'): x = 'cu{}{}-torch240' +else: raise RuntimeError(f"Torch = {v} too new!") +x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") +print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') +``` + +Afterwards, confirm if `nvcc` `xformers` and `bitsandbytes` have successfully installed - if not, install them individually first until they work, then install Unsloth. ```bash nvcc python -m xformers.info From d5768379c5b5bff9abe50b946fb4e9a7d9f04d47 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 20 Aug 2024 16:51:39 -0700 Subject: [PATCH 0407/1088] Phi 3.5 (#940) * LongRoPE * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update mapper.py * Phi 3.5 --- README.md | 2 +- unsloth/chat_templates.py | 6 +- unsloth/models/_utils.py | 13 ++++ unsloth/models/gemma.py | 4 + unsloth/models/llama.py | 158 +++++++++++++++++++++++++++++++++++--- unsloth/models/mapper.py | 4 + 6 files changed, 173 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index b23acffcb8..0590415f43 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3.1, Mistral, Phi-3 & Gemma 2-5x faster with 80% less memory! 
+### Finetune Llama 3.1, Mistral, Phi-3.5 & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 82f6aba148..f83df579b1 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -473,7 +473,7 @@ # =========================================== Phi-3 phi3_template = \ - "{{ bos_token }}"\ + # "{{ bos_token }}"\ # Phi-3.5 removes BOS? "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ "{{'<|user|>\n' + message['content'] + '<|end|>\n'}}"\ @@ -505,7 +505,9 @@ ''' phi3_template_eos_token = "<|end|>" -CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) +CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) +CHAT_TEMPLATES["phi-35"] = CHAT_TEMPLATES["phi-3"] +CHAT_TEMPLATES["phi-3.5"] = CHAT_TEMPLATES["phi-3"] pass # =========================================== Llama-3.1 diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d8904aa12b..1c48e8e588 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -969,6 +969,7 @@ def patch_llama_rope_scaling( scaled_rope_module = None, extended_rope_module = None, attention_module = None, + longrope_module = None, ): assert(\ rope_module is not None and \ @@ -1026,14 +1027,26 @@ def patch_llama_rope_scaling( max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) + elif scaling_type == "longrope": + self.rotary_emb = {longrope_rope_function}( + dim = self.head_dim, + max_position_embeddings = self.max_position_embeddings, + original_max_position_embeddings = self.config.original_max_position_embeddings, + base = self.rope_theta, + short_factor = self.config.rope_scaling['short_factor'], + long_factor = self.config.rope_scaling['long_factor' ], + ) else: raise ValueError(f"Unknown RoPE scaling type {{scaling_type}}") pass """ + fix_rope_function = fix_rope_function.format( 
rope_function = rope_module.__name__, scaled_rope_function = scaled_rope_module.__name__, extended_rope_function = extended_rope_module.__name__, + longrope_rope_function = \ + (longrope_module if longrope_module is not None else rope_module).__name__ ) rotary_emb = re.findall( "self.rotary_emb = .+?\)", function, diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index a0894ec7a0..45f14c1131 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -254,6 +254,10 @@ def forward(self, x, position_ids=None, seq_len=None): ) pass + def get_cached(self, seq_len = None): + return self.cos_cached, self.sin_cached + pass + def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 048ba69193..376b4b4ebc 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -187,8 +187,9 @@ def LlamaAttention_fast_forward_inference( # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) - cos = self.rotary_emb.cos_cached[position_ids].unsqueeze(1) - sin = self.rotary_emb.sin_cached[position_ids].unsqueeze(1) + cos, sin = self.rotary_emb.get_cached(kv_seq_len) + cos = cos[position_ids].unsqueeze(1) + sin = sin[position_ids].unsqueeze(1) h = self.half_head_dim RH_Q = self.RH_Q @@ -346,14 +347,17 @@ def LlamaAttention_fast_forward( kv_seq_len += past_key_value[0].shape[-2] # Extend RoPE dynamically to fit in VRAM - self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) + rotary_emb = self.rotary_emb + rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) if position_ids is None: - cos = self.rotary_emb.cos_cached - sin = self.rotary_emb.sin_cached + # Useful for LongRoPE + cos, sin = rotary_emb.get_cached(kv_seq_len) + # cos = self.rotary_emb.cos_cached + # sin = self.rotary_emb.sin_cached Q, K = fast_rope_embedding(Q, K, cos, sin) else: 
- cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + cos, sin = rotary_emb(V, seq_len = kv_seq_len) Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) pass @@ -1048,6 +1052,10 @@ def forward(self, x, position_ids=None, seq_len=None): ) pass + def get_cached(self, seq_len = None): + return self.cos_cached, self.sin_cached + pass + def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 @@ -1170,6 +1178,125 @@ def forward(self, x, position_ids=None, seq_len=None): ) pass + def get_cached(self, seq_len = None): + return self.cos_cached, self.sin_cached + pass + + def extend_rope_embedding(self, x, seq_len): + if seq_len <= self.current_rope_size: return + # Iteratively grow by increments of 8192 + self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + pass +pass + + +class LongRopeRotaryEmbedding(torch.nn.Module): + # For Phi 3.5 128K https://huggingface.co/microsoft/Phi-3.5-mini-instruct/blob/main/modeling_phi3.py + def __init__(self, + dim = None, + max_position_embeddings = 131072, + original_max_position_embeddings = 4096, + base = 10000, + short_factor = None, + long_factor = None, + device = None, + config = None, # [TODO] Hack to pass in config - need to remove later + ): + super().__init__() + assert(short_factor is not None) + assert(long_factor is not None) + assert(type(original_max_position_embeddings) is int) + + if config is not None: + # [TODO] Hack to pass in config - need to remove later + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + 
self.original_max_position_embeddings = original_max_position_embeddings + self.base = base + # Dynamic RoPE we first set it to a max of 4 * 8192 tokens then we iteratively grow this + self.current_rope_size = min(original_max_position_embeddings, self.max_position_embeddings) + + # Long RoPE similar to RoPE except short sequences have 1 cos / sin + # and long sequences have another cos / sin + inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device="cpu").float() / self.dim + short_factor = torch.tensor(short_factor, device = "cpu", dtype = torch.float32) + long_factor = torch.tensor(long_factor, device = "cpu", dtype = torch.float32) + short_inv_freq = 1.0 / (short_factor * self.base**inv_freq_shape) + long_inv_freq = 1.0 / (long_factor * self.base**inv_freq_shape) + + # Phi-3 Scale factor + scale = self.max_position_embeddings / self.original_max_position_embeddings + if scale <= 1.0: + scaling_factor = 1.0 + else: + scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings)) + pass + self.scaling_factor = scaling_factor + + # Short and long inv_freq + self.register_buffer("short_inv_freq", short_inv_freq, persistent = False) + self.register_buffer("long_inv_freq", long_inv_freq, persistent = False) + # Build here to make `torch.jit.trace` work. 
+ # self._set_cos_sin_cache(seq_len=self.current_rope_size, device=device, dtype=torch.get_default_dtype()) + + # Short sequences + dtype = torch.bfloat16 if is_bfloat16_supported() else torch.float16 + t = torch.arange(original_max_position_embeddings, device=self.short_inv_freq.device, dtype=torch.int64).float() + freqs = torch.outer(t, self.short_inv_freq) + emb = torch.cat((freqs, freqs), dim=-1) + cos_cached = (emb.cos() * self.scaling_factor).to(dtype=dtype, device=device, non_blocking=True) + sin_cached = (emb.sin() * self.scaling_factor).to(dtype=dtype, device=device, non_blocking=True) + self.register_buffer("short_cos_cached", cos_cached, persistent=False) + self.register_buffer("short_sin_cached", sin_cached, persistent=False) + pass + + def _set_cos_sin_cache(self, seq_len, device, dtype): + # Note: on the original Llama codebase, these tensors are created on the target device (and not on CPU) and + # in FP32. They are applied (multiplied) in FP32 as well. + self.current_rope_size = seq_len + + t = torch.arange(self.current_rope_size, device=self.inv_freq.device, dtype=torch.int64).float() + # Long sequences + freqs = torch.outer(t, self.long_inv_freq) + emb = torch.cat((freqs, freqs), dim=-1) + cos_cached = (emb.cos() * self.scaling_factor).to(dtype=dtype, device=device, non_blocking=True) + sin_cached = (emb.sin() * self.scaling_factor).to(dtype=dtype, device=device, non_blocking=True) + self.register_buffer("long_cos_cached", cos_cached, persistent=False) + self.register_buffer("long_sin_cached", sin_cached, persistent=False) + pass + + def forward(self, x, position_ids=None, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.current_rope_size: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + if seq_len < self.original_max_position_embeddings: + return ( + self.short_cos_cached[:seq_len].to(dtype = x.dtype), + self.short_sin_cached[:seq_len].to(dtype = x.dtype), + ) + else: + return ( 
+ self.long_cos_cached[:seq_len].to(dtype = x.dtype), + self.long_sin_cached[:seq_len].to(dtype = x.dtype), + ) + pass + pass + + def get_cached(self, seq_len = None): + if seq_len < self.original_max_position_embeddings: + return self.short_cos_cached, self.short_sin_cached + return self.long_cos_cached, self.long_sin_cached + pass + def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 @@ -1242,6 +1369,7 @@ def pre_patch(): scaled_rope_module = LlamaLinearScalingRotaryEmbedding, extended_rope_module = LlamaExtendedRotaryEmbedding, attention_module = LlamaAttention, + longrope_module = LongRopeRotaryEmbedding, ) if init_name is not None: exec(function, globals()) @@ -1657,11 +1785,19 @@ def post_patch(model): pass pass # Downcast RoPE embedding to correct data type - if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - and (module.cos_cached.dtype != correct_dtype): - - module.cos_cached = module.cos_cached.to(correct_dtype) - module.sin_cached = module.sin_cached.to(correct_dtype) + if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")): + + if hasattr(module, "cos_cached") and \ + (module.cos_cached.dtype != correct_dtype): + + module.cos_cached = module.cos_cached.to(correct_dtype) + module.sin_cached = module.sin_cached.to(correct_dtype) + + elif hasattr(module, "short_cos_cached") and \ + (module.short_cos_cached.dtype != correct_dtype): + + module.short_cos_cached = module.short_cos_cached.to(correct_dtype) + module.short_sin_cached = module.short_sin_cached.to(correct_dtype) pass pass pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b8259a073c..3f49c96551 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -249,6 +249,10 @@ "unsloth/gemma-2-2b-it", "google/gemma-2-2b-it", ), + "unsloth/Phi-3.5-mini-instruct-bnb-4bit" : ( + "unsloth/Phi-3.5-mini-instruct", + "microsoft/Phi-3.5-mini-instruct", + ), } 
INT_TO_FLOAT_MAPPER = {} From f629a638d6abd20464176e657842b58b9fdc1763 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 20 Aug 2024 16:54:11 -0700 Subject: [PATCH 0408/1088] Update chat_templates.py --- unsloth/chat_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index f83df579b1..d81413ae70 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -472,8 +472,8 @@ # =========================================== Phi-3 +# "{{ bos_token }}"\ # Phi-3.5 removes BOS? phi3_template = \ - # "{{ bos_token }}"\ # Phi-3.5 removes BOS? "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ "{{'<|user|>\n' + message['content'] + '<|end|>\n'}}"\ From 600ffe2a175312e7f2fbcdffbe6b99e3df20c417 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 20 Aug 2024 17:59:50 -0700 Subject: [PATCH 0409/1088] Update README.md (#941) Co-authored-by: Michael <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 0590415f43..5c4de1f586 100644 --- a/README.md +++ b/README.md @@ -23,14 +23,13 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | -| **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | +| **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | -| **Phi-3 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | +| **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | -| **TinyLlama** | [▶️ Start for free](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | - **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 
(9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) @@ -39,10 +38,13 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. -- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Gemma-2-9b and Gemma-2-27b are alrady supported! And uploaded [GGUF quants](https://huggingface.co/unsloth/gemma-2-it-GGUF) Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing) for Gemma-2-2b Instruct! -- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) both Base and Instruct now supported -- 📣 NEW! [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct now supported +- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! +- 📣 NEW! 
[Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported +
    + Click for more news + - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! @@ -50,11 +52,13 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - 📣 [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct] - 📣 [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here + [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models - 📣 We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support [4x longer context windows](https://unsloth.ai/blog/long-context)! +- +
    ## 🔗 Links and Resources | Type | Links | | ------------------------------- | --------------------------------------- | -| 📚 **Documentation & Wiki** | [Read Our Wiki](https://github.com/unslothai/unsloth/wiki) | +| 📚 **Documentation & Wiki** | [Read Our Docs](https://docs.unsloth.ai) | |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| | 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) @@ -162,8 +166,8 @@ python -m xformers.info python -m bitsandbytes ``` -## 📜 Documentation -- Go to our [Wiki page](https://github.com/unslothai/unsloth/wiki) for saving to GGUF, checkpointing, evaluation and more! +## 📜 [Documentation](https://docs.unsloth.ai) +- Go to our official [Documentation](https://docs.unsloth.ai) for saving to GGUF, checkpointing, evaluation and more! - We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! - We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! 
From d0ca3497eb5911483339be025e9924cf73280178 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 22 Aug 2024 02:18:03 -0700 Subject: [PATCH 0410/1088] Fix DPO (#947) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 118 +++++++++++++++++++++---------------- 1 file changed, 67 insertions(+), 51 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index b677f864a4..044629eaed 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1143,57 +1143,73 @@ def patch_sft_trainer_tokenizer(): pass # Patch train with fix_untrained_tokens - function_name, replacer = "train", "if resume_from_checkpoint is False:" - function = getsource(eval(f"trl.trainer.sft_trainer.SFTTrainer.{function_name}")) - where = function.find("def") - function = function.split("\n") - function = "\n".join(x[where:] for x in function) - - check_text = \ - "\n"\ - "if self._inner_training_loop.__name__ != '_fast_inner_training_loop':\n"\ - " raise RuntimeError(\n"\ - " 'Please do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ - " )\n"\ - "pass\n"\ - "import subprocess, re, gc, numpy as np\n"\ - "a = np.array([0,])\n"\ - "try:\n"\ - " a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ - " a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a)\n"\ - " a = np.array([int(x.decode('utf-8'))/1024 for x in a])\n"\ - "except:\n"\ - " if not torch.cuda.is_available():\n"\ - " raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!')\n"\ - "if ((a - PRE_CHECK) >= 1).sum() > 1:\n"\ - " raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!')\n"\ - "for _ in range(3):\n"\ - " gc.collect()\n"\ - " torch.cuda.empty_cache()\n"\ - "pass\n"\ - "\n"\ 
- "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" - - # Add NEFTune since it doesn't seem to work?? We need to manually inject it - check_text += \ - "\n"\ - "if hasattr(self, 'neftune_hook_handle'):\n"\ - " self.neftune_hook_handle.remove()\n"\ - " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ - "\n"\ - "if getattr(self, 'neftune_noise_alpha', None) is not None:\n"\ - " self.model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"\ - " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n"\ - "pass\n"\ - "\n" - - check_text = check_text.split("\n") - check_text = "\n".join(" "*where + x for x in check_text) - - function = function.replace(replacer, check_text + replacer) - exec(function, globals()) - - exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) + for path_to_trainer in \ + ("sft_trainer.SFTTrainer", "dpo_trainer.DPOTrainer",): + + function_name, replacer = "train", "if resume_from_checkpoint is False:" + function = getsource(eval(f"trl.trainer.{path_to_trainer}.{function_name}")) + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + + check_text = \ + "\n"\ + "if self._inner_training_loop.__name__ != '_fast_inner_training_loop':\n"\ + " raise RuntimeError(\n"\ + " 'Please do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ + " )\n"\ + "pass\n"\ + "import subprocess, re, gc, numpy as np\n"\ + "a = np.array([0,])\n"\ + "try:\n"\ + " a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True)\n"\ + " a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a)\n"\ + " a = np.array([int(x.decode('utf-8'))/1024 for x in a])\n"\ + "except:\n"\ + " if not torch.cuda.is_available():\n"\ + " raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it 
is a work in progress!')\n"\ + "if ((a - PRE_CHECK) >= 1).sum() > 1:\n"\ + " raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!')\n"\ + "for _ in range(3):\n"\ + " gc.collect()\n"\ + " torch.cuda.empty_cache()\n"\ + "pass\n"\ + "\n"\ + "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" + + # Add NEFTune since it doesn't seem to work?? We need to manually inject it + check_text += \ + "\n"\ + "if hasattr(self, 'neftune_hook_handle'):\n"\ + " self.neftune_hook_handle.remove()\n"\ + " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ + "\n"\ + "if getattr(self, 'neftune_noise_alpha', None) is not None:\n"\ + " self.model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"\ + " self.neftune_hook_handle = self.model.get_input_embeddings().register_forward_hook(neftune_post_forward_hook)\n"\ + "pass\n"\ + "\n" + + # Also DPO weirdly tokenizes non numeric columns? Delete them! 
+ check_text += \ + "\n"\ + "column_names = set(self.train_dataset.column_names)\n"\ + "check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ + " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ + " 'prompt_input_ids', 'prompt_attention_mask']\n"\ + "if all(x in column_names for x in check):\n"\ + " self.train_dataset = self.train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ + "del check, column_names\n"\ + "\n" + + check_text = check_text.split("\n") + check_text = "\n".join(" "*where + x for x in check_text) + + function = function.replace(replacer, check_text + replacer) + exec(function, globals()) + + exec(f"trl.trainer.{path_to_trainer}.{function_name} = {function_name}", globals()) + pass pass patch_sft_trainer_tokenizer() From 20506463e1630605492e4e69380176c6c681b145 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 23 Aug 2024 17:38:24 -0700 Subject: [PATCH 0411/1088] Phi 3.5 bug fix (#955) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update token retrieval logic (#952) * Fix DPO (#947) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update hf token retrieval logic --------- Co-authored-by: Daniel Han * Update llama.py * get_token * Update README.md --------- Co-authored-by: Hafedh <70411813+not-lain@users.noreply.github.com> --- README.md | 3 ++- unsloth/models/llama.py | 11 +++-------- unsloth/models/loader.py | 9 +++------ unsloth/save.py | 16 ++++------------ 4 files changed, 12 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 5c4de1f586..6cd1be1381 100644 --- a/README.md +++ b/README.md @@ -32,12 +32,13 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | - **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- Run [Llama 3.1 conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.1 Conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. 
Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 376b4b4ebc..f62f0f1165 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -61,6 +61,7 @@ from peft.tuners.lora import Linear4bit as Peft_Linear4bit from ..save import patch_saving_functions import re, os, inspect, math, sys +from huggingface_hub.utils._token import get_token def original_apply_qkv(self, X): @@ -1263,7 +1264,7 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): # in FP32. They are applied (multiplied) in FP32 as well. self.current_rope_size = seq_len - t = torch.arange(self.current_rope_size, device=self.inv_freq.device, dtype=torch.int64).float() + t = torch.arange(self.current_rope_size, device=self.long_inv_freq.device, dtype=torch.int64).float() # Long sequences freqs = torch.outer(t, self.long_inv_freq) emb = torch.cat((freqs, freqs), dim=-1) @@ -1417,13 +1418,7 @@ def from_pretrained( "Are you certain you want to do remote code execution?" 
) pass - - if token is None and "HF_TOKEN" in os.environ: - token = os.environ["HF_TOKEN"] - - if token is None and "HUGGINGFACE_TOKEN" in os.environ: - token = os.environ["HUGGINGFACE_TOKEN"] - + if token is None: token = get_token() if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = is_bfloat16_supported() gpu_stats = torch.cuda.get_device_properties(0) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 02ed00f5cd..e1f17aca04 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -21,6 +21,7 @@ from peft import PeftConfig, PeftModel from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit import os +from huggingface_hub.utils._token import get_token # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! from packaging.version import Version @@ -152,12 +153,8 @@ def from_pretrained( revision = None, *args, **kwargs, ): - if token is None and "HF_TOKEN" in os.environ: - token = os.environ["HF_TOKEN"] - - if token is None and "HUGGINGFACE_TOKEN" in os.environ: - token = os.environ["HUGGINGFACE_TOKEN"] - + if token is None: token = get_token() + old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) diff --git a/unsloth/save.py b/unsloth/save.py index f45d8062ad..66e2ec6bf0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -29,6 +29,7 @@ from transformers.models.llama.modeling_llama import logger from .tokenizer_utils import fix_sentencepiece_gguf from huggingface_hub import HfApi +from huggingface_hub.utils._token import get_token __all__ = [ "print_quantization_methods", @@ -207,12 +208,7 @@ def unsloth_save_model( temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): - if token is None and "HF_TOKEN" in os.environ: - token = os.environ["HF_TOKEN"] - elif token is None and "hf_token" in os.environ: - token = os.environ["hf_token"] - elif token is None and "HUGGINGFACE_TOKEN" 
in os.environ: - token = os.environ["HUGGINGFACE_TOKEN"] + if token is None: token = get_token() if commit_message is None: commit_message = "" if "Unsloth" not in commit_message: @@ -1321,12 +1317,8 @@ def create_huggingface_repo( token = None, private = False, ): - if token is None and "HF_TOKEN" in os.environ: - token = os.environ["HF_TOKEN"] - elif token is None and "hf_token" in os.environ: - token = os.environ["hf_token"] - elif token is None and "HUGGINGFACE_TOKEN" in os.environ: - token = os.environ["HUGGINGFACE_TOKEN"] + if token is None : + token = get_token() pass save_directory, username = _determine_username(save_directory, "", token) From 12b437e12204532f82542c12ac1ab00d19e3ebbf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 23 Aug 2024 23:43:57 -0700 Subject: [PATCH 0412/1088] Update gemma2.py --- unsloth/models/gemma2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index ea9f53e7db..6858f52573 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -62,7 +62,7 @@ # [TODO] We must randomnly use torch.compile? # I checked the gradients and formulas and I'm sure it's correct. 
# I'm stumped :( -@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) +@torch.compile(fullgraph = False, dynamic = True, options = torch_compile_options) def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): old_dtype = X.dtype X = X.float() From 976d11a10d54383aeb7a692c69e01151a20bfd72 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 27 Aug 2024 00:08:39 -0700 Subject: [PATCH 0413/1088] Update save.py --- unsloth/save.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 66e2ec6bf0..b875e9b2d1 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1483,14 +1483,23 @@ def create_ollama_modelfile(tokenizer, gguf_location): modelfile = modelfile\ .replace("{{", "⚫@✅#🦥")\ - .replace("}}", "⚡@🦥#⛵")\ - .format( + .replace("}}", "⚡@🦥#⛵") + + if "__EOS_TOKEN__" in modelfile: + modelfile = modelfile.format( __FILE_LOCATION__ = gguf_location, - )\ + __EOS_TOKEN__ = tokenizer.eos_token, + ) + else: + modelfile = modelfile.format( + __FILE_LOCATION__ = gguf_location, + ) + pass + + modelfile = modelfile\ .replace("⚫@✅#🦥", "{{")\ .replace("⚡@🦥#⛵", "}}")\ .rstrip() - pass return modelfile pass From c085a4562c704b94e76c17df1363a8fc6cd07e85 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Sep 2024 01:52:32 -0700 Subject: [PATCH 0414/1088] Cohere, Bug fixes (#984) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update token retrieval logic (#952) * Fix DPO (#947) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update hf token retrieval logic --------- Co-authored-by: Daniel Han * Update llama.py * get_token * Update README.md * Update gemma2.py * Update rms_layernorm.py * synchronize * Update 
gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * layernorm * Update rms_layernorm.py * Update gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * revert * Gemma * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma2.py * Change UnslothTrainingArguments base class to SFTConfig (#979) * Cohere * Update trainer.py * Cohere * Cohere * New models * Update llama.py * Update llama.py * Update cohere.py * Update llama.py * Update cohere.py * retry * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * _apply_lora_mlp * Update _utils.py --------- Co-authored-by: Hafedh <70411813+not-lain@users.noreply.github.com> Co-authored-by: Tuan Pham <82665400+vTuanpham@users.noreply.github.com> --- unsloth/kernels/cross_entropy_loss.py | 130 ++++--- unsloth/kernels/fast_lora.py | 30 +- unsloth/kernels/rms_layernorm.py | 4 +- unsloth/models/_utils.py | 2 +- unsloth/models/cohere.py | 473 ++++++++++++++++++++++++++ unsloth/models/llama.py | 81 ++++- unsloth/models/loader.py | 7 +- unsloth/models/mapper.py | 23 ++ unsloth/trainer.py | 7 +- 9 files changed, 690 insertions(+), 67 deletions(-) create mode 100644 unsloth/models/cohere.py diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index b8473e60c7..24e8002bec 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ 
b/unsloth/kernels/cross_entropy_loss.py @@ -19,17 +19,22 @@ from transformers.models.llama.modeling_llama import logger -@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +}) @triton.jit def _cross_entropy_forward( logits_ptr, logits_row_stride, loss_ptr, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING: tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -62,8 +67,11 @@ def _cross_entropy_forward( label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + + # Go logit scaling for Cohere: t * x + if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -71,8 +79,10 @@ def _cross_entropy_forward( if label_idx != -100: x = tl.load(logits_ptr + label_idx) + # Go logit scaling for Cohere: t * x + if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) + if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) loss = logsumexp - x.to(tl.float32) else: loss = 0.0 @@ -81,18 +91,23 @@ def _cross_entropy_forward( pass -@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], 
+}) @triton.jit def _chunked_cross_entropy_forward( logits_ptr, logits_row_stride, loss_ptr, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING: tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -130,8 +145,11 @@ def _chunked_cross_entropy_forward( label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + + # Go logit scaling for Cohere: t * x + if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -142,8 +160,10 @@ def _chunked_cross_entropy_forward( # Do the -x separately if label_idx != -100: x = tl.load(logits_ptr + label_idx).to(tl.float32) + # Go logit scaling for Cohere: t * x + if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) + if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) loss = -1.0 * x.to(tl.float32) else: loss = 0.0 @@ -153,17 +173,22 @@ def _chunked_cross_entropy_forward( pass -@triton.heuristics({"DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING"],}) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +}) @triton.jit def _cross_entropy_backward( logits_ptr, logits_row_stride, dloss_ptr, dloss_row_stride, logsumexp_ptr, labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - 
SOFTCAP : tl.constexpr, + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING: tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) @@ -195,6 +220,13 @@ def _cross_entropy_backward( dloss = 0.0 x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + + # Do logit scaling for Cohere + if DO_LOGIT_SCALING: + # d/dx [s * x] = s + x = x * LOGIT_SCALE + pass + # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) @@ -210,6 +242,11 @@ def _cross_entropy_backward( y, # exp(x - logsumexp) ) + if DO_LOGIT_SCALING: + # d/dx [s * x] = s + y = y * LOGIT_SCALE + pass + if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) y = y * (1.0 - partial*partial) @@ -224,14 +261,15 @@ def _cross_entropy_backward( class Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod - def forward(ctx, logits, labels, logit_softcapping = 0): + def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_rows, vocab_size = logits.shape div, mod = divmod(vocab_size, MAX_FUSED_SIZE) n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - DO_SOFTCAPPING = (logit_softcapping != 0) + DO_SOFTCAPPING = (logit_softcapping != 0) + DO_LOGIT_SCALING = (logit_scaling != 0) if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral @@ -243,11 +281,13 @@ def forward(ctx, logits, labels, logit_softcapping = 0): losses, logsumexp, labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, - SOFTCAP = logit_softcapping, - num_warps = num_warps, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, + LOGIT_SCALE = logit_scaling, + num_warps = num_warps, ) else: # For large vocabs 
> 65336 like Gemma 256K @@ -258,12 +298,14 @@ def forward(ctx, logits, labels, logit_softcapping = 0): losses, logsumexp, labels, - VOCAB_SIZE = vocab_size, - N_CHUNKS = n_chunks, - BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, - SOFTCAP = logit_softcapping, - num_warps = 32, + VOCAB_SIZE = vocab_size, + N_CHUNKS = n_chunks, + BLOCK_SIZE = MAX_FUSED_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, + LOGIT_SCALE = logit_scaling, + num_warps = 32, ) # logsumexp(chunked_logsumexp) - x # Do the -x separately @@ -275,6 +317,8 @@ def forward(ctx, logits, labels, logit_softcapping = 0): ctx.save_for_backward(logits, logsumexp, labels) ctx.DO_SOFTCAPPING = DO_SOFTCAPPING ctx.logit_softcapping = logit_softcapping + ctx.DO_LOGIT_SCALING = DO_LOGIT_SCALING + ctx.logit_scaling = logit_scaling return losses pass @@ -292,19 +336,26 @@ def backward(ctx, dlosses): dlosses, dlosses.stride(0), logsumexp, labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, - SOFTCAP = ctx.logit_softcapping, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, + SOFTCAP = ctx.logit_softcapping, + DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, + LOGIT_SCALE = ctx.logit_scaling, num_warps = 8, ) - return logits, None, None, + return logits, None, None, None, pass pass @torch._disable_dynamo -def fast_cross_entropy_loss(logits, labels, logit_softcapping = 0): +def fast_cross_entropy_loss( + logits, + labels, + logit_softcapping = 0, + logit_scaling = 0, +): """ Arguments: logits: (batch, seq_len, vocab_size) @@ -319,6 +370,7 @@ def fast_cross_entropy_loss(logits, labels, logit_softcapping = 0): logits.view(batch*seq_len, d), labels.view(-1), logit_softcapping, + logit_scaling, ) n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 
8f4101799c..2177b43b9e 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -68,7 +68,8 @@ def forward(ctx, X : torch.Tensor, gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, downW, downW_quant, downA, downB, downS, - _forward_function, _backward_function,): + _forward_function, _backward_function, + inplace = True,): dtype = X.dtype e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) @@ -84,6 +85,7 @@ def forward(ctx, X : torch.Tensor, ) ctx.save_for_backward(gateA, gateB, upA, upB, downA, downB, X, e, g) + ctx.inplace = inplace return i pass @@ -131,7 +133,7 @@ def backward(ctx, dY : torch.Tensor): # dX = matmul_lora(df, upW.t(), upW_quant, upB, upA, upS) # dX += matmul_lora(de, gateW.t(), gateW_quant, gateB, gateA, gateS) upW = fast_dequantize(upW.t(), upW_quant) - dX = torch.matmul(df, upW.t(), out = X) + dX = torch.matmul(df, upW.t(), out = X if ctx.inplace else None) del upW dX += df @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) @@ -147,13 +149,13 @@ def backward(ctx, dY : torch.Tensor): None, None, d_gateA.t(), d_gateB.t(), None, \ None, None, d_upA.t(), d_upB.t(), None, \ None, None, d_downA.t(), d_downB.t(), None, \ - None, None, # _backward and _forward + None, None, None, # _backward and _forward and inplace pass pass from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel -def apply_lora_mlp_swiglu(self, X): +def apply_lora_mlp_swiglu(self, X, inplace = True): gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) upW, upW_quant, upA, upB, upS = get_lora_parameters(self. 
up_proj) downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) @@ -161,13 +163,14 @@ def apply_lora_mlp_swiglu(self, X): gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, downW, downW_quant, downA, downB, downS, - swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel,) + swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel, + inplace,) return out pass from .geglu import geglu_exact_forward_kernel, geglu_exact_backward_kernel -def apply_lora_mlp_geglu_exact(self, X): +def apply_lora_mlp_geglu_exact(self, X, inplace = True): gateW, gateW_quant, gateA, gateB, gateS = get_lora_parameters(self.gate_proj) upW, upW_quant, upA, upB, upS = get_lora_parameters(self. up_proj) downW, downW_quant, downA, downB, downS = get_lora_parameters(self.down_proj) @@ -175,7 +178,8 @@ def apply_lora_mlp_geglu_exact(self, X): gateW, gateW_quant, gateA, gateB, gateS, upW, upW_quant, upA, upB, upS, downW, downW_quant, downA, downB, downS, - geglu_exact_forward_kernel, geglu_exact_backward_kernel,) + geglu_exact_forward_kernel, geglu_exact_backward_kernel, + inplace,) return out pass @@ -229,7 +233,8 @@ class LoRA_QKV(torch.autograd.Function): def forward(ctx, X : torch.Tensor, QW, QW_quant, QA, QB, QS, KW, KW_quant, KA, KB, KS, - VW, VW_quant, VA, VB, VS,): + VW, VW_quant, VA, VB, VS, + inplace = True): dtype = X.dtype Q = matmul_lora(X, QW, QW_quant, QA, QB, QS) @@ -242,6 +247,7 @@ def forward(ctx, X : torch.Tensor, VW, VW_quant, VS, ) ctx.save_for_backward(X, QA, QB, KA, KB, VA, VB,) + ctx.inplace = inplace return Q, K, V pass @@ -286,7 +292,7 @@ def backward(ctx, dQ, dK, dV): # Combine derivatives to find dX # dQ QW = fast_dequantize(QW.t(), QW_quant) - dX = torch.matmul(dQ, QW.t(), out = X) + dX = torch.matmul(dQ, QW.t(), out = X if ctx.inplace else None) del QW dX += (dQ @ QB.to(dtype).t() @ (QS * QA.to(dtype).t())) @@ -308,12 +314,13 @@ def backward(ctx, dQ, dK, dV): return dX.view(batch, seq_len, hd), \ None, None, d_QA.t(), d_QB.t(), None, \ None, None, 
d_KA.t(), d_KB.t(), None, \ - None, None, d_VA.t(), d_VB.t(), None + None, None, d_VA.t(), d_VB.t(), None, \ + None, pass pass -def apply_lora_qkv(self, X): +def apply_lora_qkv(self, X, inplace = True): QW, QW_quant, QA, QB, QS = get_lora_parameters(self.q_proj) KW, KW_quant, KA, KB, KS = get_lora_parameters(self.k_proj) VW, VW_quant, VA, VB, VS = get_lora_parameters(self.v_proj) @@ -321,6 +328,7 @@ def apply_lora_qkv(self, X): QW, QW_quant, QA, QB, QS, KW, KW_quant, KA, KB, KS, VW, VW_quant, VA, VB, VS, + inplace, ) return Q, K, V pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index f26e596530..ac5beb5ab1 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -186,7 +186,9 @@ def backward(ctx, dY): def fast_rms_layernorm(layernorm, X, gemma = False): W = layernorm.weight - eps = layernorm.variance_epsilon + eps = layernorm.variance_epsilon if \ + hasattr(layernorm, "variance_epsilon") \ + else layernorm.eps out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1c48e8e588..ea9a0c53db 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -295,7 +295,7 @@ def patch_mistral_nemo_config(config): send_to_device, ).replace("def send_to_device", "def _fixed_send_to_device") exec(send_to_device) - accelerate.utils.operations.send_to_device = _fixed_send_to_device + # accelerate.utils.operations.send_to_device = _fixed_send_to_device pass pass # ============================================= diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py new file mode 100644 index 0000000000..aa0bcb55ee --- /dev/null +++ b/unsloth/models/cohere.py @@ -0,0 +1,473 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .llama import * +from ._utils import __version__ +try: + from transformers.models.cohere.modeling_cohere import ( + CohereAttention, + CohereDecoderLayer, + CohereModel, + CohereForCausalLM, + CohereRotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, + ) +except: + from packaging.version import Version + transformers_version = Version(transformers_version) + if not transformers_version >= Version("4.42"): + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Cohere.\n"\ + f"The minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + pass +pass + +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) +# For Pytorch 2.1.1 +try: + from transformers.models.cohere.modeling_cohere import ( + CohereSdpaAttention, + CohereFlashAttention2, + ) +except: + CohereSdpaAttention = CohereAttention + CohereFlashAttention2 = CohereAttention +pass + + +def fast_layernorm_inference(self, X, out_weight = None): + XX = X.to(torch.float32, copy = True) + XX -= X.mean(-1, keepdim = True) + variance = XX.square().mean(-1, keepdim = True) + variance += self.variance_epsilon + XX *= variance.rsqrt_() + out_weight[:] = self.weight + XX *= out_weight + return XX.to(X.dtype) +pass + + +# QK norm in Cohere +def CohereAttention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + 
attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention + del self.q_norm_out_weight + del self.k_norm_out_weight + pass + + bsz, q_len, _ = hidden_states.size() + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q, K, V = self.apply_qkv(self, hidden_states) + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + if self.use_qk_norm: + Q = fast_layernorm_compiled(self.q_norm, Q) + K = fast_layernorm_compiled(self.k_norm, K) + pass + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + if position_ids is None: + cos = self.rotary_emb.cos_cached + sin = self.rotary_emb.sin_cached + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + pass + + if past_key_value is not None: + K = torch.cat([past_key_value[0], K], dim = 2) + V = torch.cat([past_key_value[1], V], dim = 2) + pass + past_key_value = (K, V) if use_cache else None + + # Attention module + if (not HAS_FLASH_ATTENTION and attention_mask is None): + # Xformers memory efficient attention + # Also has Flash Attention v2 dispatching + Q = Q.transpose(1, 2) + K = 
K.transpose(1, 2) + V = V.transpose(1, 2) + + # Group query attention + if n_groups != 1: + K = K .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + K = K.reshape(bsz, kv_seq_len, n_heads, head_dim) + V = V.reshape(bsz, kv_seq_len, n_heads, head_dim) + else: + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + pass + A = xformers_attention(Q, K, V, attn_bias = causal_mask) + A = A.view(bsz, q_len, n_heads, head_dim) + + elif HAS_FLASH_ATTENTION and attention_mask is None: + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + A = flash_attn_func(Q, K, V, causal = True) + else: + # Grouped query attention + if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) + pass + # Must be contiguous or else results are False! + # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! 
+ A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2).contiguous() + pass + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) + attn_output = self.apply_o(self, attn_output) + attn_weights = None + return attn_output, attn_weights, past_key_value +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 +def CohereDecoderLayer_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +): + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: + out_weight = torch.empty(self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0") + + # Self Attention + residual = hidden_states + hidden_states = fast_layernorm_inference(self.input_layernorm, hidden_states, out_weight) + hidden_states_attention, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + + # Fully Connected + hidden_states_mlp = fast_swiglu_inference(self.mlp, hidden_states) + residual += hidden_states_attention + residual += hidden_states_mlp + hidden_states = residual + else: + residual = hidden_states + hidden_states = fast_layernorm_compiled(self.input_layernorm, hidden_states) + hidden_states_attention, self_attn_weights, present_key_value = self.self_attn( + 
hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + ) + + # Fully Connected + hidden_states_mlp = self.mlp(hidden_states) + hidden_states = residual + hidden_states_attention + hidden_states_mlp + pass + + outputs = (hidden_states,) + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) + return outputs +pass + + +from math import sqrt as math_sqrt +KV_CACHE_INCREMENT = 256 # KV Cache update size +torch_nn_functional_softmax = torch.nn.functional.softmax +torch_matmul = torch.matmul + +def CohereAttention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]], + position_ids, + do_prefill = False, + attention_mask = None, +): + Xn = hidden_states + bsz, _, hd = hidden_states.size() + K1, V1 = past_key_value + dtype = Xn.dtype + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + attention_size = n_heads*head_dim + # assert(n_kv_heads * n_groups == n_heads) + seq_len = K1.shape[-2] + kv_seq_len = seq_len + 1 + + # Prefill phase + # if not hasattr(self, "paged_attention"): + if do_prefill: + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) + self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = 
dtype, device = "cuda:0") + + # Mistral Nemo 12b has weird dimensions + if attention_size != self.hidden_size: + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + else: + self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] + pass + + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + self.scalar = 1.0 / math_sqrt(self.head_dim) + self.half_head_dim = head_dim // 2 + # Cohere has QK layernorms + if self.use_qk_norm: + self.q_norm_out_weight = torch.empty(self.q_norm.weight.shape, dtype = torch.float32, device = "cuda:0") + self.k_norm_out_weight = torch.empty(self.k_norm.weight.shape, dtype = torch.float32, device = "cuda:0") + else: + self.q_norm_out_weight = None + self.k_norm_out_weight = None + pass + elif kv_seq_len >= self.paged_attention.shape[0]: + self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.attention.resize_((bsz, n_heads, 1, self.attention.shape[-1]+KV_CACHE_INCREMENT)) + pass + + Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0]) + Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0]) + Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1]) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + if self.use_qk_norm: + Q = fast_layernorm_inference(self.q_norm, Q, self.q_norm_out_weight) + K = fast_layernorm_inference(self.k_norm, K, self.k_norm_out_weight) + pass + + # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + cos, sin = self.rotary_emb.get_cached(kv_seq_len) + cos = cos[position_ids].unsqueeze(1) + sin = sin[position_ids].unsqueeze(1) + h = 
self.half_head_dim + + RH_Q = self.RH_Q + RH_Q[:,:,:,:h] = Qn[:,:,:,h:] + RH_Q[:,:,:,h:] = Qn[:,:,:,:h] + torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + Qn *= cos + Qn.addcmul_(RH_Q, sin) + + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + RH_K[:,:,:,:h] = Kn[:,:,:,h:] + RH_K[:,:,:,h:] = Kn[:,:,:,:h] + torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + Kn *= cos + Kn.addcmul_(RH_K, sin) + + # New KV cache + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + + # Handle sliding windows + sliding_window = getattr(self.config, "sliding_window", None) + if sliding_window is not None and kv_seq_len > sliding_window: + # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193 + slicing_tokens = 1 - sliding_window + Knn = Kn[:, :, slicing_tokens:, :]#.contiguous() + Vnn = Vn[:, :, slicing_tokens:, :]#.contiguous() + else: + Knn, Vnn = Kn, Vn + pass + + # Grouped query attention + _, _, cached_len, _ = Knn.shape + if n_groups != 1: + Knn = Knn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vnn = Vnn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) + Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) + pass + # else: + # Knn, Vnn = Knn, Vnn + # pass + + # Attention + if bsz == 1: + Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963 + # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows + A = torch_matmul(Qn, Knn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + # if attention_mask is not 
None: A += attention_mask # Must add attention_mask for batched + A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch_matmul(A, Vnn, out = Qn) + else: + A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) + pass + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, attention_size) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) + return A, (Kn, Vn) +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +# @torch.inference_mode +def CohereModel_fast_forward_inference( + self, + input_ids, + past_key_values, + position_ids, + attention_mask = None, +): + out_weight = torch.empty_like(self.model.layers[0].input_layernorm.weight, dtype = torch.float32, device = "cuda:0") + input_ids = input_ids[:,:self.max_seq_length] + hidden_states = self.model.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + sliding_window = getattr(self.config, "sliding_window", None), + ) + else: + attention_mask = None + pass + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.model.layers): + residual = hidden_states + hidden_states = fast_layernorm_inference(decoder_layer.input_layernorm, hidden_states, out_weight) + hidden_states_attention, present_key_value = CohereAttention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = attention_mask, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), + ) + + hidden_states_mlp = fast_swiglu_inference(self.mlp, hidden_states) + residual += hidden_states_attention + residual += 
hidden_states_mlp + hidden_states = residual + + next_decoder_cache.append(present_key_value) + pass + hidden_states = fast_layernorm_inference(self.model.norm, hidden_states, out_weight) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + + +class FastCohereModel(FastLlamaModel): + + @staticmethod + def pre_patch(): + init_name, function = patch_linear_scaling( + model_name = "cohere", + rope_module = LlamaRotaryEmbedding, + scaled_rope_module = LlamaLinearScalingRotaryEmbedding, + attention_module = CohereAttention, + ) + if init_name is not None: + exec(function, globals()) + CohereAttention.__init__ = eval(init_name) + pass + CohereAttention .forward = CohereAttention_fast_forward + CohereSdpaAttention .forward = CohereAttention_fast_forward + CohereFlashAttention2.forward = CohereAttention_fast_forward + CohereDecoderLayer .forward = CohereDecoderLayer_fast_forward + CohereModel .forward = LlamaModel_fast_forward + CohereForCausalLM .forward = CausalLM_fast_forward(CohereModel_fast_forward_inference) + PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(CohereForCausalLM) + + import transformers.models.cohere.modeling_cohere + transformers.models.cohere.modeling_cohere.CohereRotaryEmbedding = LlamaRotaryEmbedding + return + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f62f0f1165..5ccf906acb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -305,6 +305,20 @@ def fast_rms_layernorm_inference_gemma(self, X, out_weight = None): pass +# Normal layernorm with mean removal +@torch.compile(fullgraph = False, dynamic = True, options = torch_compile_options) +def fast_layernorm_compiled(layernorm, X): + old_dtype = X.dtype + X = X.float() + mean = X.mean(-1, keepdim = True) + Xbar = X - mean + X = Xbar * torch.rsqrt(Xbar.square().mean(-1, keepdim = True) 
+ \ + layernorm.variance_epsilon) * \ + layernorm.weight.float() + return X.to(old_dtype) +pass + + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, @@ -495,6 +509,16 @@ def LlamaDecoderLayer_fast_forward( pass +# https://github.com/unslothai/unsloth/issues/404#issuecomment-2323473452 +__DTYPE_MAP = { + "float32": torch.float32, + torch.float32: torch.float32, + "float16": torch.float16, + torch.float16: torch.float16, + "bfloat16": torch.bfloat16, + torch.bfloat16: torch.bfloat16, +} + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 def LlamaModel_fast_forward( self, @@ -576,11 +600,18 @@ def LlamaModel_fast_forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - inputs_embeds = inputs_embeds.to(self.config.torch_dtype) + # inputs_embeds = inputs_embeds.to(self.config.torch_dtype) + torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) + if torch_dtype is not None: + inputs_embeds = inputs_embeds.to(torch_dtype) + else: + raise TypeError("Unsloth: torch_dtype for models is not bfloat16, float16 or float32!") + pass # Normalized from Gemma IS_GEMMA = self.config.model_type.startswith("gemma") IS_GEMMA2 = self.config.model_type.startswith("gemma2") + IS_COHERE = self.config.model_type.startswith("cohere") train_embed_tokens = self.embed_tokens.weight.requires_grad if IS_GEMMA: @@ -786,8 +817,11 @@ def custom_forward(*inputs): # Final layernorm if use_cache: - hidden_states = (fast_rms_layernorm_inference_gemma if IS_GEMMA else fast_rms_layernorm_inference)\ + hidden_states = \ + (fast_rms_layernorm_inference_gemma if IS_GEMMA else fast_rms_layernorm_inference)\ (self.norm, hidden_states) + elif IS_COHERE: + hidden_states = fast_layernorm_compiled(self.norm, hidden_states) else: hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) pass @@ -877,6 +911,7 
@@ def _CausalLM_fast_forward( output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, + num_logits_to_keep: Optional[int] = 0, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: @@ -925,6 +960,7 @@ def _CausalLM_fast_forward( loss = None logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): @@ -937,16 +973,26 @@ def _CausalLM_fast_forward( logits = shift_logits, labels = shift_labels, logit_softcapping = logit_softcapping, + logit_scaling = logit_scaling, ) - elif logit_softcapping != 0: - if logits.requires_grad: - logits = (1.0 / logit_softcapping) * logits - logits = torch.tanh(logits) - logits = logit_softcapping * logits - else: - logits *= (1.0 / logit_softcapping) - torch.tanh(logits, out = logits) - logits *= logit_softcapping + else: + if logit_scaling != 0: + if logits.requires_grad: + logits = logit_scaling * logits + else: + logits *= logit_scaling + pass + pass + if logit_softcapping != 0: + if logits.requires_grad: + logits = (1.0 / logit_softcapping) * logits + logits = torch.tanh(logits) + logits = logit_softcapping * logits + else: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping + pass pass pass @@ -978,6 +1024,7 @@ def PeftModelForCausalLM_fast_forward( output_hidden_states=None, return_dict=None, task_ids=None, + num_logits_to_keep=0, **kwargs, ): return self.base_model( @@ -989,6 +1036,7 @@ def PeftModelForCausalLM_fast_forward( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, + num_logits_to_keep=num_logits_to_keep, **kwargs, ) pass @@ -2181,6 +2229,7 @@ def patch_peft_model( elif model_type == "qwen2": apply_lora_mlp = apply_lora_mlp_swiglu elif model_type == "gemma": apply_lora_mlp = 
apply_lora_mlp_geglu_approx elif model_type == "gemma2": apply_lora_mlp = apply_lora_mlp_geglu_approx + elif model_type == "cohere": apply_lora_mlp = apply_lora_mlp_swiglu else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") pass @@ -2240,6 +2289,14 @@ def patch_peft_model( lora_dropout = model.peft_config[active_adapter].lora_dropout bias = model.peft_config[active_adapter].bias + # We also do not inplace edit QKV for Cohere! + from functools import partial + _apply_lora_mlp = \ + partial(apply_lora_mlp, inplace = False) \ + if model_type == "cohere" else \ + apply_lora_mlp + pass + if lora_dropout == 0 and bias == "none": for idx, layer in enumerate(model.model.model.layers): @@ -2259,7 +2316,7 @@ def patch_peft_model( (len(getattr(down_proj, "lora_magnitude_vector", []) or []) == 0): # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module - layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) + layer.mlp.forward = types.MethodType(_apply_lora_mlp, layer.mlp) n_mlp += 1 else: logger.warning_once( diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index e1f17aca04..13710eeda1 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -13,9 +13,10 @@ # limitations under the License. 
from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING -from .llama import FastLlamaModel, logger +from .llama import FastLlamaModel, logger from .mistral import FastMistralModel -from .qwen2 import FastQwen2Model +from .qwen2 import FastQwen2Model +from .cohere import FastCohereModel from transformers import AutoConfig from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel @@ -278,6 +279,8 @@ def from_pretrained( dispatch_model = FastGemma2Model elif model_type == "qwen2": dispatch_model = FastQwen2Model + elif model_type == "cohere": + dispatch_model = FastCohereModel else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 3f49c96551..bff7f0253f 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -227,6 +227,7 @@ "meta-llama/Meta-Llama-3.1-8B-Instruct", ), "unsloth/Meta-Llama-3.1-70B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B", "meta-llama/Meta-Llama-3.1-70B", ), "unsloth/Meta-Llama-3.1-405B-bnb-4bit" : ( @@ -236,6 +237,7 @@ "meta-llama/Meta-Llama-3.1-405B-Instruct", ), "unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-70B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct", ), "unsloth/Mistral-Large-Instruct-2407-bnb-4bit" : ( @@ -253,6 +255,27 @@ "unsloth/Phi-3.5-mini-instruct", "microsoft/Phi-3.5-mini-instruct", ), + "unsloth/c4ai-command-r-08-2024-bnb-4bit" : ( + "CohereForAI/c4ai-command-r-08-2024", + ), + "unsloth/c4ai-command-r-plus-08-2024-bnb-4bit" : ( + "CohereForAI/c4ai-command-r-plus-08-2024", + ), + "unsloth/Llama-3.1-Storm-8B-bnb-4bit" : ( + "unsloth/Llama-3.1-Storm-8B", + "akjindal53244/Llama-3.1-Storm-8B", + ), + "unsloth/Hermes-3-Llama-3.1-8B-bnb-4bit" : ( + "unsloth/Hermes-3-Llama-3.1-8B", + "NousResearch/Hermes-3-Llama-3.1-8B", + ), + "unsloth/Hermes-3-Llama-3.1-70B-bnb-4bit" : ( + "unsloth/Hermes-3-Llama-3.1-70B", + 
"NousResearch/Hermes-3-Llama-3.1-70B", + ), + "unsloth/Hermes-3-Llama-3.1-405B-bnb-4bit" : ( + "NousResearch/Hermes-3-Llama-3.1-405B", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/trainer.py b/unsloth/trainer.py index c8e00be231..45616ca6be 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -14,8 +14,13 @@ from dataclasses import dataclass, field from typing import Optional -from transformers import TrainingArguments + from trl import SFTTrainer +try: + from trl import SFTConfig as TrainingArguments +except: + from transformers import TrainingArguments +pass from . import is_bfloat16_supported __all__ = [ From 480f2ef31c7c19e216bf21f57d08fa1895e5fc73 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Sep 2024 13:52:12 -0700 Subject: [PATCH 0415/1088] Gemma faster inference (#987) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update token retrieval logic (#952) * Fix DPO (#947) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update hf token retrieval logic --------- Co-authored-by: Daniel Han * Update llama.py * get_token * Update README.md * Update gemma2.py * Update rms_layernorm.py * synchronize * Update gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * layernorm * Update rms_layernorm.py * Update gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * revert * Gemma * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update 
rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma2.py * Change UnslothTrainingArguments base class to SFTConfig (#979) * Cohere * Update trainer.py * Cohere * Cohere * New models * Update llama.py * Update llama.py * Update cohere.py * Update llama.py * Update cohere.py * retry * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * _apply_lora_mlp * Update _utils.py * Gemma fixes * Update llama.py * Update flex_attention.py --------- Co-authored-by: Hafedh <70411813+not-lain@users.noreply.github.com> Co-authored-by: Tuan Pham <82665400+vTuanpham@users.noreply.github.com> --- unsloth/kernels/__init__.py | 6 ++++- unsloth/kernels/flex_attention.py | 37 +++++++++++++++++++++++++++++++ unsloth/models/_utils.py | 4 ++++ unsloth/models/gemma2.py | 6 ++++- unsloth/models/llama.py | 11 +++++++++ 5 files changed, 62 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index c2de979a6f..26f632ee12 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -33,7 +33,11 @@ ) from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora -from .flex_attention import HAS_FLEX_ATTENTION, slow_attention_softcapping +from .flex_attention import ( + HAS_FLEX_ATTENTION, + slow_attention_softcapping, + slow_inference_attention_softcapping, +) if HAS_FLEX_ATTENTION: from .flex_attention import ( diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index a992a02382..9cf999e2b7 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -80,3 +80,40 @@ def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): return A pass + +torch_matmul = torch.matmul 
+torch_tanh = torch.tanh +torch_nn_functional_softmax = torch.nn.functional.softmax +def slow_inference_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): + n_heads = self.num_heads + head_dim = self.head_dim + n_kv_heads = self.num_key_value_heads + n_groups = self.num_key_value_groups + + # Grouped query attention + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + + # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below + # We default to using the config file itself + # s = self.config.hidden_size // self.config.num_attention_heads + s = self.config.query_pre_attn_scalar + t = self.config.attn_logit_softcapping + + Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly + A = torch_matmul(Q, K.transpose(2, 3)) + + # Logit softcapping + A /= t; torch_tanh(A, out = A); A *= t; + A += causal_mask[:q_len, :q_len] + # Much slower in torch compile! 
+ # A.masked_fill_(causal_mask[:q_len, :q_len], -float("inf")) + A = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) + A = torch_matmul(A, V) + A = A.transpose(1, 2).contiguous() + A = A.reshape(bsz, q_len, n_heads*head_dim) + return A +pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ea9a0c53db..242d234db8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -39,6 +39,8 @@ "create_boolean_mask", "torch_amp_custom_fwd", "torch_amp_custom_bwd", + "accelerate_old_send_to_device", + "accelerate_new_send_to_device", ] import torch @@ -287,6 +289,7 @@ def patch_mistral_nemo_config(config): import accelerate.utils.operations if hasattr(accelerate.utils.operations, "send_to_device") and \ accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": + accelerate_old_send_to_device = accelerate.utils.operations.send_to_device from accelerate.utils.operations import * send_to_device = inspect.getsource(accelerate.utils.operations.send_to_device) send_to_device = re.sub( @@ -296,6 +299,7 @@ def patch_mistral_nemo_config(config): ).replace("def send_to_device", "def _fixed_send_to_device") exec(send_to_device) # accelerate.utils.operations.send_to_device = _fixed_send_to_device + accelerate_new_send_to_device = _fixed_send_to_device pass pass # ============================================= diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 6858f52573..218849ef2f 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -157,7 +157,10 @@ def Gemma2Attention_fast_forward( A = A.reshape(bsz, q_len, n_heads*head_dim) else: mask = causal_mask if attention_mask is None else attention_mask - A = slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, kv_seq_len) + fx = slow_inference_attention_softcapping \ + if "_flag_for_generation" in kwargs else \ + slow_attention_softcapping + A = fx(Q, K, V, causal_mask, self, bsz, kv_seq_len) pass A = 
self.apply_o(self, A) return A, None, past_key_value @@ -192,6 +195,7 @@ def Gemma2DecoderLayer_fast_forward( output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, + _flag_for_generation=True, ) hidden_states = fast_rms_layernorm_inference_gemma(self.post_attention_layernorm, hidden_states, out_weight) hidden_states += residual diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5ccf906acb..3fcb8a76d2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -953,6 +953,8 @@ def _CausalLM_fast_forward( if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) + elif num_logits_to_keep != 0: + logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass @@ -1368,8 +1370,14 @@ def _fast_generate(*args, **kwargs): pass internal_model._flag_for_generation = True + # Must patch accelerate for Xformers + import accelerate.utils.operations + accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + # For newer HF kwargs["cache_implementation"] = "dynamic" + # For num_logits_to_keep + kwargs["num_logits_to_keep"] = 1 # Remove token_type_ids kwargs.pop("token_type_ids", None) @@ -1402,6 +1410,9 @@ def _fast_generate(*args, **kwargs): pass if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation + # Return accelerate back + accelerate.utils.operations.send_to_device = accelerate_old_send_to_device + return output pass return _fast_generate From 3682672d733ff6ef22d738cae35ae04d4ffd6c52 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Sep 2024 17:30:40 -0700 Subject: [PATCH 0416/1088] Fix bug --- unsloth/models/_utils.py | 2 ++ unsloth/models/llama.py | 10 +++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 
242d234db8..6dd17e739e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -285,6 +285,8 @@ def patch_mistral_nemo_config(config): # ============================================= # Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout' +accelerate_old_send_to_device = None +accelerate_new_send_to_device = None if Version(xformers_version) >= Version("0.0.27"): import accelerate.utils.operations if hasattr(accelerate.utils.operations, "send_to_device") and \ diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3fcb8a76d2..39998127f0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1371,8 +1371,10 @@ def _fast_generate(*args, **kwargs): internal_model._flag_for_generation = True # Must patch accelerate for Xformers - import accelerate.utils.operations - accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + if accelerate_new_send_to_device is not None: + import accelerate.utils.operations + accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + pass # For newer HF kwargs["cache_implementation"] = "dynamic" @@ -1411,7 +1413,9 @@ def _fast_generate(*args, **kwargs): if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation # Return accelerate back - accelerate.utils.operations.send_to_device = accelerate_old_send_to_device + if accelerate_new_send_to_device is not None: + accelerate.utils.operations.send_to_device = accelerate_old_send_to_device + pass return output pass From d91d40a7b6b556f2d1fdd3e1e430f7a76a799627 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 4 Sep 2024 00:28:53 -0700 Subject: [PATCH 0417/1088] Bug fixes --- unsloth/kernels/utils.py | 335 ++++++++++++++++++++++++++------------ unsloth/models/mistral.py | 3 + 2 files changed, 231 insertions(+), 107 deletions(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 23be372217..0af65edeee 100644 --- 
a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -54,6 +54,9 @@ def calculate_settings(n): import bitsandbytes as bnb +# https://github.com/bitsandbytes-foundation/bitsandbytes/pull/1330/files +HAS_CUDA_STREAM = Version(bnb.__version__) > Version("0.43.3") +CUDA_STREAM = torch.cuda.current_stream("cuda:0") get_ptr = bnb.functional.get_ptr import ctypes cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 @@ -105,119 +108,237 @@ def get_lora_parameters_bias(proj): pass -def fast_dequantize(W, quant_state = None, out = None): - if quant_state is None: return W - if type(quant_state) is not list: - # New quant_state as a class - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - absmax = quant_state.absmax - shape = quant_state.shape - dtype = quant_state.dtype - blocksize = quant_state.blocksize - offset = quant_state.offset - state2 = quant_state.state2 - absmax2 = state2.absmax - code2 = state2.code - blocksize2 = state2.blocksize - else: - # Old quant_state as a list of lists - absmax, shape, dtype, blocksize, compressed_stats, _, _ = quant_state - offset, state2 = compressed_stats - absmax2, code2, blocksize2, _, _, _, _ = state2 +if HAS_CUDA_STREAM: + def fast_dequantize(W, quant_state = None, out = None): + if quant_state is None: return W + if type(quant_state) is not list: + # New quant_state as a class + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 = state2.blocksize + else: + # Old quant_state as a list of lists + absmax, shape, dtype, blocksize, compressed_stats, _, _ = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass + + # Create weight matrix + if out is None: + out = torch.empty(shape, dtype = 
dtype, device = "cuda:0") + else: + assert(out.shape == shape) + assert(out.dtype == dtype) + + # NF4 dequantization of statistics + n_elements_absmax = absmax.numel() + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") + + # Do dequantization + ptr_out_absmax = get_ptr(out_absmax) + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, + ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax), CUDA_STREAM, + ) + out_absmax += offset + + fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ + cdequantize_blockwise_bf16_nf4 + fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), + ctypes.c_int(blocksize), ctypes.c_int(out.numel()), CUDA_STREAM,) + + # Careful returning transposed data + is_transposed = (True if W.shape[0] == 1 else False) + return out.t() if is_transposed else out pass +else: + def fast_dequantize(W, quant_state = None, out = None): + if quant_state is None: return W + if type(quant_state) is not list: + # New quant_state as a class + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 = state2.blocksize + else: + # Old quant_state as a list of lists + absmax, shape, dtype, blocksize, compressed_stats, _, _ = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass - # Create weight matrix - if out is None: - out = torch.empty(shape, dtype = dtype, device = "cuda:0") - else: - assert(out.shape == shape) - assert(out.dtype == dtype) - - # NF4 dequantization of statistics - n_elements_absmax = absmax.numel() - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") - - # Do dequantization - ptr_out_absmax = get_ptr(out_absmax) 
- cdequantize_blockwise_fp32( - get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, - ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax) - ) - out_absmax += offset - - fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ - cdequantize_blockwise_bf16_nf4 - fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), - ctypes.c_int(blocksize), ctypes.c_int(out.numel())) - - # Careful returning transposed data - is_transposed = (True if W.shape[0] == 1 else False) - return out.t() if is_transposed else out + # Create weight matrix + if out is None: + out = torch.empty(shape, dtype = dtype, device = "cuda:0") + else: + assert(out.shape == shape) + assert(out.dtype == dtype) + + # NF4 dequantization of statistics + n_elements_absmax = absmax.numel() + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") + + # Do dequantization + ptr_out_absmax = get_ptr(out_absmax) + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, + ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax), + ) + out_absmax += offset + + fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ + cdequantize_blockwise_bf16_nf4 + fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), + ctypes.c_int(blocksize), ctypes.c_int(out.numel()),) + + # Careful returning transposed data + is_transposed = (True if W.shape[0] == 1 else False) + return out.t() if is_transposed else out + pass pass -def fast_gemv(X, W, quant_state, out = None): - if quant_state is None: return torch.matmul(X, W, out = out) - # For fast X @ W where seq_len == 1 - # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 - _, q_len, hd = X.shape - # assert(q_len == 1) - - if type(quant_state) is not list: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - absmax = quant_state.absmax - shape = quant_state.shape - dtype = quant_state.dtype - blocksize = 
quant_state.blocksize - stats = quant_state.code - offset = quant_state.offset - state2 = quant_state.state2 - absmax2 = state2.absmax - code2 = state2.code - blocksize2 = state2.blocksize - else: - absmax, shape, dtype, blocksize, compressed_stats, quant_type, stats = quant_state - offset, state2 = compressed_stats - absmax2, code2, blocksize2, _, _, _, _ = state2 +if HAS_CUDA_STREAM: + def fast_gemv(X, W, quant_state, out = None): + if quant_state is None: return torch.matmul(X, W, out = out) + # For fast X @ W where seq_len == 1 + # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 + _, q_len, hd = X.shape + # assert(q_len == 1) + + if type(quant_state) is not list: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + stats = quant_state.code + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 = state2.blocksize + else: + absmax, shape, dtype, blocksize, compressed_stats, quant_type, stats = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass + # assert(dtype == X.dtype) + bout = shape[0] + + if out is None: + out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") + # else: + # assert(out.shape == (1, 1, bout,)) + # pass + + n = 1 + m = shape[0] + k = shape[1] + lda = shape[0] + ldc = shape[0] + ldb = (hd+1)//2 + m = ctypes.c_int32(m) + n = ctypes.c_int32(n) + k = ctypes.c_int32(k) + lda = ctypes.c_int32(lda) + ldb = ctypes.c_int32(ldb) + ldc = ctypes.c_int32(ldc) + + df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), + ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), CUDA_STREAM, + ) + df += offset + absmax = df + + fx = 
cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ + cgemm_4bit_inference_naive_bf16 + + blocksize = ctypes.c_int32(blocksize) + fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), + lda, ldb, ldc, blocksize, CUDA_STREAM,) + + return out + pass +else: + def fast_gemv(X, W, quant_state, out = None): + if quant_state is None: return torch.matmul(X, W, out = out) + # For fast X @ W where seq_len == 1 + # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 + _, q_len, hd = X.shape + # assert(q_len == 1) + + if type(quant_state) is not list: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + absmax = quant_state.absmax + shape = quant_state.shape + dtype = quant_state.dtype + blocksize = quant_state.blocksize + stats = quant_state.code + offset = quant_state.offset + state2 = quant_state.state2 + absmax2 = state2.absmax + code2 = state2.code + blocksize2 = state2.blocksize + else: + absmax, shape, dtype, blocksize, compressed_stats, quant_type, stats = quant_state + offset, state2 = compressed_stats + absmax2, code2, blocksize2, _, _, _, _ = state2 + pass + # assert(dtype == X.dtype) + bout = shape[0] + + if out is None: + out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") + # else: + # assert(out.shape == (1, 1, bout,)) + # pass + + n = 1 + m = shape[0] + k = shape[1] + lda = shape[0] + ldc = shape[0] + ldb = (hd+1)//2 + m = ctypes.c_int32(m) + n = ctypes.c_int32(n) + k = ctypes.c_int32(k) + lda = ctypes.c_int32(lda) + ldb = ctypes.c_int32(ldb) + ldc = ctypes.c_int32(ldc) + + df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), + ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), + ) + df += offset + absmax = df + + fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ + cgemm_4bit_inference_naive_bf16 + + blocksize = 
ctypes.c_int32(blocksize) + fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), + lda, ldb, ldc, blocksize,) + + return out pass - # assert(dtype == X.dtype) - bout = shape[0] - - if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") - # else: - # assert(out.shape == (1, 1, bout,)) - # pass - - n = 1 - m = shape[0] - k = shape[1] - lda = shape[0] - ldc = shape[0] - ldb = (hd+1)//2 - m = ctypes.c_int32(m) - n = ctypes.c_int32(n) - k = ctypes.c_int32(k) - lda = ctypes.c_int32(lda) - ldb = ctypes.c_int32(ldb) - ldc = ctypes.c_int32(ldc) - - df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") - cdequantize_blockwise_fp32( - get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), - ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), - ) - df += offset - absmax = df - - fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ - cgemm_4bit_inference_naive_bf16 - - blocksize = ctypes.c_int32(blocksize) - fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), - lda, ldb, ldc, blocksize) - - return out pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index ed6207bb06..15e9efc426 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -181,6 +181,7 @@ def MistralForCausalLM_fast_forward( output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, + num_logits_to_keep: Optional[int] = 0, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: @@ -236,6 +237,8 @@ def MistralForCausalLM_fast_forward( if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) + elif num_logits_to_keep != 0: + logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass From 
f549a5473c101bfcf279d28aa23152038b96fd22 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 8 Sep 2024 03:16:09 -0700 Subject: [PATCH 0418/1088] Bug fixes (#1004) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update token retrieval logic (#952) * Fix DPO (#947) * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * update hf token retrieval logic --------- Co-authored-by: Daniel Han * Update llama.py * get_token * Update README.md * Update gemma2.py * Update rms_layernorm.py * synchronize * Update gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * layernorm * Update rms_layernorm.py * Update gemma2.py * Update rms_layernorm.py * Update rms_layernorm.py * revert * Gemma * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update gemma2.py * Change UnslothTrainingArguments base class to SFTConfig (#979) * Cohere * Update trainer.py * Cohere * Cohere * New models * Update llama.py * Update llama.py * Update cohere.py * Update llama.py * Update cohere.py * retry * Update fast_lora.py * Update llama.py * Update fast_lora.py * Update llama.py * Update llama.py * Update cross_entropy_loss.py * _apply_lora_mlp * Update _utils.py * Gemma 
fixes * Update llama.py * Update flex_attention.py * Update llama.py * layernorm * Update llama.py * Update llama.py * Flex Attention * Update gemma2.py * Update __init__.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update chat_templates.py (#999) fix all misspelled "unsued" to "unused" * Update key from "from" to "user" (#1000) When use [tokenizer.apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating), the key should be "role" rather than "from", this is liknk to [this issue](https://github.com/unslothai/unsloth/issues/994) I don't know it is suitable for all situation, I also can add a dedicated parameter of the key if you think it is better. 
* Update chat_templates.py * Also patch the KTO trainer (#1001) * flex attention * Update llama.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update gemma2.py * Update gemma2.py --------- Co-authored-by: Hafedh <70411813+not-lain@users.noreply.github.com> Co-authored-by: Tuan Pham <82665400+vTuanpham@users.noreply.github.com> Co-authored-by: Yihao Wang <42559837+AgainstEntropy@users.noreply.github.com> Co-authored-by: Peng Co-authored-by: Kyle Corbitt --- unsloth/chat_templates.py | 16 +-- unsloth/kernels/__init__.py | 8 +- unsloth/kernels/flex_attention.py | 157 +++++++++++++++++++++--------- unsloth/models/_utils.py | 5 +- unsloth/models/gemma2.py | 1 - unsloth/models/llama.py | 48 ++++----- unsloth/tokenizer_utils.py | 2 +- 7 files changed, 147 insertions(+), 90 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index d81413ae70..e19bea0771 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1033,7 +1033,7 @@ def to_sharegpt( merged_prompt = "", merged_column_name = "instruction", output_column_name = "output", - remove_unsued_columns = True, + remove_unused_columns = True, conversation_extension = 1, random_state = 3407, ): @@ -1047,7 +1047,7 @@ def to_sharegpt( merged_prompt = "", Prompt to merge columns into 1 input merged_column_name = "instruction", Final column name for the input field output_column_name = "output", Final column name for the output field - remove_unsued_columns = True, + remove_unused_columns = True, conversation_extension = 1, Automatically combines `conversation_extension` convos into 1 random_state = 3407, """ @@ -1068,8 +1068,8 @@ def __convert_to_sharegpt__(examples): assistants = examples[output_column_name] texts = [ [ - {"from" : "user", "content" : str(user) }, - {"from" : "assistant", "content" : str(assistant)}, + {"from" : "human", "value" : str(user) }, + {"from" : "gpt", "value" : str(assistant)}, 
] \ for user, assistant in zip(users, assistants) ] @@ -1080,8 +1080,8 @@ def __convert_to_sharegpt__(examples): __convert_to_sharegpt__, batched = True, desc = "Converting to ShareGPT", - # Remove unsued columns! - remove_columns = dataset.column_names if remove_unsued_columns else None, + # Remove unused columns! + remove_columns = dataset.column_names if remove_unused_columns else None, ) # Randomnly concat conversations to create a long stream! @@ -1115,8 +1115,8 @@ def __convert_to_sharegpt__(examples): __combine_conversations__, batched = True, desc = "Extending conversations", - # Remove unsued columns! - remove_columns = dataset.column_names if remove_unsued_columns else None, + # Remove unused columns! + remove_columns = dataset.column_names if remove_unused_columns else None, ) return dataset pass diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 26f632ee12..cd1d90f262 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -37,14 +37,10 @@ HAS_FLEX_ATTENTION, slow_attention_softcapping, slow_inference_attention_softcapping, + create_flex_attention_causal_mask, + create_flex_attention_sliding_window_mask, ) -if HAS_FLEX_ATTENTION: - from .flex_attention import ( - FLEX_ATTENTION_PADDING, - ) -pass - try: print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") except: diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 9cf999e2b7..2fba359b77 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -25,59 +25,120 @@ } # Flex Attention supported from torch 2.5 onwards only -import torch.nn -if hasattr(torch.nn, "attention"): - import torch.nn.attention - if hasattr(torch.nn.attention, "flex_attention"): - import torch.nn.attention.flex_attention - from torch.nn.attention.flex_attention import flex_attention - from torch.nn.attention.flex_attention import create_block_mask - FLEX_ATTENTION_PADDING = getattr( - 
torch.nn.attention.flex_attention, - "_DEFAULT_SPARSE_BLOCK_SIZE", - 1, - ) - flex_attention = torch.compile(flex_attention, dynamic = False) - HAS_FLEX_ATTENTION = True - else: - HAS_FLEX_ATTENTION = False - pass -else: +try: + from torch.nn.attention.flex_attention import ( + flex_attention as _flex_attention, + create_block_mask as _create_block_mask, + ) + _flex_attention = torch.compile(_flex_attention, dynamic = True, options = torch_compile_options) + HAS_FLEX_ATTENTION = True +except: HAS_FLEX_ATTENTION = False pass -# Logit softcapping -@torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) -def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): - n_heads = self.num_heads - head_dim = self.head_dim - n_kv_heads = self.num_key_value_heads - n_groups = self.num_key_value_groups - - # Grouped query attention - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - K = K.reshape(bsz, n_heads, q_len, head_dim) - V = V.reshape(bsz, n_heads, q_len, head_dim) - # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e - # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below - # We default to using the config file itself - # s = self.config.hidden_size // self.config.num_attention_heads - s = self.config.query_pre_attn_scalar - t = self.config.attn_logit_softcapping +if not HAS_FLEX_ATTENTION: - Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly - A = torch.matmul(Q, K.transpose(2, 3)) - A = t * torch.tanh(A / t) # Logit softcapping - A += causal_mask[:q_len, :q_len] - # Much slower in torch compile! 
- # A.masked_fill_(causal_mask[:q_len, :q_len], -float("inf")) - A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) - A = torch.matmul(A, V) - A = A.transpose(1, 2).contiguous() - A = A.reshape(bsz, q_len, n_heads*head_dim) - return A + # Logit softcapping + @torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) + def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): + n_heads = self.num_heads + head_dim = self.head_dim + n_kv_heads = self.num_key_value_heads + n_groups = self.num_key_value_groups + + # Grouped query attention + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + + # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e + # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below + # We default to using the config file itself + # s = self.config.hidden_size // self.config.num_attention_heads + s = self.config.query_pre_attn_scalar + t = self.config.attn_logit_softcapping + + Q = Q * torch.tensor(s**-0.5, dtype = Q.dtype) # Follow Keras exactly + A = torch.matmul(Q, K.transpose(2, 3)) + A = t * torch.tanh(A / t) # Logit softcapping + A += causal_mask[:q_len, :q_len] + # Much slower in torch compile! 
+ # A.masked_fill_(causal_mask[:q_len, :q_len], -float("inf")) + A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(Q.dtype) + A = torch.matmul(A, V) + A = A.transpose(1, 2).contiguous() + A = A.reshape(bsz, q_len, n_heads*head_dim) + return A + pass + + create_flex_attention_causal_mask = None + create_flex_attention_sliding_window_mask = None +else: + # See https://github.com/pytorch-labs/attention-gym/blob/main/examples/flex_attn.ipynb + # for more examples + # BSD 3-Clause License Copyright (c) 2023, Driss Guessous, Horace He et al + import functools, math + + def generate_tanh_softcap(t): + def tanh_softcap(x, b, h, q_idx, kv_idx): + return t * torch.tanh(x / t) + return tanh_softcap + pass + def causal_masker(b, h, q_idx, kv_idx): + return q_idx >= kv_idx + pass + + @functools.lru_cache + def sliding_window_masker(size = 4096): + def sliding_window(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + window_mask = q_idx - kv_idx <= size + return causal_mask & window_mask + return sliding_window + pass + + @functools.lru_cache + def create_block_mask(mask, n = 128): + return _create_block_mask( + mask, 1, 1, n, n, + BLOCK_SIZE = 128, + _compile = True, + ) + pass + + def create_flex_attention_causal_mask(max_seq_length = 8192): + causal_mask = create_block_mask(causal_masker, max_seq_length) + return causal_mask + pass + + def create_flex_attention_sliding_window_mask(max_seq_length = 8192, sliding_window = 4096): + sliding_masker = sliding_window_masker(sliding_window) + causal_mask = create_block_mask(sliding_masker, max_seq_length) + return causal_mask + pass + + @functools.lru_cache + def flex_attention(s, t): + scale = 1.0 / math.sqrt(s) + score_mod = generate_tanh_softcap(t) + return functools.partial( + _flex_attention, score_mod = score_mod, scale = scale, enable_gqa = True, + ) + pass + + def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): + n_heads = self.num_heads + head_dim = self.head_dim + s = 
self.config.query_pre_attn_scalar + t = self.config.attn_logit_softcapping + fx = flex_attention(s, t) + A = fx(query = Q, key = K, value = V, block_mask = causal_mask) + A = A.transpose(1, 2).contiguous() + A = A.reshape(bsz, q_len, n_heads*head_dim) + return A + pass pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6dd17e739e..b5a57a777c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -330,7 +330,7 @@ def is_big_gpu(index): "config.coordinate_descent_tuning = True", "config.max_autotune_gemm = False", # GEMM is unnecessary "config.autotune_multi_device = False", - "config.max_autotune_gemm_backends = 'ATEN'", # Not much faster + "config.max_autotune_gemm_backends = 'TRITON,ATEN,CPP'", # Not much faster "config.aggressive_fusion = False", # Careful changes results! "config.cuda.enable_cuda_lto = True", "config.cuda.use_fast_math = True", @@ -338,9 +338,10 @@ def is_big_gpu(index): ] # Torch dynamo arguments torch_dynamo_arguments = [ - "config.accumulated_cache_size_limit = 512", # Bump up a bit from 256 + "config.accumulated_cache_size_limit = 1024", # Bump up a bit from 256 "config.suppress_errors = True", # Supress errors for now "config.do_not_emit_runtime_asserts = True", + "config.cache_size_limit = 1024", # Flex Attention ] import torch._inductor.config as config for _try_compile_argument in torch_compile_arguments: diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 218849ef2f..bf40ea8a27 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -156,7 +156,6 @@ def Gemma2Attention_fast_forward( ) A = A.reshape(bsz, q_len, n_heads*head_dim) else: - mask = causal_mask if attention_mask is None else attention_mask fx = slow_inference_attention_softcapping \ if "_flag_for_generation" in kwargs else \ slow_attention_softcapping diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 39998127f0..22dcb25a2f 100644 --- a/unsloth/models/llama.py +++ 
b/unsloth/models/llama.py @@ -711,12 +711,6 @@ def LlamaModel_fast_forward( offloaded_gradient_checkpointing = True pass - # Check for Flex Attention - # if IS_GEMMA2 and HAS_FLEX_ATTENTION: - # if not (seq_length % FLEX_ATTENTION_PADDING == 0): - # USE_FLEX_ATTENTION = True - - # Gemma2 has alternating SWA and global attn if IS_GEMMA2: if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: @@ -738,23 +732,29 @@ def LlamaModel_fast_forward( sliding_window = None, ) elif not hasattr(self, "SWA_mask"): - n = self.max_seq_length # self.config.max_position_embeddings - # masked_fill is making stuff slower! - # self. GA_mask = create_boolean_mask(n = n, sliding_window = 0) - # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) - from transformers.modeling_attn_mask_utils import AttentionMaskConverter - self.SWA_mask = AttentionMaskConverter( - is_causal = True, - sliding_window = self.config.sliding_window, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) - - self.GA_mask = AttentionMaskConverter( - is_causal = True, - )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ - .squeeze(0).squeeze(0) + if HAS_FLEX_ATTENTION: + # Use Flex Attention instead! + self.SWA_mask = create_flex_attention_sliding_window_mask(self.max_seq_length, self.config.sliding_window) + self.GA_mask = create_flex_attention_causal_mask(self.max_seq_length) + else: + n = self.max_seq_length # self.config.max_position_embeddings + # masked_fill is making stuff slower! + # self. 
GA_mask = create_boolean_mask(n = n, sliding_window = 0) + # self.SWA_mask = create_boolean_mask(n = n, sliding_window = self.config.sliding_window) + from transformers.modeling_attn_mask_utils import AttentionMaskConverter + self.SWA_mask = AttentionMaskConverter( + is_causal = True, + sliding_window = self.config.sliding_window, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + + self.GA_mask = AttentionMaskConverter( + is_causal = True, + )\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .squeeze(0).squeeze(0) + pass pass pass @@ -821,7 +821,7 @@ def custom_forward(*inputs): (fast_rms_layernorm_inference_gemma if IS_GEMMA else fast_rms_layernorm_inference)\ (self.norm, hidden_states) elif IS_COHERE: - hidden_states = fast_layernorm_compiled(self.norm, hidden_states) + hidden_states = self.norm(hidden_states) else: hidden_states = fast_rms_layernorm(self.norm, hidden_states, gemma = IS_GEMMA) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 044629eaed..b8f710b2c5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1144,7 +1144,7 @@ def patch_sft_trainer_tokenizer(): # Patch train with fix_untrained_tokens for path_to_trainer in \ - ("sft_trainer.SFTTrainer", "dpo_trainer.DPOTrainer",): + ("sft_trainer.SFTTrainer", "dpo_trainer.DPOTrainer", "kto_trainer.KTOTrainer"): function_name, replacer = "train", "if resume_from_checkpoint is False:" function = getsource(eval(f"trl.trainer.{path_to_trainer}.{function_name}")) From d674f1c852035ed118f05a0d7c0e7e57625835c8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 8 Sep 2024 12:29:31 -0700 Subject: [PATCH 0419/1088] Update README.md --- README.md | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 6cd1be1381..d9e0146f30 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ All notebooks are 
**beginner friendly**! Add your dataset, click "Run All", and | ------------------------------- | --------------------------------------- | | 📚 **Documentation & Wiki** | [Read Our Docs](https://docs.unsloth.ai) | |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| -| 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#installation-instructions)| +| 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#-installation-instructions)| | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) | 🌐 **Released Models** | [Unsloth Releases](https://huggingface.co/unsloth)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| @@ -100,7 +100,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 💾 Installation Instructions ### Conda Installation -`⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. We support `python=3.10,3.11,3.12`. +`⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. We support `python=3.10,3.11,3.12`. ```bash conda create --name unsloth_env \ python=3.11 \ @@ -127,15 +127,28 @@ pip install --no-deps trl peft accelerate bitsandbytes ### Pip Installation -`⚠️Do **NOT** use this if you have Conda.` Pip is a bit more complex since there are dependency issues. The pip command is different for `torch 2.2,2.3,2.4` and CUDA versions. +`⚠️Do **NOT** use this if you have Conda.` Pip is a bit more complex since there are dependency issues. The pip command is different for `torch 2.2,2.3,2.4,2.5` and CUDA versions. 
-In general, if you have `torch 2.4` and `CUDA 12.1`, use: +For other torch versions, we support `torch211`, `torch212`, `torch220`, `torch230`, `torch240` and for CUDA versions, we support `cu118` and `cu121`. For Ampere devices (A100, H100, RTX3090) and above, use `cu118-ampere` or `cu121-ampere`. + +For example, if you have `torch 2.4` and `CUDA 12.1`, use: ```bash pip install --upgrade pip pip install "unsloth[cu121-torch240] @ git+https://github.com/unslothai/unsloth.git" ``` -Or, run the below in a terminal to get the optional pip installation command: +And other examples: +```bash +pip install "unsloth[cu121-ampere-torch240] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-ampere-torch240] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-torch240] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118-torch240] @ git+https://github.com/unslothai/unsloth.git" + +pip install "unsloth[cu121-torch230] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" +``` + +Or, run the below in a terminal to get the **optimal** pip installation command: ```bash wget -qO- https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/_auto_install.py | python - ``` @@ -160,12 +173,12 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` -Afterwards, confirm if `nvcc` `xformers` and `bitsandbytes` have successfully installed - if not, install them individually first until they work, then install Unsloth. -```bash -nvcc -python -m xformers.info -python -m bitsandbytes -``` +For **advanced installation instructions** or if you see weird errors: + +1. Install `torch` and `triton`. Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` +2. 
Confirm if CUDA is installated correctly. Try `nvcc`. If that fails, you need to install `cudatoolkit` or CUDA drivers. +3. Install `xformers` manually. You can try installing `vllm` and seeing if `vllm` succeeds. Check if `xformers` succeeded with `python -m xformers.info` Go to https://github.com/facebookresearch/xformers. Another option is to install `flash-attn` for Ampere GPUs. +4. Finally, install `bitsandbytes` and check it with `python -m bitsandbytes` ## 📜 [Documentation](https://docs.unsloth.ai) - Go to our official [Documentation](https://docs.unsloth.ai) for saving to GGUF, checkpointing, evaluation and more! From 7476d4b5f68bb11f9f7841df4c08baf7a9e8f632 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 8 Sep 2024 14:30:54 -0700 Subject: [PATCH 0420/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d9e0146f30..0c1c7f840e 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,7 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` -For **advanced installation instructions** or if you see weird errors: +For **advanced installation instructions** or if you see weird errors during installations: 1. Install `torch` and `triton`. Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` 2. Confirm if CUDA is installated correctly. Try `nvcc`. If that fails, you need to install `cudatoolkit` or CUDA drivers. 
From de43b9cedc6bd807babe3d4639c8214bec381cc3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 8 Sep 2024 15:51:27 -0700 Subject: [PATCH 0421/1088] Update __init__.py --- unsloth/__init__.py | 93 +++++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 45 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index f6ed999530..e7db41ce2c 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -96,59 +96,62 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Try loading bitsandbytes and triton import bitsandbytes as bnb -import triton -libcuda_dirs = lambda: None -if Version(triton.__version__) >= Version("3.0.0"): - try: from triton.backends.nvidia.driver import libcuda_dirs - except: pass -else: from triton.common.build import libcuda_dirs +if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ: -try: - cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 - libcuda_dirs() -except: - warnings.warn( - "Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ - ) - - if os.path.exists("/usr/lib64-nvidia"): - os.system("ldconfig /usr/lib64-nvidia") - elif os.path.exists("/usr/local"): - # Sometimes bitsandbytes cannot be linked properly in Runpod for example - possible_cudas = subprocess.check_output(["ls", "-al", "/usr/local"]).decode("utf-8").split("\n") - find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$") - possible_cudas = [find_cuda.search(x) for x in possible_cudas] - possible_cudas = [x.group(1) for x in possible_cudas if x is not None] - - # Try linking cuda folder, or everything in local - if len(possible_cudas) == 0: - os.system(f"ldconfig /usr/local/") - else: - find_number = re.compile(r"([\d\.]{2,})") - latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] - latest_cuda = possible_cudas[latest_cuda] - os.system(f"ldconfig /usr/local/{latest_cuda}") - pass + import triton + libcuda_dirs = lambda: None + if 
Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass + else: from triton.common.build import libcuda_dirs - importlib.reload(bnb) - importlib.reload(triton) try: - libcuda_dirs = lambda: None - if Version(triton.__version__) >= Version("3.0.0"): - try: from triton.backends.nvidia.driver import libcuda_dirs - except: pass - else: from triton.common.build import libcuda_dirs cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() except: warnings.warn( - "Unsloth: CUDA is not linked properly.\n"\ - "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"\ - "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ - "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ - "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\ - "Unsloth will still run for now, but maybe it might crash - let's hope it works!" 
+ "Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ ) + + if os.path.exists("/usr/lib64-nvidia"): + os.system("ldconfig /usr/lib64-nvidia") + elif os.path.exists("/usr/local"): + # Sometimes bitsandbytes cannot be linked properly in Runpod for example + possible_cudas = subprocess.check_output(["ls", "-al", "/usr/local"]).decode("utf-8").split("\n") + find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$") + possible_cudas = [find_cuda.search(x) for x in possible_cudas] + possible_cudas = [x.group(1) for x in possible_cudas if x is not None] + + # Try linking cuda folder, or everything in local + if len(possible_cudas) == 0: + os.system(f"ldconfig /usr/local/") + else: + find_number = re.compile(r"([\d\.]{2,})") + latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] + latest_cuda = possible_cudas[latest_cuda] + os.system(f"ldconfig /usr/local/{latest_cuda}") + pass + + importlib.reload(bnb) + importlib.reload(triton) + try: + libcuda_dirs = lambda: None + if Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass + else: from triton.common.build import libcuda_dirs + cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 + libcuda_dirs() + except: + warnings.warn( + "Unsloth: CUDA is not linked properly.\n"\ + "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"\ + "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ + "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ + "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\ + "Unsloth will still run for now, but maybe it might crash - let's hope it works!" 
+ ) + pass pass from .models import * From 6c534341bb229b136f9504443f0161645d2070c5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 8 Sep 2024 19:47:21 -0700 Subject: [PATCH 0422/1088] Update utils.py --- unsloth/kernels/utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 0af65edeee..a8c20c75a4 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -56,7 +56,8 @@ def calculate_settings(n): import bitsandbytes as bnb # https://github.com/bitsandbytes-foundation/bitsandbytes/pull/1330/files HAS_CUDA_STREAM = Version(bnb.__version__) > Version("0.43.3") -CUDA_STREAM = torch.cuda.current_stream("cuda:0") +global CUDA_STREAM +CUDA_STREAM = None get_ptr = bnb.functional.get_ptr import ctypes cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 @@ -129,6 +130,8 @@ def fast_dequantize(W, quant_state = None, out = None): offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass + global CUDA_STREAM + if CUDA_STREAM is None: CUDA_STREAM = torch.cuda.current_stream("cuda:0") # Create weight matrix if out is None: @@ -236,6 +239,9 @@ def fast_gemv(X, W, quant_state, out = None): offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass + global CUDA_STREAM + if CUDA_STREAM is None: CUDA_STREAM = torch.cuda.current_stream("cuda:0") + # assert(dtype == X.dtype) bout = shape[0] From 575c1bd67d973d2d1d3010a782b7d4d2cc101b8b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 15 Sep 2024 17:42:09 -0700 Subject: [PATCH 0423/1088] Update README.md (#1033) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0c1c7f840e..e46396261f 100644 --- a/README.md +++ b/README.md @@ -99,6 +99,8 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 💾 Installation Instructions +For stable releases, use `pip install unsloth`. 
We recommend `pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"` for most installations though. + ### Conda Installation `⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. We support `python=3.10,3.11,3.12`. ```bash From 572c925fa7ea80b23776d8a5f94e49f8cd927664 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 15 Sep 2024 18:04:18 -0700 Subject: [PATCH 0424/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b5a57a777c..1112348b33 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.8" +__version__ = "2024.9" __all__ = [ "prepare_model_for_kbit_training", From 62c989ef0ae0e9fbac714a4cb21eda76c1fe84b6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 15 Sep 2024 21:50:00 -0700 Subject: [PATCH 0425/1088] Update mapper.py --- unsloth/models/mapper.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index bff7f0253f..08a9ef2097 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -276,6 +276,30 @@ "unsloth/Hermes-3-Llama-3.1-405B-bnb-4bit" : ( "NousResearch/Hermes-3-Llama-3.1-405B", ), + "unsloth/SmolLM-135M-bnb-4bit" : ( + "unsloth/SmolLM-135M", + "HuggingFaceTB/SmolLM-135M", + ), + "unsloth/SmolLM-360M-bnb-4bit" : ( + "unsloth/SmolLM-360M", + "HuggingFaceTB/SmolLM-360M", + ), + "unsloth/SmolLM-1.7B-bnb-4bit" : ( + "unsloth/SmolLM-1.7B", + "HuggingFaceTB/SmolLM-1.7B", + ), + "unsloth/SmolLM-135M-Instruct-bnb-4bit" : ( + "unsloth/SmolLM-135M-Instruct", + "HuggingFaceTB/SmolLM-135M-Instruct", + ), + "unsloth/SmolLM-360M-Instruct-bnb-4bit" : ( + "unsloth/SmolLM-360M-Instruct", + 
"HuggingFaceTB/SmolLM-360M-Instruct", + ), + "unsloth/SmolLM-1.7B-Instruct-bnb-4bit" : ( + "unsloth/SmolLM-1.7B-Instruct", + "HuggingFaceTB/SmolLM-1.7B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From f1951c0f6d3e1f184af93e5d8f5eff6e7834e4b5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 17 Sep 2024 10:50:57 -0700 Subject: [PATCH 0426/1088] Update mapper.py --- unsloth/models/mapper.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 08a9ef2097..615480c678 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -300,6 +300,10 @@ "unsloth/SmolLM-1.7B-Instruct", "HuggingFaceTB/SmolLM-1.7B-Instruct", ), + "unsloth/Mistral-Small-Instruct-2409-bnb-4bit" : ( + "unsloth/Mistral-Small-Instruct-2409", + "mistralai/Mistral-Small-Instruct-2409", + ), } INT_TO_FLOAT_MAPPER = {} From c730659de7a0a9e0520d01184d5dad2503b52285 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 17 Sep 2024 17:38:12 -0700 Subject: [PATCH 0427/1088] Update llama.py --- unsloth/models/llama.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 22dcb25a2f..a245330108 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -958,7 +958,13 @@ def _CausalLM_fast_forward( else: logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass - logits = logits.to(self.config.torch_dtype) + + torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) + if torch_dtype is not None: + logits = logits.to(torch_dtype) + else: + raise TypeError("Unsloth: torch_dtype for models is not bfloat16, float16 or float32!") + pass loss = None logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) From 8aceff3e7b7510250e88d5109ad947932b6898c2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 18 Sep 2024 13:23:45 -0700 Subject: [PATCH 0428/1088] Update README.md (#1036) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git 
a/README.md b/README.md index e46396261f..e24c36d8d1 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Mistral Small 22b](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! [Llama 3.1 Conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. 
From 3fddfd5166d5fd160eca5ae1000e8a5c8a2dc465 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 18 Sep 2024 14:23:22 -0700 Subject: [PATCH 0429/1088] Update mapper.py --- unsloth/models/mapper.py | 56 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 615480c678..8981819853 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -304,6 +304,62 @@ "unsloth/Mistral-Small-Instruct-2409", "mistralai/Mistral-Small-Instruct-2409", ), + "unsloth/Qwen2.5-0.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-0.5B-Instruct", + "Qwen/Qwen2.5-0.5B-Instruct", + ), + "unsloth/Qwen2.5-1.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-1.5B-Instruct", + "Qwen/Qwen2.5-1.5B-Instruct", + ), + "unsloth/Qwen2.5-3B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-3B-Instruct", + "Qwen/Qwen2.5-3B-Instruct", + ), + "unsloth/Qwen2.5-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-7B-Instruct", + "Qwen/Qwen2.5-7B-Instruct", + ), + "unsloth/Qwen2.5-14B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-14B-Instruct", + "Qwen/Qwen2.5-14B-Instruct", + ), + "unsloth/Qwen2.5-32B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-32B-Instruct", + "Qwen/Qwen2.5-32B-Instruct", + ), + "unsloth/Qwen2.5-72B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-72B-Instruct", + "Qwen/Qwen2.5-72B-Instruct", + ), + "unsloth/Qwen2.5-0.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-0.5B", + "Qwen/Qwen2.5-0.5B", + ), + "unsloth/Qwen2.5-1.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-1.5B", + "Qwen/Qwen2.5-1.5B", + ), + "unsloth/Qwen2.5-3B-bnb-4bit" : ( + "unsloth/Qwen2.5-3B", + "Qwen/Qwen2.5-3B", + ), + "unsloth/Qwen2.5-7B-bnb-4bit" : ( + "unsloth/Qwen2.5-7B", + "Qwen/Qwen2.5-7B", + ), + "unsloth/Qwen2.5-14B-bnb-4bit" : ( + "unsloth/Qwen2.5-14B", + "Qwen/Qwen2.5-14B", + ), + "unsloth/Qwen2.5-32B-bnb-4bit" : ( + "unsloth/Qwen2.5-32B", + "Qwen/Qwen2.5-32B", + ), + "unsloth/Qwen2.5-72B-bnb-4bit" : ( + "unsloth/Qwen2.5-72B", + "Qwen/Qwen2.5-72B", + ), } 
INT_TO_FLOAT_MAPPER = {} From c247bfc72ce91b8150de5f7f0604c94fde147699 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 21 Sep 2024 01:56:17 -0700 Subject: [PATCH 0430/1088] Update chat_templates.py --- unsloth/chat_templates.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index e19bea0771..1b59e379a7 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1841,7 +1841,11 @@ def _train_on_responses_only(examples): pass return { "labels" : all_labels } pass - trainer.train_dataset = trainer.train_dataset.map(_train_on_responses_only, batched = True) + + if hasattr(trainer, "train_dataset"): + trainer.train_dataset = trainer.train_dataset.map(_train_on_responses_only, batched = True) + if hasattr(trainer, "eval_dataset"): + trainer.eval_dataset = trainer.eval_dataset .map(_train_on_responses_only, batched = True) return trainer pass From 02cc97e38870ed632961370d6a769e05d6941ed2 Mon Sep 17 00:00:00 2001 From: Nazim Ali Date: Sun, 22 Sep 2024 04:18:37 -0400 Subject: [PATCH 0431/1088] fix: chat_templates.py bug (#1048) * fix: chat_template bug * fix: check trainer attribute values are not None --- unsloth/chat_templates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 1b59e379a7..70599b8b41 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1842,10 +1842,10 @@ def _train_on_responses_only(examples): return { "labels" : all_labels } pass - if hasattr(trainer, "train_dataset"): + if hasattr(trainer, "train_dataset") and trainer.train_dataset is not None: trainer.train_dataset = trainer.train_dataset.map(_train_on_responses_only, batched = True) - if hasattr(trainer, "eval_dataset"): - trainer.eval_dataset = trainer.eval_dataset .map(_train_on_responses_only, batched = True) + if hasattr(trainer, "eval_dataset") and trainer.eval_dataset is not None: + 
trainer.eval_dataset = trainer.eval_dataset.map(_train_on_responses_only, batched = True) return trainer pass From e5381b6a263dce315ed4d44da1dd4f188d6800a3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 22 Sep 2024 02:38:28 -0700 Subject: [PATCH 0432/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1112348b33..3e5efde40e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9" +__version__ = "2024.9.post1" __all__ = [ "prepare_model_for_kbit_training", From 26a0f945d606e7de7b34f6cd59601fe90e76b39d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 22 Sep 2024 22:13:59 -0700 Subject: [PATCH 0433/1088] Update mapper.py --- unsloth/models/mapper.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 8981819853..50436a7a4f 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -360,6 +360,46 @@ "unsloth/Qwen2.5-72B", "Qwen/Qwen2.5-72B", ), + "unsloth/Qwen2.5-Math-1.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-1.5B", + "Qwen/Qwen2.5-Math-1.5B", + ), + "unsloth/Qwen2.5-Math-7B-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-7B", + "Qwen/Qwen2.5-Math-7B", + ), + "unsloth/Qwen2.5-Math-72B-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-72B", + "Qwen/Qwen2.5-Math-72B", + ), + "unsloth/Qwen2.5-Math-1.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-1.5B-Instruct", + "Qwen/Qwen2.5-Math-1.5B-Instruct", + ), + "unsloth/Qwen2.5-Math-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-7B-Instruct", + "Qwen/Qwen2.5-Math-7B-Instruct", + ), + "unsloth/Qwen2.5-Math-72B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Math-72B-Instruct", + "Qwen/Qwen2.5-Math-72B-Instruct", + ), + "unsloth/Qwen2.5-Coder-1.5B-bnb-4bit" 
: ( + "unsloth/Qwen2.5-Coder-1.5B", + "Qwen/Qwen2.5-Coder-1.5B", + ), + "unsloth/Qwen2.5-Coder-7B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-7B", + "Qwen/Qwen2.5-Coder-7B", + ), + "unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-Instruct-1.5B", + "Qwen/Qwen2.5-Coder-Instruct-1.5B", + ), + "unsloth/Qwen2.5-Coder-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-7B-Instruct", + "Qwen/Qwen2.5-Coder-7B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From c6bec501e4d79c8e13ef1f1277ffa2bea3380404 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 22 Sep 2024 22:48:35 -0700 Subject: [PATCH 0434/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index b8f710b2c5..89e62c717a 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -916,7 +916,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): if bad_not_trainable: raise ValueError( - 'Unsloth: Untrained tokens found, but embed_tokens & lm_head not trainable, causing NaNs. '\ + f'Unsloth: Untrained tokens for [{where_untrained_set}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ 'Restart then add `embed_tokens` & `lm_head` to '\ '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",]). `'\ 'Are you using the `base` model? 
Instead, use the `instruct` version to silence this warning.', From 80bba893415599e041d61eb99c42e850b4f7d56c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 22 Sep 2024 23:00:24 -0700 Subject: [PATCH 0435/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 89e62c717a..8ee7ea8b86 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -915,8 +915,34 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): if not lm_head_matrix .requires_grad: bad_not_trainable = True if bad_not_trainable: + + final_bad_items = [] + + # Re-check the first 250, last 250 input_ids + size_dataset = len(train_dataset) + size = min(size_dataset, 250) + for j in range(size): + input_ids = train_dataset[j] + if "input_ids" in input_ids: + input_ids = input_ids["input_ids"] + for item in input_ids: + if item in where_untrained_set: final_bad_items.append(item) + pass + pass + + # Re-check last 250 + left = max(size_dataset-250, 0) + for j in range(left, size_dataset): + input_ids = train_dataset[j] + if "input_ids" in input_ids: + input_ids = input_ids["input_ids"] + for item in input_ids: + if item in where_untrained_set: final_bad_items.append(item) + pass + pass + raise ValueError( - f'Unsloth: Untrained tokens for [{where_untrained_set}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ + f'Unsloth: Untrained tokens of [{list(final_bad_items)}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ 'Restart then add `embed_tokens` & `lm_head` to '\ '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",]). `'\ 'Are you using the `base` model? 
Instead, use the `instruct` version to silence this warning.', From e543614fe30c105d59ea70ee8b60ec96cd6b5390 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 00:04:33 -0700 Subject: [PATCH 0436/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 8ee7ea8b86..477d993e69 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -942,7 +942,7 @@ def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): pass raise ValueError( - f'Unsloth: Untrained tokens of [{list(final_bad_items)}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ + f'Unsloth: Untrained tokens of [{list(set(final_bad_items))}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ 'Restart then add `embed_tokens` & `lm_head` to '\ '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",]). `'\ 'Are you using the `base` model? Instead, use the `instruct` version to silence this warning.', From bf493d5b2da0997043bcdad7e4b096c031e03508 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 00:29:21 -0700 Subject: [PATCH 0437/1088] Update chat_templates.py --- unsloth/chat_templates.py | 124 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 70599b8b41..63079eb94c 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -705,6 +705,130 @@ pass +# =========================================== Qwen 2.5 +qwen25_template = \ +"""{%- if tools %} + {{- '<|im_start|>system\n' }} + {%- if messages[0]['role'] == 'system' %} + {{- messages[0]['content'] }} + {%- else %} + {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' 
}} + {%- endif %} + {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within XML tags:\n" }} + {%- for tool in tools %} + {{- "\n" }} + {{- tool | tojson }} + {%- endfor %} + {{- "\n\n\nFor each function call, return a json object with function name and arguments within XML tags:\n\n{\"name\": , \"arguments\": }\n<|im_end|>\n" }} +{%- else %} + {%- if messages[0]['role'] == 'system' %} + {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }} + {%- else %} + {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }} + {%- endif %} +{%- endif %} +{%- for message in messages %} + {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %} + {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }} + {%- elif message.role == "assistant" %} + {{- '<|im_start|>' + message.role }} + {%- if message.content %} + {{- '\n' + message.content }} + {%- endif %} + {%- for tool_call in message.tool_calls %} + {%- if tool_call.function is defined %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {{- '\n\n{"name": "' }} + {{- tool_call.name }} + {{- '", "arguments": ' }} + {{- tool_call.arguments | tojson }} + {{- '}\n' }} + {%- endfor %} + {{- '<|im_end|>\n' }} + {%- elif message.role == "tool" %} + {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %} + {{- '<|im_start|>user' }} + {%- endif %} + {{- '\n\n' }} + {{- message.content }} + {{- '\n' }} + {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %} + {{- '<|im_end|>\n' }} + {%- endif %} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|im_start|>assistant\n' }} +{%- endif %}""" + + +# Ollama from https://ollama.com/library/qwen2.5/blobs/eb4402837c78 +qwen25_ollama = \ +''' +FROM {__FILE_LOCATION__} 
+TEMPLATE """{{- if .Messages }} +{{- if or .System .Tools }}<|im_start|>system +{{- if .System }} +{{ .System }} +{{- end }} +{{- if .Tools }} + +# Tools + +You may call one or more functions to assist with the user query. + +You are provided with function signatures within XML tags: + +{{- range .Tools }} +{"type": "function", "function": {{ .Function }}} +{{- end }} + + +For each function call, return a json object with function name and arguments within XML tags: + +{"name": , "arguments": } + +{{- end }}<|im_end|> +{{ end }} +{{- range $i, $_ := .Messages }} +{{- $last := eq (len (slice $.Messages $i)) 1 -}} +{{- if eq .Role "user" }}<|im_start|>user +{{ .Content }}<|im_end|> +{{ else if eq .Role "assistant" }}<|im_start|>assistant +{{ if .Content }}{{ .Content }} +{{- else if .ToolCalls }} +{{ range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}} +{{ end }} +{{- end }}{{ if not $last }}<|im_end|> +{{ end }} +{{- else if eq .Role "tool" }}<|im_start|>user + +{{ .Content }} +<|im_end|> +{{ end }} +{{- if and (ne .Role "assistant") $last }}<|im_start|>assistant +{{ end }} +{{- end }} +{{- else }} +{{- if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ end }}{{ .Response }}{{ if .Response }}<|im_end|>{{ end }}""" +PARAMETER stop "<|im_end|>" +PARAMETER stop "<|endoftext|>" +''' + +qwen25_template_eos_token = "eos_token" +CHAT_TEMPLATES["qwen-2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +CHAT_TEMPLATES["qwen-25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +CHAT_TEMPLATES["qwen25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +CHAT_TEMPLATES["qwen2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +pass + + def get_chat_template( tokenizer, chat_template = "chatml", From 4063c1360e413e70b95eb643f393b07f948b9f83 Mon 
Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 01:02:24 -0700 Subject: [PATCH 0438/1088] Upgrade Ollama presets --- unsloth/chat_templates.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 63079eb94c..e7d972f02f 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -71,6 +71,8 @@ {{ end }}>>> Assistant: {{ .Response }}{__EOS_TOKEN__} """ PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 SYSTEM """You are a helpful assistant to the user""" ''' @@ -106,6 +108,8 @@ {{ .Response }}{__EOS_TOKEN__} """ PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' zephyr_eos_token = "eos_token" @@ -141,6 +145,8 @@ """ PARAMETER stop "<|im_start|>" PARAMETER stop "<|im_end|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' chatml_eos_token = "<|im_end|>" @@ -179,6 +185,8 @@ FROM {__FILE_LOCATION__} TEMPLATE """[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]""" PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' mistral_eos_token = "eos_token" @@ -218,6 +226,8 @@ {{ .Prompt }} [/INST]""" PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' llama_eos_token = "eos_token" @@ -255,6 +265,8 @@ FROM {__FILE_LOCATION__} TEMPLATE """{{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} {{ end }}ASSISTANT: {{ .Response }} {__EOS_TOKEN__}""" PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' vicuna_eos_token = "eos_token" @@ -294,6 +306,8 @@ {{ end }}### Assistant: {{ .Response }}{__EOS_TOKEN__} """ PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 SYSTEM """A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.""" ''' @@ -339,6 +353,8 @@ """ PARAMETER stop "{__EOS_TOKEN__}" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 SYSTEM """Below are some instructions that describe some tasks. Write responses that appropriately complete each request.""" ''' @@ -383,6 +399,8 @@ PARAMETER stop "" PARAMETER stop "" PARAMETER penalize_newline false +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' gemma_eos_token = "" @@ -408,6 +426,8 @@ PARAMETER stop "<|im_start|>" PARAMETER stop "<|im_end|>" PARAMETER penalize_newline false +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' gemma_chatml_eos_token = ( @@ -464,6 +484,8 @@ PARAMETER stop "<|start_header_id|>" PARAMETER stop "<|end_header_id|>" PARAMETER stop "<|eot_id|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' llama3_template_eos_token = "eos_token" @@ -502,6 +524,8 @@ PARAMETER stop "<|end|>" PARAMETER stop "<|user|>" PARAMETER stop "<|assistant|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' phi3_template_eos_token = "<|end|>" @@ -697,6 +721,8 @@ PARAMETER stop "<|end_header_id|>" PARAMETER stop "<|eot_id|>" PARAMETER stop "<|eom_id|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' llama31_template_eos_token = "eos_token" @@ -819,6 +845,8 @@ {{ end }}{{ .Response }}{{ if .Response }}<|im_end|>{{ end }}""" PARAMETER stop "<|im_end|>" PARAMETER stop "<|endoftext|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 ''' qwen25_template_eos_token = "eos_token" From bd2f0f549ebb06f8092636d92d65f4da4c0c9433 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 01:07:06 -0700 Subject: [PATCH 0439/1088] Update chat_templates.py --- unsloth/chat_templates.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index e7d972f02f..d500afb726 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -786,7 +786,8 @@ {%- endfor %} {%- if 
add_generation_prompt %} {{- '<|im_start|>assistant\n' }} -{%- endif %}""" +{%- endif %} +""" # Ollama from https://ollama.com/library/qwen2.5/blobs/eb4402837c78 From 1038c723bcaa8b03d15f189275a8363e4635dedc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 01:27:12 -0700 Subject: [PATCH 0440/1088] Qwen 2.5 --- README.md | 9 ++++--- unsloth/chat_templates.py | 54 +++++++++++++++++---------------------- 2 files changed, 29 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index e24c36d8d1..9ab2ed6e59 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | | **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | -| **Mistral Nemo (12B)** | [▶️ Start for free](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) | 2x faster | 60% less | +| **Mistral Small (22B)** | [▶️ Start for free](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) | 2x faster | 60% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | @@ -38,15 +38,16 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! [Mistral Small 22b](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! +- 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) +- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! [Llama 3.1 Conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported -- 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported
    Click for more news - + +- 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index d500afb726..937cbd06ea 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -734,58 +734,52 @@ # =========================================== Qwen 2.5 qwen25_template = \ """{%- if tools %} - {{- '<|im_start|>system\n' }} - {%- if messages[0]['role'] == 'system' %} - {{- messages[0]['content'] }} + {{- \'<|im_start|>system\\n\' }} + {%- if messages[0][\'role\'] == \'system\' %} + {{- messages[0][\'content\'] }} {%- else %} - {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }} + {{- \'You are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.\' }} {%- endif %} - {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within XML tags:\n" }} + {{- "\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n" }} {%- for tool in tools %} - {{- "\n" }} + {{- "\\n" }} {{- tool | tojson }} {%- endfor %} - {{- "\n\n\nFor each function call, return a json object with function name and arguments within XML tags:\n\n{\"name\": , \"arguments\": }\n<|im_end|>\n" }} -{%- else %} - {%- if messages[0]['role'] == 'system' %} - {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }} + {{- "\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\"name\\": , \\"arguments\\": }\\n<|im_end|>\\n" }}\n{%- else %} + {%- if messages[0][\'role\'] == \'system\' %} + {{- \'<|im_start|>system\\n\' + messages[0][\'content\'] + \'<|im_end|>\\n\' }} {%- else %} - {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }} - {%- endif %} -{%- endif %} -{%- for message in messages %} + {{- \'<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n\' }} + {%- endif %}\n{%- endif %}\n{%- for message in messages %} {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %} - {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }} + {{- \'<|im_start|>\' + message.role + \'\\n\' + message.content + \'<|im_end|>\' + \'\\n\' }} {%- elif message.role == "assistant" %} - {{- '<|im_start|>' + message.role }} + {{- \'<|im_start|>\' + message.role }} {%- if message.content %} - {{- '\n' + message.content }} + {{- \'\\n\' + message.content }} {%- endif %} {%- for tool_call in message.tool_calls %} {%- if tool_call.function is defined %} {%- set tool_call = tool_call.function %} {%- endif %} - {{- '\n\n{"name": "' }} + {{- \'\\n\\n{"name": "\' }} {{- tool_call.name }} - {{- '", "arguments": ' }} + {{- \'", "arguments": \' }} {{- tool_call.arguments | tojson }} - {{- '}\n' }} + {{- \'}\\n\' }} {%- endfor %} - {{- '<|im_end|>\n' }} + {{- \'<|im_end|>\\n\' }} {%- elif message.role == "tool" %} - {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %} - {{- '<|im_start|>user' }} + {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %} {{- \'<|im_start|>user\' }} {%- endif %} - {{- '\n\n' }} + {{- \'\\n\\n\' }} {{- message.content }} - {{- '\n' }} + {{- \'\\n\' }} {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %} - {{- '<|im_end|>\n' }} + {{- \'<|im_end|>\\n\' }} {%- endif %} - {%- endif %} -{%- endfor %} -{%- if add_generation_prompt %} - {{- '<|im_start|>assistant\n' }} + {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %} + {{- \'<|im_start|>assistant\\n\' }} {%- endif %} """ From 3ad8309b00927c614cfa9832836bac91bacff5be Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 01:36:50 -0700 Subject: [PATCH 0441/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/README.md b/README.md index 9ab2ed6e59..f8944c5a77 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) +- 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! [Llama 3.1 Conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! 
[Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported From fb77505f8429566f5d21d6ea5318c342e8a67991 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Sep 2024 10:56:55 -0700 Subject: [PATCH 0442/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3e5efde40e..f868c855bc 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9.post1" +__version__ = "2024.9.post2" __all__ = [ "prepare_model_for_kbit_training", From c4d303ee109e21216585b78d294b4f0b439c45af Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 02:49:48 -0700 Subject: [PATCH 0443/1088] Layernorm --- unsloth/kernels/__init__.py | 1 + unsloth/kernels/layernorm.py | 160 +++++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 unsloth/kernels/layernorm.py diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index cd1d90f262..2fb9d11f99 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -14,6 +14,7 @@ from .cross_entropy_loss import fast_cross_entropy_loss from .rms_layernorm import fast_rms_layernorm +from .layernorm import fast_layernorm from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel from .geglu import ( diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py new file mode 100644 index 0000000000..c65b5412ff --- /dev/null +++ b/unsloth/kernels/layernorm.py @@ -0,0 +1,160 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.jit +def layernorm_forward( + Y, Y_row_stride, + X, X_row_stride, + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE : tl.constexpr +): + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + Y += row_idx * Y_row_stride + X += row_idx * X_row_stride + r += row_idx + mu += row_idx + + # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules + # are in float32! 
+ X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) + + mean_X = tl.sum(X_row, axis = 0) / n_cols + XX = X_row - mean_X + row_var = tl.sum(XX * XX, axis = 0) / n_cols + inv_var = tl.math.rsqrt(row_var + eps) + tl.store (r, inv_var) + tl.store (mu, mean_X) + output = (XX * inv_var) * W_row + b_row + tl.store(Y + col_offsets, output, mask = mask) +pass + + +@triton.jit +def layernorm_backward( + dY, dY_row_stride, + X, X_row_stride, + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE : tl.constexpr +): + # Approximately follows https://github.com/karpathy/llm.c/blob/master/doc/layernorm/layernorm.md + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + dY += row_idx * dY_row_stride + X += row_idx * X_row_stride + r += row_idx + mu += row_idx + + # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules + # are in float32! 
+ dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) + X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) + + inv_var = tl.load(r) .to(tl.float32) + mean = tl.load(mu).to(tl.float32) + normed = (X_row - mean) * inv_var + dY_W = dY_row * W_row + dX_row = dY_W - tl.sum(dY_W, axis = 0) / n_cols - normed * tl.sum(dY_W * normed, axis = 0) / n_cols + dX_row = dX_row * inv_var + tl.store(dY + col_offsets, dX_row, mask = mask) +pass + + +class Fast_Layernorm(torch.autograd.Function): + @staticmethod + def forward(ctx, X, W, b, eps): + shape = X.shape + dim = shape[-1] + X = X.view(-1, dim) + n_rows, n_cols = X.shape + BLOCK_SIZE, num_warps = calculate_settings(n_cols) + + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") + r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + mu = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + + layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + ctx.eps = eps + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps + ctx.save_for_backward(X, W, r, mu) + return Y.view(*shape) + pass + + @staticmethod + def backward(ctx, dY): + shape = dY.shape + dim = shape[-1] + dY = dY.view(-1, dim) + X, W, r, mu = ctx.saved_tensors + n_rows, n_cols = dY.shape + + layernorm_backward[(n_rows,)]( + dY, dY.stride(0), + X, X .stride(0), + W, + b, + r, + mu, + n_cols, ctx.eps, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) + dX = dY.view(*shape) + return dX, None, None, None, None + pass +pass + + +def fast_layernorm(layernorm, X): + W = layernorm.weight + bias = layernorm.bias + eps = layernorm.variance_epsilon if \ + hasattr(layernorm, "variance_epsilon") \ + else layernorm.eps + out = 
Fast_Layernorm.apply(X, W, bias, eps) + return out +pass From 93717b4b28f1025ff00b11253de4b15104d6c25b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 16:29:54 -0700 Subject: [PATCH 0444/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index c65b5412ff..cfafa4a11b 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -120,7 +120,7 @@ def forward(ctx, X, W, b, eps): ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps - ctx.save_for_backward(X, W, r, mu) + ctx.save_for_backward(X, W, b, r, mu) return Y.view(*shape) pass @@ -129,7 +129,7 @@ def backward(ctx, dY): shape = dY.shape dim = shape[-1] dY = dY.view(-1, dim) - X, W, r, mu = ctx.saved_tensors + X, W, b, r, mu = ctx.saved_tensors n_rows, n_cols = dY.shape layernorm_backward[(n_rows,)]( From 2e14b9cb5e1eb37c4d1553667db609b846ab39d7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 16:44:33 -0700 Subject: [PATCH 0445/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index cfafa4a11b..29c77462f5 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -41,8 +41,8 @@ def layernorm_forward( # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules # are in float32! 
X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) - W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) - b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0)#.to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0)#.to(tl.float32) mean_X = tl.sum(X_row, axis = 0) / n_cols XX = X_row - mean_X @@ -50,7 +50,7 @@ def layernorm_forward( inv_var = tl.math.rsqrt(row_var + eps) tl.store (r, inv_var) tl.store (mu, mean_X) - output = (XX * inv_var) * W_row + b_row + output = (XX * inv_var).to(W_row.dtype) * W_row + b_row tl.store(Y + col_offsets, output, mask = mask) pass From e382a77ea996463c46f774d11f3102b3f1e2fc27 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 16:45:50 -0700 Subject: [PATCH 0446/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 29c77462f5..cfafa4a11b 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -41,8 +41,8 @@ def layernorm_forward( # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules # are in float32! 
X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) - W_row = tl.load(W + col_offsets, mask = mask, other = 0)#.to(tl.float32) - b_row = tl.load(b + col_offsets, mask = mask, other = 0)#.to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) mean_X = tl.sum(X_row, axis = 0) / n_cols XX = X_row - mean_X @@ -50,7 +50,7 @@ def layernorm_forward( inv_var = tl.math.rsqrt(row_var + eps) tl.store (r, inv_var) tl.store (mu, mean_X) - output = (XX * inv_var).to(W_row.dtype) * W_row + b_row + output = (XX * inv_var) * W_row + b_row tl.store(Y + col_offsets, output, mask = mask) pass From 6d183c42e3604b6b0b4d3bf0726801a522c94a47 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 16:54:52 -0700 Subject: [PATCH 0447/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 45 ++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index cfafa4a11b..058ec6a168 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -158,3 +158,48 @@ def fast_layernorm(layernorm, X): out = Fast_Layernorm.apply(X, W, bias, eps) return out pass + + +def test_layernorm( + dim = 1024, eps = 1e-5, dtype = torch.float16, + bsz = 21, random_state = 3407, seqlen = 3341, +): + from torch.nn import LayerNorm + layernorm = LayerNorm((dim,), eps = eps, device = "cuda", dtype = dtype) + torch.cuda.manual_seed(random_state) + torch.manual_seed(random_state) + torch.nn.init.uniform_(layernorm.weight) + torch.nn.init.uniform_(layernorm.bias) + X = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda") + XX = X.clone() + X .requires_grad_(True) + XX.requires_grad_(True) + Y = layernorm(X) + YY = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda", requires_grad = True) + Y.backward(YY) + correct_grad = X.grad.clone() + from unsloth.kernels 
import fast_layernorm + Y = fast_layernorm(layernorm, XX) + Y.backward(YY) + assert(torch.dist(correct_grad, XX.grad).item() <= 0.1) +pass + + +def testing_suite_layernorm(): + for dim in [512, 1024, 2048]: + for dtype in [torch.float16, torch.bfloat16]: + for seqlen in [3341, 2048, 349]: + for random_state in [3407, 42]: + test_layernorm( + dim = dim, + eps = 1e-5, + dtype = dtype, + bsz = 21, + random_state = random_state, + seqlen = seqlen, + ) + pass + pass + pass + pass +pass From e447ad4557db46a273eda9353c7748ed77118043 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 17:00:23 -0700 Subject: [PATCH 0448/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 058ec6a168..7939613804 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -178,7 +178,7 @@ def test_layernorm( YY = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda", requires_grad = True) Y.backward(YY) correct_grad = X.grad.clone() - from unsloth.kernels import fast_layernorm + # from unsloth.kernels import fast_layernorm Y = fast_layernorm(layernorm, XX) Y.backward(YY) assert(torch.dist(correct_grad, XX.grad).item() <= 0.1) From 80b440f5e247bcd0cfa45593c0442d3cefec7f72 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 17:03:19 -0700 Subject: [PATCH 0449/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 7939613804..c0ff4d7440 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -188,16 +188,18 @@ def test_layernorm( def testing_suite_layernorm(): for dim in [512, 1024, 2048]: for dtype in [torch.float16, torch.bfloat16]: - for seqlen in [3341, 2048, 349]: - for random_state in [3407, 42]: - test_layernorm( - 
dim = dim, - eps = 1e-5, - dtype = dtype, - bsz = 21, - random_state = random_state, - seqlen = seqlen, - ) + with torch.autocast(device_type = "cuda", dtype = dtype): + for seqlen in [3341, 2048, 349]: + for random_state in [3407, 42]: + test_layernorm( + dim = dim, + eps = 1e-5, + dtype = dtype, + bsz = 21, + random_state = random_state, + seqlen = seqlen, + ) + pass pass pass pass From dd4c53c095e442436065205739d348344ca4c0df Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 17:22:20 -0700 Subject: [PATCH 0450/1088] Patch layernorm --- unsloth/kernels/__init__.py | 6 +++++- unsloth/kernels/layernorm.py | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 2fb9d11f99..841d0ce0f0 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -14,7 +14,11 @@ from .cross_entropy_loss import fast_cross_entropy_loss from .rms_layernorm import fast_rms_layernorm -from .layernorm import fast_layernorm +from .layernorm import ( + fast_layernorm, + patch_layernorm, + unpatch_layernorm, +) from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel from .geglu import ( diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index c0ff4d7440..0d456109ea 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -150,6 +150,7 @@ def backward(ctx, dY): def fast_layernorm(layernorm, X): + assert(layernorm.elementwise_affine is True) W = layernorm.weight bias = layernorm.bias eps = layernorm.variance_epsilon if \ @@ -160,6 +161,28 @@ def fast_layernorm(layernorm, X): pass +from torch.nn import LayerNorm +class Fast_LayerNorm_Module(LayerNorm): + def forward(self, X): + return fast_layernorm(self, X) + pass +pass + + +def patch_layernorm(): + import torch.nn + torch.nn.LayerNorm = Fast_LayerNorm_Module + return +pass + + +def 
unpatch_layernorm(): + import torch.nn + torch.nn.LayerNorm = LayerNorm + return +pass + + def test_layernorm( dim = 1024, eps = 1e-5, dtype = torch.float16, bsz = 21, random_state = 3407, seqlen = 3341, From a488f8d007b8b6c8f10e856a60340d0c8ceeb001 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 17:24:39 -0700 Subject: [PATCH 0451/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 0d456109ea..0546484f17 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -162,7 +162,7 @@ def fast_layernorm(layernorm, X): from torch.nn import LayerNorm -class Fast_LayerNorm_Module(LayerNorm): +class Unsloth_LayerNorm(LayerNorm): def forward(self, X): return fast_layernorm(self, X) pass @@ -171,7 +171,7 @@ def forward(self, X): def patch_layernorm(): import torch.nn - torch.nn.LayerNorm = Fast_LayerNorm_Module + torch.nn.LayerNorm = Unsloth_LayerNorm return pass From f0fbba95b33eeffefccc054ccf5eaa534dcd4c5c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 22:50:33 -0700 Subject: [PATCH 0452/1088] RMS Layernorm --- unsloth/kernels/__init__.py | 6 +++++- unsloth/kernels/rms_layernorm.py | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 841d0ce0f0..606adf80f7 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -13,7 +13,11 @@ # limitations under the License. 
from .cross_entropy_loss import fast_cross_entropy_loss -from .rms_layernorm import fast_rms_layernorm +from .rms_layernorm import ( + fast_rms_layernorm, + patch_rms_layernorm, + unpatch_rms_layernorm, +) from .layernorm import ( fast_layernorm, patch_layernorm, diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index ac5beb5ab1..75d491cb1a 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -192,3 +192,25 @@ def fast_rms_layernorm(layernorm, X, gemma = False): out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass + + +from transformers.models.llama.modeling_llama import LlamaRMSNorm +class Unsloth_LlamaRMSNorm(LlamaRMSNorm): + def forward(self, X): + return fast_rms_layernorm(self, X, gemma = False) + pass +pass + + +def patch_rms_layernorm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaRMSNorm = Unsloth_LlamaRMSNorm + return +pass + + +def unpatch_rms_layernorm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaRMSNorm = LlamaRMSNorm + return +pass From fd3736de033dfdfdbed55d945b3cc4f8ea0d3575 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:13:51 -0700 Subject: [PATCH 0453/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 46 ++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 75d491cb1a..43924d0ab3 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -214,3 +214,49 @@ def unpatch_rms_layernorm(): transformers.models.llama.modeling_llama.LlamaRMSNorm = LlamaRMSNorm return pass + + +def test_rms_layernorm( + dim = 1024, eps = 1e-5, dtype = torch.float16, + bsz = 21, random_state = 3407, seqlen = 3341, +): + from transformers.models.llama.modeling_llama import LlamaRMSNorm + layernorm = LlamaRMSNorm((dim,), eps = 
eps).to("cuda") + torch.cuda.manual_seed(random_state) + torch.manual_seed(random_state) + torch.nn.init.uniform_(layernorm.weight) + X = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda") + XX = X.clone() + X .requires_grad_(True) + XX.requires_grad_(True) + Y = layernorm(X) + YY = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda", requires_grad = True) + Y.backward(YY) + correct_grad = X.grad.clone() + # from unsloth.kernels import fast_rms_layernorm + Y = fast_rms_layernorm(layernorm, XX) + Y.backward(YY) + assert(torch.amax(correct_grad - XX.grad).item() <= 0.05) +pass + + +def testing_suite_layernorm(): + for dim in [512, 1024, 2048]: + for dtype in [torch.float16, torch.bfloat16]: + with torch.autocast(device_type = "cuda", dtype = dtype): + for seqlen in [3341, 2048, 349]: + for random_state in [3407, 42]: + test_rms_layernorm( + dim = dim, + eps = 1e-5, + dtype = dtype, + bsz = 21, + random_state = random_state, + seqlen = seqlen, + ) + pass + pass + pass + pass + pass +pass From b25b46977fb9b2576bfc2d3b4d5b051c905fe722 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:49:57 -0700 Subject: [PATCH 0454/1088] Causal LM --- unsloth/kernels/__init__.py | 6 +- unsloth/kernels/cross_entropy_loss.py | 79 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 606adf80f7..3e55332c80 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .cross_entropy_loss import fast_cross_entropy_loss +from .cross_entropy_loss import ( + fast_cross_entropy_loss, + patch_llama_for_causal_lm, + unpatch_llama_for_causal_lm, +) from .rms_layernorm import ( fast_rms_layernorm, patch_rms_layernorm, diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 24e8002bec..74ee4ee66a 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -375,3 +375,82 @@ def fast_cross_entropy_loss( n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items pass + + +from transformers.models.llama.modeling_llama import LlamaForCausalLM +def patch_llama_for_causal_lm(): + import transformers.models.llama.modeling_llama + from transformers.models.llama.modeling_llama import ( + CausalLMOutputWithPast, + Optional, + Union, + Cache, + List, + Tuple, + ) + import inspect, re + function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) + function = function.split("\n") + i = re.match(r"[ ]{1,}", function[0]).span(0)[1] + function = [x[i:] for x in function] + function = "\n".join(function) + function = function[function.find("def forward"):] + replacement = """ loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + logit_softcapping = logit_softcapping, + logit_scaling = logit_scaling, + ) + else: + if logit_scaling != 0: + if logits.requires_grad: + logits = logit_scaling * logits + else: + 
logits *= logit_scaling + pass + pass + if logit_softcapping != 0: + if logits.requires_grad: + logits = (1.0 / logit_softcapping) * logits + logits = torch.tanh(logits) + logits = logit_softcapping * logits + else: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping + pass + pass + pass + """ + + function = \ + function[:function.find(" loss = None")] + \ + replacement + \ + function[ function.find(" if not return_dict"):] + function = function.replace("logits = logits.float()", "\n") + + patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ + f" {function}\n" + + exec(patched_function) + transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM + return +pass + + +def unpatch_llama_for_causal_lm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaForCausalLM = LlamaForCausalLM + return +pass From 805d343b7f9af17b24aaeede108145fe52ca34d0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:52:54 -0700 Subject: [PATCH 0455/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 74ee4ee66a..59d0f3b694 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -443,6 +443,7 @@ def patch_llama_for_causal_lm(): patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" + print(patched_function) exec(patched_function) transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM return From dd0f981a7c6f903182da331802d8b62274e3b2c7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:54:14 -0700 Subject: [PATCH 0456/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 59d0f3b694..58104e4dfa 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -440,6 +440,10 @@ def patch_llama_for_causal_lm(): function[ function.find(" if not return_dict"):] function = function.replace("logits = logits.float()", "\n") + function = function.split("\n") + function = [" "*4 + x for x in function] + function = "\n".join(function) + patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" From ef702f08d0cc887a168c11fcd967387d715ec4f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:55:25 -0700 Subject: [PATCH 0457/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 58104e4dfa..d06bf69913 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -440,10 +440,12 @@ def patch_llama_for_causal_lm(): function[ function.find(" if not return_dict"):] function = function.replace("logits = logits.float()", "\n") + # Missed spaces function = function.split("\n") - function = [" "*4 + x for x in function] + # Not the first one though! 
+ function = [function[0]] + [" "*4 + x for x in function[1:]] function = "\n".join(function) - + patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" From 0c53b20ba04a843539203cea10d398026b0e39ae Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:57:22 -0700 Subject: [PATCH 0458/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d06bf69913..e791664a6f 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -437,7 +437,7 @@ def patch_llama_for_causal_lm(): function = \ function[:function.find(" loss = None")] + \ replacement + \ - function[ function.find(" if not return_dict"):] + function[ function.find("if not return_dict"):] function = function.replace("logits = logits.float()", "\n") # Missed spaces From fde8fb58f280689757563cb0da68fa5ab95e787d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Sep 2024 23:59:09 -0700 Subject: [PATCH 0459/1088] Update layernorm.py --- unsloth/kernels/layernorm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 0546484f17..48ade6d5ec 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -1,4 +1,5 @@ # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# Copyright 2024-present Andrej Karpathy & the llm.c team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 63f310f9346e683c67af8f4064def4403f2615f5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:00:51 -0700 Subject: [PATCH 0460/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index e791664a6f..e5ba8b55f1 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -377,17 +377,17 @@ def fast_cross_entropy_loss( pass -from transformers.models.llama.modeling_llama import LlamaForCausalLM +from transformers.models.llama.modeling_llama import ( + LlamaForCausalLM, + CausalLMOutputWithPast, + Optional, + Union, + Cache, + List, + Tuple, +) def patch_llama_for_causal_lm(): import transformers.models.llama.modeling_llama - from transformers.models.llama.modeling_llama import ( - CausalLMOutputWithPast, - Optional, - Union, - Cache, - List, - Tuple, - ) import inspect, re function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) function = function.split("\n") From 822a590785a99e89a6a146d97ed2e3a77ae1514f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:01:56 -0700 Subject: [PATCH 0461/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index e5ba8b55f1..a09f167db7 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -377,17 +377,17 @@ def fast_cross_entropy_loss( pass -from transformers.models.llama.modeling_llama import ( - LlamaForCausalLM, - CausalLMOutputWithPast, - Optional, - Union, - Cache, - List, - Tuple, -) +from transformers.models.llama.modeling_llama import LlamaForCausalLM def patch_llama_for_causal_lm(): import 
transformers.models.llama.modeling_llama + from transformers.models.llama.modeling_llama import ( + CausalLMOutputWithPast, + Optional, + Union, + Cache, + List, + Tuple, + ) import inspect, re function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) function = function.split("\n") @@ -448,9 +448,8 @@ def patch_llama_for_causal_lm(): patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" - - print(patched_function) - exec(patched_function) + + exec(patched_function, globals()) transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM return pass From f677ae6fc9a1909f3f88b9c7eb647df9fa2be504 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:04:58 -0700 Subject: [PATCH 0462/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 71 +++++++++++++-------------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index a09f167db7..72bd3b92d6 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -377,25 +377,23 @@ def fast_cross_entropy_loss( pass -from transformers.models.llama.modeling_llama import LlamaForCausalLM -def patch_llama_for_causal_lm(): - import transformers.models.llama.modeling_llama - from transformers.models.llama.modeling_llama import ( - CausalLMOutputWithPast, - Optional, - Union, - Cache, - List, - Tuple, - ) - import inspect, re - function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) - function = function.split("\n") - i = re.match(r"[ ]{1,}", function[0]).span(0)[1] - function = [x[i:] for x in function] - function = "\n".join(function) - function = function[function.find("def forward"):] - replacement = """ loss = None +from transformers.models.llama.modeling_llama import ( + LlamaForCausalLM, + CausalLMOutputWithPast, + Optional, + 
Union, + Cache, + List, + Tuple, +) +import inspect, re +function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) +function = function.split("\n") +i = re.match(r"[ ]{1,}", function[0]).span(0)[1] +function = [x[i:] for x in function] +function = "\n".join(function) +function = function[function.find("def forward"):] +replacement = """ loss = None logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) if labels is not None: @@ -432,24 +430,25 @@ def patch_llama_for_causal_lm(): pass pass pass - """ +""" +function = \ + function[:function.find(" loss = None")] + \ + replacement + \ + function[ function.find("if not return_dict"):] +function = function.replace("logits = logits.float()", "\n") +# Missed spaces +function = function.split("\n") +# Not the first one though! +function = [function[0]] + [" "*4 + x for x in function[1:]] +function = "\n".join(function) +function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ +f" {function}\n" +exec(function, globals()) +del function, replacement - function = \ - function[:function.find(" loss = None")] + \ - replacement + \ - function[ function.find("if not return_dict"):] - function = function.replace("logits = logits.float()", "\n") - - # Missed spaces - function = function.split("\n") - # Not the first one though! 
- function = [function[0]] + [" "*4 + x for x in function[1:]] - function = "\n".join(function) - - patched_function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ - f" {function}\n" - - exec(patched_function, globals()) + +def patch_llama_for_causal_lm(): + import transformers.models.llama.modeling_llama transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM return pass From 3146b604ba18b362fb0d1e60846ea394b72779f3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:06:10 -0700 Subject: [PATCH 0463/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 72bd3b92d6..1174ead7d1 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -387,7 +387,7 @@ def fast_cross_entropy_loss( Tuple, ) import inspect, re -function = inspect.getsource(transformers.models.llama.modeling_llama.LlamaForCausalLM.forward) +function = inspect.getsource(LlamaForCausalLM.forward) function = function.split("\n") i = re.match(r"[ ]{1,}", function[0]).span(0)[1] function = [x[i:] for x in function] From 2ebea90e49dbeae61805d00733a96a08c6a99a7a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:07:49 -0700 Subject: [PATCH 0464/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 1174ead7d1..49bdbd10ea 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -443,6 +443,7 @@ def fast_cross_entropy_loss( function = "\n".join(function) function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" +print(function) exec(function, globals()) del function, replacement From 
0b87c2696c5da0a8eb0ab7298df5e22902229644 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:09:19 -0700 Subject: [PATCH 0465/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 49bdbd10ea..0034448be5 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -434,7 +434,7 @@ def fast_cross_entropy_loss( function = \ function[:function.find(" loss = None")] + \ replacement + \ - function[ function.find("if not return_dict"):] + function[ function.find(" if not return_dict"):] function = function.replace("logits = logits.float()", "\n") # Missed spaces function = function.split("\n") From aed22c45dd064e16239233243543d7b23034c0e6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:11:57 -0700 Subject: [PATCH 0466/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 0034448be5..1fec5d7a85 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -443,9 +443,8 @@ def fast_cross_entropy_loss( function = "\n".join(function) function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ f" {function}\n" -print(function) exec(function, globals()) -del function, replacement +del function, replacement, inspect, re def patch_llama_for_causal_lm(): From 4ba04293ae5669226d3b6c9a59b14588d7497031 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:22:54 -0700 Subject: [PATCH 0467/1088] Update _utils.py --- unsloth/models/_utils.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f868c855bc..cd66825edf 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -41,6 +41,8 @@ "torch_amp_custom_bwd", "accelerate_old_send_to_device", "accelerate_new_send_to_device", + "patch_gradient_checkpointing", + "unpatch_gradient_checkpointing", ] import torch @@ -791,7 +793,7 @@ def forward(ctx, forward_function, hidden_states, *args): def backward(ctx, dY): (hidden_states,) = ctx.saved_tensors hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() - hidden_states.requires_grad = True + hidden_states.requires_grad_(True) with torch.enable_grad(): (output,) = ctx.forward_function(hidden_states, *ctx.args) torch.autograd.backward(output, dY) @@ -806,6 +808,17 @@ def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, pass +import torch.utils +old_checkpoint = torch.utils.checkpoint +def patch_gradient_checkpointing(): + torch.utils.checkpoint = unsloth_offloaded_gradient_checkpoint +pass + +def unpatch_gradient_checkpointing(): + torch.utils.checkpoint = old_checkpoint +pass + + # ============================================= # Fixes Bitsandbytes to remove missing warnings from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod From 2dd26aa18b10e23cb8daea305650a78714e956e1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 00:23:23 -0700 Subject: [PATCH 0468/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cd66825edf..af7e1eb293 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.9.post2" +__version__ = "2024.9.post3" __all__ = [ "prepare_model_for_kbit_training", From 30b7b0a6057dc1afa87c241bd1a263e0ea613ac7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 11:42:41 -0700 Subject: [PATCH 0469/1088] Llama 3.2 --- unsloth/kernels/rms_layernorm.py | 21 ++ unsloth/models/mapper.py | 16 + unsloth/models/vision.py | 592 +++++++++++++++++++++++++++++++ 3 files changed, 629 insertions(+) create mode 100644 unsloth/models/vision.py diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 43924d0ab3..13faf08d6a 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -201,10 +201,25 @@ def forward(self, X): pass pass +try: + from transformers.models.mllama.modeling_mllama import MllamaTextRMSNorm + class Unsloth_MllamaTextRMSNorm(MllamaTextRMSNorm): + def forward(self, X): + return fast_rms_layernorm(self, X, gemma = False) + pass + pass +except: + pass +pass def patch_rms_layernorm(): import transformers.models.llama.modeling_llama transformers.models.llama.modeling_llama.LlamaRMSNorm = Unsloth_LlamaRMSNorm + try: + import transformers.models.mllama.modeling_mllama + transformers.models.mllama.modeling_mllama.MllamaTextRMSNorm = Unsloth_MllamaTextRMSNorm + except: + pass return pass @@ -212,6 +227,12 @@ def patch_rms_layernorm(): def unpatch_rms_layernorm(): import transformers.models.llama.modeling_llama transformers.models.llama.modeling_llama.LlamaRMSNorm = LlamaRMSNorm + try: + import transformers.models.mllama.modeling_mllama + transformers.models.mllama.modeling_mllama.MllamaTextRMSNorm = MllamaTextRMSNorm + except: + pass + return return pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 50436a7a4f..7f27437904 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -400,6 +400,22 @@ "unsloth/Qwen2.5-Coder-7B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct", ), + "unsloth/Llama-3.2-1B-bnb-4bit" : ( + 
"unsloth/Llama-3.2-1B", + "meta-llama/Llama-3.2-1B", + ), + "unsloth/Llama-3.2-3B-bnb-4bit" : ( + "unsloth/Llama-3.2-3B", + "meta-llama/Llama-3.2-3B", + ), + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-1B-Instruct", + "meta-llama/Llama-3.2-1B-Instruct", + ), + "unsloth/Llama-3.2-3B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-3B-Instruct", + "meta-llama/Llama-3.2-3B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py new file mode 100644 index 0000000000..988fda4938 --- /dev/null +++ b/unsloth/models/vision.py @@ -0,0 +1,592 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .llama import * +from ..kernels import patch_layernorm, unpatch_layernorm +from ..kernels import patch_rms_layernorm, unpatch_rms_layernorm +from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm +from ._utils import patch_gradient_checkpointing + +from transformers import AutoProcessor, AutoModelForVision2Seq + + +class FastVisionModel: + + def pre_patch(self): + patch_gradient_checkpointing() + patch_layernorm() + patch_rms_layernorm() + patch_llama_for_causal_lm() + pass + + def post_unpatch(self): + unpatch_layernorm() + unpatch_rms_layernorm() + unpatch_llama_for_causal_lm() + pass + + + @staticmethod + def from_pretrained( + model_name = "llava-hf/llava-1.5-7b-hf", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + trust_remote_code = False, + **kwargs, + ): + if trust_remote_code: + print( + "Unsloth: WARNING `trust_remote_code` is True.\n"\ + "Are you certain you want to do remote code execution?" + ) + pass + if token is None: token = get_token() + if model_patcher is None: model_patcher = FastLlamaModel + SUPPORTS_BFLOAT16 = is_bfloat16_supported() + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + + statistics = \ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ + f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' + print(statistics) + + # Warn about fast transfers + old_hf_transfer = os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": + print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + pass + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + + get_statistics() # For debugging - we use a download counter to see if environments are not breaking + + if dtype is None: + dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 + elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: + logger.warning_once("Device does not support bfloat16. Will change to float16.") + dtype = torch.float16 + + assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) + + # RoPE Scaling + model_config = AutoConfig.from_pretrained(model_name, token = token) + model_max_seq_length = model_config.max_position_embeddings + + # Check if RoPE Scaling is even allowed + model_function = MODEL_FOR_CAUSAL_LM_MAPPING[model_config.__class__] + has_rope_scaling = False + try: + with open(inspect.getfile(model_function), "r") as file: + has_rope_scaling = "self.config.rope_scaling" in file.read() + except: pass + has_rope_scaling = True + + # If max_seq_length is not specified, use maximum fron config + if max_seq_length is None: + max_seq_length = model_max_seq_length + pass + + if (rope_scaling is None) and (max_seq_length > model_max_seq_length): + + rope_scaling = max_seq_length / model_max_seq_length + + logger.warning_once( + f"Unsloth: {model_name} can only handle sequence lengths of at most "\ + f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ + f"{round(rope_scaling, 3)}, it can be magically be extended to "\ + f"{max_seq_length}!" 
+ ) + + # Warn RoPE scaling isn't allowed + if not has_rope_scaling: + raise RuntimeError( + "However, {model_name} doesn't support RoPE Scaling!\n"\ + "Please file a feature request at https://github.com/unslothai/unsloth." + ) + pass + + rope_scaling = {"type": "linear", "factor": rope_scaling,} + + # Add to kwargs + kwargs["rope_scaling"] = rope_scaling + pass + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! + pre_check = check_nvidia() + + bnb_config = None + if load_in_4bit: + bnb_config = BitsAndBytesConfig( + load_in_4bit = True, + bnb_4bit_use_double_quant = True, + bnb_4bit_quant_type = "nf4", + bnb_4bit_compute_dtype = dtype, + ) + pass + + # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 + # RoPE Scaling's max_position_embeddings must be updated + max_position_embeddings = max(max_seq_length, model_max_seq_length) + kwargs.pop("attn_implementation", None); # No need since we auto call it + + # Cannot be None, since HF now checks for the config + if load_in_4bit: kwargs["quantization_config"] = bnb_config + + self.pre_patch() + model = AutoModelForVision2Seq.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + # quantization_config = bnb_config, + token = token, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + attn_implementation = "eager", + **kwargs, + ) + self.post_unpatch() + + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
+ post_check = check_nvidia() + + # Counteract saved tokenizers + tokenizer = AutoProcessor.from_pretrained( + model_name, + ) + model = FastVisionModel.post_patch(model) + + # Patch Trainer + from transformers.trainer import Trainer + try: + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + inner_training_loop = inspect.getsource(Trainer._inner_training_loop) + Trainer._original_training_loop = inner_training_loop + else: + inner_training_loop = Trainer._original_training_loop + except: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + pass + + if ((post_check - pre_check) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in inner_training_loop: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) + + start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] + end = inner_training_loop.find("\n\n", start) + original_debug = inner_training_loop[start:end] + spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] + front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + + debug_info = """debug_info = \\ + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f' "-____-" Number of trainable parameters = 
{get_model_param_count(model, trainable_only=True):,}' + logger.warning(debug_info) + import subprocess, re, gc, numpy as np + a = np.array([0,]) + try: + a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) + a = np.array([int(x.decode('utf-8'))/1024 for x in a]) + except: + if not torch.cuda.is_available(): + raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') + if ((a - PRE_CHECK) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + for _ in range(3): + gc.collect() + torch.cuda.empty_cache()""" + + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace(original_debug, debug_info) + + debug_info = """n_total_devices = total_train_batch_size // \\ + args.gradient_accumulation_steps // self._train_batch_size + if n_total_devices > 1: + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') + debug_info =""" + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) + + front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) + inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = tpu_spmd_dataloader(train_dataloader)", + "raise RuntimeError('Unsloth: TPUs are not yet supported!')" + ) + inner_training_loop = inner_training_loop.replace( + "self.accelerator.free_memory()", + "self.accelerator.free_memory()\n" + \ + front_spaces + "if self.is_deepspeed_enabled:"\ + "raise RuntimeError('Unsloth: Deepspeed is not yet 
supported!')\n", 1, + ) + + check_batches = """train_dataloader = self.get_train_dataloader() + ga = args.gradient_accumulation_steps + bsz = self._train_batch_size + total_batches = bsz * ga * args.world_size + n_total_devices = total_batches // ga // bsz + if n_total_devices > 1: + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') + divisor = n_total_devices / 1 + bsz = self._train_batch_size = max(int(bsz / divisor), 1) + if total_batches // ga // bsz > 1: + divisor = n_total_devices / 1 + ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" + check_batches = check_batches.split('\n') + check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = self.get_train_dataloader()", + check_batches, 1, + ) + inner_training_loop = inner_training_loop.replace( + "_inner_training_loop", + "_fast_inner_training_loop", 1, + ) + exec(inner_training_loop, globals()) + + Trainer._inner_training_loop = _fast_inner_training_loop + inner_training_loop = inner_training_loop.replace( + "is_torch_tpu_available()", + "False", + ) + if "n_total_devices >" not in inner_training_loop: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + pass + inner_training_loop = inner_training_loop.replace( + "is_sagemaker_mp_enabled()", + "False", + ) + exec(inner_training_loop, globals()) + Trainer._inner_training_loop = _fast_inner_training_loop + + # Save max_seq_length + model.max_seq_length = max_position_embeddings + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_position_embeddings + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_position_embeddings + + # Fix up config for transformers uploading PEFT + # Not necessary anymore since we require transformers>=4.37! 
+ if False: + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass + pass + + # Log Unsloth version for future fastpaths for inference + model.config.update({"unsloth_version" : __version__}) + + # Add save modules + patch_saving_functions(model) + Trainer._inner_training_loop = _fast_inner_training_loop + + # Also fix torch_dtype + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass + + return model, tokenizer + pass + + + @staticmethod + def post_patch(model): + # Patch model + layers = model.model.layers + lm_head = model.get_output_embeddings().weight + + # Also patch all dtypes - BnB seems to not allocate the correct type? + # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! 
+ module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + pass + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass + + + @staticmethod + def get_peft_model( + model, + r = 16, + target_modules = "all-linear", + lora_alpha = 16, + lora_dropout = 0, + bias = "none", + layers_to_transform = None, + layers_pattern = None, + use_gradient_checkpointing = True, + random_state = 3407, + max_seq_length = 2048, # not used anymore + use_rslora = False, + modules_to_save = None, + init_lora_weights = True, + loftq_config = {}, + temporary_location = "_unsloth_temporary_saved_buffers", + **kwargs, + ): + transformers_set_seed(random_state) + + # Get LoRA + arguments = dict( + r = r, + lora_alpha = lora_alpha, + target_modules = target_modules, + lora_dropout = lora_dropout, + bias = bias, + layers_to_transform = layers_to_transform, + init_lora_weights = init_lora_weights, + # loftq_config = loftq_config, + # use_rslora = use_rslora, + modules_to_save = modules_to_save, + **kwargs, + ) + + lora_config = LoraConfig(**arguments) + + model = _get_peft_model(model, lora_config) + + model = FastVisionModel.patch_peft_model(model, use_gradient_checkpointing) + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + + return model + pass + + + @staticmethod + def patch_peft_model( + model, + use_gradient_checkpointing = True, + ): + + model = prepare_model_for_kbit_training( + model, + use_gradient_checkpointing = use_gradient_checkpointing, + use_reentrant = True, + ) + + # Fix up config for transformers uploading PEFT + for active_adapter in model.peft_config.keys(): + # Not necessary since we requires transformers >= 4.37 + if False: + name = model.peft_config[active_adapter].base_model_name_or_path + if name.startswith("unsloth/") and 
name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.peft_config[active_adapter].base_model_name_or_path = name + pass + # Add revision to enable future fast inference paths + # [TODO] Bugs out!see https://github.com/unslothai/unsloth/issues/492 + # model.peft_config[active_adapter].revision = f"unsloth" + pass + + from transformers.trainer import Trainer + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + raise RuntimeError( + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ + 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ + 'We do have a separate beta version, which you can contact us about!\n'\ + 'Thank you for your understanding and we appreciate it immensely!' + ) + pass + + logger.warning_once( + f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ + f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP layers.", + ) + patch_saving_functions(model) + + # Patch cross entropy loss labels + # Fixes https://github.com/unslothai/unsloth/issues/10 + max_seq_length = model.max_seq_length + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + model.model.extra_ignored_labels = extra_ignored_labels + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + 
torch.cuda.empty_cache() + pass + return model + pass + + + @staticmethod + def for_inference(model): + # if model.config.model_type == "qwen2": + # FastLlamaModel.for_training(model) + # return + # pass + + internal_model = model + internal_model.gradient_checkpointing = False + internal_model.training = False + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = False + internal_model.training = False + pass + if hasattr(internal_model, "training"): + internal_model.training = False + pass + + # Also check if lm_head / embeddings are trained + internal_model = model + while not hasattr(internal_model, "lm_head"): + internal_model = internal_model.model + pass + lm_head = internal_model.lm_head.weight + device_type = lm_head.device.type + dtype = model.config.torch_dtype + + if type(dtype) is str: + if dtype == "float16": dtype = torch.float16 + elif dtype == "bfloat16": dtype = torch.bfloat16 + pass + + # Also disable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + + return model + pass + + + @staticmethod + def for_training(model, use_gradient_checkpointing = True): + internal_model = model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + + # Delete all fast inference loras + for param in model.parameters(): + if hasattr(param, "_fast_lora"): + del param._fast_lora + pass + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + pass + if hasattr(internal_model, "training"): + internal_model.training 
= True + pass + + # Also re-enable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + + return model + pass +pass From 087ffc1907aef7079b9a7029a49ae234123b1f45 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 11:48:24 -0700 Subject: [PATCH 0470/1088] Llama 3.2 (#1058) * Layernorm * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Patch layernorm * Update layernorm.py * RMS Layernorm * Update rms_layernorm.py * Causal LM * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update layernorm.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Llama 3.2 --- unsloth/kernels/__init__.py | 17 +- unsloth/kernels/cross_entropy_loss.py | 84 ++++ unsloth/kernels/layernorm.py | 231 ++++++++++ unsloth/kernels/rms_layernorm.py | 89 ++++ unsloth/models/_utils.py | 17 +- unsloth/models/mapper.py | 16 + unsloth/models/vision.py | 592 ++++++++++++++++++++++++++ 7 files changed, 1042 insertions(+), 4 deletions(-) create mode 100644 unsloth/kernels/layernorm.py create mode 100644 unsloth/models/vision.py diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index cd1d90f262..3e55332c80 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -12,8 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .cross_entropy_loss import fast_cross_entropy_loss -from .rms_layernorm import fast_rms_layernorm +from .cross_entropy_loss import ( + fast_cross_entropy_loss, + patch_llama_for_causal_lm, + unpatch_llama_for_causal_lm, +) +from .rms_layernorm import ( + fast_rms_layernorm, + patch_rms_layernorm, + unpatch_rms_layernorm, +) +from .layernorm import ( + fast_layernorm, + patch_layernorm, + unpatch_layernorm, +) from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel from .geglu import ( diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 24e8002bec..1fec5d7a85 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -375,3 +375,87 @@ def fast_cross_entropy_loss( n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items pass + + +from transformers.models.llama.modeling_llama import ( + LlamaForCausalLM, + CausalLMOutputWithPast, + Optional, + Union, + Cache, + List, + Tuple, +) +import inspect, re +function = inspect.getsource(LlamaForCausalLM.forward) +function = function.split("\n") +i = re.match(r"[ ]{1,}", function[0]).span(0)[1] +function = [x[i:] for x in function] +function = "\n".join(function) +function = function[function.find("def forward"):] +replacement = """ loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + logit_softcapping = logit_softcapping, + 
logit_scaling = logit_scaling, + ) + else: + if logit_scaling != 0: + if logits.requires_grad: + logits = logit_scaling * logits + else: + logits *= logit_scaling + pass + pass + if logit_softcapping != 0: + if logits.requires_grad: + logits = (1.0 / logit_softcapping) * logits + logits = torch.tanh(logits) + logits = logit_softcapping * logits + else: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping + pass + pass + pass +""" +function = \ + function[:function.find(" loss = None")] + \ + replacement + \ + function[ function.find(" if not return_dict"):] +function = function.replace("logits = logits.float()", "\n") +# Missed spaces +function = function.split("\n") +# Not the first one though! +function = [function[0]] + [" "*4 + x for x in function[1:]] +function = "\n".join(function) +function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ +f" {function}\n" +exec(function, globals()) +del function, replacement, inspect, re + + +def patch_llama_for_causal_lm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM + return +pass + + +def unpatch_llama_for_causal_lm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaForCausalLM = LlamaForCausalLM + return +pass diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py new file mode 100644 index 0000000000..48ade6d5ec --- /dev/null +++ b/unsloth/kernels/layernorm.py @@ -0,0 +1,231 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# Copyright 2024-present Andrej Karpathy & the llm.c team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import triton +import triton.language as tl +import torch +from .utils import calculate_settings + + +@triton.jit +def layernorm_forward( + Y, Y_row_stride, + X, X_row_stride, + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE : tl.constexpr +): + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + Y += row_idx * Y_row_stride + X += row_idx * X_row_stride + r += row_idx + mu += row_idx + + # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules + # are in float32! 
+ X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) + + mean_X = tl.sum(X_row, axis = 0) / n_cols + XX = X_row - mean_X + row_var = tl.sum(XX * XX, axis = 0) / n_cols + inv_var = tl.math.rsqrt(row_var + eps) + tl.store (r, inv_var) + tl.store (mu, mean_X) + output = (XX * inv_var) * W_row + b_row + tl.store(Y + col_offsets, output, mask = mask) +pass + + +@triton.jit +def layernorm_backward( + dY, dY_row_stride, + X, X_row_stride, + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE : tl.constexpr +): + # Approximately follows https://github.com/karpathy/llm.c/blob/master/doc/layernorm/layernorm.md + row_idx = tl.program_id(0) + col_offsets = tl.arange(0, BLOCK_SIZE) + mask = col_offsets < n_cols + + dY += row_idx * dY_row_stride + X += row_idx * X_row_stride + r += row_idx + mu += row_idx + + # According to https://pytorch.org/torchtune/stable/_modules/torchtune/modules/layer_norm.html#Fp32LayerNorm, all modules + # are in float32! 
+ dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) + X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) + W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) + b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) + + inv_var = tl.load(r) .to(tl.float32) + mean = tl.load(mu).to(tl.float32) + normed = (X_row - mean) * inv_var + dY_W = dY_row * W_row + dX_row = dY_W - tl.sum(dY_W, axis = 0) / n_cols - normed * tl.sum(dY_W * normed, axis = 0) / n_cols + dX_row = dX_row * inv_var + tl.store(dY + col_offsets, dX_row, mask = mask) +pass + + +class Fast_Layernorm(torch.autograd.Function): + @staticmethod + def forward(ctx, X, W, b, eps): + shape = X.shape + dim = shape[-1] + X = X.view(-1, dim) + n_rows, n_cols = X.shape + BLOCK_SIZE, num_warps = calculate_settings(n_cols) + + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") + r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + mu = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + + layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + ctx.eps = eps + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps + ctx.save_for_backward(X, W, b, r, mu) + return Y.view(*shape) + pass + + @staticmethod + def backward(ctx, dY): + shape = dY.shape + dim = shape[-1] + dY = dY.view(-1, dim) + X, W, b, r, mu = ctx.saved_tensors + n_rows, n_cols = dY.shape + + layernorm_backward[(n_rows,)]( + dY, dY.stride(0), + X, X .stride(0), + W, + b, + r, + mu, + n_cols, ctx.eps, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) + dX = dY.view(*shape) + return dX, None, None, None, None + pass +pass + + +def fast_layernorm(layernorm, X): + assert(layernorm.elementwise_affine is True) + W = layernorm.weight + bias = layernorm.bias + eps = layernorm.variance_epsilon if \ + hasattr(layernorm, 
"variance_epsilon") \ + else layernorm.eps + out = Fast_Layernorm.apply(X, W, bias, eps) + return out +pass + + +from torch.nn import LayerNorm +class Unsloth_LayerNorm(LayerNorm): + def forward(self, X): + return fast_layernorm(self, X) + pass +pass + + +def patch_layernorm(): + import torch.nn + torch.nn.LayerNorm = Unsloth_LayerNorm + return +pass + + +def unpatch_layernorm(): + import torch.nn + torch.nn.LayerNorm = LayerNorm + return +pass + + +def test_layernorm( + dim = 1024, eps = 1e-5, dtype = torch.float16, + bsz = 21, random_state = 3407, seqlen = 3341, +): + from torch.nn import LayerNorm + layernorm = LayerNorm((dim,), eps = eps, device = "cuda", dtype = dtype) + torch.cuda.manual_seed(random_state) + torch.manual_seed(random_state) + torch.nn.init.uniform_(layernorm.weight) + torch.nn.init.uniform_(layernorm.bias) + X = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda") + XX = X.clone() + X .requires_grad_(True) + XX.requires_grad_(True) + Y = layernorm(X) + YY = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda", requires_grad = True) + Y.backward(YY) + correct_grad = X.grad.clone() + # from unsloth.kernels import fast_layernorm + Y = fast_layernorm(layernorm, XX) + Y.backward(YY) + assert(torch.dist(correct_grad, XX.grad).item() <= 0.1) +pass + + +def testing_suite_layernorm(): + for dim in [512, 1024, 2048]: + for dtype in [torch.float16, torch.bfloat16]: + with torch.autocast(device_type = "cuda", dtype = dtype): + for seqlen in [3341, 2048, 349]: + for random_state in [3407, 42]: + test_layernorm( + dim = dim, + eps = 1e-5, + dtype = dtype, + bsz = 21, + random_state = random_state, + seqlen = seqlen, + ) + pass + pass + pass + pass + pass +pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index ac5beb5ab1..13faf08d6a 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -192,3 +192,92 @@ def fast_rms_layernorm(layernorm, X, gemma = False): out = 
Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass + + +from transformers.models.llama.modeling_llama import LlamaRMSNorm +class Unsloth_LlamaRMSNorm(LlamaRMSNorm): + def forward(self, X): + return fast_rms_layernorm(self, X, gemma = False) + pass +pass + +try: + from transformers.models.mllama.modeling_mllama import MllamaTextRMSNorm + class Unsloth_MllamaTextRMSNorm(MllamaTextRMSNorm): + def forward(self, X): + return fast_rms_layernorm(self, X, gemma = False) + pass + pass +except: + pass +pass + +def patch_rms_layernorm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaRMSNorm = Unsloth_LlamaRMSNorm + try: + import transformers.models.mllama.modeling_mllama + transformers.models.mllama.modeling_mllama.MllamaTextRMSNorm = Unsloth_MllamaTextRMSNorm + except: + pass + return +pass + + +def unpatch_rms_layernorm(): + import transformers.models.llama.modeling_llama + transformers.models.llama.modeling_llama.LlamaRMSNorm = LlamaRMSNorm + try: + import transformers.models.mllama.modeling_mllama + transformers.models.mllama.modeling_mllama.MllamaTextRMSNorm = MllamaTextRMSNorm + except: + pass + return + return +pass + + +def test_rms_layernorm( + dim = 1024, eps = 1e-5, dtype = torch.float16, + bsz = 21, random_state = 3407, seqlen = 3341, +): + from transformers.models.llama.modeling_llama import LlamaRMSNorm + layernorm = LlamaRMSNorm((dim,), eps = eps).to("cuda") + torch.cuda.manual_seed(random_state) + torch.manual_seed(random_state) + torch.nn.init.uniform_(layernorm.weight) + X = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda") + XX = X.clone() + X .requires_grad_(True) + XX.requires_grad_(True) + Y = layernorm(X) + YY = torch.randn((bsz, seqlen, dim), dtype = dtype, device = "cuda", requires_grad = True) + Y.backward(YY) + correct_grad = X.grad.clone() + # from unsloth.kernels import fast_rms_layernorm + Y = fast_rms_layernorm(layernorm, XX) + Y.backward(YY) + 
assert(torch.amax(correct_grad - XX.grad).item() <= 0.05) +pass + + +def testing_suite_layernorm(): + for dim in [512, 1024, 2048]: + for dtype in [torch.float16, torch.bfloat16]: + with torch.autocast(device_type = "cuda", dtype = dtype): + for seqlen in [3341, 2048, 349]: + for random_state in [3407, 42]: + test_rms_layernorm( + dim = dim, + eps = 1e-5, + dtype = dtype, + bsz = 21, + random_state = random_state, + seqlen = seqlen, + ) + pass + pass + pass + pass + pass +pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f868c855bc..af7e1eb293 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9.post2" +__version__ = "2024.9.post3" __all__ = [ "prepare_model_for_kbit_training", @@ -41,6 +41,8 @@ "torch_amp_custom_bwd", "accelerate_old_send_to_device", "accelerate_new_send_to_device", + "patch_gradient_checkpointing", + "unpatch_gradient_checkpointing", ] import torch @@ -791,7 +793,7 @@ def forward(ctx, forward_function, hidden_states, *args): def backward(ctx, dY): (hidden_states,) = ctx.saved_tensors hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() - hidden_states.requires_grad = True + hidden_states.requires_grad_(True) with torch.enable_grad(): (output,) = ctx.forward_function(hidden_states, *ctx.args) torch.autograd.backward(output, dY) @@ -806,6 +808,17 @@ def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, pass +import torch.utils +old_checkpoint = torch.utils.checkpoint +def patch_gradient_checkpointing(): + torch.utils.checkpoint = unsloth_offloaded_gradient_checkpoint +pass + +def unpatch_gradient_checkpointing(): + torch.utils.checkpoint = old_checkpoint +pass + + # ============================================= # Fixes Bitsandbytes to remove missing warnings from transformers.utils.quantization_config import 
BitsAndBytesConfig, QuantizationMethod diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 50436a7a4f..7f27437904 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -400,6 +400,22 @@ "unsloth/Qwen2.5-Coder-7B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct", ), + "unsloth/Llama-3.2-1B-bnb-4bit" : ( + "unsloth/Llama-3.2-1B", + "meta-llama/Llama-3.2-1B", + ), + "unsloth/Llama-3.2-3B-bnb-4bit" : ( + "unsloth/Llama-3.2-3B", + "meta-llama/Llama-3.2-3B", + ), + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-1B-Instruct", + "meta-llama/Llama-3.2-1B-Instruct", + ), + "unsloth/Llama-3.2-3B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-3B-Instruct", + "meta-llama/Llama-3.2-3B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py new file mode 100644 index 0000000000..988fda4938 --- /dev/null +++ b/unsloth/models/vision.py @@ -0,0 +1,592 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .llama import * +from ..kernels import patch_layernorm, unpatch_layernorm +from ..kernels import patch_rms_layernorm, unpatch_rms_layernorm +from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm +from ._utils import patch_gradient_checkpointing + +from transformers import AutoProcessor, AutoModelForVision2Seq + + +class FastVisionModel: + + def pre_patch(self): + patch_gradient_checkpointing() + patch_layernorm() + patch_rms_layernorm() + patch_llama_for_causal_lm() + pass + + def post_unpatch(self): + unpatch_layernorm() + unpatch_rms_layernorm() + unpatch_llama_for_causal_lm() + pass + + + @staticmethod + def from_pretrained( + model_name = "llava-hf/llava-1.5-7b-hf", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + trust_remote_code = False, + **kwargs, + ): + if trust_remote_code: + print( + "Unsloth: WARNING `trust_remote_code` is True.\n"\ + "Are you certain you want to do remote code execution?" + ) + pass + if token is None: token = get_token() + if model_patcher is None: model_patcher = FastLlamaModel + SUPPORTS_BFLOAT16 = is_bfloat16_supported() + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + + statistics = \ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ + f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ + f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' + print(statistics) + + # Warn about fast transfers + old_hf_transfer = os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") + if os.environ.get("HF_HUB_ENABLE_HF_TRANSFER", "0") == "1": + print("Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!") + pass + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + + get_statistics() # For debugging - we use a download counter to see if environments are not breaking + + if dtype is None: + dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 + elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: + logger.warning_once("Device does not support bfloat16. Will change to float16.") + dtype = torch.float16 + + assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) + + # RoPE Scaling + model_config = AutoConfig.from_pretrained(model_name, token = token) + model_max_seq_length = model_config.max_position_embeddings + + # Check if RoPE Scaling is even allowed + model_function = MODEL_FOR_CAUSAL_LM_MAPPING[model_config.__class__] + has_rope_scaling = False + try: + with open(inspect.getfile(model_function), "r") as file: + has_rope_scaling = "self.config.rope_scaling" in file.read() + except: pass + has_rope_scaling = True + + # If max_seq_length is not specified, use maximum fron config + if max_seq_length is None: + max_seq_length = model_max_seq_length + pass + + if (rope_scaling is None) and (max_seq_length > model_max_seq_length): + + rope_scaling = max_seq_length / model_max_seq_length + + logger.warning_once( + f"Unsloth: {model_name} can only handle sequence lengths of at most "\ + f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ + f"{round(rope_scaling, 3)}, it can be magically be extended to "\ + f"{max_seq_length}!" 
+ ) + + # Warn RoPE scaling isn't allowed + if not has_rope_scaling: + raise RuntimeError( + "However, {model_name} doesn't support RoPE Scaling!\n"\ + "Please file a feature request at https://github.com/unslothai/unsloth." + ) + pass + + rope_scaling = {"type": "linear", "factor": rope_scaling,} + + # Add to kwargs + kwargs["rope_scaling"] = rope_scaling + pass + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! + pre_check = check_nvidia() + + bnb_config = None + if load_in_4bit: + bnb_config = BitsAndBytesConfig( + load_in_4bit = True, + bnb_4bit_use_double_quant = True, + bnb_4bit_quant_type = "nf4", + bnb_4bit_compute_dtype = dtype, + ) + pass + + # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 + # RoPE Scaling's max_position_embeddings must be updated + max_position_embeddings = max(max_seq_length, model_max_seq_length) + kwargs.pop("attn_implementation", None); # No need since we auto call it + + # Cannot be None, since HF now checks for the config + if load_in_4bit: kwargs["quantization_config"] = bnb_config + + self.pre_patch() + model = AutoModelForVision2Seq.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + # quantization_config = bnb_config, + token = token, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + attn_implementation = "eager", + **kwargs, + ) + self.post_unpatch() + + # Return old flag + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
+ post_check = check_nvidia() + + # Counteract saved tokenizers + tokenizer = AutoProcessor.from_pretrained( + model_name, + ) + model = FastVisionModel.post_patch(model) + + # Patch Trainer + from transformers.trainer import Trainer + try: + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + inner_training_loop = inspect.getsource(Trainer._inner_training_loop) + Trainer._original_training_loop = inner_training_loop + else: + inner_training_loop = Trainer._original_training_loop + except: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + pass + + if ((post_check - pre_check) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in inner_training_loop: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) + + start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] + end = inner_training_loop.find("\n\n", start) + original_debug = inner_training_loop[start:end] + spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] + front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + + debug_info = """debug_info = \\ + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f' "-____-" Number of trainable parameters = 
{get_model_param_count(model, trainable_only=True):,}' + logger.warning(debug_info) + import subprocess, re, gc, numpy as np + a = np.array([0,]) + try: + a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) + a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) + a = np.array([int(x.decode('utf-8'))/1024 for x in a]) + except: + if not torch.cuda.is_available(): + raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') + if ((a - PRE_CHECK) >= 1).sum() > 1: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + for _ in range(3): + gc.collect() + torch.cuda.empty_cache()""" + + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace(original_debug, debug_info) + + debug_info = """n_total_devices = total_train_batch_size // \\ + args.gradient_accumulation_steps // self._train_batch_size + if n_total_devices > 1: + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') + debug_info =""" + debug_info = debug_info.split('\n') + debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) + inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) + + front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) + inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = tpu_spmd_dataloader(train_dataloader)", + "raise RuntimeError('Unsloth: TPUs are not yet supported!')" + ) + inner_training_loop = inner_training_loop.replace( + "self.accelerator.free_memory()", + "self.accelerator.free_memory()\n" + \ + front_spaces + "if self.is_deepspeed_enabled:"\ + "raise RuntimeError('Unsloth: Deepspeed is not yet 
supported!')\n", 1, + ) + + check_batches = """train_dataloader = self.get_train_dataloader() + ga = args.gradient_accumulation_steps + bsz = self._train_batch_size + total_batches = bsz * ga * args.world_size + n_total_devices = total_batches // ga // bsz + if n_total_devices > 1: + logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') + divisor = n_total_devices / 1 + bsz = self._train_batch_size = max(int(bsz / divisor), 1) + if total_batches // ga // bsz > 1: + divisor = n_total_devices / 1 + ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" + check_batches = check_batches.split('\n') + check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) + inner_training_loop = inner_training_loop.replace( + "train_dataloader = self.get_train_dataloader()", + check_batches, 1, + ) + inner_training_loop = inner_training_loop.replace( + "_inner_training_loop", + "_fast_inner_training_loop", 1, + ) + exec(inner_training_loop, globals()) + + Trainer._inner_training_loop = _fast_inner_training_loop + inner_training_loop = inner_training_loop.replace( + "is_torch_tpu_available()", + "False", + ) + if "n_total_devices >" not in inner_training_loop: + raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + pass + inner_training_loop = inner_training_loop.replace( + "is_sagemaker_mp_enabled()", + "False", + ) + exec(inner_training_loop, globals()) + Trainer._inner_training_loop = _fast_inner_training_loop + + # Save max_seq_length + model.max_seq_length = max_position_embeddings + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_position_embeddings + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_position_embeddings + + # Fix up config for transformers uploading PEFT + # Not necessary anymore since we require transformers>=4.37! 
+ if False: + name = model.config._name_or_path + if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.config.update({"_name_or_path" : name}) + pass + pass + + # Log Unsloth version for future fastpaths for inference + model.config.update({"unsloth_version" : __version__}) + + # Add save modules + patch_saving_functions(model) + Trainer._inner_training_loop = _fast_inner_training_loop + + # Also fix torch_dtype + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "config"): + if internal_model.config.torch_dtype == "float32": + internal_model.config.torch_dtype = torch.float32 + elif internal_model.config.torch_dtype == "bfloat16": + internal_model.config.torch_dtype = torch.bfloat16 + elif internal_model.config.torch_dtype == "float16": + internal_model.config.torch_dtype = torch.float16 + pass + pass + + return model, tokenizer + pass + + + @staticmethod + def post_patch(model): + # Patch model + layers = model.model.layers + lm_head = model.get_output_embeddings().weight + + # Also patch all dtypes - BnB seems to not allocate the correct type? + # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! 
+ module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + pass + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass + + + @staticmethod + def get_peft_model( + model, + r = 16, + target_modules = "all-linear", + lora_alpha = 16, + lora_dropout = 0, + bias = "none", + layers_to_transform = None, + layers_pattern = None, + use_gradient_checkpointing = True, + random_state = 3407, + max_seq_length = 2048, # not used anymore + use_rslora = False, + modules_to_save = None, + init_lora_weights = True, + loftq_config = {}, + temporary_location = "_unsloth_temporary_saved_buffers", + **kwargs, + ): + transformers_set_seed(random_state) + + # Get LoRA + arguments = dict( + r = r, + lora_alpha = lora_alpha, + target_modules = target_modules, + lora_dropout = lora_dropout, + bias = bias, + layers_to_transform = layers_to_transform, + init_lora_weights = init_lora_weights, + # loftq_config = loftq_config, + # use_rslora = use_rslora, + modules_to_save = modules_to_save, + **kwargs, + ) + + lora_config = LoraConfig(**arguments) + + model = _get_peft_model(model, lora_config) + + model = FastVisionModel.patch_peft_model(model, use_gradient_checkpointing) + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + + return model + pass + + + @staticmethod + def patch_peft_model( + model, + use_gradient_checkpointing = True, + ): + + model = prepare_model_for_kbit_training( + model, + use_gradient_checkpointing = use_gradient_checkpointing, + use_reentrant = True, + ) + + # Fix up config for transformers uploading PEFT + for active_adapter in model.peft_config.keys(): + # Not necessary since we requires transformers >= 4.37 + if False: + name = model.peft_config[active_adapter].base_model_name_or_path + if name.startswith("unsloth/") and 
name.endswith("-bnb-4bit"): + name = name[:len(name) - len("-bnb-4bit")] + model.peft_config[active_adapter].base_model_name_or_path = name + pass + # Add revision to enable future fast inference paths + # [TODO] Bugs out!see https://github.com/unslothai/unsloth/issues/492 + # model.peft_config[active_adapter].revision = f"unsloth" + pass + + from transformers.trainer import Trainer + if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": + raise RuntimeError( + 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ + 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ + 'We do have a separate beta version, which you can contact us about!\n'\ + 'Thank you for your understanding and we appreciate it immensely!' + ) + pass + + logger.warning_once( + f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ + f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP layers.", + ) + patch_saving_functions(model) + + # Patch cross entropy loss labels + # Fixes https://github.com/unslothai/unsloth/issues/10 + max_seq_length = model.max_seq_length + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + model.model.extra_ignored_labels = extra_ignored_labels + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + 
torch.cuda.empty_cache() + pass + return model + pass + + + @staticmethod + def for_inference(model): + # if model.config.model_type == "qwen2": + # FastLlamaModel.for_training(model) + # return + # pass + + internal_model = model + internal_model.gradient_checkpointing = False + internal_model.training = False + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = False + internal_model.training = False + pass + if hasattr(internal_model, "training"): + internal_model.training = False + pass + + # Also check if lm_head / embeddings are trained + internal_model = model + while not hasattr(internal_model, "lm_head"): + internal_model = internal_model.model + pass + lm_head = internal_model.lm_head.weight + device_type = lm_head.device.type + dtype = model.config.torch_dtype + + if type(dtype) is str: + if dtype == "float16": dtype = torch.float16 + elif dtype == "bfloat16": dtype = torch.bfloat16 + pass + + # Also disable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = False + pass + + return model + pass + + + @staticmethod + def for_training(model, use_gradient_checkpointing = True): + internal_model = model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + + # Delete all fast inference loras + for param in model.parameters(): + if hasattr(param, "_fast_lora"): + del param._fast_lora + pass + + while hasattr(internal_model, "model"): + internal_model = internal_model.model + internal_model.gradient_checkpointing = use_gradient_checkpointing + internal_model.training = True + pass + if hasattr(internal_model, "training"): + internal_model.training 
= True + pass + + # Also re-enable training for embeddings for NEFTune + if hasattr(model, "get_input_embeddings"): + embeddings = model.get_input_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + if hasattr(model, "get_output_embeddings"): + embeddings = model.get_output_embeddings() + if hasattr(embeddings, "training"): embeddings.training = True + pass + + return model + pass +pass From 64473743d571a8cf3c084dd0c3cd0c086ba2e5e3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 12:18:43 -0700 Subject: [PATCH 0471/1088] Llama 3.2 --- unsloth/models/llama.py | 10 ++++++++-- unsloth/models/loader.py | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a245330108..aedc935d75 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -62,7 +62,10 @@ from ..save import patch_saving_functions import re, os, inspect, math, sys from huggingface_hub.utils._token import get_token - +from transformers import __version__ as transformers_version +from packaging.version import Version +transformers_version = Version(transformers_version) +SUPPORTS_LLAMA32 = transformers_version >= Version("4.46") def original_apply_qkv(self, X): Q = self.q_proj(X) @@ -1383,7 +1386,10 @@ def _fast_generate(*args, **kwargs): pass # For newer HF - kwargs["cache_implementation"] = "dynamic" + if SUPPORTS_LLAMA32: + kwargs["cache_implementation"] = "hybrid" + else: + kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep kwargs["num_logits_to_keep"] = 1 diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 13710eeda1..8afb3a3a82 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -31,6 +31,7 @@ SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") +SUPPORTS_LLAMA32 = 
transformers_version >= Version("4.46") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -137,6 +138,17 @@ def get_model_name(model_name, load_in_4bit = True): pass +LLAMA32_MODEL_NAMES = set(( + "unsloth/Llama-3.2-1B-bnb-4bit", + "unsloth/Llama-3.2-3B-bnb-4bit", + "unsloth/Llama-3.2-1B", + "unsloth/Llama-3.2-3B", + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", + "unsloth/Llama-3.2-3B-Instruct-bnb-4bit", + "unsloth/Llama-3.2-1B-Instruct", + "unsloth/Llama-3.2-3B-Instruct", +)) + class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( @@ -242,7 +254,17 @@ def from_pretrained( f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ f"to obtain the latest transformers build, then restart this session."\ ) + + elif model_name.lower() in LLAMA32_MODEL_NAMES and not SUPPORTS_LLAMA32: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.2.\n"\ + f"The minimum required version is 4.46\n"\ + f'Try `pip install --upgrade "transformers>=4.46"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastLlamaModel + elif model_type == "mistral": dispatch_model = FastMistralModel elif model_type == "gemma": if not SUPPORTS_GEMMA: From b4e4c874b4c54e6864aa6c730df4ff225db04d6e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 12:35:38 -0700 Subject: [PATCH 0472/1088] Fix version --- unsloth/models/llama.py | 2 +- unsloth/models/loader.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index aedc935d75..f5dc02704e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -65,7 +65,7 @@ from transformers import __version__ as transformers_version from packaging.version import Version transformers_version = Version(transformers_version) -SUPPORTS_LLAMA32 = transformers_version >= Version("4.46") +SUPPORTS_LLAMA32 = transformers_version > 
Version("4.45.0") def original_apply_qkv(self, X): Q = self.q_proj(X) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 8afb3a3a82..ee1f680ace 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -31,7 +31,7 @@ SUPPORTS_GEMMA = transformers_version >= Version("4.38") SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") -SUPPORTS_LLAMA32 = transformers_version >= Version("4.46") +SUPPORTS_LLAMA32 = transformers_version > Version("4.45.0") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -264,7 +264,7 @@ def from_pretrained( ) dispatch_model = FastLlamaModel - + elif model_type == "mistral": dispatch_model = FastMistralModel elif model_type == "gemma": if not SUPPORTS_GEMMA: From 539adef0f8aaaed43aaafab5aa952aa933f70c0c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 12:42:24 -0700 Subject: [PATCH 0473/1088] Update llama.py --- unsloth/models/llama.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f5dc02704e..bae6d5b80d 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1387,7 +1387,8 @@ def _fast_generate(*args, **kwargs): # For newer HF if SUPPORTS_LLAMA32: - kwargs["cache_implementation"] = "hybrid" + # kwargs["cache_implementation"] = "hybrid" + pass else: kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep From 1de64fcad2ab246bd9d047680fd554948459d869 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 13:19:01 -0700 Subject: [PATCH 0474/1088] Update _utils.py --- unsloth/models/_utils.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index af7e1eb293..09b448a2f6 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -141,24 +141,24 @@ def 
patch_mistral_nemo_config(config): # ============================================= # Fix KeyError: 'Cache only has 0 layers, attempted to access layer with index 0' -import transformers.cache_utils -if hasattr(transformers.cache_utils, "DynamicCache") and \ - transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": - - source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) - start = source.find("def") - spaces = start*" " - source = source.split("\n") - source = "\n".join(x[start:] for x in source) - where = source.find("raise KeyError") - source = source[:where] + \ - f"if len(self) == 0:\n{spaces}{spaces}"\ - " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ - f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] - source = source.replace("__getitem__", "__cache_utils_getitem__", 1) - exec(source) - transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ -pass +# import transformers.cache_utils +# if hasattr(transformers.cache_utils, "DynamicCache") and \ +# transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": + +# source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) +# start = source.find("def") +# spaces = start*" " +# source = source.split("\n") +# source = "\n".join(x[start:] for x in source) +# where = source.find("raise KeyError") +# source = source[:where] + \ +# f"if len(self) == 0:\n{spaces}{spaces}"\ +# " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ +# f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] +# source = source.replace("__getitem__", "__cache_utils_getitem__", 1) +# exec(source) +# transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ +# pass # 
============================================= # ============================================= From 01c494281781524bdeccacb8950c39104377239c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 13:28:05 -0700 Subject: [PATCH 0475/1088] Update _utils.py --- unsloth/models/_utils.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 09b448a2f6..6144efe485 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -141,24 +141,24 @@ def patch_mistral_nemo_config(config): # ============================================= # Fix KeyError: 'Cache only has 0 layers, attempted to access layer with index 0' -# import transformers.cache_utils -# if hasattr(transformers.cache_utils, "DynamicCache") and \ -# transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": - -# source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) -# start = source.find("def") -# spaces = start*" " -# source = source.split("\n") -# source = "\n".join(x[start:] for x in source) -# where = source.find("raise KeyError") -# source = source[:where] + \ -# f"if len(self) == 0:\n{spaces}{spaces}"\ -# " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ -# f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] -# source = source.replace("__getitem__", "__cache_utils_getitem__", 1) -# exec(source) -# transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ -# pass +import transformers.cache_utils +if hasattr(transformers.cache_utils, "DynamicCache") and \ + transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": + + source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) + start = source.find("def") + spaces = start*" " + source = source.split("\n") + 
source = "\n".join(x[start:] for x in source) + where = source.find("raise KeyError") + # source = source[:where] + \ + # f"if len(self) == 0:\n{spaces}{spaces}"\ + # " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ + # f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] + source = source.replace("__getitem__", "__cache_utils_getitem__", 1) + exec(source) + transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ +pass # ============================================= # ============================================= From cbe9278191efaaded9294dc810c669a86d373fa4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 14:12:16 -0700 Subject: [PATCH 0476/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6144efe485..af7e1eb293 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -151,10 +151,10 @@ def patch_mistral_nemo_config(config): source = source.split("\n") source = "\n".join(x[start:] for x in source) where = source.find("raise KeyError") - # source = source[:where] + \ - # f"if len(self) == 0:\n{spaces}{spaces}"\ - # " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ - # f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] + source = source[:where] + \ + f"if len(self) == 0:\n{spaces}{spaces}"\ + " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ + f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] source = source.replace("__getitem__", "__cache_utils_getitem__", 1) exec(source) transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ From 
f9181beefbcbb26381dd331a5d469085f237ca67 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 14:32:29 -0700 Subject: [PATCH 0477/1088] Update llama.py --- unsloth/models/llama.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bae6d5b80d..2524ec1312 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1386,11 +1386,7 @@ def _fast_generate(*args, **kwargs): pass # For newer HF - if SUPPORTS_LLAMA32: - # kwargs["cache_implementation"] = "hybrid" - pass - else: - kwargs["cache_implementation"] = "dynamic" + kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep kwargs["num_logits_to_keep"] = 1 From 3a10b06bd6e85b06c8f5b7e809dfee3ac698f6c5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 17:44:13 -0700 Subject: [PATCH 0478/1088] Update vision.py --- unsloth/models/vision.py | 62 +++++++--------------------------------- 1 file changed, 10 insertions(+), 52 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 988fda4938..0b8c08a371 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -18,8 +18,14 @@ from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm from ._utils import patch_gradient_checkpointing -from transformers import AutoProcessor, AutoModelForVision2Seq - +from transformers import AutoProcessor +try: + from transformers import MllamaForConditionalGeneration +except: + raise ImportError( + "Unsloth: Please update your transformers version to 4.46.0 for Llama 3.2 support!" 
+ ) +pass class FastVisionModel: @@ -56,7 +62,6 @@ def from_pretrained( ) pass if token is None: token = get_token() - if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = is_bfloat16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) @@ -87,48 +92,6 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - # RoPE Scaling - model_config = AutoConfig.from_pretrained(model_name, token = token) - model_max_seq_length = model_config.max_position_embeddings - - # Check if RoPE Scaling is even allowed - model_function = MODEL_FOR_CAUSAL_LM_MAPPING[model_config.__class__] - has_rope_scaling = False - try: - with open(inspect.getfile(model_function), "r") as file: - has_rope_scaling = "self.config.rope_scaling" in file.read() - except: pass - has_rope_scaling = True - - # If max_seq_length is not specified, use maximum fron config - if max_seq_length is None: - max_seq_length = model_max_seq_length - pass - - if (rope_scaling is None) and (max_seq_length > model_max_seq_length): - - rope_scaling = max_seq_length / model_max_seq_length - - logger.warning_once( - f"Unsloth: {model_name} can only handle sequence lengths of at most "\ - f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ - f"{round(rope_scaling, 3)}, it can be magically be extended to "\ - f"{max_seq_length}!" - ) - - # Warn RoPE scaling isn't allowed - if not has_rope_scaling: - raise RuntimeError( - "However, {model_name} doesn't support RoPE Scaling!\n"\ - "Please file a feature request at https://github.com/unslothai/unsloth." - ) - pass - - rope_scaling = {"type": "linear", "factor": rope_scaling,} - - # Add to kwargs - kwargs["rope_scaling"] = rope_scaling - pass # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
pre_check = check_nvidia() @@ -142,16 +105,11 @@ def from_pretrained( ) pass - # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 - # RoPE Scaling's max_position_embeddings must be updated - max_position_embeddings = max(max_seq_length, model_max_seq_length) - kwargs.pop("attn_implementation", None); # No need since we auto call it - # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config self.pre_patch() - model = AutoModelForVision2Seq.from_pretrained( + model = MllamaForConditionalGeneration.from_pretrained( model_name, device_map = device_map, torch_dtype = dtype, @@ -159,7 +117,7 @@ def from_pretrained( token = token, max_position_embeddings = max_position_embeddings, trust_remote_code = trust_remote_code, - attn_implementation = "eager", + attn_implementation = "sdpa", **kwargs, ) self.post_unpatch() From 990ef6e2b7cd7a11558efc17b7af4b77dd4fde92 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 17:46:29 -0700 Subject: [PATCH 0479/1088] Update llama.py --- unsloth/models/llama.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2524ec1312..b7dc68586c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -843,6 +843,9 @@ def custom_forward(*inputs): pass +global past_key_values_all +past_key_values_all = None + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 def LlamaModel_fast_forward_inference( self, @@ -855,6 +858,8 @@ def LlamaModel_fast_forward_inference( hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) bsz, q_len, hd = hidden_states.shape + global past_key_values_all + past_key_values_all = past_key_values seq_len = past_key_values[0][0].shape[-2] if bsz != 1: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( From 7fac7e288d5ff85b4d2427f8882a7e4deaeacdff Mon 
Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 17:51:36 -0700 Subject: [PATCH 0480/1088] Update llama.py --- unsloth/models/llama.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b7dc68586c..07bb5505ef 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -843,9 +843,6 @@ def custom_forward(*inputs): pass -global past_key_values_all -past_key_values_all = None - # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 def LlamaModel_fast_forward_inference( self, @@ -858,8 +855,8 @@ def LlamaModel_fast_forward_inference( hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) bsz, q_len, hd = hidden_states.shape - global past_key_values_all - past_key_values_all = past_key_values + import os + os.environ["past_key_values_all"] = past_key_values seq_len = past_key_values[0][0].shape[-2] if bsz != 1: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( From 38581963edc63241a5a529d9d08aa542222fbd90 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 17:55:42 -0700 Subject: [PATCH 0481/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 07bb5505ef..c187449902 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1388,7 +1388,7 @@ def _fast_generate(*args, **kwargs): pass # For newer HF - kwargs["cache_implementation"] = "dynamic" + # kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep kwargs["num_logits_to_keep"] = 1 From dc5afb068ffbc4fa3ad39f1b190985a69d01359c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 18:07:21 -0700 Subject: [PATCH 0482/1088] Update llama.py --- unsloth/models/llama.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/llama.py 
b/unsloth/models/llama.py index c187449902..48d1f7ec6e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1409,6 +1409,8 @@ def _fast_generate(*args, **kwargs): # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): + print(args) + print(kwargs) output = generate(*args, **kwargs) pass From bd85f8f0740862a13b7b273195470bfb7dc8237d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 18:10:46 -0700 Subject: [PATCH 0483/1088] Update llama.py --- unsloth/models/llama.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 48d1f7ec6e..a31a485676 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1372,6 +1372,8 @@ def _wrap_fast_inference(generate, device_type, dtype, model): # Wraps inference with bfloat16 / float16 @torch.inference_mode def _fast_generate(*args, **kwargs): + print(args) + print(kwargs) # Set a flag for generation! internal_model = model From c4efa618809d96a0d8243e3a04a1a7898f980ad1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 18:24:30 -0700 Subject: [PATCH 0484/1088] Update llama.py --- unsloth/models/llama.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a31a485676..dd6d805ec4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1384,10 +1384,10 @@ def _fast_generate(*args, **kwargs): internal_model._flag_for_generation = True # Must patch accelerate for Xformers - if accelerate_new_send_to_device is not None: - import accelerate.utils.operations - accelerate.utils.operations.send_to_device = accelerate_new_send_to_device - pass + # if accelerate_new_send_to_device is not None: + # import accelerate.utils.operations + # accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + # pass # For newer HF # kwargs["cache_implementation"] = "dynamic" @@ -1411,8 +1411,6 @@ def 
_fast_generate(*args, **kwargs): # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): - print(args) - print(kwargs) output = generate(*args, **kwargs) pass From 7086c20ca5041bb834da4290913c60b2166f94bd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 19:15:40 -0700 Subject: [PATCH 0485/1088] Update llama.py --- unsloth/models/llama.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index dd6d805ec4..24f9942119 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1392,17 +1392,17 @@ def _fast_generate(*args, **kwargs): # For newer HF # kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep - kwargs["num_logits_to_keep"] = 1 + # kwargs["num_logits_to_keep"] = 1 - # Remove token_type_ids - kwargs.pop("token_type_ids", None) + # # Remove token_type_ids + # kwargs.pop("token_type_ids", None) - # Check pad_token - model_eos_token_id = getattr(model.config, "eos_token_id", None) - if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): - model_eos_token_id = model_eos_token_id[0] + # # Check pad_token + # model_eos_token_id = getattr(model.config, "eos_token_id", None) + # if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): + # model_eos_token_id = model_eos_token_id[0] - kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + # kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) # Set pad token # old_pad_token_id = getattr(model.config, "pad_token_id", None) From a957a7583e4cb818fd22c11d6dd406fb84327b01 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 19:38:01 -0700 Subject: [PATCH 0486/1088] Update llama.py --- unsloth/models/llama.py | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 24f9942119..f5dc02704e 
100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -855,8 +855,6 @@ def LlamaModel_fast_forward_inference( hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) bsz, q_len, hd = hidden_states.shape - import os - os.environ["past_key_values_all"] = past_key_values seq_len = past_key_values[0][0].shape[-2] if bsz != 1: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( @@ -1372,8 +1370,6 @@ def _wrap_fast_inference(generate, device_type, dtype, model): # Wraps inference with bfloat16 / float16 @torch.inference_mode def _fast_generate(*args, **kwargs): - print(args) - print(kwargs) # Set a flag for generation! internal_model = model @@ -1384,25 +1380,28 @@ def _fast_generate(*args, **kwargs): internal_model._flag_for_generation = True # Must patch accelerate for Xformers - # if accelerate_new_send_to_device is not None: - # import accelerate.utils.operations - # accelerate.utils.operations.send_to_device = accelerate_new_send_to_device - # pass + if accelerate_new_send_to_device is not None: + import accelerate.utils.operations + accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + pass # For newer HF - # kwargs["cache_implementation"] = "dynamic" + if SUPPORTS_LLAMA32: + kwargs["cache_implementation"] = "hybrid" + else: + kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep - # kwargs["num_logits_to_keep"] = 1 + kwargs["num_logits_to_keep"] = 1 - # # Remove token_type_ids - # kwargs.pop("token_type_ids", None) + # Remove token_type_ids + kwargs.pop("token_type_ids", None) - # # Check pad_token - # model_eos_token_id = getattr(model.config, "eos_token_id", None) - # if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): - # model_eos_token_id = model_eos_token_id[0] + # Check pad_token + model_eos_token_id = getattr(model.config, "eos_token_id", None) + if model_eos_token_id is not None and hasattr(model_eos_token_id, 
"__iter__"): + model_eos_token_id = model_eos_token_id[0] - # kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) # Set pad token # old_pad_token_id = getattr(model.config, "pad_token_id", None) From 330a7f4682daf473304bc565a76784f0adcca56e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 22:12:21 -0700 Subject: [PATCH 0487/1088] Update llama.py --- unsloth/models/llama.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bae6d5b80d..2524ec1312 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1386,11 +1386,7 @@ def _fast_generate(*args, **kwargs): pass # For newer HF - if SUPPORTS_LLAMA32: - # kwargs["cache_implementation"] = "hybrid" - pass - else: - kwargs["cache_implementation"] = "dynamic" + kwargs["cache_implementation"] = "dynamic" # For num_logits_to_keep kwargs["num_logits_to_keep"] = 1 From f00eaf1b09620528e362eb3a828699d0c2a65fc4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 22:56:41 -0700 Subject: [PATCH 0488/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index af7e1eb293..309c0d913e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -306,6 +306,14 @@ def patch_mistral_nemo_config(config): accelerate_new_send_to_device = _fixed_send_to_device pass pass + +# Transformers 4.46 breaks dynamic caching. 
This is a hack +import transformers.generation.configuration_utils +if hasattr(transformers.generation.configuration_utils, "ALL_CACHE_IMPLEMENTATIONS"): + if type(transformers.generation.configuration_utils.ALL_CACHE_IMPLEMENTATIONS) is list: + transformers.generation.configuration_utils.ALL_CACHE_IMPLEMENTATIONS.append("dynamic") + pass +pass # ============================================= # ============================================= From 1c3124fc10f1d8a27f6806635908f749343f93e9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 23:00:09 -0700 Subject: [PATCH 0489/1088] Remove version checks --- unsloth/models/llama.py | 5 +---- unsloth/models/loader.py | 19 ------------------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2524ec1312..a245330108 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -62,10 +62,7 @@ from ..save import patch_saving_functions import re, os, inspect, math, sys from huggingface_hub.utils._token import get_token -from transformers import __version__ as transformers_version -from packaging.version import Version -transformers_version = Version(transformers_version) -SUPPORTS_LLAMA32 = transformers_version > Version("4.45.0") + def original_apply_qkv(self, X): Q = self.q_proj(X) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index ee1f680ace..cb03088b64 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -138,17 +138,6 @@ def get_model_name(model_name, load_in_4bit = True): pass -LLAMA32_MODEL_NAMES = set(( - "unsloth/Llama-3.2-1B-bnb-4bit", - "unsloth/Llama-3.2-3B-bnb-4bit", - "unsloth/Llama-3.2-1B", - "unsloth/Llama-3.2-3B", - "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", - "unsloth/Llama-3.2-3B-Instruct-bnb-4bit", - "unsloth/Llama-3.2-1B-Instruct", - "unsloth/Llama-3.2-3B-Instruct", -)) - class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( @@ -255,14 +244,6 @@ def 
from_pretrained( f"to obtain the latest transformers build, then restart this session."\ ) - elif model_name.lower() in LLAMA32_MODEL_NAMES and not SUPPORTS_LLAMA32: - raise ImportError( - f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.2.\n"\ - f"The minimum required version is 4.46\n"\ - f'Try `pip install --upgrade "transformers>=4.46"`\n'\ - f"to obtain the latest transformers build, then restart this session."\ - ) - dispatch_model = FastLlamaModel elif model_type == "mistral": dispatch_model = FastMistralModel From a1ca724f5d362c7d059a650921e152b20ed3fb09 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 23:13:49 -0700 Subject: [PATCH 0490/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b61908a690..6cec9627ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.43.2", + "transformers>=4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From 2c5a3995c8b170685383bc8673924557ee621afb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Sep 2024 23:47:15 -0700 Subject: [PATCH 0491/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6cec9627ba..26761ebc58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -212,7 +212,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.43.2", + "transformers>=4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From b0f3dc087a5ba216439667f8177772eb6bb75431 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Sep 2024 00:02:15 -0700 Subject: [PATCH 0492/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f8944c5a77..0b6ebb9f89 100644 --- a/README.md +++ b/README.md @@ 
-32,6 +32,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | - **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- Run [Llama 3.2 1B 3B notebook](https://colab.research.google.com/drive/1hoHFpf7ROqk_oZHzxQdfPW9yvTxnvItq?usp=sharing) and [Llama 3.2 conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) - Run [Llama 3.1 conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language From 2462e5c51b2cf954eee340c75d30be6e0812304c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Sep 2024 00:05:38 -0700 Subject: [PATCH 0493/1088] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0b6ebb9f89..12fd793e5a 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3.1, Mistral, Phi-3.5 & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3.2, Mistral, Phi-3.5 & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -39,9 +39,9 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.2 Conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! -- 📣 NEW! [Llama 3.1 Conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported - 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! - 📣 NEW! 
[Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported From 7c8f227681180bd4f57bd02f44e9d4636a38ee3f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Sep 2024 00:12:42 -0700 Subject: [PATCH 0494/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 12fd793e5a..2b8e79f021 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News - 📣 NEW! [Llama 3.2 Conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! +- 📣 NEW! [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook) and [Llama 3.2 Kaggle conversational notebook](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-2-1b-3b-conversational-unsloth/notebook) - 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! 
[Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported From 3dff3b38687c92cfbe80a62324eadccb4672206e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Sep 2024 01:23:40 -0700 Subject: [PATCH 0495/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 477d993e69..04690d3566 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1180,11 +1180,6 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "if self._inner_training_loop.__name__ != '_fast_inner_training_loop':\n"\ - " raise RuntimeError(\n"\ - " 'Please do not edit specific areas of the Unsloth codebase or you will get CUDA segfaults.'\n"\ - " )\n"\ - "pass\n"\ "import subprocess, re, gc, numpy as np\n"\ "a = np.array([0,])\n"\ "try:\n"\ From c3f4e9a87d964ecee1efd9963f497119edbefaab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 27 Sep 2024 01:36:45 -0700 Subject: [PATCH 0496/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 26761ebc58..59ef1b8aab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.45.0", + "transformers>=4.45.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -212,7 +212,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.45.0", + "transformers>=4.45.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From a0acecb50f39d9b62a144684be9ed9e3c3755a1f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Sep 2024 01:42:58 -0700 Subject: [PATCH 0497/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 
cb03088b64..0ac9b02743 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -192,7 +192,7 @@ def from_pretrained( pass # Cannot be both! - if is_model and is_peft: + if (is_model and is_peft) and not SUPPORTS_LLAMA32: raise RuntimeError( "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ "You have 2 files `config.json` and `adapter_config.json`.\n"\ From c8cd804b707bbe41ca2be0499cf8ce75cfa05158 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Sep 2024 23:13:44 -0700 Subject: [PATCH 0498/1088] Update loader.py --- unsloth/models/loader.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0ac9b02743..2a89c06c21 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -23,6 +23,7 @@ from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit import os from huggingface_hub.utils._token import get_token +from huggingface_hub import HfFileSystem # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! from packaging.version import Version @@ -191,14 +192,28 @@ def from_pretrained( is_peft = False pass - # Cannot be both! - if (is_model and is_peft) and not SUPPORTS_LLAMA32: + # Both config.json and adapter_config.json should not exist! + + # Old transformers versions check + both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + + if SUPPORTS_LLAMA32: + # New transformers need to check manually. + files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + if sum(x.endswith(("adapter_config.json", "config.json")) for x in files) >= 2: + both_exist = True + pass + pass + + # Error out if both LoRA and normal model config exists. 
+ if both_exist: raise RuntimeError( "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ "You have 2 files `config.json` and `adapter_config.json`.\n"\ "We must only allow one config file.\n"\ "Please separate the LoRA and base models to 2 repos." ) + elif not is_model and not is_peft: error = autoconfig_error or peft_error # Old transformers version From d96afb3b385d781d70e5f03c4aaa565a3f5dbd10 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Sep 2024 23:15:22 -0700 Subject: [PATCH 0499/1088] Update loader.py --- unsloth/models/loader.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 2a89c06c21..9ea6c308e3 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -197,12 +197,14 @@ def from_pretrained( # Old transformers versions check both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + print(both_exist) if SUPPORTS_LLAMA32: # New transformers need to check manually. files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) if sum(x.endswith(("adapter_config.json", "config.json")) for x in files) >= 2: both_exist = True pass + print(both_exist) pass # Error out if both LoRA and normal model config exists. From e7b0c9e5001d812f800542f600cbd23c4b5c7d95 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Sep 2024 23:22:05 -0700 Subject: [PATCH 0500/1088] Update loader.py --- unsloth/models/loader.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 9ea6c308e3..61e8132731 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -196,15 +196,14 @@ def from_pretrained( # Old transformers versions check both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 - - print(both_exist) + if SUPPORTS_LLAMA32: # New transformers need to check manually. 
files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) - if sum(x.endswith(("adapter_config.json", "config.json")) for x in files) >= 2: + files = (os.path.split(x)[-1] for x in files) + if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: both_exist = True pass - print(both_exist) pass # Error out if both LoRA and normal model config exists. From 96e90c5578fc1551fcbe36aa1bdc642924863105 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 30 Sep 2024 02:09:15 -0700 Subject: [PATCH 0501/1088] Dependencies --- pyproject.toml | 8 ++++---- unsloth/tokenizer_utils.py | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 59ef1b8aab..1e8fd7e236 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,8 +42,8 @@ huggingface = [ "psutil", "wheel>=0.42.0", "numpy", - "accelerate>=0.26.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "accelerate>=0.34.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -224,8 +224,8 @@ colab-new = [ "hf_transfer", ] colab-no-deps = [ - "accelerate>=0.26.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "accelerate>=0.34.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", "peft>=0.7.1", "xformers<0.0.27", "bitsandbytes>=0.43.3", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 04690d3566..cdce372b50 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1135,7 +1135,39 @@ def check_nvidia(): import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * from transformers.trainer import * -from trl.trainer.sft_trainer import neftune_post_forward_hook +try: + from trl.trainer.sft_trainer import neftune_post_forward_hook +except: + def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. 
Note this works only for + torch.nn.Embedding layers. This method is slightly adapted from the original source code + that can be found here: https://github.com/neelsjain/NEFTune + + Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. Note that you need to set + `module.neftune_noise_alpha` to the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). + """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + pass +pass + def patch_sft_trainer_tokenizer(): """ From a9533a0eb64a4457206fcea1dfc7d59dfc649bda Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 30 Sep 2024 02:48:31 -0700 Subject: [PATCH 0502/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1e8fd7e236..9499d771c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.45.1", + "transformers<4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -212,7 +212,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.45.1", + "transformers<4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From 5e02fb73d17f93df82254ccefb17f4723eeee76e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 30 Sep 2024 02:51:32 -0700 Subject: [PATCH 0503/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 309c0d913e..b14bb39144 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9.post3" +__version__ = "2024.9.post4" __all__ = [ "prepare_model_for_kbit_training", From 597e1894f60b9157490d99dc140714899bcc0d14 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 30 Sep 2024 03:03:01 -0700 Subject: [PATCH 0504/1088] Fix merges (#1079) * Layernorm * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Update layernorm.py * Patch layernorm * Update layernorm.py * RMS Layernorm * Update rms_layernorm.py * Causal LM * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update layernorm.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Llama 3.2 * Update _utils.py * Update _utils.py * Update _utils.py * Update llama.py * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update loader.py * Update loader.py * Update loader.py * Dependencies * Update pyproject.toml * Update _utils.py --- pyproject.toml | 12 ++++---- unsloth/models/_utils.py | 2 +- unsloth/models/loader.py | 20 ++++++++++-- unsloth/models/vision.py | 62 ++++++-------------------------------- unsloth/tokenizer_utils.py | 34 ++++++++++++++++++++- 5 files changed, 68 insertions(+), 62 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 59ef1b8aab..9499d771c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,15 +35,15 @@ exclude = 
["images*"] huggingface = [ "packaging", "tyro", - "transformers>=4.45.1", + "transformers<4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", "psutil", "wheel>=0.42.0", "numpy", - "accelerate>=0.26.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "accelerate>=0.34.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -212,7 +212,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers>=4.45.1", + "transformers<4.45.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -224,8 +224,8 @@ colab-new = [ "hf_transfer", ] colab-no-deps = [ - "accelerate>=0.26.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "accelerate>=0.34.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", "peft>=0.7.1", "xformers<0.0.27", "bitsandbytes>=0.43.3", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 309c0d913e..b14bb39144 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9.post3" +__version__ = "2024.9.post4" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0ac9b02743..61e8132731 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -23,6 +23,7 @@ from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit import os from huggingface_hub.utils._token import get_token +from huggingface_hub import HfFileSystem # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! from packaging.version import Version @@ -191,14 +192,29 @@ def from_pretrained( is_peft = False pass - # Cannot be both! - if (is_model and is_peft) and not SUPPORTS_LLAMA32: + # Both config.json and adapter_config.json should not exist! 
+ + # Old transformers versions check + both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + + if SUPPORTS_LLAMA32: + # New transformers need to check manually. + files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + files = (os.path.split(x)[-1] for x in files) + if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: + both_exist = True + pass + pass + + # Error out if both LoRA and normal model config exists. + if both_exist: raise RuntimeError( "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ "You have 2 files `config.json` and `adapter_config.json`.\n"\ "We must only allow one config file.\n"\ "Please separate the LoRA and base models to 2 repos." ) + elif not is_model and not is_peft: error = autoconfig_error or peft_error # Old transformers version diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 988fda4938..0b8c08a371 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -18,8 +18,14 @@ from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm from ._utils import patch_gradient_checkpointing -from transformers import AutoProcessor, AutoModelForVision2Seq - +from transformers import AutoProcessor +try: + from transformers import MllamaForConditionalGeneration +except: + raise ImportError( + "Unsloth: Please update your transformers version to 4.46.0 for Llama 3.2 support!" 
+ ) +pass class FastVisionModel: @@ -56,7 +62,6 @@ def from_pretrained( ) pass if token is None: token = get_token() - if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = is_bfloat16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) @@ -87,48 +92,6 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - # RoPE Scaling - model_config = AutoConfig.from_pretrained(model_name, token = token) - model_max_seq_length = model_config.max_position_embeddings - - # Check if RoPE Scaling is even allowed - model_function = MODEL_FOR_CAUSAL_LM_MAPPING[model_config.__class__] - has_rope_scaling = False - try: - with open(inspect.getfile(model_function), "r") as file: - has_rope_scaling = "self.config.rope_scaling" in file.read() - except: pass - has_rope_scaling = True - - # If max_seq_length is not specified, use maximum fron config - if max_seq_length is None: - max_seq_length = model_max_seq_length - pass - - if (rope_scaling is None) and (max_seq_length > model_max_seq_length): - - rope_scaling = max_seq_length / model_max_seq_length - - logger.warning_once( - f"Unsloth: {model_name} can only handle sequence lengths of at most "\ - f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ - f"{round(rope_scaling, 3)}, it can be magically be extended to "\ - f"{max_seq_length}!" - ) - - # Warn RoPE scaling isn't allowed - if not has_rope_scaling: - raise RuntimeError( - "However, {model_name} doesn't support RoPE Scaling!\n"\ - "Please file a feature request at https://github.com/unslothai/unsloth." - ) - pass - - rope_scaling = {"type": "linear", "factor": rope_scaling,} - - # Add to kwargs - kwargs["rope_scaling"] = rope_scaling - pass # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
pre_check = check_nvidia() @@ -142,16 +105,11 @@ def from_pretrained( ) pass - # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 - # RoPE Scaling's max_position_embeddings must be updated - max_position_embeddings = max(max_seq_length, model_max_seq_length) - kwargs.pop("attn_implementation", None); # No need since we auto call it - # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config self.pre_patch() - model = AutoModelForVision2Seq.from_pretrained( + model = MllamaForConditionalGeneration.from_pretrained( model_name, device_map = device_map, torch_dtype = dtype, @@ -159,7 +117,7 @@ def from_pretrained( token = token, max_position_embeddings = max_position_embeddings, trust_remote_code = trust_remote_code, - attn_implementation = "eager", + attn_implementation = "sdpa", **kwargs, ) self.post_unpatch() diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 04690d3566..cdce372b50 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1135,7 +1135,39 @@ def check_nvidia(): import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * from transformers.trainer import * -from trl.trainer.sft_trainer import neftune_post_forward_hook +try: + from trl.trainer.sft_trainer import neftune_post_forward_hook +except: + def neftune_post_forward_hook(module, input, output): + """ + Implements the NEFTune forward pass for the model using forward hooks. Note this works only for + torch.nn.Embedding layers. This method is slightly adapted from the original source code + that can be found here: https://github.com/neelsjain/NEFTune + + Simply add it to your model as follows: + ```python + model = ... + model.embed_tokens.neftune_noise_alpha = 0.1 + model.embed_tokens.register_forward_hook(neftune_post_forward_hook) + ``` + + Args: + module (`torch.nn.Module`): + The embedding module where the hook is attached. 
Note that you need to set + `module.neftune_noise_alpha` to the desired noise alpha value. + input (`torch.Tensor`): + The input tensor to the model. + output (`torch.Tensor`): + The output tensor of the model (i.e. the embeddings). + """ + if module.training: + dims = torch.tensor(output.size(1) * output.size(2)) + mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) + output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) + return output + pass +pass + def patch_sft_trainer_tokenizer(): """ From 0376b81364ad036d8fc2236c9e9fc13c9b1afe3d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 30 Sep 2024 23:08:46 -0700 Subject: [PATCH 0505/1088] Update chat_templates.py --- unsloth/chat_templates.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 937cbd06ea..9b5f9ff28c 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1606,6 +1606,9 @@ def construct_chat_template( \ ollama_eos = get_ollama_eos_tokens(tokenizer, extra_eos_tokens) ollama_eos = '\n'.join(f'PARAMETER stop "{eos}"' for eos in ollama_eos) + # Add temperature and min_p to counteract gibberish + ollama_eos += "\nPARAMETER temperature 1.5\nPARAMETER min_p 0.1" + # Ollama modelfile part = '"""' modelfile = 'FROM {__FILE_LOCATION__}\n\n'\ From 2efb9e151b15af7ed804248fd585d232f914cb76 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 1 Oct 2024 00:14:52 -0700 Subject: [PATCH 0506/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index cdce372b50..df36552b4b 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1116,6 +1116,40 @@ def add_new_tokens( pass +@torch.inference_mode +def fix_zero_training_loss(model, tokenizer, train_dataset): + """ + Sometimes the labels get masked by all -100s, causing the loss + 
to be 0. We check for this! + """ + if len(train_dataset) == 0: return + + row = train_dataset[0] + if type(row) is dict and "labels" in row: + + # Check the first 100 rows + seen_bad = 0 + seen_good = 0 + for i, row in enumerate(train_dataset): + try: check_tokens = list(set(row["labels"])) + except: continue + if len(check_tokens) == 1 and check_tokens[0] == -100: seen_bad += 1 + else: seen_good += 1 + if i >= 100: break + pass + + # Check ratio + if seen_bad / (seen_bad + seen_good) >= 0.9: + logger.warning( + "Unsloth: Most labels in your dataset are -100. Training losses will be 0.\n"\ + "Are you usre you used `train_on_responses_only` correctly?\n"\ + "Or did you mask our tokens incorrectly? Maybe this is intended?" + ) + pass + pass +pass + + def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're working on it! output = np.array([0,]) @@ -1228,7 +1262,8 @@ def patch_sft_trainer_tokenizer(): " torch.cuda.empty_cache()\n"\ "pass\n"\ "\n"\ - "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n" + "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n"\ + "fix_zero_training_loss(self.model, self.tokenizer, self.train_dataset)\n\n" # Add NEFTune since it doesn't seem to work?? 
We need to manually inject it check_text += \ From 75d1006ad81c16b57188665b6baa80b7d0e30d32 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 1 Oct 2024 00:20:01 -0700 Subject: [PATCH 0507/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index df36552b4b..5efe610a10 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -42,15 +42,13 @@ IGNORED_TOKENIZER_NAMES = [ - # "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", - # "unsloth/Mistral-Nemo-Instruct-2407", - # "mistralai/Mistral-Nemo-Instruct-2407", - # "unsloth/Mistral-Nemo-Base-2407-bnb-4bit", - # "unsloth/Mistral-Nemo-Base-2407", - # "mistralai/Mistral-Nemo-Base-2407", + # Qwen Coder did not train on tool calling. Math did! + "unsloth/Qwen2.5-Coder-1.5B-Instruct", + "unsloth/Qwen2.5-Coder-7B-Instruct", ] IGNORED_TOKENIZER_NAMES = frozenset( - [x.lower() for x in IGNORED_TOKENIZER_NAMES] + [x.lower() for x in IGNORED_TOKENIZER_NAMES] + \ + [x.lower()+"-bnb-4bit" for x in IGNORED_TOKENIZER_NAMES] ) # Check environments From 2cab804e55a734ef4efb02a55914f9ec0bf300c7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 1 Oct 2024 00:35:54 -0700 Subject: [PATCH 0508/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 5efe610a10..196e496188 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1140,7 +1140,7 @@ def fix_zero_training_loss(model, tokenizer, train_dataset): if seen_bad / (seen_bad + seen_good) >= 0.9: logger.warning( "Unsloth: Most labels in your dataset are -100. Training losses will be 0.\n"\ - "Are you usre you used `train_on_responses_only` correctly?\n"\ + "For example, are you sure you used `train_on_responses_only` correctly?\n"\ "Or did you mask our tokens incorrectly? 
Maybe this is intended?" ) pass From 39bff2f034c5fb6c21a1ed6004147215c676a6aa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 1 Oct 2024 00:40:17 -0700 Subject: [PATCH 0509/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2b8e79f021..51342fdf27 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| +| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) | 2x faster | 60% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | | **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | From 79a2112ca4a775ce0b3cb75f5074136cb54ea6df Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 5 Oct 2024 17:21:48 -0700 Subject: [PATCH 0510/1088] Reload --- pyproject.toml | 4 ++-- unsloth/models/loader.py | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9499d771c8..b61437e321 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ exclude = ["images*"] huggingface = [ "packaging", "tyro", - "transformers<4.45.0", + "transformers>=4.44.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -212,7 +212,7 @@ colab-ampere-torch220 = [ colab-new = [ "packaging", "tyro", - "transformers<4.45.0", + "transformers>=4.44.2", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/models/loader.py 
b/unsloth/models/loader.py index 61e8132731..5774c2242e 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -197,12 +197,19 @@ def from_pretrained( # Old transformers versions check both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + # New transformers need to check manually. if SUPPORTS_LLAMA32: - # New transformers need to check manually. - files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) - files = (os.path.split(x)[-1] for x in files) - if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: - both_exist = True + # Check if folder exists locally + if os.path.isdir(model_name): + exist_adapter_config = os.path.exists(os.path.join(model_name, "adapter_config.json")) + exist_config = os.path.exists(os.path.join(model_name, "config.json")) + both_exist = exist_adapter_config and exist_config + else: + files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + files = (os.path.split(x)[-1] for x in files) + if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: + both_exist = True + pass pass pass From 6d4fa754d93ea2bdabed2559b4ec2f90b0e49bb4 Mon Sep 17 00:00:00 2001 From: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> Date: Fri, 11 Oct 2024 08:20:34 +0200 Subject: [PATCH 0511/1088] Handle absolute paths using pathlib (#1120) --- unsloth/save.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index b875e9b2d1..3ba62b4688 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -30,6 +30,7 @@ from .tokenizer_utils import fix_sentencepiece_gguf from huggingface_hub import HfApi from huggingface_hub.utils._token import get_token +from pathlib import Path __all__ = [ "print_quantization_methods", @@ -1058,9 +1059,9 @@ def save_to_gguf( if n_cpus is None: n_cpus = 1 n_cpus *= 2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model - - 
final_location = f"./{model_directory}/unsloth.{first_conversion.upper()}.gguf" + final_location = (Path(model_directory) / f"unsloth.{first_conversion.upper()}.gguf").absolute() + print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ "This will take 3 minutes...") @@ -1128,7 +1129,7 @@ def save_to_gguf( for quant_method in quantization_method: if quant_method != first_conversion: print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. This will take 20 minutes...") - final_location = f"./{model_directory}/unsloth.{quant_method.upper()}.gguf" + final_location = (Path(model_directory) / f"unsloth.{quant_method.upper()}.gguf").absolute() command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" From 13d434c20828885514da536237d8d1415e0a204e Mon Sep 17 00:00:00 2001 From: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> Date: Fri, 11 Oct 2024 08:21:27 +0200 Subject: [PATCH 0512/1088] Only remove folder in sentenpiece check if it was created (#1121) --- unsloth/save.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 3ba62b4688..3e82a5bcdd 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -101,14 +101,17 @@ def check_if_sentencepiece_model(model, temporary_location = "_unsloth_sentencep temp_tokenizer = model._saved_temp_tokenizer sentencepiece_model = False file_location = os.path.join(temporary_location, temp_tokenizer.name_or_path) + created_folder = False if not os.path.exists(file_location): + created_folder = True os.makedirs(file_location) pass temp_tokenizer.save_pretrained(file_location) if os.path.isfile(f"{file_location}/tokenizer.model"): sentencepiece_model = True pass - shutil.rmtree(file_location, ignore_errors = True) + if created_folder: + shutil.rmtree(file_location, ignore_errors = True) return sentencepiece_model pass 
From f397ad146b8a53e363c0292fb9a8aadc1c7a5b60 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 10 Oct 2024 23:22:17 -0700 Subject: [PATCH 0513/1088] Update save.py --- unsloth/save.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 3e82a5bcdd..7f523faadc 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1063,7 +1063,7 @@ def save_to_gguf( n_cpus *= 2 # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model - final_location = (Path(model_directory) / f"unsloth.{first_conversion.upper()}.gguf").absolute() + final_location = str((Path(model_directory) / f"unsloth.{first_conversion.upper()}.gguf").absolute()) print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ @@ -1132,7 +1132,7 @@ def save_to_gguf( for quant_method in quantization_method: if quant_method != first_conversion: print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. 
This will take 20 minutes...") - final_location = (Path(model_directory) / f"unsloth.{quant_method.upper()}.gguf").absolute() + final_location = str((Path(model_directory) / f"unsloth.{quant_method.upper()}.gguf").absolute()) command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" From a2f4c9793ecf829ede2cb64f2ca7a909ce3b0884 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 11 Oct 2024 00:00:06 -0700 Subject: [PATCH 0514/1088] Update save.py --- unsloth/save.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 7f523faadc..dce30c4c71 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1485,9 +1485,23 @@ def create_ollama_modelfile(tokenizer, gguf_location): modelfile = getattr(tokenizer, "_ollama_modelfile", None) if modelfile is None: return None + FILE_LOCATION_REPLACER = "⚫@✅#🦥__FILE_LOCATION__⚡@🦥#⛵" + EOS_TOKEN_REPLACER = "⚫@✅#🦥__EOS_TOKEN__⚡@🦥#⛵" + LEFT_BRACKET_REPLACER = "⚫@✅#🦥" + RIGHT_BRACKET_REPLACER = "⚡@🦥#⛵" + + # Fixes https://github.com/unslothai/unsloth/issues/1087 + # We must convert all {'s and }'s but keep {__FILE_LOCATION__} intact + modelfile = modelfile\ + .replace("{__FILE_LOCATION__}", FILE_LOCATION_REPLACER)\ + .replace("{__EOS_TOKEN__}", EOS_TOKEN_REPLACER)\ + .replace("{", LEFT_BRACKET_REPLACER)\ + .replace("}", RIGHT_BRACKET_REPLACER) + + # Revert {__FILE_LOCATION__} back modelfile = modelfile\ - .replace("{{", "⚫@✅#🦥")\ - .replace("}}", "⚡@🦥#⛵") + .replace(FILE_LOCATION_REPLACER, "{__FILE_LOCATION__}")\ + .replace(EOS_TOKEN_REPLACER, "{__EOS_TOKEN__}") if "__EOS_TOKEN__" in modelfile: modelfile = modelfile.format( @@ -1501,8 +1515,8 @@ def create_ollama_modelfile(tokenizer, gguf_location): pass modelfile = modelfile\ - .replace("⚫@✅#🦥", "{{")\ - .replace("⚡@🦥#⛵", "}}")\ + .replace("⚫@✅#🦥", "{")\ + .replace("⚡@🦥#⛵", "}")\ .rstrip() return modelfile From 38663b01f5dd0e610b12475bd95b144303cff539 Mon Sep 17 
00:00:00 2001 From: Daniel Han Date: Mon, 14 Oct 2024 19:17:35 -0700 Subject: [PATCH 0515/1088] Gradient Accumulation Fix (#1134) * Unsloth Zoo * Update trainer.py * Update trainer.py * Update cross_entropy_loss.py * n_items * Update llama.py * kwargs * Remove extraneous f prefixes (#1133) Co-authored-by: Emil Sadek * Update __init__.py --------- Co-authored-by: Emil Sadek Co-authored-by: Emil Sadek --- pyproject.toml | 2 + unsloth/__init__.py | 9 +- unsloth/chat_templates.py | 198 +-------------- unsloth/kernels/cross_entropy_loss.py | 5 +- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 7 +- unsloth/save.py | 8 +- unsloth/tokenizer_utils.py | 352 +------------------------- unsloth/trainer.py | 2 + 9 files changed, 41 insertions(+), 544 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b61437e321..455f8477e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ + "unsloth_zoo", "packaging", "tyro", "transformers>=4.44.2", @@ -210,6 +211,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ + "unsloth_zoo", "packaging", "tyro", "transformers>=4.44.2", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index e7db41ce2c..abee9c9e04 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -27,6 +27,13 @@ # pass # pass +# Check for unsloth_zoo +try: + import unsloth_zoo +except: + raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") +pass + # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! 
@@ -124,7 +131,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Try linking cuda folder, or everything in local if len(possible_cudas) == 0: - os.system(f"ldconfig /usr/local/") + os.system("ldconfig /usr/local/") else: find_number = re.compile(r"([\d\.]{2,})") latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 9b5f9ff28c..cab6130dd1 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -35,7 +35,9 @@ from .tokenizer_utils import * from .models._utils import patch_tokenizer import re - +from unsloth_zoo.dataset_utils import ( + train_on_responses_only, +) CHAT_TEMPLATES = {} # =========================================== Unsloth @@ -910,7 +912,7 @@ def get_chat_template( # Check fast tokenizer if not is_fast_tokenizer: print( - f"Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ + "Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ "Please log a Github issue if you want this as a new feature!\n"\ "Your chat template will still work, but it won't add or edit tokens." 
) @@ -1236,7 +1238,7 @@ def __convert_to_sharegpt__(examples): n_extensions = max(conversation_extension-1, 0) if n_extensions == 0: return dataset - dataset = dataset.rename_columns({"conversations" : f"conversations0"}) + dataset = dataset.rename_columns({"conversations" : "conversations0"}) all_shuffled = [dataset] for j in range(1, n_extensions+1): shuffled = dataset.shuffle(seed = random_state+j).rename_columns({"conversations0" : f"conversations{j}"}) @@ -1254,7 +1256,7 @@ def __convert_to_sharegpt__(examples): f"in zip({', '.join(f'conversations{j}__' for j in range(n_extensions))}):\n" function += f"{' '*8}convos.append("\ f"{'+'.join(f'conversations{j}' for j in range(n_extensions))})\n" - function += f"{' '*4}return " + "{ " + f"'conversations' : convos" + " }" + function += f"{' '*4}return " + "{ " + "'conversations' : convos" + " }" # Map function exec(function, globals()) @@ -1812,194 +1814,6 @@ def formatting_prompts_func(examples): pass -# From https://www.geeksforgeeks.org/longest-common-substring-array-strings/ -# Longest Common Substring in an Array of Strings -def _longest_common_substring(arr): - n = len(arr) - s = arr[0] - l = len(s) - res = "" - for i in range(l): - for j in range(i + 1, l + 1): - stem = s[i:j] - k = 1 - for k in range(1, n): - if stem not in arr[k]: - break - if (k + 1 == n and len(res) < len(stem)): - res = stem - return res -pass - - -def _find_common_token_ids(component, tokenizer): - """ - \n### User:\n\n - \n\n### User:\n\n - etc - we need to find the middle most repeatted part. - Tokenizers can tokenize newlines or spaces as 1 token! 
- """ - right_text = "" - if component.endswith (" "): right_text = " " - elif component.endswith("\n"): right_text = "\n" - left_text = "" - if component.startswith (" "): left_text = " " - elif component.startswith("\n"): left_text = "\n" - stripped = component.strip() - - # Add current pieces and also newlines - all_input_ids = [] - for left in range(3): - for right in range(3): - x = left*left_text + stripped + right*right_text - x = tokenizer(x, add_special_tokens = False).input_ids - all_input_ids.append(x) - - x = left*"\n" + stripped + right*"\n" - x = tokenizer(x, add_special_tokens = False).input_ids - all_input_ids.append(x) - pass - pass - substring = _longest_common_substring([str(x + [0]) for x in all_input_ids]) - substring = substring.split(", ")[:-1] - substring = [int(x) for x in substring] - - # Also get rest of tokenized string - original = tokenizer(component, add_special_tokens = False).input_ids - # Get optional left and right - for j in range(len(original)): - if original[j : j + len(substring)] == substring: break - optional_left = original[:j] - optional_right = original[j+len(substring):] - return substring, optional_left, optional_right -pass - - -def train_on_responses_only( - trainer, - instruction_part = None, - response_part = None, -): - """ - Trains only on responses and not on the instruction by masking out - the labels with -100 for the instruction part. 
- """ - tokenizer = trainer.tokenizer - - if not hasattr(tokenizer, "_unsloth_input_part") or \ - not hasattr(tokenizer, "_unsloth_output_part"): - - if instruction_part is None or response_part is None: - raise ValueError("Unsloth: instruction_part and response_part must be given!") - pass - elif (instruction_part is not None or response_part is not None) and \ - (hasattr(tokenizer, "_unsloth_input_part") or hasattr(tokenizer, "_unsloth_output_part")): - - raise ValueError("Unsloth: Your tokenizer already has instruction and response parts set - do not give custom ones!") - else: - instruction_part = tokenizer._unsloth_input_part - response_part = tokenizer._unsloth_output_part - pass - - # Get most common tokens since tokenizers can tokenize stuff differently! - Q_must, Q_left, Q_right = _find_common_token_ids(instruction_part, tokenizer) - A_must, A_left, A_right = _find_common_token_ids(response_part, tokenizer) - - # Store some temporary stuff - A_first = A_must[0] - len_A_must = len(A_must) - A_left_reversed = A_left[::-1] - A_right_forward = A_right - - Q_first = Q_must[0] - len_Q_must = len(Q_must) - Q_left_reversed = Q_left[::-1] - Q_right_forward = Q_right - - def _train_on_responses_only(examples): - input_ids_ = examples["input_ids"] - all_labels = [] - - for input_ids in input_ids_: - n = len(input_ids) - labels = [-100] * n - n_minus_1 = n - 1 - j = 0 - while j < n: - # Find - if (input_ids[j] == A_first) and \ - (input_ids[j : (k := j + len_A_must)] == A_must): - - # Now backtrack to get previous optional tokens - for optional_left in A_left_reversed: - if j < 1: break - if optional_left == input_ids[j-1]: j -= 1 - else: break - pass - # And forwards look as well - for optional_right in A_right_forward: - if k >= n_minus_1: break - if optional_right == input_ids[k+1]: k += 1 - else: break - pass - # assistant_j = j - assistant_k = k - - j = assistant_k - # Given , now find next user - while j < n: - # Find - # Also accept last final item if assistant 
is the last turn - if (j == n_minus_1) or \ - ((input_ids[j] == Q_first) and \ - (input_ids[j : (k := j + len_Q_must)] == Q_must)): - - # Now backtrack to get previous optional tokens - for optional_left in Q_left_reversed: - if j < 1: break - if optional_left == input_ids[j-1]: j -= 1 - else: break - pass - # And forwards look as well - for optional_right in Q_right_forward: - if k >= n_minus_1: break - if optional_right == input_ids[k+1]: k += 1 - else: break - pass - user_j = j - # Account for last item - if user_j != n_minus_1: - # user_k = k - # j = user_k - j = k - else: - user_j = n - k = n - pass - # Now copy input_ids to labels - labels[assistant_k : user_j] = input_ids[assistant_k : user_j] - # print(assistant_j, assistant_k, user_j, user_k) - break - pass - j += 1 - pass - pass - j += 1 - pass - all_labels.append(labels) - pass - return { "labels" : all_labels } - pass - - if hasattr(trainer, "train_dataset") and trainer.train_dataset is not None: - trainer.train_dataset = trainer.train_dataset.map(_train_on_responses_only, batched = True) - if hasattr(trainer, "eval_dataset") and trainer.eval_dataset is not None: - trainer.eval_dataset = trainer.eval_dataset.map(_train_on_responses_only, batched = True) - return trainer -pass - - def create_stopping_criteria(tokenizer, stop_word = "eos_token"): class StoppingCriteriaSub(StoppingCriteria): __slots__ = "stop_token", "single_match", "length", diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 1fec5d7a85..5abed6a3e6 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -355,6 +355,7 @@ def fast_cross_entropy_loss( labels, logit_softcapping = 0, logit_scaling = 0, + n_items = None, ): """ Arguments: @@ -372,7 +373,8 @@ def fast_cross_entropy_loss( logit_softcapping, logit_scaling, ) - n_items = torch.count_nonzero(labels != -100) + if n_items is None: + n_items = torch.count_nonzero(labels != -100) return loss.sum() / 
n_items pass @@ -409,6 +411,7 @@ def fast_cross_entropy_loss( labels = shift_labels, logit_softcapping = logit_softcapping, logit_scaling = logit_scaling, + n_items = kwargs.get("n_items", None), ) else: if logit_scaling != 0: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b14bb39144..aa7a69c94e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.9.post4" +__version__ = "2024.10.0" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a245330108..4cd512a98e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -975,13 +975,14 @@ def _CausalLM_fast_forward( # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass - + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, logit_softcapping = logit_softcapping, logit_scaling = logit_scaling, + n_items = kwargs.get("n_items", None), ) else: if logit_scaling != 0: @@ -2019,8 +2020,8 @@ def get_peft_model( if loftq_config == {}: from peft import LoftQConfig logger.warning_once( - f"Unsloth: init_lora_weights = `loftq` is set, but `loftq_config` is None.\n"\ - f"We shall use `loftq_config = LoftQConfig(loftq_bits = 4, loftq_iter = 1)`." + "Unsloth: init_lora_weights = `loftq` is set, but `loftq_config` is None.\n"\ + "We shall use `loftq_config = LoftQConfig(loftq_bits = 4, loftq_iter = 1)`." 
) loftq_config = LoftQConfig(loftq_bits = 4, loftq_iter = 1) pass diff --git a/unsloth/save.py b/unsloth/save.py index dce30c4c71..3760e23281 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -555,7 +555,7 @@ def unsloth_save_model( # max_ram = max(max_ram - W.nbytes, 0) else: # Save to Disk - logger.warning_once(f"We will save to Disk and not RAM now.") + logger.warning_once("We will save to Disk and not RAM now.") filename = os.path.join(temporary_location, f"{name}.pt") torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) # weights_only = True weirdly fails? @@ -1460,7 +1460,7 @@ def fix_tokenizer_bos_token(tokenizer): fix_bos_token = True logger.warning( - f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### The current model auto adds a BOS token.\n"\ "Unsloth: ##### Your chat template has a BOS token. We shall remove it temporarily." ) @@ -1671,7 +1671,7 @@ def unsloth_save_pretrained_gguf( if fix_bos_token: logger.warning( - f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### The current model auto adds a BOS token.\n"\ "Unsloth: ##### We removed it in GGUF's chat template for you." ) pass @@ -1867,7 +1867,7 @@ def unsloth_push_to_hub_gguf( if fix_bos_token: logger.warning( - f"Unsloth: ##### The current model auto adds a BOS token.\n"\ + "Unsloth: ##### The current model auto adds a BOS token.\n"\ "Unsloth: ##### We removed it in GGUF's chat template for you." 
) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 196e496188..63d07c9271 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -26,6 +26,15 @@ import gc import subprocess +from unsloth_zoo.tokenizer_utils import ( + mean_of_trained_tokens, + add_new_tokens, + fix_untrained_tokens, +) +from unsloth_zoo.training_utils import ( + fix_zero_training_loss, +) + __all__ = [ "load_correct_tokenizer", "fix_sentencepiece_tokenizer", @@ -807,347 +816,6 @@ def check_tokenizer( pass -@torch.inference_mode -def fix_untrained_tokens(model, tokenizer, train_dataset, eps = 1e-16): - """ - Llama-3 for eg has untrained vectors in the base model. - These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> - We reset them to the mean of the rest of the tokens - """ - embedding_matrix = model.get_input_embeddings ().weight - lm_head_matrix = model.get_output_embeddings().weight - - # Ignore some model checks for now - if model.config._name_or_path in IGNORED_TOKENIZER_NAMES: - return - pass - - # Get untrained tokens - indicator_untrained1 = torch.amax(embedding_matrix, axis = 1) <= eps - # Check lm_head as well - - # Does NOT work for Llama 3.1!! 
- indicator_untrained2 = torch.amax(lm_head_matrix, axis = 1) <= eps - - # We instead check for repeated vectors - lm_head_where = torch.where(indicator_untrained1)[0] - lm_head_bad = lm_head_matrix[lm_head_where] - lm_head_bad = lm_head_bad.cpu().float().numpy().round(3) - from collections import Counter - counter = Counter() - for row in lm_head_bad: counter[hash(row.data.tobytes())] += 1 - counter = Counter({k: c for k, c in counter.items() if c >= 2}) - - lm_head_where = lm_head_where.cpu().numpy() - final_bad_lm_head = [] - for j, row in enumerate(lm_head_bad): - if hash(row.data.tobytes()) in counter: - final_bad_lm_head.append(lm_head_where[j]) - indicator_untrained2 = indicator_untrained2 | torch.zeros_like(indicator_untrained2) - indicator_untrained2[final_bad_lm_head] = True - - # Combine both checks - indicator_untrained = indicator_untrained1 & indicator_untrained2 - - where_untrained = torch.where(indicator_untrained)[0] - n_untrained = where_untrained.shape[0] - n_trained = embedding_matrix.shape[0] - n_untrained - - # Get set and actual tokens - where_untrained = where_untrained.tolist() - if len(where_untrained) == 0: return - - # Remove untrained indices where it's longer - - where_untrained_set = frozenset(where_untrained) - actual_bad_tokens = tokenizer.convert_ids_to_tokens(where_untrained) - # Remove None items in actual_bad_tokens - actual_bad_tokens = [x for x in actual_bad_tokens if x is not None] - - # Check if tokenizer and training datasets have bad tokens - if_bad_first = False - if_bad_second = False - # Check tokenizer's chat template for any untrained tokens - chat_template = getattr(tokenizer, "chat_template", None) - if chat_template is not None: - if_bad_first = any(x in chat_template for x in actual_bad_tokens) - pass - - # Check the first 250, last 250 input_ids - size_dataset = len(train_dataset) - size = min(size_dataset, 250) - for j in range(size): - input_ids = train_dataset[j] - if "input_ids" in input_ids: - input_ids = 
input_ids["input_ids"] - if_bad = any(item in where_untrained_set for item in input_ids) - if if_bad: - if_bad_second = True - break - pass - pass - pass - - # Check last 250 - if not if_bad_second: - left = max(size_dataset-250, 0) - for j in range(left, size_dataset): - input_ids = train_dataset[j] - if "input_ids" in input_ids: - input_ids = input_ids["input_ids"] - if_bad = any(item in where_untrained_set for item in input_ids) - if if_bad: - if_bad_second = True - break - pass - pass - pass - pass - - # Check if bad tokens exists! - if not if_bad_first and not if_bad_second: return - - # Check if lm_head / embed_token are trainable! - bad_not_trainable = False - if not embedding_matrix.requires_grad: bad_not_trainable = True - if not lm_head_matrix .requires_grad: bad_not_trainable = True - - if bad_not_trainable: - - final_bad_items = [] - - # Re-check the first 250, last 250 input_ids - size_dataset = len(train_dataset) - size = min(size_dataset, 250) - for j in range(size): - input_ids = train_dataset[j] - if "input_ids" in input_ids: - input_ids = input_ids["input_ids"] - for item in input_ids: - if item in where_untrained_set: final_bad_items.append(item) - pass - pass - - # Re-check last 250 - left = max(size_dataset-250, 0) - for j in range(left, size_dataset): - input_ids = train_dataset[j] - if "input_ids" in input_ids: - input_ids = input_ids["input_ids"] - for item in input_ids: - if item in where_untrained_set: final_bad_items.append(item) - pass - pass - - raise ValueError( - f'Unsloth: Untrained tokens of [{list(set(final_bad_items))}] found, but embed_tokens & lm_head not trainable, causing NaNs. '\ - 'Restart then add `embed_tokens` & `lm_head` to '\ - '`FastLanguageModel.get_peft_model(target_modules = [..., "embed_tokens", "lm_head",]). `'\ - 'Are you using the `base` model? 
Instead, use the `instruct` version to silence this warning.', - ) - pass - - # Count all the possible bad tokens - final_counts = np.zeros(max(len(tokenizer), embedding_matrix.shape[0]), dtype = np.int64) - def mapping(examples): - input_ids = examples["input_ids"] - counter = np.fromiter(itertools.chain.from_iterable(input_ids), dtype = np.int32) - np.add.at(final_counts, counter, 1) - pass - train_dataset.map(mapping, batched = True, desc = "Counting untrained tokens") - - # Get sum of all items - sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) - sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) - - # Remove bad tokens - sum_embedding -= torch.sum(embedding_matrix[where_untrained], dtype = torch.float32, axis = 0) - sum_lm_head -= torch.sum(lm_head_matrix [where_untrained], dtype = torch.float32, axis = 0) - - # Find correct average by dividing by sum of trained tokens - mean_embedding = (sum_embedding / n_trained) - mean_lm_head = (sum_lm_head / n_trained) - - # Scale each to be equal to 1/max_frequency. Also set some to 0 if none seen - scaling = final_counts[where_untrained] / max(final_counts.max(), 1) - scaling = torch.tensor(scaling, device = mean_embedding.device).unsqueeze(1) - mean_embedding = mean_embedding.repeat((n_untrained, 1,)) * scaling - mean_lm_head = mean_lm_head .repeat((n_untrained, 1,)) * scaling - where_null = scaling.ravel() == 0 - mean_embedding[where_null] = 0 - mean_lm_head [where_null] = 0 - - # Set them to the mean - logger.warning( - "Unsloth: Setting embed_tokens & lm_head untrained tokens to "\ - "mean(trained) to counteract NaNs during training." 
- ) - embedding_matrix[where_untrained] = mean_embedding.to(embedding_matrix.dtype) - lm_head_matrix [where_untrained] = mean_lm_head .to(lm_head_matrix .dtype) - - # Clean up - for _ in range(3): - gc.collect() - torch.cuda.empty_cache() - pass - return -pass - - -@torch.inference_mode -def mean_of_trained_tokens(model, eps = 1e-16): - """ - Llama-3 for eg has untrained vectors in the base model. - These include <|eot_id|>, <|start_header_id|>, <|end_header_id|> - We reset them to the mean of the rest of the tokens - """ - embedding_matrix = model.get_input_embeddings ().weight.clone() - lm_head_matrix = model.get_output_embeddings().weight.clone() - - # Get untrained tokens - indicator_untrained = torch.amax(embedding_matrix, axis = 1) <= eps - where_untrained = torch.where(indicator_untrained)[0] - n_untrained = where_untrained.shape[0] - n_trained = embedding_matrix.shape[0] - n_untrained - # if n_untrained != 0: - # print( - # f"Unsloth: Not an error, but your model has {n_untrained} untrained tokens.\n"\ - # "We shall set them to the mean of the other trained tokens." - # ) - # pass - - # Get sum of all items - sum_embedding = torch.sum(embedding_matrix, dtype = torch.float32, axis = 0) - sum_lm_head = torch.sum(lm_head_matrix, dtype = torch.float32, axis = 0) - - # Remove bad tokens - sum_embedding -= torch.sum(embedding_matrix[where_untrained], dtype = torch.float32, axis = 0) - sum_lm_head -= torch.sum(lm_head_matrix [where_untrained], dtype = torch.float32, axis = 0) - - # Find correct average by dividing by sum of trained tokens - mean_embedding = (sum_embedding / n_trained) - mean_lm_head = (sum_lm_head / n_trained) - - return mean_embedding, mean_lm_head -pass - - -@torch.inference_mode -def add_new_tokens( - model, - tokenizer, - new_tokens = [], - method = "mean", - interpolation = 0.5, -): - """ - Smartly resizes the tokenizer and adds new tokens to the model. - We also disregard untrained tokens by removing them from the mean calculation. 
- """ - assert(isinstance(new_tokens, (list, tuple))) - assert(len(new_tokens) > 0) - assert(method == "mean" or method == "interpolation") - assert(interpolation >= 0 and interpolation <= 1) - - # Check if tokens already exist - overlapping_tokens = set(new_tokens) & set(tokenizer.vocab.keys()) - if len(overlapping_tokens) != 0: - print( - f"Unsloth: You're adding new_tokens = {new_tokens}\n"\ - f"There are tokens which are overlapping = {list(overlapping_tokens)}\n"\ - f"We shall safely ignore these overlapping tokens." - ) - new_tokens = [x for x in new_tokens if x not in overlapping_tokens] - pass - - # Get mean of trained tokens - # mean_embedding, mean_lm_head = fix_untrained_tokens(model) - - # Weirdly be careful reserved tokens can pop out - mean_embedding, mean_lm_head = mean_of_trained_tokens(model) - mean_embedding = mean_embedding.to(torch.float32) - mean_lm_head = mean_lm_head .to(torch.float32) - - # Add tokens! - old_length = len(tokenizer) - tokenizer.add_tokens(new_tokens) - model.resize_token_embeddings(len(tokenizer)) - - # If we use interpolation, we interpolate between the mean embeddings and - # the Word2Vec sum of the other vectors - embedding_matrix = model.get_input_embeddings ().weight - lm_head_matrix = model.get_output_embeddings().weight - - if method == "interpolation": - print( - "Unsloth: You are using interpolation to add new tokens.\n"\ - f"We shall set new tokens = mean(embeddings)*{1-interpolation} + mean(new_tokens)*{interpolation}" - ) - for j, token in enumerate(new_tokens): - input_ids = tokenizer(token, add_special_tokens = False).input_ids - mean_embedding_token = embedding_matrix[input_ids].mean(axis = 0, dtype = torch.float32) - mean_lm_head_token = lm_head_matrix [input_ids].mean(axis = 0, dtype = torch.float32) - - # Interpolate - mean_embedding_token = mean_embedding*(1-interpolation) + mean_embedding_token*interpolation - mean_lm_head_token = mean_lm_head *(1-interpolation) + mean_lm_head_token *interpolation - - # 
Set the new vector - embedding_matrix[old_length+j] = mean_embedding_token - lm_head_matrix [old_length+j] = mean_lm_head_token - pass - else: - # Now set the new tokens to the mean! - embedding_matrix[old_length:] = mean_embedding - lm_head_matrix [old_length:] = mean_lm_head - pass - - # We set a flag to say we need to train embeddings - internal_model = model - while hasattr(internal_model, "model"): - internal_model._need_to_train_embeddings = True - internal_model = internal_model.model - pass - internal_model._need_to_train_embeddings = True - - return -pass - - -@torch.inference_mode -def fix_zero_training_loss(model, tokenizer, train_dataset): - """ - Sometimes the labels get masked by all -100s, causing the loss - to be 0. We check for this! - """ - if len(train_dataset) == 0: return - - row = train_dataset[0] - if type(row) is dict and "labels" in row: - - # Check the first 100 rows - seen_bad = 0 - seen_good = 0 - for i, row in enumerate(train_dataset): - try: check_tokens = list(set(row["labels"])) - except: continue - if len(check_tokens) == 1 and check_tokens[0] == -100: seen_bad += 1 - else: seen_good += 1 - if i >= 100: break - pass - - # Check ratio - if seen_bad / (seen_bad + seen_good) >= 0.9: - logger.warning( - "Unsloth: Most labels in your dataset are -100. Training losses will be 0.\n"\ - "For example, are you sure you used `train_on_responses_only` correctly?\n"\ - "Or did you mask our tokens incorrectly? Maybe this is intended?" - ) - pass - pass -pass - - def check_nvidia(): # Unsloth doesn't work yet on AMD devices - we're working on it! 
output = np.array([0,]) @@ -1260,7 +928,7 @@ def patch_sft_trainer_tokenizer(): " torch.cuda.empty_cache()\n"\ "pass\n"\ "\n"\ - "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, eps = 1e-16)\n\n"\ + "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)\n\n"\ "fix_zero_training_loss(self.model, self.tokenizer, self.train_dataset)\n\n" # Add NEFTune since it doesn't seem to work?? We need to manually inject it diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 45616ca6be..c9c0ca2d09 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -22,10 +22,12 @@ from transformers import TrainingArguments pass from . import is_bfloat16_supported +from unsloth_zoo.training_utils import unsloth_train __all__ = [ "UnslothTrainingArguments", "UnslothTrainer", + "unsloth_train", ] From a395211e02334e53c653b53156da05bb8440d42a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 16 Oct 2024 21:48:05 -0700 Subject: [PATCH 0516/1088] Update mapper.py --- unsloth/models/mapper.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 7f27437904..10e40ab7c6 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -416,6 +416,10 @@ "unsloth/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-3B-Instruct", ), + "unsloth/Llama-3.1-Nemotron-70B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.1-Nemotron-70B-Instruct", + "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + ), } INT_TO_FLOAT_MAPPER = {} From e210840ba97a5837ca59fc73ede47b56f10772dd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 17 Oct 2024 20:43:07 -0700 Subject: [PATCH 0517/1088] Gradient Accumulation Fix (#1146) * Unsloth Zoo * Update trainer.py * Update trainer.py * Update cross_entropy_loss.py * n_items * Update llama.py * kwargs * Remove extraneous f prefixes (#1133) Co-authored-by: Emil Sadek * Update __init__.py * kwargs * Update trainer.py * Update trainer.py * Update 
trainer.py * Fix GA * Update _utils.py * Update llama.py * Update tokenizer_utils.py * Warn on old versions * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py --------- Co-authored-by: Emil Sadek Co-authored-by: Emil Sadek --- unsloth/kernels/cross_entropy_loss.py | 2 +- unsloth/models/_utils.py | 63 ++++++++++++++++++++++++++- unsloth/models/llama.py | 5 ++- unsloth/tokenizer_utils.py | 19 +++++++- unsloth/trainer.py | 20 ++++++++- 5 files changed, 103 insertions(+), 6 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 5abed6a3e6..1c8f8c8d99 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -411,7 +411,7 @@ def fast_cross_entropy_loss( labels = shift_labels, logit_softcapping = logit_softcapping, logit_scaling = logit_scaling, - n_items = kwargs.get("n_items", None), + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), ) else: if logit_scaling != 0: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index aa7a69c94e..7a13f0bbdf 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.0" +__version__ = "2024.10.1" __all__ = [ "prepare_model_for_kbit_training", @@ -43,6 +43,7 @@ "accelerate_new_send_to_device", "patch_gradient_checkpointing", "unpatch_gradient_checkpointing", + "patch_gradient_accumulation_fix", ] import torch @@ -1138,3 +1139,63 @@ def test_mask_creation(): assert(torch.all(correct_mask == our_mask)) pass pass + + +def _unsloth_get_batch_samples(self, epoch_iterator, num_batches): + batch_samples = [] + num_items_in_batch = None + for _ in range(num_batches): + try: + batch_samples += [next(epoch_iterator)] + except StopIteration: + break + if len(batch_samples) > 0 and "labels" in batch_samples[0]: + try: + num_items_in_batch = sum( + [torch.count_nonzero(x["labels"][..., 1:] != -100) for x in batch_samples] + ) + except TypeError: + pass + return batch_samples, num_items_in_batch +pass + + +def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): + if "num_items_in_batch" in kwargs: + if "num_items_in_batch" not in inputs: + inputs["num_items_in_batch"] = kwargs["num_items_in_batch"] + pass + pass + return self._old_compute_loss(model, inputs, args, kwargs) +pass + + +def patch_gradient_accumulation_fix(Trainer): + # Fixes gradient accumulation + if hasattr(Trainer, "get_batch_samples"): + from inspect import getsource + if \ + not getsource(Trainer.get_batch_samples).strip()\ + .endswith("return batch_samples, num_items_in_batch"): + + raise NotImplementedError("Unsloth: Please make a Github issue immediately!!") + else: + if Trainer.get_batch_samples.__name__ != "_unsloth_get_batch_samples": + Trainer.get_batch_samples = _unsloth_get_batch_samples + pass + + # Also fix passing in num_items_in_batch + if not hasattr(Trainer, "_old_compute_loss"): + Trainer._old_compute_loss = Trainer.compute_loss + Trainer.compute_loss = _unsloth_pre_compute_loss + pass + pass + else: + logger.warning_once( + "Unsloth: We fixed a gradient accumulation bug, "\ + "but it seems like you don't have the latest 
transformers version!\n"\ + "Please update transformers via:\n"\ + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + ) + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 4cd512a98e..f0437207b0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -982,7 +982,7 @@ def _CausalLM_fast_forward( labels = shift_labels, logit_softcapping = logit_softcapping, logit_scaling = logit_scaling, - n_items = kwargs.get("n_items", None), + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), ) else: if logit_scaling != 0: @@ -1777,6 +1777,9 @@ def from_pretrained( patch_saving_functions(model) Trainer._inner_training_loop = _fast_inner_training_loop + # Fix gradient accumulation + patch_gradient_accumulation_fix(Trainer) + # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference internal_model = model diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 63d07c9271..ffe9933f47 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -928,8 +928,23 @@ def patch_sft_trainer_tokenizer(): " torch.cuda.empty_cache()\n"\ "pass\n"\ "\n"\ - "fix_untrained_tokens(self.model, self.tokenizer, self.train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)\n\n"\ - "fix_zero_training_loss(self.model, self.tokenizer, self.train_dataset)\n\n" + "tokenizer = self.processing_class if hasattr(self, 'processing_class') else self.tokenizer\n"\ + "fix_untrained_tokens(self.model, tokenizer, self.train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)\n\n"\ + "fix_zero_training_loss(self.model, tokenizer, self.train_dataset)\n\n" + + # Warn on gradient accumulation steps if it's used + check_text += \ + "\n"\ + "try:\n"\ + " gradient_accumulation_steps = self.args.gradient_accumulation_steps\n"\ + " if type(gradient_accumulation_steps) is int and gradient_accumulation_steps > 1:\n"\ + " 
from transformers import __version__ as transformers_version\n"\ + " from packaging.version import Version\n"\ + " if Version(transformers_version) <= Version('4.45.2'):\n"\ + " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers and Unsloth!')\n"\ + "except:\n"\ + " pass\n"\ + "\n\n" # Add NEFTune since it doesn't seem to work?? We need to manually inject it check_text += \ diff --git a/unsloth/trainer.py b/unsloth/trainer.py index c9c0ca2d09..25bb434023 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -22,7 +22,25 @@ from transformers import TrainingArguments pass from . import is_bfloat16_supported -from unsloth_zoo.training_utils import unsloth_train +from unsloth_zoo.training_utils import unsloth_train as _unsloth_train +from packaging.version import Version + +# Unsloth gradient accumulation fix: +from transformers import __version__ as transformers_version +if Version(transformers_version) > Version("4.45.2"): + def unsloth_train(trainer): + return trainer.train() + pass +else: + def unsloth_train(trainer): + print( + "Unsloth: Using our custom gradient accumulation fixed trainer, which is not feature complete.\n"\ + "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"\ + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + ) + return _unsloth_train(trainer) + pass +pass __all__ = [ "UnslothTrainingArguments", From eb533dbf14f421f8fa4e200e86f93fdf870c4377 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 17 Oct 2024 20:45:40 -0700 Subject: [PATCH 0518/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 51342fdf27..e345227c61 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and
    Click for more news +- 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. From b3e85e915e2f361783996df56c04140ae4a656a8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 17 Oct 2024 20:46:11 -0700 Subject: [PATCH 0519/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e345227c61..6b583d5fd5 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. - 📣 NEW! [Llama 3.2 Conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! - 📣 NEW! [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook) and [Llama 3.2 Kaggle conversational notebook](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-2-1b-3b-conversational-unsloth/notebook) - 📣 NEW! 
[Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) @@ -50,7 +51,6 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and
    Click for more news -- 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported - 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. From 3085f4c3daacc63939e78e3c87759d0d03c5a71f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 17 Oct 2024 20:50:05 -0700 Subject: [PATCH 0520/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7a13f0bbdf..ea6647594d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.1" +__version__ = "2024.10.2" __all__ = [ "prepare_model_for_kbit_training", From 12bdd86636be8180bafa26cd9e45fb92c5126fca Mon Sep 17 00:00:00 2001 From: vo1d-ai Date: Fri, 18 Oct 2024 23:46:07 -0400 Subject: [PATCH 0521/1088] fix: compute_loss bug (#1151) Currently, Unsloth doesn't pass additional parameters to Trainer.compute_loss such as return_outputs. This leads to errors when calling trainer.evaluate(). This change fixes the bug by properly passing parameters to Trainer.compute_loss. 
--- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ea6647594d..0c9468ce9d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1166,7 +1166,7 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): inputs["num_items_in_batch"] = kwargs["num_items_in_batch"] pass pass - return self._old_compute_loss(model, inputs, args, kwargs) + return self._old_compute_loss(model, inputs, *args, **kwargs) pass From d7850d8b2e1e4598564a460b0e104a83b38179e3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 18 Oct 2024 23:08:30 -0700 Subject: [PATCH 0522/1088] Fix `get_token` --- unsloth/models/llama.py | 7 ++++++- unsloth/models/loader.py | 7 ++++++- unsloth/save.py | 7 ++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f0437207b0..5f20f51209 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -61,7 +61,12 @@ from peft.tuners.lora import Linear4bit as Peft_Linear4bit from ..save import patch_saving_functions import re, os, inspect, math, sys -from huggingface_hub.utils._token import get_token +try: + from huggingface_hub.utils import get_token +except: + # Old HF Hub versions <= 0.0.25 + from huggingface_hub.utils._token import get_token +pass def original_apply_qkv(self, X): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 5774c2242e..db7259b1d9 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -22,7 +22,12 @@ from peft import PeftConfig, PeftModel from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit import os -from huggingface_hub.utils._token import get_token +try: + from huggingface_hub.utils import get_token +except: + # Old HF Hub versions <= 0.0.25 + from huggingface_hub.utils._token import get_token +pass from huggingface_hub import HfFileSystem # 
https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! diff --git a/unsloth/save.py b/unsloth/save.py index 3760e23281..0381ab7ee2 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -29,7 +29,12 @@ from transformers.models.llama.modeling_llama import logger from .tokenizer_utils import fix_sentencepiece_gguf from huggingface_hub import HfApi -from huggingface_hub.utils._token import get_token +try: + from huggingface_hub.utils import get_token +except: + # Old HF Hub versions <= 0.0.25 + from huggingface_hub.utils._token import get_token +pass from pathlib import Path __all__ = [ From 9327d900023f8427fa6a9b20ee8ec88c41ad5e55 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 18 Oct 2024 23:10:30 -0700 Subject: [PATCH 0523/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0c9468ce9d..4aa8e1e592 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.2" +__version__ = "2024.10.3" __all__ = [ "prepare_model_for_kbit_training", From 1f52468fa31bf0b641ec96217ef0f5916a07fce5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 20 Oct 2024 01:52:21 -0700 Subject: [PATCH 0524/1088] Update save.py --- unsloth/save.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 0381ab7ee2..ab30e0fea5 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1181,7 +1181,10 @@ def save_to_gguf( pass pass - return all_saved_locations + # Finally check if first_conversion (f16, bf16 etc) was in the list of actual quant methods + full_precision_seen = first_conversion in frozenset(quantization_method) + + return all_saved_locations, full_precision_seen pass @@ -1659,7 +1662,8 @@ def unsloth_save_pretrained_gguf( is_sentencepiece_model = check_if_sentencepiece_model(self) # Save to GGUF - all_file_locations = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, + all_file_locations, want_full_precision = save_to_gguf( + model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1684,6 +1688,9 @@ def unsloth_save_pretrained_gguf( if push_to_hub: print("Unsloth: Uploading GGUF to Huggingface Hub...") + # If not needing full precision, skip the first + if not want_full_precision: all_file_locations = all_file_locations[1:] + for file_location in all_file_locations: username = upload_to_huggingface( self, save_directory, token, @@ -1833,7 +1840,8 @@ def unsloth_push_to_hub_gguf( is_sentencepiece_model = check_if_sentencepiece_model(self) # Save to GGUF - all_file_locations = save_to_gguf(model_type, model_dtype, is_sentencepiece_model, + all_file_locations, want_full_precision = save_to_gguf( + model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1848,6 +1856,9 @@ def unsloth_push_to_hub_gguf( 
print(f"Unsloth: Saved Ollama Modelfile to {modelfile_location}") pass + # If not needing full precision, skip the first + if not want_full_precision: all_file_locations = all_file_locations[1:] + for file_location in all_file_locations: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( From f0aca9073edd18a21f37fe112391f3e840474f7c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 21 Oct 2024 01:02:53 -0700 Subject: [PATCH 0525/1088] Fix TRL --- pyproject.toml | 4 ++-- unsloth/models/_utils.py | 4 ++-- unsloth/tokenizer_utils.py | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 455f8477e8..a2a9c2c939 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -227,7 +227,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", "xformers<0.0.27", "bitsandbytes>=0.43.3", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4aa8e1e592..2381f509f3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1194,8 +1194,8 @@ def patch_gradient_accumulation_fix(Trainer): logger.warning_once( "Unsloth: We fixed a gradient accumulation bug, "\ "but it seems like you don't have the latest transformers version!\n"\ - "Please update transformers via:\n"\ - '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + "Please update transformers, TRL and unsloth via:\n"\ + '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' ) pass 
pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index ffe9933f47..9ad603cd54 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -884,7 +884,7 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])[0]\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is not None and dataset_text_field is None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ @@ -941,7 +941,8 @@ def patch_sft_trainer_tokenizer(): " from transformers import __version__ as transformers_version\n"\ " from packaging.version import Version\n"\ " if Version(transformers_version) <= Version('4.45.2'):\n"\ - " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers and Unsloth!')\n"\ + " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\\\n"\ + " '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`')\n"\ "except:\n"\ " pass\n"\ "\n\n" From f4ae58540578676bcfc0ce59903fa91e27a2b5f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 00:29:30 -0700 Subject: [PATCH 0526/1088] Update mistral.py --- unsloth/models/mistral.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 15e9efc426..00dcc5cd1d 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -254,8 +254,9 @@ def MistralForCausalLM_fast_forward( shift_labels = torch.hstack((labels[..., 1:], 
self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, + logits = shift_logits, + labels = shift_labels, + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), ) pass From 106f213f405c6ef26515c8c1ac286b5d705ea175 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 00:53:38 -0700 Subject: [PATCH 0527/1088] Patch processing_class --- unsloth/tokenizer_utils.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 9ad603cd54..e11783f7c5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -831,6 +831,7 @@ def check_nvidia(): PRE_CHECK = check_nvidia() +import inspect from inspect import getsource import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * @@ -869,6 +870,29 @@ def neftune_post_forward_hook(module, input, output): pass +def patch_trl_tokenizer_processing_class(trainer_name): + # New TRL removes tokenizer! + # We return it back! 
+ exec(f"from trl import {trainer_name}") + if str(eval(f"{trainer_name}")__name__).startswith("Unsloth"): return None + exec(f"parameters = inspect.signature({trainer_name}).parameters") + if "tokenizer" in parameters: return None + + args = {key : value.default for key, value in parameters.items()} + args["tokenizer"] = None + new_args = args.copy() + del new_args["tokenizer"] + del new_args["processing_class"] + new_args = ",\n".join(f"{' '*12}{key} = {key}" for key in new_args) + \ + f",\n{' '*12}processing_class = tokenizer if tokenizer else processing_class" + args = ",\n".join(f"{' '*8}{key} = {value}" for key, value in args.items()) + args = f"{' '*4}def __init__(\n" + f"{' '*8}self,\n" + args + "):" + args += f"\n{' '*8}\n{' '*8}super().__init__(\n{new_args}\n{' '*8})" + new_class = f"""class Unsloth{trainer_name}({trainer_name}):\n{' '*4}{args}\n""" + return new_class +pass + + def patch_sft_trainer_tokenizer(): """ Patches the trainer with changes @@ -982,4 +1006,13 @@ def patch_sft_trainer_tokenizer(): pass pass +# Fix TRL trainers with removed tokenizer args (got replaced with processing_class) +for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): + trainer_text = patch_trl_tokenizer_processing_class(trainer_name) + if trainer_text is None: continue + exec(trainer_text, globals()) + exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) +pass + +# FInally patch TRL tokenizer things patch_sft_trainer_tokenizer() From ef842120d2490ba456fb2a13417cca5406b0e9da Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 00:55:47 -0700 Subject: [PATCH 0528/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index e11783f7c5..9d69fd12ce 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -874,7 +874,7 @@ def patch_trl_tokenizer_processing_class(trainer_name): # New TRL 
removes tokenizer! # We return it back! exec(f"from trl import {trainer_name}") - if str(eval(f"{trainer_name}")__name__).startswith("Unsloth"): return None + if str(eval(f"{trainer_name}").__name__).startswith("Unsloth"): return None exec(f"parameters = inspect.signature({trainer_name}).parameters") if "tokenizer" in parameters: return None From 4f7c527ae0b87073610ac45de34428fa887a4663 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 00:57:36 -0700 Subject: [PATCH 0529/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 9d69fd12ce..1dbff4f57c 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -873,9 +873,9 @@ def neftune_post_forward_hook(module, input, output): def patch_trl_tokenizer_processing_class(trainer_name): # New TRL removes tokenizer! # We return it back! - exec(f"from trl import {trainer_name}") + exec(f"from trl import {trainer_name}", globals()) if str(eval(f"{trainer_name}").__name__).startswith("Unsloth"): return None - exec(f"parameters = inspect.signature({trainer_name}).parameters") + parameters = eval(f"inspect.signature({trainer_name}).parameters") if "tokenizer" in parameters: return None args = {key : value.default for key, value in parameters.items()} From aa2b20763e5730e30a0c71df319d7674826d7c8e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 01:05:41 -0700 Subject: [PATCH 0530/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 1dbff4f57c..3978f62e3b 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1010,6 +1010,7 @@ def patch_sft_trainer_tokenizer(): for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): trainer_text = patch_trl_tokenizer_processing_class(trainer_name) if trainer_text 
is None: continue + print(trainer_text) exec(trainer_text, globals()) exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) pass From 101389d728881b441dc123b60551579eadf5c3bc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 01:09:20 -0700 Subject: [PATCH 0531/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 3978f62e3b..2f5295e017 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -886,7 +886,7 @@ def patch_trl_tokenizer_processing_class(trainer_name): new_args = ",\n".join(f"{' '*12}{key} = {key}" for key in new_args) + \ f",\n{' '*12}processing_class = tokenizer if tokenizer else processing_class" args = ",\n".join(f"{' '*8}{key} = {value}" for key, value in args.items()) - args = f"{' '*4}def __init__(\n" + f"{' '*8}self,\n" + args + "):" + args = f"def __init__(\n" + f"{' '*8}self,\n" + args + "):" args += f"\n{' '*8}\n{' '*8}super().__init__(\n{new_args}\n{' '*8})" new_class = f"""class Unsloth{trainer_name}({trainer_name}):\n{' '*4}{args}\n""" return new_class From c0f0fc987d31d374db03656c87047bdc40ce5e96 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 01:22:13 -0700 Subject: [PATCH 0532/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 2f5295e017..6e9c061fad 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -878,7 +878,13 @@ def patch_trl_tokenizer_processing_class(trainer_name): parameters = eval(f"inspect.signature({trainer_name}).parameters") if "tokenizer" in parameters: return None - args = {key : value.default for key, value in parameters.items()} + args = { + key : \ + value.default \ + if type(value.default) is not str else \ + f"'{value.default}'" \ + for key, value 
in parameters.items() + } args["tokenizer"] = None new_args = args.copy() del new_args["tokenizer"] From b3e00335c1b45f8093599ec5b151b5c6d6546952 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 22 Oct 2024 01:28:38 -0700 Subject: [PATCH 0533/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 6e9c061fad..d7af61a090 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -914,6 +914,7 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ + "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ "test_text = dataset[0][dataset_text_field] if (formatting_func is not None and dataset_text_field is None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ From aabb5ff54b4f6baa7922e03e59d9cbaabb7ea4c9 Mon Sep 17 00:00:00 2001 From: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Date: Wed, 23 Oct 2024 09:55:26 +0200 Subject: [PATCH 0534/1088] Installation guide (#1165) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 6b583d5fd5..d0e5a97999 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,18 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` +### Windows Installation + +To run Unsloth directly on Windows: +- Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows +- In the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: +```python +trainer = SFTTrainer( + dataset_num_proc=1, + ... +) +``` + For **advanced installation instructions** or if you see weird errors during installations: 1. Install `torch` and `triton`. 
Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` From 30bf33957dd8822926661ccdb52a1ad908609683 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 23 Oct 2024 16:59:02 +0900 Subject: [PATCH 0535/1088] chore: update chat_templates.py (#1166) orginal -> original --- unsloth/chat_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index cab6130dd1..b254202c75 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -678,7 +678,7 @@ {{- end }} {{- if .Tools }} -You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the orginal use question. +You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the original use question. {{- end }} {{- end }}<|eot_id|> {{- range $i, $_ := .Messages }} From 28958397ee376d0c06435c50d36f4e0d6484aca0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 02:58:40 -0700 Subject: [PATCH 0536/1088] Disable Flex Attention --- unsloth/kernels/flex_attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 2fba359b77..08426b69e0 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -31,7 +31,7 @@ create_block_mask as _create_block_mask, ) _flex_attention = torch.compile(_flex_attention, dynamic = True, options = torch_compile_options) - HAS_FLEX_ATTENTION = True + HAS_FLEX_ATTENTION = False except: HAS_FLEX_ATTENTION = False pass From 06f5d75b811a70f94f1d97baf0970b450973df9f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 03:04:22 -0700 Subject: [PATCH 0537/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index d7af61a090..1cad00d44d 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1017,7 +1017,6 @@ def patch_sft_trainer_tokenizer(): for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): trainer_text = patch_trl_tokenizer_processing_class(trainer_name) if trainer_text is None: continue - print(trainer_text) exec(trainer_text, globals()) exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) pass From 28e6eeabd8f841a4440735c387cad5f4a492e879 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 03:14:48 -0700 Subject: [PATCH 0538/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2381f509f3..25024be2ec 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.3" +__version__ = "2024.10.4" __all__ = [ "prepare_model_for_kbit_training", From 0e5a507f87132cd8fbae5239fc436ef5ba3232d6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 03:14:57 -0700 Subject: [PATCH 0539/1088] Many bug fixes (#1162) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine --- README.md | 12 +++++++++ pyproject.toml | 4 +-- unsloth/chat_templates.py | 2 +- unsloth/kernels/flex_attention.py | 2 +- unsloth/models/_utils.py | 6 ++--- unsloth/models/mistral.py | 5 ++-- unsloth/tokenizer_utils.py | 45 +++++++++++++++++++++++++++++-- 7 files changed, 65 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 6b583d5fd5..d0e5a97999 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,18 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` +### Windows Installation + +To run Unsloth directly on Windows: +- Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows +- In the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: +```python +trainer = SFTTrainer( + dataset_num_proc=1, + ... +) +``` + For **advanced installation instructions** or if you see weird errors during installations: 1. Install `torch` and `triton`. Go to https://pytorch.org to install it. 
For example `pip install torch torchvision torchaudio triton` diff --git a/pyproject.toml b/pyproject.toml index 455f8477e8..a2a9c2c939 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -227,7 +227,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<=0.11.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", "xformers<0.0.27", "bitsandbytes>=0.43.3", diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index cab6130dd1..b254202c75 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -678,7 +678,7 @@ {{- end }} {{- if .Tools }} -You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the orginal use question. +You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the original use question. 
{{- end }} {{- end }}<|eot_id|> {{- range $i, $_ := .Messages }} diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 2fba359b77..08426b69e0 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -31,7 +31,7 @@ create_block_mask as _create_block_mask, ) _flex_attention = torch.compile(_flex_attention, dynamic = True, options = torch_compile_options) - HAS_FLEX_ATTENTION = True + HAS_FLEX_ATTENTION = False except: HAS_FLEX_ATTENTION = False pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4aa8e1e592..25024be2ec 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.3" +__version__ = "2024.10.4" __all__ = [ "prepare_model_for_kbit_training", @@ -1194,8 +1194,8 @@ def patch_gradient_accumulation_fix(Trainer): logger.warning_once( "Unsloth: We fixed a gradient accumulation bug, "\ "but it seems like you don't have the latest transformers version!\n"\ - "Please update transformers via:\n"\ - '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + "Please update transformers, TRL and unsloth via:\n"\ + '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' ) pass pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 15e9efc426..00dcc5cd1d 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -254,8 +254,9 @@ def MistralForCausalLM_fast_forward( shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, + logits = shift_logits, + labels = shift_labels, + n_items = kwargs.get("num_items_in_batch", None) 
or kwargs.get("n_items", None), ) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index ffe9933f47..1cad00d44d 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -831,6 +831,7 @@ def check_nvidia(): PRE_CHECK = check_nvidia() +import inspect from inspect import getsource import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * @@ -869,6 +870,35 @@ def neftune_post_forward_hook(module, input, output): pass +def patch_trl_tokenizer_processing_class(trainer_name): + # New TRL removes tokenizer! + # We return it back! + exec(f"from trl import {trainer_name}", globals()) + if str(eval(f"{trainer_name}").__name__).startswith("Unsloth"): return None + parameters = eval(f"inspect.signature({trainer_name}).parameters") + if "tokenizer" in parameters: return None + + args = { + key : \ + value.default \ + if type(value.default) is not str else \ + f"'{value.default}'" \ + for key, value in parameters.items() + } + args["tokenizer"] = None + new_args = args.copy() + del new_args["tokenizer"] + del new_args["processing_class"] + new_args = ",\n".join(f"{' '*12}{key} = {key}" for key in new_args) + \ + f",\n{' '*12}processing_class = tokenizer if tokenizer else processing_class" + args = ",\n".join(f"{' '*8}{key} = {value}" for key, value in args.items()) + args = f"def __init__(\n" + f"{' '*8}self,\n" + args + "):" + args += f"\n{' '*8}\n{' '*8}super().__init__(\n{new_args}\n{' '*8})" + new_class = f"""class Unsloth{trainer_name}({trainer_name}):\n{' '*4}{args}\n""" + return new_class +pass + + def patch_sft_trainer_tokenizer(): """ Patches the trainer with changes @@ -884,7 +914,8 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "test_text = dataset[0][dataset_text_field] if (formatting_func is None or not use_formatting_func) else formatting_func(dataset[0])[0]\n"\ + "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is 
not None and dataset_text_field is None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ @@ -941,7 +972,8 @@ def patch_sft_trainer_tokenizer(): " from transformers import __version__ as transformers_version\n"\ " from packaging.version import Version\n"\ " if Version(transformers_version) <= Version('4.45.2'):\n"\ - " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers and Unsloth!')\n"\ + " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\\\n"\ + " '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`')\n"\ "except:\n"\ " pass\n"\ "\n\n" @@ -981,4 +1013,13 @@ def patch_sft_trainer_tokenizer(): pass pass +# Fix TRL trainers with removed tokenizer args (got replaced with processing_class) +for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): + trainer_text = patch_trl_tokenizer_processing_class(trainer_name) + if trainer_text is None: continue + exec(trainer_text, globals()) + exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) +pass + +# FInally patch TRL tokenizer things patch_sft_trainer_tokenizer() From f6a661f174092053a6949e24da049bd6bbe25e4b Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Wed, 23 Oct 2024 23:32:33 +0400 Subject: [PATCH 0540/1088] Fix/patch tokenizer (#1171) * fix: correct tokenizer handling in patch_sft_trainer_tokenizer * Revert "fix: correct tokenizer handling in patch_sft_trainer_tokenizer" This reverts commit f18ac214cd5a15fcfbf310ddd9dfce5d66fd3731. 
* fix: correct condition for test_text assignment in patch_sft_trainer_tokenizer --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 1cad00d44d..4b9fd5e133 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -915,7 +915,7 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ - "test_text = dataset[0][dataset_text_field] if (formatting_func is not None and dataset_text_field is None) else formatting_func(dataset[0])[0]\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ From 1e7e0e23683c5ec1c1e3a5df0f586d4c433fee44 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 12:39:58 -0700 Subject: [PATCH 0541/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 25024be2ec..2214ff80d4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.4" +__version__ = "2024.10.5" __all__ = [ "prepare_model_for_kbit_training", From b821f20b36cb1cf27bb1f6e928dc55b13a55ab15 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 22:13:45 -0700 Subject: [PATCH 0542/1088] n_items --- unsloth/__init__.py | 10 +++++++--- unsloth/kernels/cross_entropy_loss.py | 1 + unsloth/tokenizer_utils.py | 11 ++++++++--- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index abee9c9e04..458c2696bc 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -62,9 +62,13 @@ try: import torch -except: - raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ - "We have some installation instructions on our Github page.") +except ModuleNotFoundError: + raise ImportError( + "Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"\ + "We have some installation instructions on our Github page." + ) +except Exception as exception: + raise exception pass # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 1c8f8c8d99..35df257329 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -373,6 +373,7 @@ def fast_cross_entropy_loss( logit_softcapping, logit_scaling, ) + print(n_items) if n_items is None: n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 1cad00d44d..8806f1e743 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -914,8 +914,10 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ - "test_text = dataset[0][dataset_text_field] if (formatting_func is not None and dataset_text_field is None) else formatting_func(dataset[0])[0]\n"\ + "if 'tokenizer' not in 
locals(): tokenizer = processing_class\n"\ + "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"\ + "if 'dataset_text_field' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `dataset_text_field` does not exist!')\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ @@ -1017,7 +1019,10 @@ def patch_sft_trainer_tokenizer(): for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): trainer_text = patch_trl_tokenizer_processing_class(trainer_name) if trainer_text is None: continue - exec(trainer_text, globals()) + try: + exec(trainer_text, globals()) + except: + raise RuntimeError(f"Unsloth: Please file a bug report! 
Error patching {trainer_name}") exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) pass From e56136636540097f63bb11fa540558001d30b880 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 23 Oct 2024 22:18:24 -0700 Subject: [PATCH 0543/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 35df257329..4895d27310 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -373,9 +373,9 @@ def fast_cross_entropy_loss( logit_softcapping, logit_scaling, ) - print(n_items) if n_items is None: n_items = torch.count_nonzero(labels != -100) + print(n_items) return loss.sum() / n_items pass From 4ff247ab18600fb3bc474ddd89dfdce76b50c287 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 00:17:26 -0700 Subject: [PATCH 0544/1088] Fix DPO, ORPO --- unsloth/kernels/cross_entropy_loss.py | 1 - unsloth/models/_utils.py | 33 +++++++++++++++++++++++++-- unsloth/save.py | 2 +- 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 4895d27310..1c8f8c8d99 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -375,7 +375,6 @@ def fast_cross_entropy_loss( ) if n_items is None: n_items = torch.count_nonzero(labels != -100) - print(n_items) return loss.sum() / n_items pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 25024be2ec..6611b4f638 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1172,10 +1172,10 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): def patch_gradient_accumulation_fix(Trainer): # Fixes gradient accumulation + import inspect if hasattr(Trainer, "get_batch_samples"): - from inspect import getsource if \ - not 
getsource(Trainer.get_batch_samples).strip()\ + not inspect.getsource(Trainer.get_batch_samples).strip()\ .endswith("return batch_samples, num_items_in_batch"): raise NotImplementedError("Unsloth: Please make a Github issue immediately!!") @@ -1198,4 +1198,33 @@ def patch_gradient_accumulation_fix(Trainer): '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' ) pass + + # Also fix up loss scaling ie negate loss *= self.args.gradient_accumulation_steps + if "num_items_in_batch" not in inspect.signature(Trainer.training_step).parameters: return + + function = inspect.getsource(Trainer.training_step) + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + + # Import all variables that need importing + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in function: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) + + # Accelerate does / self.args.gradient_accumulation_steps internally, so if we already + # summed it up and did the division before hand, we have to negate it. 
+ function = function.replace( + "loss *= self.args.gradient_accumulation_steps", + "if num_items_in_batch is not None: loss *= self.args.gradient_accumulation_steps", + ) + function = function.replace("def training_step", "def _unsloth_training_step", 1) + exec(function, globals()) + Trainer.training_step = _unsloth_training_step pass diff --git a/unsloth/save.py b/unsloth/save.py index ab30e0fea5..ccda79aeee 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -145,7 +145,7 @@ def _free_cached_model(model): def _merge_lora(layer, name): - bias = None + bias = getattr(layer, "bias", None) if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)): # Is LoRA so we need to merge! W, quant_state, A, B, s, bias = get_lora_parameters_bias(layer) From 1c063b4c98a9f63e47ba86d887e996dc8dc12e2a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 00:25:28 -0700 Subject: [PATCH 0545/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3724cdeab2..bf5216b228 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.5" +__version__ = "2024.10.6" __all__ = [ "prepare_model_for_kbit_training", From 4f1c474d4a4d75529677db96a0031fd8d57ab696 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 00:36:37 -0700 Subject: [PATCH 0546/1088] Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine --- unsloth/__init__.py | 10 +++++++--- unsloth/models/_utils.py | 35 ++++++++++++++++++++++++++++++++--- unsloth/save.py | 2 +- unsloth/tokenizer_utils.py | 9 +++++++-- 4 files changed, 47 insertions(+), 9 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index abee9c9e04..458c2696bc 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -62,9 +62,13 @@ try: import torch -except: - raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ - "We have some installation instructions on our Github page.") +except ModuleNotFoundError: + raise ImportError( + "Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"\ + "We have some installation instructions on our Github page." 
+ ) +except Exception as exception: + raise exception pass # Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2214ff80d4..bf5216b228 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.5" +__version__ = "2024.10.6" __all__ = [ "prepare_model_for_kbit_training", @@ -1172,10 +1172,10 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): def patch_gradient_accumulation_fix(Trainer): # Fixes gradient accumulation + import inspect if hasattr(Trainer, "get_batch_samples"): - from inspect import getsource if \ - not getsource(Trainer.get_batch_samples).strip()\ + not inspect.getsource(Trainer.get_batch_samples).strip()\ .endswith("return batch_samples, num_items_in_batch"): raise NotImplementedError("Unsloth: Please make a Github issue immediately!!") @@ -1198,4 +1198,33 @@ def patch_gradient_accumulation_fix(Trainer): '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' ) pass + + # Also fix up loss scaling ie negate loss *= self.args.gradient_accumulation_steps + if "num_items_in_batch" not in inspect.signature(Trainer.training_step).parameters: return + + function = inspect.getsource(Trainer.training_step) + where = function.find("def") + function = function.split("\n") + function = "\n".join(x[where:] for x in function) + + # Import all variables that need importing + import transformers.trainer + items_in_trainer = dir(transformers.trainer) + good_items = [] + for item in items_in_trainer: + # TODO: Support Deepspeed + if item.startswith(("deepspeed", "xm", "met", "smp")): continue + if item in function: good_items.append(item) + pass + exec("from transformers.trainer import (" + ", ".join(x for x in 
good_items) + ")", globals()) + + # Accelerate does / self.args.gradient_accumulation_steps internally, so if we already + # summed it up and did the division before hand, we have to negate it. + function = function.replace( + "loss *= self.args.gradient_accumulation_steps", + "if num_items_in_batch is not None: loss *= self.args.gradient_accumulation_steps", + ) + function = function.replace("def training_step", "def _unsloth_training_step", 1) + exec(function, globals()) + Trainer.training_step = _unsloth_training_step pass diff --git a/unsloth/save.py b/unsloth/save.py index ab30e0fea5..ccda79aeee 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -145,7 +145,7 @@ def _free_cached_model(model): def _merge_lora(layer, name): - bias = None + bias = getattr(layer, "bias", None) if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)): # Is LoRA so we need to merge! W, quant_state, A, B, s, bias = get_lora_parameters_bias(layer) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 4b9fd5e133..8806f1e743 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -914,7 +914,9 @@ def patch_sft_trainer_tokenizer(): check_text = \ "\n"\ - "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ + "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ + "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"\ + "if 'dataset_text_field' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `dataset_text_field` does not exist!')\n"\ "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ @@ -1017,7 +1019,10 @@ def patch_sft_trainer_tokenizer(): for trainer_name in ("SFTTrainer", 
"DPOTrainer", "KTOTrainer"): trainer_text = patch_trl_tokenizer_processing_class(trainer_name) if trainer_text is None: continue - exec(trainer_text, globals()) + try: + exec(trainer_text, globals()) + except: + raise RuntimeError(f"Unsloth: Please file a bug report! Error patching {trainer_name}") exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) pass From f195ee1e6567dcc14620961b353d2c42226a4a54 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 01:11:20 -0700 Subject: [PATCH 0547/1088] Update _utils.py --- unsloth/models/_utils.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bf5216b228..873a2723c2 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -162,6 +162,20 @@ def patch_mistral_nemo_config(config): pass # ============================================= +# ============================================= +# Weird Databricks errors +from transformers.utils import is_openai_available +if is_openai_available(): + try: + from openai import OpenAI + except: + print("Unsloth: OpenAI failed to import - ignoring for now.") + import transformers.utils + def _is_openai_available(): return False + transformers.utils.is_openai_available = _is_openai_available + pass +pass + # ============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb From faf27477aab01ed879b8d5d8d75fc6de2c5c05e5 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Thu, 24 Oct 2024 23:10:52 +0400 Subject: [PATCH 0548/1088] fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> 
original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine --- unsloth/kernels/cross_entropy_loss.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 1c8f8c8d99..57d07af493 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -388,6 +388,13 @@ def fast_cross_entropy_loss( List, Tuple, ) + +try: + from transformers.models.llama.modeling_llama import Unpack, KwargsForCausalLM +except ImportError: + logger.warning("Unsloth: Could not find Unpack, KwargsForCausalLM in LlamaForCausalLM. " + "This is expected if you are using an older version of Transformers (<4.46.0). 
") + import inspect, re function = inspect.getsource(LlamaForCausalLM.forward) function = function.split("\n") From 5961c34a71871aaf18335f0e493c38a02e48d458 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 12:11:38 -0700 Subject: [PATCH 0549/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 57d07af493..f2377d55cc 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -389,11 +389,12 @@ def fast_cross_entropy_loss( Tuple, ) +# Transformers 4.47 need Unpack, KwargsForCausalLM try: from transformers.models.llama.modeling_llama import Unpack, KwargsForCausalLM -except ImportError: - logger.warning("Unsloth: Could not find Unpack, KwargsForCausalLM in LlamaForCausalLM. " - "This is expected if you are using an older version of Transformers (<4.46.0). ") +except: + pass +pass import inspect, re function = inspect.getsource(LlamaForCausalLM.forward) From 7308bb82998322f2cae91ec217efd4e82fff9086 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 12:14:14 -0700 Subject: [PATCH 0550/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 873a2723c2..68e294f157 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.10.6" +__version__ = "2024.10.7" __all__ = [ "prepare_model_for_kbit_training", From 0096e5b07ff1642163168bda6e9c41338edb470c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 12:17:09 -0700 Subject: [PATCH 0551/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 68e294f157..873a2723c2 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.7" +__version__ = "2024.10.6" __all__ = [ "prepare_model_for_kbit_training", From 9ca0bba14bba8f591c808fe1ce50fe2bc439721a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 12:17:21 -0700 Subject: [PATCH 0552/1088] Fix 4.47 issue (#1182) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie 
<97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> --- unsloth/kernels/cross_entropy_loss.py | 8 ++++++++ unsloth/models/_utils.py | 14 ++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 1c8f8c8d99..f2377d55cc 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -388,6 +388,14 @@ def fast_cross_entropy_loss( List, Tuple, ) + +# Transformers 4.47 need Unpack, KwargsForCausalLM +try: + from transformers.models.llama.modeling_llama import Unpack, KwargsForCausalLM +except: + pass +pass + import inspect, re function = inspect.getsource(LlamaForCausalLM.forward) function = function.split("\n") diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bf5216b228..873a2723c2 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -162,6 +162,20 @@ def patch_mistral_nemo_config(config): pass # ============================================= +# ============================================= +# Weird Databricks errors +from transformers.utils import is_openai_available +if is_openai_available(): + try: + from openai import OpenAI + except: + print("Unsloth: OpenAI failed to import - ignoring for now.") + import transformers.utils + def _is_openai_available(): return False + transformers.utils.is_openai_available = _is_openai_available + pass +pass + # 
============================================= # Get Flash Attention v2 if Ampere (RTX 30xx, A100) import bitsandbytes as bnb From 9ca13b836f647e67d6e9ca8bb712403ffaadd607 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 24 Oct 2024 12:17:48 -0700 Subject: [PATCH 0553/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 873a2723c2..68e294f157 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.6" +__version__ = "2024.10.7" __all__ = [ "prepare_model_for_kbit_training", From 67760559e4b4a77add264c0e2814f6a13beea964 Mon Sep 17 00:00:00 2001 From: Datta Nimmaturi Date: Fri, 25 Oct 2024 13:58:12 +0530 Subject: [PATCH 0554/1088] donot upcast lm_head and embeddings to float32 (#1186) --- unsloth/models/llama.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5f20f51209..9c5499dc75 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1955,7 +1955,7 @@ def get_peft_model( print("Unsloth: Casting embed_tokens to float32") model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! @@ -1968,7 +1968,7 @@ def get_peft_model( print("Unsloth: Casting lm_head to float32") model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! 
@@ -2206,7 +2206,7 @@ def get_peft_model( print("Unsloth: Casting embed_tokens to float32") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass @@ -2214,7 +2214,7 @@ def get_peft_model( print("Unsloth: Casting lm_head to float32") assert(hasattr(model.model.lm_head, "modules_to_save")) model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass From 625209e11febd6d91a5edf8dfdfd04906a013c9f Mon Sep 17 00:00:00 2001 From: Datta Nimmaturi Date: Sat, 26 Oct 2024 00:47:54 +0530 Subject: [PATCH 0555/1088] Cleanup upcast logs (#1188) --- unsloth/models/llama.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9c5499dc75..cb0ce2feaf 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1952,7 +1952,7 @@ def get_peft_model( # Offload! # [TODO] First offload lm_head and embed_tokens to CPU (should be disk!!) 
if "embed_tokens" in new_target_modules: - print("Unsloth: Casting embed_tokens to float32") + print("Unsloth: Training embed_tokens in mixed precision to save VRAM") model.model.model.embed_tokens.modules_to_save.default\ .to(device = "cuda:0", non_blocking = True) @@ -1965,7 +1965,7 @@ def get_peft_model( pass if "lm_head" in new_target_modules: - print("Unsloth: Casting lm_head to float32") + print("Unsloth: Training lm_head in mixed precision to save VRAM") model.model.lm_head.modules_to_save.default\ .to(device = "cuda:0", non_blocking = True) @@ -2203,7 +2203,7 @@ def get_peft_model( # Now patch lm_head and embed_tokens if train_embed_tokens: - print("Unsloth: Casting embed_tokens to float32") + print("Unsloth: Training embed_tokens in mixed precision to save VRAM") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) model.model.model.embed_tokens.modules_to_save.default\ .to(device = "cuda:0", non_blocking = True) @@ -2211,7 +2211,7 @@ def get_peft_model( pass if train_lm_head: - print("Unsloth: Casting lm_head to float32") + print("Unsloth: Training lm_head in mixed precision to save VRAM") assert(hasattr(model.model.lm_head, "modules_to_save")) model.model.lm_head.modules_to_save.default\ .to(device = "cuda:0", non_blocking = True) From 2bc189f490a7e1c1f5431c326d0e4bb14858a2e4 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Sat, 26 Oct 2024 02:44:10 +0400 Subject: [PATCH 0556/1088] Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han --- unsloth/models/llama.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cb0ce2feaf..c98feeca1e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py 
@@ -193,6 +193,10 @@ def LlamaAttention_fast_forward_inference( # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + + # Need to do it prior 2 steps before hitting full on short KV cache + # or else error + self.rotary_emb.extend_rope_embedding(Vn, seq_len + 2) cos, sin = self.rotary_emb.get_cached(kv_seq_len) cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) @@ -1122,7 +1126,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1248,7 +1252,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1363,7 +1367,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass From 6f28d160b217dc6145b64f5168cfeeb771b2da38 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 26 Oct 2024 01:20:37 -0700 Subject: [PATCH 0557/1088] Update transformers --- unsloth/models/_utils.py | 2 +- unsloth/tokenizer_utils.py | 2 +- 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 68e294f157..0acc8cd350 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1209,7 +1209,7 @@ def patch_gradient_accumulation_fix(Trainer): "Unsloth: We fixed a gradient accumulation bug, "\ "but it seems like you don't have the latest transformers version!\n"\ "Please update transformers, TRL and unsloth via:\n"\ - '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' + '`pip install --upgrade --no-cache-dir --no-deps unsloth transformers git+https://github.com/huggingface/trl.git`' ) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 8806f1e743..c05485f902 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -975,7 +975,7 @@ def patch_sft_trainer_tokenizer(): " from packaging.version import Version\n"\ " if Version(transformers_version) <= Version('4.45.2'):\n"\ " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\\\n"\ - " '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`')\n"\ + " '`pip install --upgrade --no-cache-dir --no-deps unsloth transformers git+https://github.com/huggingface/trl.git`')\n"\ "except:\n"\ " pass\n"\ "\n\n" From d76eda4f66828d66aa6a1b01a0d03323e43810dd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 26 Oct 2024 01:21:24 -0700 Subject: [PATCH 0558/1088] Bug fixes (#1195) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update 
tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi --- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 26 +++++++++++++++----------- unsloth/tokenizer_utils.py | 2 +- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 68e294f157..0acc8cd350 100644 
--- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1209,7 +1209,7 @@ def patch_gradient_accumulation_fix(Trainer): "Unsloth: We fixed a gradient accumulation bug, "\ "but it seems like you don't have the latest transformers version!\n"\ "Please update transformers, TRL and unsloth via:\n"\ - '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`' + '`pip install --upgrade --no-cache-dir --no-deps unsloth transformers git+https://github.com/huggingface/trl.git`' ) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5f20f51209..c98feeca1e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -193,6 +193,10 @@ def LlamaAttention_fast_forward_inference( # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + + # Need to do it prior 2 steps before hitting full on short KV cache + # or else error + self.rotary_emb.extend_rope_embedding(Vn, seq_len + 2) cos, sin = self.rotary_emb.get_cached(kv_seq_len) cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) @@ -1122,7 +1126,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1248,7 +1252,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, 
device = "cuda:0", dtype = x.dtype) pass pass @@ -1363,7 +1367,7 @@ def get_cached(self, seq_len = None): def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 - self.current_rope_size = math.ceil(seq_len / 8192) * 8192 + self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) pass pass @@ -1952,10 +1956,10 @@ def get_peft_model( # Offload! # [TODO] First offload lm_head and embed_tokens to CPU (should be disk!!) if "embed_tokens" in new_target_modules: - print("Unsloth: Casting embed_tokens to float32") + print("Unsloth: Training embed_tokens in mixed precision to save VRAM") model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! @@ -1965,10 +1969,10 @@ def get_peft_model( pass if "lm_head" in new_target_modules: - print("Unsloth: Casting lm_head to float32") + print("Unsloth: Training lm_head in mixed precision to save VRAM") model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! 
@@ -2203,18 +2207,18 @@ def get_peft_model( # Now patch lm_head and embed_tokens if train_embed_tokens: - print("Unsloth: Casting embed_tokens to float32") + print("Unsloth: Training embed_tokens in mixed precision to save VRAM") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass if train_lm_head: - print("Unsloth: Casting lm_head to float32") + print("Unsloth: Training lm_head in mixed precision to save VRAM") assert(hasattr(model.model.lm_head, "modules_to_save")) model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype = torch.float32, non_blocking = True) + .to(device = "cuda:0", non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 8806f1e743..c05485f902 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -975,7 +975,7 @@ def patch_sft_trainer_tokenizer(): " from packaging.version import Version\n"\ " if Version(transformers_version) <= Version('4.45.2'):\n"\ " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\\\n"\ - " '`pip install --upgrade --no-cache-dir unsloth git+https://github.com/huggingface/transformers.git git+https://github.com/huggingface/trl.git`')\n"\ + " '`pip install --upgrade --no-cache-dir --no-deps unsloth transformers git+https://github.com/huggingface/trl.git`')\n"\ "except:\n"\ " pass\n"\ "\n\n" From 8d46c0d4d6e217a1a372ad61ab32229ab1e26590 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 26 Oct 2024 18:03:15 -0700 Subject: [PATCH 0559/1088] Torch 2.5 --- README.md | 16 ++++++++-- pyproject.toml | 65 +++++++++++++++++++++++++++++++++++++--- 
unsloth/_auto_install.py | 5 ++-- 3 files changed, 77 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index d0e5a97999..0a3c83f3fa 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ pip install --no-deps trl peft accelerate bitsandbytes ### Pip Installation `⚠️Do **NOT** use this if you have Conda.` Pip is a bit more complex since there are dependency issues. The pip command is different for `torch 2.2,2.3,2.4,2.5` and CUDA versions. -For other torch versions, we support `torch211`, `torch212`, `torch220`, `torch230`, `torch240` and for CUDA versions, we support `cu118` and `cu121`. For Ampere devices (A100, H100, RTX3090) and above, use `cu118-ampere` or `cu121-ampere`. +For other torch versions, we support `torch211`, `torch212`, `torch220`, `torch230`, `torch240` and for CUDA versions, we support `cu118` and `cu121` and `cu124`. For Ampere devices (A100, H100, RTX3090) and above, use `cu118-ampere` or `cu121-ampere` or `cu124-ampere`. For example, if you have `torch 2.4` and `CUDA 12.1`, use: ```bash @@ -145,6 +145,12 @@ pip install --upgrade pip pip install "unsloth[cu121-torch240] @ git+https://github.com/unslothai/unsloth.git" ``` +Another example, if you have `torch 2.5` and `CUDA 12.4`, use: +```bash +pip install --upgrade pip +pip install "unsloth[cu124-torch250] @ git+https://github.com/unslothai/unsloth.git" +``` + And other examples: ```bash pip install "unsloth[cu121-ampere-torch240] @ git+https://github.com/unslothai/unsloth.git" @@ -154,6 +160,9 @@ pip install "unsloth[cu118-torch240] @ git+https://github.com/unslothai/unsloth. 
pip install "unsloth[cu121-torch230] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" + +pip install "unsloth[cu121-torch250] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu124-ampere-torch250] @ git+https://github.com/unslothai/unsloth.git" ``` Or, run the below in a terminal to get the **optimal** pip installation command: @@ -164,18 +173,19 @@ wget -qO- https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/_auto Or, run the below manually in a Python REPL: ```python try: import torch -except: raise ImportError("Install torch via `pip install torch`") +except: raise ImportError('Install torch via `pip install torch`') from packaging.version import Version as V v = V(torch.__version__) cuda = str(torch.version.cuda) is_ampere = torch.cuda.get_device_capability()[0] >= 8 -if cuda != "12.1" and cuda != "11.8": raise RuntimeError(f"CUDA = {cuda} not supported!") +if cuda != "12.1" and cuda != "11.8" and cuda != "12.4": raise RuntimeError(f"CUDA = {cuda} not supported!") if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!") elif v <= V('2.1.1'): x = 'cu{}{}-torch211' elif v <= V('2.1.2'): x = 'cu{}{}-torch212' elif v < V('2.3.0'): x = 'cu{}{}-torch220' elif v < V('2.4.0'): x = 'cu{}{}-torch230' elif v < V('2.5.0'): x = 'cu{}{}-torch240' +elif v < V('2.6.0'): x = 'cu{}{}-torch250' else: raise RuntimeError(f"Torch = {v} too new!") x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') diff --git a/pyproject.toml b/pyproject.toml index a2a9c2c939..498fc35da6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,10 +109,28 @@ cu118onlytorch240 = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch240 = 
[ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27.post2-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", +] +cu124onlytorch240 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", +] +cu121onlytorch250 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + 
"xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", +] +cu124onlytorch250 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu118 = [ "unsloth[huggingface]", @@ -174,6 +192,21 @@ cu121-torch240 = [ "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch240]", ] +cu121-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch250]", +] +cu124-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch240]", +] +cu124-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch250]", +] kaggle = [ "unsloth[huggingface]", ] @@ -322,6 +355,30 @@ cu121-ampere-torch240 = [ "ninja", "flash-attn>=2.6.3", ] +cu121-ampere-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch250]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch240 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch240]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch250]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git 
a/unsloth/_auto_install.py b/unsloth/_auto_install.py index 2e6351b8d9..c3b94c6706 100644 --- a/unsloth/_auto_install.py +++ b/unsloth/_auto_install.py @@ -18,13 +18,14 @@ v = V(torch.__version__) cuda = str(torch.version.cuda) is_ampere = torch.cuda.get_device_capability()[0] >= 8 -if cuda != "12.1" and cuda != "11.8": raise RuntimeError(f"CUDA = {cuda} not supported!") +if cuda != "12.1" and cuda != "11.8" and cuda != "12.4": raise RuntimeError(f"CUDA = {cuda} not supported!") if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!") elif v <= V('2.1.1'): x = 'cu{}{}-torch211' elif v <= V('2.1.2'): x = 'cu{}{}-torch212' elif v < V('2.3.0'): x = 'cu{}{}-torch220' elif v < V('2.4.0'): x = 'cu{}{}-torch230' elif v < V('2.5.0'): x = 'cu{}{}-torch240' +elif v < V('2.6.0'): x = 'cu{}{}-torch250' else: raise RuntimeError(f"Torch = {v} too new!") x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") -print(f'pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') \ No newline at end of file +print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') \ No newline at end of file From 49ae6194122b594a7054da0bfd6f387cf720f40f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 26 Oct 2024 18:05:55 -0700 Subject: [PATCH 0560/1088] Update pyproject.toml --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 498fc35da6..fc9c8256ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -257,6 +257,7 @@ colab-new = [ "protobuf<4.0.0", "huggingface_hub", "hf_transfer", + "bitsandbytes>=0.43.3", ] colab-no-deps = [ "accelerate>=0.34.1", From fdf25b758a2df1f6a38dc9a3952bdae15e65f8ff Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Mon, 28 Oct 2024 02:06:45 +0400 Subject: [PATCH 0561/1088] Fix/casting continue pretraining (#1200) * Bring back float32 if float16 instead of bfloat16 * Refactor mixed 
precision handling for lm_head and embed_tokens to ensure correct dtype usage * Fix dtype retrieval for embed_tokens and lm_head in mixed precision training * Fix dtype retrieval for embed_tokens and lm_head to use weight dtype in mixed precision training * Fix dtype handling for embed_tokens and lm_head to ensure correct float32 usage in mixed precision training * Fix dtype assignment for lm_head modules to ensure correct weight dtype usage in mixed precision training --- unsloth/models/llama.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c98feeca1e..cf05d432c7 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1958,8 +1958,9 @@ def get_peft_model( if "embed_tokens" in new_target_modules: print("Unsloth: Training embed_tokens in mixed precision to save VRAM") + dtype = model.model.model.embed_tokens.modules_to_save.default.weight.dtype model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", non_blocking = True) + .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! @@ -1971,8 +1972,9 @@ def get_peft_model( if "lm_head" in new_target_modules: print("Unsloth: Training lm_head in mixed precision to save VRAM") + dtype = model.model.model.lm_head.modules_to_save.default.weight.dtype model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", non_blocking = True) + .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! 
@@ -2209,16 +2211,20 @@ def get_peft_model( if train_embed_tokens: print("Unsloth: Training embed_tokens in mixed precision to save VRAM") assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) + + dtype = model.model.model.embed_tokens.modules_to_save.default.weight.dtype model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", non_blocking = True) + .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) pass if train_lm_head: print("Unsloth: Training lm_head in mixed precision to save VRAM") assert(hasattr(model.model.lm_head, "modules_to_save")) + + dtype = model.model.lm_head.modules_to_save.default.weight.dtype model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", non_blocking = True) + .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) model.model.lm_head.modules_to_save.default.requires_grad_(True) pass From 007efc27514fba28e5b6457d0b47f7fe164c7469 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 15:09:35 -0700 Subject: [PATCH 0562/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0acc8cd350..51c63fc7dd 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -367,6 +367,7 @@ def is_big_gpu(index): "config.suppress_errors = True", # Supress errors for now "config.do_not_emit_runtime_asserts = True", "config.cache_size_limit = 1024", # Flex Attention + "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation ] import torch._inductor.config as config for _try_compile_argument in torch_compile_arguments: From 7083a1d455890ed276af9a7a4aee2b2bd655a2a8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 17:32:26 -0700 Subject: [PATCH 0563/1088] Unk token issues 
--- unsloth/models/_utils.py | 48 ++++++++++++++++++++++++++++++++++++++-- unsloth/models/llama.py | 5 +++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 51c63fc7dd..c0a15592e5 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -487,6 +487,50 @@ def patch_tokenizer(model, tokenizer): bad_pad_token = False pass + # Check if unknown token is broken + fixed_unk_token = False + + if hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None: + + eos_token = getattr(tokenizer, "eos_token", None) + bos_token = getattr(tokenizer, "bos_token", None) + + old_unk_token = tokenizer.unk_token + if old_unk_token == eos_token or old_unk_token == bos_token: + has_broken_unk = True + # Use the unicode replacement characters + possible_replacements = [ + "\uFFFD", # Original replacement char + "\uFFFC", # Another option + "\u2753", # Red Question mark emoji + "\u2754", # White Question mark emoji + "\u00BF", # Inverted question mark + ] + for replacement_char in possible_replacements: + char = tokenizer(replacement_char, add_special_tokens = False) + if len(char) == 1: + # Get actual token representation + try: char = tokenizer.convert_ids_to_tokens(char[0]) + except: continue + tokenizer.unk_token = char + fixed_unk_token = True + break + pass + pass + + if not fixed_unk_token: # Still broken! + raise RuntimeError( + f"Unsloth: Tried fixing the unk_token = {old_unk_token}, but couldn't!" + ) + pass + + logger.warning_once( + f"Unsloth: unk_token = {old_unk_token} is the same as the EOS or BOS tokens.\n"\ + f"We fixed it by changing it to {tokenizer.unk_token}." 
+ ) + pass + pass + if bad_pad_token: # Find a better pad token added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] @@ -534,8 +578,8 @@ def patch_tokenizer(model, tokenizer): pass possible_pad_token = final_pad_token - # Try unk_token - if possible_pad_token is None and hasattr(tokenizer, "unk_token"): + # Try unk_token if it wasn't fixed + if possible_pad_token is None and not fixed_unk_token and hasattr(tokenizer, "unk_token"): possible_pad_token = tokenizer.unk_token pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cf05d432c7..9c9ea53752 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1910,6 +1910,11 @@ def get_peft_model( ): transformers_set_seed(random_state) + if type(r) is not int: + raise TypeError(f"Unsloth: Rank of {str(r)} must be an integer.") + if r <= 0: + raise TypeError(f"Unsloth: Rank of {str(r)} must be larger than 0.") + if isinstance(model, PeftModelForCausalLM): # Check if exactly the same and then pass through! 
assert(hasattr(model, "peft_config")) From 3acc5afad3e7e93332c6252781a4ac85ea0267a5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 17:34:33 -0700 Subject: [PATCH 0564/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c0a15592e5..d92a938eee 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -507,7 +507,7 @@ def patch_tokenizer(model, tokenizer): "\u00BF", # Inverted question mark ] for replacement_char in possible_replacements: - char = tokenizer(replacement_char, add_special_tokens = False) + char = tokenizer(replacement_char, add_special_tokens = False).input_ids if len(char) == 1: # Get actual token representation try: char = tokenizer.convert_ids_to_tokens(char[0]) From 1c044da660810c422b32041cbfbd1519ff2db6e9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 19:06:57 -0700 Subject: [PATCH 0565/1088] Fix pad token --- unsloth/models/_utils.py | 27 ++++++++++++++-- unsloth/models/gemma.py | 7 ++--- unsloth/models/gemma2.py | 7 ++--- unsloth/models/llama.py | 68 ++++++++++++++++++++++++++++++---------- 4 files changed, 80 insertions(+), 29 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d92a938eee..46a5f45cfa 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -525,7 +525,7 @@ def patch_tokenizer(model, tokenizer): pass logger.warning_once( - f"Unsloth: unk_token = {old_unk_token} is the same as the EOS or BOS tokens.\n"\ + f"Unsloth: unk_token = {old_unk_token} is the same as the EOS or BOS tokens. "\ f"We fixed it by changing it to {tokenizer.unk_token}." 
) pass @@ -610,13 +610,34 @@ def patch_tokenizer(model, tokenizer): tokenizer.add_special_tokens({"pad_token" : possible_pad_token}) tokenizer.pad_token = possible_pad_token if model is not None: - model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + + # Edit all config with new pad token + current_model = model + while hasattr(model, "model") and hasattr(model, "config"): + current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + current_model = current_model.model + if hasattr(model, "model") and hasattr(model, "config"): + current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + pass + + # Generation edit pad token if getattr(model, "generation_config") is not None: model.generation_config.update(pad_token_id = tokenizer.pad_token_id) else: if model is not None: + if model.config.pad_token_id is None: - model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + + # Edit all config with new pad token + current_model = model + while hasattr(model, "model") and hasattr(model, "config"): + current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + current_model = model + if hasattr(model, "model") and hasattr(model, "config"): + current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) + pass + + # Generation edit pad token if getattr(model, "generation_config") is not None: model.generation_config.update(pad_token_id = tokenizer.pad_token_id) pass diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 45f14c1131..1ec116b2ea 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -339,10 +339,7 @@ def pre_patch(): @staticmethod - def post_patch(model): - # Patch model for Gemma - layers = model.model.layers - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? 
# Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) @@ -425,6 +422,6 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass pass diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index bf40ea8a27..54d8f628cb 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -490,10 +490,7 @@ def pre_patch(): @staticmethod - def post_patch(model): - # Patch model for Gemma - layers = model.model.layers - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) @@ -576,6 +573,6 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9c9ea53752..044ea6e244 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1621,7 +1621,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model = model_patcher.post_patch(model) + model, tokenizer = model_patcher.post_patch(model, tokenizer) # Patch up QKV / O and MLP for idx, layer in enumerate(model.model.layers): @@ -1827,27 +1827,63 @@ def from_pretrained( @staticmethod - def post_patch(model): - # Patch model - layers = model.model.layers - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2. - model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) + try: old_input_embedding = model.get_input_embeddings ().weight + except: return model, tokenizer + + # Maybe not all models have a lm_head? 
+ try: old_output_embedding = model.get_output_embeddings().weight + except: old_output_embedding = torch.zeros(0) + + # Check for tied weights as well + is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() + + # Check pad token's id -> we need to expand the embedding + if len(tokenizer) > old_input_embedding.shape[0]: + # Workaround randomnly fixes it for torch versions < 2. + requires_grad = old_input_embedding.requires_grad + old_input_embedding.requires_grad_(False) + old_input_embedding.resize_(len(tokenizer), old_input_embedding.shape[1]) + old_input_embedding.requires_grad_(requires_grad) + + # Fix up all vocab sizes + current_model = model + while hasattr(model, "model") and hasattr(model, "config"): + if hasattr(model.config, "vocab_size"): + current_model.config.update({"vocab_size" : len(tokenizer)}) + current_model = current_model.model + if hasattr(model, "model") and hasattr(model, "config"): + if hasattr(model.config, "vocab_size"): + current_model.config.update({"vocab_size" : len(tokenizer)}) + pass + pass + + model.set_input_embeddings( + torch.nn.Embedding.from_pretrained( + old_input_embedding, + padding_idx = getattr(model.config, "pad_token_id", None), + ) + ) model.config.update({"unsloth_version" : __version__}) # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.get_output_embeddings().weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head + if old_output_embedding.numel() != 0: + requires_grad = old_output_embedding.requires_grad + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = old_output_embedding if not is_tied else old_input_embedding + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + lm_head.weight.requires_grad_(requires_grad) + model.lm_head = lm_head + correct_dtype = 
lm_head.weight.dtype + else: + correct_dtype = old_input_embedding.dtype + pass # Also patch all dtypes - BnB seems to not allocate the correct type? # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - for name, module in model.named_modules(): if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): weight = module.weight @@ -1883,7 +1919,7 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass From 5286f1972560ef1b106e07e22132cd771143245f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 19:08:14 -0700 Subject: [PATCH 0566/1088] Update llama.py --- unsloth/models/llama.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 044ea6e244..a53f52fe27 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1869,14 +1869,19 @@ def post_patch(model, tokenizer): # We also do this for the lm_head if old_output_embedding.numel() != 0: + requires_grad = old_output_embedding.requires_grad lm_head = torch.nn.Linear(1, 1, bias = None) del lm_head.weight + lm_head.weight = old_output_embedding if not is_tied else old_input_embedding lm_head.in_features = lm_head.weight.shape[1] lm_head.out_features = lm_head.weight.shape[0] + lm_head.weight.requires_grad_(requires_grad) - model.lm_head = lm_head + model.set_output_embeddings(lm_head) + if hasattr(model, "lm_head"): model.lm_head = lm_head + correct_dtype = lm_head.weight.dtype else: correct_dtype = old_input_embedding.dtype From 02437a839105c5556f672b122d033f4de22fe095 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 19:09:27 -0700 Subject: [PATCH 0567/1088] Typo --- unsloth/models/_utils.py | 8 ++++---- unsloth/models/llama.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 46a5f45cfa..b2f4b5c66e 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -613,10 +613,10 @@ def patch_tokenizer(model, tokenizer): # Edit all config with new pad token current_model = model - while hasattr(model, "model") and hasattr(model, "config"): + while hasattr(current_model, "model") and hasattr(current_model, "config"): current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) current_model = current_model.model - if hasattr(model, "model") and hasattr(model, "config"): + if hasattr(current_model, "model") and hasattr(current_model, "config"): current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) pass @@ -630,10 +630,10 @@ def patch_tokenizer(model, tokenizer): # Edit all config with new pad token current_model = model - while hasattr(model, "model") and hasattr(model, "config"): + while hasattr(current_model, "model") and hasattr(current_model, "config"): current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) current_model = model - if hasattr(model, "model") and hasattr(model, "config"): + if hasattr(current_model, "model") and hasattr(current_model, "config"): current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a53f52fe27..65e2d773e8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1849,12 +1849,12 @@ def post_patch(model, tokenizer): # Fix up all vocab sizes current_model = model - while hasattr(model, "model") and hasattr(model, "config"): - if hasattr(model.config, "vocab_size"): + while hasattr(current_model, "model") and hasattr(current_model, "config"): + if hasattr(current_model.config, "vocab_size"): current_model.config.update({"vocab_size" : len(tokenizer)}) current_model = current_model.model - if hasattr(model, "model") and hasattr(model, "config"): - if hasattr(model.config, "vocab_size"): + if hasattr(current_model, "model") and hasattr(current_model, "config"): + if 
hasattr(current_model.config, "vocab_size"): current_model.config.update({"vocab_size" : len(tokenizer)}) pass pass From 9d07be077b3355b55dcf93098d0afe2591e67750 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 22:10:59 -0700 Subject: [PATCH 0568/1088] ignored labels --- unsloth/models/gemma.py | 13 ++++++++++++- unsloth/models/gemma2.py | 13 ++++++++++++- unsloth/models/llama.py | 27 +++++++++++++-------------- 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 1ec116b2ea..095c8cdd6b 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -339,7 +339,18 @@ def pre_patch(): @staticmethod - def post_patch(model, tokenizer): + def post_patch(model, tokenizer, max_seq_length): + # Add max_seq_length to all modules + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model.extra_ignored_labels = extra_ignored_labels + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + internal_model.extra_ignored_labels = extra_ignored_labels + # Torch.compile fails on embedding matrix?? 
# Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 54d8f628cb..231d8f2661 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -490,7 +490,18 @@ def pre_patch(): @staticmethod - def post_patch(model, tokenizer): + def post_patch(model, tokenizer, max_seq_length): + # Add max_seq_length to all modules + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model.extra_ignored_labels = extra_ignored_labels + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + internal_model.extra_ignored_labels = extra_ignored_labels + # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 65e2d773e8..4712d9ca05 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1621,7 +1621,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model, tokenizer = model_patcher.post_patch(model, tokenizer) + model, tokenizer = model_patcher.post_patch(model, tokenizer, max_position_embeddings) # Patch up QKV / O and MLP for idx, layer in enumerate(model.model.layers): @@ -1827,7 +1827,18 @@ def from_pretrained( @staticmethod - def post_patch(model, tokenizer): + def post_patch(model, tokenizer, max_seq_length): + # Add max_seq_length to all modules + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + 
internal_model.extra_ignored_labels = extra_ignored_labels + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + internal_model.extra_ignored_labels = extra_ignored_labels + # Torch.compile fails on embedding matrix?? try: old_input_embedding = model.get_input_embeddings ().weight except: return model, tokenizer @@ -2459,18 +2470,6 @@ def patch_peft_model( ) patch_saving_functions(model) - # Patch cross entropy loss labels - # Fixes https://github.com/unslothai/unsloth/issues/10 - max_seq_length = model.max_seq_length - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - model.model.extra_ignored_labels = extra_ignored_labels - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length - # Patch tokenizer to pad to the right internal_model = model while hasattr(internal_model, "model"): From a8b37a320d3ed72fceff9c08dd1e534eaad703fa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 27 Oct 2024 22:18:05 -0700 Subject: [PATCH 0569/1088] Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. 
--- unsloth/models/gemma.py | 13 +------------ unsloth/models/gemma2.py | 13 +------------ unsloth/models/llama.py | 27 ++++++++++++++------------- 3 files changed, 16 insertions(+), 37 deletions(-) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 095c8cdd6b..1ec116b2ea 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -339,18 +339,7 @@ def pre_patch(): @staticmethod - def post_patch(model, tokenizer, max_seq_length): - # Add max_seq_length to all modules - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 231d8f2661..54d8f628cb 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -490,18 +490,7 @@ def pre_patch(): @staticmethod - def post_patch(model, tokenizer, max_seq_length): - # Add max_seq_length to all modules - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? 
# Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 4712d9ca05..65e2d773e8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1621,7 +1621,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model, tokenizer = model_patcher.post_patch(model, tokenizer, max_position_embeddings) + model, tokenizer = model_patcher.post_patch(model, tokenizer) # Patch up QKV / O and MLP for idx, layer in enumerate(model.model.layers): @@ -1827,18 +1827,7 @@ def from_pretrained( @staticmethod - def post_patch(model, tokenizer, max_seq_length): - # Add max_seq_length to all modules - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length - internal_model.extra_ignored_labels = extra_ignored_labels - + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? 
try: old_input_embedding = model.get_input_embeddings ().weight except: return model, tokenizer @@ -2470,6 +2459,18 @@ def patch_peft_model( ) patch_saving_functions(model) + # Patch cross entropy loss labels + # Fixes https://github.com/unslothai/unsloth/issues/10 + max_seq_length = model.max_seq_length + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + model.model.extra_ignored_labels = extra_ignored_labels + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_seq_length + # Patch tokenizer to pad to the right internal_model = model while hasattr(internal_model, "model"): From 2dfdba3493b8be24b054608a847a85e99cc2f253 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 01:10:23 -0700 Subject: [PATCH 0570/1088] More patching --- unsloth/kernels/__init__.py | 2 ++ unsloth/kernels/cross_entropy_loss.py | 49 +++++++++++++++++++++++++++ unsloth/models/_utils.py | 45 +++++++++++++++++++++--- 3 files changed, 92 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 3e55332c80..6357ddaf87 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -16,6 +16,8 @@ fast_cross_entropy_loss, patch_llama_for_causal_lm, unpatch_llama_for_causal_lm, + patch_transformers_losses, + patch_loss_function, ) from .rms_layernorm import ( fast_rms_layernorm, diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f2377d55cc..7ec1c258a9 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -470,3 +470,52 @@ def unpatch_llama_for_causal_lm(): transformers.models.llama.modeling_llama.LlamaForCausalLM = LlamaForCausalLM return pass + + +@torch._disable_dynamo +def UnslothForCausalLMLoss( + logits, labels, vocab_size: int, num_items_in_batch: int = 
None, ignore_index: int = -100, **kwargs +): + shift_logits = logits + shift_labels = torch.empty_like(labels) + shift_labels[..., :-1] = labels[..., 1:] + shift_labels[..., -1] = -100 + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + n_items = num_items_in_batch, + ) + return loss +pass + + +def patch_transformers_losses(): + import re + try: + import transformers.loss.loss_utils + except: + logger.warning_once("Unsloth: Cannot patch loss functions - update transformers for faster modules!") + + import transformers.modeling_utils + LOSS_MAPPING = transformers.loss.loss_utils.LOSS_MAPPING + LOSS_MAPPING["ForCausalLM"] = UnslothForCausalLMLoss + + # Remove @property and @lru_cache + if hasattr(transformers.modeling_utils.PreTrainedModel.loss_function, "fget"): + transformers.modeling_utils.PreTrainedModel.loss_function = \ + transformers.modeling_utils.PreTrainedModel.loss_function.fget.__wrapped__ + pass +pass + + +def patch_loss_function(model): + try: + # model.loss_function starts as a dict to a loss fx + # We invoke it to save it + model.loss_function = model.loss_function() + except: + # Failed means we already invoked it, and we need args to the loss fx + pass + pass + return model +pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b2f4b5c66e..bd40fbd2eb 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -44,6 +44,8 @@ "patch_gradient_checkpointing", "unpatch_gradient_checkpointing", "patch_gradient_accumulation_fix", + "patch_compiling_bitsandbytes", + "patch_regional_compilation", ] import torch @@ -683,8 +685,19 @@ def patch_tokenizer(model, tokenizer): ) pass pass + +# Also disable compiling on bitsandbytes +def patch_compiling_bitsandbytes(): + import peft.tuners.lora.bnb + peft.tuners.lora.bnb.Linear4bit.forward = \ + torch._disable_dynamo(peft.tuners.lora.bnb.Linear4bit.forward) + peft.tuners.lora.bnb.Linear8bit.forward = \ + 
torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bit.forward) + return +pass # ============================================= + import psutil def _get_statistics(statistics = None, force_download = True): # We log some basic stats about which environment is being used. @@ -896,15 +909,39 @@ def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, return Unsloth_Offloaded_Gradient_Checkpointer.apply(function, *args) pass - import torch.utils -old_checkpoint = torch.utils.checkpoint def patch_gradient_checkpointing(): - torch.utils.checkpoint = unsloth_offloaded_gradient_checkpoint + if torch.utils.checkpoint.checkpoint.__name__ == "unsloth_offloaded_gradient_checkpoint": return + torch.utils.checkpoint._old_checkpoint = torch.utils.checkpoint.checkpoint + torch.utils.checkpoint.checkpoint = unsloth_offloaded_gradient_checkpoint pass def unpatch_gradient_checkpointing(): - torch.utils.checkpoint = old_checkpoint + if hasattr(torch.utils.checkpoint, "_old_checkpoint"): + torch.utils.checkpoint.checkpoint = torch.utils.checkpoint._old_checkpoint + del torch.utils.checkpoint._old_checkpoint + pass +pass + + +# ============================================= +# Regional torch 2.5 Recompilation - weirdly very slow?? 
+def patch_regional_compilation(): + if torch.nn.ModuleList.__name__ == "UnslothModuleList": return + # Only works for torch 2.5 + if Version(torch.__version__) < Version("2.5.0"): return + + old_module_list = torch.nn.ModuleList + + def UnslothModuleList(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0 and type(args[0]) is list: + args = [old_module_list([torch.compile(x, dynamic = True, options = torch_compile_options, fullgraph = False) for x in args[0]])] + return old_module_list(*args, **kwargs) + pass + UnslothModuleList.__doc__ = old_module_list.__doc__ + + torch.nn.ModuleList = UnslothModuleList + return pass From 5541ab48fe435612b1a14078f3181faa57db6e5e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 01:12:33 -0700 Subject: [PATCH 0571/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bd40fbd2eb..692f48488c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -691,8 +691,8 @@ def patch_compiling_bitsandbytes(): import peft.tuners.lora.bnb peft.tuners.lora.bnb.Linear4bit.forward = \ torch._disable_dynamo(peft.tuners.lora.bnb.Linear4bit.forward) - peft.tuners.lora.bnb.Linear8bit.forward = \ - torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bit.forward) + peft.tuners.lora.bnb.Linear8bitLt.forward = \ + torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bitLt.forward) return pass # ============================================= From c6e9af2e5b69abc8cb332abef3eb101b0d33c63e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 10:41:31 -0700 Subject: [PATCH 0572/1088] Update _utils.py --- unsloth/models/_utils.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 692f48488c..97b2ea7b7b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -688,11 +688,15 @@ def 
patch_tokenizer(model, tokenizer): # Also disable compiling on bitsandbytes def patch_compiling_bitsandbytes(): - import peft.tuners.lora.bnb - peft.tuners.lora.bnb.Linear4bit.forward = \ - torch._disable_dynamo(peft.tuners.lora.bnb.Linear4bit.forward) - peft.tuners.lora.bnb.Linear8bitLt.forward = \ - torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bitLt.forward) + # import peft.tuners.lora.bnb + # peft.tuners.lora.bnb.Linear4bit.forward = \ + # torch._disable_dynamo(peft.tuners.lora.bnb.Linear4bit.forward) + # peft.tuners.lora.bnb.Linear8bitLt.forward = \ + # torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bitLt.forward) + # return + import bitsandbytes.nn.modules + bitsandbytes.nn.modules.Linear4bit.forward = \ + torch._disable_dynamo(bitsandbytes.nn.modules.Linear4bit.forward) return pass # ============================================= From cac56d112b08befe1355deabbf6c856230eb9d5d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 14:30:06 -0700 Subject: [PATCH 0573/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 7ec1c258a9..4ff4ec152a 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -349,7 +349,7 @@ def backward(ctx, dlosses): pass -@torch._disable_dynamo +# @torch._disable_dynamo def fast_cross_entropy_loss( logits, labels, @@ -472,7 +472,7 @@ def unpatch_llama_for_causal_lm(): pass -@torch._disable_dynamo +# @torch._disable_dynamo def UnslothForCausalLMLoss( logits, labels, vocab_size: int, num_items_in_batch: int = None, ignore_index: int = -100, **kwargs ): @@ -495,7 +495,9 @@ def patch_transformers_losses(): import transformers.loss.loss_utils except: logger.warning_once("Unsloth: Cannot patch loss functions - update transformers for faster modules!") - + return + pass + import transformers.modeling_utils 
LOSS_MAPPING = transformers.loss.loss_utils.LOSS_MAPPING LOSS_MAPPING["ForCausalLM"] = UnslothForCausalLMLoss From 5ee1189657fdc50b79c79a5659fb07e73f10de59 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 14:47:11 -0700 Subject: [PATCH 0574/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 4ff4ec152a..2db59bbae7 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -25,7 +25,8 @@ }) @triton.jit def _cross_entropy_forward( - logits_ptr, logits_row_stride, + logits_ptr, + logits_row_stride : tl.constexpr(tl.int64), loss_ptr, logsumexp_ptr, labels_ptr, @@ -57,7 +58,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * logits_row_stride loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -97,7 +98,8 @@ def _cross_entropy_forward( }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr, logits_row_stride, + logits_ptr, + logits_row_stride : tl.constexpr(tl.int64), loss_ptr, logsumexp_ptr, labels_ptr, @@ -135,7 +137,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * logits_row_stride loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -179,7 +181,8 @@ def _chunked_cross_entropy_forward( }) @triton.jit def _cross_entropy_backward( - logits_ptr, logits_row_stride, + logits_ptr, + logits_row_stride : tl.constexpr(tl.int64), dloss_ptr, dloss_row_stride, logsumexp_ptr, labels_ptr, @@ -208,7 +211,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = 
tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * logits_row_stride dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE @@ -497,7 +500,7 @@ def patch_transformers_losses(): logger.warning_once("Unsloth: Cannot patch loss functions - update transformers for faster modules!") return pass - + import transformers.modeling_utils LOSS_MAPPING = transformers.loss.loss_utils.LOSS_MAPPING LOSS_MAPPING["ForCausalLM"] = UnslothForCausalLMLoss From 85a5f6098a1bac8af5a3482d2a1569069111c84d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 28 Oct 2024 15:01:04 -0700 Subject: [PATCH 0575/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 2db59bbae7..debd037b64 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -269,7 +269,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): div, mod = divmod(vocab_size, MAX_FUSED_SIZE) n_chunks = div + (mod != 0) - losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + losses = torch.empty(n_rows, dtype = torch.float32, device = logits.device) DO_SOFTCAPPING = (logit_softcapping != 0) DO_LOGIT_SCALING = (logit_scaling != 0) @@ -277,7 +277,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) - logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = logits.device) _cross_entropy_forward[(n_rows,)]( logits, logits.stride(0), @@ -294,7 +294,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): 
) else: # For large vocabs > 65336 like Gemma 256K - logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda:0") + logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = logits.device) _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( logits, logits.stride(0), From 20e38eda6b6ec2a97e8cdb62c84b6747179c659c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 00:43:03 -0700 Subject: [PATCH 0576/1088] Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 --- unsloth/save.py | 66 ++++++++++++++++++++++++-------------- unsloth/tokenizer_utils.py | 7 +++- 2 files changed, 48 insertions(+), 25 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index ccda79aeee..b4c6b499cf 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -49,6 +49,7 @@ keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +KAGGLE_TMP = "/tmp" del keynames # Weights @@ -447,13 +448,20 @@ def unsloth_save_model( if push_to_hub and "/" in save_directory: # +1 solves absolute path issues - username = save_directory[:save_directory.find("/")] - new_save_directory = save_directory[save_directory.find("/")+1:] - - logger.warning_once( - f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"\ - f"We shall truncate {save_directory} to {new_save_directory}" - ) + new_save_directory = save_directory + username = new_save_directory[:new_save_directory.find("/")] + new_save_directory = new_save_directory[new_save_directory.find("/")+1:] + if IS_KAGGLE_ENVIRONMENT: + new_save_directory = os.path.join(KAGGLE_TMP, 
new_save_directory[new_save_directory.find("/")+1:]) + logger.warning_once( + "Unsloth: You are pushing to hub in Kaggle environment.\n"\ + f"To save memory, we shall move {save_directory} to {new_save_directory}" + ) + else: + logger.warning_once( + f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"\ + f"We shall truncate {save_directory} to {new_save_directory}" + ) save_pretrained_settings["save_directory"] = new_save_directory tokenizer_save_settings ["save_directory"] = new_save_directory @@ -507,6 +515,10 @@ def unsloth_save_model( f"{round(max_ram/1024/1024/1024, 2)} out of "\ f"{round(psutil.virtual_memory().total/1024/1024/1024, 2)} RAM for saving.") + # Move temporary_location to /tmp in Kaggle + if IS_KAGGLE_ENVIRONMENT: + temporary_location = os.path.join(KAGGLE_TMP, temporary_location) + # Max directory for disk saving if not os.path.exists(temporary_location): os.makedirs(temporary_location) @@ -708,7 +720,7 @@ def unsloth_save_model( print("Done.") if push_to_hub and hasattr(model, "config"): - print(f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/')}") + print(f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/').split('/')[-1]}") pass save_pretrained_settings["state_dict"] = None @@ -1108,14 +1120,17 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): if IS_KAGGLE_ENVIRONMENT: - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You are in a Kaggle environment, which might be the reason this is failing.\n"\ - "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ - "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ - "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ - "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." 
- ) + if not Path(final_location).resolve().is_relative_to(Path('/tmp').resolve()): + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space in the working directory.\n"\ + "Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "You can try saving it to the `/tmp` directory for larger disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) else: raise RuntimeError( f"Unsloth: Quantization failed for {final_location}\n"\ @@ -1156,14 +1171,17 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): if IS_KAGGLE_ENVIRONMENT: - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You are in a Kaggle environment, which might be the reason this is failing.\n"\ - "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ - "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ - "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ - "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." 
- ) + if not Path(final_location).resolve().is_relative_to(Path('/tmp').resolve()): + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space in the working directory.\n"\ + "Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "You can try saving it to the `/tmp` directory for larger disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) else: raise RuntimeError( "Unsloth: Quantization failed! You might have to compile llama.cpp yourself, then run this again.\n"\ diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index c05485f902..c639dbf1a0 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -64,6 +64,7 @@ keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +KAGGLE_TMP = "/tmp" del keynames @@ -470,8 +471,12 @@ def _load_correct_tokenizer( cache_dir = "huggingface_tokenizers_cache", fix_tokenizer = True, ): - if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT: + if IS_COLAB_ENVIRONMENT: cache_dir = cache_dir + elif IS_KAGGLE_ENVIRONMENT: + # /tmp of Kaggle seems has a 80GB limit! 
+ # Let's utilize them + cache_dir = os.path.join(KAGGLE_TMP, cache_dir) else: cache_dir = None pass From 7e1692ace19627e0cff5d8ece58b71a59d78c651 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:11:43 -0700 Subject: [PATCH 0577/1088] Bug fixes --- unsloth/__init__.py | 14 +++++++------- unsloth/kernels/cross_entropy_loss.py | 10 +++++++--- unsloth/models/_utils.py | 6 +++++- unsloth/models/llama.py | 1 + 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 458c2696bc..109e1c6d2f 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -27,13 +27,6 @@ # pass # pass -# Check for unsloth_zoo -try: - import unsloth_zoo -except: - raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") -pass - # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! 
@@ -165,6 +158,13 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 pass pass +# Check for unsloth_zoo +try: + import unsloth_zoo +except: + raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") +pass + from .models import * from .save import * from .chat_templates import * diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index debd037b64..a2337a14d9 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -17,7 +17,7 @@ import torch from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh from transformers.models.llama.modeling_llama import logger - +from packaging.version import Version @triton.heuristics({ "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], @@ -352,7 +352,6 @@ def backward(ctx, dlosses): pass -# @torch._disable_dynamo def fast_cross_entropy_loss( logits, labels, @@ -380,6 +379,9 @@ def fast_cross_entropy_loss( n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items pass +if Version(torch.__version__) < Version("2.5.0"): + fast_cross_entropy_loss = torch._disable_dynamo(fast_cross_entropy_loss) +pass from transformers.models.llama.modeling_llama import ( @@ -475,7 +477,6 @@ def unpatch_llama_for_causal_lm(): pass -# @torch._disable_dynamo def UnslothForCausalLMLoss( logits, labels, vocab_size: int, num_items_in_batch: int = None, ignore_index: int = -100, **kwargs ): @@ -490,6 +491,9 @@ def UnslothForCausalLMLoss( ) return loss pass +if Version(torch.__version__) < Version("2.5.0"): + UnslothForCausalLMLoss = torch._disable_dynamo(UnslothForCausalLMLoss) +pass def patch_transformers_losses(): diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 97b2ea7b7b..a39bc58db0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -760,7 +760,9 @@ def get_statistics(): # We log some basic stats about which environment is being used. 
# We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. - # You can disable this by commenting the below out + # You can disable this by setting UNSLOTH_DISABLE_STATISTICS + import os + if "UNSLOTH_DISABLE_STATISTICS" in os.environ: return from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled disabled = False if not are_progress_bars_disabled(): @@ -1295,6 +1297,7 @@ def patch_gradient_accumulation_fix(Trainer): # Fixes gradient accumulation import inspect if hasattr(Trainer, "get_batch_samples"): + if Trainer.get_batch_samples.__name__ == "_unsloth_get_batch_samples": return if \ not inspect.getsource(Trainer.get_batch_samples).strip()\ .endswith("return batch_samples, num_items_in_batch"): @@ -1321,6 +1324,7 @@ def patch_gradient_accumulation_fix(Trainer): pass # Also fix up loss scaling ie negate loss *= self.args.gradient_accumulation_steps + if Trainer.training_step.__name__ == "_unsloth_training_step": return if "num_items_in_batch" not in inspect.signature(Trainer.training_step).parameters: return function = inspect.getsource(Trainer.training_step) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 65e2d773e8..c0175bbfaf 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1518,6 +1518,7 @@ def from_pretrained( pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking From 6bef8f1c3cc2e0a97166c92b0d348f0753bc3ceb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:16:00 -0700 Subject: [PATCH 0578/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fc9c8256ad..8922cc7c8d 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ huggingface = [ "unsloth_zoo", "packaging", "tyro", - "transformers>=4.44.2", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -247,7 +247,7 @@ colab-new = [ "unsloth_zoo", "packaging", "tyro", - "transformers>=4.44.2", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From 9ccbc0ed1fb32107a848a3e11f05b89f65107d18 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:35:01 -0700 Subject: [PATCH 0579/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a39bc58db0..35b56f26a9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -910,7 +910,7 @@ def backward(ctx, dY): pass -@torch._disable_dynamo +# @torch._disable_dynamo def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, **kwargs): return Unsloth_Offloaded_Gradient_Checkpointer.apply(function, *args) pass From 95ecc5795dcc1de86ac1bca5a55dac6ae2c48f11 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:44:05 -0700 Subject: [PATCH 0580/1088] Update __init__.py --- unsloth/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 109e1c6d2f..23f54d213e 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -53,6 +53,13 @@ # Reduce VRAM usage by reducing fragmentation os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +# Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) +keynames = "\n" + "\n".join(os.environ.keys()) +if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" + print("Hello") +pass + try: import torch except ModuleNotFoundError: @@ -64,12 +71,6 @@ raise exception pass -# Hugging Face Hub faster downloads (only enable 
during Colab and Kaggle sessions) -keynames = "\n" + "\n".join(os.environ.keys()) -if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: - os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -pass - # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") From 5f5fef8075f5df30d2f7b72ac57e167c3354ebe9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:47:21 -0700 Subject: [PATCH 0581/1088] Update __init__.py --- unsloth/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 23f54d213e..91ec460094 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -57,7 +57,6 @@ keynames = "\n" + "\n".join(os.environ.keys()) if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" - print("Hello") pass try: From 784dd13da70ed1c7c0f58b506c85f24dc024fa8c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:51:10 -0700 Subject: [PATCH 0582/1088] Update _utils.py --- unsloth/models/_utils.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 35b56f26a9..0539e255ea 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -371,16 +371,16 @@ def is_big_gpu(index): "config.cache_size_limit = 1024", # Flex Attention "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation ] -import torch._inductor.config as config -for _try_compile_argument in torch_compile_arguments: - try: exec(_try_compile_argument) - except: pass -pass -import torch._dynamo.config as config -for _try_dynamo_argument in torch_dynamo_arguments: - try: exec(_try_dynamo_argument) - except: pass -pass +# import torch._inductor.config as config +# for _try_compile_argument in torch_compile_arguments: +# try: exec(_try_compile_argument) +# except: pass +# pass +# import torch._dynamo.config as config +# 
for _try_dynamo_argument in torch_dynamo_arguments: +# try: exec(_try_dynamo_argument) +# except: pass +# pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, From 5b75e21a4b8bcda18e4b4d2d99beef041bf7dd3d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 13:54:11 -0700 Subject: [PATCH 0583/1088] Update _utils.py --- unsloth/models/_utils.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0539e255ea..a3cb3cda8d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -366,21 +366,21 @@ def is_big_gpu(index): # Torch dynamo arguments torch_dynamo_arguments = [ "config.accumulated_cache_size_limit = 1024", # Bump up a bit from 256 - "config.suppress_errors = True", # Supress errors for now + # "config.suppress_errors = True", # Supress errors for now "config.do_not_emit_runtime_asserts = True", "config.cache_size_limit = 1024", # Flex Attention "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation ] -# import torch._inductor.config as config -# for _try_compile_argument in torch_compile_arguments: -# try: exec(_try_compile_argument) -# except: pass -# pass -# import torch._dynamo.config as config -# for _try_dynamo_argument in torch_dynamo_arguments: -# try: exec(_try_dynamo_argument) -# except: pass -# pass +import torch._inductor.config as config +for _try_compile_argument in torch_compile_arguments: + try: exec(_try_compile_argument) + except: pass +pass +import torch._dynamo.config as config +for _try_dynamo_argument in torch_dynamo_arguments: + try: exec(_try_dynamo_argument) + except: pass +pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, From 74ab93c9da51a353b558e16ad521641f37b152b3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 14:00:03 -0700 Subject: [PATCH 0584/1088] Update _utils.py --- unsloth/models/_utils.py | 12 ++++++------ 1 
file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a3cb3cda8d..a13723c9fb 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -366,7 +366,7 @@ def is_big_gpu(index): # Torch dynamo arguments torch_dynamo_arguments = [ "config.accumulated_cache_size_limit = 1024", # Bump up a bit from 256 - # "config.suppress_errors = True", # Supress errors for now + "config.suppress_errors = True", # Supress errors for now "config.do_not_emit_runtime_asserts = True", "config.cache_size_limit = 1024", # Flex Attention "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation @@ -376,11 +376,11 @@ def is_big_gpu(index): try: exec(_try_compile_argument) except: pass pass -import torch._dynamo.config as config -for _try_dynamo_argument in torch_dynamo_arguments: - try: exec(_try_dynamo_argument) - except: pass -pass +# import torch._dynamo.config as config +# for _try_dynamo_argument in torch_dynamo_arguments: +# try: exec(_try_dynamo_argument) +# except: pass +# pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, From 526505c11989d5e02f73d0578922748205092318 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 14:01:49 -0700 Subject: [PATCH 0585/1088] Update _utils.py --- unsloth/models/_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a13723c9fb..0539e255ea 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -371,11 +371,11 @@ def is_big_gpu(index): "config.cache_size_limit = 1024", # Flex Attention "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation ] -import torch._inductor.config as config -for _try_compile_argument in torch_compile_arguments: - try: exec(_try_compile_argument) - except: pass -pass +# import torch._inductor.config as config +# for _try_compile_argument in torch_compile_arguments: 
+# try: exec(_try_compile_argument) +# except: pass +# pass # import torch._dynamo.config as config # for _try_dynamo_argument in torch_dynamo_arguments: # try: exec(_try_dynamo_argument) From 251ba777719a1a8dd403387bcd3f18e18b16024b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 14:57:14 -0700 Subject: [PATCH 0586/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 87 ++++++++++++++------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index a2337a14d9..e23f818a11 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -19,23 +19,23 @@ from transformers.models.llama.modeling_llama import logger from packaging.version import Version -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _cross_entropy_forward( - logits_ptr, + logits_ptr : tl.pointer_type, logits_row_stride : tl.constexpr(tl.int64), - loss_ptr, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + loss_ptr : tl.pointer_type(tl.float32), + logsumexp_ptr : tl.pointer_type(tl.float32), + labels_ptr : tl.const_pointer_type(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr(tl.int1), + SOFTCAP : tl.constexpr(tl.float32), + DO_LOGIT_SCALING : tl.constexpr(tl.int1), + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -92,24 +92,24 @@ def _cross_entropy_forward( pass -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: 
args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr, + logits_ptr : tl.const_pointer_type, logits_row_stride : tl.constexpr(tl.int64), - loss_ptr, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + loss_ptr : tl.pointer_type(tl.float32), + logsumexp_ptr : tl.pointer_type(tl.float32), + labels_ptr : tl.const_pointer_type(tl.int32), + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr(tl.int1), + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr(tl.int1), + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -175,23 +175,24 @@ def _chunked_cross_entropy_forward( pass -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _cross_entropy_backward( - logits_ptr, + logits_ptr : tl.pointer_type, logits_row_stride : tl.constexpr(tl.int64), - dloss_ptr, dloss_row_stride, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + dloss_ptr : tl.const_pointer_type(tl.float32), + dloss_row_stride : tl.constexpr, + logsumexp_ptr : tl.const_pointer_type(tl.float32), + labels_ptr : tl.const_pointer_type(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : 
tl.constexpr, + DO_SOFTCAPPING : tl.constexpr(tl.int1), + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr(tl.int1), + LOGIT_SCALE : tl.constexpr, ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 530c4958e8d7394862973ca6440a317a416ebd0f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 15:33:37 -0700 Subject: [PATCH 0587/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 70 +++++++++++++-------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index e23f818a11..fd34b9a4df 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -25,17 +25,17 @@ # }) @triton.jit def _cross_entropy_forward( - logits_ptr : tl.pointer_type, - logits_row_stride : tl.constexpr(tl.int64), - loss_ptr : tl.pointer_type(tl.float32), - logsumexp_ptr : tl.pointer_type(tl.float32), - labels_ptr : tl.const_pointer_type(tl.int32), - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + logits_ptr , + logits_row_stride , + loss_ptr , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + BLOCK_SIZE , + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -98,18 +98,18 @@ def _cross_entropy_forward( # }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr : tl.const_pointer_type, - logits_row_stride : tl.constexpr(tl.int64), - loss_ptr : tl.pointer_type(tl.float32), - logsumexp_ptr : tl.pointer_type(tl.float32), - labels_ptr : tl.const_pointer_type(tl.int32), - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - 
LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + loss_ptr , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + N_CHUNKS , + BLOCK_SIZE , + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -181,18 +181,18 @@ def _chunked_cross_entropy_forward( # }) @triton.jit def _cross_entropy_backward( - logits_ptr : tl.pointer_type, - logits_row_stride : tl.constexpr(tl.int64), - dloss_ptr : tl.const_pointer_type(tl.float32), - dloss_row_stride : tl.constexpr, - logsumexp_ptr : tl.const_pointer_type(tl.float32), - labels_ptr : tl.const_pointer_type(tl.int32), - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + dloss_ptr , + dloss_row_stride , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + BLOCK_SIZE , + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 07394c34368efbbc338b3c664d34a8540c308ac4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 15:46:09 -0700 Subject: [PATCH 0588/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index fd34b9a4df..20b57b30b7 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -31,7 +31,7 @@ def _cross_entropy_forward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -105,7 +105,7 @@ def _chunked_cross_entropy_forward( labels_ptr , VOCAB_SIZE , N_CHUNKS , - BLOCK_SIZE , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -188,7 +188,7 @@ def _cross_entropy_backward( 
logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , From 6d7004b5ca5f1dd9e823d6c3ee26855ac2e9ab6f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 16:37:37 -0700 Subject: [PATCH 0589/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 20b57b30b7..c5f054a43f 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -223,7 +223,7 @@ def _cross_entropy_backward( else: dloss = 0.0 - x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Do logit scaling for Cohere if DO_LOGIT_SCALING: @@ -239,7 +239,7 @@ def _cross_entropy_backward( pass logsumexp = tl.load(logsumexp_ptr + row_idx) - y = tl.exp(x.to(tl.float32) - logsumexp) + y = tl.exp(x - logsumexp) y = tl.where( col_offsets == label_idx, y - 1.0, # exp(x - logsumexp) - 1 From d86b20a50fded8c4a6b6c73e079a0600cf0d1d5a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 16:46:33 -0700 Subject: [PATCH 0590/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index c5f054a43f..474c291ed8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -232,6 +232,7 @@ def _cross_entropy_backward( pass # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + partial = 0.0 if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) partial = triton_tanh(x / SOFTCAP) From 9920950b7fb7d8116d007547ad6aef8027d0f950 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 16:50:56 -0700 Subject: 
[PATCH 0591/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 474c291ed8..9beb2da25c 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -353,7 +353,7 @@ def backward(ctx, dlosses): pass pass - +@torch._disable_dynamo def fast_cross_entropy_loss( logits, labels, From 9f926ced28d377e6c8616f4021570792995c6321 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 16:53:04 -0700 Subject: [PATCH 0592/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9beb2da25c..28e40487c1 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -67,14 +67,14 @@ def _cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) - logits = logits.to(tl.float32) + # logits = logits c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -146,14 +146,14 @@ def _chunked_cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: 
logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) - logits = logits.to(tl.float32) + # logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) From 30cdf652d331d794f2eaf3e118cfb6372bb13591 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 16:55:24 -0700 Subject: [PATCH 0593/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 28e40487c1..266ece4ca2 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -232,7 +232,7 @@ def _cross_entropy_backward( pass # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - partial = 0.0 + partial = x if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) partial = triton_tanh(x / SOFTCAP) From 54b901bc50b7798a78ed862813d4d986dd4c1e24 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 17:19:30 -0700 Subject: [PATCH 0594/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 266ece4ca2..66d5046deb 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -353,7 +353,7 @@ def backward(ctx, dlosses): pass pass -@torch._disable_dynamo +# @torch._disable_dynamo def fast_cross_entropy_loss( logits, labels, From 6db9d286d809cf8f29973d22e205d4bc0841a65a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 17:26:34 -0700 Subject: [PATCH 0595/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 66d5046deb..9f16e8e605 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -273,8 +273,8 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = logits.device) - DO_SOFTCAPPING = (logit_softcapping != 0) - DO_LOGIT_SCALING = (logit_scaling != 0) + DO_SOFTCAPPING = logit_softcapping != 0 + DO_LOGIT_SCALING = logit_scaling != 0 if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral From 8aefcd0b5000efcd2406e8b56bea6770d3ed9f82 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 30 Oct 2024 17:35:20 -0700 Subject: [PATCH 0596/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9f16e8e605..8cbdbf2a2c 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -273,8 +273,8 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = logits.device) - DO_SOFTCAPPING = logit_softcapping != 0 - DO_LOGIT_SCALING = logit_scaling != 0 + DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) + DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral From 7bf626b36b63bda24defd6d0b20c59bb6312fdbb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 01:23:41 -0700 Subject: [PATCH 0597/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 
8cbdbf2a2c..a13475e227 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -92,10 +92,10 @@ def _cross_entropy_forward( pass -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +}) @triton.jit def _chunked_cross_entropy_forward( logits_ptr , @@ -106,9 +106,9 @@ def _chunked_cross_entropy_forward( VOCAB_SIZE , N_CHUNKS , BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , + DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP , - DO_LOGIT_SCALING , + DO_LOGIT_SCALING : tl.constexpr(tl.int1), LOGIT_SCALE , ): """ @@ -189,9 +189,9 @@ def _cross_entropy_backward( labels_ptr , VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , + DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP , - DO_LOGIT_SCALING , + DO_LOGIT_SCALING : tl.constexpr(tl.int1), LOGIT_SCALE , ): """ @@ -347,7 +347,7 @@ def backward(ctx, dlosses): SOFTCAP = ctx.logit_softcapping, DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, LOGIT_SCALE = ctx.logit_scaling, - num_warps = 8, + num_warps = 8, ) return logits, None, None, None, pass From d4557513032b582d9a4e4d9fc3efd9484f9705e8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 01:56:03 -0700 Subject: [PATCH 0598/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 64 +++++++++++++-------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index a13475e227..f256918746 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -19,23 +19,23 @@ from transformers.models.llama.modeling_llama import logger from packaging.version import Version -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" 
], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +}) @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr(tl.int1), + SOFTCAP : tl.constexpr(tl.float32), + DO_LOGIT_SCALING : tl.constexpr(tl.int1), + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -67,14 +67,14 @@ def _cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) - # logits = logits + logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -99,7 +99,7 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , @@ -107,9 +107,9 @@ def _chunked_cross_entropy_forward( N_CHUNKS , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ 256K vocab 
divided in 4 chunks @@ -146,14 +146,14 @@ def _chunked_cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) - # logits = logits.to(tl.float32) + logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -175,14 +175,14 @@ def _chunked_cross_entropy_forward( pass -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], + "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +}) @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , dloss_row_stride , logsumexp_ptr , @@ -190,9 +190,9 @@ def _cross_entropy_backward( VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) @@ -223,7 +223,7 @@ def _cross_entropy_backward( else: dloss = 0.0 - x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) # Do logit scaling for Cohere if DO_LOGIT_SCALING: @@ -235,12 +235,12 @@ def _cross_entropy_backward( partial = x 
if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) - partial = triton_tanh(x / SOFTCAP) + partial = triton_tanh(x.to(tl.float32) / SOFTCAP).to(x.dtype) x = SOFTCAP * partial pass logsumexp = tl.load(logsumexp_ptr + row_idx) - y = tl.exp(x - logsumexp) + y = tl.exp(x.to(tl.float32) - logsumexp) y = tl.where( col_offsets == label_idx, y - 1.0, # exp(x - logsumexp) - 1 @@ -271,7 +271,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): div, mod = divmod(vocab_size, MAX_FUSED_SIZE) n_chunks = div + (mod != 0) - losses = torch.empty(n_rows, dtype = torch.float32, device = logits.device) + losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) @@ -279,7 +279,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) - logsumexp = torch.empty(n_rows, dtype = torch.float32, device = logits.device) + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") _cross_entropy_forward[(n_rows,)]( logits, logits.stride(0), @@ -296,7 +296,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): ) else: # For large vocabs > 65336 like Gemma 256K - logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = logits.device) + logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda:0") _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( logits, logits.stride(0), From 055eeb8c47bf63a426fe64cecfd46247d27f6f1a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 02:00:04 -0700 Subject: [PATCH 0599/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py 
b/unsloth/kernels/cross_entropy_loss.py index f256918746..f8b9b4245e 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -30,8 +30,8 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), @@ -103,9 +103,9 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + N_CHUNKS : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), @@ -187,8 +187,8 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), From 8090b7c01aaceecac4263f9af2737fdb76ebd458 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 12:36:21 -0700 Subject: [PATCH 0600/1088] Tied weights --- unsloth/models/llama.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c0175bbfaf..0e9b70a8b2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1838,7 +1838,8 @@ def post_patch(model, tokenizer): except: old_output_embedding = torch.zeros(0) # Check for tied weights as well - is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() + # is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() + is_tied = model.config.tie_word_embeddings # Check pad token's id 
-> we need to expand the embedding if len(tokenizer) > old_input_embedding.shape[0]: @@ -1887,6 +1888,9 @@ def post_patch(model, tokenizer): else: correct_dtype = old_input_embedding.dtype pass + + # Finally tie them if needed! + if is_tied: model.tie_weights() # Also patch all dtypes - BnB seems to not allocate the correct type? # BnB default dtype seems to be float16! From 7559efbbfd24037ba4501e1da7b6f9f18581b102 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 12:38:20 -0700 Subject: [PATCH 0601/1088] Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. --- unsloth/models/llama.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0e9b70a8b2..c0175bbfaf 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1838,8 +1838,7 @@ def post_patch(model, tokenizer): except: old_output_embedding = torch.zeros(0) # Check for tied weights as well - # is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() - is_tied = model.config.tie_word_embeddings + is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() # Check pad token's id -> we need to expand the embedding if len(tokenizer) > old_input_embedding.shape[0]: @@ -1888,9 +1887,6 @@ def post_patch(model, tokenizer): else: correct_dtype = old_input_embedding.dtype pass - - # Finally tie them if needed! - if is_tied: model.tie_weights() # Also patch all dtypes - BnB seems to not allocate the correct type? # BnB default dtype seems to be float16! 
From ad63a32a0332cc25c4ee72cb8aab5e2fb4f10c8a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 31 Oct 2024 16:25:26 -0700 Subject: [PATCH 0602/1088] Tied weights --- unsloth/models/_utils.py | 30 ++++++++++++++++++++++++++++-- unsloth/models/llama.py | 7 ++++++- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0539e255ea..e1ed649560 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -479,6 +479,32 @@ def patch_tokenizer(model, tokenizer): if model is not None: model.config.update({"unsloth_version" : __version__}) + # First remove pad and unk tokens if they are known to be BOS / EOS + possible_bad_tokens = ( + "<|endoftext|>", + "<|im_start|>", + "<|im_end|>", + "<|begin_of_text|>", + "<|end_of_text|>", + "", + "", + ) + input_ids = tokenizer(list(possible_bad_tokens), add_special_tokens = False).input_ids + possible_bad_tokens = frozenset(token for token, input_id in zip(possible_bad_tokens, input_ids) if len(input_id) == 1) + + if hasattr(tokenizer, "pad_token") and tokenizer.pad_token in possible_bad_tokens: + print(f"Unsloth: Pad token was {tokenizer.pad_token} which is not a good idea. We shall fix this.") + tokenizer.pad_token = None + pass + + has_bad_unk_token = False + if hasattr(tokenizer, "unk_token") and tokenizer.unk_token in possible_bad_tokens: + print(f"Unsloth: Unk token was {tokenizer.unk_token} which is not a good idea. We shall fix this.") + tokenizer.unk_token = None + has_bad_unk_token = True + pass + + # Now check pad token again bad_pad_token = False if hasattr(tokenizer, "pad_token") and tokenizer.pad_token is not None: # Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! 
@@ -492,13 +518,13 @@ def patch_tokenizer(model, tokenizer): # Check if unknown token is broken fixed_unk_token = False - if hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None: + if (hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None) or has_bad_unk_token: eos_token = getattr(tokenizer, "eos_token", None) bos_token = getattr(tokenizer, "bos_token", None) old_unk_token = tokenizer.unk_token - if old_unk_token == eos_token or old_unk_token == bos_token: + if (old_unk_token == eos_token) or (old_unk_token == bos_token) or has_bad_unk_token: has_broken_unk = True # Use the unicode replacement characters possible_replacements = [ diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c0175bbfaf..8c4b7fd253 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1838,7 +1838,8 @@ def post_patch(model, tokenizer): except: old_output_embedding = torch.zeros(0) # Check for tied weights as well - is_tied = old_input_embedding.data_ptr() == old_output_embedding.data_ptr() + is_tied = (old_input_embedding.data_ptr() == old_output_embedding.data_ptr()) \ + or (model.config.tie_word_embeddings) # Check pad token's id -> we need to expand the embedding if len(tokenizer) > old_input_embedding.shape[0]: @@ -1887,6 +1888,10 @@ def post_patch(model, tokenizer): else: correct_dtype = old_input_embedding.dtype pass + + # Must tie lm_head and embed_tokens if they are tied! + # Otherwise error will occur on saving models ie use save_model + if is_tied: model.tie_weights() # Also patch all dtypes - BnB seems to not allocate the correct type? # BnB default dtype seems to be float16! 
From 35aa99261545731aa6c7728f53348579fedb1db0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 2 Nov 2024 19:08:57 -0700 Subject: [PATCH 0603/1088] Utils --- unsloth/kernels/cross_entropy_loss.py | 1 + unsloth/models/_utils.py | 369 +------------------------- 2 files changed, 11 insertions(+), 359 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f8b9b4245e..f0dbb66f49 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -19,6 +19,7 @@ from transformers.models.llama.modeling_llama import logger from packaging.version import Version + @triton.heuristics({ "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e1ed649560..4829e4d88c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -56,6 +56,16 @@ import warnings, subprocess, re, inspect, psutil, os, math from packaging.version import Version +from unsloth_zoo.tokenizer_utils import ( + patch_tokenizer, +) +from unsloth_zoo.gradient_checkpointing import ( + Unsloth_Offloaded_Gradient_Checkpointer, + unsloth_offloaded_gradient_checkpoint, + patch_gradient_checkpointing, + unpatch_gradient_checkpointing, +) + # ============================================= # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") @@ -131,7 +141,6 @@ def patch_mistral_nemo_config(config): # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 -import torch torch_version = torch.__version__ if Version(torch_version) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd @@ -457,228 +466,6 @@ def make_inputs_require_grad(module, input, output): return model pass - -def patch_tokenizer(model, tokenizer): - """ - Phi3's pad_token isn't set. 
We set it to <|placeholder... - Llama-3 is <|reserved... - Llama-2 is - Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! - Fixes https://github.com/unslothai/unsloth/issues/5 - """ - possible_reserved_tokens = ( - "<|finetune_right_pad_id|>", # Llama-3.1 - "", # Mistral Nemo - "<|reserved", # Llama-3 - "<|placeholder", # Phi-3 - "[control", # Mistral type models - ) - joiner = "\1\0=+=\0\1" - number_repetitions = 3 - 1 # Number of reserved tokens needed - - if model is not None: - model.config.update({"unsloth_version" : __version__}) - - # First remove pad and unk tokens if they are known to be BOS / EOS - possible_bad_tokens = ( - "<|endoftext|>", - "<|im_start|>", - "<|im_end|>", - "<|begin_of_text|>", - "<|end_of_text|>", - "", - "", - ) - input_ids = tokenizer(list(possible_bad_tokens), add_special_tokens = False).input_ids - possible_bad_tokens = frozenset(token for token, input_id in zip(possible_bad_tokens, input_ids) if len(input_id) == 1) - - if hasattr(tokenizer, "pad_token") and tokenizer.pad_token in possible_bad_tokens: - print(f"Unsloth: Pad token was {tokenizer.pad_token} which is not a good idea. We shall fix this.") - tokenizer.pad_token = None - pass - - has_bad_unk_token = False - if hasattr(tokenizer, "unk_token") and tokenizer.unk_token in possible_bad_tokens: - print(f"Unsloth: Unk token was {tokenizer.unk_token} which is not a good idea. We shall fix this.") - tokenizer.unk_token = None - has_bad_unk_token = True - pass - - # Now check pad token again - bad_pad_token = False - if hasattr(tokenizer, "pad_token") and tokenizer.pad_token is not None: - # Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! 
- bad_pad_token = tokenizer.eos_token == tokenizer.pad_token - elif hasattr(tokenizer, "pad_token") and tokenizer.pad_token is None: - bad_pad_token = True - else: - bad_pad_token = False - pass - - # Check if unknown token is broken - fixed_unk_token = False - - if (hasattr(tokenizer, "unk_token") and tokenizer.unk_token is not None) or has_bad_unk_token: - - eos_token = getattr(tokenizer, "eos_token", None) - bos_token = getattr(tokenizer, "bos_token", None) - - old_unk_token = tokenizer.unk_token - if (old_unk_token == eos_token) or (old_unk_token == bos_token) or has_bad_unk_token: - has_broken_unk = True - # Use the unicode replacement characters - possible_replacements = [ - "\uFFFD", # Original replacement char - "\uFFFC", # Another option - "\u2753", # Red Question mark emoji - "\u2754", # White Question mark emoji - "\u00BF", # Inverted question mark - ] - for replacement_char in possible_replacements: - char = tokenizer(replacement_char, add_special_tokens = False).input_ids - if len(char) == 1: - # Get actual token representation - try: char = tokenizer.convert_ids_to_tokens(char[0]) - except: continue - tokenizer.unk_token = char - fixed_unk_token = True - break - pass - pass - - if not fixed_unk_token: # Still broken! - raise RuntimeError( - f"Unsloth: Tried fixing the unk_token = {old_unk_token}, but couldn't!" - ) - pass - - logger.warning_once( - f"Unsloth: unk_token = {old_unk_token} is the same as the EOS or BOS tokens. "\ - f"We fixed it by changing it to {tokenizer.unk_token}." 
- ) - pass - pass - - if bad_pad_token: - # Find a better pad token - added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] - all_added_tokens = joiner.join(added_tokens[::-1]) - all_added_tokens += joiner - - final_pad_token = None - final_good_match = False - - for possible_reserved_token in possible_reserved_tokens: - possible_reserved_token = re.escape(possible_reserved_token) - found = re.finditer(f"{possible_reserved_token}", all_added_tokens) - first_match = None - good_match = False - for j, x in enumerate(found): - if j == 0: first_match = x - if j >= number_repetitions: - good_match = True - break - pass - pass - - if first_match is None: continue - - # If it ends with |> or > etc, then set it as a good pad token! - start = first_match.span(0)[0] - possible_pad_token = first_match.group(0) - end = all_added_tokens.find(joiner, start) - first_match = all_added_tokens[start:end] - - if first_match is not None: - good_match = possible_pad_token.endswith((">", "|>", "]", ")")) - pass - possible_pad_token = first_match - - # Replace current pad token if another exact match is found - if not final_good_match and good_match: - final_good_match = True - final_pad_token = possible_pad_token - break - else: - final_good_match = False - final_pad_token = possible_pad_token - pass - pass - possible_pad_token = final_pad_token - - # Try unk_token if it wasn't fixed - if possible_pad_token is None and not fixed_unk_token and hasattr(tokenizer, "unk_token"): - possible_pad_token = tokenizer.unk_token - pass - - # Check pad token's id must be less than vocab size - if possible_pad_token is not None: - check_pad_token = tokenizer(possible_pad_token, add_special_tokens = False).input_ids - if len(check_pad_token) != 1: - possible_pad_token = None - if model is not None and check_pad_token[0] >= model.config.vocab_size: - possible_pad_token = None - pass - - if possible_pad_token is None: - # Failure to find a good replacement!! We shall manually add one! 
- new_pad_token = "<|PAD_TOKEN|>" - while new_pad_token in tokenizer.get_vocab(): - new_pad_token = f"<{new_pad_token}>" - pass - possible_pad_token = new_pad_token - pass - - name = model.config._name_or_path if model is not None else "Model" - logger.warning_once( - f"{name} does not have a padding token! Will use pad_token = {possible_pad_token}." - ) - - # Edit pad_token - tokenizer.add_special_tokens({"pad_token" : possible_pad_token}) - tokenizer.pad_token = possible_pad_token - if model is not None: - - # Edit all config with new pad token - current_model = model - while hasattr(current_model, "model") and hasattr(current_model, "config"): - current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - current_model = current_model.model - if hasattr(current_model, "model") and hasattr(current_model, "config"): - current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - pass - - # Generation edit pad token - if getattr(model, "generation_config") is not None: - model.generation_config.update(pad_token_id = tokenizer.pad_token_id) - else: - if model is not None: - - if model.config.pad_token_id is None: - - # Edit all config with new pad token - current_model = model - while hasattr(current_model, "model") and hasattr(current_model, "config"): - current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - current_model = model - if hasattr(current_model, "model") and hasattr(current_model, "config"): - current_model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - pass - - # Generation edit pad token - if getattr(model, "generation_config") is not None: - model.generation_config.update(pad_token_id = tokenizer.pad_token_id) - pass - pass - - if model is not None: - if getattr(model, "generation_config") is not None: - model.generation_config.update(max_length = model.config.max_position_embeddings) - - return model, tokenizer -pass - - # ============================================= # Weirdly 
LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. @@ -820,142 +607,6 @@ def get_statistics(): pass -def _calculate_n_gradient_checkpoints( - n_layers : int, - method : Optional[Union[str, int]] = "sqrt", -) -> List[int]: - assert(type(n_layers) is int and n_layers > 0) - - if method is None: method = "sqrt" - - if method == "sqrt": - n_checkpoints = int(n_layers**0.5) - elif type(method) is int and method > 0: - n_checkpoints = int(np.ceil(n_layers / method)) - else: - raise ValueError("method must be 'sqrt' or an int >0 and <= n_layers.") - - size = n_layers // n_checkpoints - sizes = np.full(n_checkpoints, size, dtype = int) - leftovers = n_layers % n_checkpoints - # We append leftovers from the right - for k in range(leftovers): - sizes[n_checkpoints-1-k] += 1 - boundaries = np.hstack((0, np.cumsum(sizes))) - boundaries = boundaries.tolist() - return boundaries -pass - - -def calculate_n_gradient_checkpoints( - n_layers : int, - layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", -) -> List[int]: - assert(type(n_layers) is int and n_layers > 0) - - if layers_per_checkpoint is None or layers_per_checkpoint == 1: - return None - - boundaries = _calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) - - assert(boundaries[0] == 0 and boundaries[-1] == n_layers) - assert(min(boundaries) == 0 and max(boundaries) == n_layers) - assert(np.diff(boundaries).min() >= 0) - return boundaries -pass - - -def prepare_n_gradient_checkpoints( - model : Any, - layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", - use_reentrant : Optional[bool] = True, -) -> None: - """ - Calculates where to place the gradient checkpoints given n_layers. - - Args: - model: Any LlamaModel with layers. - layers_per_checkpoint (`Union[str, int]`, *optional*): - Can either be `sqrt` or an integer for how many layers per checkpoint you want. - The more, the less memory usage, but can be slower. 
Default is `sqrt`. - Choose 1 for Pytorch gradient checkpointing. 2 to wrap 2 layers in 1 module etc. - use_reentrant (`bool`, *optional*): - https://github.com/pytorch/pytorch/blob/main/torch/utils/checkpoint.py#L354 - Optimal gradient checkpointing algorithm `use_reentrant=False` which will - be the default in future Pytorch versions doesn't seem to work?? - """ - _model = None - if hasattr(model, "layers"): - _model = model - elif hasattr(model, "model"): - if hasattr(model.model, "layers"): - _model = model.model - if _model is None: - raise TypeError("`model` or `model.model` does not have attribute `layers`. Are you sure this is a model?") - pass - - if use_reentrant is False: - use_reentrant = True - pass - - n_layers = len(_model.layers) - boundaries = calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) - _model._gradient_checkpointing_boundaries = boundaries - _model._gradient_checkpointing_use_reentrant = use_reentrant -pass - - -class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): - """ - Saves VRAM by smartly offloading to RAM. - Tiny hit to performance, since we mask the movement via non blocking calls. 
- """ - @staticmethod - @torch_amp_custom_fwd - def forward(ctx, forward_function, hidden_states, *args): - saved_hidden_states = hidden_states.to("cpu", non_blocking = True) - with torch.no_grad(): - output = forward_function(hidden_states, *args) - ctx.save_for_backward(saved_hidden_states) - ctx.forward_function = forward_function - ctx.args = args - return output - pass - - @staticmethod - @torch_amp_custom_bwd - def backward(ctx, dY): - (hidden_states,) = ctx.saved_tensors - hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() - hidden_states.requires_grad_(True) - with torch.enable_grad(): - (output,) = ctx.forward_function(hidden_states, *ctx.args) - torch.autograd.backward(output, dY) - return (None, hidden_states.grad,) + (None,)*len(ctx.args) - pass -pass - - -# @torch._disable_dynamo -def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, **kwargs): - return Unsloth_Offloaded_Gradient_Checkpointer.apply(function, *args) -pass - -import torch.utils -def patch_gradient_checkpointing(): - if torch.utils.checkpoint.checkpoint.__name__ == "unsloth_offloaded_gradient_checkpoint": return - torch.utils.checkpoint._old_checkpoint = torch.utils.checkpoint.checkpoint - torch.utils.checkpoint.checkpoint = unsloth_offloaded_gradient_checkpoint -pass - -def unpatch_gradient_checkpointing(): - if hasattr(torch.utils.checkpoint, "_old_checkpoint"): - torch.utils.checkpoint.checkpoint = torch.utils.checkpoint._old_checkpoint - del torch.utils.checkpoint._old_checkpoint - pass -pass - - # ============================================= # Regional torch 2.5 Recompilation - weirdly very slow?? 
def patch_regional_compilation(): From 0172ee34efe92fab26e04ff3cd39120af7f3e852 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 2 Nov 2024 19:20:48 -0700 Subject: [PATCH 0604/1088] CE Loss patching --- unsloth/kernels/__init__.py | 4 +- unsloth/kernels/cross_entropy_loss.py | 158 ++------------------------ 2 files changed, 12 insertions(+), 150 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 6357ddaf87..9d5b2da4f9 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -14,9 +14,7 @@ from .cross_entropy_loss import ( fast_cross_entropy_loss, - patch_llama_for_causal_lm, - unpatch_llama_for_causal_lm, - patch_transformers_losses, + patch_losses, patch_loss_function, ) from .rms_layernorm import ( diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f0dbb66f49..5236e51985 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -19,6 +19,12 @@ from transformers.models.llama.modeling_llama import logger from packaging.version import Version +from unsloth_zoo.loss_utils import ( + causal_loss_function, + transformers_losses_patcher, + patch_loss_function, +) + @triton.heuristics({ "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], @@ -354,7 +360,7 @@ def backward(ctx, dlosses): pass pass -# @torch._disable_dynamo + def fast_cross_entropy_loss( logits, labels, @@ -382,152 +388,10 @@ def fast_cross_entropy_loss( n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items pass -if Version(torch.__version__) < Version("2.5.0"): +if (Version(torch.__version__) < Version("2.4.0")) and \ + not hasattr(fast_cross_entropy_loss, "__wrapped__"): fast_cross_entropy_loss = torch._disable_dynamo(fast_cross_entropy_loss) pass - -from transformers.models.llama.modeling_llama import ( - LlamaForCausalLM, - CausalLMOutputWithPast, - Optional, - Union, - Cache, - List, - Tuple, -) - -# Transformers 4.47 need Unpack, 
KwargsForCausalLM -try: - from transformers.models.llama.modeling_llama import Unpack, KwargsForCausalLM -except: - pass -pass - -import inspect, re -function = inspect.getsource(LlamaForCausalLM.forward) -function = function.split("\n") -i = re.match(r"[ ]{1,}", function[0]).span(0)[1] -function = [x[i:] for x in function] -function = "\n".join(function) -function = function[function.find("def forward"):] -replacement = """ loss = None - logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - logit_scaling = getattr(self.config, "logit_scale", 0) - if labels is not None: - shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - pass - - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, - logit_softcapping = logit_softcapping, - logit_scaling = logit_scaling, - n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), - ) - else: - if logit_scaling != 0: - if logits.requires_grad: - logits = logit_scaling * logits - else: - logits *= logit_scaling - pass - pass - if logit_softcapping != 0: - if logits.requires_grad: - logits = (1.0 / logit_softcapping) * logits - logits = torch.tanh(logits) - logits = logit_softcapping * logits - else: - logits *= (1.0 / logit_softcapping) - torch.tanh(logits, out = logits) - logits *= logit_softcapping - pass - pass - pass -""" -function = \ - function[:function.find(" loss = None")] + \ - replacement + \ - function[ function.find(" if not return_dict"):] -function = function.replace("logits = logits.float()", "\n") -# Missed spaces -function = function.split("\n") -# Not the first one though! 
-function = [function[0]] + [" "*4 + x for x in function[1:]] -function = "\n".join(function) -function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ -f" {function}\n" -exec(function, globals()) -del function, replacement, inspect, re - - -def patch_llama_for_causal_lm(): - import transformers.models.llama.modeling_llama - transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM - return -pass - - -def unpatch_llama_for_causal_lm(): - import transformers.models.llama.modeling_llama - transformers.models.llama.modeling_llama.LlamaForCausalLM = LlamaForCausalLM - return -pass - - -def UnslothForCausalLMLoss( - logits, labels, vocab_size: int, num_items_in_batch: int = None, ignore_index: int = -100, **kwargs -): - shift_logits = logits - shift_labels = torch.empty_like(labels) - shift_labels[..., :-1] = labels[..., 1:] - shift_labels[..., -1] = -100 - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, - n_items = num_items_in_batch, - ) - return loss -pass -if Version(torch.__version__) < Version("2.5.0"): - UnslothForCausalLMLoss = torch._disable_dynamo(UnslothForCausalLMLoss) -pass - - -def patch_transformers_losses(): - import re - try: - import transformers.loss.loss_utils - except: - logger.warning_once("Unsloth: Cannot patch loss functions - update transformers for faster modules!") - return - pass - - import transformers.modeling_utils - LOSS_MAPPING = transformers.loss.loss_utils.LOSS_MAPPING - LOSS_MAPPING["ForCausalLM"] = UnslothForCausalLMLoss - - # Remove @property and @lru_cache - if hasattr(transformers.modeling_utils.PreTrainedModel.loss_function, "fget"): - transformers.modeling_utils.PreTrainedModel.loss_function = \ - transformers.modeling_utils.PreTrainedModel.loss_function.fget.__wrapped__ - pass -pass - - -def patch_loss_function(model): - try: - # model.loss_function starts as a dict to a loss fx - # We invoke it to save it - model.loss_function = model.loss_function() - 
except: - # Failed means we already invoked it, and we need args to the loss fx - pass - pass - return model -pass +# Patch CE Losses in transformers +patch_losses = transformers_losses_patcher(causal_loss_function(fast_cross_entropy_loss)) From c228682c40033520cce9e7167237334f529b95d4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 2 Nov 2024 23:14:41 -0700 Subject: [PATCH 0605/1088] Update __init__.py --- unsloth/__init__.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 91ec460094..5102d8f466 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -53,12 +53,14 @@ # Reduce VRAM usage by reducing fragmentation os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" -# Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) -keynames = "\n" + "\n".join(os.environ.keys()) -if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: +# Hugging Face Hub faster downloads +if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" pass +# Log Unsloth is being used +os.environ["UNSLOTH_IS_PRESENT"] = "1" + try: import torch except ModuleNotFoundError: From 9aa221a421c9fba219bc5989537ccec0f993e284 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 00:22:38 -0700 Subject: [PATCH 0606/1088] Update __init__.py --- unsloth/kernels/__init__.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 9d5b2da4f9..df7589719d 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -54,8 +54,12 @@ create_flex_attention_sliding_window_mask, ) -try: - print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") -except: - print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") +import os +if "UNSLOTH_ZOO_IS_PRESENT" not in os.environ: + try: + print("🦥 Unsloth: Will 
patch your computer to enable 2x faster free finetuning.") + except: + print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") + pass pass +del os From 751413ed00e70c5ad4ffbec96b61f5eaa20bba54 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 01:18:48 -0700 Subject: [PATCH 0607/1088] Patching --- unsloth/kernels/layernorm.py | 22 +----------- unsloth/models/_utils.py | 67 ++++++++++-------------------------- 2 files changed, 20 insertions(+), 69 deletions(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 48ade6d5ec..48d1a65ec7 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -17,6 +17,7 @@ import triton.language as tl import torch from .utils import calculate_settings +from unsloth_zoo import patch_layernorm @triton.jit @@ -162,27 +163,6 @@ def fast_layernorm(layernorm, X): pass -from torch.nn import LayerNorm -class Unsloth_LayerNorm(LayerNorm): - def forward(self, X): - return fast_layernorm(self, X) - pass -pass - - -def patch_layernorm(): - import torch.nn - torch.nn.LayerNorm = Unsloth_LayerNorm - return -pass - - -def unpatch_layernorm(): - import torch.nn - torch.nn.LayerNorm = LayerNorm - return -pass - def test_layernorm( dim = 1024, eps = 1e-5, dtype = torch.float16, diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4829e4d88c..64cb222e61 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -59,6 +59,11 @@ from unsloth_zoo.tokenizer_utils import ( patch_tokenizer, ) +from unsloth_zoo.patching_utils import ( + patch_compiling_bitsandbytes, + patch_layernorm, + patch_torch_compile, +) from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, unsloth_offloaded_gradient_checkpoint, @@ -356,47 +361,27 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu +patch_torch_compile() - -# Torch compile arguments -torch_compile_arguments = 
[ - "config.dce = True", - "config.memory_planning = True", - "config.memory_pool = 'combined'", - "config.coordinate_descent_tuning = True", - "config.max_autotune_gemm = False", # GEMM is unnecessary - "config.autotune_multi_device = False", - "config.max_autotune_gemm_backends = 'TRITON,ATEN,CPP'", # Not much faster - "config.aggressive_fusion = False", # Careful changes results! - "config.cuda.enable_cuda_lto = True", - "config.cuda.use_fast_math = True", - "config.cuda.compile_opt_level = '-O2'", -] -# Torch dynamo arguments -torch_dynamo_arguments = [ - "config.accumulated_cache_size_limit = 1024", # Bump up a bit from 256 - "config.suppress_errors = True", # Supress errors for now - "config.do_not_emit_runtime_asserts = True", - "config.cache_size_limit = 1024", # Flex Attention - "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation -] -# import torch._inductor.config as config -# for _try_compile_argument in torch_compile_arguments: -# try: exec(_try_compile_argument) -# except: pass -# pass -# import torch._dynamo.config as config -# for _try_dynamo_argument in torch_dynamo_arguments: -# try: exec(_try_dynamo_argument) -# except: pass -# pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, "shape_padding" : True, - "trace.enabled" : False, # Output Triton kernel outputs! 
+ "trace.enabled" : False, "triton.cudagraphs" : False, } + +import accelerate +def torch_compile_kwargs(*args, **kwargs): + print("Unsloth: Enabled auto compiling") + return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options} +pass + +accelerate.utils.dataclasses.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +accelerate.utils.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +accelerate.accelerator.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +del accelerate + # ============================================= def prepare_model_for_kbit_training( @@ -499,22 +484,8 @@ def make_inputs_require_grad(module, input, output): pass pass -# Also disable compiling on bitsandbytes -def patch_compiling_bitsandbytes(): - # import peft.tuners.lora.bnb - # peft.tuners.lora.bnb.Linear4bit.forward = \ - # torch._disable_dynamo(peft.tuners.lora.bnb.Linear4bit.forward) - # peft.tuners.lora.bnb.Linear8bitLt.forward = \ - # torch._disable_dynamo(peft.tuners.lora.bnb.Linear8bitLt.forward) - # return - import bitsandbytes.nn.modules - bitsandbytes.nn.modules.Linear4bit.forward = \ - torch._disable_dynamo(bitsandbytes.nn.modules.Linear4bit.forward) - return -pass # ============================================= - import psutil def _get_statistics(statistics = None, force_download = True): # We log some basic stats about which environment is being used. 
From 82db087cab3918c35290c4001cb61b4265ca54fc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 14:09:47 -0800 Subject: [PATCH 0608/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 5236e51985..f5a015073c 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -20,9 +20,8 @@ from packaging.version import Version from unsloth_zoo.loss_utils import ( - causal_loss_function, - transformers_losses_patcher, - patch_loss_function, + patch_loss_functions, + post_patch_loss_function, ) From cf682022526762aaa1c887ac6c8055b448cf2f2b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 14:15:08 -0800 Subject: [PATCH 0609/1088] CE Loss --- unsloth/kernels/__init__.py | 3 +-- unsloth/kernels/cross_entropy_loss.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index df7589719d..78e70a65b5 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -14,8 +14,7 @@ from .cross_entropy_loss import ( fast_cross_entropy_loss, - patch_losses, - patch_loss_function, + post_patch_loss_function, ) from .rms_layernorm import ( fast_rms_layernorm, diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f5a015073c..92e64bfa9b 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -393,4 +393,4 @@ def fast_cross_entropy_loss( pass # Patch CE Losses in transformers -patch_losses = transformers_losses_patcher(causal_loss_function(fast_cross_entropy_loss)) +patch_loss_functions(fast_cross_entropy_loss) From 63a18286d7ccf2f6374daf12b68cd0e07b8871dd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 15:01:29 -0800 Subject: [PATCH 0610/1088] Update _utils.py --- 
unsloth/models/_utils.py | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 64cb222e61..61d660bce4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -63,6 +63,7 @@ patch_compiling_bitsandbytes, patch_layernorm, patch_torch_compile, + patch_regional_compilation, ) from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, @@ -578,27 +579,6 @@ def get_statistics(): pass -# ============================================= -# Regional torch 2.5 Recompilation - weirdly very slow?? -def patch_regional_compilation(): - if torch.nn.ModuleList.__name__ == "UnslothModuleList": return - # Only works for torch 2.5 - if Version(torch.__version__) < Version("2.5.0"): return - - old_module_list = torch.nn.ModuleList - - def UnslothModuleList(*args, **kwargs): - if len(args) == 1 and len(kwargs) == 0 and type(args[0]) is list: - args = [old_module_list([torch.compile(x, dynamic = True, options = torch_compile_options, fullgraph = False) for x in args[0]])] - return old_module_list(*args, **kwargs) - pass - UnslothModuleList.__doc__ = old_module_list.__doc__ - - torch.nn.ModuleList = UnslothModuleList - return -pass - - # ============================================= # Fixes Bitsandbytes to remove missing warnings from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod From 3f0e56fc2313518f8bc348bc00d10748dd4b5162 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 15:02:38 -0800 Subject: [PATCH 0611/1088] Update _utils.py --- unsloth/models/_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 61d660bce4..f49c9db1ff 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -46,6 +46,8 @@ "patch_gradient_accumulation_fix", "patch_compiling_bitsandbytes", "patch_regional_compilation", + "patch_layernorm", + 
"patch_torch_compile", ] import torch From 1190ed45b3608b914d9de2b318118ddeb15c4b39 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 15:15:58 -0800 Subject: [PATCH 0612/1088] CE Loss --- unsloth/kernels/__init__.py | 1 + unsloth/kernels/cross_entropy_loss.py | 6 ++++-- unsloth/models/_utils.py | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 78e70a65b5..3b31f49999 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -15,6 +15,7 @@ from .cross_entropy_loss import ( fast_cross_entropy_loss, post_patch_loss_function, + patch_loss_functions, ) from .rms_layernorm import ( fast_rms_layernorm, diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 92e64bfa9b..41bce690e1 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -20,7 +20,7 @@ from packaging.version import Version from unsloth_zoo.loss_utils import ( - patch_loss_functions, + patch_loss_functions as _patch_loss_functions, post_patch_loss_function, ) @@ -393,4 +393,6 @@ def fast_cross_entropy_loss( pass # Patch CE Losses in transformers -patch_loss_functions(fast_cross_entropy_loss) +def patch_loss_functions(): + _patch_loss_functions(fast_cross_entropy_loss) +pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f49c9db1ff..45b9569c0d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -48,6 +48,7 @@ "patch_regional_compilation", "patch_layernorm", "patch_torch_compile", + "patch_loss_functions", ] import torch From 607ac343518666674903247e0d5c2a04c0d4ad3b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 15:21:03 -0800 Subject: [PATCH 0613/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 45b9569c0d..5d2bb3a523 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.7" +__version__ = "2024.11.1" __all__ = [ "prepare_model_for_kbit_training", From 32eac0b6c565d85aba376bd4460e20937fcb58e8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 15:25:18 -0800 Subject: [PATCH 0614/1088] Update _utils.py --- unsloth/models/_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5d2bb3a523..0fb73c2237 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -48,7 +48,6 @@ "patch_regional_compilation", "patch_layernorm", "patch_torch_compile", - "patch_loss_functions", ] import torch From 5b6d401650752534e890425adb7997644b2357c2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 17:05:33 -0800 Subject: [PATCH 0615/1088] Layernorm --- unsloth/kernels/__init__.py | 1 - unsloth/kernels/layernorm.py | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 3b31f49999..82e7641693 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -25,7 +25,6 @@ from .layernorm import ( fast_layernorm, patch_layernorm, - unpatch_layernorm, ) from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 48d1a65ec7..a5f7926e2e 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -17,7 +17,9 @@ import triton.language as tl import torch from .utils import calculate_settings -from unsloth_zoo import patch_layernorm +from unsloth_zoo.patching_utils import ( + patch_layernorm, +) @triton.jit From 3d19a71cf96b09b56390a6bb0a2af55d573a43c4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 
Nov 2024 17:09:37 -0800 Subject: [PATCH 0616/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0fb73c2237..82bd4c979d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -364,7 +364,7 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile() +patch_torch_compile(debug = False, O3 = False) torch_compile_options = { "epilogue_fusion" : True, From 76da5117d3358946748c2287c6212b68a26afa9f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 17:15:27 -0800 Subject: [PATCH 0617/1088] Update _utils.py --- unsloth/models/_utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 82bd4c979d..8f0fc30568 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -59,7 +59,7 @@ from packaging.version import Version from unsloth_zoo.tokenizer_utils import ( - patch_tokenizer, + patch_tokenizer as _patch_tokenizer, ) from unsloth_zoo.patching_utils import ( patch_compiling_bitsandbytes, @@ -983,3 +983,11 @@ def patch_gradient_accumulation_fix(Trainer): exec(function, globals()) Trainer.training_step = _unsloth_training_step pass + + +def patch_tokenizer(model, tokenizer): + model, tokenizer = _patch_tokenizer(model, tokenizer) + if model is not None: + model.config.update({"unsloth_version" : __version__}) + return model, tokenizer +pass From 013ebaa8769757c22ef1426b66922058983532af Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 17:49:08 -0800 Subject: [PATCH 0618/1088] Post patch --- unsloth/models/_utils.py | 1 + unsloth/models/gemma.py | 52 +--------------- unsloth/models/gemma2.py | 52 +--------------- unsloth/models/llama.py | 129 +-------------------------------------- 4 files changed, 6 insertions(+), 228 deletions(-) diff --git 
a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8f0fc30568..ba6392eab8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -66,6 +66,7 @@ patch_layernorm, patch_torch_compile, patch_regional_compilation, + patch_model_and_tokenizer, ) from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 1ec116b2ea..1d9a0c1334 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -340,56 +340,8 @@ def pre_patch(): @staticmethod def post_patch(model, tokenizer): - # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2.2 - model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.lm_head.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - - # Gemma has tied weights! This means lm_head == embed_tokens - if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.model.embed_tokens.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - pass - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! 
- module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - # RoPE must be done in float32 for Gemma - # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - # and (module.cos_cached.dtype != correct_dtype): - - # module.cos_cached = module.cos_cached.to(correct_dtype) - # module.sin_cached = module.sin_cached.to(correct_dtype) - # pass - # pass - pass + # Gemma does not downcast RoPE + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = False) # Add 1 to weight # return output * (1 + self.weight) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 54d8f628cb..4eb9d64313 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -491,56 +491,8 @@ def pre_patch(): @staticmethod def post_patch(model, tokenizer): - # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2.2 - model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.lm_head.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - - # Gemma has tied weights! This means lm_head == embed_tokens - if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.model.embed_tokens.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - pass - - # Also patch all dtypes - BnB seems to not allocate the correct type? 
- # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - # RoPE must be done in float32 for Gemma - # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - # and (module.cos_cached.dtype != correct_dtype): - - # module.cos_cached = module.cos_cached.to(correct_dtype) - # module.sin_cached = module.sin_cached.to(correct_dtype) - # pass - # pass - pass + # Gemma does not downcast RoPE + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = False) # Add 1 to weight # return output * (1 + self.weight) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8c4b7fd253..4e83e69d60 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -57,8 +57,6 @@ from transformers import set_seed as transformers_set_seed from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model from peft import PeftModelForCausalLM -from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit -from peft.tuners.lora import Linear4bit as Peft_Linear4bit from ..save import patch_saving_functions import re, os, inspect, math, sys try: @@ -1798,30 +1796,6 @@ def from_pretrained( internal_model = internal_model.model pass internal_model._saved_temp_tokenizer = tokenizer - - # Also fix torch_dtype - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif 
internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass return model, tokenizer pass @@ -1829,108 +1803,7 @@ def from_pretrained( @staticmethod def post_patch(model, tokenizer): - # Torch.compile fails on embedding matrix?? - try: old_input_embedding = model.get_input_embeddings ().weight - except: return model, tokenizer - - # Maybe not all models have a lm_head? - try: old_output_embedding = model.get_output_embeddings().weight - except: old_output_embedding = torch.zeros(0) - - # Check for tied weights as well - is_tied = (old_input_embedding.data_ptr() == old_output_embedding.data_ptr()) \ - or (model.config.tie_word_embeddings) - - # Check pad token's id -> we need to expand the embedding - if len(tokenizer) > old_input_embedding.shape[0]: - # Workaround randomnly fixes it for torch versions < 2. 
- requires_grad = old_input_embedding.requires_grad - old_input_embedding.requires_grad_(False) - old_input_embedding.resize_(len(tokenizer), old_input_embedding.shape[1]) - old_input_embedding.requires_grad_(requires_grad) - - # Fix up all vocab sizes - current_model = model - while hasattr(current_model, "model") and hasattr(current_model, "config"): - if hasattr(current_model.config, "vocab_size"): - current_model.config.update({"vocab_size" : len(tokenizer)}) - current_model = current_model.model - if hasattr(current_model, "model") and hasattr(current_model, "config"): - if hasattr(current_model.config, "vocab_size"): - current_model.config.update({"vocab_size" : len(tokenizer)}) - pass - pass - - model.set_input_embeddings( - torch.nn.Embedding.from_pretrained( - old_input_embedding, - padding_idx = getattr(model.config, "pad_token_id", None), - ) - ) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - if old_output_embedding.numel() != 0: - - requires_grad = old_output_embedding.requires_grad - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - - lm_head.weight = old_output_embedding if not is_tied else old_input_embedding - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - - lm_head.weight.requires_grad_(requires_grad) - model.set_output_embeddings(lm_head) - if hasattr(model, "lm_head"): model.lm_head = lm_head - - correct_dtype = lm_head.weight.dtype - else: - correct_dtype = old_input_embedding.dtype - pass - - # Must tie lm_head and embed_tokens if they are tied! - # Otherwise error will occur on saving models ie use save_model - if is_tied: model.tie_weights() - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! 
- for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")): - - if hasattr(module, "cos_cached") and \ - (module.cos_cached.dtype != correct_dtype): - - module.cos_cached = module.cos_cached.to(correct_dtype) - module.sin_cached = module.sin_cached.to(correct_dtype) - - elif hasattr(module, "short_cos_cached") and \ - (module.short_cos_cached.dtype != correct_dtype): - - module.short_cos_cached = module.short_cos_cached.to(correct_dtype) - module.short_sin_cached = module.short_sin_cached.to(correct_dtype) - pass - pass - pass - - # Clear deleted GPU items - for _ in range(3): - gc.collect() - torch.cuda.empty_cache() - return model, tokenizer + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = True) pass From 608916a83116f6968c4cfa1e388fc0170945945b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 17:56:03 -0800 Subject: [PATCH 0619/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ba6392eab8..091dbaee29 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -48,6 +48,7 @@ "patch_regional_compilation", "patch_layernorm", "patch_torch_compile", + "patch_model_and_tokenizer", ] import torch From 19836e38bbc4b8367162161d902ff43a2b116db5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 18:08:22 -0800 Subject: [PATCH 0620/1088] Update llama.py --- unsloth/models/llama.py | 1 + 1 file changed, 1 
insertion(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 4e83e69d60..3c4d8f3b38 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1804,6 +1804,7 @@ def from_pretrained( @staticmethod def post_patch(model, tokenizer): model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = True) + return model, tokenizer pass From 01640876043b3f4de6382b39b9e9d986a73e1a5b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 18:13:11 -0800 Subject: [PATCH 0621/1088] Update _utils.py --- unsloth/models/_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 091dbaee29..837b4849f1 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -354,6 +354,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings +UNSLOTH_COMPILE_DEBUG = True # Just remove max_autotune_gemm warning import functools @@ -366,13 +367,13 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile(debug = False, O3 = False) +patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = False) torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, "shape_padding" : True, - "trace.enabled" : False, + "trace.enabled" : UNSLOTH_COMPILE_DEBUG, "triton.cudagraphs" : False, } From 205f7ad7d923ca1bd62fce1094cbeacb3effcb40 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 18:33:02 -0800 Subject: [PATCH 0622/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 41bce690e1..a8af945221 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -279,9 +279,6 @@ def 
forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) - DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) - if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) @@ -294,9 +291,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): labels, VOCAB_SIZE = vocab_size, BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, + DO_SOFTCAPPING = logit_softcapping != 0, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, + DO_LOGIT_SCALING = logit_scaling != 0, LOGIT_SCALE = logit_scaling, num_warps = num_warps, ) @@ -312,9 +309,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): VOCAB_SIZE = vocab_size, N_CHUNKS = n_chunks, BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, + DO_SOFTCAPPING = logit_softcapping != 0, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, + DO_LOGIT_SCALING = logit_scaling != 0, LOGIT_SCALE = logit_scaling, num_warps = 32, ) @@ -326,9 +323,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): pass ctx.save_for_backward(logits, logsumexp, labels) - ctx.DO_SOFTCAPPING = DO_SOFTCAPPING + ctx.DO_SOFTCAPPING = logit_softcapping != 0 ctx.logit_softcapping = logit_softcapping - ctx.DO_LOGIT_SCALING = DO_LOGIT_SCALING + ctx.DO_LOGIT_SCALING = logit_scaling != 0 ctx.logit_scaling = logit_scaling return losses pass From 2f1f393dd7f50252450756c5bbe6bac5a40fb3ec Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 18:35:43 -0800 Subject: [PATCH 0623/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py 
b/unsloth/kernels/cross_entropy_loss.py index a8af945221..04a2e1861c 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -25,10 +25,10 @@ ) -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _cross_entropy_forward( logits_ptr , @@ -98,10 +98,10 @@ def _cross_entropy_forward( pass -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _chunked_cross_entropy_forward( logits_ptr , @@ -181,10 +181,10 @@ def _chunked_cross_entropy_forward( pass -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], +# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], +# }) @triton.jit def _cross_entropy_backward( logits_ptr , From 05b8f663ef56c21608e304df3444e46bf6b77517 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 19:57:26 -0800 Subject: [PATCH 0624/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 04a2e1861c..d33cb78409 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -279,6 +279,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = 
torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) + DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) + if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) @@ -291,9 +294,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): labels, VOCAB_SIZE = vocab_size, BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = logit_softcapping != 0, + DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = logit_scaling != 0, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, LOGIT_SCALE = logit_scaling, num_warps = num_warps, ) @@ -309,9 +312,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): VOCAB_SIZE = vocab_size, N_CHUNKS = n_chunks, BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = logit_softcapping != 0, + DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = logit_scaling != 0, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, LOGIT_SCALE = logit_scaling, num_warps = 32, ) @@ -323,9 +326,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): pass ctx.save_for_backward(logits, logsumexp, labels) - ctx.DO_SOFTCAPPING = logit_softcapping != 0 + ctx.DO_SOFTCAPPING = DO_SOFTCAPPING ctx.logit_softcapping = logit_softcapping - ctx.DO_LOGIT_SCALING = logit_scaling != 0 + ctx.DO_LOGIT_SCALING = DO_LOGIT_SCALING ctx.logit_scaling = logit_scaling return losses pass From 8d205c0e0bb6d6913eded73081d0d7f62f732888 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 19:59:59 -0800 Subject: [PATCH 0625/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d33cb78409..70b0f116d5 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ 
b/unsloth/kernels/cross_entropy_loss.py @@ -279,9 +279,6 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) - DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) - if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) @@ -294,9 +291,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): labels, VOCAB_SIZE = vocab_size, BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, + DO_SOFTCAPPING = bool(logit_softcapping != 0), SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, + DO_LOGIT_SCALING = bool(logit_scaling != 0), LOGIT_SCALE = logit_scaling, num_warps = num_warps, ) @@ -312,9 +309,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): VOCAB_SIZE = vocab_size, N_CHUNKS = n_chunks, BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, + DO_SOFTCAPPING = bool(logit_softcapping != 0), SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, + DO_LOGIT_SCALING = bool(logit_scaling != 0), LOGIT_SCALE = logit_scaling, num_warps = 32, ) @@ -326,9 +323,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): pass ctx.save_for_backward(logits, logsumexp, labels) - ctx.DO_SOFTCAPPING = DO_SOFTCAPPING + ctx.DO_SOFTCAPPING = bool(logit_softcapping != 0) ctx.logit_softcapping = logit_softcapping - ctx.DO_LOGIT_SCALING = DO_LOGIT_SCALING + ctx.DO_LOGIT_SCALING = bool(logit_scaling != 0) ctx.logit_scaling = logit_scaling return losses pass From a1e9e135cd34ab2a4f8f3eed95ecc2b07dc8b440 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 20:03:03 -0800 Subject: [PATCH 0626/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 70b0f116d5..347f9eb9da 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -346,9 +346,9 @@ def backward(ctx, dlosses): labels, VOCAB_SIZE = vocab_size, BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, + DO_SOFTCAPPING = bool(ctx.DO_SOFTCAPPING), SOFTCAP = ctx.logit_softcapping, - DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, + DO_LOGIT_SCALING = bool(ctx.DO_LOGIT_SCALING), LOGIT_SCALE = ctx.logit_scaling, num_warps = 8, ) From 94655f8ddb7b623eeaad013bfc0da12c29001daa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 20:49:59 -0800 Subject: [PATCH 0627/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 44 +++++++++++++-------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 347f9eb9da..9c3c9442c9 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,16 +32,16 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - BLOCK_SIZE : tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + VOCAB_SIZE , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -105,17 +105,17 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - N_CHUNKS : tl.constexpr(tl.int32), - BLOCK_SIZE : 
tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + VOCAB_SIZE , + N_CHUNKS , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -188,17 +188,17 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - BLOCK_SIZE : tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), - DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + VOCAB_SIZE , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 085f9988bab2f7fd36809d401f19d02d68185031 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 20:58:31 -0800 Subject: [PATCH 0628/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 43 ++++++++++++++------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9c3c9442c9..35fbbed730 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -37,7 +37,7 @@ def _cross_entropy_forward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr , DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -69,7 +69,7 @@ def _cross_entropy_forward( logsumexp_ptr += row_idx labels_ptr += row_idx - col_offsets = tl.arange(0, BLOCK_SIZE) + col_offsets = tl.arange(0, BLOCK_SIZE : tl.constexpr) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) @@ -111,7 +111,7 
@@ def _chunked_cross_entropy_forward( labels_ptr , VOCAB_SIZE , N_CHUNKS , - BLOCK_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr , DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -148,7 +148,7 @@ def _chunked_cross_entropy_forward( logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx - col_offsets = chunk_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + col_offsets = chunk_idx*BLOCK_SIZE : tl.constexpr + tl.arange(0, BLOCK_SIZE : tl.constexpr) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) @@ -194,7 +194,7 @@ def _cross_entropy_backward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr , DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -220,7 +220,7 @@ def _cross_entropy_backward( logits_ptr += row_idx * logits_row_stride dloss_ptr += row_idx * dloss_row_stride - col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + col_offsets = block_idx*BLOCK_SIZE : tl.constexpr + tl.arange(0, BLOCK_SIZE : tl.constexpr) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr + row_idx).to(tl.int32) @@ -279,9 +279,12 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): n_chunks = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) + DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) + if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral - BLOCK_SIZE, num_warps = calculate_settings(vocab_size) + BLOCK_SIZE : tl.constexpr, num_warps = calculate_settings(vocab_size) logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") _cross_entropy_forward[(n_rows,)]( @@ -290,10 +293,10 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): logsumexp, labels, VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = bool(logit_softcapping != 0), + BLOCK_SIZE : tl.constexpr = BLOCK_SIZE : tl.constexpr, 
+ DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = bool(logit_scaling != 0), + DO_LOGIT_SCALING = DO_LOGIT_SCALING, LOGIT_SCALE = logit_scaling, num_warps = num_warps, ) @@ -308,10 +311,10 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): labels, VOCAB_SIZE = vocab_size, N_CHUNKS = n_chunks, - BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = bool(logit_softcapping != 0), + BLOCK_SIZE : tl.constexpr = MAX_FUSED_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = bool(logit_scaling != 0), + DO_LOGIT_SCALING = DO_LOGIT_SCALING, LOGIT_SCALE = logit_scaling, num_warps = 32, ) @@ -323,9 +326,9 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): pass ctx.save_for_backward(logits, logsumexp, labels) - ctx.DO_SOFTCAPPING = bool(logit_softcapping != 0) + ctx.DO_SOFTCAPPING = DO_SOFTCAPPING ctx.logit_softcapping = logit_softcapping - ctx.DO_LOGIT_SCALING = bool(logit_scaling != 0) + ctx.DO_LOGIT_SCALING = DO_LOGIT_SCALING ctx.logit_scaling = logit_scaling return losses pass @@ -335,8 +338,8 @@ def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors n_rows, vocab_size = logits.shape - BLOCK_SIZE = 4096 - div, mod = divmod(vocab_size, BLOCK_SIZE) + BLOCK_SIZE : tl.constexpr = 4096 + div, mod = divmod(vocab_size, BLOCK_SIZE : tl.constexpr) n_blocks = div + (mod != 0) _cross_entropy_backward[(n_rows, n_blocks,)]( @@ -345,10 +348,10 @@ def backward(ctx, dlosses): logsumexp, labels, VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = bool(ctx.DO_SOFTCAPPING), + BLOCK_SIZE : tl.constexpr = BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, SOFTCAP = ctx.logit_softcapping, - DO_LOGIT_SCALING = bool(ctx.DO_LOGIT_SCALING), + DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, LOGIT_SCALE = ctx.logit_scaling, num_warps = 8, ) From c796fd9479550cfa1204a9b410014be56419c2da Mon Sep 17 00:00:00 2001 From: Daniel Han Date: 
Sun, 3 Nov 2024 20:58:59 -0800 Subject: [PATCH 0629/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 35fbbed730..61a015d9ba 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -37,7 +37,7 @@ def _cross_entropy_forward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -69,7 +69,7 @@ def _cross_entropy_forward( logsumexp_ptr += row_idx labels_ptr += row_idx - col_offsets = tl.arange(0, BLOCK_SIZE : tl.constexpr) + col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) @@ -111,7 +111,7 @@ def _chunked_cross_entropy_forward( labels_ptr , VOCAB_SIZE , N_CHUNKS , - BLOCK_SIZE : tl.constexpr , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -148,7 +148,7 @@ def _chunked_cross_entropy_forward( logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx - col_offsets = chunk_idx*BLOCK_SIZE : tl.constexpr + tl.arange(0, BLOCK_SIZE : tl.constexpr) + col_offsets = chunk_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) @@ -194,7 +194,7 @@ def _cross_entropy_backward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr , + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -220,7 +220,7 @@ def _cross_entropy_backward( logits_ptr += row_idx * logits_row_stride dloss_ptr += row_idx * dloss_row_stride - col_offsets = block_idx*BLOCK_SIZE : tl.constexpr + tl.arange(0, BLOCK_SIZE : tl.constexpr) + col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr + 
row_idx).to(tl.int32) @@ -284,7 +284,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral - BLOCK_SIZE : tl.constexpr, num_warps = calculate_settings(vocab_size) + BLOCK_SIZE, num_warps = calculate_settings(vocab_size) logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") _cross_entropy_forward[(n_rows,)]( @@ -293,7 +293,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): logsumexp, labels, VOCAB_SIZE = vocab_size, - BLOCK_SIZE : tl.constexpr = BLOCK_SIZE : tl.constexpr, + BLOCK_SIZE = BLOCK_SIZE, DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, DO_LOGIT_SCALING = DO_LOGIT_SCALING, @@ -311,7 +311,7 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): labels, VOCAB_SIZE = vocab_size, N_CHUNKS = n_chunks, - BLOCK_SIZE : tl.constexpr = MAX_FUSED_SIZE, + BLOCK_SIZE = MAX_FUSED_SIZE, DO_SOFTCAPPING = DO_SOFTCAPPING, SOFTCAP = logit_softcapping, DO_LOGIT_SCALING = DO_LOGIT_SCALING, @@ -338,8 +338,8 @@ def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors n_rows, vocab_size = logits.shape - BLOCK_SIZE : tl.constexpr = 4096 - div, mod = divmod(vocab_size, BLOCK_SIZE : tl.constexpr) + BLOCK_SIZE = 4096 + div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks = div + (mod != 0) _cross_entropy_backward[(n_rows, n_blocks,)]( @@ -348,7 +348,7 @@ def backward(ctx, dlosses): logsumexp, labels, VOCAB_SIZE = vocab_size, - BLOCK_SIZE : tl.constexpr = BLOCK_SIZE : tl.constexpr, + BLOCK_SIZE = BLOCK_SIZE, DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, SOFTCAP = ctx.logit_softcapping, DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, From e943d77dbf29f76a71f4de172e935c31d7f1f369 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:03:24 -0800 Subject: [PATCH 0630/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 32 +++++++++++++-------------- 1 file changed, 16 
insertions(+), 16 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 61a015d9ba..efe18f5c14 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,16 +32,16 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING , - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING , - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -105,17 +105,17 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + N_CHUNKS : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING , - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING , - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ 256K vocab divided in 4 chunks @@ -188,17 +188,17 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, + VOCAB_SIZE : tl.constexpr(tl.int32), + BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING , - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING , - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 16a7df63e76eda7e3f46d5fa3eb99ed35acd46e6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:07:23 
-0800 Subject: [PATCH 0631/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 32 +++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index efe18f5c14..17168b230a 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,16 +32,16 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride : tl.constexpr, loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP : tl.constexpr, DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE : tl.constexpr, ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -105,17 +105,17 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride : tl.constexpr, loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - N_CHUNKS : tl.constexpr(tl.int32), - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP : tl.constexpr, DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -188,17 +188,17 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride : tl.constexpr, dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, 
DO_SOFTCAPPING , - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP : tl.constexpr, DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE : tl.constexpr, ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From f65b064216555e37f3da18ef904169c237e744c3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:09:34 -0800 Subject: [PATCH 0632/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 17168b230a..15a148b00e 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,12 +36,12 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -109,13 +109,13 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, + VOCAB_SIZE , + N_CHUNKS , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -193,12 +193,12 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 1ff49b8984ec5226ea9d31630a6e6420ad7bc115 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:11:34 -0800 Subject: [PATCH 0633/1088] Update cross_entropy_loss.py --- 
unsloth/kernels/cross_entropy_loss.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 15a148b00e..bcdef31997 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,11 +32,11 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr, + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -105,12 +105,12 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr, + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -188,12 +188,12 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride : tl.constexpr, + logits_row_stride , dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , From 080e558ddf98ea90a79c4d5c005b33cd0853f16e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:16:19 -0800 Subject: [PATCH 0634/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index bcdef31997..4ac5a0c8b8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,7 +36,7 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -109,8 +109,8 @@ def 
_chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, + VOCAB_SIZE , + N_CHUNKS , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -193,7 +193,7 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -272,16 +272,20 @@ def _cross_entropy_backward( class Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod - def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): + def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : float = 0): + n_rows : int + vocab_size : int n_rows, vocab_size = logits.shape div, mod = divmod(vocab_size, MAX_FUSED_SIZE) - n_chunks = div + (mod != 0) + n_chunks : int = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) + BLOCK_SIZE : int + num_warps : int if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) @@ -336,11 +340,13 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors + n_rows : int + vocab_size : int n_rows, vocab_size = logits.shape - BLOCK_SIZE = 4096 + BLOCK_SIZE : int = 4096 div, mod = divmod(vocab_size, BLOCK_SIZE) - n_blocks = div + (mod != 0) + n_blocks : int = div + (mod != 0) _cross_entropy_backward[(n_rows, n_blocks,)]( logits, logits.stride(0), From f6d50c78ac30e7236386a685a7a151a882cb3a50 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:18:26 -0800 Subject: [PATCH 0635/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git 
a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 4ac5a0c8b8..3d4a548a1f 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -37,10 +37,10 @@ def _cross_entropy_forward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , + BLOCK_SIZE : tl.constexpr(tl.int32), + DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP , - DO_LOGIT_SCALING , + DO_LOGIT_SCALING : tl.constexpr(tl.int1), LOGIT_SCALE , ): """ @@ -111,10 +111,10 @@ def _chunked_cross_entropy_forward( labels_ptr , VOCAB_SIZE , N_CHUNKS , - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , + BLOCK_SIZE : tl.constexpr(tl.int32), + DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP , - DO_LOGIT_SCALING , + DO_LOGIT_SCALING : tl.constexpr(tl.int1), LOGIT_SCALE , ): """ @@ -194,10 +194,10 @@ def _cross_entropy_backward( logsumexp_ptr , labels_ptr , VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , + BLOCK_SIZE : tl.constexpr(tl.int32), + DO_SOFTCAPPING : tl.constexpr(tl.int1), SOFTCAP , - DO_LOGIT_SCALING , + DO_LOGIT_SCALING : tl.constexpr(tl.int1), LOGIT_SCALE , ): """ From fad420255846915d1fb1ba30cd27eb2a01c5c735 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:19:42 -0800 Subject: [PATCH 0636/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 3d4a548a1f..4ebe69d565 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -25,10 +25,10 @@ ) -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: 
bool(args["DO_LOGIT_SCALING"]), +}) @triton.jit def _cross_entropy_forward( logits_ptr , @@ -98,10 +98,10 @@ def _cross_entropy_forward( pass -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), +}) @triton.jit def _chunked_cross_entropy_forward( logits_ptr , @@ -181,10 +181,10 @@ def _chunked_cross_entropy_forward( pass -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], -# "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), +}) @triton.jit def _cross_entropy_backward( logits_ptr , From 736b16ac6850c55669902334e6e53b79c0f79a7b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:22:08 -0800 Subject: [PATCH 0637/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 4ebe69d565..d396538e69 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,16 +32,16 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -105,17 +105,17 @@ def 
_cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , + VOCAB_SIZE : tl.constexpr(tl.int32), + N_CHUNKS : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ 256K vocab divided in 4 chunks @@ -188,17 +188,17 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From eb764169978a6af95c27a34cc2830d61c04cbcec Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:23:28 -0800 Subject: [PATCH 0638/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d396538e69..09eb0854e8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -190,7 +190,7 @@ def _cross_entropy_backward( logits_ptr , logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , - dloss_row_stride , + dloss_row_stride : tl.constexpr(tl.int32), logsumexp_ptr , labels_ptr , VOCAB_SIZE : tl.constexpr(tl.int32), From 367e43fe0a81a0cc17a4af050ca6b9cf0fe21c13 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:25:32 
-0800 Subject: [PATCH 0639/1088] typing --- unsloth/kernels/cross_entropy_loss.py | 2 +- unsloth/kernels/rms_layernorm.py | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 09eb0854e8..d396538e69 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -190,7 +190,7 @@ def _cross_entropy_backward( logits_ptr , logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , - dloss_row_stride : tl.constexpr(tl.int32), + dloss_row_stride , logsumexp_ptr , labels_ptr , VOCAB_SIZE : tl.constexpr(tl.int32), diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 13faf08d6a..c0fb222b8a 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -53,7 +53,7 @@ def _rms_layernorm_forward( pass -@triton.heuristics({"GEMMA": lambda args: args["GEMMA"],}) +@triton.heuristics({"GEMMA": lambda args: bool(args["GEMMA"]),}) @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, @@ -130,11 +130,15 @@ def _gemma_rms_layernorm_forward( class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps, gemma = False): + def forward(ctx, X, W, eps :float, gemma : bool = False): shape = X.shape - dim = shape[-1] + dim : int = shape[-1] X = X.view(-1, dim) + n_rows : int + n_cols : int n_rows, n_cols = X.shape + BLOCK_SIZE : int + num_warps : int BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") @@ -161,9 +165,11 @@ def forward(ctx, X, W, eps, gemma = False): @staticmethod def backward(ctx, dY): shape = dY.shape - dim = shape[-1] + dim : int = shape[-1] dY = dY.view(-1, dim) X, W, r = ctx.saved_tensors + n_rows : int + n_cols : int n_rows, n_cols = dY.shape dW = X From 993df2043cdaccae639b97350e7991d432ac3fa3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:27:37 
-0800 Subject: [PATCH 0640/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d396538e69..f92ddbd05d 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,7 +32,7 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , @@ -105,7 +105,7 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , @@ -188,7 +188,7 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , dloss_ptr , dloss_row_stride , logsumexp_ptr , From 8f566b31b66a8071fec2231db3a00a16b7138f86 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:29:19 -0800 Subject: [PATCH 0641/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f92ddbd05d..4ebe69d565 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,12 +36,12 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -109,13 +109,13 @@ def _chunked_cross_entropy_forward( 
loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - N_CHUNKS : tl.constexpr(tl.int32), + VOCAB_SIZE , + N_CHUNKS , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -193,12 +193,12 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From 22bb46b21b8ec5e824f174104d4d9c10ecd899da Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:31:18 -0800 Subject: [PATCH 0642/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 4ebe69d565..7939758fda 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -39,9 +39,9 @@ def _cross_entropy_forward( VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -113,9 +113,9 @@ def _chunked_cross_entropy_forward( N_CHUNKS , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ 256K vocab 
divided in 4 chunks @@ -196,9 +196,9 @@ def _cross_entropy_backward( VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP , + SOFTCAP : tl.constexpr(tl.float32), DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE , + LOGIT_SCALE : tl.constexpr(tl.float32), ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From b5c9f8193e050181328b5d53356862b269009cff Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:33:26 -0800 Subject: [PATCH 0643/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 7939758fda..eea6e86065 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,16 +32,16 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -105,17 +105,17 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , + VOCAB_SIZE : tl.constexpr(tl.int32), + N_CHUNKS : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -188,17 
+188,17 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride , + logits_row_stride : tl.constexpr(tl.int64), dloss_ptr , dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING : tl.constexpr(tl.int1), - SOFTCAP : tl.constexpr(tl.float32), + SOFTCAP , DO_LOGIT_SCALING : tl.constexpr(tl.int1), - LOGIT_SCALE : tl.constexpr(tl.float32), + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) From c7b22206e8ec4f15f63e812f70a3090c99f712fd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:35:01 -0800 Subject: [PATCH 0644/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index eea6e86065..9365b3f5c8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -32,7 +32,7 @@ @triton.jit def _cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , @@ -105,7 +105,7 @@ def _cross_entropy_forward( @triton.jit def _chunked_cross_entropy_forward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , loss_ptr , logsumexp_ptr , labels_ptr , @@ -188,7 +188,7 @@ def _chunked_cross_entropy_forward( @triton.jit def _cross_entropy_backward( logits_ptr , - logits_row_stride : tl.constexpr(tl.int64), + logits_row_stride , dloss_ptr , dloss_row_stride , logsumexp_ptr , From 2d0ab26c10b970426b4c074fd6e94c82e6561833 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:35:27 -0800 Subject: [PATCH 0645/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9365b3f5c8..19b384a3bd 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * tl.int64(logits_row_stride) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -143,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * tl.int64(logits_row_stride) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -218,7 +218,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * tl.int64(logits_row_stride) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE From 428f662c12261dc073256efbfd091b56ef628a00 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:38:19 -0800 Subject: [PATCH 0646/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 19b384a3bd..8b2204436b 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -38,9 +38,9 @@ def _cross_entropy_forward( labels_ptr , VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), + DO_SOFTCAPPING , SOFTCAP , - DO_LOGIT_SCALING : tl.constexpr(tl.int1), + DO_LOGIT_SCALING , LOGIT_SCALE , ): """ @@ -112,9 +112,9 @@ def _chunked_cross_entropy_forward( 
VOCAB_SIZE : tl.constexpr(tl.int32), N_CHUNKS : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), + DO_SOFTCAPPING , SOFTCAP , - DO_LOGIT_SCALING : tl.constexpr(tl.int1), + DO_LOGIT_SCALING , LOGIT_SCALE , ): """ @@ -195,9 +195,9 @@ def _cross_entropy_backward( labels_ptr , VOCAB_SIZE : tl.constexpr(tl.int32), BLOCK_SIZE : tl.constexpr(tl.int32), - DO_SOFTCAPPING : tl.constexpr(tl.int1), + DO_SOFTCAPPING , SOFTCAP , - DO_LOGIT_SCALING : tl.constexpr(tl.int1), + DO_LOGIT_SCALING , LOGIT_SCALE , ): """ From 5023ce908670a380e7bfe0fa57d38952039f9d73 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:40:13 -0800 Subject: [PATCH 0647/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 8b2204436b..a7ef164df0 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. 
""" row_idx = tl.program_id(0) - logits_ptr += row_idx * tl.int64(logits_row_stride) + logits_ptr += row_idx * logits_row_stride loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -143,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * tl.int64(logits_row_stride) + logits_ptr += row_idx * logits_row_stride loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -218,7 +218,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * tl.int64(logits_row_stride) + logits_ptr += row_idx * logits_row_stride dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE From 5ca3d4ad92c6dd76b54bf1f990beecd746d6200e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 21:42:13 -0800 Subject: [PATCH 0648/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index a7ef164df0..b4780db278 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,7 +36,7 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING , SOFTCAP , @@ -109,8 +109,8 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), - N_CHUNKS : tl.constexpr(tl.int32), + VOCAB_SIZE , + N_CHUNKS , BLOCK_SIZE : tl.constexpr(tl.int32), DO_SOFTCAPPING , SOFTCAP , @@ -193,7 +193,7 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr(tl.int32), 
DO_SOFTCAPPING , SOFTCAP , From 3b32d81f9ed40ed81cf98f5e735a14c43bd3ba66 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 23:53:03 -0800 Subject: [PATCH 0649/1088] int64 --- unsloth/kernels/cross_entropy_loss.py | 20 ++++++++++---------- unsloth/models/_utils.py | 13 ++++++++++++- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index b4780db278..11e582711d 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,8 +36,8 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * logits_row_stride.to(tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -109,9 +109,9 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -143,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * logits_row_stride.to(tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -193,8 +193,8 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - BLOCK_SIZE : tl.constexpr(tl.int32), + VOCAB_SIZE : tl.constexpr, + BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , DO_LOGIT_SCALING , @@ -218,7 +218,7 @@ def 
_cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride + logits_ptr += row_idx * logits_row_stride.to(tl.int64) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 837b4849f1..a260466915 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -92,6 +92,17 @@ # Stop "Special tokens have been added in the vocabulary, ..." import logging logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.CRITICAL+1) + +# Ignore logging messages +class HideLoggingMessage(logging.Filter): + def __init__(self, text): self.text = text + def filter(self, x): return not x.getMessage().startswith(self.text) +pass + +# The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. +import transformers.training_args.logger +transformers.training_args.logger.addFilter(HideLoggingMessage("The speedups")) + # ============================================= # ============================================= @@ -380,7 +391,7 @@ def is_big_gpu(index): import accelerate def torch_compile_kwargs(*args, **kwargs): print("Unsloth: Enabled auto compiling") - return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options} + return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options,} pass accelerate.utils.dataclasses.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs From 9bae6e21492b95b512d8f76375a4510ed1d7ec1c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 3 Nov 2024 23:55:39 -0800 Subject: [PATCH 0650/1088] Update _utils.py --- unsloth/models/_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a260466915..c309e1c1cd 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py 
@@ -100,8 +100,9 @@ def filter(self, x): return not x.getMessage().startswith(self.text) pass # The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. -import transformers.training_args.logger -transformers.training_args.logger.addFilter(HideLoggingMessage("The speedups")) +from transformers.training_args import logger as transformers_training_args_logger +transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups")) +del transformers_training_args_logger # ============================================= From 5123623d3e45a2895769dcb4862812dbd8e6ada3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 00:01:04 -0800 Subject: [PATCH 0651/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 30 +++++++++++++-------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 11e582711d..91b57fe85d 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -38,10 +38,10 @@ def _cross_entropy_forward( labels_ptr , VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. 
""" row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -112,10 +112,10 @@ def _chunked_cross_entropy_forward( VOCAB_SIZE : tl.constexpr, N_CHUNKS : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -143,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -195,10 +195,10 @@ def _cross_entropy_backward( labels_ptr , VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) @@ -218,7 +218,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE From 4b1d9e262216608fcc6db9172230f467656fb785 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 00:03:24 -0800 Subject: [PATCH 0652/1088] constexpr --- unsloth/kernels/cross_entropy_loss.py | 12 ++++++------ unsloth/models/_utils.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py 
b/unsloth/kernels/cross_entropy_loss.py index 91b57fe85d..9cf7ddc36d 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -38,9 +38,9 @@ def _cross_entropy_forward( labels_ptr , VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, + DO_SOFTCAPPING , SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING : tl.constexpr, + DO_LOGIT_SCALING , LOGIT_SCALE : tl.constexpr, ): """ @@ -112,9 +112,9 @@ def _chunked_cross_entropy_forward( VOCAB_SIZE : tl.constexpr, N_CHUNKS : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, + DO_SOFTCAPPING , SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING : tl.constexpr, + DO_LOGIT_SCALING , LOGIT_SCALE : tl.constexpr, ): """ @@ -195,9 +195,9 @@ def _cross_entropy_backward( labels_ptr , VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, + DO_SOFTCAPPING , SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING : tl.constexpr, + DO_LOGIT_SCALING , LOGIT_SCALE : tl.constexpr, ): """ diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c309e1c1cd..dd37d26ae4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -366,8 +366,8 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = True - +UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ +UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -379,7 +379,7 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = False) +patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = UNSLOTH_COMPILE_MAXIMUM) torch_compile_options = { "epilogue_fusion" : True, From 7d5111a40c13de36aa675d19fcb9c6c6a9deb5de Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 
4 Nov 2024 00:05:51 -0800 Subject: [PATCH 0653/1088] constexpr --- unsloth/kernels/cross_entropy_loss.py | 12 ++++++------ unsloth/models/_utils.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9cf7ddc36d..d1b8ae8275 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -39,9 +39,9 @@ def _cross_entropy_forward( VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -113,9 +113,9 @@ def _chunked_cross_entropy_forward( N_CHUNKS : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -196,9 +196,9 @@ def _cross_entropy_backward( VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , - SOFTCAP : tl.constexpr, + SOFTCAP , DO_LOGIT_SCALING , - LOGIT_SCALE : tl.constexpr, + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index dd37d26ae4..5c099548d6 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -366,7 +366,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ +UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools From dff5a5250eee1a751fc4d1efb886b9989ee56274 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 00:07:52 -0800 Subject: [PATCH 0654/1088] Update cross_entropy_loss.py --- 
unsloth/kernels/cross_entropy_loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d1b8ae8275..abe08d7d06 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -110,7 +110,7 @@ def _chunked_cross_entropy_forward( logsumexp_ptr , labels_ptr , VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, + N_CHUNKS , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , From 969d1bd287e8d3c3f31c2f4ee948dc44ca61913b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 00:32:35 -0800 Subject: [PATCH 0655/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index abe08d7d06..f0193c74d8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -36,7 +36,7 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , @@ -109,7 +109,7 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , N_CHUNKS , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , @@ -193,7 +193,7 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE : tl.constexpr, + VOCAB_SIZE , BLOCK_SIZE : tl.constexpr, DO_SOFTCAPPING , SOFTCAP , From 4b5847ffd53fb4dc33e982b31c97d45ad9b0383f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 01:20:31 -0800 Subject: [PATCH 0656/1088] Update _utils.py --- unsloth/models/_utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5c099548d6..7a8dcedada 100644 --- a/unsloth/models/_utils.py +++ 
b/unsloth/models/_utils.py @@ -41,14 +41,17 @@ "torch_amp_custom_bwd", "accelerate_old_send_to_device", "accelerate_new_send_to_device", - "patch_gradient_checkpointing", - "unpatch_gradient_checkpointing", "patch_gradient_accumulation_fix", "patch_compiling_bitsandbytes", "patch_regional_compilation", "patch_layernorm", "patch_torch_compile", "patch_model_and_tokenizer", + + "patch_unsloth_gradient_checkpointing", + "unpatch_unsloth_gradient_checkpointing", + "patch_gradient_checkpointing", + "unpatch_gradient_checkpointing", ] import torch From 766bf1ef658a01cf793deba145278331cbaa689a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 01:23:24 -0800 Subject: [PATCH 0657/1088] Update _utils.py --- unsloth/models/_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7a8dcedada..182fdb1dd4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -75,6 +75,11 @@ from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, unsloth_offloaded_gradient_checkpoint, + patch_unsloth_gradient_checkpointing, + unpatch_unsloth_gradient_checkpointing, + + Unsloth_Gradient_Checkpointer, + unsloth_gradient_checkpoint, patch_gradient_checkpointing, unpatch_gradient_checkpointing, ) From 646f1b7fcaae90a693707b2bd16519aa7cf13c8a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 17:56:16 -0800 Subject: [PATCH 0658/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 182fdb1dd4..7ed9f7360e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -375,7 +375,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ 
+UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From 97f37ace9678df055f0027c6ba31e986550755fe Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 19:45:10 -0800 Subject: [PATCH 0659/1088] CE --- unsloth/kernels/cross_entropy_loss.py | 2 ++ unsloth/models/_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f0193c74d8..9cc983d803 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -348,6 +348,8 @@ def backward(ctx, dlosses): div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks : int = div + (mod != 0) + print("111") + _cross_entropy_backward[(n_rows, n_blocks,)]( logits, logits.stride(0), dlosses, dlosses.stride(0), diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7ed9f7360e..cf34767d7b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -375,7 +375,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_MAXIMUM = False #"UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From cc563fac522bf00f6b28eeb9ac704f65a13d708a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 19:48:09 -0800 Subject: [PATCH 0660/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 9cc983d803..f0193c74d8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -348,8 +348,6 @@ def 
backward(ctx, dlosses): div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks : int = div + (mod != 0) - print("111") - _cross_entropy_backward[(n_rows, n_blocks,)]( logits, logits.stride(0), dlosses, dlosses.stride(0), From f643148ea5f2b760a1c9ab1db573e53ac0c7b613 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 21:47:48 -0800 Subject: [PATCH 0661/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cf34767d7b..7ed9f7360e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -375,7 +375,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = False #"UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From f28d7f6f678ea2a241171a5c438b8363cce10919 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 22:08:49 -0800 Subject: [PATCH 0662/1088] Update llama.py --- unsloth/models/llama.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3c4d8f3b38..c8bf5f8894 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1028,7 +1028,6 @@ def _CausalLM_fast_forward( pass -@torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, input_ids=None, From d8103e16b1c116e98c01e77c2f14f3eebeb93437 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 22:10:52 -0800 Subject: [PATCH 0663/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7ed9f7360e..cf34767d7b 100644 --- a/unsloth/models/_utils.py +++ 
b/unsloth/models/_utils.py @@ -375,7 +375,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_MAXIMUM = False #"UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From b9e1a49d2b01dd8557593c3f950b6ce74c4616d9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 22:38:47 -0800 Subject: [PATCH 0664/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index c0fb222b8a..b52afa18c2 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -130,7 +130,7 @@ def _gemma_rms_layernorm_forward( class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps :float, gemma : bool = False): + def forward(ctx, X, W, eps : float, gemma : bool = False): shape = X.shape dim : int = shape[-1] X = X.view(-1, dim) @@ -144,16 +144,26 @@ def forward(ctx, X, W, eps :float, gemma : bool = False): Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward - fx[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols, eps, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + if not gemma: + _rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + else: + _gemma_rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, 
W.stride(0), + r, r.stride(0), + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps From 56af302c45a8f72683b2467d5193a81a57a7f1a3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:01:14 -0800 Subject: [PATCH 0665/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index b52afa18c2..2a9fbccd9e 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -20,12 +20,17 @@ @triton.jit def _rms_layernorm_forward( - Y, Y_row_stride, - X, X_row_stride, - W, W_row_stride, - r, r_row_stride, - n_cols, eps, - BLOCK_SIZE : tl.constexpr + Y, + Y_row_stride, + X, + X_row_stride, + W, + W_row_stride, + r, + r_row_stride, + n_cols, + eps, + BLOCK_SIZE : tl.constexpr, ): """ Fast RMS Layernorm kernel @@ -150,7 +155,8 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): X, X.stride(0), W, W.stride(0), r, r.stride(0), - n_cols, eps, + n_cols = int(n_cols), + eps = float(eps), BLOCK_SIZE = BLOCK_SIZE, num_warps = num_warps, ) From a3c84a385119b43bae86dc7c36d0ffd2a3441690 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:04:34 -0800 Subject: [PATCH 0666/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 2a9fbccd9e..0dc4be6ba7 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -149,7 +149,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - if not gemma: + if gemma == False: _rms_layernorm_forward[(n_rows,)]( Y, Y.stride(0), X, 
X.stride(0), From f7d5c565847ef83d4a7888f7c2251661cf3a8803 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:06:13 -0800 Subject: [PATCH 0667/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 0dc4be6ba7..4074d3a502 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -138,7 +138,7 @@ class Fast_RMS_Layernorm(torch.autograd.Function): def forward(ctx, X, W, eps : float, gemma : bool = False): shape = X.shape dim : int = shape[-1] - X = X.view(-1, dim) + X : torch.Tensor = X.view(-1, dim) n_rows : int n_cols : int n_rows, n_cols = X.shape From 8496ff69e1d4217e915f5446873424c55403cb8c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:08:05 -0800 Subject: [PATCH 0668/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 42 ++++++++++++++++---------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4074d3a502..7a788f36a7 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -149,27 +149,27 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - if gemma == False: - _rms_layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols = int(n_cols), - eps = float(eps), - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) - else: - _gemma_rms_layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols, eps, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + # if gemma == False: + _rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, 
W.stride(0), + r, r.stride(0), + n_cols = int(n_cols), + eps = float(eps), + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) + # else: + # _gemma_rms_layernorm_forward[(n_rows,)]( + # Y, Y.stride(0), + # X, X.stride(0), + # W, W.stride(0), + # r, r.stride(0), + # n_cols, eps, + # BLOCK_SIZE = BLOCK_SIZE, + # num_warps = num_warps, + # ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps From 2909eaf08e92f81aa509d958d2c5bb17f2ddc870 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:11:03 -0800 Subject: [PATCH 0669/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 44 ++++++++++++++++---------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 7a788f36a7..b6ffa5fee9 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -138,7 +138,7 @@ class Fast_RMS_Layernorm(torch.autograd.Function): def forward(ctx, X, W, eps : float, gemma : bool = False): shape = X.shape dim : int = shape[-1] - X : torch.Tensor = X.view(-1, dim) + X = X.view(-1, dim) n_rows : int n_cols : int n_rows, n_cols = X.shape @@ -149,27 +149,27 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - # if gemma == False: - _rms_layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols = int(n_cols), - eps = float(eps), - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) - # else: - # _gemma_rms_layernorm_forward[(n_rows,)]( - # Y, Y.stride(0), - # X, X.stride(0), - # W, W.stride(0), - # r, r.stride(0), - # n_cols, eps, - # BLOCK_SIZE = BLOCK_SIZE, - # num_warps = num_warps, - # ) + if not gemma: + _rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols = int(n_cols), 
+ eps = float(eps), + BLOCK_SIZE = BLOCK_SIZE, + num_warps = 16, + ) + else: + _gemma_rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps From afc8af69f5a883e0699d9f9abd4c781da5ccef12 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:16:01 -0800 Subject: [PATCH 0670/1088] Update utils.py --- unsloth/kernels/utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index a8c20c75a4..b394d122fd 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -13,7 +13,7 @@ # limitations under the License. import triton -MAX_FUSED_SIZE = 65536 +MAX_FUSED_SIZE : int = 65536 next_power_of_2 = triton.next_power_of_2 # torch.cuda.amp.custom_fwd is deprecated >= 2.4 @@ -40,12 +40,12 @@ pass -def calculate_settings(n): - BLOCK_SIZE = next_power_of_2(n) +def calculate_settings(n : int) -> (int, int,): + BLOCK_SIZE : int = next_power_of_2(n) if BLOCK_SIZE > MAX_FUSED_SIZE: raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\ f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") - num_warps = 4 + num_warps : int = 4 if BLOCK_SIZE >= 32768: num_warps = 32 elif BLOCK_SIZE >= 8192: num_warps = 16 elif BLOCK_SIZE >= 2048: num_warps = 8 From 2d8d1e1e2da19235ca29641040f95619e690b16a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:19:02 -0800 Subject: [PATCH 0671/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index b6ffa5fee9..23b7342c87 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -144,7 +144,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): 
n_rows, n_cols = X.shape BLOCK_SIZE : int num_warps : int - BLOCK_SIZE, num_warps = calculate_settings(n_cols) + # BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") @@ -157,7 +157,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): r, r.stride(0), n_cols = int(n_cols), eps = float(eps), - BLOCK_SIZE = BLOCK_SIZE, + BLOCK_SIZE = 4096, num_warps = 16, ) else: From ecc1ad223c321c8ce009f702166397eba105b008 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:20:32 -0800 Subject: [PATCH 0672/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 23b7342c87..821f6b540d 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -171,8 +171,8 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): num_warps = num_warps, ) ctx.eps = eps - ctx.BLOCK_SIZE = BLOCK_SIZE - ctx.num_warps = num_warps + ctx.BLOCK_SIZE = 4096 + ctx.num_warps = 16 ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) return Y.view(*shape) From ae7cb78e9122b26536e8c25b1c79a2bd642e86da Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:24:33 -0800 Subject: [PATCH 0673/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 821f6b540d..53f1efa9f5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -16,6 +16,7 @@ import triton.language as tl import torch from .utils import calculate_settings +next_power_of_2 = triton.next_power_of_2 @triton.jit @@ -142,9 +143,15 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): n_rows : int 
n_cols : int n_rows, n_cols = X.shape - BLOCK_SIZE : int - num_warps : int - # BLOCK_SIZE, num_warps = calculate_settings(n_cols) + BLOCK_SIZE : int = next_power_of_2(n_cols) + MAX_FUSED_SIZE : int = 65536 + if BLOCK_SIZE > MAX_FUSED_SIZE: + raise RuntimeError(f"Cannot launch Triton kernel since n = {n_cols} exceeds "\ + f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") + num_warps : int = 4 + if BLOCK_SIZE >= 32768: num_warps = 32 + elif BLOCK_SIZE >= 8192: num_warps = 16 + elif BLOCK_SIZE >= 2048: num_warps = 8 Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") @@ -157,8 +164,8 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): r, r.stride(0), n_cols = int(n_cols), eps = float(eps), - BLOCK_SIZE = 4096, - num_warps = 16, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, ) else: _gemma_rms_layernorm_forward[(n_rows,)]( @@ -171,8 +178,8 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): num_warps = num_warps, ) ctx.eps = eps - ctx.BLOCK_SIZE = 4096 - ctx.num_warps = 16 + ctx.BLOCK_SIZE = BLOCK_SIZE + ctx.num_warps = num_warps ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) return Y.view(*shape) From 22da2662197fd0feb2843eca12036e6027b28670 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:27:50 -0800 Subject: [PATCH 0674/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 53f1efa9f5..a7385cb38e 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -15,8 +15,11 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings -next_power_of_2 = triton.next_power_of_2 +from .utils import ( + calculate_settings, + MAX_FUSED_SIZE, + next_power_of_2, +) @triton.jit @@ -143,8 +146,8 @@ def forward(ctx, X, W, eps : 
float, gemma : bool = False): n_rows : int n_cols : int n_rows, n_cols = X.shape - BLOCK_SIZE : int = next_power_of_2(n_cols) - MAX_FUSED_SIZE : int = 65536 + + BLOCK_SIZE : int = n_cols if BLOCK_SIZE > MAX_FUSED_SIZE: raise RuntimeError(f"Cannot launch Triton kernel since n = {n_cols} exceeds "\ f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") From beb6854e76db7ebe0419735f0b1e58683da3ad6a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:28:57 -0800 Subject: [PATCH 0675/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index a7385cb38e..4079921f91 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -15,11 +15,7 @@ import triton import triton.language as tl import torch -from .utils import ( - calculate_settings, - MAX_FUSED_SIZE, - next_power_of_2, -) +from .utils import calculate_settings @triton.jit @@ -147,19 +143,13 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): n_cols : int n_rows, n_cols = X.shape - BLOCK_SIZE : int = n_cols - if BLOCK_SIZE > MAX_FUSED_SIZE: - raise RuntimeError(f"Cannot launch Triton kernel since n = {n_cols} exceeds "\ - f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") - num_warps : int = 4 - if BLOCK_SIZE >= 32768: num_warps = 32 - elif BLOCK_SIZE >= 8192: num_warps = 16 - elif BLOCK_SIZE >= 2048: num_warps = 8 - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + BLOCK_SIZE : int + num_warps : int if not gemma: + BLOCK_SIZE, num_warps = calculate_settings(n_cols) _rms_layernorm_forward[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), From 14c3d2f900aa3cdb15d5c9e85ddd187f9fe532d5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:30:55 -0800 Subject: [PATCH 0676/1088] Update rms_layernorm.py --- 
unsloth/kernels/rms_layernorm.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4079921f91..3176d4e358 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -142,14 +142,14 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): n_rows : int n_cols : int n_rows, n_cols = X.shape + BLOCK_SIZE : int + num_warps : int + BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - BLOCK_SIZE : int - num_warps : int if not gemma: - BLOCK_SIZE, num_warps = calculate_settings(n_cols) _rms_layernorm_forward[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), @@ -158,7 +158,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): n_cols = int(n_cols), eps = float(eps), BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, + num_warps = 16, ) else: _gemma_rms_layernorm_forward[(n_rows,)]( @@ -168,7 +168,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): r, r.stride(0), n_cols, eps, BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, + num_warps = 16, ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE From ef4b079b87cb3b3956b34700d6de78ef81912f9c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:33:08 -0800 Subject: [PATCH 0677/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 3176d4e358..585684567c 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -206,9 +206,9 @@ def backward(ctx, dY): pass -def fast_rms_layernorm(layernorm, X, gemma = False): +def fast_rms_layernorm(layernorm, X, gemma : bool = False): W = layernorm.weight - eps = layernorm.variance_epsilon if \ + eps : float = 
layernorm.variance_epsilon if \ hasattr(layernorm, "variance_epsilon") \ else layernorm.eps out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) From ef684f8d23ba10fd96b6f16cff1681c0421862cd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:34:11 -0800 Subject: [PATCH 0678/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 585684567c..c331844e16 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -157,7 +157,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): r, r.stride(0), n_cols = int(n_cols), eps = float(eps), - BLOCK_SIZE = BLOCK_SIZE, + BLOCK_SIZE = triton.next_power_of_2(n_cols), num_warps = 16, ) else: @@ -167,7 +167,7 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): W, W.stride(0), r, r.stride(0), n_cols, eps, - BLOCK_SIZE = BLOCK_SIZE, + BLOCK_SIZE = triton.next_power_of_2(n_cols), num_warps = 16, ) ctx.eps = eps From 3e4c42f79ca041d388303880fef3a75e4728071e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:39:03 -0800 Subject: [PATCH 0679/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index c331844e16..ae16b40925 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -146,8 +146,8 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): num_warps : int BLOCK_SIZE, num_warps = calculate_settings(n_cols) - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") - r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + Y : torch.Tensor = torch.empty(X.shape, dtype = X.dtype, device = "cuda:0") + r : torch.Tensor = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") if not gemma: 
_rms_layernorm_forward[(n_rows,)]( From 8f825eb2845518a974245e99d80f713a0c97a1ee Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:45:25 -0800 Subject: [PATCH 0680/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 46 ++++++++++++++++---------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index ae16b40925..c6aecc6d2f 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -138,7 +138,7 @@ class Fast_RMS_Layernorm(torch.autograd.Function): def forward(ctx, X, W, eps : float, gemma : bool = False): shape = X.shape dim : int = shape[-1] - X = X.view(-1, dim) + # X = X.view(-1, dim) n_rows : int n_cols : int n_rows, n_cols = X.shape @@ -149,33 +149,33 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): Y : torch.Tensor = torch.empty(X.shape, dtype = X.dtype, device = "cuda:0") r : torch.Tensor = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - if not gemma: - _rms_layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols = int(n_cols), - eps = float(eps), - BLOCK_SIZE = triton.next_power_of_2(n_cols), - num_warps = 16, - ) - else: - _gemma_rms_layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols, eps, - BLOCK_SIZE = triton.next_power_of_2(n_cols), - num_warps = 16, - ) + # if not gemma: + _rms_layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols = int(n_cols), + eps = float(eps), + BLOCK_SIZE = triton.next_power_of_2(n_cols), + num_warps = 16, + ) + # else: + # _gemma_rms_layernorm_forward[(n_rows,)]( + # Y, Y.stride(0), + # X, X.stride(0), + # W, W.stride(0), + # r, r.stride(0), + # n_cols, eps, + # BLOCK_SIZE = triton.next_power_of_2(n_cols), + # num_warps = 16, + # ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE 
ctx.num_warps = num_warps ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) - return Y.view(*shape) + return Y#.view(*shape) pass @staticmethod From bd4ac7b21840d0c83464589f179eb16bce793c83 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:47:38 -0800 Subject: [PATCH 0681/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index c6aecc6d2f..287c8219b5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -137,8 +137,8 @@ class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod def forward(ctx, X, W, eps : float, gemma : bool = False): shape = X.shape - dim : int = shape[-1] - # X = X.view(-1, dim) + # dim : int = shape[-1] + X = X.view(shape[0] * shape[1], shape[2]) n_rows : int n_cols : int n_rows, n_cols = X.shape From 6f38731dfbf36093561c220f218acc3aec772a64 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 4 Nov 2024 23:54:02 -0800 Subject: [PATCH 0682/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 55 ++++++++++++-------------------- 1 file changed, 20 insertions(+), 35 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 287c8219b5..0846a09de6 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -20,17 +20,12 @@ @triton.jit def _rms_layernorm_forward( - Y, - Y_row_stride, - X, - X_row_stride, - W, - W_row_stride, - r, - r_row_stride, - n_cols, - eps, - BLOCK_SIZE : tl.constexpr, + Y, Y_row_stride, + X, X_row_stride, + W, W_row_stride, + r, r_row_stride, + n_cols, eps, + BLOCK_SIZE : tl.constexpr ): """ Fast RMS Layernorm kernel @@ -135,10 +130,10 @@ def _gemma_rms_layernorm_forward( class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps : float, gemma : bool = False): + def forward(ctx, X, W, eps :float, gemma 
: bool = False): shape = X.shape - # dim : int = shape[-1] - X = X.view(shape[0] * shape[1], shape[2]) + dim : int = shape[-1] + X = X.view(-1, dim) n_rows : int n_cols : int n_rows, n_cols = X.shape @@ -146,36 +141,25 @@ def forward(ctx, X, W, eps : float, gemma : bool = False): num_warps : int BLOCK_SIZE, num_warps = calculate_settings(n_cols) - Y : torch.Tensor = torch.empty(X.shape, dtype = X.dtype, device = "cuda:0") - r : torch.Tensor = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") + r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - # if not gemma: - _rms_layernorm_forward[(n_rows,)]( + fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward + fx[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), W, W.stride(0), r, r.stride(0), - n_cols = int(n_cols), - eps = float(eps), - BLOCK_SIZE = triton.next_power_of_2(n_cols), - num_warps = 16, + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, ) - # else: - # _gemma_rms_layernorm_forward[(n_rows,)]( - # Y, Y.stride(0), - # X, X.stride(0), - # W, W.stride(0), - # r, r.stride(0), - # n_cols, eps, - # BLOCK_SIZE = triton.next_power_of_2(n_cols), - # num_warps = 16, - # ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) - return Y#.view(*shape) + return Y.view(*shape) pass @staticmethod @@ -206,9 +190,10 @@ def backward(ctx, dY): pass -def fast_rms_layernorm(layernorm, X, gemma : bool = False): +@torch.compiler.disable +def fast_rms_layernorm(layernorm, X, gemma = False): W = layernorm.weight - eps : float = layernorm.variance_epsilon if \ + eps = layernorm.variance_epsilon if \ hasattr(layernorm, "variance_epsilon") \ else layernorm.eps out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) From 2df35d43e517916060266723254638756a8fc111 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 00:05:56 -0800 Subject: [PATCH 
0683/1088] typing --- unsloth/kernels/rms_layernorm.py | 17 +++++++++-------- unsloth/kernels/rope_embedding.py | 18 ++++++++++++++++-- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 0846a09de6..4b22f8c3e5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -60,7 +60,7 @@ def _rms_layernorm_backward( X, X_row_stride, W, W_row_stride, r, r_row_stride, - dW, dW_row_stride, + # dW, dW_row_stride, n_cols, eps, GEMMA : tl.constexpr, BLOCK_SIZE : tl.constexpr, @@ -130,7 +130,7 @@ def _gemma_rms_layernorm_forward( class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps :float, gemma : bool = False): + def forward(ctx, X : torch.Tensor, W : torch.Tensor, eps : float, gemma : bool = False): shape = X.shape dim : int = shape[-1] X = X.view(-1, dim) @@ -163,7 +163,7 @@ def forward(ctx, X, W, eps :float, gemma : bool = False): pass @staticmethod - def backward(ctx, dY): + def backward(ctx, dY : torch.Tensor): shape = dY.shape dim : int = shape[-1] dY = dY.view(-1, dim) @@ -171,14 +171,14 @@ def backward(ctx, dY): n_rows : int n_cols : int n_rows, n_cols = dY.shape - dW = X + # dW = X _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), - dW, dW.stride(0), + # dW, dW.stride(0), n_cols, ctx.eps, GEMMA = ctx.GEMMA, BLOCK_SIZE = ctx.BLOCK_SIZE, @@ -190,10 +190,11 @@ def backward(ctx, dY): pass +# [TODO] Unsure why RMS Layernorm is not torch.compiling properly @torch.compiler.disable -def fast_rms_layernorm(layernorm, X, gemma = False): - W = layernorm.weight - eps = layernorm.variance_epsilon if \ +def fast_rms_layernorm(layernorm, X : torch.Tensor, gemma : bool = False): + W : torch.Tensor = layernorm.weight + eps : float = layernorm.variance_epsilon if \ hasattr(layernorm, "variance_epsilon") \ else layernorm.eps out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) diff 
--git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 2934ac41c9..44a7cda12f 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -18,7 +18,7 @@ from .utils import calculate_settings ROPE_GROUP_SIZE = 4 -@triton.heuristics({"BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],}) +@triton.heuristics({"BACKWARD_PASS": lambda args: bool(args["BACKWARD_PASS"]),}) @triton.jit def _rope_embedding( Q, Q_row_stride, @@ -75,8 +75,14 @@ class Fast_RoPE_Embedding(torch.autograd.Function): @staticmethod def forward(ctx, Q, cos, sin): cos, sin = cos.squeeze(), sin.squeeze() + batch : int + seq_len : int + n_heads : int + head_dim : int batch, seq_len, n_heads, head_dim = Q.shape Q = Q.view(batch*seq_len, n_heads*head_dim) + n_rows : int + n_cols : int n_rows, n_cols = Q.shape assert(seq_len <= cos.shape[0]) @@ -85,8 +91,10 @@ def forward(ctx, Q, cos, sin): BLOCK_SIZE, num_warps = calculate_settings(head_dim//2) # (head_dim//2) # group_size = 4 # 4 or 8, too large group_size can hurt performance. 
+ div : int + mod : int div, mod = divmod(n_heads, ROPE_GROUP_SIZE) - n_groups = div + (mod != 0) + n_groups : int = div + (mod != 0) _rope_embedding[(n_rows, n_groups, )]( Q, Q.stride(0), @@ -108,9 +116,15 @@ def forward(ctx, Q, cos, sin): @staticmethod def backward(ctx, dY): + batch : int + seq_len : int + n_heads : int + head_dim : int batch, seq_len, n_heads, head_dim = dY.shape dY = dY.reshape(batch*seq_len, n_heads*head_dim) # Must be reshape not view + n_rows : int + n_cols : int n_rows, n_cols = dY.shape cos = ctx.cos From 74d89d11552d24df9ee76c4cec94cd895c363c0c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 00:08:16 -0800 Subject: [PATCH 0684/1088] Update rope_embedding.py --- unsloth/kernels/rope_embedding.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 44a7cda12f..a8173d2453 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -25,9 +25,9 @@ def _rope_embedding( cos, cos_row_stride, sin, sin_row_stride, seqlen, - head_dim : tl.constexpr, - n_heads : tl.constexpr, - BACKWARD_PASS : tl.constexpr, + head_dim, + n_heads, + BACKWARD_PASS, BLOCK_SIZE : tl.constexpr, ): """ From 98927ee333aed07f9566dd05aa8ed1c357d993ae Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 00:09:11 -0800 Subject: [PATCH 0685/1088] types --- unsloth/kernels/rms_layernorm.py | 2 +- unsloth/kernels/rope_embedding.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4b22f8c3e5..10b435dd57 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -191,7 +191,7 @@ def backward(ctx, dY : torch.Tensor): # [TODO] Unsure why RMS Layernorm is not torch.compiling properly -@torch.compiler.disable +# @torch.compiler.disable def fast_rms_layernorm(layernorm, X : torch.Tensor, gemma : bool = False): W : 
torch.Tensor = layernorm.weight eps : float = layernorm.variance_epsilon if \ diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index a8173d2453..246055dc7b 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -16,7 +16,7 @@ import triton.language as tl import torch from .utils import calculate_settings -ROPE_GROUP_SIZE = 4 +ROPE_GROUP_SIZE : int = 4 @triton.heuristics({"BACKWARD_PASS": lambda args: bool(args["BACKWARD_PASS"]),}) @triton.jit From f3e2bd6e5ebb83c21d3bd07c3268d38cf7fb4418 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 00:11:32 -0800 Subject: [PATCH 0686/1088] Disable compiling --- unsloth/kernels/rms_layernorm.py | 2 +- unsloth/kernels/rope_embedding.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 10b435dd57..4b22f8c3e5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -191,7 +191,7 @@ def backward(ctx, dY : torch.Tensor): # [TODO] Unsure why RMS Layernorm is not torch.compiling properly -# @torch.compiler.disable +@torch.compiler.disable def fast_rms_layernorm(layernorm, X : torch.Tensor, gemma : bool = False): W : torch.Tensor = layernorm.weight eps : float = layernorm.variance_epsilon if \ diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 246055dc7b..7fe15d0e3b 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -25,9 +25,9 @@ def _rope_embedding( cos, cos_row_stride, sin, sin_row_stride, seqlen, - head_dim, - n_heads, - BACKWARD_PASS, + head_dim : tl.constexpr, + n_heads : tl.constexpr, + BACKWARD_PASS : tl.constexpr, BLOCK_SIZE : tl.constexpr, ): """ @@ -144,7 +144,8 @@ def backward(ctx, dY): pass pass - +# [TODO] Unsure why RoPE Embedding is not torch.compiling properly +@torch.compiler.disable def fast_rope_embedding(Q, K, cos, sin): Q = 
Fast_RoPE_Embedding.apply(Q.transpose(1, 2), cos, sin).transpose(1, 2) K = Fast_RoPE_Embedding.apply(K.transpose(1, 2), cos, sin).transpose(1, 2) From c30bd2a2caaff79ca71f9c2df004fdef810f3a91 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 00:15:56 -0800 Subject: [PATCH 0687/1088] Update _utils.py --- unsloth/models/_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cf34767d7b..cdb28234c7 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -321,7 +321,8 @@ def _is_openai_available(): return False ) pass import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention +# [TODO] Unsure why Xformers is also breaking as well +xformers_attention = torch.compiler.disable(xformers.memory_efficient_attention) # Check TRL version from trl import __version__ as trl_version From 813cbdd220e8b7a994fd33373122f2189556e2bf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 01:30:34 -0800 Subject: [PATCH 0688/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cdb28234c7..22aa4d3d80 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -395,13 +395,13 @@ def is_big_gpu(index): "max_autotune" : True, "shape_padding" : True, "trace.enabled" : UNSLOTH_COMPILE_DEBUG, - "triton.cudagraphs" : False, + "triton.cudagraphs" : True, } import accelerate def torch_compile_kwargs(*args, **kwargs): print("Unsloth: Enabled auto compiling") - return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options,} + return {"dynamic" : True, "fullgraph" : True, "options" : torch_compile_options,} pass accelerate.utils.dataclasses.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs From 34ce5d1dd9c44e52f098d0c2536b76a35ff7771f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 
2024 01:36:11 -0800 Subject: [PATCH 0689/1088] Forward hook --- unsloth/models/_utils.py | 14 +++++++------- unsloth/models/llama.py | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 22aa4d3d80..72994fa623 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -467,13 +467,13 @@ def prepare_model_for_kbit_training( pass # If use_reentrant = True which is the Pytorch default, we just make the input requires_grad. - if use_reentrant: - if hasattr(model, "enable_input_require_grads"): - model.enable_input_require_grads() - else: - def make_inputs_require_grad(module, input, output): - output.requires_grad_(True) - model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + # if use_reentrant: + # if hasattr(model, "enable_input_require_grads"): + # model.enable_input_require_grads() + # else: + # def make_inputs_require_grad(module, input, output): + # output.requires_grad_(True) + # model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c8bf5f8894..61857431b9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -606,6 +606,7 @@ def LlamaModel_fast_forward( # Embed positions if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + inputs_embeds.requires_grad_(True) # inputs_embeds = inputs_embeds.to(self.config.torch_dtype) torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) From f84cf4be32c4b13120f1e81e85ebbf44c0c19703 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 01:41:04 -0800 Subject: [PATCH 0690/1088] Update _utils.py --- unsloth/models/_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 72994fa623..cdb28234c7 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -395,13 
+395,13 @@ def is_big_gpu(index): "max_autotune" : True, "shape_padding" : True, "trace.enabled" : UNSLOTH_COMPILE_DEBUG, - "triton.cudagraphs" : True, + "triton.cudagraphs" : False, } import accelerate def torch_compile_kwargs(*args, **kwargs): print("Unsloth: Enabled auto compiling") - return {"dynamic" : True, "fullgraph" : True, "options" : torch_compile_options,} + return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options,} pass accelerate.utils.dataclasses.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs @@ -467,13 +467,13 @@ def prepare_model_for_kbit_training( pass # If use_reentrant = True which is the Pytorch default, we just make the input requires_grad. - # if use_reentrant: - # if hasattr(model, "enable_input_require_grads"): - # model.enable_input_require_grads() - # else: - # def make_inputs_require_grad(module, input, output): - # output.requires_grad_(True) - # model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + if use_reentrant: + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + else: + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model pass From 745814c4234df38087780ad0f6598321d903b1ba Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 01:42:44 -0800 Subject: [PATCH 0691/1088] Update llama.py --- unsloth/models/llama.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 61857431b9..d379c56a84 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -390,7 +390,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if (not HAS_FLASH_ATTENTION and attention_mask is None): + if False:#(not HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention # Also has 
Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -430,7 +430,7 @@ def LlamaAttention_fast_forward( Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! - A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) + A = scaled_dot_product_attention(Q, K, V, is_causal = True) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass @@ -606,7 +606,6 @@ def LlamaModel_fast_forward( # Embed positions if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - inputs_embeds.requires_grad_(True) # inputs_embeds = inputs_embeds.to(self.config.torch_dtype) torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) From ab9f8e17513dc266fcdb1756b0d9b4fe0ae12ce8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 01:52:27 -0800 Subject: [PATCH 0692/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cdb28234c7..5a7d0b005f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -376,7 +376,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = False #"UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From daa79099940bcea1964c4973facb1185c19c1430 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 01:55:17 -0800 Subject: [PATCH 0693/1088] Update llama.py --- unsloth/models/llama.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 
d379c56a84..3b8582581f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -390,7 +390,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if False:#(not HAS_FLASH_ATTENTION and attention_mask is None): + if (not HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -430,7 +430,7 @@ def LlamaAttention_fast_forward( Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! - A = scaled_dot_product_attention(Q, K, V, is_causal = True) + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass @@ -527,6 +527,7 @@ def LlamaDecoderLayer_fast_forward( } # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +@torch._disable_dynamo def LlamaModel_fast_forward( self, input_ids: torch.LongTensor, From 536a1a6e522d6efda1b41896d787fb624bce2688 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 02:06:27 -0800 Subject: [PATCH 0694/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3b8582581f..3c4d8f3b38 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -527,7 +527,6 @@ def LlamaDecoderLayer_fast_forward( } # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 -@torch._disable_dynamo def LlamaModel_fast_forward( self, input_ids: torch.LongTensor, @@ -1029,6 +1028,7 @@ def _CausalLM_fast_forward( pass +@torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, input_ids=None, From 648ca59185637a540dea2c233f2d067bad83eb3c 
Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 12:05:17 -0800 Subject: [PATCH 0695/1088] Update _utils.py --- unsloth/models/_utils.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5a7d0b005f..94cf1b74e0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -321,8 +321,7 @@ def _is_openai_available(): return False ) pass import xformers.ops.fmha as xformers -# [TODO] Unsure why Xformers is also breaking as well -xformers_attention = torch.compiler.disable(xformers.memory_efficient_attention) +xformers_attention = xformers.memory_efficient_attention # Check TRL version from trl import __version__ as trl_version @@ -375,8 +374,8 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = True #"UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = True #"UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ +UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) From 486d0d6b338d472deb233a3fc0cfb82b5edaeadd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 13:28:25 -0800 Subject: [PATCH 0696/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8922cc7c8d..e0c5d93562 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo", + "unsloth_zoo>=2024.11.1", "packaging", "tyro", "transformers>=4.46.1", @@ -244,7 +244,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo", + "unsloth_zoo>=2024.11.1", "packaging", "tyro", "transformers>=4.46.1", From e2e406eba603eb605d622f17fa33660bbf4ad649 
Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 13:29:37 -0800 Subject: [PATCH 0697/1088] Bug fixes (#1245) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py 
* Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. 
* Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling 
* Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 --- pyproject.toml | 8 +- unsloth/__init__.py | 28 +- unsloth/kernels/__init__.py | 17 +- unsloth/kernels/cross_entropy_loss.py | 214 +++++---------- unsloth/kernels/layernorm.py | 24 +- unsloth/kernels/rms_layernorm.py | 30 +- unsloth/kernels/rope_embedding.py | 23 +- unsloth/kernels/utils.py | 8 +- unsloth/models/_utils.py | 377 +++++--------------------- unsloth/models/gemma.py | 59 +--- unsloth/models/gemma2.py | 59 +--- unsloth/models/llama.py | 94 +------ unsloth/save.py | 66 +++-- unsloth/tokenizer_utils.py | 7 +- 14 files changed, 287 insertions(+), 727 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fc9c8256ad..e0c5d93562 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo", + "unsloth_zoo>=2024.11.1", "packaging", "tyro", - "transformers>=4.44.2", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -244,10 +244,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo", + "unsloth_zoo>=2024.11.1", "packaging", "tyro", - "transformers>=4.44.2", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 458c2696bc..5102d8f466 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -27,13 +27,6 @@ # pass # pass -# Check for unsloth_zoo -try: - import unsloth_zoo -except: - raise 
ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") -pass - # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! @@ -60,6 +53,14 @@ # Reduce VRAM usage by reducing fragmentation os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +# Hugging Face Hub faster downloads +if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +pass + +# Log Unsloth is being used +os.environ["UNSLOTH_IS_PRESENT"] = "1" + try: import torch except ModuleNotFoundError: @@ -71,12 +72,6 @@ raise exception pass -# Hugging Face Hub faster downloads (only enable during Colab and Kaggle sessions) -keynames = "\n" + "\n".join(os.environ.keys()) -if "\nCOLAB_" in keynames or "\nKAGGLE_" in keynames: - os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -pass - # We support Pytorch 2 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") @@ -165,6 +160,13 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 pass pass +# Check for unsloth_zoo +try: + import unsloth_zoo +except: + raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") +pass + from .models import * from .save import * from .chat_templates import * diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 3e55332c80..82e7641693 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -14,8 +14,8 @@ from .cross_entropy_loss import ( fast_cross_entropy_loss, - patch_llama_for_causal_lm, - unpatch_llama_for_causal_lm, + post_patch_loss_function, + patch_loss_functions, ) from .rms_layernorm import ( fast_rms_layernorm, @@ -25,7 +25,6 @@ from .layernorm import ( fast_layernorm, patch_layernorm, - unpatch_layernorm, ) from .rope_embedding import fast_rope_embedding, 
inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel @@ -54,8 +53,12 @@ create_flex_attention_sliding_window_mask, ) -try: - print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") -except: - print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") +import os +if "UNSLOTH_ZOO_IS_PRESENT" not in os.environ: + try: + print("🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.") + except: + print("Unsloth: Will patch your computer to enable 2x faster free finetuning.") + pass pass +del os diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f2377d55cc..f0193c74d8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -17,24 +17,31 @@ import torch from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh from transformers.models.llama.modeling_llama import logger +from packaging.version import Version + +from unsloth_zoo.loss_utils import ( + patch_loss_functions as _patch_loss_functions, + post_patch_loss_function, +) @triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), }) @triton.jit def _cross_entropy_forward( - logits_ptr, logits_row_stride, - loss_ptr, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + loss_ptr , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -57,7 +64,7 @@ def _cross_entropy_forward( This 
ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -71,7 +78,7 @@ def _cross_entropy_forward( # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -92,22 +99,23 @@ def _cross_entropy_forward( @triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr, logits_row_stride, - loss_ptr, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + loss_ptr , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + N_CHUNKS , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -135,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -149,7 +157,7 @@ def _chunked_cross_entropy_forward( # Go logit scaling for Cohere: t 
* x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -174,21 +182,23 @@ def _chunked_cross_entropy_forward( @triton.heuristics({ - "DO_SOFTCAPPING": lambda args: args["DO_SOFTCAPPING" ], - "DO_LOGIT_SCALING": lambda args: args["DO_LOGIT_SCALING"], + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), }) @triton.jit def _cross_entropy_backward( - logits_ptr, logits_row_stride, - dloss_ptr, dloss_row_stride, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + dloss_ptr , + dloss_row_stride , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ CE_i = -y log(P) = y * (log[sum(exp(x))] - x) @@ -208,7 +218,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE @@ -228,9 +238,10 @@ def _cross_entropy_backward( pass # Do logit softcapping for Gemma 2: t * tanh(1/t * x) + partial = x if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) - partial = triton_tanh(x / SOFTCAP) + partial = triton_tanh(x.to(tl.float32) / SOFTCAP).to(x.dtype) x = SOFTCAP * partial pass @@ -261,16 +272,20 @@ def _cross_entropy_backward( class 
Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod - def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): + def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : float = 0): + n_rows : int + vocab_size : int n_rows, vocab_size = logits.shape div, mod = divmod(vocab_size, MAX_FUSED_SIZE) - n_chunks = div + (mod != 0) + n_chunks : int = div + (mod != 0) losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - DO_SOFTCAPPING = (logit_softcapping != 0) - DO_LOGIT_SCALING = (logit_scaling != 0) + DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) + DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) + BLOCK_SIZE : int + num_warps : int if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) @@ -325,11 +340,13 @@ def forward(ctx, logits, labels, logit_softcapping = 0, logit_scaling = 0): @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors + n_rows : int + vocab_size : int n_rows, vocab_size = logits.shape - BLOCK_SIZE = 4096 + BLOCK_SIZE : int = 4096 div, mod = divmod(vocab_size, BLOCK_SIZE) - n_blocks = div + (mod != 0) + n_blocks : int = div + (mod != 0) _cross_entropy_backward[(n_rows, n_blocks,)]( logits, logits.stride(0), @@ -342,14 +359,13 @@ def backward(ctx, dlosses): SOFTCAP = ctx.logit_softcapping, DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, LOGIT_SCALE = ctx.logit_scaling, - num_warps = 8, + num_warps = 8, ) return logits, None, None, None, pass pass -@torch._disable_dynamo def fast_cross_entropy_loss( logits, labels, @@ -377,96 +393,12 @@ def fast_cross_entropy_loss( n_items = torch.count_nonzero(labels != -100) return loss.sum() / n_items pass - - -from transformers.models.llama.modeling_llama import ( - LlamaForCausalLM, - CausalLMOutputWithPast, - Optional, - Union, - Cache, - List, - Tuple, -) - -# Transformers 4.47 need Unpack, KwargsForCausalLM -try: - from 
transformers.models.llama.modeling_llama import Unpack, KwargsForCausalLM -except: - pass -pass - -import inspect, re -function = inspect.getsource(LlamaForCausalLM.forward) -function = function.split("\n") -i = re.match(r"[ ]{1,}", function[0]).span(0)[1] -function = [x[i:] for x in function] -function = "\n".join(function) -function = function[function.find("def forward"):] -replacement = """ loss = None - logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - logit_scaling = getattr(self.config, "logit_scale", 0) - if labels is not None: - shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - pass - - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, - logit_softcapping = logit_softcapping, - logit_scaling = logit_scaling, - n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), - ) - else: - if logit_scaling != 0: - if logits.requires_grad: - logits = logit_scaling * logits - else: - logits *= logit_scaling - pass - pass - if logit_softcapping != 0: - if logits.requires_grad: - logits = (1.0 / logit_softcapping) * logits - logits = torch.tanh(logits) - logits = logit_softcapping * logits - else: - logits *= (1.0 / logit_softcapping) - torch.tanh(logits, out = logits) - logits *= logit_softcapping - pass - pass - pass -""" -function = \ - function[:function.find(" loss = None")] + \ - replacement + \ - function[ function.find(" if not return_dict"):] -function = function.replace("logits = logits.float()", "\n") -# Missed spaces -function = function.split("\n") -# Not the first one though! 
-function = [function[0]] + [" "*4 + x for x in function[1:]] -function = "\n".join(function) -function = f"class Unsloth_LlamaForCausalLM(LlamaForCausalLM):\n"\ -f" {function}\n" -exec(function, globals()) -del function, replacement, inspect, re - - -def patch_llama_for_causal_lm(): - import transformers.models.llama.modeling_llama - transformers.models.llama.modeling_llama.LlamaForCausalLM = Unsloth_LlamaForCausalLM - return +if (Version(torch.__version__) < Version("2.4.0")) and \ + not hasattr(fast_cross_entropy_loss, "__wrapped__"): + fast_cross_entropy_loss = torch._disable_dynamo(fast_cross_entropy_loss) pass - -def unpatch_llama_for_causal_lm(): - import transformers.models.llama.modeling_llama - transformers.models.llama.modeling_llama.LlamaForCausalLM = LlamaForCausalLM - return +# Patch CE Losses in transformers +def patch_loss_functions(): + _patch_loss_functions(fast_cross_entropy_loss) pass diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 48ade6d5ec..a5f7926e2e 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -17,6 +17,9 @@ import triton.language as tl import torch from .utils import calculate_settings +from unsloth_zoo.patching_utils import ( + patch_layernorm, +) @triton.jit @@ -162,27 +165,6 @@ def fast_layernorm(layernorm, X): pass -from torch.nn import LayerNorm -class Unsloth_LayerNorm(LayerNorm): - def forward(self, X): - return fast_layernorm(self, X) - pass -pass - - -def patch_layernorm(): - import torch.nn - torch.nn.LayerNorm = Unsloth_LayerNorm - return -pass - - -def unpatch_layernorm(): - import torch.nn - torch.nn.LayerNorm = LayerNorm - return -pass - def test_layernorm( dim = 1024, eps = 1e-5, dtype = torch.float16, diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 13faf08d6a..4b22f8c3e5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -53,14 +53,14 @@ def _rms_layernorm_forward( pass 
-@triton.heuristics({"GEMMA": lambda args: args["GEMMA"],}) +@triton.heuristics({"GEMMA": lambda args: bool(args["GEMMA"]),}) @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, - dW, dW_row_stride, + # dW, dW_row_stride, n_cols, eps, GEMMA : tl.constexpr, BLOCK_SIZE : tl.constexpr, @@ -130,11 +130,15 @@ def _gemma_rms_layernorm_forward( class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod - def forward(ctx, X, W, eps, gemma = False): + def forward(ctx, X : torch.Tensor, W : torch.Tensor, eps : float, gemma : bool = False): shape = X.shape - dim = shape[-1] + dim : int = shape[-1] X = X.view(-1, dim) + n_rows : int + n_cols : int n_rows, n_cols = X.shape + BLOCK_SIZE : int + num_warps : int BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") @@ -159,20 +163,22 @@ def forward(ctx, X, W, eps, gemma = False): pass @staticmethod - def backward(ctx, dY): + def backward(ctx, dY : torch.Tensor): shape = dY.shape - dim = shape[-1] + dim : int = shape[-1] dY = dY.view(-1, dim) X, W, r = ctx.saved_tensors + n_rows : int + n_cols : int n_rows, n_cols = dY.shape - dW = X + # dW = X _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), - dW, dW.stride(0), + # dW, dW.stride(0), n_cols, ctx.eps, GEMMA = ctx.GEMMA, BLOCK_SIZE = ctx.BLOCK_SIZE, @@ -184,9 +190,11 @@ def backward(ctx, dY): pass -def fast_rms_layernorm(layernorm, X, gemma = False): - W = layernorm.weight - eps = layernorm.variance_epsilon if \ +# [TODO] Unsure why RMS Layernorm is not torch.compiling properly +@torch.compiler.disable +def fast_rms_layernorm(layernorm, X : torch.Tensor, gemma : bool = False): + W : torch.Tensor = layernorm.weight + eps : float = layernorm.variance_epsilon if \ hasattr(layernorm, "variance_epsilon") \ else layernorm.eps out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) diff --git 
a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 2934ac41c9..7fe15d0e3b 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -16,9 +16,9 @@ import triton.language as tl import torch from .utils import calculate_settings -ROPE_GROUP_SIZE = 4 +ROPE_GROUP_SIZE : int = 4 -@triton.heuristics({"BACKWARD_PASS": lambda args: args["BACKWARD_PASS"],}) +@triton.heuristics({"BACKWARD_PASS": lambda args: bool(args["BACKWARD_PASS"]),}) @triton.jit def _rope_embedding( Q, Q_row_stride, @@ -75,8 +75,14 @@ class Fast_RoPE_Embedding(torch.autograd.Function): @staticmethod def forward(ctx, Q, cos, sin): cos, sin = cos.squeeze(), sin.squeeze() + batch : int + seq_len : int + n_heads : int + head_dim : int batch, seq_len, n_heads, head_dim = Q.shape Q = Q.view(batch*seq_len, n_heads*head_dim) + n_rows : int + n_cols : int n_rows, n_cols = Q.shape assert(seq_len <= cos.shape[0]) @@ -85,8 +91,10 @@ def forward(ctx, Q, cos, sin): BLOCK_SIZE, num_warps = calculate_settings(head_dim//2) # (head_dim//2) # group_size = 4 # 4 or 8, too large group_size can hurt performance. 
+ div : int + mod : int div, mod = divmod(n_heads, ROPE_GROUP_SIZE) - n_groups = div + (mod != 0) + n_groups : int = div + (mod != 0) _rope_embedding[(n_rows, n_groups, )]( Q, Q.stride(0), @@ -108,9 +116,15 @@ def forward(ctx, Q, cos, sin): @staticmethod def backward(ctx, dY): + batch : int + seq_len : int + n_heads : int + head_dim : int batch, seq_len, n_heads, head_dim = dY.shape dY = dY.reshape(batch*seq_len, n_heads*head_dim) # Must be reshape not view + n_rows : int + n_cols : int n_rows, n_cols = dY.shape cos = ctx.cos @@ -130,7 +144,8 @@ def backward(ctx, dY): pass pass - +# [TODO] Unsure why RoPE Embedding is not torch.compiling properly +@torch.compiler.disable def fast_rope_embedding(Q, K, cos, sin): Q = Fast_RoPE_Embedding.apply(Q.transpose(1, 2), cos, sin).transpose(1, 2) K = Fast_RoPE_Embedding.apply(K.transpose(1, 2), cos, sin).transpose(1, 2) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index a8c20c75a4..b394d122fd 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -13,7 +13,7 @@ # limitations under the License. 
import triton -MAX_FUSED_SIZE = 65536 +MAX_FUSED_SIZE : int = 65536 next_power_of_2 = triton.next_power_of_2 # torch.cuda.amp.custom_fwd is deprecated >= 2.4 @@ -40,12 +40,12 @@ pass -def calculate_settings(n): - BLOCK_SIZE = next_power_of_2(n) +def calculate_settings(n : int) -> (int, int,): + BLOCK_SIZE : int = next_power_of_2(n) if BLOCK_SIZE > MAX_FUSED_SIZE: raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\ f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") - num_warps = 4 + num_warps : int = 4 if BLOCK_SIZE >= 32768: num_warps = 32 elif BLOCK_SIZE >= 8192: num_warps = 16 elif BLOCK_SIZE >= 2048: num_warps = 8 diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 51c63fc7dd..94cf1b74e0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.10.7" +__version__ = "2024.11.1" __all__ = [ "prepare_model_for_kbit_training", @@ -41,9 +41,17 @@ "torch_amp_custom_bwd", "accelerate_old_send_to_device", "accelerate_new_send_to_device", + "patch_gradient_accumulation_fix", + "patch_compiling_bitsandbytes", + "patch_regional_compilation", + "patch_layernorm", + "patch_torch_compile", + "patch_model_and_tokenizer", + + "patch_unsloth_gradient_checkpointing", + "unpatch_unsloth_gradient_checkpointing", "patch_gradient_checkpointing", "unpatch_gradient_checkpointing", - "patch_gradient_accumulation_fix", ] import torch @@ -54,6 +62,28 @@ import warnings, subprocess, re, inspect, psutil, os, math from packaging.version import Version +from unsloth_zoo.tokenizer_utils import ( + patch_tokenizer as _patch_tokenizer, +) +from unsloth_zoo.patching_utils import ( + patch_compiling_bitsandbytes, + patch_layernorm, + patch_torch_compile, + patch_regional_compilation, + patch_model_and_tokenizer, +) +from unsloth_zoo.gradient_checkpointing import ( + Unsloth_Offloaded_Gradient_Checkpointer, 
+ unsloth_offloaded_gradient_checkpoint, + patch_unsloth_gradient_checkpointing, + unpatch_unsloth_gradient_checkpointing, + + Unsloth_Gradient_Checkpointer, + unsloth_gradient_checkpoint, + patch_gradient_checkpointing, + unpatch_gradient_checkpointing, +) + # ============================================= # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") @@ -70,6 +100,18 @@ # Stop "Special tokens have been added in the vocabulary, ..." import logging logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.CRITICAL+1) + +# Ignore logging messages +class HideLoggingMessage(logging.Filter): + def __init__(self, text): self.text = text + def filter(self, x): return not x.getMessage().startswith(self.text) +pass + +# The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. +from transformers.training_args import logger as transformers_training_args_logger +transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups")) +del transformers_training_args_logger + # ============================================= # ============================================= @@ -129,7 +171,6 @@ def patch_mistral_nemo_config(config): # ============================================= # torch.cuda.amp.custom_fwd is deprecated >= 2.4 -import torch torch_version = torch.__version__ if Version(torch_version) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd @@ -333,7 +374,8 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings - +UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ +UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -345,47 +387,27 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = 
is_big_gpu +patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = UNSLOTH_COMPILE_MAXIMUM) - -# Torch compile arguments -torch_compile_arguments = [ - "config.dce = True", - "config.memory_planning = True", - "config.memory_pool = 'combined'", - "config.coordinate_descent_tuning = True", - "config.max_autotune_gemm = False", # GEMM is unnecessary - "config.autotune_multi_device = False", - "config.max_autotune_gemm_backends = 'TRITON,ATEN,CPP'", # Not much faster - "config.aggressive_fusion = False", # Careful changes results! - "config.cuda.enable_cuda_lto = True", - "config.cuda.use_fast_math = True", - "config.cuda.compile_opt_level = '-O2'", -] -# Torch dynamo arguments -torch_dynamo_arguments = [ - "config.accumulated_cache_size_limit = 1024", # Bump up a bit from 256 - "config.suppress_errors = True", # Supress errors for now - "config.do_not_emit_runtime_asserts = True", - "config.cache_size_limit = 1024", # Flex Attention - "config.inline_inbuilt_nn_modules = True", # Torch 2.5 Regional recompilation -] -import torch._inductor.config as config -for _try_compile_argument in torch_compile_arguments: - try: exec(_try_compile_argument) - except: pass -pass -import torch._dynamo.config as config -for _try_dynamo_argument in torch_dynamo_arguments: - try: exec(_try_dynamo_argument) - except: pass -pass torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, "shape_padding" : True, - "trace.enabled" : False, # Output Triton kernel outputs! 
+ "trace.enabled" : UNSLOTH_COMPILE_DEBUG, "triton.cudagraphs" : False, } + +import accelerate +def torch_compile_kwargs(*args, **kwargs): + print("Unsloth: Enabled auto compiling") + return {"dynamic" : True, "fullgraph" : False, "options" : torch_compile_options,} +pass + +accelerate.utils.dataclasses.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +accelerate.utils.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +accelerate.accelerator.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs +del accelerate + # ============================================= def prepare_model_for_kbit_training( @@ -455,137 +477,6 @@ def make_inputs_require_grad(module, input, output): return model pass - -def patch_tokenizer(model, tokenizer): - """ - Phi3's pad_token isn't set. We set it to <|placeholder... - Llama-3 is <|reserved... - Llama-2 is - Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! - Fixes https://github.com/unslothai/unsloth/issues/5 - """ - possible_reserved_tokens = ( - "<|finetune_right_pad_id|>", # Llama-3.1 - "", # Mistral Nemo - "<|reserved", # Llama-3 - "<|placeholder", # Phi-3 - "[control", # Mistral type models - ) - joiner = "\1\0=+=\0\1" - number_repetitions = 3 - 1 # Number of reserved tokens needed - - if model is not None: - model.config.update({"unsloth_version" : __version__}) - - bad_pad_token = False - if hasattr(tokenizer, "pad_token") and tokenizer.pad_token is not None: - # Check if pad_token is not the same as eos_token otherwise the loss will ignore it!! 
- bad_pad_token = tokenizer.eos_token == tokenizer.pad_token - elif hasattr(tokenizer, "pad_token") and tokenizer.pad_token is None: - bad_pad_token = True - else: - bad_pad_token = False - pass - - if bad_pad_token: - # Find a better pad token - added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] - all_added_tokens = joiner.join(added_tokens[::-1]) - all_added_tokens += joiner - - final_pad_token = None - final_good_match = False - - for possible_reserved_token in possible_reserved_tokens: - possible_reserved_token = re.escape(possible_reserved_token) - found = re.finditer(f"{possible_reserved_token}", all_added_tokens) - first_match = None - good_match = False - for j, x in enumerate(found): - if j == 0: first_match = x - if j >= number_repetitions: - good_match = True - break - pass - pass - - if first_match is None: continue - - # If it ends with |> or > etc, then set it as a good pad token! - start = first_match.span(0)[0] - possible_pad_token = first_match.group(0) - end = all_added_tokens.find(joiner, start) - first_match = all_added_tokens[start:end] - - if first_match is not None: - good_match = possible_pad_token.endswith((">", "|>", "]", ")")) - pass - possible_pad_token = first_match - - # Replace current pad token if another exact match is found - if not final_good_match and good_match: - final_good_match = True - final_pad_token = possible_pad_token - break - else: - final_good_match = False - final_pad_token = possible_pad_token - pass - pass - possible_pad_token = final_pad_token - - # Try unk_token - if possible_pad_token is None and hasattr(tokenizer, "unk_token"): - possible_pad_token = tokenizer.unk_token - pass - - # Check pad token's id must be less than vocab size - if possible_pad_token is not None: - check_pad_token = tokenizer(possible_pad_token, add_special_tokens = False).input_ids - if len(check_pad_token) != 1: - possible_pad_token = None - if model is not None and check_pad_token[0] >= model.config.vocab_size: - 
possible_pad_token = None - pass - - if possible_pad_token is None: - # Failure to find a good replacement!! We shall manually add one! - new_pad_token = "<|PAD_TOKEN|>" - while new_pad_token in tokenizer.get_vocab(): - new_pad_token = f"<{new_pad_token}>" - pass - possible_pad_token = new_pad_token - pass - - name = model.config._name_or_path if model is not None else "Model" - logger.warning_once( - f"{name} does not have a padding token! Will use pad_token = {possible_pad_token}." - ) - - # Edit pad_token - tokenizer.add_special_tokens({"pad_token" : possible_pad_token}) - tokenizer.pad_token = possible_pad_token - if model is not None: - model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - if getattr(model, "generation_config") is not None: - model.generation_config.update(pad_token_id = tokenizer.pad_token_id) - else: - if model is not None: - if model.config.pad_token_id is None: - model.config.update({"pad_token_id" : tokenizer.pad_token_id}) - if getattr(model, "generation_config") is not None: - model.generation_config.update(pad_token_id = tokenizer.pad_token_id) - pass - pass - - if model is not None: - if getattr(model, "generation_config") is not None: - model.generation_config.update(max_length = model.config.max_position_embeddings) - - return model, tokenizer -pass - - # ============================================= # Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? # For mixed precision, we need it to be in float32 not float16. @@ -618,6 +509,7 @@ def patch_tokenizer(model, tokenizer): ) pass pass + # ============================================= import psutil @@ -678,7 +570,9 @@ def get_statistics(): # We log some basic stats about which environment is being used. # We simply download a README.md file from HF - all data is made public. # This is simply so we can check if some envs are broken or not. 
- # You can disable this by commenting the below out + # You can disable this by setting UNSLOTH_DISABLE_STATISTICS + import os + if "UNSLOTH_DISABLE_STATISTICS" in os.environ: return from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled disabled = False if not are_progress_bars_disabled(): @@ -710,139 +604,6 @@ def get_statistics(): pass -def _calculate_n_gradient_checkpoints( - n_layers : int, - method : Optional[Union[str, int]] = "sqrt", -) -> List[int]: - assert(type(n_layers) is int and n_layers > 0) - - if method is None: method = "sqrt" - - if method == "sqrt": - n_checkpoints = int(n_layers**0.5) - elif type(method) is int and method > 0: - n_checkpoints = int(np.ceil(n_layers / method)) - else: - raise ValueError("method must be 'sqrt' or an int >0 and <= n_layers.") - - size = n_layers // n_checkpoints - sizes = np.full(n_checkpoints, size, dtype = int) - leftovers = n_layers % n_checkpoints - # We append leftovers from the right - for k in range(leftovers): - sizes[n_checkpoints-1-k] += 1 - boundaries = np.hstack((0, np.cumsum(sizes))) - boundaries = boundaries.tolist() - return boundaries -pass - - -def calculate_n_gradient_checkpoints( - n_layers : int, - layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", -) -> List[int]: - assert(type(n_layers) is int and n_layers > 0) - - if layers_per_checkpoint is None or layers_per_checkpoint == 1: - return None - - boundaries = _calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) - - assert(boundaries[0] == 0 and boundaries[-1] == n_layers) - assert(min(boundaries) == 0 and max(boundaries) == n_layers) - assert(np.diff(boundaries).min() >= 0) - return boundaries -pass - - -def prepare_n_gradient_checkpoints( - model : Any, - layers_per_checkpoint : Optional[Union[str, int]] = "sqrt", - use_reentrant : Optional[bool] = True, -) -> None: - """ - Calculates where to place the gradient checkpoints given n_layers. 
- - Args: - model: Any LlamaModel with layers. - layers_per_checkpoint (`Union[str, int]`, *optional*): - Can either be `sqrt` or an integer for how many layers per checkpoint you want. - The more, the less memory usage, but can be slower. Default is `sqrt`. - Choose 1 for Pytorch gradient checkpointing. 2 to wrap 2 layers in 1 module etc. - use_reentrant (`bool`, *optional*): - https://github.com/pytorch/pytorch/blob/main/torch/utils/checkpoint.py#L354 - Optimal gradient checkpointing algorithm `use_reentrant=False` which will - be the default in future Pytorch versions doesn't seem to work?? - """ - _model = None - if hasattr(model, "layers"): - _model = model - elif hasattr(model, "model"): - if hasattr(model.model, "layers"): - _model = model.model - if _model is None: - raise TypeError("`model` or `model.model` does not have attribute `layers`. Are you sure this is a model?") - pass - - if use_reentrant is False: - use_reentrant = True - pass - - n_layers = len(_model.layers) - boundaries = calculate_n_gradient_checkpoints(n_layers, layers_per_checkpoint) - _model._gradient_checkpointing_boundaries = boundaries - _model._gradient_checkpointing_use_reentrant = use_reentrant -pass - - -class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function): - """ - Saves VRAM by smartly offloading to RAM. - Tiny hit to performance, since we mask the movement via non blocking calls. 
- """ - @staticmethod - @torch_amp_custom_fwd - def forward(ctx, forward_function, hidden_states, *args): - saved_hidden_states = hidden_states.to("cpu", non_blocking = True) - with torch.no_grad(): - output = forward_function(hidden_states, *args) - ctx.save_for_backward(saved_hidden_states) - ctx.forward_function = forward_function - ctx.args = args - return output - pass - - @staticmethod - @torch_amp_custom_bwd - def backward(ctx, dY): - (hidden_states,) = ctx.saved_tensors - hidden_states = hidden_states.to("cuda:0", non_blocking = True).detach() - hidden_states.requires_grad_(True) - with torch.enable_grad(): - (output,) = ctx.forward_function(hidden_states, *ctx.args) - torch.autograd.backward(output, dY) - return (None, hidden_states.grad,) + (None,)*len(ctx.args) - pass -pass - - -@torch._disable_dynamo -def unsloth_offloaded_gradient_checkpoint(function, *args, use_reentrant = None, **kwargs): - return Unsloth_Offloaded_Gradient_Checkpointer.apply(function, *args) -pass - - -import torch.utils -old_checkpoint = torch.utils.checkpoint -def patch_gradient_checkpointing(): - torch.utils.checkpoint = unsloth_offloaded_gradient_checkpoint -pass - -def unpatch_gradient_checkpointing(): - torch.utils.checkpoint = old_checkpoint -pass - - # ============================================= # Fixes Bitsandbytes to remove missing warnings from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod @@ -1189,6 +950,7 @@ def patch_gradient_accumulation_fix(Trainer): # Fixes gradient accumulation import inspect if hasattr(Trainer, "get_batch_samples"): + if Trainer.get_batch_samples.__name__ == "_unsloth_get_batch_samples": return if \ not inspect.getsource(Trainer.get_batch_samples).strip()\ .endswith("return batch_samples, num_items_in_batch"): @@ -1215,6 +977,7 @@ def patch_gradient_accumulation_fix(Trainer): pass # Also fix up loss scaling ie negate loss *= self.args.gradient_accumulation_steps + if Trainer.training_step.__name__ == 
"_unsloth_training_step": return if "num_items_in_batch" not in inspect.signature(Trainer.training_step).parameters: return function = inspect.getsource(Trainer.training_step) @@ -1243,3 +1006,11 @@ def patch_gradient_accumulation_fix(Trainer): exec(function, globals()) Trainer.training_step = _unsloth_training_step pass + + +def patch_tokenizer(model, tokenizer): + model, tokenizer = _patch_tokenizer(model, tokenizer) + if model is not None: + model.config.update({"unsloth_version" : __version__}) + return model, tokenizer +pass diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 45f14c1131..1d9a0c1334 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -339,60 +339,9 @@ def pre_patch(): @staticmethod - def post_patch(model): - # Patch model for Gemma - layers = model.model.layers - - # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2.2 - model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.lm_head.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - - # Gemma has tied weights! This means lm_head == embed_tokens - if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.model.embed_tokens.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - pass - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! 
- correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - # RoPE must be done in float32 for Gemma - # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - # and (module.cos_cached.dtype != correct_dtype): - - # module.cos_cached = module.cos_cached.to(correct_dtype) - # module.sin_cached = module.sin_cached.to(correct_dtype) - # pass - # pass - pass + def post_patch(model, tokenizer): + # Gemma does not downcast RoPE + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = False) # Add 1 to weight # return output * (1 + self.weight) @@ -425,6 +374,6 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass pass diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index bf40ea8a27..4eb9d64313 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -490,60 +490,9 @@ def pre_patch(): @staticmethod - def post_patch(model): - # Patch model for Gemma - layers = model.model.layers - - # Torch.compile fails on embedding matrix?? 
- # Workaround randomnly fixes it for torch versions < 2.2 - model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.lm_head.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - - # Gemma has tied weights! This means lm_head == embed_tokens - if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.model.embed_tokens.weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - pass - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! 
- module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - # RoPE must be done in float32 for Gemma - # if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")) \ - # and (module.cos_cached.dtype != correct_dtype): - - # module.cos_cached = module.cos_cached.to(correct_dtype) - # module.sin_cached = module.sin_cached.to(correct_dtype) - # pass - # pass - pass + def post_patch(model, tokenizer): + # Gemma does not downcast RoPE + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = False) # Add 1 to weight # return output * (1 + self.weight) @@ -576,6 +525,6 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cf05d432c7..3c4d8f3b38 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -57,8 +57,6 @@ from transformers import set_seed as transformers_set_seed from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model from peft import PeftModelForCausalLM -from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit -from peft.tuners.lora import Linear4bit as Peft_Linear4bit from ..save import patch_saving_functions import re, os, inspect, math, sys try: @@ -1518,6 +1516,7 @@ def from_pretrained( pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking @@ -1621,7 +1620,7 @@ def from_pretrained( ) model, tokenizer = patch_tokenizer(model, tokenizer) - model = model_patcher.post_patch(model) + model, tokenizer = model_patcher.post_patch(model, tokenizer) # Patch up QKV / O and MLP for 
idx, layer in enumerate(model.model.layers): @@ -1797,93 +1796,15 @@ def from_pretrained( internal_model = internal_model.model pass internal_model._saved_temp_tokenizer = tokenizer - - # Also fix torch_dtype - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass return model, tokenizer pass @staticmethod - def post_patch(model): - # Patch model - layers = model.model.layers - - # Torch.compile fails on embedding matrix?? - # Workaround randomnly fixes it for torch versions < 2. - model.set_input_embeddings(torch.nn.Embedding.from_pretrained(model.get_input_embeddings().weight)) - model.config.update({"unsloth_version" : __version__}) - - # We also do this for the lm_head - lm_head = torch.nn.Linear(1, 1, bias = None) - del lm_head.weight - lm_head.weight = model.get_output_embeddings().weight - lm_head.in_features = lm_head.weight.shape[1] - lm_head.out_features = lm_head.weight.shape[0] - model.lm_head = lm_head - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! 
- correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - # Downcast RoPE embedding to correct data type - if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")): - - if hasattr(module, "cos_cached") and \ - (module.cos_cached.dtype != correct_dtype): - - module.cos_cached = module.cos_cached.to(correct_dtype) - module.sin_cached = module.sin_cached.to(correct_dtype) - - elif hasattr(module, "short_cos_cached") and \ - (module.short_cos_cached.dtype != correct_dtype): - - module.short_cos_cached = module.short_cos_cached.to(correct_dtype) - module.short_sin_cached = module.short_sin_cached.to(correct_dtype) - pass - pass - pass - - # Clear deleted GPU items - for _ in range(3): - gc.collect() - torch.cuda.empty_cache() - return model + def post_patch(model, tokenizer): + model, tokenizer = patch_model_and_tokenizer(model, tokenizer, downcast_rope = True) + return model, tokenizer pass @@ -1910,6 +1831,11 @@ def get_peft_model( ): transformers_set_seed(random_state) + if type(r) is not int: + raise TypeError(f"Unsloth: Rank of {str(r)} must be an integer.") + if r <= 0: + raise TypeError(f"Unsloth: Rank of {str(r)} must be larger than 0.") + if isinstance(model, PeftModelForCausalLM): # Check if exactly the same and then pass through! 
assert(hasattr(model, "peft_config")) diff --git a/unsloth/save.py b/unsloth/save.py index ccda79aeee..b4c6b499cf 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -49,6 +49,7 @@ keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +KAGGLE_TMP = "/tmp" del keynames # Weights @@ -447,13 +448,20 @@ def unsloth_save_model( if push_to_hub and "/" in save_directory: # +1 solves absolute path issues - username = save_directory[:save_directory.find("/")] - new_save_directory = save_directory[save_directory.find("/")+1:] - - logger.warning_once( - f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"\ - f"We shall truncate {save_directory} to {new_save_directory}" - ) + new_save_directory = save_directory + username = new_save_directory[:new_save_directory.find("/")] + new_save_directory = new_save_directory[new_save_directory.find("/")+1:] + if IS_KAGGLE_ENVIRONMENT: + new_save_directory = os.path.join(KAGGLE_TMP, new_save_directory[new_save_directory.find("/")+1:]) + logger.warning_once( + "Unsloth: You are pushing to hub in Kaggle environment.\n"\ + f"To save memory, we shall move {save_directory} to {new_save_directory}" + ) + else: + logger.warning_once( + f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"\ + f"We shall truncate {save_directory} to {new_save_directory}" + ) save_pretrained_settings["save_directory"] = new_save_directory tokenizer_save_settings ["save_directory"] = new_save_directory @@ -507,6 +515,10 @@ def unsloth_save_model( f"{round(max_ram/1024/1024/1024, 2)} out of "\ f"{round(psutil.virtual_memory().total/1024/1024/1024, 2)} RAM for saving.") + # Move temporary_location to /tmp in Kaggle + if IS_KAGGLE_ENVIRONMENT: + temporary_location = os.path.join(KAGGLE_TMP, temporary_location) + # Max directory for disk saving if not os.path.exists(temporary_location): os.makedirs(temporary_location) @@ 
-708,7 +720,7 @@ def unsloth_save_model( print("Done.") if push_to_hub and hasattr(model, "config"): - print(f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/')}") + print(f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/').split('/')[-1]}") pass save_pretrained_settings["state_dict"] = None @@ -1108,14 +1120,17 @@ def save_to_gguf( # Check if quantization succeeded! if not os.path.isfile(final_location): if IS_KAGGLE_ENVIRONMENT: - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You are in a Kaggle environment, which might be the reason this is failing.\n"\ - "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ - "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ - "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ - "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." - ) + if not Path(final_location).resolve().is_relative_to(Path('/tmp').resolve()): + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space in the working directory.\n"\ + "Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "You can try saving it to the `/tmp` directory for larger disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) else: raise RuntimeError( f"Unsloth: Quantization failed for {final_location}\n"\ @@ -1156,14 +1171,17 @@ def save_to_gguf( # Check if quantization succeeded! 
if not os.path.isfile(final_location): if IS_KAGGLE_ENVIRONMENT: - raise RuntimeError( - f"Unsloth: Quantization failed for {final_location}\n"\ - "You are in a Kaggle environment, which might be the reason this is failing.\n"\ - "Kaggle only provides 20GB of disk space. Merging to 16bit for 7b models use 16GB of space.\n"\ - "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ - "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ - "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." - ) + if not Path(final_location).resolve().is_relative_to(Path('/tmp').resolve()): + raise RuntimeError( + f"Unsloth: Quantization failed for {final_location}\n"\ + "You are in a Kaggle environment, which might be the reason this is failing.\n"\ + "Kaggle only provides 20GB of disk space in the working directory.\n"\ + "Merging to 16bit for 7b models use 16GB of space.\n"\ + "This means using `model.{save_pretrained/push_to_hub}_merged` works, but\n"\ + "`model.{save_pretrained/push_to_hub}_gguf will use too much disk space.\n"\ + "You can try saving it to the `/tmp` directory for larger disk space.\n"\ + "I suggest you to save the 16bit model first, then use manual llama.cpp conversion." + ) else: raise RuntimeError( "Unsloth: Quantization failed! 
You might have to compile llama.cpp yourself, then run this again.\n"\ diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index c05485f902..c639dbf1a0 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -64,6 +64,7 @@ keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames +KAGGLE_TMP = "/tmp" del keynames @@ -470,8 +471,12 @@ def _load_correct_tokenizer( cache_dir = "huggingface_tokenizers_cache", fix_tokenizer = True, ): - if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT: + if IS_COLAB_ENVIRONMENT: cache_dir = cache_dir + elif IS_KAGGLE_ENVIRONMENT: + # /tmp of Kaggle seems has a 80GB limit! + # Let's utilize them + cache_dir = os.path.join(KAGGLE_TMP, cache_dir) else: cache_dir = None pass From eb4da9d8cd1aeb524e4b83b168c8977834ba1ad5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 13:35:43 -0800 Subject: [PATCH 0698/1088] Update _utils.py --- unsloth/models/_utils.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 94cf1b74e0..b5c17434c7 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -88,8 +88,9 @@ # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") -warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "xformers") warnings.filterwarnings(action = "ignore", category = 
RuntimeWarning, module = "subprocess") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") @@ -374,8 +375,7 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ + # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -387,7 +387,14 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = UNSLOTH_COMPILE_MAXIMUM) + + +UNSLOTH_COMPILE_DEBUG = os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" +UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" +patch_torch_compile( + debug = UNSLOTH_COMPILE_DEBUG, + O3 = UNSLOTH_COMPILE_MAXIMUM, +) torch_compile_options = { "epilogue_fusion" : True, From da397f4f184039a4de1a2af902e213c73653f33b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 13:52:12 -0800 Subject: [PATCH 0699/1088] Update llama.py --- unsloth/models/llama.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3c4d8f3b38..c4488127d9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -390,7 +390,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if (not HAS_FLASH_ATTENTION and attention_mask is None): + if False:#(not HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -427,10 +427,10 @@ def LlamaAttention_fast_forward( pass # Must be contiguous or else results are False! 
# https://github.com/pytorch/pytorch/issues/112577 - Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + # Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! - A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) + A = scaled_dot_product_attention(Q, K, V, is_causal = True) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass @@ -1028,7 +1028,6 @@ def _CausalLM_fast_forward( pass -@torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, input_ids=None, From 70b65cf7a25dd6e73df704ffe040711826bbb2d1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 14:40:57 -0800 Subject: [PATCH 0700/1088] CE Loss --- unsloth/kernels/cross_entropy_loss.py | 10 ++++++---- unsloth/models/_utils.py | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f0193c74d8..13d90baafe 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -181,10 +181,10 @@ def _chunked_cross_entropy_forward( pass -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), - "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), -}) +# @triton.heuristics({ +# "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), +# "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), +# }) @triton.jit def _cross_entropy_backward( logits_ptr , @@ -345,6 +345,8 @@ def backward(ctx, dlosses): n_rows, vocab_size = logits.shape BLOCK_SIZE : int = 4096 + div : int + mod : int div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks : int = div + (mod != 0) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b5c17434c7..bb004d588d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -393,7 +393,7 @@ 
def is_big_gpu(index): UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" patch_torch_compile( debug = UNSLOTH_COMPILE_DEBUG, - O3 = UNSLOTH_COMPILE_MAXIMUM, + O3 = UNSLOTH_COMPILE_MAXIMUM, ) torch_compile_options = { From aeec57e110994142ac3d64bc4a74e077c2055565 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 14:43:49 -0800 Subject: [PATCH 0701/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 13d90baafe..e5136f8412 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -181,10 +181,10 @@ def _chunked_cross_entropy_forward( pass -# @triton.heuristics({ -# "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), -# "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), -# }) +@triton.heuristics({ + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), +}) @triton.jit def _cross_entropy_backward( logits_ptr , @@ -337,6 +337,8 @@ def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : return losses pass + + @torch.compiler.disable @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors From fb393fc9baa84794b95becad0a030e8e16b3d35a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 14:54:07 -0800 Subject: [PATCH 0702/1088] Update _utils.py --- unsloth/models/_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bb004d588d..95ea381f00 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -389,11 +389,13 @@ def is_big_gpu(index): torch._inductor.utils.is_big_gpu = is_big_gpu -UNSLOTH_COMPILE_DEBUG = 
os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" -UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" +UNSLOTH_COMPILE_DEBUG = os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" +UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" +UNSLOTH_COMPILE_IGNORE_ERRORS = os.environ.get("UNSLOTH_COMPILE_IGNORE_ERRORS", "0") == "1" patch_torch_compile( - debug = UNSLOTH_COMPILE_DEBUG, - O3 = UNSLOTH_COMPILE_MAXIMUM, + debug = UNSLOTH_COMPILE_DEBUG, + O3 = UNSLOTH_COMPILE_MAXIMUM, + ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, ) torch_compile_options = { From cab1e722d68d3255c4a46c5790411047e34e2993 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 20:29:30 -0800 Subject: [PATCH 0703/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 28 +++++++++++++-------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index e5136f8412..bb8f002ac8 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -104,18 +104,17 @@ def _cross_entropy_forward( }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr , - logits_row_stride , - loss_ptr , - logsumexp_ptr , - labels_ptr , - VOCAB_SIZE , - N_CHUNKS , - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + logits_ptr, logits_row_stride, + loss_ptr, + logsumexp_ptr, + labels_ptr, + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING: tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -143,7 +142,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * logits_row_stride.to(tl.int64) 
loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -157,7 +156,7 @@ def _chunked_cross_entropy_forward( # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) logits = logits.to(tl.float32) c = tl.max(logits, 0) @@ -338,7 +337,6 @@ def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : pass - @torch.compiler.disable @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors From 51fea97dec9e94350389d3b71a5932c0ad6c1564 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 20:58:15 -0800 Subject: [PATCH 0704/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 28 +++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index bb8f002ac8..64825bac5c 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -104,17 +104,18 @@ def _cross_entropy_forward( }) @triton.jit def _chunked_cross_entropy_forward( - logits_ptr, logits_row_stride, - loss_ptr, - logsumexp_ptr, - labels_ptr, - VOCAB_SIZE : tl.constexpr, - N_CHUNKS : tl.constexpr, - BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING : tl.constexpr, - SOFTCAP : tl.constexpr, - DO_LOGIT_SCALING: tl.constexpr, - LOGIT_SCALE : tl.constexpr, + logits_ptr , + logits_row_stride , + loss_ptr , + logsumexp_ptr , + labels_ptr , + VOCAB_SIZE , + N_CHUNKS , + BLOCK_SIZE : tl.constexpr, + DO_SOFTCAPPING , + SOFTCAP , + DO_LOGIT_SCALING , + LOGIT_SCALE , ): """ 256K vocab divided in 4 chunks @@ -142,7 +143,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) 
- logits_ptr += row_idx * logits_row_stride.to(tl.int64) + logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -151,14 +152,13 @@ def _chunked_cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) - logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) From 58e541bd910e28ee617bc6dd044dcbfee400e8be Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 21:01:33 -0800 Subject: [PATCH 0705/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 64825bac5c..0c07035097 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -73,14 +73,13 @@ def _cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) - - logits = logits.to(tl.float32) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + 
c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -228,7 +227,7 @@ def _cross_entropy_backward( else: dloss = 0.0 - x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Do logit scaling for Cohere if DO_LOGIT_SCALING: @@ -240,12 +239,12 @@ def _cross_entropy_backward( partial = x if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) - partial = triton_tanh(x.to(tl.float32) / SOFTCAP).to(x.dtype) + partial = triton_tanh(x / SOFTCAP) x = SOFTCAP * partial pass logsumexp = tl.load(logsumexp_ptr + row_idx) - y = tl.exp(x.to(tl.float32) - logsumexp) + y = tl.exp(x - logsumexp) y = tl.where( col_offsets == label_idx, y - 1.0, # exp(x - logsumexp) - 1 From ef2c56f3da1508bb2753f164d35b853e67a81c4f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 21:07:02 -0800 Subject: [PATCH 0706/1088] Update llama.py --- unsloth/models/llama.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c4488127d9..3c4d8f3b38 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -390,7 +390,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if False:#(not HAS_FLASH_ATTENTION and attention_mask is None): + if (not HAS_FLASH_ATTENTION and attention_mask is None): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -427,10 +427,10 @@ def LlamaAttention_fast_forward( pass # Must be contiguous or else results are False! # https://github.com/pytorch/pytorch/issues/112577 - # Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! 
- A = scaled_dot_product_attention(Q, K, V, is_causal = True) + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass @@ -1028,6 +1028,7 @@ def _CausalLM_fast_forward( pass +@torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, input_ids=None, From 3ea7044fc0770499abc2ddc19b85e1962fbbaea0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 21:08:11 -0800 Subject: [PATCH 0707/1088] Bug fix (#1249) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update 
_utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. 
* Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling 
* Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 --- unsloth/kernels/cross_entropy_loss.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f0193c74d8..0c07035097 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -73,14 +73,13 @@ def _cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) - - logits = logits.to(tl.float32) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) + c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -152,14 +151,13 @@ def _chunked_cross_entropy_forward( mask = col_offsets < VOCAB_SIZE label_idx = tl.load(labels_ptr).to(tl.int32) - logits = tl.load(logits_ptr + col_offsets, mask = mask, other = 
-float("inf")) + logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: logits = LOGIT_SCALE * logits # Do logit softcapping for Gemma 2: t * tanh(1/t * x) - if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits.to(tl.float32) / SOFTCAP).to(logits.dtype) + if DO_SOFTCAPPING: logits = SOFTCAP * triton_tanh(logits / SOFTCAP) - logits = logits.to(tl.float32) c = tl.max(logits, 0) logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) @@ -229,7 +227,7 @@ def _cross_entropy_backward( else: dloss = 0.0 - x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")) + x = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) # Do logit scaling for Cohere if DO_LOGIT_SCALING: @@ -241,12 +239,12 @@ def _cross_entropy_backward( partial = x if DO_SOFTCAPPING: # d/dx [t * tanh(1/t * x)] = 1 - tanh^2(1/t * x) - partial = triton_tanh(x.to(tl.float32) / SOFTCAP).to(x.dtype) + partial = triton_tanh(x / SOFTCAP) x = SOFTCAP * partial pass logsumexp = tl.load(logsumexp_ptr + row_idx) - y = tl.exp(x.to(tl.float32) - logsumexp) + y = tl.exp(x - logsumexp) y = tl.where( col_offsets == label_idx, y - 1.0, # exp(x - logsumexp) - 1 @@ -337,6 +335,7 @@ def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : return losses pass + @staticmethod def backward(ctx, dlosses): logits, logsumexp, labels = ctx.saved_tensors @@ -345,6 +344,8 @@ def backward(ctx, dlosses): n_rows, vocab_size = logits.shape BLOCK_SIZE : int = 4096 + div : int + mod : int div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks : int = div + (mod != 0) From 13d7412bbfa82b4c2058da1b5a5f452dc868aa60 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 21:24:56 -0800 Subject: [PATCH 0708/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py 
index 94cf1b74e0..e1a90993ed 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.1" +__version__ = "2024.11.3" __all__ = [ "prepare_model_for_kbit_training", From 5a7eaf8a60d6bf187304f10eb0ebdc5f5d2814e2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 21:44:38 -0800 Subject: [PATCH 0709/1088] Update _utils.py --- unsloth/models/_utils.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e1a90993ed..b105ea7494 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -88,8 +88,9 @@ # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") -warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "xformers") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") @@ -374,8 +375,9 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_DEBUG = os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" +UNSLOTH_COMPILE_MAXIMUM = 
os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" +UNSLOTH_COMPILE_IGNORE_ERRORS = os.environ.get("UNSLOTH_COMPILE_IGNORE_ERRORS", "0") == "1" # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -387,7 +389,11 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = UNSLOTH_COMPILE_MAXIMUM) +patch_torch_compile( + debug = UNSLOTH_COMPILE_DEBUG, + O3 = UNSLOTH_COMPILE_MAXIMUM, + ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, +) torch_compile_options = { "epilogue_fusion" : True, From d2186ed3c83d7aab612cad55a3c23201a26e16f1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 5 Nov 2024 22:48:36 -0800 Subject: [PATCH 0710/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b105ea7494..4a9b8847ad 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -396,8 +396,8 @@ def is_big_gpu(index): ) torch_compile_options = { - "epilogue_fusion" : True, - "max_autotune" : True, + "epilogue_fusion" : False, + "max_autotune" : False, "shape_padding" : True, "trace.enabled" : UNSLOTH_COMPILE_DEBUG, "triton.cudagraphs" : False, From 6434447f25e63cfdc5afa5bc7dbfe742aed1c2cc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 00:18:04 -0800 Subject: [PATCH 0711/1088] Update _utils.py --- unsloth/models/_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4a9b8847ad..cdc2cd45fd 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -377,7 +377,7 @@ def _is_openai_available(): return False # Torch compile settings UNSLOTH_COMPILE_DEBUG = os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" 
-UNSLOTH_COMPILE_IGNORE_ERRORS = os.environ.get("UNSLOTH_COMPILE_IGNORE_ERRORS", "0") == "1" +UNSLOTH_COMPILE_IGNORE_ERRORS = os.environ.get("UNSLOTH_COMPILE_IGNORE_ERRORS", "1") == "1" # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -396,8 +396,8 @@ def is_big_gpu(index): ) torch_compile_options = { - "epilogue_fusion" : False, - "max_autotune" : False, + "epilogue_fusion" : True, + "max_autotune" : True, "shape_padding" : True, "trace.enabled" : UNSLOTH_COMPILE_DEBUG, "triton.cudagraphs" : False, From 67611e624670cb1163283c889734a996485fc1e3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 02:10:37 -0800 Subject: [PATCH 0712/1088] Update _utils.py --- unsloth/models/_utils.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cdc2cd45fd..903093e60f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -69,7 +69,6 @@ patch_compiling_bitsandbytes, patch_layernorm, patch_torch_compile, - patch_regional_compilation, patch_model_and_tokenizer, ) from unsloth_zoo.gradient_checkpointing import ( @@ -414,6 +413,26 @@ def torch_compile_kwargs(*args, **kwargs): accelerate.accelerator.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs del accelerate +def patch_regional_compilation(): + # Regional torch 2.5 Recompilation - weirdly very slow?? 
+ if torch.nn.ModuleList.__name__ == "UnslothModuleList": return + # Only works for torch 2.5 + if Version(torch.__version__) < Version("2.5.0"): return + + old_module_list = torch.nn.ModuleList + os.environ["UNSLOTH_PATCHED"] = "1" + + def UnslothModuleList(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0 and type(args[0]) is list: + args = [old_module_list([torch.compile(x, dynamic = True, options = torch_compile_options, fullgraph = False) for x in args[0]])] + return old_module_list(*args, **kwargs) + pass + UnslothModuleList.__doc__ = old_module_list.__doc__ + + torch.nn.ModuleList = UnslothModuleList + return +pass + # ============================================= def prepare_model_for_kbit_training( From 55f76905c060cb76e0cfb85de6d646a8d7add6b4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 12:08:55 -0800 Subject: [PATCH 0713/1088] Bug fixes (#1255) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> 
Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. * Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update 
rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 --- unsloth/models/_utils.py | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 94cf1b74e0..903093e60f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.1" +__version__ = "2024.11.3" __all__ = [ "prepare_model_for_kbit_training", @@ -69,7 +69,6 @@ patch_compiling_bitsandbytes, patch_layernorm, patch_torch_compile, - patch_regional_compilation, patch_model_and_tokenizer, ) from unsloth_zoo.gradient_checkpointing import ( @@ -88,8 +87,9 @@ # Disable some warnings which can get annoying warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub") -warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "huggingface_hub") +warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl") +warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "trl") warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "xformers") warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess") warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers") @@ -374,8 +374,9 @@ def _is_openai_available(): return False # ============================================= # Torch compile settings -UNSLOTH_COMPILE_DEBUG = "UNSLOTH_COMPILE_DEBUG" in os.environ -UNSLOTH_COMPILE_MAXIMUM = "UNSLOTH_COMPILE_MAXIMUM" in os.environ +UNSLOTH_COMPILE_DEBUG = os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1" +UNSLOTH_COMPILE_MAXIMUM = os.environ.get("UNSLOTH_COMPILE_MAXIMUM", "0") == "1" +UNSLOTH_COMPILE_IGNORE_ERRORS = os.environ.get("UNSLOTH_COMPILE_IGNORE_ERRORS", "1") == "1" # Just remove max_autotune_gemm warning import functools @functools.lru_cache(None) @@ -387,7 +388,11 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile(debug = UNSLOTH_COMPILE_DEBUG, O3 = UNSLOTH_COMPILE_MAXIMUM) +patch_torch_compile( 
+ debug = UNSLOTH_COMPILE_DEBUG, + O3 = UNSLOTH_COMPILE_MAXIMUM, + ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, +) torch_compile_options = { "epilogue_fusion" : True, @@ -408,6 +413,26 @@ def torch_compile_kwargs(*args, **kwargs): accelerate.accelerator.TorchDynamoPlugin.to_kwargs = torch_compile_kwargs del accelerate +def patch_regional_compilation(): + # Regional torch 2.5 Recompilation - weirdly very slow?? + if torch.nn.ModuleList.__name__ == "UnslothModuleList": return + # Only works for torch 2.5 + if Version(torch.__version__) < Version("2.5.0"): return + + old_module_list = torch.nn.ModuleList + os.environ["UNSLOTH_PATCHED"] = "1" + + def UnslothModuleList(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0 and type(args[0]) is list: + args = [old_module_list([torch.compile(x, dynamic = True, options = torch_compile_options, fullgraph = False) for x in args[0]])] + return old_module_list(*args, **kwargs) + pass + UnslothModuleList.__doc__ = old_module_list.__doc__ + + torch.nn.ModuleList = UnslothModuleList + return +pass + # ============================================= def prepare_model_for_kbit_training( From f24aef5cbc50467493d906e61ce95d3159ee957f Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Thu, 7 Nov 2024 00:16:02 +0400 Subject: [PATCH 0714/1088] Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 0c07035097..cc3dbb1d87 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -84,12 +84,12 @@ def _cross_entropy_forward( logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) if label_idx != -100: - x = 
tl.load(logits_ptr + label_idx) + x = tl.load(logits_ptr + label_idx).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) - loss = logsumexp - x.to(tl.float32) + loss = logsumexp - x else: loss = 0.0 tl.store(logsumexp_ptr, logsumexp) @@ -170,7 +170,7 @@ def _chunked_cross_entropy_forward( if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) - loss = -1.0 * x.to(tl.float32) + loss = -1.0 * x else: loss = 0.0 tl.store(loss_ptr, loss) From 3d906e637847b689544dc804305ffc8365e17af3 Mon Sep 17 00:00:00 2001 From: Datta Nimmaturi Date: Thu, 7 Nov 2024 01:52:08 +0530 Subject: [PATCH 0715/1088] Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han --- unsloth/models/llama.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3c4d8f3b38..7f07bea4c5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1376,6 +1376,15 @@ def _wrap_fast_inference(generate, device_type, dtype, model): @torch.inference_mode def _fast_generate(*args, **kwargs): + if hasattr(model, "config") and hasattr(model.config, "max_position_embeddings"): + if "input_ids" in kwargs and kwargs["input_ids"] is not None and "max_new_tokens" in kwargs: + if kwargs["input_ids"].shape[-1] + kwargs["max_new_tokens"] > model.config.max_position_embeddings: + raise ValueError( + f'Unsloth: input length {kwargs["input_ids"].shape[-1]} + max_new_tokens {kwargs["max_new_tokens"]} exceeds the maximum sequence length of {model.config.max_position_embeddings}!\n'\ + 'You will need to do long context extension by increasing the 
`max_seq_length` in `FastLanguageModel.from_pretrained`.' + ) + pass + # Set a flag for generation! internal_model = model while hasattr(internal_model, "model"): From de1049bcc524e6c3eff5874e1c79aad603cd9f97 Mon Sep 17 00:00:00 2001 From: Edwin Fennell Date: Wed, 6 Nov 2024 20:23:09 +0000 Subject: [PATCH 0716/1088] CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root --- unsloth/models/loader.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index db7259b1d9..8dcdebab12 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -44,6 +44,25 @@ from .gemma2 import FastGemma2Model pass +def get_dtype_from_input( + dtype +): + '''Converts user-defined dtype input string to a usable dtype''' + TORCH_FLOAT16_SYNONYMS = {"torch.float16"} + TORCH_BFLOAT16_SYNONYMS = {"torch.bfloat16"} + TORCH_FLOAT32_SYNONYMS = {"torch.float32"} + if dtype in TORCH_FLOAT16_SYNONYMS: + return torch.float16 + if dtype in TORCH_BFLOAT16_SYNONYMS: + return torch.bfloat16 + if dtype in TORCH_FLOAT32_SYNONYMS: + return torch.float32 + if dtype != "None": + print(f"--------------------------------------------------\n"\ + f"User-specified dtype not recognised. 
Defaulting to dtype = None\n"\ + f"--------------------------------------------------") + return None + def __get_model_name( model_name, @@ -332,7 +351,7 @@ def from_pretrained( model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, - dtype = dtype, + dtype = get_dtype_from_input(dtype), load_in_4bit = load_in_4bit, token = token, device_map = device_map, From be72975d8aba9c7cbbdc906d87a41f5ac58d8cea Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 13:09:29 -0800 Subject: [PATCH 0717/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 08426b69e0..7deb9a96d3 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -20,7 +20,7 @@ "epilogue_fusion" : True, "max_autotune" : True, "shape_padding" : True, - "trace.enabled" : False, # Output Triton kernel outputs! 
+ "trace.enabled" : os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1", "triton.cudagraphs" : False, } From 05170cd81a522d3a6f9523c60a2a6e3ea31da5cb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 14:05:52 -0800 Subject: [PATCH 0718/1088] Update _utils.py --- unsloth/models/_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 903093e60f..d645235f02 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -388,11 +388,11 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -patch_torch_compile( - debug = UNSLOTH_COMPILE_DEBUG, - O3 = UNSLOTH_COMPILE_MAXIMUM, - ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, -) +# patch_torch_compile( +# debug = UNSLOTH_COMPILE_DEBUG, +# O3 = UNSLOTH_COMPILE_MAXIMUM, +# ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, +# ) torch_compile_options = { "epilogue_fusion" : True, From 7e0877d383a1937039902b65d48fdc16ac6c8c0a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 14:49:37 -0800 Subject: [PATCH 0719/1088] Update _utils.py --- unsloth/models/_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d645235f02..903093e60f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -388,11 +388,11 @@ def is_big_gpu(index): return True import torch._inductor.utils torch._inductor.utils.is_big_gpu = is_big_gpu -# patch_torch_compile( -# debug = UNSLOTH_COMPILE_DEBUG, -# O3 = UNSLOTH_COMPILE_MAXIMUM, -# ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, -# ) +patch_torch_compile( + debug = UNSLOTH_COMPILE_DEBUG, + O3 = UNSLOTH_COMPILE_MAXIMUM, + ignore_errors = UNSLOTH_COMPILE_IGNORE_ERRORS, +) torch_compile_options = { "epilogue_fusion" : True, From 6b5c5993947fac6fcd62041a82587a87a41e2176 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 
2024 14:51:52 -0800 Subject: [PATCH 0720/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 7deb9a96d3..887ffca1b7 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -15,6 +15,7 @@ import torch from functools import lru_cache from transformers.models.llama.modeling_llama import logger +import os torch_compile_options = { "epilogue_fusion" : True, From 1ba9f2ed87133f9fb3c7c5ec69e3d0f9d78fd949 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 14:54:53 -0800 Subject: [PATCH 0721/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 887ffca1b7..dfd48504d5 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -40,6 +40,14 @@ if not HAS_FLEX_ATTENTION: + # Below fails on compiled_autograd, so disable it + try: + old_compiled_autograd = torch._dynamo.config.compiled_autograd + torch._dynamo.config.compiled_autograd = False + except: + old_compiled_autograd = False + pass + # Logit softcapping @torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): @@ -74,6 +82,13 @@ def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): return A pass + # Return compiled_autograd back + try: + torch._dynamo.config.compiled_autograd = old_compiled_autograd + except: + pass + pass + create_flex_attention_causal_mask = None create_flex_attention_sliding_window_mask = None else: From da61c4dde9247fcf4623c387fac4ee5612163367 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 15:05:10 -0800 Subject: [PATCH 0722/1088] Update loader.py --- unsloth/models/loader.py | 35 
+++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 8dcdebab12..cafb1282f7 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -44,24 +44,23 @@ from .gemma2 import FastGemma2Model pass -def get_dtype_from_input( - dtype -): - '''Converts user-defined dtype input string to a usable dtype''' - TORCH_FLOAT16_SYNONYMS = {"torch.float16"} - TORCH_BFLOAT16_SYNONYMS = {"torch.bfloat16"} - TORCH_FLOAT32_SYNONYMS = {"torch.float32"} - if dtype in TORCH_FLOAT16_SYNONYMS: + +def _get_dtype(dtype): + __DTYPE_MAP = { + "float32": torch.float32, + torch.float32: torch.float32, + "float16": torch.float16, + torch.float16: torch.float16, + "bfloat16": torch.bfloat16, + torch.bfloat16: torch.bfloat16, + } + if dtype in __DTYPE_MAP: + return __DTYPE_MAP[dtype] + else: + print(f"Unsloth: {dtype} is not recognized, so we'll default to torch.float16") return torch.float16 - if dtype in TORCH_BFLOAT16_SYNONYMS: - return torch.bfloat16 - if dtype in TORCH_FLOAT32_SYNONYMS: - return torch.float32 - if dtype != "None": - print(f"--------------------------------------------------\n"\ - f"User-specified dtype not recognised. 
Defaulting to dtype = None\n"\ - f"--------------------------------------------------") - return None + pass +pass def __get_model_name( @@ -351,7 +350,7 @@ def from_pretrained( model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, - dtype = get_dtype_from_input(dtype), + dtype = _get_dtype(dtype), load_in_4bit = load_in_4bit, token = token, device_map = device_map, From 3316ee2282869ae8f85b571b420d760d665f016f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 15:13:03 -0800 Subject: [PATCH 0723/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index cafb1282f7..4566302ed0 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -43,7 +43,7 @@ if SUPPORTS_GEMMA2: from .gemma2 import FastGemma2Model pass - +import torch def _get_dtype(dtype): __DTYPE_MAP = { From 501ca842970d17bfc7d77d007fa18d80d86381f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 15:39:32 -0800 Subject: [PATCH 0724/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index dfd48504d5..678574928c 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -42,14 +42,15 @@ # Below fails on compiled_autograd, so disable it try: - old_compiled_autograd = torch._dynamo.config.compiled_autograd - torch._dynamo.config.compiled_autograd = False + disable_compiled_autograd = torch._dynamo.compiled_autograd.disable except: - old_compiled_autograd = False + disable_compiled_autograd = lambda *args, **kwargs: *args, **kwargs pass # Logit softcapping - @torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) + @disable_compiled_autograd( + torch.compile(fullgraph = True, 
dynamic = True, options = torch_compile_options) + ) def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): n_heads = self.num_heads head_dim = self.head_dim @@ -82,13 +83,6 @@ def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): return A pass - # Return compiled_autograd back - try: - torch._dynamo.config.compiled_autograd = old_compiled_autograd - except: - pass - pass - create_flex_attention_causal_mask = None create_flex_attention_sliding_window_mask = None else: From ce621b7af32b44ee2c6bbce5f9d302b6e0bc677e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 15:56:26 -0800 Subject: [PATCH 0725/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 678574928c..99e46a904c 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -44,7 +44,7 @@ try: disable_compiled_autograd = torch._dynamo.compiled_autograd.disable except: - disable_compiled_autograd = lambda *args, **kwargs: *args, **kwargs + disable_compiled_autograd = lambda f: f pass # Logit softcapping From 4b01ff1724f03bed74bfddf8963850e7c84d9d3f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 15:56:52 -0800 Subject: [PATCH 0726/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 99e46a904c..1342bfafc2 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -42,15 +42,13 @@ # Below fails on compiled_autograd, so disable it try: - disable_compiled_autograd = torch._dynamo.compiled_autograd.disable + disable_compile = torch._dynamo.compiled_autograd.disable except: - disable_compiled_autograd = lambda f: f + disable_compile = lambda f: f pass # Logit softcapping - 
@disable_compiled_autograd( - torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) - ) + @disable_compile(torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options)) def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): n_heads = self.num_heads head_dim = self.head_dim From ef5052a8bc97bbb6ca6e82fc8f7ede47ad37ca2d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 16:59:15 -0800 Subject: [PATCH 0727/1088] Update flex_attention.py --- unsloth/kernels/flex_attention.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 1342bfafc2..887ffca1b7 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -40,15 +40,8 @@ if not HAS_FLEX_ATTENTION: - # Below fails on compiled_autograd, so disable it - try: - disable_compile = torch._dynamo.compiled_autograd.disable - except: - disable_compile = lambda f: f - pass - # Logit softcapping - @disable_compile(torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options)) + @torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): n_heads = self.num_heads head_dim = self.head_dim From 52bca32ce9b2162aa60061d54a4f6c9a78a145f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 17:14:56 -0800 Subject: [PATCH 0728/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 903093e60f..a6cd13d251 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.3" +__version__ = "2024.11.4" __all__ = [ "prepare_model_for_kbit_training", From 8d6d78fee60d3e75a2aa74562b4227673e7d7c95 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 17:17:19 -0800 Subject: [PATCH 0729/1088] Bug fixes (#1259) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with 
short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. 
* Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling 
* Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 Co-authored-by: Edwin Fennell Co-authored-by: root --- unsloth/kernels/cross_entropy_loss.py | 6 +++--- unsloth/kernels/flex_attention.py | 3 ++- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 9 +++++++++ unsloth/models/loader.py | 20 +++++++++++++++++++- 5 files changed, 34 insertions(+), 6 deletions(-) diff --git 
a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 0c07035097..cc3dbb1d87 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -84,12 +84,12 @@ def _cross_entropy_forward( logsumexp = c + tl.log(tl.sum(tl.exp(logits - c), 0)) if label_idx != -100: - x = tl.load(logits_ptr + label_idx) + x = tl.load(logits_ptr + label_idx).to(tl.float32) # Go logit scaling for Cohere: t * x if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) - loss = logsumexp - x.to(tl.float32) + loss = logsumexp - x else: loss = 0.0 tl.store(logsumexp_ptr, logsumexp) @@ -170,7 +170,7 @@ def _chunked_cross_entropy_forward( if DO_LOGIT_SCALING: x = LOGIT_SCALE * x # Do logit softcapping for Gemma 2: t * tanh(1/t * x) if DO_SOFTCAPPING: x = SOFTCAP * triton_tanh(x / SOFTCAP) - loss = -1.0 * x.to(tl.float32) + loss = -1.0 * x else: loss = 0.0 tl.store(loss_ptr, loss) diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 08426b69e0..887ffca1b7 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -15,12 +15,13 @@ import torch from functools import lru_cache from transformers.models.llama.modeling_llama import logger +import os torch_compile_options = { "epilogue_fusion" : True, "max_autotune" : True, "shape_padding" : True, - "trace.enabled" : False, # Output Triton kernel outputs! + "trace.enabled" : os.environ.get("UNSLOTH_COMPILE_DEBUG", "0") == "1", "triton.cudagraphs" : False, } diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 903093e60f..a6cd13d251 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.3" +__version__ = "2024.11.4" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3c4d8f3b38..7f07bea4c5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1376,6 +1376,15 @@ def _wrap_fast_inference(generate, device_type, dtype, model): @torch.inference_mode def _fast_generate(*args, **kwargs): + if hasattr(model, "config") and hasattr(model.config, "max_position_embeddings"): + if "input_ids" in kwargs and kwargs["input_ids"] is not None and "max_new_tokens" in kwargs: + if kwargs["input_ids"].shape[-1] + kwargs["max_new_tokens"] > model.config.max_position_embeddings: + raise ValueError( + f'Unsloth: input length {kwargs["input_ids"].shape[-1]} + max_new_tokens {kwargs["max_new_tokens"]} exceeds the maximum sequence length of {model.config.max_position_embeddings}!\n'\ + 'You will need to do long context extension by increasing the `max_seq_length` in `FastLanguageModel.from_pretrained`.' + ) + pass + # Set a flag for generation! 
internal_model = model while hasattr(internal_model, "model"): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index db7259b1d9..4566302ed0 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -43,6 +43,24 @@ if SUPPORTS_GEMMA2: from .gemma2 import FastGemma2Model pass +import torch + +def _get_dtype(dtype): + __DTYPE_MAP = { + "float32": torch.float32, + torch.float32: torch.float32, + "float16": torch.float16, + torch.float16: torch.float16, + "bfloat16": torch.bfloat16, + torch.bfloat16: torch.bfloat16, + } + if dtype in __DTYPE_MAP: + return __DTYPE_MAP[dtype] + else: + print(f"Unsloth: {dtype} is not recognized, so we'll default to torch.float16") + return torch.float16 + pass +pass def __get_model_name( @@ -332,7 +350,7 @@ def from_pretrained( model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, - dtype = dtype, + dtype = _get_dtype(dtype), load_in_4bit = load_in_4bit, token = token, device_map = device_map, From d920393690e732b4bd9091ab2b4083fe3b027719 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 19:00:13 -0800 Subject: [PATCH 0730/1088] Update loader.py --- unsloth/models/loader.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 4566302ed0..0414d54460 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -54,11 +54,11 @@ def _get_dtype(dtype): "bfloat16": torch.bfloat16, torch.bfloat16: torch.bfloat16, } - if dtype in __DTYPE_MAP: - return __DTYPE_MAP[dtype] + if dtype is None or dtype == None: return None + if dtype in __DTYPE_MAP: return __DTYPE_MAP[dtype] else: - print(f"Unsloth: {dtype} is not recognized, so we'll default to torch.float16") - return torch.float16 + print(f"Unsloth: {dtype} is not recognized, so we'll default to None") + return None pass pass From 9a7477671257537e4903ff0174047746c54793c5 Mon Sep 17 00:00:00 2001 From: Daniel Han 
Date: Wed, 6 Nov 2024 19:00:23 -0800 Subject: [PATCH 0731/1088] Update loader.py --- unsloth/models/loader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0414d54460..7a6322d248 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -54,8 +54,8 @@ def _get_dtype(dtype): "bfloat16": torch.bfloat16, torch.bfloat16: torch.bfloat16, } - if dtype is None or dtype == None: return None - if dtype in __DTYPE_MAP: return __DTYPE_MAP[dtype] + if dtype is None or dtype == None: return None + elif dtype in __DTYPE_MAP: return __DTYPE_MAP[dtype] else: print(f"Unsloth: {dtype} is not recognized, so we'll default to None") return None From 0c8c5ed81e423658ab9ae81eac5aab8d18f5d7af Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 19:00:42 -0800 Subject: [PATCH 0732/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a6cd13d251..5fff96642d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.4" +__version__ = "2024.11.5" __all__ = [ "prepare_model_for_kbit_training", From 8b3e9c2ff0b3e5c6fb11d2913023a1d9fa069324 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Nov 2024 21:07:50 -0800 Subject: [PATCH 0733/1088] Update cross_entropy_loss.py --- unsloth/kernels/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index cc3dbb1d87..f82defd405 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -400,6 +400,6 @@ def fast_cross_entropy_loss( pass # Patch CE Losses in transformers -def patch_loss_functions(): - _patch_loss_functions(fast_cross_entropy_loss) +def patch_loss_functions(torch_compile = True): + _patch_loss_functions(fast_cross_entropy_loss, torch_compile = torch_compile) pass From 3a1e7ef8299f3c96fa6e8de11fd0772af3cbc83f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 7 Nov 2024 01:11:45 -0800 Subject: [PATCH 0734/1088] Update _utils.py --- unsloth/models/_utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5fff96642d..cb9ae48a86 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -104,7 +104,7 @@ # Ignore logging messages class HideLoggingMessage(logging.Filter): def __init__(self, text): self.text = text - def filter(self, x): return not x.getMessage().startswith(self.text) + def filter(self, x): return not (self.text in x.getMessage()) pass # The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. @@ -112,6 +112,14 @@ def filter(self, x): return not x.getMessage().startswith(self.text) transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups")) del transformers_training_args_logger +# Using the default loss: `ForCausalLMLoss`. 
+try: + from transformers.modeling_utils import logger as transformers_modeling_utils_logger + transformers_modeling_utils_logger.addFilter(HideLoggingMessage("ForCausalLMLoss")) + del transformers_modeling_utils_logger +except: + pass + # ============================================= # ============================================= From f1ec165096f7d9f54ed988b546d60dec9b443dab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 9 Nov 2024 17:01:33 -0800 Subject: [PATCH 0735/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index c639dbf1a0..6d0ee548c1 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1001,13 +1001,14 @@ def patch_sft_trainer_tokenizer(): # Also DPO weirdly tokenizes non numeric columns? Delete them! check_text += \ "\n"\ - "column_names = set(self.train_dataset.column_names)\n"\ - "check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ - " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ - " 'prompt_input_ids', 'prompt_attention_mask']\n"\ - "if all(x in column_names for x in check):\n"\ - " self.train_dataset = self.train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ - "del check, column_names\n"\ + "if hasattr(self.train_dataset, 'column_names''):\n" + " column_names = set(self.train_dataset.column_names)\n"\ + " check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ + " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ + " 'prompt_input_ids', 'prompt_attention_mask']\n"\ + " if all(x in column_names for x in check):\n"\ + " self.train_dataset = self.train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ + " del check, column_names\n"\ "\n" check_text = check_text.split("\n") From 
a4e970553164acca5fad28287c25e14af30891f9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 9 Nov 2024 17:34:47 -0800 Subject: [PATCH 0736/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 6d0ee548c1..7967676d80 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1001,7 +1001,7 @@ def patch_sft_trainer_tokenizer(): # Also DPO weirdly tokenizes non numeric columns? Delete them! check_text += \ "\n"\ - "if hasattr(self.train_dataset, 'column_names''):\n" + "if hasattr(self.train_dataset, 'column_names'):\n" " column_names = set(self.train_dataset.column_names)\n"\ " check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ From 92c6a2784043df578e7ba7c4ff0e92b5866da09d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 9 Nov 2024 17:37:11 -0800 Subject: [PATCH 0737/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 7967676d80..a9b635203e 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1015,6 +1015,8 @@ def patch_sft_trainer_tokenizer(): check_text = "\n".join(" "*where + x for x in check_text) function = function.replace(replacer, check_text + replacer) + print(function) + raise exec(function, globals()) exec(f"trl.trainer.{path_to_trainer}.{function_name} = {function_name}", globals()) From 673f541788bdd858cf4ddb98edd3fa52fe4892c1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 9 Nov 2024 17:40:32 -0800 Subject: [PATCH 0738/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 
a9b635203e..57829e621d 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1001,7 +1001,7 @@ def patch_sft_trainer_tokenizer(): # Also DPO weirdly tokenizes non numeric columns? Delete them! check_text += \ "\n"\ - "if hasattr(self.train_dataset, 'column_names'):\n" + "if hasattr(self.train_dataset, 'column_names'):\n"\ " column_names = set(self.train_dataset.column_names)\n"\ " check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ @@ -1015,8 +1015,6 @@ def patch_sft_trainer_tokenizer(): check_text = "\n".join(" "*where + x for x in check_text) function = function.replace(replacer, check_text + replacer) - print(function) - raise exec(function, globals()) exec(f"trl.trainer.{path_to_trainer}.{function_name} = {function_name}", globals()) From 8fe9109431d9b13757429879338546ffba94ccf0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 11 Nov 2024 00:04:02 -0800 Subject: [PATCH 0739/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 57829e621d..ed95e07632 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -588,15 +588,21 @@ def load_correct_tokenizer( def _fix_chat_template(chat_template): endfor = "{% endfor %}" where = chat_template.find(endfor) - if where == -1: return chat_template + if where == -1: + endfor = "{%- endfor %}" + where = chat_template.find(endfor) + if where == -1: + return chat_template after_endfor = chat_template[where + len(endfor):] - if "{% if" not in after_endfor and "{% set " not in after_endfor and \ + dash = "-" if endfor.startswith("{%-") else "" + + if "{%" + dash + " if" not in after_endfor and "{%" + dash + " set " not in after_endfor and \ after_endfor.startswith("{{") and after_endfor.endswith("}}") 
and \ after_endfor.count("{{") == 1 and after_endfor.count("}}") == 1: - after_endfor = "{% if add_generation_prompt %}" + after_endfor + "{% endif %}" + after_endfor = "{%" + dash + " if add_generation_prompt %}" + after_endfor + endfor chat_template = chat_template[:where + len(endfor)] + after_endfor pass @@ -643,10 +649,12 @@ def fix_chat_template(tokenizer): if no == yes: # SAME?! That's not good! We check for add_generation_prompt - if "{% if add_generation_prompt %}" not in chat_template: + if "{% if add_generation_prompt %}" not in chat_template and \ + "{%- if add_generation_prompt %}" not in chat_template: # Try fixing it by adding it new_chat_template = _fix_chat_template(chat_template) - if "{% if add_generation_prompt %}" not in new_chat_template: + if "{% if add_generation_prompt %}" not in new_chat_template and \ + "{%- if add_generation_prompt %}" not in new_chat_template: raise RuntimeError( f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"\ "does not have a {% if add_generation_prompt %} for generation purposes.\n"\ From ad41479c5488cf69c79f1af32ea1d53bce0a08e3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 11 Nov 2024 00:17:22 -0800 Subject: [PATCH 0740/1088] triton_cast --- unsloth/kernels/cross_entropy_loss.py | 8 ++++---- unsloth/kernels/utils.py | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index f82defd405..d347cd1878 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh +from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh, triton_cast from transformers.models.llama.modeling_llama import logger from packaging.version import Version @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 
1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -142,7 +142,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -216,7 +216,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index b394d122fd..cef6ccb864 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -34,9 +34,15 @@ if Version(triton.__version__) >= Version("3.0.0"): from triton.language.extra import libdevice triton_tanh = libdevice.tanh + triton_cast = tl.cast else: import triton.language as tl triton_tanh = tl.math.tanh + # No casting in old Triton versions + @triton.jit + def triton_cast(x, dtype): + return x.to(dtype) + pass pass From fcf200997a9af81a9dd2799e584e622c5adee02d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 11 Nov 2024 00:37:37 -0800 Subject: [PATCH 0741/1088] Update utils.py --- unsloth/kernels/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index cef6ccb864..de543962ef 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -31,12 +31,12 @@ # tl.math.tanh now is libdevice.tanh from packaging.version import Version import triton +import triton.language as tl if 
Version(triton.__version__) >= Version("3.0.0"): from triton.language.extra import libdevice triton_tanh = libdevice.tanh triton_cast = tl.cast else: - import triton.language as tl triton_tanh = tl.math.tanh # No casting in old Triton versions @triton.jit From af9ba073da5b9c3dbcf6580d32faa9d3bd37a290 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 11 Nov 2024 18:46:05 -0800 Subject: [PATCH 0742/1088] Qwen 2.5 Coder --- unsloth/models/llama.py | 3 ++- unsloth/models/mapper.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7f07bea4c5..47a57024a2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2317,7 +2317,8 @@ def patch_peft_model( layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 else: - if model_type != "qwen2": + if model_type == "qwen2": n_qkv += 1 + else: logger.warning_once( "Not an error, but Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ "are not enabled or a bias term (like in Qwen) is used." 
diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 10e40ab7c6..d4f1278e1d 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -384,22 +384,54 @@ "unsloth/Qwen2.5-Math-72B-Instruct", "Qwen/Qwen2.5-Math-72B-Instruct", ), + "unsloth/Qwen2.5-Coder-0.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-0.5B", + "Qwen/Qwen2.5-Coder-0.5B", + ), "unsloth/Qwen2.5-Coder-1.5B-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-1.5B", "Qwen/Qwen2.5-Coder-1.5B", ), + "unsloth/Qwen2.5-Coder-3B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-3B", + "Qwen/Qwen2.5-Coder-3B", + ), "unsloth/Qwen2.5-Coder-7B-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-7B", "Qwen/Qwen2.5-Coder-7B", ), + "unsloth/Qwen2.5-Coder-14B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-14B", + "Qwen/Qwen2.5-Coder-14B", + ), + "unsloth/Qwen2.5-Coder-32B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-32B", + "Qwen/Qwen2.5-Coder-32B", + ), + "unsloth/Qwen2.5-Coder-0.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-Instruct-0.5B", + "Qwen/Qwen2.5-Coder-Instruct-0.5B", + ), "unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-Instruct-1.5B", "Qwen/Qwen2.5-Coder-Instruct-1.5B", ), + "unsloth/Qwen2.5-Coder-3B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-3B-Instruct", + "Qwen/Qwen2.5-Coder-3B-Instruct", + ), "unsloth/Qwen2.5-Coder-7B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-7B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct", ), + "unsloth/Qwen2.5-Coder-14B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-14B-Instruct", + "Qwen/Qwen2.5-Coder-14B-Instruct", + ), + "unsloth/Qwen2.5-Coder-32B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-32B-Instruct", + "Qwen/Qwen2.5-Coder-32B-Instruct", + ), "unsloth/Llama-3.2-1B-bnb-4bit" : ( "unsloth/Llama-3.2-1B", "meta-llama/Llama-3.2-1B", From 899caf0bb5d0627b77e9ecffda5a8c0cbc2536f0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 12 Nov 2024 03:22:41 -0800 Subject: [PATCH 0743/1088] Qwen 2.5 (#1280) * Fix TRL * Update mistral.py * Patch processing_class * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. 
* More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. 
* Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling 
* Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 Co-authored-by: Edwin Fennell Co-authored-by: root --- unsloth/kernels/cross_entropy_loss.py | 12 +++++----- 
unsloth/kernels/utils.py | 8 ++++++- unsloth/models/_utils.py | 10 +++++++- unsloth/models/llama.py | 3 ++- unsloth/models/mapper.py | 32 ++++++++++++++++++++++++++ unsloth/tokenizer_utils.py | 33 +++++++++++++++++---------- 6 files changed, 77 insertions(+), 21 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index cc3dbb1d87..d347cd1878 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh +from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh, triton_cast from transformers.models.llama.modeling_llama import logger from packaging.version import Version @@ -64,7 +64,7 @@ def _cross_entropy_forward( This ensures exp(x - max(x))'s maximum is 1 as exp(0) = 1. """ row_idx = tl.program_id(0) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx labels_ptr += row_idx @@ -142,7 +142,7 @@ def _chunked_cross_entropy_forward( """ row_idx = tl.program_id(0) chunk_idx = tl.program_id(1) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) loss_ptr += row_idx logsumexp_ptr += row_idx * N_CHUNKS + chunk_idx labels_ptr += row_idx @@ -216,7 +216,7 @@ def _cross_entropy_backward( row_idx = tl.program_id(0) block_idx = tl.program_id(1) - logits_ptr += row_idx * tl.cast(logits_row_stride, tl.int64) + logits_ptr += row_idx * triton_cast(logits_row_stride, tl.int64) dloss_ptr += row_idx * dloss_row_stride col_offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = col_offsets < VOCAB_SIZE @@ -400,6 +400,6 @@ def fast_cross_entropy_loss( pass # Patch CE Losses in transformers -def patch_loss_functions(): - 
_patch_loss_functions(fast_cross_entropy_loss) +def patch_loss_functions(torch_compile = True): + _patch_loss_functions(fast_cross_entropy_loss, torch_compile = torch_compile) pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index b394d122fd..de543962ef 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -31,12 +31,18 @@ # tl.math.tanh now is libdevice.tanh from packaging.version import Version import triton +import triton.language as tl if Version(triton.__version__) >= Version("3.0.0"): from triton.language.extra import libdevice triton_tanh = libdevice.tanh + triton_cast = tl.cast else: - import triton.language as tl triton_tanh = tl.math.tanh + # No casting in old Triton versions + @triton.jit + def triton_cast(x, dtype): + return x.to(dtype) + pass pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5fff96642d..cb9ae48a86 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -104,7 +104,7 @@ # Ignore logging messages class HideLoggingMessage(logging.Filter): def __init__(self, text): self.text = text - def filter(self, x): return not x.getMessage().startswith(self.text) + def filter(self, x): return not (self.text in x.getMessage()) pass # The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. @@ -112,6 +112,14 @@ def filter(self, x): return not x.getMessage().startswith(self.text) transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups")) del transformers_training_args_logger +# Using the default loss: `ForCausalLMLoss`. 
+try: + from transformers.modeling_utils import logger as transformers_modeling_utils_logger + transformers_modeling_utils_logger.addFilter(HideLoggingMessage("ForCausalLMLoss")) + del transformers_modeling_utils_logger +except: + pass + # ============================================= # ============================================= diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7f07bea4c5..47a57024a2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -2317,7 +2317,8 @@ def patch_peft_model( layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 else: - if model_type != "qwen2": + if model_type == "qwen2": n_qkv += 1 + else: logger.warning_once( "Not an error, but Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ "are not enabled or a bias term (like in Qwen) is used." diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 10e40ab7c6..d4f1278e1d 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -384,22 +384,54 @@ "unsloth/Qwen2.5-Math-72B-Instruct", "Qwen/Qwen2.5-Math-72B-Instruct", ), + "unsloth/Qwen2.5-Coder-0.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-0.5B", + "Qwen/Qwen2.5-Coder-0.5B", + ), "unsloth/Qwen2.5-Coder-1.5B-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-1.5B", "Qwen/Qwen2.5-Coder-1.5B", ), + "unsloth/Qwen2.5-Coder-3B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-3B", + "Qwen/Qwen2.5-Coder-3B", + ), "unsloth/Qwen2.5-Coder-7B-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-7B", "Qwen/Qwen2.5-Coder-7B", ), + "unsloth/Qwen2.5-Coder-14B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-14B", + "Qwen/Qwen2.5-Coder-14B", + ), + "unsloth/Qwen2.5-Coder-32B-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-32B", + "Qwen/Qwen2.5-Coder-32B", + ), + "unsloth/Qwen2.5-Coder-0.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-Instruct-0.5B", + "Qwen/Qwen2.5-Coder-Instruct-0.5B", + ), "unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-Instruct-1.5B", 
"Qwen/Qwen2.5-Coder-Instruct-1.5B", ), + "unsloth/Qwen2.5-Coder-3B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-3B-Instruct", + "Qwen/Qwen2.5-Coder-3B-Instruct", + ), "unsloth/Qwen2.5-Coder-7B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-7B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct", ), + "unsloth/Qwen2.5-Coder-14B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-14B-Instruct", + "Qwen/Qwen2.5-Coder-14B-Instruct", + ), + "unsloth/Qwen2.5-Coder-32B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-Coder-32B-Instruct", + "Qwen/Qwen2.5-Coder-32B-Instruct", + ), "unsloth/Llama-3.2-1B-bnb-4bit" : ( "unsloth/Llama-3.2-1B", "meta-llama/Llama-3.2-1B", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index c639dbf1a0..ed95e07632 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -588,15 +588,21 @@ def load_correct_tokenizer( def _fix_chat_template(chat_template): endfor = "{% endfor %}" where = chat_template.find(endfor) - if where == -1: return chat_template + if where == -1: + endfor = "{%- endfor %}" + where = chat_template.find(endfor) + if where == -1: + return chat_template after_endfor = chat_template[where + len(endfor):] - if "{% if" not in after_endfor and "{% set " not in after_endfor and \ + dash = "-" if endfor.startswith("{%-") else "" + + if "{%" + dash + " if" not in after_endfor and "{%" + dash + " set " not in after_endfor and \ after_endfor.startswith("{{") and after_endfor.endswith("}}") and \ after_endfor.count("{{") == 1 and after_endfor.count("}}") == 1: - after_endfor = "{% if add_generation_prompt %}" + after_endfor + "{% endif %}" + after_endfor = "{%" + dash + " if add_generation_prompt %}" + after_endfor + endfor chat_template = chat_template[:where + len(endfor)] + after_endfor pass @@ -643,10 +649,12 @@ def fix_chat_template(tokenizer): if no == yes: # SAME?! That's not good! 
We check for add_generation_prompt - if "{% if add_generation_prompt %}" not in chat_template: + if "{% if add_generation_prompt %}" not in chat_template and \ + "{%- if add_generation_prompt %}" not in chat_template: # Try fixing it by adding it new_chat_template = _fix_chat_template(chat_template) - if "{% if add_generation_prompt %}" not in new_chat_template: + if "{% if add_generation_prompt %}" not in new_chat_template and \ + "{%- if add_generation_prompt %}" not in new_chat_template: raise RuntimeError( f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"\ "does not have a {% if add_generation_prompt %} for generation purposes.\n"\ @@ -1001,13 +1009,14 @@ def patch_sft_trainer_tokenizer(): # Also DPO weirdly tokenizes non numeric columns? Delete them! check_text += \ "\n"\ - "column_names = set(self.train_dataset.column_names)\n"\ - "check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ - " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ - " 'prompt_input_ids', 'prompt_attention_mask']\n"\ - "if all(x in column_names for x in check):\n"\ - " self.train_dataset = self.train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ - "del check, column_names\n"\ + "if hasattr(self.train_dataset, 'column_names'):\n"\ + " column_names = set(self.train_dataset.column_names)\n"\ + " check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ + " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ + " 'prompt_input_ids', 'prompt_attention_mask']\n"\ + " if all(x in column_names for x in check):\n"\ + " self.train_dataset = self.train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ + " del check, column_names\n"\ "\n" check_text = check_text.split("\n") From d8ff860c842095f4729fdd1d5aedf567a9e2c4da Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 12 Nov 2024 10:54:58 -0800 Subject: [PATCH 0744/1088] 
Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cb9ae48a86..5d6f7921a6 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.5" +__version__ = "2024.11.6" __all__ = [ "prepare_model_for_kbit_training", From 3fec577bb24d64bdf4ace32c91d10eaf0588a330 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:53:50 +0400 Subject: [PATCH 0745/1088] Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han --- unsloth/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 5102d8f466..d5651d5edb 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -31,6 +31,10 @@ # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! # Thank you for your understanding and we appreciate it immensely! 
+ +# Fixes https://github.com/unslothai/unsloth/issues/1266 +os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" + if "CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" devices = os.environ["CUDA_VISIBLE_DEVICES"] From 03c624375f3b1e1e3772cd20a41d865ae1db3d82 Mon Sep 17 00:00:00 2001 From: Uday Girish Maradana Date: Wed, 13 Nov 2024 02:55:28 -0500 Subject: [PATCH 0746/1088] DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 0a3c83f3fa..4d68d996f0 100644 --- a/README.md +++ b/README.md @@ -299,6 +299,9 @@ DPO (Direct Preference Optimization), PPO, Reward Modelling all seem to work as We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! 
```python +import os +os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Optional set GPU device ID + from unsloth import FastLanguageModel, PatchDPOTrainer from unsloth import is_bfloat16_supported PatchDPOTrainer() From 10565efe27beffe9bd73827bb892b15aba62eb68 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:06:48 +0400 Subject: [PATCH 0747/1088] fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han --- unsloth/chat_templates.py | 97 +++++++++++++++++++++++++++++++++++---- 1 file changed, 89 insertions(+), 8 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index b254202c75..da10f7e003 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -39,6 +39,7 @@ train_on_responses_only, ) CHAT_TEMPLATES = {} +DEFAULT_SYSTEM_MESSAGE = {} # =========================================== Unsloth # Unsloth efficient template leverages from Zephyr @@ -48,7 +49,7 @@ "{{ messages[0]['content'] + '\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'You are a helpful assistant to the user\n' }}"\ + "{{ '{system_message}' + '\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -80,6 +81,7 @@ unsloth_eos_token = "eos_token" CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False, unsloth_ollama,) +DEFAULT_SYSTEM_MESSAGE["unsloth"] = "You are a helpful assistant to the user" pass # =========================================== Zephyr @@ -116,6 +118,7 @@ zephyr_eos_token = "eos_token" CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False, zephyr_ollama,) +DEFAULT_SYSTEM_MESSAGE["zephyr"] = None # No system message in Zephyr pass # =========================================== ChatML @@ -153,6 +156,7 @@ 
chatml_eos_token = "<|im_end|>" CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True, chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["chatml"] = None # No system message in ChatML pass # =========================================== Mistral-1 @@ -193,6 +197,7 @@ mistral_eos_token = "eos_token" CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False, mistral_ollama,) +DEFAULT_SYSTEM_MESSAGE["mistral"] = None # No system message in Mistral pass # =========================================== Llama-2 @@ -234,6 +239,7 @@ llama_eos_token = "eos_token" CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False, llama_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama"] = None # No system message in Llama pass # =========================================== Vicuna @@ -244,7 +250,7 @@ "{{ messages[0]['content'] + ' ' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' + ' ' }}"\ + "{{ '{system_message}' + ' ' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -273,6 +279,7 @@ vicuna_eos_token = "eos_token" CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False, vicuna_ollama,) +DEFAULT_SYSTEM_MESSAGE["vicuna"] = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." pass # =========================================== Vicuna Old @@ -283,7 +290,7 @@ "{{ messages[0]['content'] + '\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\\'s questions.' 
+ '\n' }}"\ + "{{ '{system_message}' + '\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -315,6 +322,10 @@ vicuna_old_eos_token = "eos_token" CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False, vicuna_old_ollama,) +DEFAULT_SYSTEM_MESSAGE["vicuna_old"] = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\\'s questions." + +CHAT_TEMPLATES["vicuna old"] = CHAT_TEMPLATES["vicuna_old"] +DEFAULT_SYSTEM_MESSAGE["vicuna old"] = DEFAULT_SYSTEM_MESSAGE["vicuna_old"] pass # =========================================== Alpaca multi turn @@ -325,7 +336,7 @@ "{{ messages[0]['content'] + '\n\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'Below are some instructions that describe some tasks. Write responses that appropriately complete each request.\n\n' }}"\ + "{{ '{system_message}' + '\n\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -362,6 +373,7 @@ alpaca_eos_token = "eos_token" CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False, alpaca_ollama,) +DEFAULT_SYSTEM_MESSAGE["alpaca"] = "Below are some instructions that describe some tasks. Write responses that appropriately complete each request." 
pass # =========================================== Gemma @@ -372,7 +384,7 @@ "{{ bos_token }}"\ "{% if messages[0]['role'] == 'system' %}"\ "{{'user\n' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '\n'}}"\ - "{% set loop_messages = messages[2:] %}"\ + "{% set messages = messages[2:] %}"\ "{% endif %}"\ "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ @@ -407,6 +419,7 @@ gemma_eos_token = "" CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True, gemma_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma"] = None # No system message in Gemma pass # =========================================== Gemma with ChatML instead @@ -437,6 +450,7 @@ "<|im_end|>", ) CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True, gemma_chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma_chatml"] = None # No system message in Gemma pass # =========================================== Gemma 2 @@ -446,12 +460,14 @@ gemma2_ollama = gemma_ollama + "PARAMETER num_ctx 4096\n" gemma2_eos_token = "" CHAT_TEMPLATES["gemma2"] = (gemma2_template, gemma2_eos_token, True, gemma2_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma2"] = None # No system message in Gemma 2 # =========================================== Gemma 2 with ChatML instead gemma2_chatml_template = gemma_chatml_template gemma2_chatml_ollama = gemma_chatml_ollama + "PARAMETER num_ctx 4096\n" gemma2_chatml_eos_token = gemma_chatml_eos_token CHAT_TEMPLATES["gemma2_chatml"] = (gemma2_chatml_template, gemma2_chatml_eos_token, True, gemma2_chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma2_chatml"] = None # No system message in Gemma 2 pass # =========================================== Llama-3 @@ -491,7 +507,12 @@ ''' llama3_template_eos_token = "eos_token" + CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False, llama3_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-3"] = None # No system message in Llama-3 + +CHAT_TEMPLATES["llama3"] = (llama3_template, 
llama3_template_eos_token, False, llama3_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama3"] = None # No system message in Llama-3 pass @@ -532,8 +553,13 @@ phi3_template_eos_token = "<|end|>" CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) +DEFAULT_SYSTEM_MESSAGE["phi-3"] = None # No system message in Phi-3 + CHAT_TEMPLATES["phi-35"] = CHAT_TEMPLATES["phi-3"] +DEFAULT_SYSTEM_MESSAGE["phi-35"] = None # No system message in Phi-3.5 + CHAT_TEMPLATES["phi-3.5"] = CHAT_TEMPLATES["phi-3"] +DEFAULT_SYSTEM_MESSAGE["phi-3.5"] = None # No system message in Phi-3.5 pass # =========================================== Llama-3.1 @@ -573,7 +599,7 @@ {%- set system_message = messages[0]['content'] %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "" %} + {%- set system_message = "{system_message}" %} {%- endif %} {#- System message + builtin tools #} @@ -729,7 +755,10 @@ llama31_template_eos_token = "eos_token" CHAT_TEMPLATES["llama-3.1"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-3.1"] = "" # Llama3.1 default system message is empty + the dates + CHAT_TEMPLATES["llama-31"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-31"] = "" # Llama3.1 default system message is empty + the dates pass @@ -751,7 +780,7 @@ {%- if messages[0][\'role\'] == \'system\' %} {{- \'<|im_start|>system\\n\' + messages[0][\'content\'] + \'<|im_end|>\\n\' }} {%- else %} - {{- \'<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n\' }} + {{- \'<|im_start|>system\\n{system_message}<|im_end|>\\n\' }} {%- endif %}\n{%- endif %}\n{%- for message in messages %} {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %} {{- \'<|im_start|>\' + message.role + \'\\n\' + message.content + \'<|im_end|>\' + \'\\n\' }} @@ -847,10 +876,53 @@ ''' qwen25_template_eos_token = "eos_token" +qwen25_default_system_message = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant." CHAT_TEMPLATES["qwen-2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen-2.5"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen-25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen-25"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen25"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen2.5"] = qwen25_default_system_message # No system message in Qwen 2.5 +pass + +def _change_system_message(template: str, type_chat_template: str, system_message: str = None): + system_message_pattern = r"\{system_message\}" + + # For predefined templates, check if default system message exists + default_system_message = DEFAULT_SYSTEM_MESSAGE.get(f"{type_chat_template}", None) + if default_system_message is None: + if system_message is not None: + logger.warning_once( + f"Unsloth: You tried to change the system message for {type_chat_template}, " + "but it doesn't have a default system message. " + "You need to manually add the system message in your data." 
+ ) + return template, system_message + pass + + # For custom templates + if type_chat_template is None: + has_placeholder = re.search(system_message_pattern, template) is not None + + if has_placeholder: + if system_message is None: + raise ValueError("Unsloth: You need to provide a system message for custom templates.") + new_template = re.sub(system_message_pattern, system_message, template) + return new_template, system_message + + return template, system_message + pass + + # For predefined templates with default system message + message_to_use = system_message if system_message is not None else default_system_message + new_template = re.sub(system_message_pattern, message_to_use, template) + + return new_template, message_to_use pass @@ -886,14 +958,20 @@ def get_chat_template( old_padding_side = tokenizer.padding_side same_padding_token = False - + type_chat_template = None + if type(chat_template) in (list, tuple,): + # For changing system message later + # Since it's not supported yet, we will raise an error first! 
+ type_chat_template = chat_template[0].lower() chat_template, stop_word = chat_template assert(type(chat_template) is str) assert(type(stop_word) is str) ollama_modelfile = None elif type(chat_template) is str: + # For changing system message later + type_chat_template = chat_template.lower() chat_template, stop_word, yes_map_eos_token, ollama_modelfile = CHAT_TEMPLATES[chat_template] @@ -1052,6 +1130,9 @@ def get_chat_template( else: chat_template = new_chat_template pass + + chat_template, system_message = _change_system_message(chat_template, type_chat_template, system_message) + tokenizer.chat_template = chat_template # Also fix up other tokens From dc0232c883b8f0ac44b59bc4ae917f73402448f5 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Thu, 14 Nov 2024 05:33:30 +0400 Subject: [PATCH 0748/1088] fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han --- unsloth/__init__.py | 3 ++ unsloth/trainer.py | 112 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 105 insertions(+), 10 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d5651d5edb..d94eeb8973 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -176,3 +176,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 from .chat_templates import * from .tokenizer_utils import * from .trainer import * + +# patch sft trainer +_patch_trl_trainer() \ No newline at end of file diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 25bb434023..b03cc347c6 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -12,9 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the 
License. +import warnings from dataclasses import dataclass, field from typing import Optional +from functools import wraps +import trl +import inspect from trl import SFTTrainer try: from trl import SFTConfig as TrainingArguments @@ -24,30 +28,38 @@ from . import is_bfloat16_supported from unsloth_zoo.training_utils import unsloth_train as _unsloth_train from packaging.version import Version +import dataclasses + +__all__ = [ + "UnslothTrainingArguments", + "UnslothTrainer", + "unsloth_train", + "_patch_trl_trainer", +] # Unsloth gradient accumulation fix: from transformers import __version__ as transformers_version if Version(transformers_version) > Version("4.45.2"): - def unsloth_train(trainer): - return trainer.train() + def unsloth_train(trainer, *args, **kwargs): + return trainer.train(*args, **kwargs) pass else: - def unsloth_train(trainer): + def unsloth_train(trainer, *args, **kwargs): + if len(args) != 0 or len(kwargs) != 0: + raise RuntimeError( + "Unsloth: Our custom gradient accumulation fixed trainer does not support other arguments.\n"\ + "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"\ + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`' + ) print( "Unsloth: Using our custom gradient accumulation fixed trainer, which is not feature complete.\n"\ "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"\ - '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`' ) return _unsloth_train(trainer) pass pass -__all__ = [ - "UnslothTrainingArguments", - "UnslothTrainer", - "unsloth_train", -] - @dataclass class UnslothTrainingArguments(TrainingArguments): @@ -119,3 +131,83 @@ def create_optimizer(self): return self.optimizer pass pass + +# From `trl>=0.13.0`, they 
changed how to pass several params to the trainer +# We need to patch to make the transition smooth +def create_backwards_compatible_trainer(trainer_class, config_class): + original_init = trainer_class.__init__ + + @wraps(original_init) + def new_init(self, *args, **kwargs): + # All Trainer tokenizer is now called processing_class + if "tokenizer" in kwargs: + kwargs["processing_class"] = kwargs.pop("tokenizer") + + if "args" in kwargs: + training_args = kwargs.pop("args", None) + + # Get parameters that Trainer.__init__ actually expects + trainer_params = set(inspect.signature(original_init).parameters.keys()) + trainer_params.remove('self') + trainer_params.remove('args') + + # Get fields that should be passed to Config init + config_fields = { + field.name: field for field in dataclasses.fields(config_class) + if field.init + } + + # Create config dict with valid fields from training_args + config_dict = { + name: getattr(training_args, name) + for name in config_fields + if hasattr(training_args, name) + } + + # Get parameters that exist in Config but not in TrainingArguments + moved_params = \ + set(inspect.signature(config_class) .parameters.keys()) - \ + set(inspect.signature(TrainingArguments).parameters.keys()) + + # Separate kwargs into trainer kwargs and config kwargs + trainer_kwargs = {} + additional_config_kwargs = {} + + for key, value in kwargs.items(): + if key in trainer_params: trainer_kwargs[key] = value + elif key in moved_params or key in config_fields: + additional_config_kwargs[key] = value + else: + additional_config_kwargs[key] = value + pass + + # Update config_dict with additional kwargs + config_dict.update(additional_config_kwargs) + + # Create Config with all the collected parameters + config = config_class(**config_dict) + + # Reconstruct kwargs for Trainer + kwargs = trainer_kwargs + kwargs["args"] = config + pass + original_init(self, *args, **kwargs) + pass + return new_init + +if Version(trl.__version__) >= 
Version("0.13.0.dev0"): + # print("Patching TRL Trainer to maintain backward compatibility with the old syntax.") + def _patch_trl_trainer(): + import trl.trainer + trl_classes = dir(trl.trainer) + + non_convertable_trainer = set(["PPOv2", "AlignProp"]) + trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) - non_convertable_trainer + trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer + trl_classes = list(trl_trainers & trl_configs) + for x in trl_classes: + exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + pass +else: + def _patch_trl_trainer(): return +pass From 84d6d36cdeb8f6b1fc7b659e208fd8dd7f01d76d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 17:38:26 -0800 Subject: [PATCH 0749/1088] Update __init__.py --- unsloth/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d94eeb8973..745b210208 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -177,5 +177,5 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 from .tokenizer_utils import * from .trainer import * -# patch sft trainer -_patch_trl_trainer() \ No newline at end of file +# Patch TRL trainers for backwards compatibility +_patch_trl_trainer() From a31027c9e4c433f5376eeb07e664120475a821d6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 18:44:54 -0800 Subject: [PATCH 0750/1088] Update trainer.py --- unsloth/trainer.py | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/unsloth/trainer.py b/unsloth/trainer.py index b03cc347c6..896db35ecd 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -139,15 +139,17 @@ def create_backwards_compatible_trainer(trainer_class, config_class): @wraps(original_init) def new_init(self, *args, **kwargs): - # All Trainer tokenizer is 
now called processing_class - if "tokenizer" in kwargs: + # All Trainer tokenizer are now called processing_class + trainer_params = set(inspect.signature(original_init).parameters.keys()) + + if "processing_class" in trainer_params and "tokenizer" in kwargs: kwargs["processing_class"] = kwargs.pop("tokenizer") + pass - if "args" in kwargs: + if ("args" in kwargs) and (Version(trl.__version__) >= Version("0.13.0.dev0")): training_args = kwargs.pop("args", None) # Get parameters that Trainer.__init__ actually expects - trainer_params = set(inspect.signature(original_init).parameters.keys()) trainer_params.remove('self') trainer_params.remove('args') @@ -179,6 +181,7 @@ def new_init(self, *args, **kwargs): additional_config_kwargs[key] = value else: additional_config_kwargs[key] = value + pass pass # Update config_dict with additional kwargs @@ -194,20 +197,24 @@ def new_init(self, *args, **kwargs): original_init(self, *args, **kwargs) pass return new_init +pass + + +def _patch_trl_trainer(): + if hasattr(trl, "__UNSLOTH_BACKWARDS_COMPATIBLE__"): return + if Version(trl.__version__) <= Version("0.11.0"): return -if Version(trl.__version__) >= Version("0.13.0.dev0"): - # print("Patching TRL Trainer to maintain backward compatibility with the old syntax.") - def _patch_trl_trainer(): - import trl.trainer - trl_classes = dir(trl.trainer) - - non_convertable_trainer = set(["PPOv2", "AlignProp"]) - trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) - non_convertable_trainer - trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer - trl_classes = list(trl_trainers & trl_configs) - for x in trl_classes: - exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + import trl.trainer + trl_classes = dir(trl.trainer) + + non_convertable_trainer = set(["PPOv2", "AlignProp"]) + trl_trainers = set(x[:-len("Trainer")] for x in trl_classes 
if x.endswith("Trainer")) - non_convertable_trainer + trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer + trl_classes = list(trl_trainers & trl_configs) + + for x in trl_classes: + exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) pass -else: - def _patch_trl_trainer(): return + + trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ = True pass From 035bccee7560558f0de37066e5d7b7b294eab939 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 18:48:59 -0800 Subject: [PATCH 0751/1088] Update trainer.py --- unsloth/trainer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 896db35ecd..25b8999959 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -201,7 +201,10 @@ def new_init(self, *args, **kwargs): def _patch_trl_trainer(): - if hasattr(trl, "__UNSLOTH_BACKWARDS_COMPATIBLE__"): return + try: + trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ + return + except: pass if Version(trl.__version__) <= Version("0.11.0"): return import trl.trainer From 597169c14bfe28e9484066a80a09a14e47528519 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 18:53:40 -0800 Subject: [PATCH 0752/1088] Update trainer.py --- unsloth/trainer.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 25b8999959..00956ed41b 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -201,10 +201,8 @@ def new_init(self, *args, **kwargs): def _patch_trl_trainer(): - try: - trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ - return - except: pass + import trl + if hasattr(trl, "__UNSLOTH_BACKWARDS_COMPATIBLE__"): return if Version(trl.__version__) <= Version("0.11.0"): return import trl.trainer From 11b350f7c17a6f313915fb71778b40ae35a34ddb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 19:05:15 -0800 Subject: [PATCH 0753/1088] Update 
tokenizer_utils.py --- unsloth/tokenizer_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index ed95e07632..302017d566 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -586,10 +586,10 @@ def load_correct_tokenizer( def _fix_chat_template(chat_template): - endfor = "{% endfor %}" + endfor = "{% endif %}" where = chat_template.find(endfor) if where == -1: - endfor = "{%- endfor %}" + endfor = "{%- endif %}" where = chat_template.find(endfor) if where == -1: return chat_template From 0de54572525788d09a6a9ef1efc7611e65dd7547 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 19:05:40 -0800 Subject: [PATCH 0754/1088] Bug fixes (#1288) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py * Update _utils.py * fix/transformers-unpack (#1180) * Fix DPO, ORPO (#1177) * Fix TRL * Update mistral.py * Patch processing_class * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Installation guide (#1165) * chore: update chat_templates.py (#1166) orginal -> original * Disable Flex Attention * Update tokenizer_utils.py * Update _utils.py * n_items * Update cross_entropy_loss.py * Fix DPO, ORPO * Update _utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Add warning for missing Unpack and KwargsForCausalLM in older Transformers versions --------- 
Co-authored-by: Daniel Han Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * donot upcast lm_head and embeddings to float32 (#1186) * Cleanup upcast logs (#1188) * Fix/phi-longrope (#1193) * Enhance rotary embedding handling in LlamaAttention and LongRopeRotaryEmbedding * Typo * Improve rotary embedding handling in LlamaAttention to prevent errors with short KV cache * Update llama.py * Update llama.py --------- Co-authored-by: Daniel Han * Update transformers * Unk token issues * Update _utils.py * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert "Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. 
* Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling 
* Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py --------- Co-authored-by: timothelaborie <97834767+timothelaborie@users.noreply.github.com> Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana --- README.md | 3 + unsloth/__init__.py | 7 +++ unsloth/chat_templates.py | 97 +++++++++++++++++++++++++++--- unsloth/tokenizer_utils.py | 4 +- unsloth/trainer.py | 120 +++++++++++++++++++++++++++++++++---- 5 files changed, 211 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 0a3c83f3fa..4d68d996f0 100644 --- a/README.md +++ b/README.md @@ -299,6 +299,9 @@ DPO (Direct Preference Optimization), PPO, Reward Modelling all seem to work as We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! 
```python +import os +os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Optional set GPU device ID + from unsloth import FastLanguageModel, PatchDPOTrainer from unsloth import is_bfloat16_supported PatchDPOTrainer() diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 5102d8f466..745b210208 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -31,6 +31,10 @@ # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! # Thank you for your understanding and we appreciate it immensely! + +# Fixes https://github.com/unslothai/unsloth/issues/1266 +os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" + if "CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" devices = os.environ["CUDA_VISIBLE_DEVICES"] @@ -172,3 +176,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 from .chat_templates import * from .tokenizer_utils import * from .trainer import * + +# Patch TRL trainers for backwards compatibility +_patch_trl_trainer() diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index b254202c75..da10f7e003 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -39,6 +39,7 @@ train_on_responses_only, ) CHAT_TEMPLATES = {} +DEFAULT_SYSTEM_MESSAGE = {} # =========================================== Unsloth # Unsloth efficient template leverages from Zephyr @@ -48,7 +49,7 @@ "{{ messages[0]['content'] + '\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'You are a helpful assistant to the user\n' }}"\ + "{{ '{system_message}' + '\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -80,6 +81,7 @@ unsloth_eos_token = "eos_token" CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False, unsloth_ollama,) +DEFAULT_SYSTEM_MESSAGE["unsloth"] = "You are a helpful assistant to the user" pass # 
=========================================== Zephyr @@ -116,6 +118,7 @@ zephyr_eos_token = "eos_token" CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False, zephyr_ollama,) +DEFAULT_SYSTEM_MESSAGE["zephyr"] = None # No system message in Zephyr pass # =========================================== ChatML @@ -153,6 +156,7 @@ chatml_eos_token = "<|im_end|>" CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True, chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["chatml"] = None # No system message in ChatML pass # =========================================== Mistral-1 @@ -193,6 +197,7 @@ mistral_eos_token = "eos_token" CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False, mistral_ollama,) +DEFAULT_SYSTEM_MESSAGE["mistral"] = None # No system message in Mistral pass # =========================================== Llama-2 @@ -234,6 +239,7 @@ llama_eos_token = "eos_token" CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False, llama_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama"] = None # No system message in Llama pass # =========================================== Vicuna @@ -244,7 +250,7 @@ "{{ messages[0]['content'] + ' ' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\'s questions.' + ' ' }}"\ + "{{ '{system_message}' + ' ' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -273,6 +279,7 @@ vicuna_eos_token = "eos_token" CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False, vicuna_ollama,) +DEFAULT_SYSTEM_MESSAGE["vicuna"] = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions." 
pass # =========================================== Vicuna Old @@ -283,7 +290,7 @@ "{{ messages[0]['content'] + '\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\\'s questions.' + '\n' }}"\ + "{{ '{system_message}' + '\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -315,6 +322,10 @@ vicuna_old_eos_token = "eos_token" CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False, vicuna_old_ollama,) +DEFAULT_SYSTEM_MESSAGE["vicuna_old"] = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\\'s questions." + +CHAT_TEMPLATES["vicuna old"] = CHAT_TEMPLATES["vicuna_old"] +DEFAULT_SYSTEM_MESSAGE["vicuna old"] = DEFAULT_SYSTEM_MESSAGE["vicuna_old"] pass # =========================================== Alpaca multi turn @@ -325,7 +336,7 @@ "{{ messages[0]['content'] + '\n\n' }}"\ "{% set loop_messages = messages[1:] %}"\ "{% else %}"\ - "{{ 'Below are some instructions that describe some tasks. Write responses that appropriately complete each request.\n\n' }}"\ + "{{ '{system_message}' + '\n\n' }}"\ "{% set loop_messages = messages %}"\ "{% endif %}"\ "{% for message in loop_messages %}"\ @@ -362,6 +373,7 @@ alpaca_eos_token = "eos_token" CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False, alpaca_ollama,) +DEFAULT_SYSTEM_MESSAGE["alpaca"] = "Below are some instructions that describe some tasks. Write responses that appropriately complete each request." 
pass # =========================================== Gemma @@ -372,7 +384,7 @@ "{{ bos_token }}"\ "{% if messages[0]['role'] == 'system' %}"\ "{{'user\n' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '\n'}}"\ - "{% set loop_messages = messages[2:] %}"\ + "{% set messages = messages[2:] %}"\ "{% endif %}"\ "{% for message in messages %}"\ "{% if message['role'] == 'user' %}"\ @@ -407,6 +419,7 @@ gemma_eos_token = "" CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True, gemma_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma"] = None # No system message in Gemma pass # =========================================== Gemma with ChatML instead @@ -437,6 +450,7 @@ "<|im_end|>", ) CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True, gemma_chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma_chatml"] = None # No system message in Gemma pass # =========================================== Gemma 2 @@ -446,12 +460,14 @@ gemma2_ollama = gemma_ollama + "PARAMETER num_ctx 4096\n" gemma2_eos_token = "" CHAT_TEMPLATES["gemma2"] = (gemma2_template, gemma2_eos_token, True, gemma2_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma2"] = None # No system message in Gemma 2 # =========================================== Gemma 2 with ChatML instead gemma2_chatml_template = gemma_chatml_template gemma2_chatml_ollama = gemma_chatml_ollama + "PARAMETER num_ctx 4096\n" gemma2_chatml_eos_token = gemma_chatml_eos_token CHAT_TEMPLATES["gemma2_chatml"] = (gemma2_chatml_template, gemma2_chatml_eos_token, True, gemma2_chatml_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma2_chatml"] = None # No system message in Gemma 2 pass # =========================================== Llama-3 @@ -491,7 +507,12 @@ ''' llama3_template_eos_token = "eos_token" + CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False, llama3_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-3"] = None # No system message in Llama-3 + +CHAT_TEMPLATES["llama3"] = (llama3_template, 
llama3_template_eos_token, False, llama3_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama3"] = None # No system message in Llama-3 pass @@ -532,8 +553,13 @@ phi3_template_eos_token = "<|end|>" CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,) +DEFAULT_SYSTEM_MESSAGE["phi-3"] = None # No system message in Phi-3 + CHAT_TEMPLATES["phi-35"] = CHAT_TEMPLATES["phi-3"] +DEFAULT_SYSTEM_MESSAGE["phi-35"] = None # No system message in Phi-3.5 + CHAT_TEMPLATES["phi-3.5"] = CHAT_TEMPLATES["phi-3"] +DEFAULT_SYSTEM_MESSAGE["phi-3.5"] = None # No system message in Phi-3.5 pass # =========================================== Llama-3.1 @@ -573,7 +599,7 @@ {%- set system_message = messages[0]['content'] %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "" %} + {%- set system_message = "{system_message}" %} {%- endif %} {#- System message + builtin tools #} @@ -729,7 +755,10 @@ llama31_template_eos_token = "eos_token" CHAT_TEMPLATES["llama-3.1"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-3.1"] = "" # Llama3.1 default system message is empty + the dates + CHAT_TEMPLATES["llama-31"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) +DEFAULT_SYSTEM_MESSAGE["llama-31"] = "" # Llama3.1 default system message is empty + the dates pass @@ -751,7 +780,7 @@ {%- if messages[0][\'role\'] == \'system\' %} {{- \'<|im_start|>system\\n\' + messages[0][\'content\'] + \'<|im_end|>\\n\' }} {%- else %} - {{- \'<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n\' }} + {{- \'<|im_start|>system\\n{system_message}<|im_end|>\\n\' }} {%- endif %}\n{%- endif %}\n{%- for message in messages %} {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %} {{- \'<|im_start|>\' + message.role + \'\\n\' + message.content + \'<|im_end|>\' + \'\\n\' }} @@ -847,10 +876,53 @@ ''' qwen25_template_eos_token = "eos_token" +qwen25_default_system_message = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant." CHAT_TEMPLATES["qwen-2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen-2.5"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen-25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen-25"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen25"] = qwen25_default_system_message # No system message in Qwen 2.5 + CHAT_TEMPLATES["qwen2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,) +DEFAULT_SYSTEM_MESSAGE["qwen2.5"] = qwen25_default_system_message # No system message in Qwen 2.5 +pass + +def _change_system_message(template: str, type_chat_template: str, system_message: str = None): + system_message_pattern = r"\{system_message\}" + + # For predefined templates, check if default system message exists + default_system_message = DEFAULT_SYSTEM_MESSAGE.get(f"{type_chat_template}", None) + if default_system_message is None: + if system_message is not None: + logger.warning_once( + f"Unsloth: You tried to change the system message for {type_chat_template}, " + "but it doesn't have a default system message. " + "You need to manually add the system message in your data." 
+ ) + return template, system_message + pass + + # For custom templates + if type_chat_template is None: + has_placeholder = re.search(system_message_pattern, template) is not None + + if has_placeholder: + if system_message is None: + raise ValueError("Unsloth: You need to provide a system message for custom templates.") + new_template = re.sub(system_message_pattern, system_message, template) + return new_template, system_message + + return template, system_message + pass + + # For predefined templates with default system message + message_to_use = system_message if system_message is not None else default_system_message + new_template = re.sub(system_message_pattern, message_to_use, template) + + return new_template, message_to_use pass @@ -886,14 +958,20 @@ def get_chat_template( old_padding_side = tokenizer.padding_side same_padding_token = False - + type_chat_template = None + if type(chat_template) in (list, tuple,): + # For changing system message later + # Since it's not supported yet, we will raise an error first! 
+ type_chat_template = chat_template[0].lower() chat_template, stop_word = chat_template assert(type(chat_template) is str) assert(type(stop_word) is str) ollama_modelfile = None elif type(chat_template) is str: + # For changing system message later + type_chat_template = chat_template.lower() chat_template, stop_word, yes_map_eos_token, ollama_modelfile = CHAT_TEMPLATES[chat_template] @@ -1052,6 +1130,9 @@ def get_chat_template( else: chat_template = new_chat_template pass + + chat_template, system_message = _change_system_message(chat_template, type_chat_template, system_message) + tokenizer.chat_template = chat_template # Also fix up other tokens diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index ed95e07632..302017d566 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -586,10 +586,10 @@ def load_correct_tokenizer( def _fix_chat_template(chat_template): - endfor = "{% endfor %}" + endfor = "{% endif %}" where = chat_template.find(endfor) if where == -1: - endfor = "{%- endfor %}" + endfor = "{%- endif %}" where = chat_template.find(endfor) if where == -1: return chat_template diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 25bb434023..00956ed41b 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -12,9 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings from dataclasses import dataclass, field from typing import Optional +from functools import wraps +import trl +import inspect from trl import SFTTrainer try: from trl import SFTConfig as TrainingArguments @@ -24,30 +28,38 @@ from . 
import is_bfloat16_supported from unsloth_zoo.training_utils import unsloth_train as _unsloth_train from packaging.version import Version +import dataclasses + +__all__ = [ + "UnslothTrainingArguments", + "UnslothTrainer", + "unsloth_train", + "_patch_trl_trainer", +] # Unsloth gradient accumulation fix: from transformers import __version__ as transformers_version if Version(transformers_version) > Version("4.45.2"): - def unsloth_train(trainer): - return trainer.train() + def unsloth_train(trainer, *args, **kwargs): + return trainer.train(*args, **kwargs) pass else: - def unsloth_train(trainer): + def unsloth_train(trainer, *args, **kwargs): + if len(args) != 0 or len(kwargs) != 0: + raise RuntimeError( + "Unsloth: Our custom gradient accumulation fixed trainer does not support other arguments.\n"\ + "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"\ + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`' + ) print( "Unsloth: Using our custom gradient accumulation fixed trainer, which is not feature complete.\n"\ "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"\ - '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git"`' + '`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`' ) return _unsloth_train(trainer) pass pass -__all__ = [ - "UnslothTrainingArguments", - "UnslothTrainer", - "unsloth_train", -] - @dataclass class UnslothTrainingArguments(TrainingArguments): @@ -119,3 +131,91 @@ def create_optimizer(self): return self.optimizer pass pass + +# From `trl>=0.13.0`, they changed how to pass several params to the trainer +# We need to patch to make the transition smooth +def create_backwards_compatible_trainer(trainer_class, config_class): + original_init = trainer_class.__init__ + + @wraps(original_init) + def new_init(self, 
*args, **kwargs): + # All Trainer tokenizer are now called processing_class + trainer_params = set(inspect.signature(original_init).parameters.keys()) + + if "processing_class" in trainer_params and "tokenizer" in kwargs: + kwargs["processing_class"] = kwargs.pop("tokenizer") + pass + + if ("args" in kwargs) and (Version(trl.__version__) >= Version("0.13.0.dev0")): + training_args = kwargs.pop("args", None) + + # Get parameters that Trainer.__init__ actually expects + trainer_params.remove('self') + trainer_params.remove('args') + + # Get fields that should be passed to Config init + config_fields = { + field.name: field for field in dataclasses.fields(config_class) + if field.init + } + + # Create config dict with valid fields from training_args + config_dict = { + name: getattr(training_args, name) + for name in config_fields + if hasattr(training_args, name) + } + + # Get parameters that exist in Config but not in TrainingArguments + moved_params = \ + set(inspect.signature(config_class) .parameters.keys()) - \ + set(inspect.signature(TrainingArguments).parameters.keys()) + + # Separate kwargs into trainer kwargs and config kwargs + trainer_kwargs = {} + additional_config_kwargs = {} + + for key, value in kwargs.items(): + if key in trainer_params: trainer_kwargs[key] = value + elif key in moved_params or key in config_fields: + additional_config_kwargs[key] = value + else: + additional_config_kwargs[key] = value + pass + pass + + # Update config_dict with additional kwargs + config_dict.update(additional_config_kwargs) + + # Create Config with all the collected parameters + config = config_class(**config_dict) + + # Reconstruct kwargs for Trainer + kwargs = trainer_kwargs + kwargs["args"] = config + pass + original_init(self, *args, **kwargs) + pass + return new_init +pass + + +def _patch_trl_trainer(): + import trl + if hasattr(trl, "__UNSLOTH_BACKWARDS_COMPATIBLE__"): return + if Version(trl.__version__) <= Version("0.11.0"): return + + import trl.trainer + 
trl_classes = dir(trl.trainer) + + non_convertable_trainer = set(["PPOv2", "AlignProp"]) + trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) - non_convertable_trainer + trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer + trl_classes = list(trl_trainers & trl_configs) + + for x in trl_classes: + exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + pass + + trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ = True +pass From f26d4e739ed507de7a9088da53d10fd02f58d160 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 13 Nov 2024 19:07:26 -0800 Subject: [PATCH 0755/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5d6f7921a6..daa81d97ac 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.6" +__version__ = "2024.11.7" __all__ = [ "prepare_model_for_kbit_training", From 3b11ae7dd2e352775177835b12024a7fe7cf447b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 14 Nov 2024 01:11:16 -0800 Subject: [PATCH 0756/1088] Update llama.py --- unsloth/models/llama.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 47a57024a2..f4609b81e9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -738,6 +738,10 @@ def LlamaModel_fast_forward( past_key_values_length, sliding_window = None, ) + # Fixes https://github.com/unslothai/unsloth/issues/853 + # Unsloth needs a 2D mask, not a [2, 1, n, n] mask! + if self.SWA_mask.dim() == 4: self.SWA_mask = self.SWA_mask[0][0] + if self. GA_mask.dim() == 4: self. GA_mask = self. 
GA_mask[0][0] elif not hasattr(self, "SWA_mask"): if HAS_FLEX_ATTENTION: # Use Flex Attention instead! From 5eb971fa4ed7070d08ef854df4e4f213adab7b83 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 14 Nov 2024 01:26:13 -0800 Subject: [PATCH 0757/1088] Fix #853 --- unsloth/models/llama.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f4609b81e9..afff9ad9eb 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -719,29 +719,33 @@ def LlamaModel_fast_forward( pass # Gemma2 has alternating SWA and global attn + use_static_mask = True + dynamic_SWA_mask = None + dynamic_GA_mask = None if IS_GEMMA2: if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: self.SWA_mask = True self.GA_mask = False elif attention_mask is not None: - self.SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + + # Fixes https://github.com/unslothai/unsloth/issues/853 + # Unsloth needs a 2D mask, not a [2, 1, n, n] mask! + dynamic_SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window = self.config.sliding_window, - ) - self.GA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + )[0][0] + dynamic_GA_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window = None, - ) - # Fixes https://github.com/unslothai/unsloth/issues/853 - # Unsloth needs a 2D mask, not a [2, 1, n, n] mask! - if self.SWA_mask.dim() == 4: self.SWA_mask = self.SWA_mask[0][0] - if self. GA_mask.dim() == 4: self. GA_mask = self. GA_mask[0][0] + )[0][0] + use_static_mask = False + elif not hasattr(self, "SWA_mask"): if HAS_FLEX_ATTENTION: # Use Flex Attention instead! 
@@ -776,7 +780,12 @@ def LlamaModel_fast_forward( past_key_value = past_key_values[idx] if past_key_values is not None else None mask = causal_mask - if IS_GEMMA2: mask = self.SWA_mask if (idx % 2 == 0) else self.GA_mask + if IS_GEMMA2: + if (idx % 2 == 0): + mask = self.SWA_mask if use_static_mask else dynamic_SWA_mask + else: + mask = self. GA_mask if use_static_mask else dynamic_GA_mask + pass if offloaded_gradient_checkpointing: hidden_states = Unsloth_Offloaded_Gradient_Checkpointer.apply( From a146521a50011f32d9366ab1a93e2d5d642b2a84 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Fri, 15 Nov 2024 05:07:29 +0400 Subject: [PATCH 0758/1088] fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han --- unsloth/trainer.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 00956ed41b..14fd1631df 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -20,11 +20,6 @@ import trl import inspect from trl import SFTTrainer -try: - from trl import SFTConfig as TrainingArguments -except: - from transformers import TrainingArguments -pass from . 
import is_bfloat16_supported from unsloth_zoo.training_utils import unsloth_train as _unsloth_train from packaging.version import Version @@ -60,7 +55,11 @@ def unsloth_train(trainer, *args, **kwargs): pass pass - +try: + from trl import SFTConfig as TrainingArguments +except: + from transformers import TrainingArguments +pass @dataclass class UnslothTrainingArguments(TrainingArguments): embedding_learning_rate : Optional[float] = field( @@ -134,7 +133,7 @@ def create_optimizer(self): # From `trl>=0.13.0`, they changed how to pass several params to the trainer # We need to patch to make the transition smooth -def create_backwards_compatible_trainer(trainer_class, config_class): +def _backwards_compatible_trainer(trainer_class, config_class): original_init = trainer_class.__init__ @wraps(original_init) @@ -167,6 +166,7 @@ def new_init(self, *args, **kwargs): } # Get parameters that exist in Config but not in TrainingArguments + from transformers import TrainingArguments moved_params = \ set(inspect.signature(config_class) .parameters.keys()) - \ set(inspect.signature(TrainingArguments).parameters.keys()) @@ -207,14 +207,13 @@ def _patch_trl_trainer(): import trl.trainer trl_classes = dir(trl.trainer) - - non_convertable_trainer = set(["PPOv2", "AlignProp"]) - trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) - non_convertable_trainer - trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer + trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) + trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) trl_classes = list(trl_trainers & trl_configs) for x in trl_classes: - exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + try: exec(f"trl.{x}Trainer.__init__ = _backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + except: continue pass 
trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ = True From 74382dea479aecb830d553da8c2edef1f66f5b3b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 12:18:47 -0800 Subject: [PATCH 0759/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4b22f8c3e5..3b54604a0b 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -57,6 +57,7 @@ def _rms_layernorm_forward( @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, + dX, dX_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, @@ -78,6 +79,9 @@ def _rms_layernorm_backward( X += row_idx * X_row_stride r += row_idx * r_row_stride + if GEMMA: dX += row_idx * dY_row_stride + else: dX = dY + dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) @@ -91,7 +95,7 @@ def _rms_layernorm_backward( rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) - tl.store(dY + col_offsets, output, mask = mask) + tl.store(dX + col_offsets, output, mask = mask) pass @@ -172,9 +176,11 @@ def backward(ctx, dY : torch.Tensor): n_cols : int n_rows, n_cols = dY.shape # dW = X + dX = torch.empty_like(dY, device = "cuda:0") if ctx.GEMMA else dY _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), + dX, dX.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), From a6b8dda9941951e522059b7d2696fd31eb35a35d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 13:01:54 -0800 Subject: [PATCH 0760/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py 
index 3b54604a0b..4b22f8c3e5 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -57,7 +57,6 @@ def _rms_layernorm_forward( @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, - dX, dX_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, @@ -79,9 +78,6 @@ def _rms_layernorm_backward( X += row_idx * X_row_stride r += row_idx * r_row_stride - if GEMMA: dX += row_idx * dY_row_stride - else: dX = dY - dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) @@ -95,7 +91,7 @@ def _rms_layernorm_backward( rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) - tl.store(dX + col_offsets, output, mask = mask) + tl.store(dY + col_offsets, output, mask = mask) pass @@ -176,11 +172,9 @@ def backward(ctx, dY : torch.Tensor): n_cols : int n_rows, n_cols = dY.shape # dW = X - dX = torch.empty_like(dY, device = "cuda:0") if ctx.GEMMA else dY _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), - dX, dX.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), From 82e44662d92957077933f16b87ace0ddd33fca86 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 13:55:11 -0800 Subject: [PATCH 0761/1088] Gemma --- unsloth/kernels/rms_layernorm.py | 8 +++++++- unsloth/models/gemma2.py | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4b22f8c3e5..3b54604a0b 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -57,6 +57,7 @@ def _rms_layernorm_forward( @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, + dX, dX_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, @@ -78,6 +79,9 @@ def _rms_layernorm_backward( X += row_idx * X_row_stride r += row_idx 
* r_row_stride + if GEMMA: dX += row_idx * dY_row_stride + else: dX = dY + dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) @@ -91,7 +95,7 @@ def _rms_layernorm_backward( rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) - tl.store(dY + col_offsets, output, mask = mask) + tl.store(dX + col_offsets, output, mask = mask) pass @@ -172,9 +176,11 @@ def backward(ctx, dY : torch.Tensor): n_cols : int n_rows, n_cols = dY.shape # dW = X + dX = torch.empty_like(dY, device = "cuda:0") if ctx.GEMMA else dY _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), + dX, dX.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 4eb9d64313..872824b11a 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -207,7 +207,7 @@ def Gemma2DecoderLayer_fast_forward( hidden_states += residual else: residual = hidden_states - hidden_states = fast_rms_layernorm_gemma2_compiled(self.input_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states, gemma = True) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, causal_mask=causal_mask, @@ -218,14 +218,14 @@ def Gemma2DecoderLayer_fast_forward( use_cache=use_cache, padding_mask=padding_mask, ) - hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_attention_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states, gemma = True) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm_gemma2_compiled(self. 
pre_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self. pre_feedforward_layernorm, hidden_states, gemma = True) hidden_states = self.mlp(hidden_states) - hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.post_feedforward_layernorm, hidden_states, gemma = True) hidden_states = residual + hidden_states pass From 50b0aba3d06339183d80a12b95cdeec8142e7e90 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 15:00:28 -0800 Subject: [PATCH 0762/1088] Update rms_layernorm.py --- unsloth/kernels/rms_layernorm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 3b54604a0b..b74d636c63 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -190,7 +190,7 @@ def backward(ctx, dY : torch.Tensor): BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) - dX = dY.view(*shape) + dX = dX.view(*shape) return dX, None, None, None pass pass From 9773fee4b8e899138dc7b56ea98d2a24676e8527 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 15:18:38 -0800 Subject: [PATCH 0763/1088] Update gemma2.py --- unsloth/models/gemma2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 872824b11a..62ecb9690f 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -60,8 +60,7 @@ from flash_attn import flash_attn_func # [TODO] We must randomnly use torch.compile? -# I checked the gradients and formulas and I'm sure it's correct. -# I'm stumped :( +# Gemma 2 uses double RMS Layernorms, so the backward passes should not overwrite the gradients! 
@torch.compile(fullgraph = False, dynamic = True, options = torch_compile_options) def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): old_dtype = X.dtype From 1a3d2d585929bf1a85c44a1abded112fdbb6a94a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 16 Nov 2024 23:53:46 -0800 Subject: [PATCH 0764/1088] Cut Cross Entropy --- unsloth/__init__.py | 2 +- unsloth/models/llama.py | 122 +++++++++++++++++++++++----------------- 2 files changed, 70 insertions(+), 54 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 745b210208..7e5e7192ff 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -55,7 +55,7 @@ pass # Reduce VRAM usage by reducing fragmentation -os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,roundup_power2_divisions:[64:64,256:32,>:16]" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index afff9ad9eb..cf1b5e4b9a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -917,6 +917,9 @@ def LlamaModel_fast_forward_inference( pass +from cut_cross_entropy import linear_cross_entropy + + def CausalLM_fast_forward(fast_forward_inference): def _CausalLM_fast_forward( self, @@ -968,62 +971,75 @@ def _CausalLM_fast_forward( ) pass hidden_states = outputs[0] - bsz, q_len, hd = hidden_states.shape - lm_head = self.lm_head.weight - if bsz == 1 and q_len == 1: - logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) - logits = logits.unsqueeze(0).unsqueeze(0) - elif num_logits_to_keep != 0: - logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) - else: - logits = self.lm_head(hidden_states.to(lm_head.dtype)) - pass - torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) - if torch_dtype is not None: - logits = logits.to(torch_dtype) - else: - raise TypeError("Unsloth: 
torch_dtype for models is not bfloat16, float16 or float32!") - pass + loss = linear_cross_entropy( + hidden_states, + self.lm_head.weight, + targets = labels, + ignore_index = -100, + softcap = None, + reduction = "sum", + shift = True, + filter_eps = "auto", + ) / kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + logits = None + + # bsz, q_len, hd = hidden_states.shape + # lm_head = self.lm_head.weight + # if bsz == 1 and q_len == 1: + # logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) + # logits = logits.unsqueeze(0).unsqueeze(0) + # elif num_logits_to_keep != 0: + # logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) + # else: + # logits = self.lm_head(hidden_states.to(lm_head.dtype)) + # pass - loss = None - logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - logit_scaling = getattr(self.config, "logit_scale", 0) - if labels is not None: - shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - pass + # torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) + # if torch_dtype is not None: + # logits = logits.to(torch_dtype) + # else: + # raise TypeError("Unsloth: torch_dtype for models is not bfloat16, float16 or float32!") + # pass - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - loss = fast_cross_entropy_loss( - logits = shift_logits, - labels = shift_labels, - logit_softcapping = logit_softcapping, - logit_scaling = logit_scaling, - n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), - ) - else: - if logit_scaling != 0: - if logits.requires_grad: - logits = logit_scaling * logits - else: - logits *= logit_scaling - pass - pass - if logit_softcapping != 0: - if logits.requires_grad: - logits = (1.0 / logit_softcapping) * 
logits - logits = torch.tanh(logits) - logits = logit_softcapping * logits - else: - logits *= (1.0 / logit_softcapping) - torch.tanh(logits, out = logits) - logits *= logit_softcapping - pass - pass - pass + # loss = None + # logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + # logit_scaling = getattr(self.config, "logit_scale", 0) + # if labels is not None: + # shift_logits = logits + # if not hasattr(self, "extra_ignored_labels"): + # # Fixes https://github.com/unslothai/unsloth/issues/10 + # self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + # pass + + # shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + # loss = fast_cross_entropy_loss( + # logits = shift_logits, + # labels = shift_labels, + # logit_softcapping = logit_softcapping, + # logit_scaling = logit_scaling, + # n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), + # ) + # else: + # if logit_scaling != 0: + # if logits.requires_grad: + # logits = logit_scaling * logits + # else: + # logits *= logit_scaling + # pass + # pass + # if logit_softcapping != 0: + # if logits.requires_grad: + # logits = (1.0 / logit_softcapping) * logits + # logits = torch.tanh(logits) + # logits = logit_softcapping * logits + # else: + # logits *= (1.0 / logit_softcapping) + # torch.tanh(logits, out = logits) + # logits *= logit_softcapping + # pass + # pass + # pass if not return_dict: output = (logits,) + outputs[1:] From 4f51d87949050d2c9330480b5ccd3248a2679e05 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 00:21:44 -0800 Subject: [PATCH 0765/1088] Update llama.py --- unsloth/models/llama.py | 136 ++++++++++++++++++++-------------------- 1 file changed, 67 insertions(+), 69 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cf1b5e4b9a..cfe41558e1 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -917,9 +917,6 @@ def 
LlamaModel_fast_forward_inference( pass -from cut_cross_entropy import linear_cross_entropy - - def CausalLM_fast_forward(fast_forward_inference): def _CausalLM_fast_forward( self, @@ -972,74 +969,75 @@ def _CausalLM_fast_forward( pass hidden_states = outputs[0] - loss = linear_cross_entropy( - hidden_states, - self.lm_head.weight, - targets = labels, - ignore_index = -100, - softcap = None, - reduction = "sum", - shift = True, - filter_eps = "auto", - ) / kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) - logits = None - - # bsz, q_len, hd = hidden_states.shape - # lm_head = self.lm_head.weight - # if bsz == 1 and q_len == 1: - # logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) - # logits = logits.unsqueeze(0).unsqueeze(0) - # elif num_logits_to_keep != 0: - # logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) - # else: - # logits = self.lm_head(hidden_states.to(lm_head.dtype)) - # pass + # from cut_cross_entropy import linear_cross_entropy + # loss = linear_cross_entropy( + # hidden_states, + # self.lm_head.weight, + # targets = labels, + # ignore_index = -100, + # softcap = None, + # reduction = "sum", + # shift = True, + # filter_eps = "auto", + # ) / kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + # logits = None + + bsz, q_len, hd = hidden_states.shape + lm_head = self.lm_head.weight + if bsz == 1 and q_len == 1: + logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) + logits = logits.unsqueeze(0).unsqueeze(0) + elif num_logits_to_keep != 0: + logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) + else: + logits = self.lm_head(hidden_states.to(lm_head.dtype)) + pass - # torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) - # if torch_dtype is not None: - # logits = logits.to(torch_dtype) - # else: - # raise TypeError("Unsloth: torch_dtype for models is not bfloat16, float16 or float32!") - # pass + torch_dtype = 
__DTYPE_MAP.get(self.config.torch_dtype, None) + if torch_dtype is not None: + logits = logits.to(torch_dtype) + else: + raise TypeError("Unsloth: torch_dtype for models is not bfloat16, float16 or float32!") + pass - # loss = None - # logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - # logit_scaling = getattr(self.config, "logit_scale", 0) - # if labels is not None: - # shift_logits = logits - # if not hasattr(self, "extra_ignored_labels"): - # # Fixes https://github.com/unslothai/unsloth/issues/10 - # self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - # pass - - # shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - # loss = fast_cross_entropy_loss( - # logits = shift_logits, - # labels = shift_labels, - # logit_softcapping = logit_softcapping, - # logit_scaling = logit_scaling, - # n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), - # ) - # else: - # if logit_scaling != 0: - # if logits.requires_grad: - # logits = logit_scaling * logits - # else: - # logits *= logit_scaling - # pass - # pass - # if logit_softcapping != 0: - # if logits.requires_grad: - # logits = (1.0 / logit_softcapping) * logits - # logits = torch.tanh(logits) - # logits = logit_softcapping * logits - # else: - # logits *= (1.0 / logit_softcapping) - # torch.tanh(logits, out = logits) - # logits *= logit_softcapping - # pass - # pass - # pass + loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( 
+ logits = shift_logits, + labels = shift_labels, + logit_softcapping = logit_softcapping, + logit_scaling = logit_scaling, + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None), + ) + else: + if logit_scaling != 0: + if logits.requires_grad: + logits = logit_scaling * logits + else: + logits *= logit_scaling + pass + pass + if logit_softcapping != 0: + if logits.requires_grad: + logits = (1.0 / logit_softcapping) * logits + logits = torch.tanh(logits) + logits = logit_softcapping * logits + else: + logits *= (1.0 / logit_softcapping) + torch.tanh(logits, out = logits) + logits *= logit_softcapping + pass + pass + pass if not return_dict: output = (logits,) + outputs[1:] From b18edb9733640c0c1b4a7e5f8911d8c1d5b25c39 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 14:32:41 -0800 Subject: [PATCH 0766/1088] Cut Cross Entropy --- unsloth/models/_utils.py | 7 +++++++ unsloth/models/llama.py | 39 ++++++++++++++++++++++++--------------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index daa81d97ac..adcab5e980 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -52,6 +52,9 @@ "unpatch_unsloth_gradient_checkpointing", "patch_gradient_checkpointing", "unpatch_gradient_checkpointing", + + "HAS_CUT_CROSS_ENTROPY", + "fused_linear_cross_entropy", ] import torch @@ -82,6 +85,10 @@ patch_gradient_checkpointing, unpatch_gradient_checkpointing, ) +from unsloth_zoo.loss_utils import ( + HAS_CUT_CROSS_ENTROPY, + fused_linear_cross_entropy, +) # ============================================= # Disable some warnings which can get annoying diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cfe41558e1..0256fc1830 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -969,27 +969,38 @@ def _CausalLM_fast_forward( pass hidden_states = outputs[0] - # from cut_cross_entropy import linear_cross_entropy - # loss = 
linear_cross_entropy( - # hidden_states, - # self.lm_head.weight, - # targets = labels, - # ignore_index = -100, - # softcap = None, - # reduction = "sum", - # shift = True, - # filter_eps = "auto", - # ) / kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) - # logits = None - bsz, q_len, hd = hidden_states.shape lm_head = self.lm_head.weight + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: + if HAS_CUT_CROSS_ENTROPY and labels is not None: + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + loss = fused_linear_cross_entropy( + hidden_states = hidden_states, + lm_weight = lm_head, + labels = labels, + num_items_in_batch = n_items, + logit_softcapping = logit_softcapping, + ) + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=None, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass @@ -1001,8 +1012,6 @@ def _CausalLM_fast_forward( pass loss = None - logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - logit_scaling = getattr(self.config, "logit_scale", 0) if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): From 0a5c519c8eef98d1b3e441702796c6a2082b2792 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 15:54:12 -0800 Subject: [PATCH 0767/1088] Update llama.py --- unsloth/models/llama.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/llama.py 
b/unsloth/models/llama.py index 0256fc1830..7f3be1c279 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -980,6 +980,7 @@ def _CausalLM_fast_forward( elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: + print(HAS_CUT_CROSS_ENTROPY, labels) if HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( From 59caca96e9733e6d13162f03b00da37812ffbea3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 15:58:00 -0800 Subject: [PATCH 0768/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7f3be1c279..06bbcb2057 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -980,9 +980,9 @@ def _CausalLM_fast_forward( elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: - print(HAS_CUT_CROSS_ENTROPY, labels) if HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + print(HAS_CUT_CROSS_ENTROPY, labels) loss = fused_linear_cross_entropy( hidden_states = hidden_states, lm_weight = lm_head, From 49df51f11d151b7befacccb01483932de673e647 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 16:00:49 -0800 Subject: [PATCH 0769/1088] Update llama.py --- unsloth/models/llama.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 06bbcb2057..0256fc1830 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -982,7 +982,6 @@ def _CausalLM_fast_forward( else: if HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) - print(HAS_CUT_CROSS_ENTROPY, labels) loss = 
fused_linear_cross_entropy( hidden_states = hidden_states, lm_weight = lm_head, From cc314c8d48683a8dbb410a16040ed6249ce0061e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 16:01:06 -0800 Subject: [PATCH 0770/1088] Update __init__.py --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 7e5e7192ff..745b210208 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -55,7 +55,7 @@ pass # Reduce VRAM usage by reducing fragmentation -os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,roundup_power2_divisions:[64:64,256:32,>:16]" +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: From 42a76f167351caf1657296b175b3087ccce83c3c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 16:13:13 -0800 Subject: [PATCH 0771/1088] Update __init__.py --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 745b210208..980425e1f1 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -55,7 +55,7 @@ pass # Reduce VRAM usage by reducing fragmentation -os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,roundup_power2_divisions:[64:128,256:64,>:32]" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: From 4ed6ae8726aa8545e251447bb9c0024335e3a7ae Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 16:54:44 -0800 Subject: [PATCH 0772/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index adcab5e980..e07f5ac2ce 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -55,6 +55,10 @@ "HAS_CUT_CROSS_ENTROPY", 
"fused_linear_cross_entropy", + "patch_unsloth_smart_gradient_checkpointing", + "unpatch_unsloth_smart_gradient_checkpointing", + + "patch_compiled_autograd", ] import torch @@ -73,6 +77,7 @@ patch_layernorm, patch_torch_compile, patch_model_and_tokenizer, + patch_compiled_autograd, ) from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, @@ -84,6 +89,9 @@ unsloth_gradient_checkpoint, patch_gradient_checkpointing, unpatch_gradient_checkpointing, + + patch_unsloth_smart_gradient_checkpointing, + unpatch_unsloth_smart_gradient_checkpointing, ) from unsloth_zoo.loss_utils import ( HAS_CUT_CROSS_ENTROPY, From 2fade2702960ac60cab82723db30f6168aed1be4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 16:56:26 -0800 Subject: [PATCH 0773/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e07f5ac2ce..629cee1c7e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -59,6 +59,7 @@ "unpatch_unsloth_smart_gradient_checkpointing", "patch_compiled_autograd", + "process_vision_info", ] import torch @@ -97,6 +98,9 @@ HAS_CUT_CROSS_ENTROPY, fused_linear_cross_entropy, ) +from unsloth_zoo.vision_utils import ( + process_vision_info, +) # ============================================= # Disable some warnings which can get annoying From 07ee0da423626a88450369cc496157101a9590a7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 17:12:34 -0800 Subject: [PATCH 0774/1088] Update _utils.py --- unsloth/models/_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 629cee1c7e..7243d34c3a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -57,6 +57,7 @@ "fused_linear_cross_entropy", "patch_unsloth_smart_gradient_checkpointing", "unpatch_unsloth_smart_gradient_checkpointing", + "create_gradient_checkpointing_buffer", 
"patch_compiled_autograd", "process_vision_info", @@ -93,6 +94,7 @@ patch_unsloth_smart_gradient_checkpointing, unpatch_unsloth_smart_gradient_checkpointing, + create_gradient_checkpointing_buffer, ) from unsloth_zoo.loss_utils import ( HAS_CUT_CROSS_ENTROPY, From 8eae7f94b35053220247f2509095b959eb9069fb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 20:16:33 -0800 Subject: [PATCH 0775/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7243d34c3a..599f5a202f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -61,6 +61,7 @@ "patch_compiled_autograd", "process_vision_info", + "unsloth_compile_transformers", ] import torch @@ -103,6 +104,9 @@ from unsloth_zoo.vision_utils import ( process_vision_info, ) +from unsloth_zoo.compiler import ( + unsloth_compile_transformers, +) # ============================================= # Disable some warnings which can get annoying From 6ab1d3af1b00917265db69247b91be24990310ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 20:24:10 -0800 Subject: [PATCH 0776/1088] Update _utils.py --- unsloth/models/_utils.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 599f5a202f..e709c8263c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -105,7 +105,8 @@ process_vision_info, ) from unsloth_zoo.compiler import ( - unsloth_compile_transformers, + get_transformers_model_type, + unsloth_compile_transformers as _unsloth_compile_transformers, ) # ============================================= @@ -1072,3 +1073,40 @@ def patch_tokenizer(model, tokenizer): model.config.update({"unsloth_version" : __version__}) return model, tokenizer pass + + +def unsloth_compile_transformers( + model_name, + token = None, + revision = None, + trust_remote_code : 
bool = False, + sdpa_causal_only : bool = False, + sdap_bool_masks : bool = True, + sdpa_gqa_replace : bool = True, + sdpa_disable_compile : bool = True, + remove_causal_masks : bool = True, + import_from_cache : bool = False, + compile_functions : bool = True, + fuse_lm_head : bool = True, +): + model_types = get_transformers_model_type( + model_name = model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + for model_type in model_types: + _unsloth_compile_transformers( + model_type = model_type, + sdpa_causal_only = sdpa_causal_only, + sdap_bool_masks = sdap_bool_masks, + sdpa_gqa_replace = sdpa_gqa_replace, + sdpa_disable_compile = sdpa_disable_compile, + remove_causal_masks = remove_causal_masks, + import_from_cache = import_from_cache, + compile_functions = compile_functions, + fuse_lm_head = fuse_lm_head, + ) + pass + return +pass From d5c1c171c72924b2ca1ced54714b9404c9f5ebb6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 21:31:17 -0800 Subject: [PATCH 0777/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e709c8263c..d559232d1a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1084,7 +1084,7 @@ def unsloth_compile_transformers( sdap_bool_masks : bool = True, sdpa_gqa_replace : bool = True, sdpa_disable_compile : bool = True, - remove_causal_masks : bool = True, + disable_causal_masks : bool = True, import_from_cache : bool = False, compile_functions : bool = True, fuse_lm_head : bool = True, @@ -1102,7 +1102,7 @@ def unsloth_compile_transformers( sdap_bool_masks = sdap_bool_masks, sdpa_gqa_replace = sdpa_gqa_replace, sdpa_disable_compile = sdpa_disable_compile, - remove_causal_masks = remove_causal_masks, + disable_causal_masks = disable_causal_masks, import_from_cache = import_from_cache, compile_functions = compile_functions, fuse_lm_head = 
fuse_lm_head, From 4abf3deed404cd97bf10ec3c5c53651b19d70372 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 22:01:07 -0800 Subject: [PATCH 0778/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d559232d1a..1d22b4afb0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1096,6 +1096,7 @@ def unsloth_compile_transformers( trust_remote_code = trust_remote_code, ) for model_type in model_types: + print("Unsloth: Automatic compiler will now patch {model_type}") _unsloth_compile_transformers( model_type = model_type, sdpa_causal_only = sdpa_causal_only, From b144ff47bf5c0a0340b4e536dcd3bdb88e89a221 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 17 Nov 2024 22:01:20 -0800 Subject: [PATCH 0779/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1d22b4afb0..4a14f9a57f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1095,8 +1095,8 @@ def unsloth_compile_transformers( revision = revision, trust_remote_code = trust_remote_code, ) + print(f"Unsloth: Automatic compiler will now patch {model_types}") for model_type in model_types: - print("Unsloth: Automatic compiler will now patch {model_type}") _unsloth_compile_transformers( model_type = model_type, sdpa_causal_only = sdpa_causal_only, From b9b7a5bf6606e0392e5b8cb093cb7167c49fbf35 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 18 Nov 2024 12:18:34 -0800 Subject: [PATCH 0780/1088] Update mapper.py --- unsloth/models/mapper.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index d4f1278e1d..627aab2054 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -409,12 +409,12 @@ "Qwen/Qwen2.5-Coder-32B", ), 
"unsloth/Qwen2.5-Coder-0.5B-Instruct-bnb-4bit" : ( - "unsloth/Qwen2.5-Coder-Instruct-0.5B", - "Qwen/Qwen2.5-Coder-Instruct-0.5B", + "unsloth/Qwen2.5-Coder-0.5B-Instruct", + "Qwen/Qwen2.5-Coder-0.5B-Instruct", ), "unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : ( - "unsloth/Qwen2.5-Coder-Instruct-1.5B", - "Qwen/Qwen2.5-Coder-Instruct-1.5B", + "unsloth/Qwen2.5-Coder-1.5B-Instruct", + "Qwen/Qwen2.5-Coder-1.5B-Instruct", ), "unsloth/Qwen2.5-Coder-3B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-3B-Instruct", From 9f93c49ee7405f6570c9dbff3acba6e68364191f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 02:25:22 -0800 Subject: [PATCH 0781/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4a14f9a57f..ec84c29fbd 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1081,7 +1081,7 @@ def unsloth_compile_transformers( revision = None, trust_remote_code : bool = False, sdpa_causal_only : bool = False, - sdap_bool_masks : bool = True, + sdpa_bool_masks : bool = True, sdpa_gqa_replace : bool = True, sdpa_disable_compile : bool = True, disable_causal_masks : bool = True, @@ -1100,7 +1100,7 @@ def unsloth_compile_transformers( _unsloth_compile_transformers( model_type = model_type, sdpa_causal_only = sdpa_causal_only, - sdap_bool_masks = sdap_bool_masks, + sdpa_bool_masks = sdpa_bool_masks, sdpa_gqa_replace = sdpa_gqa_replace, sdpa_disable_compile = sdpa_disable_compile, disable_causal_masks = disable_causal_masks, From d00dc529021b848507966f3c02659117e76b8206 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 03:45:23 -0800 Subject: [PATCH 0782/1088] Update _utils.py --- unsloth/models/_utils.py | 56 ++++++++++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ec84c29fbd..99d1944d85 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1077,17 +1077,25 @@ def patch_tokenizer(model, tokenizer): def unsloth_compile_transformers( model_name, - token = None, - revision = None, - trust_remote_code : bool = False, - sdpa_causal_only : bool = False, - sdpa_bool_masks : bool = True, - sdpa_gqa_replace : bool = True, - sdpa_disable_compile : bool = True, - disable_causal_masks : bool = True, - import_from_cache : bool = False, - compile_functions : bool = True, - fuse_lm_head : bool = True, + token = None, + revision = None, + trust_remote_code : bool = False, + sdpa_causal_only : bool = False, + sdpa_bool_masks : bool = True, + sdpa_gqa_replace : bool = True, + sdpa_disable_compile : bool = True, + compile_attention : bool = True, + disable_causal_masks : bool = True, + import_from_cache : bool = False, + compile_torch_modules : bool = True, + compile_custom_modules : bool = True, + compile_function_calls : bool = True, + fuse_lm_head : bool = True, + epilogue_fusion : bool = True, + max_autotune : bool = False, + shape_padding : bool = True, + cudagraphs : bool = True, + debug : bool = False, ): model_types = get_transformers_model_type( model_name = model_name, @@ -1098,15 +1106,23 @@ def unsloth_compile_transformers( print(f"Unsloth: Automatic compiler will now patch {model_types}") for model_type in model_types: _unsloth_compile_transformers( - model_type = model_type, - sdpa_causal_only = sdpa_causal_only, - sdpa_bool_masks = sdpa_bool_masks, - sdpa_gqa_replace = sdpa_gqa_replace, - sdpa_disable_compile = sdpa_disable_compile, - disable_causal_masks = disable_causal_masks, - import_from_cache = import_from_cache, - compile_functions = compile_functions, - fuse_lm_head = fuse_lm_head, + model_type, + sdpa_causal_only, + sdpa_bool_masks, + sdpa_gqa_replace, + sdpa_disable_compile, + compile_attention, + disable_causal_masks, + import_from_cache, + compile_torch_modules, + compile_custom_modules, + compile_function_calls, + fuse_lm_head, + 
epilogue_fusion, + max_autotune, + shape_padding, + cudagraphs, + debug, ) pass return From caf4cd40ca7985143ba5cdd05cc9d7d4bc280e12 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 03:46:32 -0800 Subject: [PATCH 0783/1088] Update _utils.py --- unsloth/models/_utils.py | 72 ++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 99d1944d85..f81468f80a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -684,7 +684,7 @@ def get_statistics(): ) def _prepare_backend( - self, cpu: bool = False, sagemaker_dp = False, backend: str = None, + self, cpu = False, sagemaker_dp = False, backend: str = None, ) -> tuple[str, DistributedType]: return None, DistributedType.NO pass @@ -1077,25 +1077,25 @@ def patch_tokenizer(model, tokenizer): def unsloth_compile_transformers( model_name, - token = None, - revision = None, - trust_remote_code : bool = False, - sdpa_causal_only : bool = False, - sdpa_bool_masks : bool = True, - sdpa_gqa_replace : bool = True, - sdpa_disable_compile : bool = True, - compile_attention : bool = True, - disable_causal_masks : bool = True, - import_from_cache : bool = False, - compile_torch_modules : bool = True, - compile_custom_modules : bool = True, - compile_function_calls : bool = True, - fuse_lm_head : bool = True, - epilogue_fusion : bool = True, - max_autotune : bool = False, - shape_padding : bool = True, - cudagraphs : bool = True, - debug : bool = False, + token = None, + revision = None, + trust_remote_code = False, + sdpa_causal_only = False, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_disable_compile = True, + compile_attention = True, + disable_causal_masks = True, + import_from_cache = False, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, 
+ cudagraphs = True, + debug = False, ): model_types = get_transformers_model_type( model_name = model_name, @@ -1107,22 +1107,22 @@ def unsloth_compile_transformers( for model_type in model_types: _unsloth_compile_transformers( model_type, - sdpa_causal_only, - sdpa_bool_masks, - sdpa_gqa_replace, - sdpa_disable_compile, - compile_attention, - disable_causal_masks, - import_from_cache, - compile_torch_modules, - compile_custom_modules, - compile_function_calls, - fuse_lm_head, - epilogue_fusion, - max_autotune, - shape_padding, - cudagraphs, - debug, + sdpa_causal_only = sdpa_causal_only, + sdpa_bool_masks = sdpa_bool_masks, + sdpa_gqa_replace = sdpa_gqa_replace, + sdpa_disable_compile = sdpa_disable_compile, + compile_attention = compile_attention, + disable_causal_masks = disable_causal_masks, + import_from_cache = import_from_cache, + compile_torch_modules = compile_torch_modules, + compile_custom_modules = compile_custom_modules, + compile_function_calls = compile_function_calls, + fuse_lm_head = fuse_lm_head, + epilogue_fusion = epilogue_fusion, + max_autotune = max_autotune, + shape_padding = shape_padding, + cudagraphs = cudagraphs, + debug = debug, ) pass return From 4cd14bb9eefc307d61b54ff25c0aba048783997f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 03:50:20 -0800 Subject: [PATCH 0784/1088] Update _utils.py --- unsloth/models/_utils.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f81468f80a..af1d12c293 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1107,22 +1107,22 @@ def unsloth_compile_transformers( for model_type in model_types: _unsloth_compile_transformers( model_type, - sdpa_causal_only = sdpa_causal_only, - sdpa_bool_masks = sdpa_bool_masks, - sdpa_gqa_replace = sdpa_gqa_replace, - sdpa_disable_compile = sdpa_disable_compile, - compile_attention = compile_attention, - disable_causal_masks = 
disable_causal_masks, - import_from_cache = import_from_cache, - compile_torch_modules = compile_torch_modules, + sdpa_causal_only = sdpa_causal_only, + sdpa_bool_masks = sdpa_bool_masks, + sdpa_gqa_replace = sdpa_gqa_replace, + sdpa_disable_compile = sdpa_disable_compile, + compile_attention = compile_attention, + disable_causal_masks = disable_causal_masks, + import_from_cache = import_from_cache, + compile_torch_modules = compile_torch_modules, compile_custom_modules = compile_custom_modules, compile_function_calls = compile_function_calls, - fuse_lm_head = fuse_lm_head, - epilogue_fusion = epilogue_fusion, - max_autotune = max_autotune, - shape_padding = shape_padding, - cudagraphs = cudagraphs, - debug = debug, + fuse_lm_head = fuse_lm_head, + epilogue_fusion = epilogue_fusion, + max_autotune = max_autotune, + shape_padding = shape_padding, + cudagraphs = cudagraphs, + debug = debug, ) pass return From a0e709b2149a00fc44980bd77527cc8a276ef782 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 03:53:17 -0800 Subject: [PATCH 0785/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index af1d12c293..b73b2a9bbf 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1080,13 +1080,12 @@ def unsloth_compile_transformers( token = None, revision = None, trust_remote_code = False, - sdpa_causal_only = False, + sdpa_dynamic_mask = True, sdpa_bool_masks = True, sdpa_gqa_replace = True, sdpa_disable_compile = True, compile_attention = True, disable_causal_masks = True, - import_from_cache = False, compile_torch_modules = True, compile_custom_modules = True, compile_function_calls = True, @@ -1096,6 +1095,7 @@ def unsloth_compile_transformers( shape_padding = True, cudagraphs = True, debug = False, + import_from_cache = False, ): model_types = get_transformers_model_type( model_name = model_name, @@ -1107,13 +1107,12 
@@ def unsloth_compile_transformers( for model_type in model_types: _unsloth_compile_transformers( model_type, - sdpa_causal_only = sdpa_causal_only, + sdpa_dynamic_mask = sdpa_dynamic_mask, sdpa_bool_masks = sdpa_bool_masks, sdpa_gqa_replace = sdpa_gqa_replace, sdpa_disable_compile = sdpa_disable_compile, compile_attention = compile_attention, disable_causal_masks = disable_causal_masks, - import_from_cache = import_from_cache, compile_torch_modules = compile_torch_modules, compile_custom_modules = compile_custom_modules, compile_function_calls = compile_function_calls, @@ -1123,6 +1122,7 @@ def unsloth_compile_transformers( shape_padding = shape_padding, cudagraphs = cudagraphs, debug = debug, + import_from_cache = import_from_cache, ) pass return From f92c16d9bcd562b2c8ae862c2c970419110ad1ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 03:54:35 -0800 Subject: [PATCH 0786/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b73b2a9bbf..f3d7e4a439 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1093,7 +1093,7 @@ def unsloth_compile_transformers( epilogue_fusion = True, max_autotune = False, shape_padding = True, - cudagraphs = True, + cudagraphs = False, debug = False, import_from_cache = False, ): From c7c984f72591b03a74369f7e7216136ab0efc2a2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 13:01:38 -0800 Subject: [PATCH 0787/1088] Update _utils.py --- unsloth/models/_utils.py | 96 +++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f3d7e4a439..e1fa66fe32 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -308,54 +308,60 @@ def _is_openai_available(): return False # ============================================= # Get Xformers -from xformers import 
__version__ as xformers_version -# Temporarily disable 0.0.27 and higher - inference issues -if False: #Version(xformers_version) >= Version("0.0.27"): - raise ImportError( - "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ - "then press Disconnect Runtime and then Restart it.\n"\ - "\n"\ - "%%capture\n" - "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" - '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ - '\n'\ - f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ - 'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"' - ) -pass +try: + from xformers import __version__ as xformers_version + # Temporarily disable 0.0.27 and higher - inference issues + if False: #Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + "%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ + 'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"' + ) + pass -if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.24 for torch = {torch_version}." 
- ) -elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.26 for torch = {torch_version}." - ) -elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) > Version("0.0.27"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers <= 0.0.27 for torch = {torch_version}." - ) -pass + if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.24 for torch = {torch_version}." + ) + elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.26 for torch = {torch_version}." + ) + elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) > Version("0.0.27"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers <= 0.0.27 for torch = {torch_version}." 
+ ) + pass -from xformers._cpp_lib import _register_extensions -try: - _register_extensions() # Check if C++ modules are loaded correctly -except Exception as error: - raise ImportError( - "Unsloth: Xformers was not installed correctly.\n"\ - "Please install xformers separately first.\n"\ - "Then confirm if it's correctly installed by running:\n"\ - "python -m xformers.info\n\n" - "Longer error message:\n" + str(error) - ) + from xformers._cpp_lib import _register_extensions + try: + _register_extensions() # Check if C++ modules are loaded correctly + except Exception as error: + raise ImportError( + "Unsloth: Xformers was not installed correctly.\n"\ + "Please install xformers separately first.\n"\ + "Then confirm if it's correctly installed by running:\n"\ + "python -m xformers.info\n\n" + "Longer error message:\n" + str(error) + ) + pass + import xformers.ops.fmha as xformers + xformers_attention = xformers.memory_efficient_attention +except: + xformers = None + xformers_attention = None + xformers_version = None pass -import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention # Check TRL version from trl import __version__ as trl_version From 81538c3046a7f98c7c5096c18b97c91799e4637f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 15:49:01 -0800 Subject: [PATCH 0788/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e1fa66fe32..5cdb67cfd3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1089,7 +1089,7 @@ def unsloth_compile_transformers( sdpa_dynamic_mask = True, sdpa_bool_masks = True, sdpa_gqa_replace = True, - sdpa_disable_compile = True, + sdpa_dynamic_compile = True, compile_attention = True, disable_causal_masks = True, compile_torch_modules = True, @@ -1116,7 +1116,7 @@ def unsloth_compile_transformers( sdpa_dynamic_mask = sdpa_dynamic_mask, 
sdpa_bool_masks = sdpa_bool_masks, sdpa_gqa_replace = sdpa_gqa_replace, - sdpa_disable_compile = sdpa_disable_compile, + sdpa_dynamic_compile = sdpa_dynamic_compile, compile_attention = compile_attention, disable_causal_masks = disable_causal_masks, compile_torch_modules = compile_torch_modules, From 029f5d5c268dc7e0134ce96a388fdd43d072196e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 19 Nov 2024 16:56:53 -0800 Subject: [PATCH 0789/1088] Update _utils.py --- unsloth/models/_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5cdb67cfd3..a93c443042 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1096,6 +1096,7 @@ def unsloth_compile_transformers( compile_custom_modules = True, compile_function_calls = True, fuse_lm_head = True, + gradient_checkpointing = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, @@ -1123,6 +1124,7 @@ def unsloth_compile_transformers( compile_custom_modules = compile_custom_modules, compile_function_calls = compile_function_calls, fuse_lm_head = fuse_lm_head, + gradient_checkpointing = gradient_checkpointing, epilogue_fusion = epilogue_fusion, max_autotune = max_autotune, shape_padding = shape_padding, From 0074b5c9ba08a9d40371195492aa1870c1f07822 Mon Sep 17 00:00:00 2001 From: Michael <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 20 Nov 2024 01:44:15 -0800 Subject: [PATCH 0790/1088] Add files via upload --- images/Documentation Button.png | Bin 0 -> 11802 bytes images/documentation white button.png | Bin 0 -> 11170 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Documentation Button.png create mode 100644 images/documentation white button.png diff --git a/images/Documentation Button.png b/images/Documentation Button.png new file mode 100644 index 0000000000000000000000000000000000000000..1ac31726b554ff2d36be173dc30719258b6bcea1 GIT binary patch literal 11802 
zcmYLPcRbbY`*-Y3vdRt#*=2K#tgNhL?-WAzc6gE}WE^C#$R1^HqL7`LO@(8xV>|rr zPd(q)?+@d3f9`SJ*L{umHQqeZQXwP0NsNVsMW&|uKo<)O8-;}h!zIKABYyWjqro2{ zS5+f-EG&}i&<_kNHJu&{i?3hpfxNy?`qo@<`eVraMT>*~<|?)s^s z`cUp0&zBMB($F;?Ji;_f)yKMWvv;4>JHO}kSfzMmt(0&}j2XAFmN@dw7mCE#I0eTa z29r1H)$JasH(iOlFJXzeXKXYMHVhvAIt-954&V;?74`%6;udnAgjarc*Pc4GEx^=WST9R7K_~9P;mS$Um`A z8Rk>=wUmol;0h7KwcyG~apFdDs2TYa`FvbE)TsQBAd2HBeJSR8L;en2YRQ;G4f+0v zVUf)eZ+GLBBew)T8s&k_PtZz=)~uygL*_M45zi9b?Uvn$b4`eyHN(1#V~)Sw?#i!* z>{Yi2B}EJGR^U#lN(g4#uRgK7`&s-wLr5E!&%d$Yat)H?^`Wc`GU~C3(VEroZ`sKW z^{XHl2%VAjF}W<*tYCLm_BoZ<6ld0+N5tO{%!6y~*xBUC6zQqY?gN^?F6oDE>J!Xr z#TF&JhPAw4!3iR4#xf;HPyYLP?ZKiWZpwN%?hW-a!R)$C+t8Bc1?>2bR|(*7>S)fM z$8DTF##~$`T2{i@Eaf7uB)hjh8)S+FJw$RS?TCk#9^R5aD%Pk?@35Ams3s_fIfS@i zF(LU>kQ_yKJy?6pn+PkPoZI$slj3{OD+^K&-)i=Y=TLj7z$s7b|Bf{+vBr%YWe+O} zscDxWn!E*FC%z1Cg3`98Qa>7o5w{fGbzH2tUakc0qH-=R|2R$a#{Bgs&agnr&xTxA z;lcq`oNCC3o|m{OTSlRzhN8Wk*;!b#FlllR?Rr`@BuCg}25ToDM~??*1tazuUb}lc zb?>hQUNxk?!!#_ONO;bks+`m71LKn!9Crj4LIlYN9d%9oAJ&;U4I*zt{&B=S&58D6 zlXzx6U&M}sH$o0Qu z?@7VxlUQ+>vt0S&xDyfPWKLLJTxS1*Mf=Xu^2aOdc_eQXxN*@WM6L5D`XgbgKwE?` zgu+W-pi5FT0< z7iZ-7;os?R&D(E>z8sBI_=Jo8-2SV*NR1A9m-p_(^6wavJ=m-&+Q`2RV?qzat--)aXV`+3?Vxf2=~QPd)GMVhM6ANs@CXAm&L<#IkSwJ;5zZY;U`?wa zcSXPJ9{PzJ$V(naKr(B5(A_l!BsmJNiVYm4e?{A|ju;;Cw{1VmO z)*cR=Gp6=cD$_PFZ45aCi$ogx>cTJ#?O$F^@Bs#Y5D8(eS}~)7VH7Nbus0IHG+`kY z`qZ7o6{&1*HhTLJf@@$}(QqKv6LS4k_YbF}sP1bX^%cFpBq3r<^*af_j(!V zzFOFCN8BkfT^#~61@^J!B&2GItrV2D3lz!?!-Y6!VhDG3bD%Lu{lY=KR+?q(TvC)h z&ca()Ux-4XWVP5Z_G@->Z8YMt9m~*#zb^WO-er7Kh<&EiZlO|r7vk8Di~I7o2aTv< zEi%{4g@9tHDLKI8MF-WWVVDxRA;Nd;U=~f4JHpY;Otl==A|fa?c?;qT)t*^sRCIarmQbV5BVPmOOa)P4+eih?(+CV>RSbXkT<(g;RpIpkbf@KYTw%sk>3AS} zt}m3Ap`lcT*bj=|II=C2$#Ke|7qDBWUR1Z@8sx|hW^BpNc#Z)bq4OAgUa8c>_^%kLBR6N%8f8 zCL*bOlL#xlBBzN_S@^Qdz7h}}zUShuL5{&ryxA61fzY`~M4RIlMHwF21|iU;2iYq9 zBjxW8#gZnl{ahB&^~G^usCTxTNcdRCm;}ZCvjj51RAsi?6WC`x^d!({+~AoJ0TW^r z3n#rCfD>xE0e(1KET?_%u+JV%({ 
zlJ)*A5Dml-yI`dN;!x3MhH~s@3JKwCi`cfmOkEYb%E4Re+MGHMR!aKgC~gz$;XJnc9z9mge>EUukd|t&S=Cuva9PK>9&%0SFZ7OZ8Eu&h;i6vY!%Sy{X5892zn`@f2g=_8*(aV& zd|JT{?NZRUmbbtxQoH%b!M&ALMyb_L$Oee zpl8Wn-4PTVkE4lo!HtXnUub9df~Tr|O-7bgbq8OQF)3(j&TQ#yFk_Ua782->z5Moz z3Q`2Oiyw_{&o=Ouc&aKD%E;edNF_!c!3`q;S6zRl8}%p4OvdsTp~lBNZ2CXsPN(PB zdU;O~pzG`Oz#Equo%iVc%3>+{w8whP=g$glQcU;p{`lABbPiq}tCB)g`&;*?LfLIN z7**tZA(u;RukqI5^#uk73Spb0*~+#jx>FJa^E*G?yh6k7C{mQ0etki*Lo5?$V8?Y9 zv~TL*(D?nZUqpf8#keU?o_6LL8O1wfo@!Luy~K@tjo%$ZWU`x2hYuM<-4-edq!$Vq zGw^l@JP6wPtq#e~jgEg@56@&aHkf+D%ir|75XV{)JZZ9sqoiK`oLpe%WpZ&MM`HX8 z?L2ho`{K&$W<%0*+X+10_Mgvl>(f&#*?OzaUSno!clo%%AH+MzOBMX5%j!3Ja|l490@$s8i_e8TCL!KK1#_vLql(?iywV9 z<-mj%ZR*$I6eG(O!g9d(s@1`QPkWYyYgb1u%3n-@zUxjEIcN_Pm|=&8tIYZyQ{`)B zqSTI2+rG0-#tzTM{ky! z8MnD{&W^3*u7c7pzQ>EN9)_-Ki6d*MI!zXrOa-2rLxV#>N)LXHLIsCf82~ zTr7TnE6C(8%vlEpVK3}jr}%lPh{Rf6vEmM!>obkbRj z69ku5is6F(6KK(bH~a2X<7bs-gU~jCYjkTmqcWzSak-|$uU?QvCh=3;4^+1iQfY}c z9rM`eg^a=Db5*lBbLPO!J=OL8Wf{i3(?}6Naz%%QK@aIo!S3&OY^!4I{5PL%$dDfF zV9<+JMgw$V6SaD>F*add(cG^ws&CF#kK_vSN**m4p*#KSkGGYc7i<7C1gu>pGhLwE zm8=FMTr_vlGcO{7oRs-q?B{>*D@%^>F4xYlsAlBv&>-WGUltc{e#s-U8I_%G zy>ZgtlQ39q#OC_0Q6Jh5>8x}hOG+fP_o2zB3>)KLLHh3`{o@Ob zYhIjJ<>X79m-c768vtuUkTf@ttgd3G_xKKEyOB~ZF~3<8V*FG{+mgdboCnpP@!$hB zTI!Ohaw8w)hK!C7=1egwytr5EuBoeg9-RqgbBqpdK|S1E#-Xtabrc+Z=!>t@7EW1x zO8`vJ35(pb5wQQvC#KZM#Ck$I7&~8SE!AE96;tSBP=f;k2^t;^NUI$lBxTC@zcOC| zi~zoHMNw@2YmtiPaXN)U(YwfiF%z@7Fr~Vy10BAW<&{_0CEb8YH#*#MQ3pOI;8nQBBm5TanFLH58RacABdKY`VwoC%SCw{_N$5f zA2c%(`Fz_dc3YkJ9yM#HW;M?XiZ*8@#$d(3ii+f0WF_I)Kji1byFb^cJnt4a7zfUO z_GLHHMuJV=b?AX?#}3#Da(G<_oO>y0zzg*V5Cis!PXsG^dM1TZ9E!Z%R|9!QXsccap8NP6(3<-$2fG<-0}xSOzlj`Xzx`@XFMuV*1GhlvxkVw z<9EM^7ThVyv8MFLpio%eqvKp*Z&Uxpf7A?fetNGL6;z+q0dr^TA!5#hDD_9vm66*e z`dJS2T!(emurptFX1tj$S59E^Nhz`|@M~*t2h^Mk z?f}8PZFo8zlTdiH6%qki+3B->cL@QT{riUDnO+0aD)k4MAD(v`Q~=VZ=ltt~zs88% zWbgu?3uJt{bX)>EgTQ_0>iC)?0RnMy_XrvLmOI;B{K84PC&OESh=zVr<_Om$%a?y6 zt+OMV%(Tg$;@53WyUW>>9FzRcSM!2d4FLF7OJw>xPJo%gKA6KtV)N#w$4OtQhDW!! 
ziZH%=Bg;|D_mMzqNthz|Zybi@8uQKg2~>bPz3};V#AXeA0lkE^4{EKG8O~b&{3^$w zg#88`oU>?)x(3(@Ti%89II8haW+OC(AMMVu-X{Pdq?jxA9YV4I3>F$SVYKiu3o!mA z7zj&kt#n@%RVn+o$(&FtqHMhB=@z@G*RoHi?=ShM{h4VxPfA^W3?)1l8~*5Gp}f&( z|1Cy%ety(`Kng9s<1Q5H=~S?>fiTic&q&KyukC&VivcD zN8$Bu1`|8K;DJ{b3Yy+4Hwx-bj|IfGdx*s9q{!{?m*zQYsP=df0`mXkrD6rOI3Dh#J_I-zgZ5tw$O@i|N2&*IQE z=FJ`wjuWy-uSVb^`7WCPrXV}4*#j2lf$>gy!$HH*#zO6tMLc-(-_?OYHXal{MguG) z5vZpFM8l-kSzF4?B&!I}Rz6L4>%8}3X&kOCr3pYhyU}FSlzUZU&qE@AdDxmtTyHoG zbN}Yir_P-xAzp(^PlS)T~SA zXIuz4kefIVH3VD)w&vh$F~#Y_MlbFjBfR(2N8f2RStIdRP@dwvGevVe@Rz0q>HFUw zjbfG#Cu7d$0B{{0tlR!>VQZ?FY?m+PQ#-cBTI6TY?6&oeI$wPpzz%m~l}|ID4o=?_ z(pt@{$FO{dFby)o4nD%>QiIbm_p_vu`e<;Qz3p{L@n(Y8y?=lOw_fwkaNtv2?P0JU z88QziL?0e7x6G(0hyo&CG^RF~i>* zfQ#nQbOz;hHb0j$ENK*NHfN%u%y%O$Tag`A zo{x}JH5D?CT5awX8N^pKNRD~LR4l|EN;YQtDo-5T{-Kz?L31|Rc}&dP z=+RM%xWqESzIkAp!!O}$e!nQ6nm*;^>c>$fzbCqauGU*t;bw*tn~ls!kGe-Cur<}A zTsvQrt4Xyw_5v-MX6)dZ8=U|kARoAisCAND@TZ!N{_R_W7(X8+-NUXAHcCn(oOj-O zXdiCsjJ9OIcF@~+xOJjK3L*@4JIgGwH|w!L*LDXs3SMVk<|M*j9PG4JsKdXEe~jcO zZE5}3akuKjbjO(gw?YA113fC>|LM1hX9qKyMzpSb-Q(me8H;B5nhmiVe|-k%?u_9# zhT%2}vB8wYqt6uF8)8lx{_ZGuDX)C9LEvlI$>RRA>CQfp@QTm{csh@C^98{@ELogdQUe1o>lO5K zjWkIK4{?GgEl*z*c5{UXGP(h)*WldyWq8A7pnq)?WM8}iKm31tfn398aUk2)1?KGN zDR8Am<3CJ63Pz*RkC8I{0hDgZ!q`e2tPYZn_tLKLj)A~8<(`$(#Qn$Mr0E)q0n|ax z?mEL!RZwSF^{8f}pQ&vb0)W%~$QrmEpu}zpZ~d*qC+0I z4S_XI@44?Q_NCNYvj%BisZJ(lESGin|F|+8fZI;p?S*}Y+e?$;w9;p|9|G$fr{F`x zN;BLt#%l%1A2?%DGo^>AE+_`#CkSp5UzR)r0qx~pzKL+EJpd*-#^S7mNcHVznOxaa zz`@GjgENX*h3o4_l|F0xkFQ$6??+`Q%5+yf1dQKmfbY9OO`vbGP4c zo%?#1*;jCgfPQ-k2r(hDni5F}l}fyhGr`*NaWg2d2he!@xSw(3V)5o|%2ZW;_@z@q zDCB_FncoeF*+Fn%Jc4|R1rBms9muBbfA6O&HvjKhCuDNxEAT>b5aa2iawv0kfhFP| zb?$NaQRtsEpXJp&odBe_0YulO3m@ZN%w#PCaAK0~j4q5ruVLOqY3m;Ix4^gVdGnd~ zhMG9{7SDH!Nl*P0y$O^U`2SxZ^F%JwC5=1x#&~80hslf{!*U>P&;#P;=RfLV&d%^z>1U*7u8#p6^XewX7T_gm0H~Dd0NxlR2uLyJ`TQf^9(LNjI%^`y#K?Rg$NRv!?uE)i4=Edj9=;yjzoEy3d z^12O+7YD-AJ83NN=nMRc;$q?4$g?~Df34Le45F3&vx0uR8`&y105sW#L++K5yC4^k 
zen8}4xJA;R*2aoPL$JNvJ~Ph1(T(~LcsAS5OjlxC9zd3c{7X`Qe)3s!8RWHS)N8D<}l+?ydvIjAReVN zXh@TFG)MWfzMlY@wkh{juF7qW*&3hcPJQJ7eghz=Zr%3AT}r78aw;VAAByl~Xoe}Q z;U4#)7n5*bRhhr}XyGG#hhW&W6{h;$90B3x+_Wgs7xP^pJ5FsP1N@&6u(eKolS_b< za6Xf|RnH7C2!V1VGBUcaLGI2KnJ@U^ENqz1i6z!%L9_qqlkqOL{B1b(Ff3CupOz*9 zN~YJl(XgwST{D3q8JkcUL+wm4PJYWDsTsB#>3jCk%md-l$rAec6?t@TFlv?t(24>~@39KdlVpnG+zN z%(V0<>A;H*qr3dUYC#r6@ezIHj9E$l1o9vt!Mw{=J{H!6HafT^<*Jo+KL)Q(AY$u5 z6UYPdH@O99u{}R4^^L2|zYkt~aQ2k^#=BVG13E$b?Tg{gBRd=LTFJs&uPaGcxpj8L zRHi`)6JHR-zF)3hWMkU;r3ENY6aX#XK+e8(N}%tU(CjsU&&)TS1&Y!-plI;8;zVdt zyXAJJP~fD?kDg!ZkP}o{Bht!a1N%znl z*BOc?Q>CdU{BF_itJD4u{5_72-c^9f0WMVl=u&D) zZRu-C@4e~_x&pP8{oi`ps{xzOgG!~`7Mz08dC`F)+ka#0LZ^$HsKm3S9>k@hrEV*f zyGQo^$Y_V>1&LQm^$8r9{H@nzI$d_g!7yszGsm)C8Z3}v*X`2bPvLI5uRU@g`1{De zd~H42f9s4=JpOgm!z=@eP=f%{xM!XZ58aJu`(p8Dvx!TE*cOUWUAbVIAI$!LYlbsg z(9pD=B1?1)Xqn+qD=|5%LIl=`Y`}i6eFg0i2fEUgJI17HZy`XMa?pD z3`0x~yxBN+V`~4_1Zz@0jpCu1+TWr6yB}?6)LgL#*f#Uo-;w#B9@k$Hq549^+$*4s zc^qA^j|Z>*Fbl$+u0Vp9!7@Jj?Wiqf~Gv) z!V(Qj0X1BLlOV7Erc8Xi{*-E%1<=?u^A#rO}SvA zC&;e>V4AK(LlUc$Jx3a#l6}|ff7R_5mgb0KBhS~{ulp=eUU|9{g9*EQSR>mYY=usjtxHlvm8}QMA?eLX) zQi$jJ{cPR%o2EFDpg=uG^VtpJg>`KtE5j;B1metV;FWx z0^7zpjW8jGm+uOc;G=->)1x-Xu zhYG(4QCL-oBx~bZa8g2b5+dd0UGZ{65Y~yXmnT%ITJw!z-C+LLX6YrK2ZF4Bl zWw=ISv@h{J2vae2-Oc1+OKyuyw)}EF?w15;Z-l@#y#uJBBteCMuySkWWjV(-f_xWS zkdtBq(yE{L@Cx7hbDo0Jk71Dq)u305gWEx!Q252>KFu)fHjc@Dy(~0FY(iM+Wd2(u zyN&FE=}}ge7n*RVYWTBw2fV>cdRzMa*Q>Jk zB6Ks=Pz~@o_%`Yp)Ehzb{v2U~eDYEkaHOO?n4>-j3>I--!89H^EUV!I@^#vyaUG$? 
ziqskTSe*s6Vgv_{WIId95YAPiD-$4LB>0-`DEzk3gAwplWI~~@#9o2fz0)iqany31 zFWJCps|W$?w||?9Do0ggm9o_8$+KOtB|*~ZxXBfrTCgcwEn+55AP z9)zNcu8W6xP$Pow_cL}^q1`7VVR?F_;1Ht4s&-zYNg(QZKM#0zB`j@5!d~gzg)vz& z2x6Uh{_V4Rc5zcYhE<(5%$gW_4!MZM5+cA*&TXO4%_m77Y$ol`1k&{TTxBQ#ge&z ztC&*z%Y|pV=vY{)|JO`rLMUwYeWBzDF?fpTt1KAp|Ju)p@qjM{Z?&*m zv76&uAsZgCz<@jDBF%~ZeG2;7xQ}5iAqCgV6(KH9X~dpxtJg>OBwEW6{7@8JS(av8 z24=7*p;|egR+_8vEF&`aIfhB<$xKFp z`bB*A$2HnE4Rtac>1E5tVZ_RZt>Q>g{b~4H+QO36j9s4BsDIadCkc^RnPM5xZdx8IiNE%l<&6SU{ z8^PQriz^>^&sF&k!|gUWL**_hKk_XZKTL(mw4K2^)66j;6dWee;BF=Jc%nFtY33$K zzJdVy;rLOa6JAStS0UGjf6m?`-X;7B=$fsriPzdve;LI@W&J_Aq-*oyoboV&{4t*TAxpAZBj(cH>M4h{|(LjI{L4i4Tj4h}9pF(Ft%^0)4S zKP2|@I!-t^a4P5*7bhW!76*q@9r08~-92ez*4K-E&wJ@_tO1eyA5ji+w)adVyoOS! z%KYaelyn0JFP?`ySuu#5(z$1IN7HBbKZcG?W)_0sXErp|oE(Lj}#WY}ukTg>k8ibUop0^R6m1e+5OWgz-W6-WM+K{)+iwn9YKT|*)t!V^mO z>+V$UX3kh&bP!qvMI)CB%N$jV=FxCSG?y;r=z=Zb!BI}AO1a#6X|>NTh={D!hj_a% zDca;_qIFmb;hcOqx9ltFQo}9$7@Q!~4)Nn7@mF#QvS4GvE*x{yzibY+PT)#aHBi_~ zJ5>0LPv&;Zo82W5{Mi>Qh=_=7MgJT7v*_QMQbel7@+e%1=UaH1II+QJ;;iFVU<2lM z8EhGk5!X<;HmTE8y0l0Z-|vLdU(>O%P(C8d%7u2Bjd;xffA+OHF5&Kmx9$3NC0hV2zeQ+n{~3^c42^(o{D1F>)9ye_M#m`?HDoGTzMFUcJ@oTrli^ zb1<*aG_fEdAB6~(jx{oWTb@)RL;3oc0uyPeskfL)X}azGC~N2rZc;EoSS=0@xZwE1 zG$M1HRQxkacembZ`3M$)1RB2Ii7Se%=C~{0v6&xJ1vY&@d;2K!cMC8bk zFI!xjL7a$L(%c1C$P`z4=s!m>!p%Z9I?8>Upo_{Y;Ge;jB|zl(ay!1x2G1KbuGp<& zf>SA~U7Lhd*E`?~shL!3lmB^w>z(M4dtA$Wempo~#=^0HJk}i&-kD_l_>xV5$8fAF zX3LLnl=~?-V&-w<&;;|h3XV{fs7)KZO)L1<=wXs#1k3n4K9Y)At}%!CU8IdnDtv}W z|H}muh5rmOj{cIXF4>HFL+2kcWPBIfOJ&iKXbq0ZYmG=N87V)^qMyL!O+HWI>BGLJ zxZZKdi$(cB0)2P+L0z20i`t!g<8iMBg@L|L5Qgj#1WBNT;;L!eyh7s_$8n_e`IR!5 zFvrGYAh@W8NaBj079-qQ^wm=x3pQv2bl2r2G*%SRxqhj+2J-6Fv_tbGZ(-cZjaLy7 zEE?6%P#Wk$OBzbqVuD>hlrP?mEQ61b60}KT-15>BpIPTqU|(YrX$M#l5!7>d7(M*o zIg{^oNtXLiU8sYc%gqBE_&WdEs^BDULC z?o&IJ3%uiB*)THxgqQ-2e%w>iSDg0xmj`T=>Q}0M+^zL@4EkcQ&4axEmdy&{m|&h zh5X4jJTOl(rPLz#9|mo)EKj_K)J1hlnYlYBe}7e;FO7-FDdXSouBKZHZB*ivF)yy;x1~JgdPOAp5wHp|zVXnW zWIIHmq-q5B)EUEp$x-F2T36+wNnXcOT%2={8A 
zv43vJnKdO`<$jtF?lFolQo9aMa8-=_TBVsx;rLhitAk zkZ8Qcu6Pk2)(X?ya+CI@YyI*W{#!o}99Pdql6){c@z93{HwN{kUf8ATZ*5{PwgH09 zAO4bI7^?qB9JpQpstRyRsqptKeranbaV;lam}CCmB6 zWkbBhu(JnxX{qf{M^XsM>bF5uJ-SuZP+u92O*OfI z*6As>U9W1SZw1F%qA5_n6%EQEa=PYjyMI~o%j0=aO>ioA;j2bWLy2tfEd*p&rL{0E z!{#)5Nr9nd5&m`Zzh;mU@-IVGcWClL=OR9P(AgV|(a74!_|nKoCB(nf0x($w(u^_A znCLQ|k5m3<>=#VWDl@12yOQxzcPI>Y1`}r0eGUooiI}`S(^8_bdFY6L%H@4zT}bUc zzQ<=)r5)hG74ji9w6Z@&1sI!Onf5=swD~MmuG`&XvH@hvsrQ;d6k|c4+1tNS=LGeY z;z@N5>j4_F9IwE(2n-XgavpLm$6j)vSAeQ6((+nQ}{dpEJp+f-@ZE6@+iu}@1Vl~($X=s4{;KS677GvRx;;TKR7|V zY`0)BO8a4I1iSNLEtljxV?Pe;X{b|z5uy&*)gh6Lb+c_O^6J2`cQ1IXsyg@VOMTcQ z9qAVSdR_3Hngkv2q zfsEu_Gu_Hmq8`#>F@mg}1ykE~{yKx3Tk7r`D@cW+ikNV7LryymZ|s_19?4Ep_|h6i z%QO_LS#n8hMESXiG}xTKwy}|2TN|66o?fLGK4Sm#`}s|#As!HL^6}*4ZC|&4Ad_99qo+3=ND{f8C$BIY z=(l0-Y<=|0+3MvjEEfB{%owH^%WZaca`bk(rr*amBI`5lty`qm2XHqVx0>GRL?cjAhUsc+tz{1;$`Q9^Iul&XohUi<3Z1r?AVwp@_fSn+ueUuuH!YF4hp0f>(y3R3^LZ(&D+Yy zs_*P?%@3}Q7v?7Af}fu-&VoU=z>?g(g)7cuN7G?@&Z=4+v@2-eIk~c z8LJbMlU=>hoHuD{#}w=43WDHj!tZY>~#t`x|8E#t5<{diO#K>{n{|%cu4F zpt=+t&q0f<7^ZDskrPGq~ZTCMrKP*!FKe4BD`xC(9qQ%G(P~=8acs&dg+~K_oRcipAJS%waIE-ge2eMRJJc<^E-qn@#;K zD*3y}(%oHn{>KlUeNpzQ$+oX0w`$c`2TaD1A^8En<@2W=OQxP9r!HIZEe$@+3v#?d%xe>MNQIt^nsM?;ML&NBR?d9`Te>gX>!2GC+6oX|w zGc&Wk&(Q+#Wergkk9C@m0+aeq&mX8r6hCb$E-tRBso~}4kNPfZ)q_UkaB7u^kMLCK z1D*2vMU^UCAr%}PJTx`c+u*YHTE|BQm{p>n&3#W1#FkT{klkWN5Vyjv2cbht;MHKp z@u?|YH5I-4{66C|vNe`}M<>lyQf$jy>X=ARYGdqL zbyd}SLqlI*U&+LN&7y^znHYZ--R7JGHmj9nc~JYrj@Gt(ADf#?VPa%d_F|%+Zn&wD zeNdk_j|%Xnh&-XI_^}=(plLOIeR`>}p@FjW&fD5}s~KfPQkT`jEj4&oxayt(KuR%M z^4N}J$u#zTnNa3?5g9TH3N3#3C{H{FdU~Y?_ml+y9n{6ODKI2!jUDTRnnLPs)qedw zp%#sHPhvGU_5yV>wT+$G+1Y%%%yKmW+uXWAlWONn*V>aX?waZ?~@CEwJeflMwRY>z*@IIWgT`!PXi(5w)qFUR$6+9q?66wZ!Iis7X9m8Qd}IO&+Rrw zz2Ij;00dA}U)rdrr$@Dsp%&`rhg;H&U9IYSbFbPpB>%>^7rj6ol{rpVwn0F_URYNdwftl%uxYE4;SCpFaPfY7OdQs<(rQRFXtW8>xmX?-3KXaB~sz#ML zttEg)h@Wh?7wJ~tA3HG)gCcfk2;I?kpp=rD+UWb8BAv<)({?S%$D55j>Qcub(3Z5E zAvZxT@uFHYh{xw|kcQ3HmsElv7Z5qrw8*!jv)@4><+~|t2G3`X)S@| 
zCjOtX7yzs9HtS+monlt0BY99i_`Kft3K?0s&nZ&SZjRn69t2VlqiK?P)?ItdD`$L; zH^Ldk$EK(I3N#8IGq3}#kY%z`?e)|i8Xj&XpwUO`SR}iQ%|8TgdalXSW4o2;iQTk6 z@H{3pT_EjXOZEjmRd#Y1>|V)DHFTb(lK9Vjy2d8lR{4t48P4}6Poa72M6E(|xk&0D z(@rCQEmfAAA>#O!nnJ&c#;UMtfgIarT+Y}FIp1a$5K?0e)S@K)UIx+yi>e#O(dkAO z38nH#?rH%Sg=G|a%(Sb-MwQRxCL-i3H>>$_zy6``lQ{2^@9vvwkPF=Su%(I}kP}gE z0IqJYJ!~pa@Y|4Qw&c^dz@s^Cyqq6Z?dk}--?O29bGl)kvOx_ZB`W$*KK&SmglV-69M*TXU1UnR9oP06jTG808Qe3mW0sI&G7_1l%oaOPJ8Cvs&KK{^qRMLQ9~CoNePqO~pab;-a~zsOaYbntkE> zY>_=>-pBhdj1IykXJ)?bZ%jYwZ**CkD5w|NDrwv#6)>t_qCjaBYKHeM^y@`ws3P*m|to>v~dOYRW8O+GD9*3r?9v~+t|A4efG9qwwb&VP0 zRUPD72GP!Jk}l`EMCgsovi2{^Y~LPHcI|(xXnq|VP{!q(?FHjC{9Z!vGBiC)_d~nw zbX_B5R3n9Q&JEom>y2Afdv>LZzvVx9+K6teWg)2PlFja`NbIQ(d+^3zNyU!kDj0UT z|9Bgt#HQxl{K*PRa)&3)c;maDe1C4%pCG_m9XwVEEGW7uSj87RD!UXf;pzHgO$THz zI;9j&!TJsz*UgGeiU4KHjR=s%%a90lYCyDS)IDBpo9xL-bKdX=w9!L-<{4*IUU@<^n@#Ae#`x~}w zZ7nwW#gpOgrA*z%&ZpxayTU`=O$fKmr!?}A66Ys7>5GXXPK%FPJpY70``pYOIx(Ue zQ}3~}XwvEZywGsG#4(T+&PT1A|D=38XrV85@@X0Ijv?~wqZUYXM$|bWaNeTZl~si= z_OP8o&DX4&{~EQ21P4$W&`5<%Dr|%=jW>+a|B`j-$I|FuFp@0 z2?X(ub7Njak(R3W&dEy;5HP|!-GOdIf*ZIrfMoSaKTQs+jOxZmlRv}=)gB*S26qlX zj5pHL^GCgt#qbfx*PiblL{p~}zl(?}h&9LIfh0KSGHF0#)ja66$j4f0GT-cNM)5>o zydqNDTy>A_=g=L`QjN+_a+0@r68ry!hk0e;jnns^75pu%-gyRDM=*b9A75EeQ0I>t z8@;>LKYm!GjD#}M& zG)7(y!)7dx<{ab9l-2d(kW60Ntr1oV?Gwd%WNsRpBZ@p+qAK5qqoPWXV!mVkKbd4;@Y`e zo~;OD z1{5=kn4ET~13DY917a|+LmlVlhwOPE;M&$N#JpG|`yHcaOS0z{b!+Gti+%0wwF4C) z4bB`vG&~nUhMT3Md5)lNt)#KHSs0WW?;CD&G{AYjmMx2el+OHMbM|_k9D$0|sq>%7 zB;T_m*(IpT1;tx-6o0<(VCu{0isWDF)|0dx{;*L$8r&8lJ86()zWq@VkgXW|fqV&v zPRRDk-r=9JuqSh%mv zu&afBn7b9oJuCBpq@JATiI1x*0***smIjj3QwYW4K~%S``}slsIIzvy$|@r>GjrKX z%gEd1Z87S}lPBL&rBb={YWPiQ@*akXw}4cH*)DN$aWUh7T6)`$GjxXt4S`JFQ3xOV zA2+|qp=ff$&IY9uqVp=CH;@_|I>4x$>S}=z>*?uf<~_BR)q=vu{k4FmnzuhF^JB^d zZKkNPcJT!{J#yJM?PeQX%nVG42nnGqkNrwB$h&?+FDKyjBBQp}ZN%)oO}ZuU^If3~ z;nwRL8)kohb(j%&U8h-`hJ>c8r`JdR1n}?Kej|7+@i}nwb?fymV(d>8DqR-Ou4(wd z;94`Y)79Ng4EXdKU}NRUjAN-)OS0Oy#T>Bx0O!an#Y^(MWv75tdYB`&PgZvkR5#7J 
zx$LQ+c-q+5NS}B#9Mx~W1M2JW^Bhq5A{jv3If4qIK1O=_Z?)CcvRJy$pDMoQ?Je#c zd2z?~S@6cc=o>WUjkMi4J3Eu#G6Kb2-hrLH{lod;;cFh}#Fa+-ySx82`>L>g^VIsZ zhoAc+KVWta6lb8+Gg3{KkdTPnbTd8(X>+{4$H-RQT-Igu?AfzFNIhL$N(tZH-QB0U z&%S%x-$mHww#`#^dFp}WPxySl?0%DE^nx|G(PQZrfrZ!n_g4|Ms%rT^#ZxKVUdJuJ zpP!wTSm6aVnIdU8Kq9*qa?7i0LQErWV8Jn*r>H|Y?j^U35ushAn@e4#2S zOiOu9rCyDrID4UrEmjO95JLtxM`lV)*!70kQ`_!mcW@7Ie4E_a8~I0*J^grWkDP+y z8CEWfQY}AXM%kgI7OInCL7pNkl{%pxSXD!{G*xZAvbm|e{&;$BZXiX%vzHVBaFw6ebbw%gwD>VX3v^rjRhb>_Zq5KFpLr+|Ra-za@F zl1Bo=rcjfVs z@c6N2bocB65& z;7G8>>hbrvIms2;>F6nJ{s1SvZ#T3RmU7 zEEq+cm#>0%*xw7WMv|-ZD&l{i-guym&d*Kh1$AXKHpB)*3m~5DOs3P1Fe37Y431a{;9{U(Rl4nyrf&; zhw9m!Py^%E_gQo<{G88wMryO!%zpBMf9L| zk?(LyE9|n)(^;Da6W5K0nj%>r+?4iH?xmeO5s*UN7k-)jl)RumPvV5BT#XE`G~WQ- z9n`Eo1erx?s)-=SR%Q@(!2a^GN3>}XVNb3Gu=8~3L4BFA?VNAlJMgfQn4Uu`Xv?js zNc@|gsvUTu(iY+uy(ls}#qygu4Tvg?mLC(0?|0rs=j%l~+S6|`CxJr=j#vii~-&f>M1mYWC;uI1p8?%W^<1l6&g z0`bxdBwQ5#clrU$j>iQTtg13#Z$6BDb+Xf~U?XGU9FW-O-UNXN6I6RG9QaRn2-ln{ zc$v2`3F`DCTz;9|hJ96E#p|^Mn>*I}^glqGdCnqu+eD(~{mCD~mI(&9-DDuhWXFh}61Nso&GjZl{Hgg_L7!-IRFfZ;pD4Gl?G zC-I;L>8))-naP-D;Tt!!0NE-xW@&%_gYKp**&Cf1+LfAQ@9 zZEvIHXW+^i1PuO_6V&8y5-PqbOrFda1IYxv?5Hwfm5`=b?q>)Ju1su zUO4HA8J;M*2^~XP3<3T;9ZSq>^XvX_M>fVCL*idbaB=OwEcVy7k*u1UJt%T}!d51C zRaUM0;EK`9?D%BgF2)B|f2nTMZxabiHh>ca^Tz(`y5;ud(G<%#VK}MJjhi4&QKvF@ zmt5Ca;pR56!Ieyz=!K}`n1SmjcYYqdvfPC`?H5yZ(e`F5qtYh%8sFIqmT|W58HsLW z>AVMk`K_2*KXR4kXh=^1rt)JFGo>IHRpJz0Z+0!LM(PmGUzhw85r5Sn`?@t(h&Icj zA4nGk*2%N&iB`EBSRq@=#n;&tq{}!I zn0LVdiu%m}qH7Xh`o=^<4=4}mb|5Xp)~KX*{P-PCOB!B70YYcgv7c1i7tr0!Iy|MX zi@5EinQTRC?r^^l<%=^Hb1}J70Jpnp`i*Fm+msi~tK6a8WFtztoargBlp;9e+ztVQ zDGFTp&Zu*|6Q)<1y)Hm)^97qoPsq2QP6be_apE$(dbF*A=D3_SN%)7?`tH#|mVrF& zI5Kki={^n(&iwPs0hr(|ycR^rd#Ai&6!|v2{T+=tJIDq!e$wIp_*-8E|CBL-CyIX> zD0yWj@{%~+gR7>UTBF=71-e(3uLc_&IbbSl;hXxN`tlIC{^tC%_uq0fs>xa^p>(^EAn6f*ui7Hu|jdj5X^!C@v{x@mkV05?J zUw~;K-3RF1nlz`qz_#wim==de={3_90eqb@?lDE0*uRT-=h6=svjG9CgSCE~s53yd zonH*Q|Q^c-4*lO?aBA1{Us;a8yg@ zgfnCgyNdn~YQKn5EDh0uFQ!lbnopF6h*H!96)%kVdSZ!~#0_Fh4lm$)6MffZ2 
zd*a_LzSAzk?<&o6C~D8WZGq;@*95M9EY9#w_v|7m9*D8 zAS_i6ADL~Op2YmoTvTT(ig6$w?Sk&|%ekDA6JSQiqNa&c2n#sC(4NS(L;h_k)WHR* zs@0yjhj=nlJF#jd+2*uEicaG8A~J>8w#$=($EX&(t|p%Xa3yZSQDL}+ejXf? z;QrLL7APBO@?}JQOoNHE%G_)JwC03Y{InMWdLVOq5Z4o2ujJ+s=Nb&%JG>%DOREfiLU*89(WICtXDD9&x*VWvm0ZYwn5Gq0kI&rp;AZjw zZceT)_*!7mf)D068Ix1cxt^K?Fyx--wZp61YxpL`JS=rT$7~;g@r;6Yd?DBUP~~~t z=1*LIGN#PL6`AR Date: Wed, 20 Nov 2024 01:47:23 -0800 Subject: [PATCH 0791/1088] Add files via upload --- images/documentation green button.png | Bin 0 -> 11757 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/documentation green button.png diff --git a/images/documentation green button.png b/images/documentation green button.png new file mode 100644 index 0000000000000000000000000000000000000000..0deccd386dfa3b1d918dfa93e351d735b15eb743 GIT binary patch literal 11757 zcmX|HbzGHA)8=pp2|)?zP)a~rN*W|3BqRkyN~F8tfD+OvAkr<}4F`~rRyri5Q#u5` zz4dv&Km4hC?%j!C zvAD>m-rrs=50xQw`B2z(d~$C}Z$fr(n{8@_AtAiB#8cHVa=`Se+D!g&bf2_v%z`u_ zfe%)`-vU0V+3?-yyX}#<0^FZ2vvTbbNgJEHUrMfjdx%nBAvOw1E)<=$#u{6$ifvPz zrVoTv<@3=8cilgKKg@?K!h6-i=M^MOK%a)**+~CSa41e z$0;jbD@UJ1`$N7SQX$^ijWDPSgz+Fk5%OpmUXNPLW9SQf$B^VHT}!3QTHj! zLeiZL(7VdC1O^#y{o?Vwrji}LEgeJ8tVN6r5Q(((O!bUR5AY@~t5}=g`Jje&QFSrW80qRafHk4!fV=*k4Au|7MoWybVveDgcdvxG-O~2=9;aJ;c)$LV zbN^NuVI4_FJ-s{&hx#q+0c9QzxBS{c4Sge*i(AXGnS z(gBeuW$v2fY5A2Bny};;iPLC{>~kRdkiMvajJo0U@@n?v*pKLYG;SoL%180x9`l2= z0(~94oSUzg3S4|M(s0}%fn(Qa|SIOCgSE3S{mwLVX zpO6kX>og2!o4;Ds0&Q!69CyhSqoxdKd+bcJUZP%L`g!{0`5hAeJ#R5ZC2Db(uE&X? 
zloItK$j=al^Dwei3)tEyF6#AX+K~apx{s6b4zG$)uJPP3-YC`($pf=ZkEWgZDUao< z#Xg3ZLn}gS30t;Mh@p>wbCD35v_~@O%aw$Pr2F1I<9eRpbRVck;U$#>-=Eh%0)Ay`!#x=6 z#Mxf~eSMR(xEKB$j zg_HH)5Bp}S?@1!>sf(v6a?*M`{v}=T<}uvzR+kb7b|=9w2Rs!+hT*S-A%jV1ItO&} zBm^>CXh(&J6>+8izP}CkL3^3xbbo`$L%bE^Oq7%U-|0tR{FrW$BXBhDV;;GK=Mfvr z{uGG7uuSBSv9J}xo-GzYJ}idwha$qB9VlZx1^15-I{6PAu5?4V7X|b}VwMhJ{!u=F z5M}8w>ctB*)?Pd4)4CkVi@aF&>{?0e^#r3dMnx30^sgl6)FMh$g0Yce zc;fV(8x%-%dk*?tzv&$vmGYsJ`1e**Xp`@h{ z9Hy5`R&WoI{k$o54QS2Y7yEO{08ZuYbzeB{;6XKXegOsUp9Cv|L?v-s6q|lCBWR7R z{_dOpBk3u$qZvf+BLgN#Be4n#96a8txiD`5JE>fpXh64j^fnsVlPX2{MH-Bzf1elH zT7N-HhNqM&FD^z_$D8EmLNF}$3=ducU4iMQ;)0_Q(;$8Ne5yMeScpNi zVhb5fB1CV$87zVHU$D(fXtG(=j&YP+SqiyRg0`R1BiMw)L8ucenvgfrI4NQ18Ha-< zu^=f{$Wj4iPL7{XjZiO09RBUOU*X(mUhsa0LL@y_T|=wk5y`L$ z>NRJoF8Cz7f=J?{F=E5N@~tyDo{|ighLqm{N67DE zdW%eHiHoJYLUD%v|GWN{!15W>CHA@&H?l4ati(L9Cy+Q1l`(`}5&r&1_pEt2ZJjmU zR~R31tn&V>5xz@j2p@J%7O`q*!F2DDKd#T{Z`f7GdEsA`ab}(3V>p4QQFrcaoM^}& zXfakTpYVWTk*?V{pL9I|Y_hk=FIsx0#FIF~d7ik&zh}MXH|{L_qns_HH@itFH~LTZ z$jAAli<<(Z-jVO&gB6KxrdHB(VOJVaNaTrP8gt(!EGQq2E)StqecHS|TlCv57vMQmv!sKCj~X@$AwrJKv7Ad1r}M zo%ves*SE3s@p+!f=J=$8uzLn>f@1ELLY6|OzY18Gx!xh3`K=Vts4<=j)+?wvcfQJv zw0iyh>r|EgImztU;}IWU5Qh>q_4|&Zl`?X#LLE86>&XjVQ4TCM!#x~P1M*Qcv~X81 z$`dNAR+Yep*udT|*}xj(%Z5I4u^&~%%8Yhvf6#ayVfK3t=KUIMK)w_^QA@4gHoF&b z;H4B3*Ay-FHiliIH-QlK;?6>o$y?Qof)djH%nz0BuMfs*a-YRKsBOL686Mdc^`h-k z$o6-jmS7a~h+Y?1fPaY3@_+1YLI|m+_2wA+-6NQ;(Q%V$%D zaipgn7vJ<|bb3}Ya=nd8yJ_OXvHcZW?h_C145f?m;uSBqgq8*#M^gM?Aa0iDHFvn0 z^y7HqwTPrJr5Ro}Pyy;HtFm6kT27-Wt!xY}wKYr`(wz9oVZ3bhq1h z_I4HbRFXjT$Z{2|Dh|@{aUTt9Hs?rsbZ^==R>sjwFC92Pt1Gl%tla9RapYPLnW)`A-YFEB z%cl(YJB53!eo&Im+UdDJ{iKGf+4BuB6dV~9L8G9=zua66=heIX%&MXiB+{yn$>w(O z>{GC@)sY9E5q^=KOi^30!EF)?>8r=Omnm#1k}Uc2WoGg+9ohrdiTyy`T$+sFVSoJMH-Z7niYL&3l-Ew$A3Z1i#~%@) z92+Otv30I(O_%j0(0(4sO>GsO)rFQ3y2Oq zQ7xx3zF2b6IFR`aC0{pp>qIsbh}O@3k&GzEZV0STxq*j~(e=!*vN52LPc-%LV09wg z=!A@fbNMJYODL)ANn@k-u^G-Y15Z&~%yQtLX5CJty)u2gx{f6|yY>$BIZT1u1Fowv 
zub;~9(?c;PRW^>9^Zrtbj_`2xB+dta1}a>DGjg5bD+9hJBX`g8(Q1A|bSOCLPiK9C zSv4f)!iSTA=+UG;Pq&?^SndXVskbwOPQu%qm){?nREC>2?vbvG;2xfiU#+*AA2&Qa zs!ctpr(vx!R^+mhRT$MLMh1ghUOeyZ8!CJB74OwW4cgYQB#DL zK3AfSdARYo&hlhaZHkc3)$*i^WI@#9t)f|*mgFy|UClef!wn_0dT+2LfL$9lFN740 zp8`MlsA=D!M8NwIzwqfhAF(XRI{GH@&58OW4ZF-Ka`y*{Tf=g4-s(ED8B*MQ_*@eW z+$*%lhFs{~Wo=1sAC|NN`<~IBwGhp1Li@M>9Qu#B=@zkqQ_jI5U1pXVV@2QDh~B~A zQO?jGT;ndgMx9%Fl1f6pqi01Q?X^Y}sXvQy03n- z{|e=xXEJAID!74_WRjQQO};QWWJ7lhoH+ayHni5%!W5RjmeglsHn?vrW_~;V#UqQ< z4q)q**lW#@rw9K$Uq#nieVn$OBkHJXO*cmTPRDI=MOeg{dgROU{ITpYH7ew3$Gy)2 zGiR06w(if^&FO7c)?~XYqs7P9ycX&@>fXtJH0BYVe^G5V6!CmSU)#|`=FmF7==IMa z+Xdj$&D1p?PDD1x<>d{AnlQyhdaUs&u9g=}tS~$#eg|Gp{oYccBi1oql}SIEk&VpBImX`Hma1Nai{%+Ji4X zyGj;(4D}kF0X|BW<7$~u*Q0M>xVLTA_4k<0@eWN7Fo4+^;mbDyky|1IyaFp*Jdh;; zWrv&Ajb_BRVX2Pp-EX2{4{6JEWH(w2Na*If0d^MpM=$xcmQ{D6UmaC^wGoEX?KdNH zM$jI*EN5`43CC)VNbpI<2f!h^VGID1xsPwsZ?47+e18)Us2@LP3!mvEwc{M zr9%tW%O$~AJhkq&Lio1&-u=cFGhJc3*tmA{omwP6{}Fci{;d;eaF%a3B)jh+Yo|*X zGf_YjReZfV3nDhQL^5D20B-yK)9z@m-_T);M;ozD_1o;O?49D%#A+ibI07tFw~~`5 zsdST*OBOvgOz6(URWxD$Jh5|GjisxB=fdEd%R0+NbQcY6=f*DfXM%jl;T+-fndDNd0w$Zn2ZtCnX52*0t(+Es+a#uNUu8oBWn}t~(7f(iT1!O<3&En>- zL(Nxgz$R|}E){rs*$ChncLOO0u{eYQTBUSyY_uJfjL&xOI8rbkhKg8QxIR7H>M}X1 zwomi``qk$2P>@-+GJGM{hEV1lzt%GF$-OZe(ADH_wZU}_Yvj%wuN;3zHGA3Hdo4!8 z#>zu|u>Yy+TL!Afq;>Z3E6iHan~vvFO~pI29ZgDoiLF}<_f#%4rguz$y=vPCkI#VM zU1m0fH@-XX-apS--0*Q;Xy+wxQC2c^Hj1RGL%^O0$Gbc9v`#NSF;|=C>32ap4u`L+ z`t}N8%O$xn10|S$G%iih1^idK{XY#DlxJ6Z*QioCY^d)^TES9 z>S;R67gBJOQ=CNDYaR3qVa&OAsoKs&OU=$z#DM7U zpf%DDlO>U(67KabnjYL`-<>-cdG7Z+I>Nf>Wi`)MXh(a*?Iv&%xS#fzRly9s+>ZBi zoN4s~o->$~L(O85PFBnOTQvC2i#u$_VwBIsRccd;fJUYVMIO!legt{pnL1X z^SOw){7ngYt@QSfJGXR)wW*X;V%2VB46RGvPqX;cP2_akKUISj?{xKJO=ZJd~8Ai6MS5&^Tcm>}!m&UK1TwLvE0&~c$nNGK7zXFICL@t57 zkU3rH$jqe%U>FuvaK%sGY-Iw>w^!^;;LUxLskfHJCLJa&EZk%QuPv1%ZLmP7dx>f& zaLx};V}v*_@-}Z|{+2!n>w>I%Lz@WwklpzPmO{O^wLqwen)kNo9&THkMf2TI9DF{K zqQ&EYoDZ6kX{lj29qO#`-pQ~L<=YB7bkJ2+*raDozBgXRM0DrqiY+em&}<^8(L%2T 
zXjb{?_C}{`*f$M98;Bj=^p}B#$vjP>8m{k%OKm@7mmUL_DypoFr#Hxh#*fckl?qF4 zA6NhWQ4=0L?g>PGS^eAo^5z%>WRbk%OS$fv=f&&Am_iO0xv#C)sK>W$2JY3}Bo|8X||gyzosBhIuo zwHLtqTE)${WM}yZ-72eBzIsx95LaY)Sw7uWH3D@0dxGQY@s=s&9*wSMz=c9->9nY$ z%!OzmK#6~(`o0vs-RfiG5P5OOTL9-hT(DfV|2X4_3BjI;!ued=cbXGhNyI!ow~nq%?YokI>_1i*)ko z%m%(DiKTl-PPSPOMkx<`O-bMvbt$EZi5C~@l1CPoK3iRKjZOe@e?8UX&u#8JjGL~^ z$cbG05!_(VNw=#?r{;4o?s6~YYT!+~*Q$@>FT3nzqM!yLyMZLZ6anh6gYgODz${DW)7d#u=y^lYB z*xQ+oaao(vs(qKe`QmqPwVqlc`wLfNVE*eJcd{e25J3Tz* zJK`7arzKp9B>wz7lr1Xc!6YUDv8B5T^|uZU8g7C$0rZX0#GQ$b_^k(TxZw5GsJq{h z+INuIGP+Z=d}npnB`KuRKt5;SO7AU(~y9_nPW6ETZ0v6IQ~CgTGuLwol^4V*E7Cx{y=$h&D+IDcJ?QO z`bs=f)v0jd{b@&U(HpsLzFp-qZ|1NiXhHqZdO4f8}T3R*{peu zDJ)_N_BX2QF$QZ%)W+GkQ=QV?pA7jKcg{+flTT~#1>V1CpvVNkqXWc+uuU9eU1#fQ z!mz>J5<(k;XpLt3f*SPZg`CF1wfA?A(RnqJFSOoXM!tS-)lJF__iU2WkR2d17+R7~ z!xp)M7jI$w@wN;$$`^I)svGXZVeJ>b{KBT#%%AG5s>#ccVgDv&u>{#%#_!GQG5I|% zu1Iahm*^cNBH21AnhC#5szNUWAOO6wu>BKNv2@I4!GZVl&g-*IOL8Vz@&^z;$+dTw z-nf8{P0OdDaS!cpR~;ZLeOE@Z%1%mUL!auzIQE(Bo0n4&34T+O%O_T4n&cpbQ9Y@# z04pOIA>A0if)p?|$?@CmK;e>b&M<_X#+-QJ8#^r-}JRXb}L{ z=8O5O!Wvm|IF`yoXS_q(7D_@&0GqV%T@y~ljg~$;rkXJHmI9~_(fGhL z%^&n>7h-Gxr~UTMWCFv0qvQ=6eC%k1zc%nwcTr{6ic?eyPgAO=*wKdQyFIW_4r)Kb zT<32=8oJ?1ZPY56GQ*i9z4kK3J{%yd{eDyak@a=La_mEE4WMEuK^Qd3g3U$e34 zFiHLV`y3`_oU>kkwGkB6Uo&>xXiXuhl(2Q31(XfQCpv2$T@|V?{X;upM3SuHEa|oM z+QF~K0U`&F8>%}oF&Yk3(ikF-jX4JDUQ|JV%tkl$yXCz-H$DvuJx|7SxPNCG>VhRd*!57 z?6>zp1*r_wxvEXwkB^5IcbOfn1&7{&3zn?T zi|EnJ1oanBZNB+9wz=O{`I=5WJ~vzdRH_V~c@-^EF6sY8abZO5nuw4&v3voLw*45E zx-XLhgaF{=aiLMxuD2-z@Zl71r?KcZb@&b$HJe%jcs*V0K#t_CTx+MnGh#XP?TprW zk99-u=4K;4?Qr49U1#jFBjTgz#CPy^y^3n0r;eAUBDwG-Tr;o7&w;6cB>vXVb`elO z8+z%&Q#fa3G>}U5XSK(OdSeneZ(^RH$jppQjok5wh#gkPr8zo)rb4zV`M34RK+9_ z1tg@Lq_+V;mz~~ys#0Z(M}ZQ)y*l5mFT;DLoAlm?b1r=UYBJ0G zvUoICSJ|I<$X@V<$?swE^_fwrA+4o_2Q2SsladVu(1mIPWBY(#%AT$0zU{!oFP>skP*%4jS)ox zvt4QbG{W8m(^LF_sl#d^-1p?4(LjB;wL zPrRwQFP=}ER>*m`7l|AjU>)$bxHqe`8 ztuYQ84RYPEkWfKC>X^jW7lCR^))XIQ=E#R#em$RaO|SQYM?D23n$DbLNXXb`CI&?~ 
zM5**dz|oM4wK|+Aw{hFWESgH0-#QTj)h%#(1vxo;$c~x9&J=u5jOPB;V)2G7tXFz= zi&+7H?dXerZcrvBABGvo<=!I>0l5rhtiQ!B6DBXd=eVSw-=bwTn2$M(s08)X-0Oq3 z=UG<2z5pdtj6eGJ77YQE8(!Q6DnUyD@Qt9C>W`-*D3`v3M^;v`i7`n7xC`+xJUrzvwH)+b0` z1{4J&@M;rArHyK9i+;6e@$Zk}lmuHT&I8cDSZa7-@knUE)5T^fh>GnIsYn16*STfR z0D=egHTDYIN1$^2jmJ~>{NPMSP+y?I%oWGb{XD5+^Wb?fa2H*@FNEl>`X$Ozyu2^p z&d*a9?5vg!UcQkb5zy2M^b`Z-vBiJ2Y_q{VgA)tBZk^9cM_FNE@x}Ef4>NpKDkl~-!0Pyl?{y0V| zfH-O{t^Q?KyRtQtQMU>QKX4qCRzC^;E2h3DPY_kFZ;G^ZsokHnSPhUQc5w%oi;_F7Oc)RB8je_MkrmaTL> zbKRhywuhP;+Dr*Z7Bq>-Mm0x%pVyc2y<^D?XvCzhZ+*K)y@=(y_qX%sGdU4^g?_Y# zp!z)qEyPz-9gP$>wIZ&CURhYJKy0m8GhgBM|1cECbQAusJ2+uGPW>>wwB3ds{H`|Y za>ryx^fuA3FQymMfdkY(JUF4ksM{MBhf%$fulgsBEix>HJ>3>{TvqF^FyrHdCfhza zU{vXb{C~Ynv7cz5)k&~WTmQE|7+*b)dXs9C$qBon-Pa!c$IT9xzm-TjCyZ*r_kUf} z7Gkr^GMG1;#gj^?6}vgX_(~}es{_Lc;oJLP>-8sZKg)4%e`#)vbMzvi9aAU3S zp`4n&{MQ$~W|H`b#}JE1{Zuc-4z*tw|9r%ysOP!8eu=>3@d8shF&ZV*^gY8Frogpb zlL^FhZb8yIIJ~D7GN(^Q%wY>h{kL>LE8#LI#|PMKh3V_GXoAyNob{1f74Akguq)oC z#(c1IG|l#|$!eG>1vm{R@VR3P>d31(k)FM+;TU8{Z-sf$1x-LA<*ZZuvU@l567>m~ z9}*E9l!A^U_Qc|L7vHhhQ2gCPae7 zPEUg;lsny)p|25YG0RJDvm#kl2Ojkz@b>5Z45futAX9s8jk+7=5U7fIM293nxYDJT zLbTt^q1!O_AfrD#NrwyL%o28ZK_W^zaFp?FCUxB%hTVe|a}DT$Qx$Ys_xbJn1xgTe zSbwlwoIx{QZNoj1cFU!U3rs*fK@Ryf8vi#I(TG)H^Joco{m$b{F_J@WKq%YGJvFhR zCi^SZ@H|XZR&n44l8D-wz+|RGfP}ym7%!Q2RM1}&ve!jvB07~k0Mw~bcZdF)-VhOo zrZ-A~Ol!ysVRjX;KcwBm|4}l@IqteXa!g$IfAb*kXggX`O3_z&(&0_mf9^&^LUS4I zr|c3g-n?lz1Avw`Y&II{07F<5k5XU^@S;+g} z=!%M;9vEGTfn8C(VlQKEBZq{VKemB`wf9tMswz!F0%LpT6gicY7qPO0L;hS zDGFAJ-VXsY6lyv%_%Eelk10tc!NJ7)LCB13Xre)FO7Llvbec*XUb-F7;s|n0uB;WZ zi{3#YG_s?OY#e#!Y#_B%Q#3UHa6sowmr28@FJ~KYZy45s-`^btiFelYEK0C7&FOwO z>sRnG)xsg^4oeFjJIdYK_TP$6w4AmTt8S{7eNZL|tA8VliFqB6MIORdEAStjW?gQ#e zl{j^%XVO0c-m5+WE`^ShYVz~nkt-JRWIr&jR`HNX9o#VS*)8e)F=#rE;FL(h#fx`C zl3UdZM&*Ak)rv2i_-}_cK7mQsl{`sI!E0bZxUFt2hUJiia9VwRsZk=fhRGa$&9@Cr`*@Ti?wIAYL*kBK0%|pF-JZ9H*o7!*-=Hab|UbGB{^4DwCv(Y0B>ps>=Ip+Ft% znGhJEJlBeViL4O6;se%J^WGA&wy1B{QM6gjpY7N_Fs~s}58$ta?S#rWzAFIndPhY4 
zAMvHqCdc!_Jr<}I(h%iHG2y^SOnF+!d7HwO9WXqtZY{Zp*y7OWI2Z@e+M>+~5%^X&WDrTqc(Jf^JzjCMJ7opeP z6~M1ccqI%?qnPH#(mqR(jVjiSVaqEc8}6_`zlWOTcZ0gEPy^y*c4k>9txz~)k6Q7! zkLr}T>q%2MUc_%uY*2jS-#`;U0TzUIgc4>KEE0igau`S3PB8qtv)gYN%d*!5!FGo+ z>8KB8sDyr)oHVALh}1?y6koJ3R2y_YdOZ+X)C{a?d&IMDaewCkUK;fGcjmRgj+=q<4D^}vboT!5gN+m)CH4SA@5fY7l4#fxe&n<1gtWx^S z2OFrplLialQ!&K}_wT-v88W6c_{s`R@==$eYmXhCq0fN?gLD{2_Sw+#VrrT!N&+g) z1JW;V9<-t7xr730NgfFc?SqEL!l{^=#yIX0-037G&B$1h$T)SxZ?vqhVVGSgYbSs7 z;`dM)K6wmFP~ Q`E3+=Y2~LSlCS*#4==*xp8x;= literal 0 HcmV?d00001 From bd1a17538d41c1adb2ec28b074c63a575dbab1d3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Nov 2024 03:36:41 -0800 Subject: [PATCH 0792/1088] patch_fast_lora --- unsloth/kernels/__init__.py | 1 + unsloth/kernels/fast_lora.py | 75 ++++++++++++++++++++++++++++++++++++ unsloth/models/_utils.py | 7 ++++ 3 files changed, 83 insertions(+) diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 82e7641693..ef5fa5da70 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -42,6 +42,7 @@ apply_lora_mlp_geglu_approx, apply_lora_qkv, apply_lora_o, + fast_lora_forward, ) from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 2177b43b9e..6481661d88 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -410,3 +410,78 @@ def apply_lora_o(self, X): O = LoRA_W.apply(X, OW, OW_quant, OA, OB, OS) return O pass + + +IDENTITY_DROPOUT = torch.nn.Identity +@torch._disable_dynamo +def fast_lora_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, 
**kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + # Fastpath + if len(self.active_adapters) == 1: + active_adapter = self.active_adapters[0] + if active_adapter not in self.lora_A.keys(): return self.base_layer(x, *args, **kwargs) + + dropout = self.lora_dropout[active_adapter] + if isinstance(dropout, IDENTITY_DROPOUT) and not self.use_dora[active_adapter]: + lora_A = self.lora_A[active_adapter].weight + lora_B = self.lora_B[active_adapter].weight + scaling = self.scaling[active_adapter] + W = self.base_layer.weight + return LoRA_W.apply(x, W, QUANT_STATE(W), lora_A, lora_B, scaling) + pass + pass + + result = self.base_layer(x, *args, **kwargs) + # As per Tim Dettmers, for 4bit, we need to defensively clone here. + # The reason is that in some cases, an error can occur that backprop + # does not work on a manipulated view. This issue may be solved with + # newer PyTorch versions but this would need extensive testing to be + # sure. + result = result.clone() + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + if not self.use_dora[active_adapter]: + result = result + lora_B(lora_A(dropout(x))) * scaling + else: + if isinstance(dropout, torch.nn.Identity) or not self.training: + base_result = result + else: + x = dropout(x) + base_result = None + + result = result + self.lora_magnitude_vector[active_adapter]( + x, + lora_A=lora_A, + lora_B=lora_B, + scaling=scaling, + base_layer=self.get_base_layer(), + base_result=base_result, + ) + if requires_conversion: + result = result.to(expected_dtype) + + return result +pass diff --git a/unsloth/models/_utils.py 
b/unsloth/models/_utils.py index a93c443042..4271eb6a82 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -62,6 +62,7 @@ "patch_compiled_autograd", "process_vision_info", "unsloth_compile_transformers", + "patch_fast_lora", ] import torch @@ -1081,6 +1082,12 @@ def patch_tokenizer(model, tokenizer): pass +def patch_fast_lora(): + import peft.tuners.lora.bnb + peft.tuners.lora.bnb.Linear4bit.forward = fast_lora_forward +pass + + def unsloth_compile_transformers( model_name, token = None, From cabf21f0dde2438e9284974e709b5d698808c18b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Nov 2024 04:15:53 -0800 Subject: [PATCH 0793/1088] vision --- unsloth/models/loader.py | 5 +++-- unsloth/models/mapper.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 7a6322d248..1d4bb71e85 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -153,8 +153,9 @@ def get_model_name(model_name, load_in_4bit = True): if upgraded_model_name is not None: raise NotImplementedError( f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ - 'pip uninstall unsloth -y\n'\ - 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' + 'pip uninstall unsloth unsloth_zoo -y\n'\ + 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'\ + 'pip install --upgrade --no-cache-dir "git+https://github.com/unslothai/unsloth-zoo.git"\n'\ ) pass pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 627aab2054..03047b8103 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -452,6 +452,38 @@ "unsloth/Llama-3.1-Nemotron-70B-Instruct", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", ), + "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-2B-Instruct", + "Qwen/Qwen2-VL-2B-Instruct", + ), + "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-7B-Instruct", + "Qwen/Qwen2-VL-7B-Instruct", + ), + "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision-Instruct", + "meta-llama/Llama-3.2-11B-Vision-Instruct", + ), + "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-Instruct", + "meta-llama/Llama-3.2-90B-Vision-Instruct", + ), + "unsloth/Llama-3.2-11B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision", + "meta-llama/Llama-3.2-11B-Vision", + ), + "unsloth/Llama-3.2-90B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision", + "meta-llama/Llama-3.2-90B-Vision", + ), + "unsloth/Pixtral-12B-2409-bnb-4bit" : ( + "unsloth/Pixtral-12B-2409", + "mistralai/Pixtral-12B-2409", + ), + "unsloth/Pixtral-12B-2409-Base-bnb-4bit" : ( + "unsloth/Pixtral-12B-Base-2409", + "mistralai/Pixtral-12B-Base-2409", + ), } INT_TO_FLOAT_MAPPER = {} From 7d5c9eddee37c7ec7f55dcf6b4a99f2cbb2912aa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Nov 2024 17:07:53 -0800 Subject: [PATCH 0794/1088] Update fast_lora.py --- unsloth/kernels/fast_lora.py | 3 +++ 1 file changed, 3 insertions(+) diff 
--git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 6481661d88..c2b7929a29 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -415,6 +415,9 @@ def apply_lora_o(self, X): IDENTITY_DROPOUT = torch.nn.Identity @torch._disable_dynamo def fast_lora_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + raise NotImplementedError( + "Unsloth: Currently not supported yet - reshaping done incorrectly" + ) self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) From 4ddd1bb6d39d9819046a1100b1f2a5a6834a8eea Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Nov 2024 19:15:05 -0800 Subject: [PATCH 0795/1088] Update _utils.py --- unsloth/models/_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4271eb6a82..f5075a5445 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1104,6 +1104,7 @@ def unsloth_compile_transformers( compile_function_calls = True, fuse_lm_head = True, gradient_checkpointing = True, + manual_replacements = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, @@ -1132,6 +1133,7 @@ def unsloth_compile_transformers( compile_function_calls = compile_function_calls, fuse_lm_head = fuse_lm_head, gradient_checkpointing = gradient_checkpointing, + manual_replacements = manual_replacements, epilogue_fusion = epilogue_fusion, max_autotune = max_autotune, shape_padding = shape_padding, From 1c94f04c9eddefa4f305e3f8f5ea7775430c7304 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 20 Nov 2024 19:40:08 -0800 Subject: [PATCH 0796/1088] Update _utils.py --- unsloth/models/_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f5075a5445..e1214a89e8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1111,7 +1111,9 @@ def unsloth_compile_transformers( cudagraphs = False, 
debug = False, import_from_cache = False, + disable = False, ): + if disable: return model_types = get_transformers_model_type( model_name = model_name, token = token, @@ -1140,6 +1142,7 @@ def unsloth_compile_transformers( cudagraphs = cudagraphs, debug = debug, import_from_cache = import_from_cache, + disable = disable, ) pass return From d6ccbfb27ea09dfea468fed3a30414e0fa7b6726 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 01:48:20 -0800 Subject: [PATCH 0797/1088] Vision --- unsloth/models/_utils.py | 5 +- unsloth/models/loader.py | 388 ++++++++++++++++++++------- unsloth/models/loader_utils.py | 114 ++++++++ unsloth/models/mapper.py | 8 + unsloth/models/vision.py | 462 +++++++++++---------------------- unsloth/save.py | 170 +++++++++++- 6 files changed, 727 insertions(+), 420 deletions(-) create mode 100644 unsloth/models/loader_utils.py diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e1214a89e8..460b23bd0d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.7" +__version__ = "2024.11.8" __all__ = [ "prepare_model_for_kbit_training", @@ -1120,7 +1120,6 @@ def unsloth_compile_transformers( revision = revision, trust_remote_code = trust_remote_code, ) - print(f"Unsloth: Automatic compiler will now patch {model_types}") for model_type in model_types: _unsloth_compile_transformers( model_type, @@ -1145,5 +1144,5 @@ def unsloth_compile_transformers( disable = disable, ) pass - return + return model_types pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 1d4bb71e85..b561239313 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -20,7 +20,7 @@ from transformers import AutoConfig from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel -from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit +from .loader_utils import get_model_name import os try: from huggingface_hub.utils import get_token @@ -63,128 +63,324 @@ def _get_dtype(dtype): pass -def __get_model_name( - model_name, - load_in_4bit = True, - INT_TO_FLOAT_MAPPER = None, - FLOAT_TO_INT_MAPPER = None, - MAP_TO_UNSLOTH_16bit = None, -): - model_name = str(model_name) - lower_model_name = model_name.lower() - - if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: - - model_name = INT_TO_FLOAT_MAPPER[lower_model_name] - logger.warning_once( - f"Unsloth: Your transformers version of {transformers_version} does not support native "\ - f"4bit loading.\nThe minimum required version is 4.37.\n"\ - f'Try `pip install --upgrade "transformers>=4.37"`\n'\ - f"to obtain the latest transformers build, then restart this session.\n"\ - f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." 
- ) - return model_name - - elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: - - new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] - # logger.warning_once( - # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ - # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." - # ) - return new_model_name - - elif not load_in_4bit and lower_model_name in MAP_TO_UNSLOTH_16bit: - - new_model_name = MAP_TO_UNSLOTH_16bit[lower_model_name] - return new_model_name - - elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: - - new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] - # logger.warning_once( - # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ - # f"We shall load `{new_model_name}` for 4x faster loading." - # ) - return new_model_name - pass +class FastLanguageModel(FastLlamaModel): + @staticmethod + def from_pretrained( + model_name = "unsloth/llama-3-8b-bnb-4bit", + max_seq_length = None, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + fix_tokenizer = True, + trust_remote_code = False, + use_gradient_checkpointing = "unsloth", + resize_model_vocab = None, + revision = None, + *args, **kwargs, + ): + if token is None: token = get_token() + + old_model_name = model_name + model_name = get_model_name(model_name, load_in_4bit) - return None -pass + # First check if it's a normal model via AutoConfig + from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + was_disabled = are_progress_bars_disabled() + disable_progress_bars() + + autoconfig_error = None + peft_error = None + try: + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + is_model = True + except Exception as error: + autoconfig_error = str(error) + is_model = False + try: + peft_config = 
PeftConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + is_peft = True + except Exception as error: + peft_error = str(error) + is_peft = False + pass + # Both config.json and adapter_config.json should not exist! -def _get_new_mapper(): - try: - import requests - new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" - with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text - new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] - new_mapper = new_mapper\ - .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ - .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")\ - .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit") - - exec(new_mapper, globals()) - return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit - except: - return {}, {}, {} - pass -pass + # Old transformers versions check + both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + + # New transformers need to check manually. + if SUPPORTS_LLAMA32: + # Check if folder exists locally + if os.path.isdir(model_name): + exist_adapter_config = os.path.exists(os.path.join(model_name, "adapter_config.json")) + exist_config = os.path.exists(os.path.join(model_name, "config.json")) + both_exist = exist_adapter_config and exist_config + else: + files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + files = (os.path.split(x)[-1] for x in files) + if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: + both_exist = True + pass + pass + pass + # Error out if both LoRA and normal model config exists. + if both_exist: + raise RuntimeError( + "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ + "You have 2 files `config.json` and `adapter_config.json`.\n"\ + "We must only allow one config file.\n"\ + "Please separate the LoRA and base models to 2 repos." 
+ ) -def get_model_name(model_name, load_in_4bit = True): - new_model_name = __get_model_name( - model_name = model_name, - load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, - MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit, - ) - if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): - # Try checking if a new Unsloth version allows it! - NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit = _get_new_mapper() - upgraded_model_name = __get_model_name( - model_name = model_name, - load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, - MAP_TO_UNSLOTH_16bit = NEW_MAP_TO_UNSLOTH_16bit, - ) - if upgraded_model_name is not None: + elif not is_model and not is_peft: + error = autoconfig_error or peft_error + # Old transformers version + if "rope_scaling" in error.lower() and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support new RoPE scaling methods.\n"\ + f"This includes Llama 3.1. 
The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + raise RuntimeError(autoconfig_error or peft_error) + pass + + # Get base model for PEFT: + if is_peft: + # Check base model again for PEFT + model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + pass + + if not was_disabled: enable_progress_bars() + + model_type = model_config.model_type + + if model_type == "llama": + scaling_type = None + if getattr(model_config, "rope_scaling", None) is not None: + scaling_type1 = model_config.rope_scaling.get("type", None) + scaling_type2 = model_config.rope_scaling.get("rope_type", None) + scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 + pass + + if scaling_type == "llama3" and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ + f"The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + + dispatch_model = FastLlamaModel + + elif model_type == "mistral": dispatch_model = FastMistralModel + elif model_type == "gemma": + if not SUPPORTS_GEMMA: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"\ + f"The minimum required version is 4.38.\n"\ + f'Try `pip install --upgrade "transformers>=4.38"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + dispatch_model = FastGemmaModel + elif model_type == "gemma2": + if not SUPPORTS_GEMMA2: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ + f"The 
minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + # Also check for softcapping support in flash-attn which is faster! + if is_bfloat16_supported() and not HAS_FLASH_ATTENTION: + print( + "Unsloth: If you want to finetune Gemma 2, install flash-attn to make it faster!\n"\ + "To install flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + elif HAS_FLASH_ATTENTION and not HAS_FLASH_ATTENTION_SOFTCAPPING: + print( + "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ + "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ + "To update flash-attn, do the below:\n"\ + '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' + ) + + dispatch_model = FastGemma2Model + elif model_type == "qwen2": + dispatch_model = FastQwen2Model + elif model_type == "cohere": + dispatch_model = FastCohereModel + else: raise NotImplementedError( - f"Unsloth: {model_name} is not supported in your current Unsloth version! Please update Unsloth via:\n\n"\ - 'pip uninstall unsloth unsloth_zoo -y\n'\ - 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'\ - 'pip install --upgrade --no-cache-dir "git+https://github.com/unslothai/unsloth-zoo.git"\n'\ + f"Unsloth: {model_name} not supported yet!\n"\ + "Maybe you're doing vision finetuning? 
Please use FastVisionModel instead!\n"\ + "Otherwise, make an issue to https://github.com/unslothai/unsloth!", ) pass + + # Check if this is local model since the tokenizer gets overwritten + if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ + os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ + os.path.exists(os.path.join(old_model_name, "special_tokens_map.json")): + + tokenizer_name = old_model_name + else: + tokenizer_name = None + pass + + model, tokenizer = dispatch_model.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = _get_dtype(dtype), + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + fix_tokenizer = fix_tokenizer, + model_patcher = dispatch_model, + tokenizer_name = tokenizer_name, + trust_remote_code = trust_remote_code, + revision = revision if not is_peft else None, + *args, **kwargs, + ) + + if resize_model_vocab is not None: + model.resize_token_embeddings(resize_model_vocab) + pass + + # In case the model supports tagging, add the unsloth tag. + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth",]) + pass + if hasattr(tokenizer, "add_model_tags"): + tokenizer.add_model_tags(["unsloth",]) + pass + + if load_in_4bit: + # Fix up bitsandbytes config + quantization_config = \ + { + # Sometimes torch_dtype is not a string!! 
+ "bnb_4bit_compute_dtype" : model.config.to_dict()["torch_dtype"], + "bnb_4bit_quant_type" : "nf4", + "bnb_4bit_use_double_quant" : True, + "llm_int8_enable_fp32_cpu_offload" : False, + "llm_int8_has_fp16_weight" : False, + "llm_int8_skip_modules" : None, + "llm_int8_threshold" : 6.0, + "load_in_4bit" : True, + "load_in_8bit" : False, + "quant_method" : "bitsandbytes", + } + model.config.update({"quantization_config" : quantization_config}) + pass + + if is_peft: + # From https://github.com/huggingface/peft/issues/184 + # Now add PEFT adapters + model.enable_input_require_grads() + model = PeftModel.from_pretrained( + model, + old_model_name, + token = token, + revision = revision, + is_trainable = True, + trust_remote_code = trust_remote_code, + ) + # Patch it as well! + model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) + pass + return model, tokenizer pass - return new_model_name if new_model_name is not None else model_name pass -class FastLanguageModel(FastLlamaModel): +from ._utils import ( + patch_compiling_bitsandbytes, + patch_model_and_tokenizer, + prepare_model_for_kbit_training, + patch_unsloth_smart_gradient_checkpointing, + patch_compiled_autograd, + process_vision_info, + unsloth_compile_transformers, +) +from ..kernels import ( + patch_loss_functions, + post_patch_loss_function, +) + +class FastVisionModel: @staticmethod def from_pretrained( - model_name = "unsloth/llama-3-8b-bnb-4bit", - max_seq_length = None, + model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", + max_seq_length = None, # [TODO] No effect dtype = None, load_in_4bit = True, token = None, device_map = "sequential", - rope_scaling = None, - fix_tokenizer = True, + rope_scaling = None, # [TODO] No effect + fix_tokenizer = True, # [TODO] No effect trust_remote_code = False, use_gradient_checkpointing = "unsloth", - resize_model_vocab = None, + resize_model_vocab = None, # [TODO] No effect revision = None, *args, **kwargs, ): if token is None: token = 
get_token() + + patch_compiled_autograd() + patch_loss_functions(torch_compile = False) + patch_compiling_bitsandbytes() + if use_gradient_checkpointing == "unsloth": + patch_unsloth_smart_gradient_checkpointing() old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + ) + # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() @@ -412,4 +608,4 @@ def from_pretrained( pass return model, tokenizer pass -pass \ No newline at end of file +pass diff --git a/unsloth/models/loader_utils.py b/unsloth/models/loader_utils.py new file mode 100644 index 0000000000..dcb17b41dc --- /dev/null +++ b/unsloth/models/loader_utils.py @@ -0,0 +1,114 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit + +def __get_model_name( + model_name, + load_in_4bit = True, + INT_TO_FLOAT_MAPPER = None, + FLOAT_TO_INT_MAPPER = None, + MAP_TO_UNSLOTH_16bit = None, +): + model_name = str(model_name) + lower_model_name = model_name.lower() + + if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: + + model_name = INT_TO_FLOAT_MAPPER[lower_model_name] + print( + f"Unsloth: Your transformers version of {transformers_version} does not support native "\ + f"4bit loading.\nThe minimum required version is 4.37.\n"\ + f'Try `pip install --upgrade "transformers>=4.37"`\n'\ + f"to obtain the latest transformers build, then restart this session.\n"\ + f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." + ) + return model_name + + elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: + + new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ + # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." + # ) + return new_model_name + + elif not load_in_4bit and lower_model_name in MAP_TO_UNSLOTH_16bit: + + new_model_name = MAP_TO_UNSLOTH_16bit[lower_model_name] + return new_model_name + + elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: + + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ + # f"We shall load `{new_model_name}` for 4x faster loading." 
+ # ) + return new_model_name + pass + + return None +pass + + +def _get_new_mapper(): + try: + import requests + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")\ + .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit") + + exec(new_mapper, globals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit + except: + return {}, {}, {} + pass +pass + + +def get_model_name(model_name, load_in_4bit = True): + new_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit, + ) + if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): + # Try checking if a new Unsloth version allows it! + NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit = _get_new_mapper() + upgraded_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = NEW_MAP_TO_UNSLOTH_16bit, + ) + if upgraded_model_name is not None: + raise NotImplementedError( + f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ + 'pip uninstall unsloth unsloth_zoo -y\n'\ + 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'\ + 'pip install --upgrade --no-cache-dir "git+https://github.com/unslothai/unsloth-zoo.git"\n'\ + ) + pass + pass + return new_model_name if new_model_name is not None else model_name +pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 03047b8103..fc1dc8cdb0 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -484,6 +484,14 @@ "unsloth/Pixtral-12B-Base-2409", "mistralai/Pixtral-12B-Base-2409", ), + "unsloth/llava-1.5-7b-hf-bnb-4bit" : ( + "unsloth/llava-1.5-7b-hf", + "llava-hf/llava-1.5-7b-hf", + ), + "unsloth/llava-v1.6-mistral-7b-hf-bnb-4bit" : ( + "unsloth/llava-v1.6-mistral-7b-hf", + "llava-hf/llava-v1.6-mistral-7b-hf", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 0b8c08a371..ccfc261c94 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -1,58 +1,49 @@ +# Unsloth Zoo - Utilities for Unsloth # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # -# http://www.apache.org/licenses/LICENSE-2.0 +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see . + +import torch +from transformers import ( + BitsAndBytesConfig, + AutoModelForVision2Seq, + AutoProcessor, +) from .llama import * -from ..kernels import patch_layernorm, unpatch_layernorm -from ..kernels import patch_rms_layernorm, unpatch_rms_layernorm -from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm -from ._utils import patch_gradient_checkpointing - -from transformers import AutoProcessor -try: - from transformers import MllamaForConditionalGeneration -except: - raise ImportError( - "Unsloth: Please update your transformers version to 4.46.0 for Llama 3.2 support!" 
- ) -pass +from ..kernels import ( + post_patch_loss_function, +) +from peft import LoraConfig, TaskType, get_peft_model +from transformers import set_seed as transformers_set_seed +from unsloth_zoo.peft_utils import ( + get_peft_regex, + merge_and_overwrite_lora, +) -class FastVisionModel: - - def pre_patch(self): - patch_gradient_checkpointing() - patch_layernorm() - patch_rms_layernorm() - patch_llama_for_causal_lm() - pass - - def post_unpatch(self): - unpatch_layernorm() - unpatch_rms_layernorm() - unpatch_llama_for_causal_lm() - pass +class FastBaseVisionModel: @staticmethod def from_pretrained( - model_name = "llava-hf/llava-1.5-7b-hf", + model_name = "unsloth/llama-3-8b-bnb-4bit", max_seq_length = None, dtype = None, load_in_4bit = True, token = None, device_map = "sequential", - rope_scaling = None, trust_remote_code = False, + model_types = None, **kwargs, ): if trust_remote_code: @@ -67,7 +58,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0]} vision patching. Transformers = {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ @@ -81,7 +72,9 @@ def from_pretrained( pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" + model_patcher.pre_patch() get_statistics() # For debugging - we use a download counter to see if environments are not breaking if dtype is None: @@ -105,160 +98,36 @@ def from_pretrained( ) pass + kwargs.pop("attn_implementation", None); # No need since we auto call it + # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config - self.pre_patch() - model = MllamaForConditionalGeneration.from_pretrained( + model = AutoModelForVision2Seq.from_pretrained( model_name, device_map = device_map, torch_dtype = dtype, - # quantization_config = bnb_config, + # quantization_config = bnb_config, token = token, - max_position_embeddings = max_position_embeddings, trust_remote_code = trust_remote_code, - attn_implementation = "sdpa", + # attn_implementation = "sdpa", [TODO] Pixtral for eg fails **kwargs, ) - self.post_unpatch() - # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
post_check = check_nvidia() # Counteract saved tokenizers + tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoProcessor.from_pretrained( - model_name, + tokenizer_name, + padding_side = "right", + token = token, ) - model = FastVisionModel.post_patch(model) - # Patch Trainer - from transformers.trainer import Trainer - try: - if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - inner_training_loop = inspect.getsource(Trainer._inner_training_loop) - Trainer._original_training_loop = inner_training_loop - else: - inner_training_loop = Trainer._original_training_loop - except: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - pass - - if ((post_check - pre_check) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - - import transformers.trainer - items_in_trainer = dir(transformers.trainer) - good_items = [] - for item in items_in_trainer: - # TODO: Support Deepspeed - if item.startswith(("deepspeed", "xm", "met", "smp")): continue - if item in inner_training_loop: good_items.append(item) - pass - exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) - - start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] - end = inner_training_loop.find("\n\n", start) - original_debug = inner_training_loop[start:end] - spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] - front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) - - debug_info = """debug_info = \\ - f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total 
batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ - f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning(debug_info) - import subprocess, re, gc, numpy as np - a = np.array([0,]) - try: - a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) - a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) - a = np.array([int(x.decode('utf-8'))/1024 for x in a]) - except: - if not torch.cuda.is_available(): - raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') - if ((a - PRE_CHECK) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - for _ in range(3): - gc.collect() - torch.cuda.empty_cache()""" - - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace(original_debug, debug_info) - - debug_info = """n_total_devices = total_train_batch_size // \\ - args.gradient_accumulation_steps // self._train_batch_size - if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') - debug_info =""" - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) - - front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) - inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = tpu_spmd_dataloader(train_dataloader)", - "raise RuntimeError('Unsloth: TPUs are not yet supported!')" - ) - inner_training_loop = inner_training_loop.replace( - "self.accelerator.free_memory()", - 
"self.accelerator.free_memory()\n" + \ - front_spaces + "if self.is_deepspeed_enabled:"\ - "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, - ) - - check_batches = """train_dataloader = self.get_train_dataloader() - ga = args.gradient_accumulation_steps - bsz = self._train_batch_size - total_batches = bsz * ga * args.world_size - n_total_devices = total_batches // ga // bsz - if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') - divisor = n_total_devices / 1 - bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 1: - divisor = n_total_devices / 1 - ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" - check_batches = check_batches.split('\n') - check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = self.get_train_dataloader()", - check_batches, 1, - ) - inner_training_loop = inner_training_loop.replace( - "_inner_training_loop", - "_fast_inner_training_loop", 1, - ) - exec(inner_training_loop, globals()) - - Trainer._inner_training_loop = _fast_inner_training_loop - inner_training_loop = inner_training_loop.replace( - "is_torch_tpu_available()", - "False", - ) - if "n_total_devices >" not in inner_training_loop: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - pass - inner_training_loop = inner_training_loop.replace( - "is_sagemaker_mp_enabled()", - "False", - ) - exec(inner_training_loop, globals()) - Trainer._inner_training_loop = _fast_inner_training_loop - - # Save max_seq_length - model.max_seq_length = max_position_embeddings - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_position_embeddings - internal_model = internal_model.model - pass - internal_model.max_seq_length = 
max_position_embeddings + model, tokenizer = patch_tokenizer(model, tokenizer) + model = post_patch_loss_function(model) # Fix up config for transformers uploading PEFT # Not necessary anymore since we require transformers>=4.37! @@ -271,115 +140,80 @@ def from_pretrained( pass # Log Unsloth version for future fastpaths for inference - model.config.update({"unsloth_version" : __version__}) + if hasattr(model, "config"): + model.config.update({"unsloth_version" : __version__}) + pass + patch_saving_functions(model, vision = True) + patch_saving_functions(tokenizer, vision = True) - # Add save modules - patch_saving_functions(model) - Trainer._inner_training_loop = _fast_inner_training_loop + # Fix gradient accumulation + from transformers.trainer import Trainer + patch_gradient_accumulation_fix(Trainer) - # Also fix torch_dtype + # Save tokenizer for inference purposes + tokenizer.padding_side = "left" # Force inference internal_model = model while hasattr(internal_model, "model"): - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass + internal_model._saved_temp_tokenizer = tokenizer internal_model = internal_model.model pass - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass + internal_model._saved_temp_tokenizer = tokenizer return model, tokenizer pass - @staticmethod - def post_patch(model): - # Patch model - layers = 
model.model.layers - lm_head = model.get_output_embeddings().weight - - # Also patch all dtypes - BnB seems to not allocate the correct type? - # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - pass - - # Clear deleted GPU items - for _ in range(3): - gc.collect() - torch.cuda.empty_cache() - return model - pass - - @staticmethod def get_peft_model( model, - r = 16, - target_modules = "all-linear", - lora_alpha = 16, - lora_dropout = 0, - bias = "none", - layers_to_transform = None, - layers_pattern = None, + r = 16, + target_modules = None, + lora_alpha = 16, + lora_dropout = 0, + bias = "none", + finetune_vision_layers = True, + finetune_language_layers = True, + finetune_attention_modules = True, + finetune_mlp_modules = True, + layers_to_transform = None, + layers_pattern = None, use_gradient_checkpointing = True, - random_state = 3407, - max_seq_length = 2048, # not used anymore - use_rslora = False, - modules_to_save = None, - init_lora_weights = True, - loftq_config = {}, - temporary_location = "_unsloth_temporary_saved_buffers", + random_state = 3407, + max_seq_length = 2048, # not used anymore + use_rslora = False, + modules_to_save = None, + init_lora_weights = True, + loftq_config = {}, + temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): transformers_set_seed(random_state) - # Get LoRA - arguments = dict( - r = r, - lora_alpha = lora_alpha, - target_modules = target_modules, - lora_dropout = lora_dropout, - bias = bias, - layers_to_transform = layers_to_transform, - 
init_lora_weights = init_lora_weights, - # loftq_config = loftq_config, - # use_rslora = use_rslora, - modules_to_save = modules_to_save, - **kwargs, - ) - - lora_config = LoraConfig(**arguments) + if type(r) is not int: + raise TypeError(f"Unsloth: Rank of {str(r)} must be an integer.") + if r <= 0: + raise TypeError(f"Unsloth: Rank of {str(r)} must be larger than 0.") - model = _get_peft_model(model, lora_config) + if isinstance(model, PeftModelForCausalLM): + raise RuntimeError("Unsloth: You already added LoRA adapters to your model!") - model = FastVisionModel.patch_peft_model(model, use_gradient_checkpointing) + if target_modules == "all-linear": + finetune_vision_layers = True + finetune_language_layers = True + finetune_attention_modules = True + finetune_mlp_modules = True + pass + if target_modules is None: + target_modules = get_peft_regex( + model, + finetune_vision_layers = finetune_vision_layers, + finetune_language_layers = finetune_language_layers, + finetune_attention_modules = finetune_attention_modules, + finetune_mlp_modules = finetune_mlp_modules, + ) + else: + assert(type(target_modules) in (list, tuple,)) + pass # Clear deleted GPU items for _ in range(3): @@ -387,35 +221,23 @@ def get_peft_model( torch.cuda.empty_cache() pass - return model - pass - - - @staticmethod - def patch_peft_model( - model, - use_gradient_checkpointing = True, - ): - + lora_config = LoraConfig( + r = r, + lora_alpha = lora_alpha, + target_modules = target_modules, + lora_dropout = lora_dropout, + bias = bias, + task_type = TaskType.CAUSAL_LM, + ) + model = prepare_model_for_kbit_training( + model, + use_gradient_checkpointing = use_gradient_checkpointing, + ) + model = get_peft_model(model, lora_config) model = prepare_model_for_kbit_training( model, use_gradient_checkpointing = use_gradient_checkpointing, - use_reentrant = True, ) - - # Fix up config for transformers uploading PEFT - for active_adapter in model.peft_config.keys(): - # Not necessary since we requires 
transformers >= 4.37 - if False: - name = model.peft_config[active_adapter].base_model_name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.peft_config[active_adapter].base_model_name_or_path = name - pass - # Add revision to enable future fast inference paths - # [TODO] Bugs out!see https://github.com/unslothai/unsloth/issues/492 - # model.peft_config[active_adapter].revision = f"unsloth" - pass from transformers.trainer import Trainer if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": @@ -427,24 +249,6 @@ def patch_peft_model( ) pass - logger.warning_once( - f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ - f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP layers.", - ) - patch_saving_functions(model) - - # Patch cross entropy loss labels - # Fixes https://github.com/unslothai/unsloth/issues/10 - max_seq_length = model.max_seq_length - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - model.model.extra_ignored_labels = extra_ignored_labels - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length - # Patch tokenizer to pad to the right internal_model = model while hasattr(internal_model, "model"): @@ -462,6 +266,8 @@ def patch_peft_model( gc.collect() torch.cuda.empty_cache() pass + patch_saving_functions(model, vision = True) + return model pass @@ -500,6 +306,24 @@ def for_inference(model): elif dtype == "bfloat16": dtype = torch.bfloat16 pass + # Wrap model.generate + if model.generate.__name__ != "_fast_generate": + model._unwrapped_old_generate = model.generate + model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) + pass + + # Patch tokenizer to pad to the left + internal_model = model + while 
hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass + # Also disable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): embeddings = model.get_input_embeddings() @@ -520,12 +344,6 @@ def for_training(model, use_gradient_checkpointing = True): internal_model.gradient_checkpointing = use_gradient_checkpointing internal_model.training = True - # Delete all fast inference loras - for param in model.parameters(): - if hasattr(param, "_fast_lora"): - del param._fast_lora - pass - while hasattr(internal_model, "model"): internal_model = internal_model.model internal_model.gradient_checkpointing = use_gradient_checkpointing @@ -535,6 +353,24 @@ def for_training(model, use_gradient_checkpointing = True): internal_model.training = True pass + # Also revert model.generate + if hasattr(model, "_unwrapped_old_generate"): + model.generate = model._unwrapped_old_generate + del model._unwrapped_old_generate + pass + + # Patch tokenizer to pad to the right + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass + # Also re-enable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): embeddings = model.get_input_embeddings() @@ -548,3 +384,5 @@ def for_training(model, use_gradient_checkpointing = True): return model pass pass + + diff --git a/unsloth/save.py b/unsloth/save.py index b4c6b499cf..fe03368a6e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ 
-2041,8 +2041,152 @@ def unsloth_convert_lora_to_ggml_and_save_locally( print("Unsloth: Done.") print(f"Unsloth: Conversion completed! Output file: {output_file}") print("\nThis GGML making function was made by Maheswar. Ping him @Maheswar on the Unsloth Discord or on HuggingFace (@mahiatlinux) if you like this!") +pass + + +from unsloth_zoo.peft_utils import merge_and_overwrite_lora +from .loader_utils import get_model_name + +@torch.inference_mode +def unsloth_generic_save( + model, + tokenizer, + save_directory : Union[str, os.PathLike] = "unsloth_finetuned_merge", + save_method : str = "lora", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + + # Push to hub + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = "Trained with Unsloth", + private : Optional[bool] = None, + create_pr : bool = False, + revision : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", + tags : List[str] = None, + + # Our functions + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.9, +): + if token is None: token = get_token() + merge_and_overwrite_lora( + get_model_name, + create_huggingface_repo, + model, + save_location = save_directory, + push_to_hub = push_to_hub, + token = token, + upload_location = save_directory, + low_disk_space_usage = True, + private = private, + ) + return +pass + + +def unsloth_generic_save_pretrained_merged( + self, + save_directory : Union[str, os.PathLike], + tokenizer = None, + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = 
None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + tags : List[str] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.75, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. -def patch_saving_functions(model): + Choose for `save_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + """ + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.save_pretrained(...)`" + ) + pass + + arguments = dict(locals()) + arguments["model"] = self + del arguments["self"] + unsloth_generic_save(**arguments) + for _ in range(3): + gc.collect() +pass + + +def unsloth_generic_push_to_hub_merged( + self, + repo_id : str, + tokenizer = None, + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = "Trained with Unsloth", + private : Optional[bool] = None, + token : Union[bool, str, None] = None, + max_shard_size : Union[int, str, None] = "5GB", + create_pr : bool = False, + safe_serialization : bool = True, + revision : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", + tags : Optional[List[str]] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.75, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. 
+ + Choose for `save_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + """ + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.push_to_hub(...)`" + ) + pass + + arguments = dict(locals()) + arguments["model"] = self + arguments["save_directory"] = repo_id + arguments["push_to_hub"] = True + del arguments["self"] + del arguments["repo_id"] + unsloth_generic_save(**arguments) + for _ in range(3): + gc.collect() +pass + + +def not_implemented_save(*args, **kwargs): + raise NotImplementedError("Unsloth: Sorry GGUF is currently not supported for vision models!") +pass + + +def patch_saving_functions(model, vision = False): import inspect import types from typing import Callable, Optional, Union, List @@ -2131,14 +2275,22 @@ def patch_saving_functions(model): pass # Add saving methods to top level model - if hasattr(model, "config"): - # Counteract tokenizers - model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) - model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) - model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) - model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) - model.push_to_hub_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_push_to_hub, model) - model.save_pretrained_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_save_locally, model) + if not vision: + if hasattr(model, "config"): + # Counteract tokenizers + model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) + model.push_to_hub_gguf = 
types.MethodType(unsloth_push_to_hub_gguf, model) + model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) + model.push_to_hub_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_push_to_hub, model) + model.save_pretrained_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_save_locally, model) + pass + else: + # Vision only 1 option + model.push_to_hub_merged = types.MethodType(unsloth_generic_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_generic_save_pretrained_merged, model) + model.push_to_hub_gguf = types.MethodType(not_implemented_save, model) + model.save_pretrained_gguf = types.MethodType(not_implemented_save, model) pass return model pass From 8a44b6c3ce37209b271674b916816a711049befd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 01:49:46 -0800 Subject: [PATCH 0798/1088] Update trainer.py --- unsloth/trainer.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 14fd1631df..012be4b0cb 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -21,7 +21,12 @@ import inspect from trl import SFTTrainer from . 
import is_bfloat16_supported -from unsloth_zoo.training_utils import unsloth_train as _unsloth_train +from unsloth_zoo.training_utils import ( + unsloth_train as _unsloth_train, +) +from unsloth_zoo.vision_utils import ( + UnslothVisionDataCollator, +) from packaging.version import Version import dataclasses @@ -30,6 +35,7 @@ "UnslothTrainer", "unsloth_train", "_patch_trl_trainer", + "UnslothVisionDataCollator", ] # Unsloth gradient accumulation fix: From d5b84088ce4bc81b09a4b9cf46d6683bdcda2731 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 01:51:46 -0800 Subject: [PATCH 0799/1088] Update save.py --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index fe03368a6e..4bcf433530 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2045,7 +2045,7 @@ def unsloth_convert_lora_to_ggml_and_save_locally( from unsloth_zoo.peft_utils import merge_and_overwrite_lora -from .loader_utils import get_model_name +from .models.loader_utils import get_model_name @torch.inference_mode def unsloth_generic_save( From a5d4084686ec795aa446123be50405b8ed76cb5d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:12:45 -0800 Subject: [PATCH 0800/1088] FastBaseVisionModel --- unsloth/models/__init__.py | 2 +- unsloth/models/loader.py | 77 +++----------------------------------- unsloth/models/vision.py | 33 +++++++++++++++- 3 files changed, 37 insertions(+), 75 deletions(-) diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index e67a9e5fad..3230cdc207 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .loader import FastLanguageModel +from .loader import FastLanguageModel, FastVisionModel from .llama import FastLlamaModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index b561239313..6eb8ed7863 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -329,8 +329,10 @@ def from_pretrained( patch_loss_functions, post_patch_loss_function, ) +from .vision import FastBaseVisionModel -class FastVisionModel: + +class FastVisionModel(FastBaseVisionModel): @staticmethod def from_pretrained( model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", @@ -469,71 +471,6 @@ def from_pretrained( if not was_disabled: enable_progress_bars() - model_type = model_config.model_type - - if model_type == "llama": - scaling_type = None - if getattr(model_config, "rope_scaling", None) is not None: - scaling_type1 = model_config.rope_scaling.get("type", None) - scaling_type2 = model_config.rope_scaling.get("rope_type", None) - scaling_type = scaling_type1 if scaling_type1 is not None else scaling_type2 - pass - - if scaling_type == "llama3" and not SUPPORTS_LLAMA31: - raise ImportError( - f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"\ - f"The minimum required version is 4.43.2\n"\ - f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ - f"to obtain the latest transformers build, then restart this session."\ - ) - - dispatch_model = FastLlamaModel - - elif model_type == "mistral": dispatch_model = FastMistralModel - elif model_type == "gemma": - if not SUPPORTS_GEMMA: - raise ImportError( - f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"\ - f"The minimum required version is 4.38.\n"\ - f'Try `pip install --upgrade "transformers>=4.38"`\n'\ - f"to obtain the latest transformers build, then restart this session."\ - ) - dispatch_model = FastGemmaModel - elif model_type == "gemma2": 
- if not SUPPORTS_GEMMA2: - raise ImportError( - f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ - f"The minimum required version is 4.42.3.\n"\ - f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ - f"to obtain the latest transformers build, then restart this session."\ - ) - # Also check for softcapping support in flash-attn which is faster! - if is_bfloat16_supported() and not HAS_FLASH_ATTENTION: - print( - "Unsloth: If you want to finetune Gemma 2, install flash-attn to make it faster!\n"\ - "To install flash-attn, do the below:\n"\ - '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' - ) - elif HAS_FLASH_ATTENTION and not HAS_FLASH_ATTENTION_SOFTCAPPING: - print( - "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"\ - "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"\ - "To update flash-attn, do the below:\n"\ - '\npip install --no-deps --upgrade "flash-attn>=2.6.3"' - ) - - dispatch_model = FastGemma2Model - elif model_type == "qwen2": - dispatch_model = FastQwen2Model - elif model_type == "cohere": - dispatch_model = FastCohereModel - else: - raise NotImplementedError( - f"Unsloth: {model_name} not supported yet!\n"\ - "Make an issue to https://github.com/unslothai/unsloth!", - ) - pass - # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ @@ -544,17 +481,13 @@ def from_pretrained( tokenizer_name = None pass - model, tokenizer = dispatch_model.from_pretrained( + model, tokenizer = FastBaseVisionModel.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, dtype = _get_dtype(dtype), load_in_4bit = load_in_4bit, token = token, device_map = device_map, - rope_scaling = rope_scaling, - fix_tokenizer = fix_tokenizer, - model_patcher = 
dispatch_model, - tokenizer_name = tokenizer_name, trust_remote_code = trust_remote_code, revision = revision if not is_peft else None, *args, **kwargs, @@ -604,7 +537,7 @@ def from_pretrained( trust_remote_code = trust_remote_code, ) # Patch it as well! - model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) pass return model, tokenizer pass diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index ccfc261c94..a389639a63 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -31,6 +31,10 @@ merge_and_overwrite_lora, ) +__all__ = [ + "FastBaseVisionModel", +] + class FastBaseVisionModel: @@ -234,9 +238,35 @@ def get_peft_model( use_gradient_checkpointing = use_gradient_checkpointing, ) model = get_peft_model(model, lora_config) + + model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) + + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass + patch_saving_functions(model, vision = True) + + return model + pass + + + @staticmethod + def patch_peft_model( + model, + use_gradient_checkpointing = True, + ): + if not isinstance(model, PeftModelForCausalLM): + raise TypeError( + "Unsloth: Your model needs to call `.get_peft_model` first!" + ) + pass + model = prepare_model_for_kbit_training( model, use_gradient_checkpointing = use_gradient_checkpointing, + use_reentrant = True, ) from transformers.trainer import Trainer @@ -248,6 +278,7 @@ def get_peft_model( 'Thank you for your understanding and we appreciate it immensely!' 
) pass + patch_saving_functions(model, vision = True) # Patch tokenizer to pad to the right internal_model = model @@ -266,8 +297,6 @@ def get_peft_model( gc.collect() torch.cuda.empty_cache() pass - patch_saving_functions(model, vision = True) - return model pass From 7f5a9a7c636f5ef76c849162c5c63f8a76a23412 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:15:43 -0800 Subject: [PATCH 0801/1088] Update loader_utils.py --- unsloth/models/loader_utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/unsloth/models/loader_utils.py b/unsloth/models/loader_utils.py index dcb17b41dc..b778b7e95b 100644 --- a/unsloth/models/loader_utils.py +++ b/unsloth/models/loader_utils.py @@ -13,6 +13,12 @@ # limitations under the License. from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit +# https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! +from packaging.version import Version +from transformers import __version__ as transformers_version +transformers_version = Version(transformers_version) +SUPPORTS_FOURBIT = transformers_version >= Version("4.37") + def __get_model_name( model_name, From d160618c96663bd82bcf8303debb0fba03ba4d44 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:17:20 -0800 Subject: [PATCH 0802/1088] Update vision.py --- unsloth/models/vision.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index a389639a63..1cfbb29bb3 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -24,6 +24,7 @@ from ..kernels import ( post_patch_loss_function, ) +from ._utils import __version__ from peft import LoraConfig, TaskType, get_peft_model from transformers import set_seed as transformers_set_seed from unsloth_zoo.peft_utils import ( @@ -250,7 +251,7 @@ def get_peft_model( return model pass - + @staticmethod def patch_peft_model( From 2420736e3c051d641b20043de38be295ca4b83d4 Mon Sep 17 
00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:18:59 -0800 Subject: [PATCH 0803/1088] Update loader.py --- unsloth/models/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 6eb8ed7863..6d388900de 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -490,6 +490,7 @@ def from_pretrained( device_map = device_map, trust_remote_code = trust_remote_code, revision = revision if not is_peft else None, + model_types = model_types, *args, **kwargs, ) From 0747078bf7685577a583b78bac103b5aaa954ddc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:20:47 -0800 Subject: [PATCH 0804/1088] Update vision.py --- unsloth/models/vision.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 1cfbb29bb3..57a2153a34 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -78,8 +78,7 @@ def from_pretrained( # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" - - model_patcher.pre_patch() + get_statistics() # For debugging - we use a download counter to see if environments are not breaking if dtype is None: From 1f32b23b67dfbe9db27b2db96a409bd382a94682 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:23:11 -0800 Subject: [PATCH 0805/1088] Update loader.py --- unsloth/models/loader.py | 48 +++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 6d388900de..bc8dca0ab2 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -21,7 +21,7 @@ from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel from .loader_utils import get_model_name -import os +import os, contextlib, sys try: from huggingface_hub.utils import get_token except: @@ -360,28 
+360,30 @@ def from_pretrained( old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - ) + with contextlib.redirect_stdout(open(os.devnull, "w")): + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + ) + pass # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled From a45e564d61def0a3b362baa72bfb8404910f454d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:23:35 -0800 Subject: [PATCH 0806/1088] Update vision.py --- unsloth/models/vision.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 57a2153a34..a17c90626c 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -63,7 +63,7 @@ def from_pretrained( max_memory = 
round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_types[0]} vision patching. Transformers = {transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers = {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ @@ -78,7 +78,7 @@ def from_pretrained( # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" - + get_statistics() # For debugging - we use a download counter to see if environments are not breaking if dtype is None: From 767a31fd9d2120ac525dd4f9b7d2f8415c726e03 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:27:07 -0800 Subject: [PATCH 0807/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 460b23bd0d..62881db09b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -147,6 +147,14 @@ def filter(self, x): return not (self.text in x.getMessage()) except: pass +# The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function. 
+try: + from accelerate.utils.modeling import logger as accelerate_utils_modeling_logger + accelerate_utils_modeling_logger.addFilter(HideLoggingMessage("The model weights are not tied")) + del accelerate_utils_modeling_logger +except: + pass + # ============================================= # ============================================= From 1ad1b46eaba26018de8db0a9fcc7a99f9f913100 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:28:00 -0800 Subject: [PATCH 0808/1088] tokenizer_name --- unsloth/models/loader.py | 1 + unsloth/models/vision.py | 1 + 2 files changed, 2 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index bc8dca0ab2..d81348e536 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -493,6 +493,7 @@ def from_pretrained( trust_remote_code = trust_remote_code, revision = revision if not is_peft else None, model_types = model_types, + tokenizer_name = tokenizer_name, *args, **kwargs, ) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index a17c90626c..f088797a8f 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -49,6 +49,7 @@ def from_pretrained( device_map = "sequential", trust_remote_code = False, model_types = None, + tokenizer_name = None, **kwargs, ): if trust_remote_code: From 26f23379de32c123d0ec3be88e3b2b42f6c3bb87 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 02:29:24 -0800 Subject: [PATCH 0809/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d81348e536..232fe6acff 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -352,7 +352,6 @@ def from_pretrained( if token is None: token = get_token() patch_compiled_autograd() - patch_loss_functions(torch_compile = False) patch_compiling_bitsandbytes() if use_gradient_checkpointing == "unsloth": patch_unsloth_smart_gradient_checkpointing() 
@@ -361,6 +360,7 @@ def from_pretrained( model_name = get_model_name(model_name, load_in_4bit) with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, sdpa_dynamic_mask = True, From 5ab4b604be3ae76f1d915b2d064e3cb390a827f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 03:05:13 -0800 Subject: [PATCH 0810/1088] Update vision.py --- unsloth/models/vision.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index f088797a8f..7d71bbf988 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -151,10 +151,6 @@ def from_pretrained( patch_saving_functions(model, vision = True) patch_saving_functions(tokenizer, vision = True) - # Fix gradient accumulation - from transformers.trainer import Trainer - patch_gradient_accumulation_fix(Trainer) - # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference internal_model = model From fc7d7470dabe51fa25d8de7b49c7de071fe5c96b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 03:18:31 -0800 Subject: [PATCH 0811/1088] Update save.py --- unsloth/save.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 4bcf433530..b819469ff0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2076,7 +2076,8 @@ def unsloth_generic_save( temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.9, ): - if token is None: token = get_token() + if token is None and push_to_hub: token = get_token() + merge_and_overwrite_lora( get_model_name, create_huggingface_repo, From e0b14fab34282bf6acb07ae271ceff7861dcee1e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 03:47:57 -0800 Subject: [PATCH 0812/1088] Update save.py --- unsloth/save.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/unsloth/save.py b/unsloth/save.py index b819469ff0..b503b2b47a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2077,7 +2077,7 @@ def unsloth_generic_save( maximum_memory_usage : float = 0.9, ): if token is None and push_to_hub: token = get_token() - + merge_and_overwrite_lora( get_model_name, create_huggingface_repo, @@ -2085,7 +2085,7 @@ def unsloth_generic_save( save_location = save_directory, push_to_hub = push_to_hub, token = token, - upload_location = save_directory, + upload_location = save_directory if push_to_hub else None, low_disk_space_usage = True, private = private, ) From 677cf9f227558dab8949d91330bf5216b4eafad9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 03:54:09 -0800 Subject: [PATCH 0813/1088] Update vision.py --- unsloth/models/vision.py | 51 +++++++++++++--------------------------- 1 file changed, 16 insertions(+), 35 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 7d71bbf988..8d0f63c44d 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -300,33 +300,17 @@ def patch_peft_model( @staticmethod def for_inference(model): - # if model.config.model_type == "qwen2": - # FastLlamaModel.for_training(model) - # return - # pass - - internal_model = model - internal_model.gradient_checkpointing = False - internal_model.training = False - - while hasattr(internal_model, "model"): - internal_model = internal_model.model - internal_model.gradient_checkpointing = False - internal_model.training = False - pass - if hasattr(internal_model, "training"): - internal_model.training = False + model.gradient_checkpointing = False + model.training = False + + for name, module in model.named_modules(): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = False + if hasattr(module, "training"): + module.training = False pass - # Also check if lm_head / embeddings are trained - internal_model = model - while not hasattr(internal_model, "lm_head"): - 
internal_model = internal_model.model - pass - lm_head = internal_model.lm_head.weight - device_type = lm_head.device.type dtype = model.config.torch_dtype - if type(dtype) is str: if dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 @@ -366,17 +350,14 @@ def for_inference(model): @staticmethod def for_training(model, use_gradient_checkpointing = True): - internal_model = model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True - - while hasattr(internal_model, "model"): - internal_model = internal_model.model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True - pass - if hasattr(internal_model, "training"): - internal_model.training = True + model.gradient_checkpointing = use_gradient_checkpointing + model.training = True + + for name, module in model.named_modules(): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = use_gradient_checkpointing + if hasattr(module, "training"): + module.training = True pass # Also revert model.generate From 8ab5dcb99e349271c3b6afe770001eac6a606af6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 03:58:18 -0800 Subject: [PATCH 0814/1088] Update vision.py --- unsloth/models/vision.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 8d0f63c44d..237673f10c 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -36,6 +36,32 @@ "FastBaseVisionModel", ] +def _wrap_fast_inference(generate, device_type, dtype, model): + # Wraps inference with bfloat16 / float16 + @torch.inference_mode + def _fast_generate(*args, **kwargs): + # For num_logits_to_keep + kwargs["num_logits_to_keep"] = 1 + + # Remove token_type_ids + kwargs.pop("token_type_ids", None) + + # Check pad_token + model_eos_token_id = getattr(model.config, "eos_token_id", None) + if 
model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): + model_eos_token_id = model_eos_token_id[0] + + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + + # Autocasted + with torch.autocast(device_type = device_type, dtype = dtype): + output = generate(*args, **kwargs) + pass + return output + pass + return _fast_generate +pass + class FastBaseVisionModel: From adaf6eef296be353750e938894d8c6f1e60298e0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 04:01:17 -0800 Subject: [PATCH 0815/1088] Update vision.py --- unsloth/models/vision.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 237673f10c..87a3846fdb 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -54,7 +54,7 @@ def _fast_generate(*args, **kwargs): kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) # Autocasted - with torch.autocast(device_type = device_type, dtype = dtype): + with torch.autocast(device_type = model.device.type, dtype = dtype): output = generate(*args, **kwargs) pass return output From 1a548f32ddddb2293752a258e75bd106ab85a732 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 04:05:05 -0800 Subject: [PATCH 0816/1088] Update vision.py --- unsloth/models/vision.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 87a3846fdb..5cefab90e1 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -54,7 +54,7 @@ def _fast_generate(*args, **kwargs): kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) # Autocasted - with torch.autocast(device_type = model.device.type, dtype = dtype): + with torch.autocast(device_type = device_type, dtype = dtype): output = generate(*args, **kwargs) pass return output @@ -341,6 +341,7 @@ def for_inference(model): if dtype == "float16": dtype = torch.float16 elif dtype 
== "bfloat16": dtype = torch.bfloat16 pass + device_type = model.device.type # Wrap model.generate if model.generate.__name__ != "_fast_generate": From 5886ecb246dcb54308d944aa03f3274695cb142f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 04:10:24 -0800 Subject: [PATCH 0817/1088] Update vision.py --- unsloth/models/vision.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 5cefab90e1..e741c1c628 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -53,6 +53,10 @@ def _fast_generate(*args, **kwargs): kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + if "pixel_values" in kwargs: + kwargs["pixel_values"] = kwargs["pixel_values"].to(model.dtype) + pass + # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): output = generate(*args, **kwargs) From a98fc9c57cf19b3da4dfd803e323ea1334ef1c5d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 04:10:43 -0800 Subject: [PATCH 0818/1088] Update vision.py --- unsloth/models/vision.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index e741c1c628..d083144651 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -53,9 +53,10 @@ def _fast_generate(*args, **kwargs): kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) - if "pixel_values" in kwargs: + try: kwargs["pixel_values"] = kwargs["pixel_values"].to(model.dtype) - pass + except: + pass # Autocasted with torch.autocast(device_type = device_type, dtype = dtype): From 535e89958e58ff3c2aa19b0fd5373e2f0eebaebe Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 04:13:54 -0800 Subject: [PATCH 0819/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 62881db09b..ee85ba3c36 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -155,6 +155,14 @@ def filter(self, x): return not (self.text in x.getMessage()) except: pass +# Setting `pad_token_id` to `eos_token_id` +try: + from transformers.generation.utils import logger as transformers_generation_utils_logger + transformers_generation_utils_logger.addFilter(HideLoggingMessage("Setting `pad_token_id` to `eos_token_id`")) + del transformers_generation_utils_logger +except: + pass + # ============================================= # ============================================= From 8b57e17c8db14eeec4edcf7cd24b33abeaaabb86 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 05:01:44 -0800 Subject: [PATCH 0820/1088] Vision support (#1315) * Fix pad token * Update llama.py * Typo * ignored labels * Revert "ignored labels" This reverts commit 9d07be077b3355b55dcf93098d0afe2591e67750. * More patching * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Feat/all tmp (#1219) * Update save.py Check whether path is in /tmp dir for Kaggle environment * Update save.py Move temporary_location to /tmp in Kaggle * Enhance Kaggle environment support in save and tokenizer utilities --------- Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 * Bug fixes * Update pyproject.toml * Update _utils.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Tied weights * Revert 
"Tied weights" This reverts commit 8090b7c01aaceecac4263f9af2737fdb76ebd458. * Tied weights * Utils * CE Loss patching * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update 
rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py --------- 
Co-authored-by: dendarrion <37800703+dendarrion@users.noreply.github.com> Co-authored-by: Erland366 Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana --- unsloth/__init__.py | 2 +- unsloth/kernels/__init__.py | 1 + unsloth/kernels/fast_lora.py | 78 +++++ unsloth/kernels/rms_layernorm.py | 10 +- unsloth/models/__init__.py | 2 +- unsloth/models/_utils.py | 209 +++++++++--- unsloth/models/gemma2.py | 11 +- unsloth/models/llama.py | 50 ++- unsloth/models/loader.py | 340 ++++++++++++++------ unsloth/models/loader_utils.py | 120 +++++++ unsloth/models/mapper.py | 48 ++- unsloth/models/vision.py | 529 ++++++++++++------------------- unsloth/save.py | 171 +++++++++- unsloth/trainer.py | 31 +- 14 files changed, 1083 insertions(+), 519 deletions(-) create mode 100644 unsloth/models/loader_utils.py diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 745b210208..980425e1f1 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -55,7 +55,7 @@ pass # Reduce VRAM usage by reducing fragmentation -os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,roundup_power2_divisions:[64:128,256:64,>:32]" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 82e7641693..ef5fa5da70 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -42,6 +42,7 @@ apply_lora_mlp_geglu_approx, apply_lora_qkv, apply_lora_o, + fast_lora_forward, ) from .utils import fast_dequantize, fast_gemv, QUANT_STATE, fast_linear_forward, matmul_lora diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 2177b43b9e..c2b7929a29 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -410,3 +410,81 @@ def 
apply_lora_o(self, X): O = LoRA_W.apply(X, OW, OW_quant, OA, OB, OS) return O pass + + +IDENTITY_DROPOUT = torch.nn.Identity +@torch._disable_dynamo +def fast_lora_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + raise NotImplementedError( + "Unsloth: Currently not supported yet - reshaping done incorrectly" + ) + self._check_forward_args(x, *args, **kwargs) + adapter_names = kwargs.pop("adapter_names", None) + + if self.disable_adapters: + if self.merged: + self.unmerge() + result = self.base_layer(x, *args, **kwargs) + elif adapter_names is not None: + result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) + elif self.merged: + result = self.base_layer(x, *args, **kwargs) + else: + # Fastpath + if len(self.active_adapters) == 1: + active_adapter = self.active_adapters[0] + if active_adapter not in self.lora_A.keys(): return self.base_layer(x, *args, **kwargs) + + dropout = self.lora_dropout[active_adapter] + if isinstance(dropout, IDENTITY_DROPOUT) and not self.use_dora[active_adapter]: + lora_A = self.lora_A[active_adapter].weight + lora_B = self.lora_B[active_adapter].weight + scaling = self.scaling[active_adapter] + W = self.base_layer.weight + return LoRA_W.apply(x, W, QUANT_STATE(W), lora_A, lora_B, scaling) + pass + pass + + result = self.base_layer(x, *args, **kwargs) + # As per Tim Dettmers, for 4bit, we need to defensively clone here. + # The reason is that in some cases, an error can occur that backprop + # does not work on a manipulated view. This issue may be solved with + # newer PyTorch versions but this would need extensive testing to be + # sure. 
+ result = result.clone() + + for active_adapter in self.active_adapters: + if active_adapter not in self.lora_A.keys(): + continue + lora_A = self.lora_A[active_adapter] + lora_B = self.lora_B[active_adapter] + dropout = self.lora_dropout[active_adapter] + scaling = self.scaling[active_adapter] + + requires_conversion = not torch.is_autocast_enabled() + if requires_conversion: + expected_dtype = result.dtype + x = x.to(lora_A.weight.dtype) + + if not self.use_dora[active_adapter]: + result = result + lora_B(lora_A(dropout(x))) * scaling + else: + if isinstance(dropout, torch.nn.Identity) or not self.training: + base_result = result + else: + x = dropout(x) + base_result = None + + result = result + self.lora_magnitude_vector[active_adapter]( + x, + lora_A=lora_A, + lora_B=lora_B, + scaling=scaling, + base_layer=self.get_base_layer(), + base_result=base_result, + ) + if requires_conversion: + result = result.to(expected_dtype) + + return result +pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 4b22f8c3e5..b74d636c63 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -57,6 +57,7 @@ def _rms_layernorm_forward( @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, + dX, dX_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, @@ -78,6 +79,9 @@ def _rms_layernorm_backward( X += row_idx * X_row_stride r += row_idx * r_row_stride + if GEMMA: dX += row_idx * dY_row_stride + else: dX = dY + dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) @@ -91,7 +95,7 @@ def _rms_layernorm_backward( rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) - tl.store(dY + col_offsets, output, mask = mask) + tl.store(dX + col_offsets, output, mask = mask) pass @@ -172,9 
+176,11 @@ def backward(ctx, dY : torch.Tensor): n_cols : int n_rows, n_cols = dY.shape # dW = X + dX = torch.empty_like(dY, device = "cuda:0") if ctx.GEMMA else dY _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), + dX, dX.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), @@ -184,7 +190,7 @@ def backward(ctx, dY : torch.Tensor): BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) - dX = dY.view(*shape) + dX = dX.view(*shape) return dX, None, None, None pass pass diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index e67a9e5fad..3230cdc207 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .loader import FastLanguageModel +from .loader import FastLanguageModel, FastVisionModel from .llama import FastLlamaModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index daa81d97ac..ee85ba3c36 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.7" +__version__ = "2024.11.8" __all__ = [ "prepare_model_for_kbit_training", @@ -52,6 +52,17 @@ "unpatch_unsloth_gradient_checkpointing", "patch_gradient_checkpointing", "unpatch_gradient_checkpointing", + + "HAS_CUT_CROSS_ENTROPY", + "fused_linear_cross_entropy", + "patch_unsloth_smart_gradient_checkpointing", + "unpatch_unsloth_smart_gradient_checkpointing", + "create_gradient_checkpointing_buffer", + + "patch_compiled_autograd", + "process_vision_info", + "unsloth_compile_transformers", + "patch_fast_lora", ] import torch @@ -70,6 +81,7 @@ patch_layernorm, patch_torch_compile, patch_model_and_tokenizer, + patch_compiled_autograd, ) from unsloth_zoo.gradient_checkpointing import ( Unsloth_Offloaded_Gradient_Checkpointer, @@ -81,6 +93,21 @@ unsloth_gradient_checkpoint, patch_gradient_checkpointing, unpatch_gradient_checkpointing, + + patch_unsloth_smart_gradient_checkpointing, + unpatch_unsloth_smart_gradient_checkpointing, + create_gradient_checkpointing_buffer, +) +from unsloth_zoo.loss_utils import ( + HAS_CUT_CROSS_ENTROPY, + fused_linear_cross_entropy, +) +from unsloth_zoo.vision_utils import ( + process_vision_info, +) +from unsloth_zoo.compiler import ( + get_transformers_model_type, + unsloth_compile_transformers as _unsloth_compile_transformers, ) # ============================================= @@ -120,6 +147,22 @@ def filter(self, x): return not (self.text in x.getMessage()) except: pass +# The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function. 
+try: + from accelerate.utils.modeling import logger as accelerate_utils_modeling_logger + accelerate_utils_modeling_logger.addFilter(HideLoggingMessage("The model weights are not tied")) + del accelerate_utils_modeling_logger +except: + pass + +# Setting `pad_token_id` to `eos_token_id` +try: + from transformers.generation.utils import logger as transformers_generation_utils_logger + transformers_generation_utils_logger.addFilter(HideLoggingMessage("Setting `pad_token_id` to `eos_token_id`")) + del transformers_generation_utils_logger +except: + pass + # ============================================= # ============================================= @@ -282,54 +325,60 @@ def _is_openai_available(): return False # ============================================= # Get Xformers -from xformers import __version__ as xformers_version -# Temporarily disable 0.0.27 and higher - inference issues -if False: #Version(xformers_version) >= Version("0.0.27"): - raise ImportError( - "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ - "then press Disconnect Runtime and then Restart it.\n"\ - "\n"\ - "%%capture\n" - "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" - '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' - '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ - '\n'\ - f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ - 'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"' - ) -pass +try: + from xformers import __version__ as xformers_version + # Temporarily disable 0.0.27 and higher - inference issues + if False: #Version(xformers_version) >= Version("0.0.27"): + raise ImportError( + "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "\ + "then press Disconnect Runtime and then Restart it.\n"\ + "\n"\ + 
"%%capture\n" + "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n" + '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n' + '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'\ + '\n'\ + f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"\ + 'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"' + ) + pass -if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.24 for torch = {torch_version}." - ) -elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers < 0.0.26 for torch = {torch_version}." - ) -elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) > Version("0.0.27"): - raise ImportError( - f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ - f"Please install xformers <= 0.0.27 for torch = {torch_version}." - ) -pass + if Version(torch_version) < Version("2.2.0") and Version(xformers_version) >= Version("0.0.24"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.24 for torch = {torch_version}." + ) + elif Version(torch_version) < Version("2.3.0") and Version(xformers_version) >= Version("0.0.26"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers < 0.0.26 for torch = {torch_version}." 
+ ) + elif Version(torch_version) < Version("2.4.0") and Version(xformers_version) > Version("0.0.27"): + raise ImportError( + f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"\ + f"Please install xformers <= 0.0.27 for torch = {torch_version}." + ) + pass -from xformers._cpp_lib import _register_extensions -try: - _register_extensions() # Check if C++ modules are loaded correctly -except Exception as error: - raise ImportError( - "Unsloth: Xformers was not installed correctly.\n"\ - "Please install xformers separately first.\n"\ - "Then confirm if it's correctly installed by running:\n"\ - "python -m xformers.info\n\n" - "Longer error message:\n" + str(error) - ) + from xformers._cpp_lib import _register_extensions + try: + _register_extensions() # Check if C++ modules are loaded correctly + except Exception as error: + raise ImportError( + "Unsloth: Xformers was not installed correctly.\n"\ + "Please install xformers separately first.\n"\ + "Then confirm if it's correctly installed by running:\n"\ + "python -m xformers.info\n\n" + "Longer error message:\n" + str(error) + ) + pass + import xformers.ops.fmha as xformers + xformers_attention = xformers.memory_efficient_attention +except: + xformers = None + xformers_attention = None + xformers_version = None pass -import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention # Check TRL version from trl import __version__ as trl_version @@ -658,7 +707,7 @@ def get_statistics(): ) def _prepare_backend( - self, cpu: bool = False, sagemaker_dp = False, backend: str = None, + self, cpu = False, sagemaker_dp = False, backend: str = None, ) -> tuple[str, DistributedType]: return None, DistributedType.NO pass @@ -1047,3 +1096,69 @@ def patch_tokenizer(model, tokenizer): model.config.update({"unsloth_version" : __version__}) return model, tokenizer pass + + +def patch_fast_lora(): + import peft.tuners.lora.bnb + peft.tuners.lora.bnb.Linear4bit.forward = 
fast_lora_forward +pass + + +def unsloth_compile_transformers( + model_name, + token = None, + revision = None, + trust_remote_code = False, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, +): + if disable: return + model_types = get_transformers_model_type( + model_name = model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + for model_type in model_types: + _unsloth_compile_transformers( + model_type, + sdpa_dynamic_mask = sdpa_dynamic_mask, + sdpa_bool_masks = sdpa_bool_masks, + sdpa_gqa_replace = sdpa_gqa_replace, + sdpa_dynamic_compile = sdpa_dynamic_compile, + compile_attention = compile_attention, + disable_causal_masks = disable_causal_masks, + compile_torch_modules = compile_torch_modules, + compile_custom_modules = compile_custom_modules, + compile_function_calls = compile_function_calls, + fuse_lm_head = fuse_lm_head, + gradient_checkpointing = gradient_checkpointing, + manual_replacements = manual_replacements, + epilogue_fusion = epilogue_fusion, + max_autotune = max_autotune, + shape_padding = shape_padding, + cudagraphs = cudagraphs, + debug = debug, + import_from_cache = import_from_cache, + disable = disable, + ) + pass + return model_types +pass diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 4eb9d64313..62ecb9690f 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -60,8 +60,7 @@ from flash_attn import flash_attn_func # [TODO] We must randomnly use torch.compile? 
-# I checked the gradients and formulas and I'm sure it's correct. -# I'm stumped :( +# Gemma 2 uses double RMS Layernorms, so the backward passes should not overwrite the gradients! @torch.compile(fullgraph = False, dynamic = True, options = torch_compile_options) def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): old_dtype = X.dtype @@ -207,7 +206,7 @@ def Gemma2DecoderLayer_fast_forward( hidden_states += residual else: residual = hidden_states - hidden_states = fast_rms_layernorm_gemma2_compiled(self.input_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states, gemma = True) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, causal_mask=causal_mask, @@ -218,14 +217,14 @@ def Gemma2DecoderLayer_fast_forward( use_cache=use_cache, padding_mask=padding_mask, ) - hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_attention_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states, gemma = True) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states - hidden_states = fast_rms_layernorm_gemma2_compiled(self. pre_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self. 
pre_feedforward_layernorm, hidden_states, gemma = True) hidden_states = self.mlp(hidden_states) - hidden_states = fast_rms_layernorm_gemma2_compiled(self.post_feedforward_layernorm, hidden_states, gemma = True) + hidden_states = fast_rms_layernorm(self.post_feedforward_layernorm, hidden_states, gemma = True) hidden_states = residual + hidden_states pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 47a57024a2..0256fc1830 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -719,25 +719,33 @@ def LlamaModel_fast_forward( pass # Gemma2 has alternating SWA and global attn + use_static_mask = True + dynamic_SWA_mask = None + dynamic_GA_mask = None if IS_GEMMA2: if HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None: self.SWA_mask = True self.GA_mask = False elif attention_mask is not None: - self.SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + + # Fixes https://github.com/unslothai/unsloth/issues/853 + # Unsloth needs a 2D mask, not a [2, 1, n, n] mask! + dynamic_SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window = self.config.sliding_window, - ) - self.GA_mask = _prepare_4d_causal_attention_mask_for_sdpa( + )[0][0] + dynamic_GA_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window = None, - ) + )[0][0] + use_static_mask = False + elif not hasattr(self, "SWA_mask"): if HAS_FLEX_ATTENTION: # Use Flex Attention instead! @@ -772,7 +780,12 @@ def LlamaModel_fast_forward( past_key_value = past_key_values[idx] if past_key_values is not None else None mask = causal_mask - if IS_GEMMA2: mask = self.SWA_mask if (idx % 2 == 0) else self.GA_mask + if IS_GEMMA2: + if (idx % 2 == 0): + mask = self.SWA_mask if use_static_mask else dynamic_SWA_mask + else: + mask = self. 
GA_mask if use_static_mask else dynamic_GA_mask + pass if offloaded_gradient_checkpointing: hidden_states = Unsloth_Offloaded_Gradient_Checkpointer.apply( @@ -955,14 +968,39 @@ def _CausalLM_fast_forward( ) pass hidden_states = outputs[0] + bsz, q_len, hd = hidden_states.shape lm_head = self.lm_head.weight + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: + if HAS_CUT_CROSS_ENTROPY and labels is not None: + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + loss = fused_linear_cross_entropy( + hidden_states = hidden_states, + lm_weight = lm_head, + labels = labels, + num_items_in_batch = n_items, + logit_softcapping = logit_softcapping, + ) + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=None, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass @@ -974,8 +1012,6 @@ def _CausalLM_fast_forward( pass loss = None - logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) - logit_scaling = getattr(self.config, "logit_scale", 0) if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 7a6322d248..232fe6acff 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -20,8 +20,8 @@ from transformers import AutoConfig from transformers import __version__ as transformers_version from peft import PeftConfig, PeftModel -from 
.mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit -import os +from .loader_utils import get_model_name +import os, contextlib, sys try: from huggingface_hub.utils import get_token except: @@ -63,105 +63,6 @@ def _get_dtype(dtype): pass -def __get_model_name( - model_name, - load_in_4bit = True, - INT_TO_FLOAT_MAPPER = None, - FLOAT_TO_INT_MAPPER = None, - MAP_TO_UNSLOTH_16bit = None, -): - model_name = str(model_name) - lower_model_name = model_name.lower() - - if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: - - model_name = INT_TO_FLOAT_MAPPER[lower_model_name] - logger.warning_once( - f"Unsloth: Your transformers version of {transformers_version} does not support native "\ - f"4bit loading.\nThe minimum required version is 4.37.\n"\ - f'Try `pip install --upgrade "transformers>=4.37"`\n'\ - f"to obtain the latest transformers build, then restart this session.\n"\ - f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." - ) - return model_name - - elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: - - new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] - # logger.warning_once( - # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ - # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." - # ) - return new_model_name - - elif not load_in_4bit and lower_model_name in MAP_TO_UNSLOTH_16bit: - - new_model_name = MAP_TO_UNSLOTH_16bit[lower_model_name] - return new_model_name - - elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: - - new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] - # logger.warning_once( - # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ - # f"We shall load `{new_model_name}` for 4x faster loading." 
- # ) - return new_model_name - pass - - return None -pass - - -def _get_new_mapper(): - try: - import requests - new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" - with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text - new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] - new_mapper = new_mapper\ - .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ - .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")\ - .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit") - - exec(new_mapper, globals()) - return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit - except: - return {}, {}, {} - pass -pass - - -def get_model_name(model_name, load_in_4bit = True): - new_model_name = __get_model_name( - model_name = model_name, - load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, - MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit, - ) - if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): - # Try checking if a new Unsloth version allows it! - NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit = _get_new_mapper() - upgraded_model_name = __get_model_name( - model_name = model_name, - load_in_4bit = load_in_4bit, - INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, - FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, - MAP_TO_UNSLOTH_16bit = NEW_MAP_TO_UNSLOTH_16bit, - ) - if upgraded_model_name is not None: - raise NotImplementedError( - f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ - 'pip uninstall unsloth -y\n'\ - 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"' - ) - pass - pass - return new_model_name if new_model_name is not None else model_name -pass - - class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( @@ -333,7 +234,8 @@ def from_pretrained( else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ - "Make an issue to https://github.com/unslothai/unsloth!", + "Maybe you're doing vision finetuning? Please use FastVisionModel instead!\n"\ + "Otherwise, make an issue to https://github.com/unslothai/unsloth!", ) pass @@ -411,4 +313,236 @@ def from_pretrained( pass return model, tokenizer pass -pass \ No newline at end of file +pass + + +from ._utils import ( + patch_compiling_bitsandbytes, + patch_model_and_tokenizer, + prepare_model_for_kbit_training, + patch_unsloth_smart_gradient_checkpointing, + patch_compiled_autograd, + process_vision_info, + unsloth_compile_transformers, +) +from ..kernels import ( + patch_loss_functions, + post_patch_loss_function, +) +from .vision import FastBaseVisionModel + + +class FastVisionModel(FastBaseVisionModel): + @staticmethod + def from_pretrained( + model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", + max_seq_length = None, # [TODO] No effect + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, # [TODO] No effect + fix_tokenizer = True, # [TODO] No effect + trust_remote_code = False, + use_gradient_checkpointing = "unsloth", + resize_model_vocab = None, # [TODO] No effect + revision = None, + *args, **kwargs, + ): + if token is None: token = get_token() + + patch_compiled_autograd() + patch_compiling_bitsandbytes() + if use_gradient_checkpointing == "unsloth": + patch_unsloth_smart_gradient_checkpointing() + + old_model_name = model_name + model_name = get_model_name(model_name, load_in_4bit) + + 
with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + ) + pass + + # First check if it's a normal model via AutoConfig + from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled + was_disabled = are_progress_bars_disabled() + disable_progress_bars() + + autoconfig_error = None + peft_error = None + try: + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + is_model = True + except Exception as error: + autoconfig_error = str(error) + is_model = False + try: + peft_config = PeftConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + is_peft = True + except Exception as error: + peft_error = str(error) + is_peft = False + pass + + # Both config.json and adapter_config.json should not exist! + + # Old transformers versions check + both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 + + # New transformers need to check manually. 
+ if SUPPORTS_LLAMA32: + # Check if folder exists locally + if os.path.isdir(model_name): + exist_adapter_config = os.path.exists(os.path.join(model_name, "adapter_config.json")) + exist_config = os.path.exists(os.path.join(model_name, "config.json")) + both_exist = exist_adapter_config and exist_config + else: + files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + files = (os.path.split(x)[-1] for x in files) + if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: + both_exist = True + pass + pass + pass + + # Error out if both LoRA and normal model config exists. + if both_exist: + raise RuntimeError( + "Unsloth: Your repo has a LoRA adapter and a base model.\n"\ + "You have 2 files `config.json` and `adapter_config.json`.\n"\ + "We must only allow one config file.\n"\ + "Please separate the LoRA and base models to 2 repos." + ) + + elif not is_model and not is_peft: + error = autoconfig_error or peft_error + # Old transformers version + if "rope_scaling" in error.lower() and not SUPPORTS_LLAMA31: + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support new RoPE scaling methods.\n"\ + f"This includes Llama 3.1. 
The minimum required version is 4.43.2\n"\ + f'Try `pip install --upgrade "transformers>=4.43.2"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + raise RuntimeError(autoconfig_error or peft_error) + pass + + # Get base model for PEFT: + if is_peft: + # Check base model again for PEFT + model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + revision = revision, + trust_remote_code = trust_remote_code, + ) + pass + + if not was_disabled: enable_progress_bars() + + # Check if this is local model since the tokenizer gets overwritten + if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ + os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ + os.path.exists(os.path.join(old_model_name, "special_tokens_map.json")): + + tokenizer_name = old_model_name + else: + tokenizer_name = None + pass + + model, tokenizer = FastBaseVisionModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = _get_dtype(dtype), + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + trust_remote_code = trust_remote_code, + revision = revision if not is_peft else None, + model_types = model_types, + tokenizer_name = tokenizer_name, + *args, **kwargs, + ) + + if resize_model_vocab is not None: + model.resize_token_embeddings(resize_model_vocab) + pass + + # In case the model supports tagging, add the unsloth tag. + if hasattr(model, "add_model_tags"): + model.add_model_tags(["unsloth",]) + pass + if hasattr(tokenizer, "add_model_tags"): + tokenizer.add_model_tags(["unsloth",]) + pass + + if load_in_4bit: + # Fix up bitsandbytes config + quantization_config = \ + { + # Sometimes torch_dtype is not a string!! 
+ "bnb_4bit_compute_dtype" : model.config.to_dict()["torch_dtype"], + "bnb_4bit_quant_type" : "nf4", + "bnb_4bit_use_double_quant" : True, + "llm_int8_enable_fp32_cpu_offload" : False, + "llm_int8_has_fp16_weight" : False, + "llm_int8_skip_modules" : None, + "llm_int8_threshold" : 6.0, + "load_in_4bit" : True, + "load_in_8bit" : False, + "quant_method" : "bitsandbytes", + } + model.config.update({"quantization_config" : quantization_config}) + pass + + if is_peft: + # From https://github.com/huggingface/peft/issues/184 + # Now add PEFT adapters + model.enable_input_require_grads() + model = PeftModel.from_pretrained( + model, + old_model_name, + token = token, + revision = revision, + is_trainable = True, + trust_remote_code = trust_remote_code, + ) + # Patch it as well! + model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) + pass + return model, tokenizer + pass +pass diff --git a/unsloth/models/loader_utils.py b/unsloth/models/loader_utils.py new file mode 100644 index 0000000000..b778b7e95b --- /dev/null +++ b/unsloth/models/loader_utils.py @@ -0,0 +1,120 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .mapper import INT_TO_FLOAT_MAPPER, FLOAT_TO_INT_MAPPER, MAP_TO_UNSLOTH_16bit +# https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! 
+from packaging.version import Version +from transformers import __version__ as transformers_version +transformers_version = Version(transformers_version) +SUPPORTS_FOURBIT = transformers_version >= Version("4.37") + + +def __get_model_name( + model_name, + load_in_4bit = True, + INT_TO_FLOAT_MAPPER = None, + FLOAT_TO_INT_MAPPER = None, + MAP_TO_UNSLOTH_16bit = None, +): + model_name = str(model_name) + lower_model_name = model_name.lower() + + if not SUPPORTS_FOURBIT and lower_model_name in INT_TO_FLOAT_MAPPER: + + model_name = INT_TO_FLOAT_MAPPER[lower_model_name] + print( + f"Unsloth: Your transformers version of {transformers_version} does not support native "\ + f"4bit loading.\nThe minimum required version is 4.37.\n"\ + f'Try `pip install --upgrade "transformers>=4.37"`\n'\ + f"to obtain the latest transformers build, then restart this session.\n"\ + f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." + ) + return model_name + + elif not load_in_4bit and lower_model_name in INT_TO_FLOAT_MAPPER: + + new_model_name = INT_TO_FLOAT_MAPPER[lower_model_name] + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ + # f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." + # ) + return new_model_name + + elif not load_in_4bit and lower_model_name in MAP_TO_UNSLOTH_16bit: + + new_model_name = MAP_TO_UNSLOTH_16bit[lower_model_name] + return new_model_name + + elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: + + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] + # logger.warning_once( + # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ + # f"We shall load `{new_model_name}` for 4x faster loading." 
+ # ) + return new_model_name + pass + + return None +pass + + +def _get_new_mapper(): + try: + import requests + new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py" + with requests.get(new_mapper, timeout = 3) as new_mapper: new_mapper = new_mapper.text + new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER"):] + new_mapper = new_mapper\ + .replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")\ + .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")\ + .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit") + + exec(new_mapper, globals()) + return NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit + except: + return {}, {}, {} + pass +pass + + +def get_model_name(model_name, load_in_4bit = True): + new_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit, + ) + if new_model_name is None and model_name.count("/") == 1 and model_name[0].isalnum(): + # Try checking if a new Unsloth version allows it! + NEW_INT_TO_FLOAT_MAPPER, NEW_FLOAT_TO_INT_MAPPER, NEW_MAP_TO_UNSLOTH_16bit = _get_new_mapper() + upgraded_model_name = __get_model_name( + model_name = model_name, + load_in_4bit = load_in_4bit, + INT_TO_FLOAT_MAPPER = NEW_INT_TO_FLOAT_MAPPER, + FLOAT_TO_INT_MAPPER = NEW_FLOAT_TO_INT_MAPPER, + MAP_TO_UNSLOTH_16bit = NEW_MAP_TO_UNSLOTH_16bit, + ) + if upgraded_model_name is not None: + raise NotImplementedError( + f"Unsloth: {model_name} is not supported in your current Unsloth version! 
Please update Unsloth via:\n\n"\ + 'pip uninstall unsloth unsloth_zoo -y\n'\ + 'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'\ + 'pip install --upgrade --no-cache-dir "git+https://github.com/unslothai/unsloth-zoo.git"\n'\ + ) + pass + pass + return new_model_name if new_model_name is not None else model_name +pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index d4f1278e1d..fc1dc8cdb0 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -409,12 +409,12 @@ "Qwen/Qwen2.5-Coder-32B", ), "unsloth/Qwen2.5-Coder-0.5B-Instruct-bnb-4bit" : ( - "unsloth/Qwen2.5-Coder-Instruct-0.5B", - "Qwen/Qwen2.5-Coder-Instruct-0.5B", + "unsloth/Qwen2.5-Coder-0.5B-Instruct", + "Qwen/Qwen2.5-Coder-0.5B-Instruct", ), "unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : ( - "unsloth/Qwen2.5-Coder-Instruct-1.5B", - "Qwen/Qwen2.5-Coder-Instruct-1.5B", + "unsloth/Qwen2.5-Coder-1.5B-Instruct", + "Qwen/Qwen2.5-Coder-1.5B-Instruct", ), "unsloth/Qwen2.5-Coder-3B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-Coder-3B-Instruct", @@ -452,6 +452,46 @@ "unsloth/Llama-3.1-Nemotron-70B-Instruct", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", ), + "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-2B-Instruct", + "Qwen/Qwen2-VL-2B-Instruct", + ), + "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-7B-Instruct", + "Qwen/Qwen2-VL-7B-Instruct", + ), + "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision-Instruct", + "meta-llama/Llama-3.2-11B-Vision-Instruct", + ), + "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-Instruct", + "meta-llama/Llama-3.2-90B-Vision-Instruct", + ), + "unsloth/Llama-3.2-11B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision", + "meta-llama/Llama-3.2-11B-Vision", + ), + "unsloth/Llama-3.2-90B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision", + "meta-llama/Llama-3.2-90B-Vision", + ), + 
"unsloth/Pixtral-12B-2409-bnb-4bit" : ( + "unsloth/Pixtral-12B-2409", + "mistralai/Pixtral-12B-2409", + ), + "unsloth/Pixtral-12B-2409-Base-bnb-4bit" : ( + "unsloth/Pixtral-12B-Base-2409", + "mistralai/Pixtral-12B-Base-2409", + ), + "unsloth/llava-1.5-7b-hf-bnb-4bit" : ( + "unsloth/llava-1.5-7b-hf", + "llava-hf/llava-1.5-7b-hf", + ), + "unsloth/llava-v1.6-mistral-7b-hf-bnb-4bit" : ( + "unsloth/llava-v1.6-mistral-7b-hf", + "llava-hf/llava-v1.6-mistral-7b-hf", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 0b8c08a371..d083144651 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -1,58 +1,86 @@ +# Unsloth Zoo - Utilities for Unsloth # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # -# http://www.apache.org/licenses/LICENSE-2.0 +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see . 
+ +import torch +from transformers import ( + BitsAndBytesConfig, + AutoModelForVision2Seq, + AutoProcessor, +) from .llama import * -from ..kernels import patch_layernorm, unpatch_layernorm -from ..kernels import patch_rms_layernorm, unpatch_rms_layernorm -from ..kernels import patch_llama_for_causal_lm, unpatch_llama_for_causal_lm -from ._utils import patch_gradient_checkpointing - -from transformers import AutoProcessor -try: - from transformers import MllamaForConditionalGeneration -except: - raise ImportError( - "Unsloth: Please update your transformers version to 4.46.0 for Llama 3.2 support!" - ) -pass +from ..kernels import ( + post_patch_loss_function, +) +from ._utils import __version__ +from peft import LoraConfig, TaskType, get_peft_model +from transformers import set_seed as transformers_set_seed +from unsloth_zoo.peft_utils import ( + get_peft_regex, + merge_and_overwrite_lora, +) + +__all__ = [ + "FastBaseVisionModel", +] + +def _wrap_fast_inference(generate, device_type, dtype, model): + # Wraps inference with bfloat16 / float16 + @torch.inference_mode + def _fast_generate(*args, **kwargs): + # For num_logits_to_keep + kwargs["num_logits_to_keep"] = 1 + + # Remove token_type_ids + kwargs.pop("token_type_ids", None) + + # Check pad_token + model_eos_token_id = getattr(model.config, "eos_token_id", None) + if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): + model_eos_token_id = model_eos_token_id[0] + + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) -class FastVisionModel: + try: + kwargs["pixel_values"] = kwargs["pixel_values"].to(model.dtype) + except: + pass - def pre_patch(self): - patch_gradient_checkpointing() - patch_layernorm() - patch_rms_layernorm() - patch_llama_for_causal_lm() + # Autocasted + with torch.autocast(device_type = device_type, dtype = dtype): + output = generate(*args, **kwargs) + pass + return output pass + return _fast_generate +pass - def post_unpatch(self): - 
unpatch_layernorm() - unpatch_rms_layernorm() - unpatch_llama_for_causal_lm() - pass +class FastBaseVisionModel: @staticmethod def from_pretrained( - model_name = "llava-hf/llava-1.5-7b-hf", + model_name = "unsloth/llama-3-8b-bnb-4bit", max_seq_length = None, dtype = None, load_in_4bit = True, token = None, device_map = "sequential", - rope_scaling = None, trust_remote_code = False, + model_types = None, + tokenizer_name = None, **kwargs, ): if trust_remote_code: @@ -67,7 +95,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers = {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ @@ -81,6 +109,7 @@ def from_pretrained( pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer + os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" get_statistics() # For debugging - we use a download counter to see if environments are not breaking @@ -105,160 +134,36 @@ def from_pretrained( ) pass + kwargs.pop("attn_implementation", None); # No need since we auto call it + # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config - self.pre_patch() - model = MllamaForConditionalGeneration.from_pretrained( + model = AutoModelForVision2Seq.from_pretrained( model_name, device_map = device_map, torch_dtype = dtype, - # quantization_config = bnb_config, + # quantization_config = bnb_config, token = token, - max_position_embeddings = max_position_embeddings, trust_remote_code = trust_remote_code, - attn_implementation = "sdpa", + # attn_implementation = "sdpa", [TODO] Pixtral for eg fails **kwargs, ) - self.post_unpatch() - # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
post_check = check_nvidia() # Counteract saved tokenizers + tokenizer_name = model_name if tokenizer_name is None else tokenizer_name tokenizer = AutoProcessor.from_pretrained( - model_name, - ) - model = FastVisionModel.post_patch(model) - - # Patch Trainer - from transformers.trainer import Trainer - try: - if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - inner_training_loop = inspect.getsource(Trainer._inner_training_loop) - Trainer._original_training_loop = inner_training_loop - else: - inner_training_loop = Trainer._original_training_loop - except: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - pass - - if ((post_check - pre_check) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - - import transformers.trainer - items_in_trainer = dir(transformers.trainer) - good_items = [] - for item in items_in_trainer: - # TODO: Support Deepspeed - if item.startswith(("deepspeed", "xm", "met", "smp")): continue - if item in inner_training_loop: good_items.append(item) - pass - exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) - - start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] - end = inner_training_loop.find("\n\n", start) - original_debug = inner_training_loop[start:end] - spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] - front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) - - debug_info = """debug_info = \\ - f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = 
{max_steps:,}\\n"\\ - f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning(debug_info) - import subprocess, re, gc, numpy as np - a = np.array([0,]) - try: - a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) - a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) - a = np.array([int(x.decode('utf-8'))/1024 for x in a]) - except: - if not torch.cuda.is_available(): - raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') - if ((a - PRE_CHECK) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - for _ in range(3): - gc.collect() - torch.cuda.empty_cache()""" - - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace(original_debug, debug_info) - - debug_info = """n_total_devices = total_train_batch_size // \\ - args.gradient_accumulation_steps // self._train_batch_size - if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') - debug_info =""" - debug_info = debug_info.split('\n') - debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) - inner_training_loop = inner_training_loop.replace("debug_info =", debug_info, 1) - - front_spaces = re.match(r"[\t\s]{1,}", inner_training_loop).group(0) - inner_training_loop = re.sub(r"^" + front_spaces, "", inner_training_loop, flags = re.MULTILINE) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = tpu_spmd_dataloader(train_dataloader)", - "raise RuntimeError('Unsloth: TPUs are not yet supported!')" + tokenizer_name, + padding_side = "right", + token = token, ) - inner_training_loop = inner_training_loop.replace( - "self.accelerator.free_memory()", - 
"self.accelerator.free_memory()\n" + \ - front_spaces + "if self.is_deepspeed_enabled:"\ - "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, - ) - - check_batches = """train_dataloader = self.get_train_dataloader() - ga = args.gradient_accumulation_steps - bsz = self._train_batch_size - total_batches = bsz * ga * args.world_size - n_total_devices = total_batches // ga // bsz - if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') - divisor = n_total_devices / 1 - bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 1: - divisor = n_total_devices / 1 - ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" - check_batches = check_batches.split('\n') - check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = self.get_train_dataloader()", - check_batches, 1, - ) - inner_training_loop = inner_training_loop.replace( - "_inner_training_loop", - "_fast_inner_training_loop", 1, - ) - exec(inner_training_loop, globals()) - Trainer._inner_training_loop = _fast_inner_training_loop - inner_training_loop = inner_training_loop.replace( - "is_torch_tpu_available()", - "False", - ) - if "n_total_devices >" not in inner_training_loop: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - pass - inner_training_loop = inner_training_loop.replace( - "is_sagemaker_mp_enabled()", - "False", - ) - exec(inner_training_loop, globals()) - Trainer._inner_training_loop = _fast_inner_training_loop - - # Save max_seq_length - model.max_seq_length = max_position_embeddings - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_position_embeddings - internal_model = internal_model.model - pass - internal_model.max_seq_length = 
max_position_embeddings + model, tokenizer = patch_tokenizer(model, tokenizer) + model = post_patch_loss_function(model) # Fix up config for transformers uploading PEFT # Not necessary anymore since we require transformers>=4.37! @@ -271,121 +176,105 @@ def from_pretrained( pass # Log Unsloth version for future fastpaths for inference - model.config.update({"unsloth_version" : __version__}) - - # Add save modules - patch_saving_functions(model) - Trainer._inner_training_loop = _fast_inner_training_loop + if hasattr(model, "config"): + model.config.update({"unsloth_version" : __version__}) + pass + patch_saving_functions(model, vision = True) + patch_saving_functions(tokenizer, vision = True) - # Also fix torch_dtype + # Save tokenizer for inference purposes + tokenizer.padding_side = "left" # Force inference internal_model = model while hasattr(internal_model, "model"): - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass + internal_model._saved_temp_tokenizer = tokenizer internal_model = internal_model.model pass - if hasattr(internal_model, "config"): - if internal_model.config.torch_dtype == "float32": - internal_model.config.torch_dtype = torch.float32 - elif internal_model.config.torch_dtype == "bfloat16": - internal_model.config.torch_dtype = torch.bfloat16 - elif internal_model.config.torch_dtype == "float16": - internal_model.config.torch_dtype = torch.float16 - pass - pass + internal_model._saved_temp_tokenizer = tokenizer return model, tokenizer pass - @staticmethod - def post_patch(model): - # Patch model - layers = model.model.layers - lm_head = model.get_output_embeddings().weight - - # Also patch all dtypes - BnB seems to not allocate 
the correct type? - # BnB default dtype seems to be float16! - correct_dtype = lm_head.weight.dtype - - for name, module in model.named_modules(): - if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): - weight = module.weight - quant_state = weight.quant_state - - if type(quant_state) is list: - # BnB seems to have float16 as default! - module.weight.quant_state[2] = correct_dtype # Cast to correct dtype - else: - # https://github.com/TimDettmers/bitsandbytes/pull/763/files - quant_state.dtype = correct_dtype - pass - pass - pass - - # Clear deleted GPU items - for _ in range(3): - gc.collect() - torch.cuda.empty_cache() - return model - pass - - @staticmethod def get_peft_model( model, - r = 16, - target_modules = "all-linear", - lora_alpha = 16, - lora_dropout = 0, - bias = "none", - layers_to_transform = None, - layers_pattern = None, + r = 16, + target_modules = None, + lora_alpha = 16, + lora_dropout = 0, + bias = "none", + finetune_vision_layers = True, + finetune_language_layers = True, + finetune_attention_modules = True, + finetune_mlp_modules = True, + layers_to_transform = None, + layers_pattern = None, use_gradient_checkpointing = True, - random_state = 3407, - max_seq_length = 2048, # not used anymore - use_rslora = False, - modules_to_save = None, - init_lora_weights = True, - loftq_config = {}, - temporary_location = "_unsloth_temporary_saved_buffers", + random_state = 3407, + max_seq_length = 2048, # not used anymore + use_rslora = False, + modules_to_save = None, + init_lora_weights = True, + loftq_config = {}, + temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): transformers_set_seed(random_state) - # Get LoRA - arguments = dict( - r = r, - lora_alpha = lora_alpha, - target_modules = target_modules, - lora_dropout = lora_dropout, - bias = bias, - layers_to_transform = layers_to_transform, - init_lora_weights = init_lora_weights, - # loftq_config = loftq_config, - # use_rslora = use_rslora, - modules_to_save = 
modules_to_save, - **kwargs, - ) + if type(r) is not int: + raise TypeError(f"Unsloth: Rank of {str(r)} must be an integer.") + if r <= 0: + raise TypeError(f"Unsloth: Rank of {str(r)} must be larger than 0.") + + if isinstance(model, PeftModelForCausalLM): + raise RuntimeError("Unsloth: You already added LoRA adapters to your model!") + + if target_modules == "all-linear": + finetune_vision_layers = True + finetune_language_layers = True + finetune_attention_modules = True + finetune_mlp_modules = True + pass + if target_modules is None: + target_modules = get_peft_regex( + model, + finetune_vision_layers = finetune_vision_layers, + finetune_language_layers = finetune_language_layers, + finetune_attention_modules = finetune_attention_modules, + finetune_mlp_modules = finetune_mlp_modules, + ) + else: + assert(type(target_modules) in (list, tuple,)) + pass - lora_config = LoraConfig(**arguments) + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass - model = _get_peft_model(model, lora_config) + lora_config = LoraConfig( + r = r, + lora_alpha = lora_alpha, + target_modules = target_modules, + lora_dropout = lora_dropout, + bias = bias, + task_type = TaskType.CAUSAL_LM, + ) + model = prepare_model_for_kbit_training( + model, + use_gradient_checkpointing = use_gradient_checkpointing, + ) + model = get_peft_model(model, lora_config) - model = FastVisionModel.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) # Clear deleted GPU items for _ in range(3): gc.collect() torch.cuda.empty_cache() pass + patch_saving_functions(model, vision = True) return model pass @@ -396,6 +285,11 @@ def patch_peft_model( model, use_gradient_checkpointing = True, ): + if not isinstance(model, PeftModelForCausalLM): + raise TypeError( + "Unsloth: Your model needs to call `.get_peft_model` first!" 
+ ) + pass model = prepare_model_for_kbit_training( model, @@ -403,20 +297,6 @@ def patch_peft_model( use_reentrant = True, ) - # Fix up config for transformers uploading PEFT - for active_adapter in model.peft_config.keys(): - # Not necessary since we requires transformers >= 4.37 - if False: - name = model.peft_config[active_adapter].base_model_name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.peft_config[active_adapter].base_model_name_or_path = name - pass - # Add revision to enable future fast inference paths - # [TODO] Bugs out!see https://github.com/unslothai/unsloth/issues/492 - # model.peft_config[active_adapter].revision = f"unsloth" - pass - from transformers.trainer import Trainer if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": raise RuntimeError( @@ -426,24 +306,7 @@ def patch_peft_model( 'Thank you for your understanding and we appreciate it immensely!' ) pass - - logger.warning_once( - f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ - f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP layers.", - ) - patch_saving_functions(model) - - # Patch cross entropy loss labels - # Fixes https://github.com/unslothai/unsloth/issues/10 - max_seq_length = model.max_seq_length - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - model.model.extra_ignored_labels = extra_ignored_labels - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model = internal_model.model - pass - internal_model.max_seq_length = max_seq_length + patch_saving_functions(model, vision = True) # Patch tokenizer to pad to the right internal_model = model @@ -468,37 +331,40 @@ def patch_peft_model( @staticmethod def for_inference(model): - # if model.config.model_type == "qwen2": - # FastLlamaModel.for_training(model) - # return - # pass - - internal_model 
= model - internal_model.gradient_checkpointing = False - internal_model.training = False + model.gradient_checkpointing = False + model.training = False - while hasattr(internal_model, "model"): - internal_model = internal_model.model - internal_model.gradient_checkpointing = False - internal_model.training = False - pass - if hasattr(internal_model, "training"): - internal_model.training = False + for name, module in model.named_modules(): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = False + if hasattr(module, "training"): + module.training = False pass - # Also check if lm_head / embeddings are trained - internal_model = model - while not hasattr(internal_model, "lm_head"): - internal_model = internal_model.model - pass - lm_head = internal_model.lm_head.weight - device_type = lm_head.device.type dtype = model.config.torch_dtype - if type(dtype) is str: if dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 pass + device_type = model.device.type + + # Wrap model.generate + if model.generate.__name__ != "_fast_generate": + model._unwrapped_old_generate = model.generate + model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) + pass + + # Patch tokenizer to pad to the left + internal_model = model + while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass + internal_model = internal_model.model + pass + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "left" + pass # Also disable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -516,23 +382,32 @@ def for_inference(model): @staticmethod def for_training(model, use_gradient_checkpointing = True): - internal_model = model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True + 
model.gradient_checkpointing = use_gradient_checkpointing + model.training = True - # Delete all fast inference loras - for param in model.parameters(): - if hasattr(param, "_fast_lora"): - del param._fast_lora + for name, module in model.named_modules(): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = use_gradient_checkpointing + if hasattr(module, "training"): + module.training = True pass + # Also revert model.generate + if hasattr(model, "_unwrapped_old_generate"): + model.generate = model._unwrapped_old_generate + del model._unwrapped_old_generate + pass + + # Patch tokenizer to pad to the right + internal_model = model while hasattr(internal_model, "model"): + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" + pass internal_model = internal_model.model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True pass - if hasattr(internal_model, "training"): - internal_model.training = True + if hasattr(internal_model, "_saved_temp_tokenizer"): + internal_model._saved_temp_tokenizer.padding_side = "right" pass # Also re-enable training for embeddings for NEFTune @@ -548,3 +423,5 @@ def for_training(model, use_gradient_checkpointing = True): return model pass pass + + diff --git a/unsloth/save.py b/unsloth/save.py index b4c6b499cf..b503b2b47a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2041,8 +2041,153 @@ def unsloth_convert_lora_to_ggml_and_save_locally( print("Unsloth: Done.") print(f"Unsloth: Conversion completed! Output file: {output_file}") print("\nThis GGML making function was made by Maheswar. 
Ping him @Maheswar on the Unsloth Discord or on HuggingFace (@mahiatlinux) if you like this!") +pass + + +from unsloth_zoo.peft_utils import merge_and_overwrite_lora +from .models.loader_utils import get_model_name + +@torch.inference_mode +def unsloth_generic_save( + model, + tokenizer, + save_directory : Union[str, os.PathLike] = "unsloth_finetuned_merge", + save_method : str = "lora", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + + # Push to hub + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = "Trained with Unsloth", + private : Optional[bool] = None, + create_pr : bool = False, + revision : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", + tags : List[str] = None, + + # Our functions + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.9, +): + if token is None and push_to_hub: token = get_token() + + merge_and_overwrite_lora( + get_model_name, + create_huggingface_repo, + model, + save_location = save_directory, + push_to_hub = push_to_hub, + token = token, + upload_location = save_directory if push_to_hub else None, + low_disk_space_usage = True, + private = private, + ) + return +pass + + +def unsloth_generic_save_pretrained_merged( + self, + save_directory : Union[str, os.PathLike], + tokenizer = None, + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + push_to_hub : bool = False, + token : Optional[Union[str, bool]] = None, + is_main_process : bool = True, + state_dict : Optional[dict] = None, + save_function : Callable = torch.save, + max_shard_size : Union[int, str] = "5GB", + 
safe_serialization : bool = True, + variant : Optional[str] = None, + save_peft_format : bool = True, + tags : List[str] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.75, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. -def patch_saving_functions(model): + Choose for `save_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + """ + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.save_pretrained(...)`" + ) + pass + + arguments = dict(locals()) + arguments["model"] = self + del arguments["self"] + unsloth_generic_save(**arguments) + for _ in range(3): + gc.collect() +pass + + +def unsloth_generic_push_to_hub_merged( + self, + repo_id : str, + tokenizer = None, + save_method : str = "merged_16bit", # ["lora", "merged_16bit", "merged_4bit"] + use_temp_dir : Optional[bool] = None, + commit_message : Optional[str] = "Trained with Unsloth", + private : Optional[bool] = None, + token : Union[bool, str, None] = None, + max_shard_size : Union[int, str, None] = "5GB", + create_pr : bool = False, + safe_serialization : bool = True, + revision : str = None, + commit_description : str = "Upload model trained with Unsloth 2x faster", + tags : Optional[List[str]] = None, + temporary_location : str = "_unsloth_temporary_saved_buffers", + maximum_memory_usage : float = 0.75, +): + """ + Same as .push_to_hub(...) except 4bit weights are auto + converted to float16 with as few overhead as possible. + + Choose for `save_method` to be either: + 1. `16bit`: Merge LoRA into float16 weights. Useful for GGUF / llama.cpp. + 2. `4bit`: Merge LoRA into int4 weights. 
Useful for DPO / HF inference. + 3. `lora`: Save LoRA adapters with no merging. Useful for HF inference. + """ + if tokenizer is None: + logger.warning_once( + "Unsloth: You're not saving a tokenizer as well?\n"\ + "You can do it separately via `tokenizer.push_to_hub(...)`" + ) + pass + + arguments = dict(locals()) + arguments["model"] = self + arguments["save_directory"] = repo_id + arguments["push_to_hub"] = True + del arguments["self"] + del arguments["repo_id"] + unsloth_generic_save(**arguments) + for _ in range(3): + gc.collect() +pass + + +def not_implemented_save(*args, **kwargs): + raise NotImplementedError("Unsloth: Sorry GGUF is currently not supported for vision models!") +pass + + +def patch_saving_functions(model, vision = False): import inspect import types from typing import Callable, Optional, Union, List @@ -2131,14 +2276,22 @@ def patch_saving_functions(model): pass # Add saving methods to top level model - if hasattr(model, "config"): - # Counteract tokenizers - model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) - model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) - model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) - model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) - model.push_to_hub_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_push_to_hub, model) - model.save_pretrained_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_save_locally, model) + if not vision: + if hasattr(model, "config"): + # Counteract tokenizers + model.push_to_hub_merged = types.MethodType(unsloth_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_save_pretrained_merged, model) + model.push_to_hub_gguf = types.MethodType(unsloth_push_to_hub_gguf, model) + model.save_pretrained_gguf = types.MethodType(unsloth_save_pretrained_gguf, model) + model.push_to_hub_ggml = 
types.MethodType(unsloth_convert_lora_to_ggml_and_push_to_hub, model) + model.save_pretrained_ggml = types.MethodType(unsloth_convert_lora_to_ggml_and_save_locally, model) + pass + else: + # Vision only 1 option + model.push_to_hub_merged = types.MethodType(unsloth_generic_push_to_hub_merged, model) + model.save_pretrained_merged = types.MethodType(unsloth_generic_save_pretrained_merged, model) + model.push_to_hub_gguf = types.MethodType(not_implemented_save, model) + model.save_pretrained_gguf = types.MethodType(not_implemented_save, model) pass return model pass diff --git a/unsloth/trainer.py b/unsloth/trainer.py index 00956ed41b..012be4b0cb 100644 --- a/unsloth/trainer.py +++ b/unsloth/trainer.py @@ -20,13 +20,13 @@ import trl import inspect from trl import SFTTrainer -try: - from trl import SFTConfig as TrainingArguments -except: - from transformers import TrainingArguments -pass from . import is_bfloat16_supported -from unsloth_zoo.training_utils import unsloth_train as _unsloth_train +from unsloth_zoo.training_utils import ( + unsloth_train as _unsloth_train, +) +from unsloth_zoo.vision_utils import ( + UnslothVisionDataCollator, +) from packaging.version import Version import dataclasses @@ -35,6 +35,7 @@ "UnslothTrainer", "unsloth_train", "_patch_trl_trainer", + "UnslothVisionDataCollator", ] # Unsloth gradient accumulation fix: @@ -60,7 +61,11 @@ def unsloth_train(trainer, *args, **kwargs): pass pass - +try: + from trl import SFTConfig as TrainingArguments +except: + from transformers import TrainingArguments +pass @dataclass class UnslothTrainingArguments(TrainingArguments): embedding_learning_rate : Optional[float] = field( @@ -134,7 +139,7 @@ def create_optimizer(self): # From `trl>=0.13.0`, they changed how to pass several params to the trainer # We need to patch to make the transition smooth -def create_backwards_compatible_trainer(trainer_class, config_class): +def _backwards_compatible_trainer(trainer_class, config_class): original_init = 
trainer_class.__init__ @wraps(original_init) @@ -167,6 +172,7 @@ def new_init(self, *args, **kwargs): } # Get parameters that exist in Config but not in TrainingArguments + from transformers import TrainingArguments moved_params = \ set(inspect.signature(config_class) .parameters.keys()) - \ set(inspect.signature(TrainingArguments).parameters.keys()) @@ -207,14 +213,13 @@ def _patch_trl_trainer(): import trl.trainer trl_classes = dir(trl.trainer) - - non_convertable_trainer = set(["PPOv2", "AlignProp"]) - trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) - non_convertable_trainer - trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) - non_convertable_trainer + trl_trainers = set(x[:-len("Trainer")] for x in trl_classes if x.endswith("Trainer")) + trl_configs = set(x[:-len("Config")] for x in trl_classes if x.endswith("Config")) trl_classes = list(trl_trainers & trl_configs) for x in trl_classes: - exec(f"trl.{x}Trainer.__init__ = create_backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + try: exec(f"trl.{x}Trainer.__init__ = _backwards_compatible_trainer(trl.{x}Trainer, trl.{x}Config)", globals()) + except: continue pass trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ = True From 69cbd13a41694b63951ec5cb9c51d6a5a40cb393 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 06:07:06 -0800 Subject: [PATCH 0821/1088] Update vision.py --- unsloth/models/vision.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index d083144651..69fb3fd986 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -41,7 +41,7 @@ def _wrap_fast_inference(generate, device_type, dtype, model): @torch.inference_mode def _fast_generate(*args, **kwargs): # For num_logits_to_keep - kwargs["num_logits_to_keep"] = 1 + # kwargs["num_logits_to_keep"] = 1 # Remove token_type_ids kwargs.pop("token_type_ids", None) From 
c2b185e7dbe04cdf2b95c681f42416dbe19d5f97 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 06:45:40 -0800 Subject: [PATCH 0822/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ee85ba3c36..69f36f0d46 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.8" +__version__ = "2024.11.9" __all__ = [ "prepare_model_for_kbit_training", From d30c363d25668b8059237c58586d0f2d10903682 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 21 Nov 2024 11:24:12 -0800 Subject: [PATCH 0823/1088] Vision (#1318) * Add files via upload * Add files via upload * Add files via upload * Add files via upload * Update README.md * Update README.md * Update README.md * Update README.md --------- Co-authored-by: Michael <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 36 +++++++++++-------------- docs github button.png | Bin 0 -> 11805 bytes images/documentation github button.png | Bin 0 -> 11807 bytes images/documentation lighter.png | Bin 0 -> 11777 bytes 4 files changed, 15 insertions(+), 21 deletions(-) create mode 100644 docs github button.png create mode 100644 images/documentation github button.png create mode 100644 images/documentation lighter.png diff --git a/README.md b/README.md index 4d68d996f0..07d4e0abb6 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ - + -### Finetune Llama 3.2, Mistral, Phi-3.5 & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3.2, Mistral, Phi-3.5, Qwen 2.5 & Gemma 2-5x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -23,43 +23,37 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) | 2x faster | 60% less | +| **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing) | 2x faster | 40% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | | **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | -| **Mistral Small (22B)** | [▶️ Start for free](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) | 2x faster | 60% less | -| **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | +| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | +| **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% 
less | -- **Kaggle Notebooks** for [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Run [Llama 3.2 1B 3B notebook](https://colab.research.google.com/drive/1hoHFpf7ROqk_oZHzxQdfPW9yvTxnvItq?usp=sharing) and [Llama 3.2 conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) -- Run [Llama 3.1 conversational notebook](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) +- **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing), [Llama 3.1 conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text - This [continued pretraining 
notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language -- Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth. +- Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) +- 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook] - 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. -- 📣 NEW! [Llama 3.2 Conversational notebook](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) includes training only on completions / outputs (increase accuracy), ShareGPT standardization and more! -- 📣 NEW! [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook) and [Llama 3.2 Kaggle conversational notebook](https://www.kaggle.com/code/danielhanchen/kaggle-llama-3-2-1b-3b-conversational-unsloth/notebook) -- 📣 NEW! [Qwen 2.5 7b notebook](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) finetuning is supported! Qwen 2.5 comes in multiple sizes - check our [4bit uploads](https://huggingface.co/unsloth) for 4x faster downloads!. 14b fits in a Colab GPU! 
[Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! -- 📣 NEW! [Phi-3.5 (mini)](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) now supported -- 📣 NEW! [Gemma-2-2b](https://colab.research.google.com/drive/1weTpKOjBZxZJ5PQ-Ql8i6ptAY2x-FWVA?usp=sharing) now supported! Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! -- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported
    Click for more news +- 📣 Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! +- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. -- 📣 NEW! [Gemma-2-9b](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) and Gemma-2-27b now supported -- 📣 UPDATE! [Phi-3 mini](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) model updated. [Phi-3 Medium](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing) 2x faster finetuning. - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! -- 📣 NEW! Qwen2 now works -- 📣 [Mistral v0.3 Base](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) and [Mistral v0.3 Instruct] -- 📣 [ORPO support](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) is here + [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models +- 📣 [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models - 📣 We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support [4x longer context windows](https://unsloth.ai/blog/long-context)! --
    ## 🔗 Links and Resources @@ -69,7 +63,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| | 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#-installation-instructions)| | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) -| 🌐 **Released Models** | [Unsloth Releases](https://huggingface.co/unsloth)| +| 🌐 **Released Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| ## ⭐ Key Features diff --git a/docs github button.png b/docs github button.png new file mode 100644 index 0000000000000000000000000000000000000000..6b129e0db728a4ffac63419eb054653930dee372 GIT binary patch literal 11805 zcmXw9bySp17iURn1rd>!5|9w28$?n`=@3cj?pRPjx=T`NN$C!eMnJj*1c4>BzmP4@r2I>|A6_7MMsX#;oKINhI&W zA1vWKzN6_7iAmzHhx3`t!H6NI&IsR9os0F2xPWZN3C_a>U zPd|^aAo_8do#y~gS>M?CQo6G~bbh!oiBNPIkJ>2xVkdSvw!h`tqau_2(#UTyE3q5K z7A2o0wEK2ZjV(&VUkOzou7u)6+M+HO8{dB3mgye_uk~+5v8xuoLbt}KL9we6_MQBD zBK2G0)mQ_gKD?rWR2laG(-Rh4ElhyXz@U{xFBdv7?>+aS#_!(HEM^l<)5u>f!eX?K zaKg`zmG_=xk(Mc;GQoWOv#}*lf1iAmi_JN5K|8Aw);TBN<&x0ygK41^KeR~KjG6wy z&Mv)tJ3xHCb;X<`!1hrry`00zhpa#!lJbg`H9MjbSvX8YE)%r{70>ciJKcf@Y$(S* zAuC5e!PxCK>1S5LJovVuaJvWVPF3uzzH}@-NfDv{HRG*Kgzu(_4braC*7efbgfO zG~7J5O&$fBkS5uCqE4bDp`0`7hSJNa_Ii*eW6Tj)%htp%nd5qP6QExayJO!E`>hvC z&rEubs!43}<(j@n%Q6_&Y=ecefMx(=@3>L}oT^O4(dafyMpqNa)}$?&h%fxSYAb28 zyCu5fv78T1J~(>Vqti;;WAH1EEx&H-YJJDo9@apFmvC~+{z17+#R`x}LZApd4j$%j z)AnF7`xz?>YE4!LO~5C3-f})4cvE_3rk*!|vC0aKLD(hd>nFQvS(-PZWbQE$!|+v% z9`92Xz&vP%r@~47D~U?M>&P_<#?ci0dPX)+tYr(kycrlWnwthdppFCOric zK?U(^;az@vxsgC1rou&bTEzUtX%23Pr1>p`3%a2lMbgc2@&ie?xQz5i0{;|^p`G!; zGhqpGq!2^jUlEyf*wO3Dipuk3Az^beoZkm`{@t_mRY>A_Qxpw%Vwx%*A`{&gPcQA} zFcuwBVv{Zk z7TpcW!Ht2Ga1-+}qXi1PPAA3i5SH()t&|&jHl7}VRZ+fm$r-g`*ro6yjA9w6rQFY1 
z;$(%Gpo2Gfhz|ejcaW;e?M3=XXC9w($*EsA(U%~o(N?I6rQIk z09lqFp|8Twh)Rtc-(Vw}{R73N|H*hu9pzd&M)89-9_&(lkqO;cO&**ecS0}0D8jie z8B2hWuB3z5J_`N(o4*&{!nFHH`tPd%u=FhY1vNwa`NKFogc}Tvc&-JkjV1cG9pvz~ zoaZ-EM}K!MLt~`>Y9t4Y>b)^g_b-xm{2})a1Q&&{tu?_leO+$wQJm(Vl2`}tZgr(N zKLl?`Pty3yAIpD;)G<2{58-vkxvuQ-A}`Rc9B*~hsM{vf0gp_?GYPy4;j?kz!q2-5 zIPRkspHR;x{S|Pk*r2;ClWv>LOp!dyqQ;)nhi?N_l5_Y-KtXc+e2D~mJZsv1#C6S#Ir^SW*K4~gt(`JeX3tK(7fsTNOqtIBU4@4;a}Z0uTr86 zeqw~bZCe$8e z|M)_Dfw+#P$Pp~j>&r#wO7${kWn!tD>qmkW_4zOUcZ^5}8M6$NeAvK2vG6@3QatkE zC@jx4jWB4(kteaqM))_(0|lot$cqxSZ+#{U#73N>uB~eR+i{gbqwDraQJmUeiWPd# z(-|y&u~A7Q)U$#MD@ci+eXvev&N{}749mL*alhpgg7>b>1P4Kux(L*k-ZZx({FjRO zLd0WCb?|brcl&My6#vSLL_G@;fAI&32XZSVnEZv!Vy<_GgJle%Yv~uaj;@9c#24`I z-&2A}SDik8u z_)cqxmp!~Mt`D8yeTIs)bLKsC$QTlyW6e=h->9K9q1MqQ}tD5snF$5I8YlL+mrIw z@7FX9FY&D=cp3B})a{wju9+^ygs=|caCN~6ESe%;itdXY$p;ZkJDert4T&A)VrSlA z#sR%t$cBo3Ct9W1aAjWJFVmI&LHOslE|KJ;Is~phP><^v|SMiyK+Gg|Ly! zRW0`V8e-b7$?0@77htUxn^T2_Y)x7|r%ZCF|GhRbyCS`^q_~)0Zfte>wRh|@y}G)( zy?5(!^02+gW`=&}pAh_mZM9Tq?bsZy;aVk|0(%0^)e4m-nsg))+uBVo;>dJyS{76i z5)wUSv4B&;AEtl5V|uDhp+VrG~q5u?59j8P3?MpSFmG#b6XMifk;r;6O!-x(q<;YsU^we;bBAQPT0 z`JB&==n{+zq62R@ULSw&Q&ACYS#S{Mw8E_QUTb`;|CMXqk)GMEBN%@bx4KzS&+PZ- z?!K$M?{&wc6(mtkr^)@2Y>lqJ8q#WL6S7LtQ!b{R#cSUs&5Tv%1qtQ7Fx@)AlxoO{ zJr0~P06cx}Fg38&F+75a>|9ov*%0HBvdx=b?5F3dGCx43q>S?B5RH?&^Zx40U5@Hy zu>y3@J+WZ{7?nvs6!?X@8Y7|?FE(c{y;u_7Y@GyET$krmjN*#!EIGOd1U^!5S{*e0 z8(;V-2??Gxgy~=zkj%fd+aXD4Gp-|;W9a@fH{&sbT+Ds03vWgx>(|3%`ABToTuLU^EjGWKL>?)vh)|_w~9G06DKDY8dSx#CSW8pGNw{G|IPwTlV zfl@~XIa3pGrD6tG0@e5{3YR~3T=UiIh44hFI#;66aaH_n=BiS9)$$yV>JJA}ggA=U zj*1J7E~Z$GzbMA4m-5=qSo0_8aCwlv_RG?(sD6aKo+auz@Jbusn8&&2br9j^Nf#9` z6Vnp%;bz?3(;QZnio{Z_-s!c8%1`wI2Tp08+8|=x!*YcY$T#3H*e&P%&~yKZ7^ZDD ze}TVTMn&xO3fQIT+P*%#FQ-V^VXE#e_oUDAbJarif|VgMU)KQ=aC|~~Un#oJFau4p z@RFC?3vu_6!sGOs#OtHdT1 zoK}6=?6BJ|Qdm$hWhz#7G5#gZj#r^Iqnpa7#kx`KhLqB!e@aehpy*b*b^gg3_fXrJt5>szQm@aGr-nqXQOjDdJpZgE zzKQdtj1pP3fNRYU&AckqH=bE?7+d!-tCflrJkwZg00O;n5zAcDk?%6XfQj&pxvwjg z_t{R 
zJACha-;llk^t+h%)a$%?N2lQaXQ!eemR|Lx<|pOtdRk%uGk9uIwKYI;R2kQ|fUm); z_SQ9iBGVk<4ID#%a{u!c=WuHa+f^npZ6dk!)21N9^L$$EA05%6HIobG|J@Gp(ya+E1yZ;1}!mAbkz->&)wW`?}lsHg4OM6Iz9cCM7_uQs)P9i@hO z+)bI?wmA>!X-PFa;D#JVzmq%8EF>iSs`nl+^*)Wi3a_`+YefE3q1z;L*^}1B*iWx@ z7~HK)Ien-7wMzXEu*7%0ZMZM^+;d{g9{7s;r-!)7KG$gh-dVE!YdqFI)})_}9=`nN z)BD2JW^0))A9S4>Y=+BkKNzKo%C6W?Ip7Q%A($w(d9z-z@+naSnO3$^712vfp8V5Y z(|~C(eSG5R&6Si>ittz33YWYW8B=W+!KQf?o+;w8*%SaOW)yz7N`T!dWYMrQr&x{~T}i^1ZLtxeHDzlys0o z7G34K_rou`#mACP+g#?eCq9h5fVzEz0axKKzZ^1HufxM?{dgvPy;eq9driY~GWp1z zA|dy(Sx$Xh=kkF2Q=-t{9=Zx0yQmG8M0V{S;Iti?S>tgxWXdhB-o>Q7$5BBJ)4ua~ zJrN%8K2KGdFH6i#G4-RH{mY_)g2mZ|hPUVEp8%zV10V0K2eaKW=zbKSHixXAnK2ON z+?>~1K;i2RHok5yy$#>Fi?hD=g)F8i<_GB>)P=(5DhF)83v|-39hQ z?$IyJC{%lpnEgo#v)fT3U@;>^^8G@Ce?FI`iOHM^_P_{s}>wD@@VScIrl0pd@BNi%k1y3_CJ@3%`h46u-c0Gl! z=tLN8tV+6z@rxig-EGiYjUOk;Cg!`95HmjO`FOkB(#q?})y5MHN}KE{8w*>rl`@UJ z9UTUitGQ#0Hc zc+1IGH)Wg?jVw{ImY4KhUc|n##HnxWjv=Vcvin+>vGH8^@tyGJNb`* zwlW$93P#2kld6z=mWo&m3Nyz`NYg>%rbJees&FMa=qkx@6M z0r;M_W5~SgU{+ys%Z}<};;A~&3(zRQoKhmHd_(!gwBt51(o4~53JA#4?M7K`S0#c962Q$cf>rURv zZRZz2%xMOS0_JS>V@t(5*(R|+jC2`_Fgh2l9+M+~&jiV7+c*J45#I8KV!^J7Y3whs z4Izr^hdZ9Mb&qN^k>8}l$ebrQJ3LrAr}pr*13M#9d7XYsXHQp{T4~Il`7VoSBaz_2 zW<}Fo8Jn*rRVc4S2E1(h6H5dF_p5SQHMm^8R~=GWSsT}!ovgf63-99dz3kiIPZ989 zH}2+p|3Cs~5s`iVN9|1xd$WlNLOg!+l>4jiWcRd9bm;#1-| zxX{C8E!oxF7(r|y!{c)rZ=bFGOdqLMlxcQSe<+||E|fieJkL9pSPlvd}Hw&JH zwEH@j#)k+Wm10Xm0?`tl1s#kuc9|4WMA2d3AvTmnJIcoK5!7EA+X23$TelL~9(;a} zz0aFyQ(J9xwYA7Db4J;aoI|gQf5emAHtqYVVe8nHAKS-|6KmCeYaCh>Ur2nUoSY06 z)Qh!ezXJ{i>-1!hg`C~8YNKa8EnI$x-!k0nP9VK_I6VQRFW}Q7k9+Qq$`$qS)h!U{ zf-QR^HZEWivV{9oTg4!r#)UZwMOOXt(udnG_!T_%IUCLFJFUM|5@lZIDpU`(dwKeD zaCU0CN@$1RFu_2#8lTcpqiYR7^SoaUZZE5=SAaf9{|v577Cyef^(Zs$9rBBDNU=fM z^m_>niJt2j3=ukoKla?}_OQvW25%d5wi}Bqw*nY{kX1d0XP%pT%+c_JJVE3uC%Q;8b zDFHBFjloFCcegF7q2G8VyhO$G&}KTcd;eoNDn21dxO`BSSpkSCDIa8Cgfd3;$qT7r?X!=sB>n!Ttz}r(;DlVwAydt{vVF)ze z%%W%5Ak_C4gQDP&%W^4p^&E$9{nIrY3Xn?(i1CHEbZgsD0CKI z)1p^$ycG4Z~)6v0a7J`T3I@&y?hS#Yq>pT&2T 
zrEJBVo4%i4O{V2+Mo1+)q$H$q5=UEm+36ZorhnlSTt3a7VrU+!Z0?fPt_TZred;~6 z7{1(?$HP;Cw7*rPSP4PDd|;Ku8@SovUpO_VYT+1roF@G%JH3YOQMPrWc!@l15i2V% zl5u4lOG9%ZzxV(U9c8so-tmQ4HbTJq?b7?_r;5~*Y zNAt|mPH1fa_9e3;@Yy*f+!VfmFn<{>5o~IT){e9a&?EtBX8b?p{^ZNd-j*2rWPM zu!ez$F>#^oJvd}<%i?}HbiSm{m*rQ08(VSQ5lsD~PSM7O1MKDy=aIib!oDw)Q$1K& zSWZ@3FRj=as#*K?&1)Q|l_0|jj}KgMfz_bS#+E({oXla zkK@#ObQIE!0@8+gUoYEKPwwtxA@*g*?HBdUps?_Q_IVjU$aw~meZ5}t)Icfcq@D}L zZM|m@YG3a6?E*+W*wPetpTyGC)U7M3nQQHV7f5lI@h43Wuaw=P^dAy>Qse3u)`wOY zTeJCzE<>eDAel%qB`t7U=oRK$2w|egJZ#Z!cw^e-g+p`FM$&nT_8N-c>HCg%CVfLy zA;pcZf$26@T_~*zSdG60IoKOe(@7Mrfq#gUR)pV4bWMkjNuFOl!aM?l*Rj4%=Rt!$ppS|0w@u!w4WnQPs)$3Ul_GY zQVN=If}}srB?5pmvtDCdgtl7Tp%aNgvRyX7e(oTlo$!6|XAY3I!H0NBPzy5rMO4jaBGzuj33 zvJvPOzz$W@wE>mk5jP+|#-#`W;k8zyHyT4JAP^VaBe)gt=kqC{*|<66TZD9VHU}`QQ)B_+T#k2p%rv>u#_7M_R zDHp>2mDGy2ZoEO3(!p~>)E&VX)Wu79YqydAJ<)9gOvKEYpZWv9z#Y#=-wU7={9D-b zwWrqV%Oc#|k(#L*y0c+WK@R9dvF{m~*Nkw-@U1+7B0GW&OAdAj|okCzC0{dwm*roHcHxqy>Rt_ktf>nyiN=+*G*;CAPA))GDtdmbDq}6f&wtX;FTHmDs%~f(gl(vRgcxmf zLlD0ISHGmn8Vx?kymr(^ERKWfA~yac_ov@y+Y6<+LsC$3#^C<4u`-4ub409Yx#jTG zc+GBC^n7~A?C_JQyu&{%|Ge+D2;hsGu&byd-i}|X5fl_EsAlDO_b^YD+4gkMjhmfsd$22@_6su=(AqyebWuHDI!i$YGDwXw z3mB~*4R*SPs-I^^GDGBg`b0AVdSAzlhxb-CQ_AZ8ldQ{YO7A%=ACX-n$LfOnY#@bb zkdUHF#oq?{7QDlHKNqB7P+4VSsPQT&BxDSi^ZZKSQ(gd;B*v+C;Ck4Xi-?vtdI&F? 
z&x!K6bvxvIj1}AT-pHMkGi*ChYW%uPF4*ZnyyhFK134dFZ&?LgPr`;&M5iKEKUGa z0&MN@jXA$_B8uGymF*ddc7wIYJkuZm-)ejo76mH2e-q?$?~AN{Nqde0!FFq&!4^LP z4#u%Bx{;tZIsia%d|(*PXp#$X7*IxV)s@E`vR~(aB6S8}b>)L2UQGw&O`i6AruT?_ zO@HPrx33>Cn!@cy^=|NiMs%EyARN-kL|o0YcU=h9E@{^#3ZG8?E1!*$ulg35I+stu zzC=yg2R@y3vbv-Hk`Tw9^7nePoFPHvOo3E29HaonLLMQ>6ciTD>ZYfGtm=0^E6^bu z;zhejfj&Y0E67QqOVaH7-h%pY^&-U|N^XB* z_mPD0>Q+6Tpp{k*(Pb3}J z*ep+Sw0_$xYaOf{63KL*f+Cn^tK$PKDtDU9B2hTn7uWQdz4|r7#b0jozfY?bwT%p8412Yw)W~@e^$&sJpeYNwsJ=a$V`0Ey0)l)G1b*nfn2_8?231!}?NR7@xe{py>=MfhP+MU+s04+@(^L@LX`C>Pr;ZW^0?2Ek-PYT|}i z!o>PFJxFb@AR6OZRyA>~WU?3kx{{3?qo|-2`mW9E6q+cZPpCeI*QeaWAs$}9@Qrs9 z06eRDoZpeC*-=i06L8YC{-fBWx%|4rx= z>FD&zSOsyc6W(2k$e{$VPq2{?&Rj42WD?Yz)AZHBC6+_Y=qUnL(~(OfIy}29yCHg- zG~jXihHioPjy4kcrte1g-ERgX$X+l-VTCHL zB!5N&UiJsICaej9<($E#)IWCyX|Le~?PIyu#OWqtmm$Fw+(&PQc;Id1}NbnW$Hx1YT;dP6g~3szR!3Fj<{uOXrC%} zD(Z#cmxL9efu+GClnmI}vwBr%t858c%dHEZFC@>W0|+FHvf+eu24Ey8R^3_@ot@#E zJ;6^bXL!E9kg?Xys^Si6MyO}5ikpMn=JVk>KYI#RsLM@s^kA5L>fT}KvV}x&)^Nve1Gi;_As2`-Tt$0{u{5Dy;WiRcxhuPwuWh!ewc4C0%}HM{1vuGZJ%2E z`k@koa*?C$)*H(!=x7`Za*3J6+EU9w3tC;t0@4@cAnc242p;^D{?xPB}>Ab!lE#Gh( zP@Rx|SK^+J=x;&z5;g**-~SH%)B+?6(=`rsGG~vGz#I}_tXl7h$)Jbj93j5LCJ&7T zv1OqRV6nGFLvt)A-m=jGYfSn*gP~8rG8+5$n`Oi$$;kqGN+rViMFX5bShjqB71fJg z4{{!Y!Gv%pc&8lcNX8o*2_>)l{N(Op|7M{6sI(PB1GKYYAZuU((6OB2=WuN zZJKaHFk;1+dwUqBfs&7!N>nNZD9v5Ne@(Mnl!J?%tqxdoYQt^|`!~uJyZ6vuSR(N; zcJAF_m@jUcBDOaVEui=NWh{-*74N!ag^DFm@hd5F)XIN@R?=4YB+R_Qlvc&7Q1Hud zQVvYpwyz`L2@bvzVskBZqNjZQ6K^T5l!}9gTElol8BuKl4f)F#4bk55p5nn-c0Wn| z&HtSi;t@ncoOsb3nogS~HN4j4+3=BbBCt^F>}+@#+>Q8V3UALV ziRN5ilIf#I3z&1Onie-)EPTHi4NYHY5Sz42V^7|xAG|*dGhn#C?}!um7r4x^6z~9h zSN=5nE`sSiK+Eux(Y?vs*?vQwGQM^4R>d#+fZ|DJmsNPi(So?$B7yY_plU4A)OSqk zMbW2A)mVe!1m*bmvC`0s73v^oA6QGkA4AjMoMg|2p~4OmvEgYyNsdE2b2|gd>mrFH zHmTttGO4Rju?1pcHq1wnK~wl|icklYLMnRt>Z3Jtpk^dYWP7qr)8D9#_1_2`O&mU5 zj|B&Vp2C|xfvo&XFutmX)s$et03LE>oh{(8hC=j@zL}lO9bfSgp1bb|{h8qBaFJV> z&B%`yH|D=^-uA0apiPy+fv_CI$6VLn%AXGVn`b_yA(%sAmhhwN(UBnvAFSY+b_@*z 
zqnE81&tWpKO3ngcjMBgD(5E*Z1r#67g89^!mWDQ_` z3pA{+y%HTiiSP|yqU`3`0RdL!zLdV&Gtb6^{o$55YfN`={ED zC@A>k&>t#FLJ~C!3ip8GV;N1iq^%jxq!-Q&H+!*3!nrJtldla_n1rKPqziRHS@w02hfv zi$*(rI!+~m^9H>f{kYFNFM@?5=)^A>(T1o)d_;(chBu+=`f($sLc@DP!))M6r(j^rm(+B)=J?|TMf6x@VgvtO>HA_ zlKaY{*clj(t23U8onn1i!!#OOWh?fHqQK=`7ihF4L5+v-o)=PwWq2=Tobm!jD|9dHH&k|_5iE+V-{@qH&(UQlt?$^0p8bRqt)dpANg>LF0oEc;EbXt0$xO>4 zUUs53pq*l>;nR{ka)VQN0@~baa$iQC!8ouEui(1y5yaVFE@UoDZ%x!d{ z5J(zW%nUt^P-U>DrB-zAYH*=8;O}zTEB`yRTEv8$oo3yZO;;m8y|TtIwDWqnEem0t zSWII2tGGq~X*QljS~zA3idc*OC-C9*SPh*`=cWm@q202_=+#SVIJ~eP@yEzxiSGxcY0cAz^TG1 zVbW45M$+dg4piV#GyMn1Un%n0ddS?1g8PX5AUO5j{$hh9h7-03_^rEXDa%++PsX>Ts1yWOW448 zY8vY4LN&gnVk6d}68yp;#1ZgG-p+8IeaZ4^UmN_I(15X(t%;Se^f{V9g;o6?K0n z*X@Oa{D>Y-8lubr0Jr>M^ndQ*HXZPruK&PX9=u@Q(z1NBLlUaD;e1m^WHA}wD2xBw z^SR${`leu&(*9*gS1edeaHQOQUrkA`C4EE0_bA&8+^yI_z}#LSTGbc0R1G6rY2 zFMzOLt_p`Az}X#Z`t_>PT-m5C2JLwOgS}2TjQIOmWAS^;r02rZn!R&qc{HHsc+76Z zArsRRq=$9LfMVG3XCGXbdCx0=*5EeI#D}XD%b=&`WqH|64({k}$vVO@a zd~iCV6e;09diZcc?`frO{|RgLz2U#N$-1Kal+V!K3QyUBOVS{tm(2jNiZz3u85hl! 
zh3cx4@VJ_pZUL~GdXjgmr;ue6aqg8m{lqBS&414?9B>#h$*hFxRid4n&VSw4T1(}$L;JF-}(w&@)B+FscD{+Ja zY2RN;wc1ckGLnp=^!38f4A76mtpWj17^+(;V{UQiYjgF=w7H-^z$hwaS@`;M(FWzB zor@yQNP8e=-+!=5Zn_X6Puf34_J`&@xI{B;`)+$HP0-!6aFno6BS{DrlQ>qjM%pHu2?Y40r|DodO)v5PSh~KW4wG zL3al>ZX`KBBwUJ~R@l#DH3LA?+;-OQ@ zuL0f}Pv44KzvCH01chc!^3jIslvy~wPVP50C5z52&xaR}q zs?IXdR@_NcC#+fA3x7@Beg&hyKz~`O2D?%zXJL99rqPRVRV09mG9!*De>x{I1jdU* zQywbu{lzm+jDVWVWkD0|+!aK-K?wvgSDanqduc zEiAay^ycY&R6+DOm$(Xc&=!Fb^wUQ_8Y+T*m8TK^goJ>_EMJ1}!Ng-@wDTX`*3h2E zpWY^O0X1v9tqj?0SjPpPB9@^dWq+j5Pl#d96u;BdRlpmGIS6tg031!3@iozmc*Fw6z;)1bSyK33@6Kcl0@noo-|)>Y{--=`j;Jhz3zk05}tb5{^Qwf z6RGL0{xJbEXQ>D>>57OBWnca@rHIV<&xFgc0YrGo{?yuuPFdn{%b9(d&D_XLA!0 zil4PaD8%#aXV0WGIi0En8aU%BoFM5n%wWA+qfZ+ldlBTzfc7?PIgyG3yOxgnsyDaK zvLR2T)}LoNS#)!^S!6%gcKeV;vBc`lct`aHyINU!Ws4=HX5aFmPtMAZ`y4_H=yY&b zqZ7CR?juT7-SZGzxB9104L{{jE+?Kl-=Xn3CfQDU zG&Jg@2wR$D7?(UO_g2|RZh4kjzmMtvywN(Y#RRU}@-l3HaC2(sPtVcO^yv)06UPp% zt7t@2$;0m(ubN!sz7HjhSGjf=M#XI%Ic>&<;Yw_2uO`Qj3S^=U$1p}VCE*~CrL@Yd zVn+8bwC0SAj~>a@X8Kq8Y+*gom#iJJr1$*uHG&v9VW3JU?rb!<$>AC5+nBQ-T%_mJ z0WXhXx|N%P=nCTD`J-y~;@$d~%~Ge|*pECPM~JTA-z{1MD}#UJbT2)@TTs62<2;LG zjZQL|kL2M|XnFMs_F&^WUX!qZK*C$DSw+ZZ{P%gqom^dgDvvkcv&opQa+0Qv@;pvFVHz_ zY`ZJCJQvm)Tt0OS7f-bp3agl(&kWt;w2o~FW>w5ew=!Y{+bn@RjW)fl5YKw1kLe{4 ztW-sfuO1d1U6BF4JH~b!k6sCCc-Zs|>iCo_4Qx`dDc;vMGut9qg_*F~p*x`veX62t zVImz)^||VZ3M@(B+h-dsDC&A`2DF294JFOwPMXqm^gRRpPH{ok4%&RIfSs}H_czZP z80vt^MTpfC&Yviz7ZlJ9?W9nOWsY$bF2}Q{cD;K6ozGFalcq8K;+|Agbj4zfxNUZQ zx^?c_PJ^{d3@7-0Uo?4=>`*2aKEFchI(;C4l`6rqR?eBK-_pft_~(OUn}Uwq0+EaJ z_YJqzWLNDf^O>Rn^Yf(9#NZ50k<r=|a zb>V|58{Z6bOKy^o#i^45js5&t*-GawAp^Zet1%0cGYw84_725I(`8!W<-9^l^{Ko( zh80tu@u!>496Q~{9~oi>1O!GVgYfmgS$Gv7vos$1mTs#jRA+-D>FDE;II= zp+mSX6g~E{#b%o$%@iZ@5CeK5IPA8@+!i)JB^=)C=5GCRti)>qkFw9#eoTd}0LUvr z!#P=R&HQE(-S0}wC){Ow@~2JMlT04U2|lwPDxn`s3>l)O}Rabqs0` zw82UXKE3HcQp+DaHj#b<&t`j-9apQ|HS2t94SJ6UBW9+rH)3oqFMoaHK} zQ?@d6lZmf^w@TvG>^Oojetz50`g`g?tXAW%{`@X#=QCu~FZUrcg2LJmuof|B-8@-q zb_cqC$Cf>(A#@msXue;w$oDZ((I%J7HP5(pLq9PK&oSehF+1^070Z^l 
z!h%HiibWyytx5iDR$B$u)v_RC%B&e+b^U@g+5!@eh5MSKMg^?$twl2|nxfuoSyT3C zCsib?DLZc!eW8e!ccg@nbw@U`(j+4(GL(-JX@uO|S~onwgFqHkXvA)I>+CnG-R^`O zjA+GE=WO#nP=gO2w{;^1ypE$zyX7lFf%;c(WU^8*G3TF<7!5CH&^Rw6{ijvPHjDwA z1X8pX%7k1;FLCa3+6K6USO!h0E3VJWr(3BP7yF0~J6YfS5Xw3eda!9a91V`1A%{QX zSpq1Ll}UlopFP+^>Vq7{R6k{zu`$@>&vi$m7k7w=utPTI)J}Q&$oZa4*+>uRF5O;Ypv**CK43#sI-g;o|-<{z3=w z``XEqD5*hpc*{BD=d=t~k7IM_4JkcRfSL+!pX1i?)OTeAURl(WKP%T;N}o;0s(`?I zRvgA8(D}~&1UMSX+~wtn;Uf~eKk z`t{NdlNT8e-cLbQ$+j2hAI)s8HP}_cFLe`TwSXFqR?|h7zb8Oijhq?oq&g);_Z8t) zyodZUAh)|COpqxu#=lUvQhxT`*X|VVhOB#$jaMDrjaMm2ALjDNb8<#hMZ)@~0bqF_ zn27O`vi6k}kg1*vqx1bvtc|lA65>C&`+2tSM>W-cq%|`g3REr( zdm8bkqT6iu>C?@EoH>cm(o%bdlfb~_X^+LF0?d}t0xy?ab9HAfwuuf(y|Mau?~Z8d z9e%sf>+jjWlyZzEV(1L=_S`w14r|&mIxD@!5fH_RZ4e17JTn5)a5{+Fw!BQ@1~}`u z_-W4}RviXm!n^M-e&6Au3iCJ=JVG&Z_&U(f6PNn2^*O3OPsYNRp}*tGl^@cT2^9d| zA4(ih@HD=*&t3BcPNRL@Ml8nV)#nwi73*k=Wz*9}S#@9&53rJyF3_hAYUFOHe0?r> zH^&>_w=wwK9i%Li%sDbg2=gxW`-(rvScW92ls$z=;<4ViO=f*MrwQ8(B7QYx;5Bvi zY4FeKcWph8`H?#+D>keuVN}JV;&tQWwQgLj6C4z)DZZ`QL8=h5V?{l&cS3i0T_Nmp zXHlnzv)tslWXIT6?(eNJ%2TK57rDC)SEH%Ams22$ z)Q?^CgBuf=)vUt`hd2+2@n))zk_Sbmq;$)N;VOKi*T8C3j@2sHhXcQQHtPDseyW;- zrXuA9A>K;a>T1T?JKLH?^TwNoiEj!4aSl`Z-_Se&K~`#Na&aXg;x6lmAL?2|C*2y= z*QM%;RgplT1&wwwwHxv}fWJo`jqnZ~wHMW0F+k>f$I$z|;h1Ff;HNtfW2M@{r>(ua zYkkc>7rQ&0JZ%uzx<52#ms z=ZH(@s|p+Zs1{&)wPJ1c1yb5<7PCL3xPWL!kMHy{(~#T+Sgd@%=p^@T8m^QRX=L&N z*zu!!rA@TB3OH{-aNg}#iptJ5-!t%8i0CI8X0X`sx;VMX>TK_dg6S?!O);p7V z!UZ}5{LcNH6Y*%&_q_A1q>FMN(+#DLhYbp2I%c~}|FF&^etHOORI80^K#Jt%HM;-j zV^yT%BPx9X#;nNSqrWRs(jhPEwCo`cDH5JQv z5ZzDas2Si^5mrcRW}eG;y?7KKDT4oEV?E-h35Nd|P-_7ez72&qkM3 zNylL%t@ZNi?ZBYg5I49_-8F3JBKful_N0z-Q<3-m;YaC3u1n*j1(Um8I@p`9ox13R zbDWj7N=sdyC(t)~4i;QkWM4^MubzJWmVI$$ZC8GN!Z+2Pxh8COS%ZYliVpX*rJRkF z(QvafIYdOO;9HNA& zkZ}Yeu(+3x2e!ki@H1>T+_LD9x(9)+EQwZv8re9GH3~F2z65etWw%w z;bhvx#l*;X+G3+|wW7vdqrN4@h_hxa`MxC|$o1O2imj)0n{Ut`{D$PZIqXo1zWBz) zdC(HDUEnZ*$&%MZm5M-8YFNvoXXJQce+hdG3hRNums?r)0%7R=ESYp>+GmVDw71h7 
z;VLDX*~~=4yZRwgr5$->T=^9UV*48!Wf1Pst}n3;iHE2pO0Z_AeYGE8MVD0uDMVmM zY;+z~igvtHLhcCz%2;TP(BRpv2-*MmwzaRWJI~;t_3iR#wd#hBZF95%}U;t;kMS=DrwvBZ;T3{a>wHg&VJ zr8X2@G3Uvbwyet9gk%XG9duf$dDFYl(CQuf-QZTy^E%dmHP{FULua|oJ zwo9PcqOGmMt(Di2#fKdQH@4P|PQM%!b1ZdC;&?C=Xth%m6$fqW899^moV7Y1$DIsb zDU<`BMP4O49#)=6hvig(1Ys71Rc)h1O2p`8okdBsVE4=xEV{>o^_K7nLk;li)OkT$ zHgIm}Eu41r9c7iNvRP$de?Gll!cl}xR+ghyLdLNV_=az-hupU6!~0hvvbD$rhFCJ%Jz)A$7&Il6Pcg*vhj~ zV_hW_5yl?b?7hw(;4HsV;J;UH5;FYTaOyP`)z-J{S|QC1QN-AJ>d5ASvCfF0&e^+X z&RKW6>+760N;l>l#u5pQTg_g2{c>W47c&O!bJGIjfzAXL1Iht7wP# zV!yWgt&qq#dehb#N{&Xk2YfGq4~aR%$f!@>Fc2>O^nLd2*;Hjyi@txvQx~m1;CWU@ z)NHJ@OSYu&WWLdvWKz66xU0TZ3gYlFL!tYJhjPWWnIlan5}ap9o(XC@(e6iy-?^VR zH2G}im0JbNfpoE1KSg|_UNBOH+~oml%lGkPhJZAnl`uTOOrbXYfQ2dmOpBrc&s8@O_{Fb*?I4_ioNngOLoMAnkDcVRjCNg9s9eyw!MGVv1KGyV;Nwc5 zVoz5xX6Qg(6`^vlp(CeXbal4XlzEv@eazsH@*$@zC0!G|E(c~JA#!0kx;T@fw~Mp( z9Z=8+6)JP=7ogZ7mE>TTyBxUJOmh-=?wfh1e*+}(*9_Mpg_8*fAR?m%Wr$uWGn9uT zc@=K#7amme?2dvvcS}HypNk#=I~j>=if~E*EQ2<`Qwt>{jyko?XpOov@5s(bif(Rw zv6%BAemK&x^D3cR?u6U;cmB~8EGO$O;*LtSz=C|8wcM}MpTRoVZX+MT@b z(V1T7!y;{cs`5?Vo=Ze_Ou(5?K8WF4^`)2=`G)&{IF7B_ftExXI<=D#dZ(0SpoeD&*dp|O8Y60plAeK)N*yoAI|8x&(e6(b|NHb$Ji zFSBj6u{*a*A!qu&4v1SfTJ;hKt^X#$lteiGOCbNu9#(J!xhIrKBGF%rq&PGScw&Xdp`z{{tm>P`ZNbd%_PnYt54%PK_Mm|J`(T1r;F) z?mT+*{SR$v-G9RWp<-82I=qTG9wq<^Z%*PrJdq0FectgP)o+LBx^jL4aL%5S;4U=I zXgo?L{g$Kuz=9v*K6oYFE!PVm*>U35lF&-J=x63av+>;Ta(#Av9=dpy45B0SJfSdglOlQS8d>`yYi!5+C0& zkjq05)wRO10mlLxRpqlwYyP7+$Qi}vj_+O=o0U4-A6kD>h3uY`2$*X^HfqVgx+)q@ z>#<5zS4;VXhHR+MU{<|g*>)<8$0N5lH3p1X$(|Z7-rtXOVLxxpdJofKINA`p-FTn| z!jkIcu*4dx`5$US?2?K9rBaSa-p zvk8Z%S)980plDeFB5C!v!V`L`>L4o%84d}5^HxR`<1l%gSU(gh*MVq}Wq%Y70$9;G zKl6P;Y1$(+DAFF3e*$udvNR9d(RB`xY{V%Fo7!AY&HzkkM1`uZUI7k3VY(tAAytdZ z&?FseU6gCD;?q>;o9=>Msj2_WSAx32mkw7}jot@u zAd!n)enFUTYJ61RKQ~Sh`PNqW%heZ20IcA6OQrqTBNztxR*B8^ceB|w>@Rd6Vzp2D zmOzTJYHE>+!w3+Gk4D#8cx(a`j(N&~=#KM4C7&bdQKE9F)CcxiP(%cV)`|^|mV?;! 
z(eW>4A{xp_NI*<@ACY;CH#$8}ln7XJRpsokPa!#SDa!$-{A`Vv;Zywv>@`CyC#Y+y zgRm=PIClbWaC^}j7srK{MSa%HfYKF`KI8zfw0M>fWE?haudy5Ggt zsn0AQ2LO}@Kvqt$;-0x?u?<4W-+h$a%GQ~|zg^zFo||PI8GBR!xLRY=%1zL)P8P$f z`*Y!p_=!(O(ev51uOd_q41RfGpX>R<9P6vRcBe=4MQk9B=l3Z)uHHk0X9H>VPh zu`5?U1@9-N(mpIKuK8R+w)n<(ubS6mmxIJu(lLm99~5k!fXv>@Zs>=(Gu?mX7L%&g zDTeVBlP0IqJ=qsEUUNca4S$nm>gbRW>Av5*o}dwpmN(O-4~p(Ewr$KJ zrOQ$a*O*mDU8rxSx&QLPm02IW5e;gI)eKun8xmh~xK=P zb{}`nf~K3{N*W2YzsGcE1?@Z%(Ny}k38y)DE&rv#bx2Rz_s*%!XxHCf-;^D0>4~jQ ztu?Z*OeYvznQ8do#8e5jf$j6ZPMt!6XK5DvTGzeSQzzm+oMyO+q*=4gNQ##$ITAGoE z+UhG;pC0@Y&7AWm`ss$;@N-$~{o7cV1&wvwRw-3&&@O+#vx`krZvZ{e6Dh_Xd!qP? zG|*OBzwHb4XtTD95ma!SwO+OmAN|BVFe-zXBFbWEq@Ng&Etn<_jt%`)`t*E7|DweVXsfb?9q0^bIqyE@Wn6! z2+<{j-5LQ1SmPPaDe+x`RT6J$>{G(sE3R%Wu-wUB2B*qMu^iUdBO_vbvn_S^jvzE7 z@cNjopWKaioyH;qlg?kZ_;(Qxgoh*px#>)Mv#2R=8u}yr>G+m2NDdsTBN;c$7c1F7kla*f`bB6KlCF?^4o3nKbi3x@w@ zf`$X&`^{bC8(si)IU@i;=fEXW1SjG{bGzTV4AIr&j#n*I6wpS~FNM)aR8Mb02wkO7 zOl~?%ym+6n7f!Ux)6Z~f((Mcx6knG1?44Ha$J3~E{UPwQIXVPQ%I2bkTMAq7}| zvhd___;3BE8eqf-)dx#2kO{JP69=R+E0@~S0h=5QfIM`>GW8%30g47+lvpy;-ufY- zZ8V*dIHTn&j8ms>$mr#Xc%cwr`H;FYad(VS9)4g&FwS578+rO^jnc~=-YmN0EB(<3 z^Cg;ZcpYQ}Zxy)uIr_0L^036q;vi8}(Jj#ITJ$~wRPp+z5ZZ|SYJA)bHGnFoG=q`z zzcDPM0xT7l_6W>xU%)zB8ZYMV$OmxX^$ClN%QS^6Q{KIDIFZcFw`+v|zq@C3q`%P2 zfsrxITd%?Gr!C+8_C#LRf-{nPq%u$H6Bq?4lVk~zxI=yi@WOvHVB?e;4DqI6^ z7JxMI=W?Z$spPcEBI!q+MYly!!7>Q{JBAkkRF_O9W{T92TU7l%ICksjX`HaeW36*w zaBh7&!4;Uo#h@uNjna|+I3H820p~>x|A+qZvB<8OfA?{?n^H#ui~xyygRY5&?&th( zD3LLekfr?%_(7HkBSPXzVTWNhqSupKvjTjw*694XT)1R}I4!Xb!AvZ{{JnXe%~UWD z6NSG+9@6`cLWc5!Z`5l40~oE;^|M2t@pGrxJ52yb`On#9M%PvN#JWb^x;l8V4p(f@ z0#Oyv@%f;+7WqG^7!&Jn{0k~L{tjj~IB)oU_v1w=!)et3MgV3t%iIn#WNd%AvB=Hd zv;|tn9R`evVSQ+0s2OlMlOo`PIOZCZNxFS;E+4Mw;02;eNL--G;haCGqxw69dA8#0 z|5?fx&5Oj2BcKhQQqzdQF(Q$%2f**kGoUE>dBT_Z$6);g$bmC1K8osy`IvHu;CVA6 zrXSRGC69l>Q|Ki8KKt2`g=~OA&&TB4J## zGgUvW*F{;mH2sbnoYH2C8N$1HZcrFtHK2gFpH{iI2diI>B0~4@lEhCUOu|mWr=|Qm zWm=E<2=x)sCEi6;8@N;|jD?zXWvR7|-;EjE8zsSjZimt*EE%&%_+j54Rl?VsQUv!@ 
z;ODE2Hc~x%t14=+CqEpAXVa^2Z%=$4^DR@K64v3bQYm|`pZECN&~8arwWgZ!j{?=- zmqY(SG~R!DaDpLBcrLN)6XMK)iFLRzKq^9uM5CQNMGl`{{#2&F5W1emJ|(DZSBi)V zm&BUIyvD3Y_ZmMH+#APxi#luT^%m`V{Ini#`UacNg?oX)@=p(#_(oBbS9x3{Yvlib Dgg^(d literal 0 HcmV?d00001 diff --git a/images/documentation lighter.png b/images/documentation lighter.png new file mode 100644 index 0000000000000000000000000000000000000000..0545ee331d0794a3dcfdc52f3aac7b15ea82da8a GIT binary patch literal 11777 zcmYLvby$_n^EU?$T`D3S(jgtvWx%0Jkw#Jk>26Tu0VxrrOOWnv5EPJZq(SP?-SzH` z-{1BA!3FHSyE8jGJM)5IN z?TCVcO9cJEP!f_zQBXL_5l^Kx-IBIuypvu!C*SPFK4qhS+5AyjDPFzwHNSwf-aRv( zE*gI0e2RN@?&WDqHQ$#0bAK;#f41V$*SK^RxUrm^S@4Gq^LxQj??YVaDb@w8bGZ-s zVx&2?PCDXoG2`Ml^v0e0oQo325B6%PTY75G?20y$4w{-)J)10I>mIhLA#>P7TM~ zZZBY0em7`d1&fvJyJ~491DJorZ@$Y%W%AErRH5#m`NC$=4R+kFB-8N!z#As_CI|K= zlWx?BD`ycTX`!1Kl&F+Alz4cXZd6#CpN;gR7+7`c52WdJ%rFk#!3^MIt_>-eDX@uy7OtW~^n*1y3eKR9!Wu-K&oL@@7~wGuDQcKePV}c;NvX_s7Oo zdlYF}n#sM@AMhDf21Jy@C`HbeTiDv^#08vVGv69Rje z+$R;iN#0I)&CgWfGXtV)vb!Zh)Ifz%SKg8@tz(g|?i$tWgXI_i~Il%tt>ZHa^=$M^01WK0>*MPlqn(MKQ$0W?=CS zr25rkm3?+FlIrg(YSD%-{&)T0l>En&58!NFqc$zgcT6)X;o6qtPubbjF%EmY@J^Y2 zDBU~wbM_Dg?2f}bBo@$fs1$(rJrWPO;DvL_Ie;^b;zc+37HkmG#*9^_!EQ$+ouy8W zahS4&IgL|*#su?Z(g|ZgsM}8x)kM;2%*aQUj^Lwtj2|?%@12!ur&fH8~JPOo@sg6%RZPltsTv-!u(r zeq}(g(beBIt*V5R6H=en$pkHkb}_s_Q%Qvxig z58>9_cZu{>b4xyVK3VGSteB1>>`E>GwnnIQ zOM*D;iGK%HyAl`9uX_VQux1R2&rS~kOKc(+cT9gTMsYHP8e<`uVa;%fsDB@HXwbi+ z87kIAI^l??MPPhI@s7GZ#4}sMd5Hjw`G6Oluq!8{yCqit$eL~{51?F*X5y|Xr(+zJ zwj>L+*8)DjRkQEXN&UI zcR(xpL&QDPp(}8hH9rGx3K>AkteBQO{0knk9#!(Y6elCN9t3>iFq^^6&@>8mWp3ma zun`MZ*6~u-Fy_WE6Ay5~m>4Q-z@@MzJ7V!#%`)B{XhxZiZ1G?O>?nc<$TP>@>ncSf z#od=lNC&o4PrtZRawn&6RsMi%z?B|pXBgArEDt`^=Ain>M4BDYrNXWrq;W=Ue1XKF zDobrk-m4`giA$QA?F{H3)F@OUElpx0{0z{%BndabV~P6fgv-sc9rKfFz?HFvwZgXN z-)I%)52cLKW7!DNG+zoKx)o>WIA;!U>Gm+CB$zkL0O{gB#3q9t-ld5iZUlSU4y>nR zuMdoAdPu-~=sO!ee_{t~|43$;0f{3JQ>k%vbwBS8L||}+zODr$>JMupX)m6$tph!r zC)k{X9>LJb!+_0;1r`!=XQ^hm(4)U*(D4G-x#x#iNJ!13B(Z8lT9=`t{4&Ckeh~gN 
z8g}*E=maa48H^FXpTRD(WB7)Hydfba9xMZ=%QWT-p(R><6yi6Vh}Fyk9pc$~J$yol zSq*-OF&~Nbl^$AH{@(wuBw$jdQ81Dr5# zTe>%`&`rwsG{Y`TMIAX|S2$^OL1&OW)JHID#HQlQ@sSrWqB8wlFirC>6AD^cJkOM$ z1o|J2Zui@1)f9GSf_&yg1Cr>W-R5S1FLg39%+1s_#R>S~Cx1cXY5vIL$N{ct=l zK9UCQT3|Q^I)K@_AvUkmn|O>vj5OPi2hh-R9v2ybQVK$9jyNt(O&E+sang1UrJLRx z=!k0b+km!@pTrd6*!&p}KUCl?eg;PF4UoFDmcYEZ?cTUR9tdH!qZ*)?{PK^VkInJz z0G(Ui8=cpv=P4~(j``3MHcu0*GUtd^_~EeKn`c0BtXkgw!^2AVnbK0fcyfRlQCOb~ zhtUt8_-AC~{#)UkN#@06xm^^qrIX>#S)sXkNVg z4$#p4F~UgBwRw{@Eh4-LGQo%zt-6b6t}t(s`DnMdkk)q1N)9VMj@?E*zuS`iH}5&b zn`0_N=d|b=XId($;VmmjCP!D20hN$a$nYWT9JC}i+0u5Nar@m8KBa_hHZBs@;~)() zgY3z`tuQ<@CJESVOfEt_|L+v?BvJ6oIV#_Q7g9Ge)Ht{Qx4B;a2!~yv%oCmc?|2vY zOn2Y8;2}F!ivO`1*>{6%Y~#X5g2V2t%fhwJ46jpP>Wmet)R438grjZCUzv3H`RMsT z6qfURUohemwe#fpEV)S9y1%CS^nmO21R5cuW_+~klt9#u`eRew=EEDbiovV4PtlkW zlKc6cu@hre@k;OBH5^YqH}KvP&@p(mDyo?Es-fj^_gnYb!qlmLCRlG|b zUx)V$Cl?#ZQ481Wi?RJf&tUoI&z{k&hl@nDBZ##3=GDo$-lkQV8x5I(+mL$K+oHNa zX*8{E_^FmW*T{N`RRr_!2@%Uv^t5l^2u(UdSNdwJ2i?@ruJYNw0Bft+x-Oe?$78ae z(!nk?Mh@Lf_rVV30HH$K9pboBljn~8t!JU)M@yLspBaLEuB`dhC;qUeoxmiL6*<`W ziHW`6B=LXN=F6T+;6#K>C)zd1^-GZNxho6mxK z(R0pXbbTeW`*UW}XauS9R$2mewszI!kFsV~p6#uQ{m-((u9p+(S>DIBtK~XDnjbvC zMgqBhHaCvWxpf{W<0@8v_gR%bIw77FyQt!WGi}=^d?gu+@s^746s;`K?H@Ahb|t^@ zlHJ^wph-fq)1q3<#&&LZRl)QTuSlWpK0-Y>&ST37T-^B9&eT-3 z!&!7z*-cqynIpH?z^~;rIIh~q2OI{ip^4^!DNR5}oMov2%j6i;Zc@nfLJ@_)!voH^ zr61~1_k;O5QdJh8;d|Miu2$ER9WD^F1L~apkJ)6=7T){$7zG5fm{UA{XAZRLou|i} zn>_hGA@3e#%%4txCGJ8?9DEYNGW#!&x8$N69xrBT_taQE0M|t3)^hEVtyxQfMHBkh zK?>~BFK9E_130oD6UKj})4-77o}Z67!2McJF$Th!?MjLGtLm~$JLsCIo(#I5q9SqD zN%Sv!LqC?1zmSYNzuRU~pcuuF`)CVxxPh+8tREaz`PP}wB6(QKYUAU;L8#yX& zm9y+G#znx8ToA)-o2MRl^JXLJSXrdw>|LkO%!V*81`47I>F3RyA z6T-anqbb2jMcpZL!OF@~v+f6D&8PP`xC*8OMU>RRZ7s(2Q}xbzZv8UwJhF%g!O6>i zkE;Q$`0R<5mGr1?&s!@Db;9 z3~H(3DIJ%%!t=cxX8%m1i=(ve4Yr)u_P6&+sVp}Z?Qr^am9ymS1w~%jXt50X!Kjdh zs2DaCA@C41?@gk9+i**54rMZo*Ivf6gpHRAC>c`8@$ z6r!NYoYv%dYU}P|keMDA+~o4w}C=*M-MuZ zs$aNdExryC4uAeI(fiSqiLA=b>$sbp!498?`Dfn-W?Z^f?F$C_cz%`8u&Uh*Qk=RC 
zy~x?^X;>EAPV&^9T!Wr8uyooHpCnA}s8*FoW7T#i)qr&hrxA=^48s4u9O$ffv-7y! z<&Jz{CK%|o6wdYN2q)|P_Dcb$EHLZNVR4@6{$nQd%EpVM{H5tV3&-iw4!hrGTFw__ zRG#aXaa@ z0z84O-^`;HmErB5$ZOWxcb1&yr`5nC2dAV6BN2F7)e{YUaom0<9CXn47IT#QXq~-w zrR(=$v4hs8b?3au${Y{->XyD}&PUG&eHNYbbz45Ofco0Fz@U(&@ymsVa-SuFxd0ZBpZyY`*IDh1H!warz&c@VCKMvxDTW~}P!3@Hy68=B>yX#!hY zRQGqNfl^4r!ICwnkc`y5`slB|bErYGC>!7++6qu@S}w(9+TAI%k)9sJD%Ct^0e z+&#_Vvrbu$raOFR6h!53N9Dp0Q`)OMR&9Aj)KYC9>=hebJaX13m_k1l%=VdB2E71E zioMv^x$%Y~u&B}*EuUJq7dY-;H`jX+o7Fo_o3$2(AH8hsGC9oOy-Rf8bd5OZVlQ5) zNC^p%NU}eV8=z7znf^=%pD#P`=T>_5R)g{?RYR%x>S$%9 z-X=Lozck#|)qMhH@U@U<9*{%==SYc>OEQrSC&7`wfgE3=q)3MIvk?W`* zz+6wBOY`*Xp#^aA$x`i2$k|#eJBU8Z1v04Z8u!V3dbEwN34AS%)&Y@~qe1)6gMM7` zEdXWW1w3sg?MHRT3bV~-JRUsKOB(!BLewwGwe^E9+~e^-D8K{o0>8n?z#(agg(||k z8-_1_$I2#?dYqSZ5MtR(8Uh>eIrLL%)QC%nPHD19S|%LjGhGq?5tqW!s(bUQ#=EZ}wEAR;Jwc`#XUyw7joO!Hg>QVUN;uyxr3}CtutZ)| zzu6~7duDnv`S`d{@1W3%Z&KyKj3XELh9eMgzvXqRFhlG;dS{?TGML}2BWQBDg2gL+ zV!wbVZkjQDOplI-3>p28$i=6@Ny{dck1KndvV4^j%Nu1YH)}CmoN0vjTT!)pFHw^4 z_J($HiNavS_O<=Xh>=-vuE#g?V*|rn0He zGs*gFbZp#Z!&-rxKLS$hfg+rbCAW;2Ys}wm=eU;Q)?hCnY+zq;V(`$f9lth(g z)MTHR6?JFxn;kK-Gj5XL$VrB1N&wSDlE?TsZxIvzP`t;YL=(6JCW_@M8q36L57-uj z43A19%p?`Bc~V71w)i1Hem#>|ZqN~K-kUcBTB#w_||zMI{^zZ6(w)2un3 za-CBhb{?fU*=-|G5}o7vI!j|28y=P@cmPaWw)am*tx86<;=gxKa_+LS)420z2MvBW z>uKC;r}!MMF+Dp=a5z1rE1JWU%@uCjc=U4Yzy&fNK$`pK^YPZ-H@e_^l#*R%Zkqp} z_8NXeB4oBNHp^Xe)(m{F!jgTCf3L4s9B=?j*DesKZC>}@LW=_(F`EB3#;Q7#o8zTM zaT9O0^ZoklRPQ2|fM;y zJb3A7jt0aKG$q z2CqvlxnTl#*Q&A3sY^2X{@c2a#POec!I@qjTa{smSX#ugwFXz3mn}@I8kl=Rfh=3k zi?MIW9d=eRT}(z+wM0)eU*$R+;*{XkRFb!KLDUAHk3^u@Z0vxkbW>;}?8#h{_ZLoS zapc>j464d18>3N^`~%hA!v0q zM#QG^4~dVKyQnHjM5@EHly$(f4RNk^XDEmAKZWol_)Ae#U#l`O*Y$L- zjn4Kn89+Z|w(myUo}`{%6871FNHaGQ8vo3 zwR}QZ4Pb6S7azlk2SQdjtN)A2D~1Eze>wmNoMqswopbb&=P0jR=1Q_IkHfl=XaA&)EW*!__atkl9m~>zZyn;ZP7&0v@faC`uUDhteJ4o3Ab^icAD>IMDoRoWCNL|xs9x3vxKNt6haq(t;ikZgPDIxv zgL~yy6vD((rh_sp*PbMNS9Mvch1Rb*(~mc_5CDZ~kN*KUupL2SdU&Sl2C@JvkkFOK 
zxfhZ;bzoLj7-fHdeAjbaUMkx}Jz7azqjA3`lpQx~c=xionJ0vMmU_YGrPTPPJ^TD}CxACZNz?X|pb#u1o-A_8zq zveBW5!;J))8$*x0wdx2l^0#CFJtqt!TtF0)C7`s%drB>7&EneE7zWZh8j{9t)!kU0 z*v^_)asd0&FOqM_;;p6x9^@8LNA5M?aP^Gl0N@8OKK&{;h_@W3R&Bsk5Dw*c^uJ>D zIIjH)Lg1Xb@Y?B~Vw`%f!^85hGrfK40!ln27ge5z3BFsUUyPb6Ketm~%)v0MW5;yn|=repQiG@uPY>hyHAZk;Ijvt5r{31xhpl-o-Ur@(am2fn4G| zj$*2kR;9b^j?#4op2#kgh zOwdRI6J>EtRc?&GdS}NaL>(ZX%sW1W1N8@znv@`_qw-++nNv|^leW;>Uhpw2fp4d; zVsSKUgdlGLB<1#YLt+Zt_gMihv5g%L6F)Ky7x`OGmYTWJQ_r`>U)`6|t@PN3d1T+E z;3Pdr%cVB#|8gSGdpIOLxZ}jUZj)H7GouJ)aX6-d`u-NjT|KDpNYp-9(r5r~>yhXD z!*i_{>aI%*5Y(sKn`kqxH!a%kP4*?Zv2)U&-~6gg{sufDDQno#uC$bj3ugKyVdU z0@9*kUzkv%z2t6=LgqdY7yt=CSxsPfs`d31sDc2bFVomf0wB19%2Vi1zE1a2?ny#><3F+HV)nOSdJm*x&MFi=ApZ8f`|Cl>a9{YQ z+;ot5+xq-Nb$j>*%-W+=f@DvMzpVwdYe|3c%lxg)QYUoT^_c%YS2V@YG;@0>ml+ zTE>hH*r{zkhJvSv?tWQ0m6NBddcXefc(@x_yk}K@1SNv;I+`rNd6oecZ8}sjG2W1k z74ci4_k$2{A7&qf-@wTU1iGFVfoTqdzvg=0!u^1Jf4*FVaJg||-7?D`S*CKQiT#Fx zVh{sEae())tmvP$b5fTNuYhUhBR>|@wYU9eWhZ}BajrJjou_vxfb1lc4$jR^jfs8l z4dPP(>%S6dt$%6<5eS{AopRH+V6wS-;VPnc2MNqVM~CGOI0z8ggj5I=HhQx|2s{W+ zp>xD4+p)L02w&XVQ1C0Ku5w#om>F50#K?sL`Vn&>tCgw7wjN}AhN z^=P(!nDD?;=|%8c+qt@THI11zIEnVdk3dM+vt+9nYOOd21*zw*w&BZZwa64tcjQ70 z2-!)SK%6Vrjs>b3`rM`lPqiL_`1g}6jb{?@M@vqNZvdxp?%nUqMwip?Z~zmLkot;Z z&4M7{$`l}KFJIBj-@Kb?`FiYP;I(Ep>#@N>?Y+4dV97Ie8|z_FzcU2c+RzT{p!u8X z_0rKtuWE#j->-n=bv4ytzmPZH6N$${i?q|A!eWu?nUZ?tW%+2QEpN29KxE7M))y_z zQX7;;85W<0JqaX^>`SfHYj0KFVt(DB!u2@+<>PW7NRUT3ID36V`d05;ad}@tu-HA| zV*AGI?}fQUl}0qc&>3aN61%U~Y3fJtC$@R|`%#0I>gjUJfkVd=KPEk%F6Tlc6!Q}Y z`RV#O+ljKkBd(^OkSVcB^%2}F83aCkp@YzDPGz(0f0^ky%LK^WKpBr7RZnf3( zx;kxkfp8aBLT&X0?1Oj(?}t!<3B=?_jlD7;Kk>h`%iNaF^-7{`DX4%rN*p^$jBVA} zx=8d9pEr@6{0pQt1yj(gsIWkls~Xj?-xaFssgu>?3vnGqP}P?gblF1XHuVY5%mYa< zCxRBL^QC&=A-Pa}yKYNy>e?4XyXvbnP6Ec=#rqncKxs|$*rjV$AN}yA=Lyst5LMTC z`yc8xweRh2+83hI$Fs^>u&WpIzyEaK7X7Baz$ZLKXT${#XJ3KJ5F*OS5Y+TI)8=SQ zZugic(Z4FG_q%4;4Lbc-CY+=}X6d@S!mf5&R7}`qq2hwA?K)f}iezv$RIz9N11@30#@CfT^i zew5z9QBb&Mh^aIW2%RZ(f>F(fzxiK@GRcYP12z)ZzuMup-pXcW3zL+8tQO~Y%~T?^ 
z>HuhE?pMXp>YlJDN@2z&>;BugUcBe&YoN?SPUcXNd8E+7ucK9wR4@&#r?coEzcZk4 ztB86&)50DPwY31^TAlM}GN82U>c3;57Y>$b&iY72et+_tAuapCcH}4M5JI+}U(>s0 z!LFR=gUI@zx_3-I%{^1(0H`jb%%NW={PKdP37@e`yY$Hk!miAWY#tndX^7>eAozqZ z{RG^}&&uDmEJ+KDu{rOc4Bu9@n_#J&Y+#!6s!KpmQmHkXNXt>n80E@siG1+oe4VvL zA7ffG>U31wB}9O=E|8LXH(o&svj+AjX^|I_gZiFDKxk5>9@@G2ysj|k6Nmt@#_v*? z=(r!jkKt#!bgynt)uV$|X0QAW3+ao-^sIyuP|c0=eosrW$`Im5u$$U)b(aHbHeeGx zK_h%xnwEnq0-qzD4YzZL2=lmM)(GVVJ|i3ORr#s{&5H{8!$04gq4O#l+}oUgHoZX> zB)gmxSQF7QY6bz)7rhTQ`^oLLT_N3_P$-rbO_pgLkP_nN`?!4Do3LbVnO8y!xSQ};aZca)5zgKvYwzA!e*8KH9IxB7~Myw>`UK86M<9g zDzs4i>*o4)L}CQrHAIj^Df0k<1^M@*6*K4sK`Pgu-(ojkjUc)AycB$4_UniBGwJ^a zeQ7UmF$#F3eLsXX4XpyTLk%9w1csw)Zsw#<<8khctPZK5_rV@k)}RGt?JR z7pX{WA{AtG`pdq>Oz0)l-XO6{E&ZHQHj?ap>#%Efgr z)Lj@~)*W!b#t77_!33Mv@HguyxZcO}Y(}KJ4!2#9tgc){ruF$)Wd;bgWxtetEIXE6 zacd=DO4y$75SiAtVU_74*n=s>h)`C543+S`R6tbTuTz(%q*)05I1 zu*oBp4S|y1I~0^Q7Y(6piwSc8@)O&4CbttR5xZSkQNG+_fz3N*_=SVgc6HV%&Zd(Xh9A^achr^duL0_d%NO^t%{#%UOFPp~73LEB?=TJv4gLh^t??a3&P z>=M#}`Z1g&G(1$F|D>TdR~%pZ&X@18%3fN5rbs3L)xP^?O7HJ2h>HJg`7Bd@vVCUDy{8Pb{$pU6$1RcK7I7XJ0aFC|_QZ5)=0dQI#m za@){}GtAfG7SQ8FCH+xd4&!jM0j-DL?a!d9(d&+WAk3~&QF#0t7CTaIk^{Auc*xod zj$N)6X20N;1k-MCeO>tia55wjJRVv7`>V#KOD+Yk*c!R~V0w#|KM#D-0~p1as5!I~ zlu!PZ*yHLBc*s4`DB7F|*s%)KW|(Q+sCcK71WkoOPXWP~wyH(7`#pSI5`{*-3Yhau zn?}q_f>X1Oe5k!P2qrE}gQ<-29F^YBJBkCi?_e!IOuyxp_d@6Lba=?XMX7lqBe{+Z zqlRGMlYXsw&$}DF=-8+}MM)1B5M4v9=>0@hd`5M1Eo6tcs~wOYLO@@$Fk-_4q^z-X zclytnQ^yADyqfhxE+B~3Ba8X#aILQGmWS|+pOmP0a029MIo%vO=vfXJCMdu(SU)`w z4hX^!$81P8((m$vHP$pJagV*xGATSkJEnJw-kzl@&I?cVEf}^M50`X>Nd$20Lq_t% zzZu0B(*SdJ>y+>4Et(*#*rh?o|7Pp7He~t+6N|QZ#Bhsn2lRBKAf72dEs`+`_ Date: Thu, 21 Nov 2024 11:25:12 -0800 Subject: [PATCH 0824/1088] Delete docs github button.png --- docs github button.png | Bin 11805 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs github button.png diff --git a/docs github button.png b/docs github button.png deleted file mode 100644 index 
6b129e0db728a4ffac63419eb054653930dee372..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11805 zcmXw9bySp17iURn1rd>!5|9w28$?n`=@3cj?pRPjx=T`NN$C!eMnJj*1c4>BzmP4@r2I>|A6_7MMsX#;oKINhI&W zA1vWKzN6_7iAmzHhx3`t!H6NI&IsR9os0F2xPWZN3C_a>U zPd|^aAo_8do#y~gS>M?CQo6G~bbh!oiBNPIkJ>2xVkdSvw!h`tqau_2(#UTyE3q5K z7A2o0wEK2ZjV(&VUkOzou7u)6+M+HO8{dB3mgye_uk~+5v8xuoLbt}KL9we6_MQBD zBK2G0)mQ_gKD?rWR2laG(-Rh4ElhyXz@U{xFBdv7?>+aS#_!(HEM^l<)5u>f!eX?K zaKg`zmG_=xk(Mc;GQoWOv#}*lf1iAmi_JN5K|8Aw);TBN<&x0ygK41^KeR~KjG6wy z&Mv)tJ3xHCb;X<`!1hrry`00zhpa#!lJbg`H9MjbSvX8YE)%r{70>ciJKcf@Y$(S* zAuC5e!PxCK>1S5LJovVuaJvWVPF3uzzH}@-NfDv{HRG*Kgzu(_4braC*7efbgfO zG~7J5O&$fBkS5uCqE4bDp`0`7hSJNa_Ii*eW6Tj)%htp%nd5qP6QExayJO!E`>hvC z&rEubs!43}<(j@n%Q6_&Y=ecefMx(=@3>L}oT^O4(dafyMpqNa)}$?&h%fxSYAb28 zyCu5fv78T1J~(>Vqti;;WAH1EEx&H-YJJDo9@apFmvC~+{z17+#R`x}LZApd4j$%j z)AnF7`xz?>YE4!LO~5C3-f})4cvE_3rk*!|vC0aKLD(hd>nFQvS(-PZWbQE$!|+v% z9`92Xz&vP%r@~47D~U?M>&P_<#?ci0dPX)+tYr(kycrlWnwthdppFCOric zK?U(^;az@vxsgC1rou&bTEzUtX%23Pr1>p`3%a2lMbgc2@&ie?xQz5i0{;|^p`G!; zGhqpGq!2^jUlEyf*wO3Dipuk3Az^beoZkm`{@t_mRY>A_Qxpw%Vwx%*A`{&gPcQA} zFcuwBVv{Zk z7TpcW!Ht2Ga1-+}qXi1PPAA3i5SH()t&|&jHl7}VRZ+fm$r-g`*ro6yjA9w6rQFY1 z;$(%Gpo2Gfhz|ejcaW;e?M3=XXC9w($*EsA(U%~o(N?I6rQIk z09lqFp|8Twh)Rtc-(Vw}{R73N|H*hu9pzd&M)89-9_&(lkqO;cO&**ecS0}0D8jie z8B2hWuB3z5J_`N(o4*&{!nFHH`tPd%u=FhY1vNwa`NKFogc}Tvc&-JkjV1cG9pvz~ zoaZ-EM}K!MLt~`>Y9t4Y>b)^g_b-xm{2})a1Q&&{tu?_leO+$wQJm(Vl2`}tZgr(N zKLl?`Pty3yAIpD;)G<2{58-vkxvuQ-A}`Rc9B*~hsM{vf0gp_?GYPy4;j?kz!q2-5 zIPRkspHR;x{S|Pk*r2;ClWv>LOp!dyqQ;)nhi?N_l5_Y-KtXc+e2D~mJZsv1#C6S#Ir^SW*K4~gt(`JeX3tK(7fsTNOqtIBU4@4;a}Z0uTr86 zeqw~bZCe$8e z|M)_Dfw+#P$Pp~j>&r#wO7${kWn!tD>qmkW_4zOUcZ^5}8M6$NeAvK2vG6@3QatkE zC@jx4jWB4(kteaqM))_(0|lot$cqxSZ+#{U#73N>uB~eR+i{gbqwDraQJmUeiWPd# z(-|y&u~A7Q)U$#MD@ci+eXvev&N{}749mL*alhpgg7>b>1P4Kux(L*k-ZZx({FjRO zLd0WCb?|brcl&My6#vSLL_G@;fAI&32XZSVnEZv!Vy<_GgJle%Yv~uaj;@9c#24`I z-&2A}SDik8u 
z_)cqxmp!~Mt`D8yeTIs)bLKsC$QTlyW6e=h->9K9q1MqQ}tD5snF$5I8YlL+mrIw z@7FX9FY&D=cp3B})a{wju9+^ygs=|caCN~6ESe%;itdXY$p;ZkJDert4T&A)VrSlA z#sR%t$cBo3Ct9W1aAjWJFVmI&LHOslE|KJ;Is~phP><^v|SMiyK+Gg|Ly! zRW0`V8e-b7$?0@77htUxn^T2_Y)x7|r%ZCF|GhRbyCS`^q_~)0Zfte>wRh|@y}G)( zy?5(!^02+gW`=&}pAh_mZM9Tq?bsZy;aVk|0(%0^)e4m-nsg))+uBVo;>dJyS{76i z5)wUSv4B&;AEtl5V|uDhp+VrG~q5u?59j8P3?MpSFmG#b6XMifk;r;6O!-x(q<;YsU^we;bBAQPT0 z`JB&==n{+zq62R@ULSw&Q&ACYS#S{Mw8E_QUTb`;|CMXqk)GMEBN%@bx4KzS&+PZ- z?!K$M?{&wc6(mtkr^)@2Y>lqJ8q#WL6S7LtQ!b{R#cSUs&5Tv%1qtQ7Fx@)AlxoO{ zJr0~P06cx}Fg38&F+75a>|9ov*%0HBvdx=b?5F3dGCx43q>S?B5RH?&^Zx40U5@Hy zu>y3@J+WZ{7?nvs6!?X@8Y7|?FE(c{y;u_7Y@GyET$krmjN*#!EIGOd1U^!5S{*e0 z8(;V-2??Gxgy~=zkj%fd+aXD4Gp-|;W9a@fH{&sbT+Ds03vWgx>(|3%`ABToTuLU^EjGWKL>?)vh)|_w~9G06DKDY8dSx#CSW8pGNw{G|IPwTlV zfl@~XIa3pGrD6tG0@e5{3YR~3T=UiIh44hFI#;66aaH_n=BiS9)$$yV>JJA}ggA=U zj*1J7E~Z$GzbMA4m-5=qSo0_8aCwlv_RG?(sD6aKo+auz@Jbusn8&&2br9j^Nf#9` z6Vnp%;bz?3(;QZnio{Z_-s!c8%1`wI2Tp08+8|=x!*YcY$T#3H*e&P%&~yKZ7^ZDD ze}TVTMn&xO3fQIT+P*%#FQ-V^VXE#e_oUDAbJarif|VgMU)KQ=aC|~~Un#oJFau4p z@RFC?3vu_6!sGOs#OtHdT1 zoK}6=?6BJ|Qdm$hWhz#7G5#gZj#r^Iqnpa7#kx`KhLqB!e@aehpy*b*b^gg3_fXrJt5>szQm@aGr-nqXQOjDdJpZgE zzKQdtj1pP3fNRYU&AckqH=bE?7+d!-tCflrJkwZg00O;n5zAcDk?%6XfQj&pxvwjg z_t{R zJACha-;llk^t+h%)a$%?N2lQaXQ!eemR|Lx<|pOtdRk%uGk9uIwKYI;R2kQ|fUm); z_SQ9iBGVk<4ID#%a{u!c=WuHa+f^npZ6dk!)21N9^L$$EA05%6HIobG|J@Gp(ya+E1yZ;1}!mAbkz->&)wW`?}lsHg4OM6Iz9cCM7_uQs)P9i@hO z+)bI?wmA>!X-PFa;D#JVzmq%8EF>iSs`nl+^*)Wi3a_`+YefE3q1z;L*^}1B*iWx@ z7~HK)Ien-7wMzXEu*7%0ZMZM^+;d{g9{7s;r-!)7KG$gh-dVE!YdqFI)})_}9=`nN z)BD2JW^0))A9S4>Y=+BkKNzKo%C6W?Ip7Q%A($w(d9z-z@+naSnO3$^712vfp8V5Y z(|~C(eSG5R&6Si>ittz33YWYW8B=W+!KQf?o+;w8*%SaOW)yz7N`T!dWYMrQr&x{~T}i^1ZLtxeHDzlys0o z7G34K_rou`#mACP+g#?eCq9h5fVzEz0axKKzZ^1HufxM?{dgvPy;eq9driY~GWp1z zA|dy(Sx$Xh=kkF2Q=-t{9=Zx0yQmG8M0V{S;Iti?S>tgxWXdhB-o>Q7$5BBJ)4ua~ zJrN%8K2KGdFH6i#G4-RH{mY_)g2mZ|hPUVEp8%zV10V0K2eaKW=zbKSHixXAnK2ON z+?>~1K;i2RHok5yy$#>Fi?hD=g)F8i<_GB>)P=(5DhF)83v|-39hQ 
z?$IyJC{%lpnEgo#v)fT3U@;>^^8G@Ce?FI`iOHM^_P_{s}>wD@@VScIrl0pd@BNi%k1y3_CJ@3%`h46u-c0Gl! z=tLN8tV+6z@rxig-EGiYjUOk;Cg!`95HmjO`FOkB(#q?})y5MHN}KE{8w*>rl`@UJ z9UTUitGQ#0Hc zc+1IGH)Wg?jVw{ImY4KhUc|n##HnxWjv=Vcvin+>vGH8^@tyGJNb`* zwlW$93P#2kld6z=mWo&m3Nyz`NYg>%rbJees&FMa=qkx@6M z0r;M_W5~SgU{+ys%Z}<};;A~&3(zRQoKhmHd_(!gwBt51(o4~53JA#4?M7K`S0#c962Q$cf>rURv zZRZz2%xMOS0_JS>V@t(5*(R|+jC2`_Fgh2l9+M+~&jiV7+c*J45#I8KV!^J7Y3whs z4Izr^hdZ9Mb&qN^k>8}l$ebrQJ3LrAr}pr*13M#9d7XYsXHQp{T4~Il`7VoSBaz_2 zW<}Fo8Jn*rRVc4S2E1(h6H5dF_p5SQHMm^8R~=GWSsT}!ovgf63-99dz3kiIPZ989 zH}2+p|3Cs~5s`iVN9|1xd$WlNLOg!+l>4jiWcRd9bm;#1-| zxX{C8E!oxF7(r|y!{c)rZ=bFGOdqLMlxcQSe<+||E|fieJkL9pSPlvd}Hw&JH zwEH@j#)k+Wm10Xm0?`tl1s#kuc9|4WMA2d3AvTmnJIcoK5!7EA+X23$TelL~9(;a} zz0aFyQ(J9xwYA7Db4J;aoI|gQf5emAHtqYVVe8nHAKS-|6KmCeYaCh>Ur2nUoSY06 z)Qh!ezXJ{i>-1!hg`C~8YNKa8EnI$x-!k0nP9VK_I6VQRFW}Q7k9+Qq$`$qS)h!U{ zf-QR^HZEWivV{9oTg4!r#)UZwMOOXt(udnG_!T_%IUCLFJFUM|5@lZIDpU`(dwKeD zaCU0CN@$1RFu_2#8lTcpqiYR7^SoaUZZE5=SAaf9{|v577Cyef^(Zs$9rBBDNU=fM z^m_>niJt2j3=ukoKla?}_OQvW25%d5wi}Bqw*nY{kX1d0XP%pT%+c_JJVE3uC%Q;8b zDFHBFjloFCcegF7q2G8VyhO$G&}KTcd;eoNDn21dxO`BSSpkSCDIa8Cgfd3;$qT7r?X!=sB>n!Ttz}r(;DlVwAydt{vVF)ze z%%W%5Ak_C4gQDP&%W^4p^&E$9{nIrY3Xn?(i1CHEbZgsD0CKI z)1p^$ycG4Z~)6v0a7J`T3I@&y?hS#Yq>pT&2T zrEJBVo4%i4O{V2+Mo1+)q$H$q5=UEm+36ZorhnlSTt3a7VrU+!Z0?fPt_TZred;~6 z7{1(?$HP;Cw7*rPSP4PDd|;Ku8@SovUpO_VYT+1roF@G%JH3YOQMPrWc!@l15i2V% zl5u4lOG9%ZzxV(U9c8so-tmQ4HbTJq?b7?_r;5~*Y zNAt|mPH1fa_9e3;@Yy*f+!VfmFn<{>5o~IT){e9a&?EtBX8b?p{^ZNd-j*2rWPM zu!ez$F>#^oJvd}<%i?}HbiSm{m*rQ08(VSQ5lsD~PSM7O1MKDy=aIib!oDw)Q$1K& zSWZ@3FRj=as#*K?&1)Q|l_0|jj}KgMfz_bS#+E({oXla zkK@#ObQIE!0@8+gUoYEKPwwtxA@*g*?HBdUps?_Q_IVjU$aw~meZ5}t)Icfcq@D}L zZM|m@YG3a6?E*+W*wPetpTyGC)U7M3nQQHV7f5lI@h43Wuaw=P^dAy>Qse3u)`wOY zTeJCzE<>eDAel%qB`t7U=oRK$2w|egJZ#Z!cw^e-g+p`FM$&nT_8N-c>HCg%CVfLy zA;pcZf$26@T_~*zSdG60IoKOe(@7Mrfq#gUR)pV4bWMkjNuFOl!aM?l*Rj4%=Rt!$ppS|0w@u!w4WnQPs)$3Ul_GY zQVN=If}}srB?5pmvtDCdgtl7Tp%aNgvRyX7e(oTlo$!6|XAY3I!H0NBPzy5rMO4jaBGzuj33 
zvJvPOzz$W@wE>mk5jP+|#-#`W;k8zyHyT4JAP^VaBe)gt=kqC{*|<66TZD9VHU}`QQ)B_+T#k2p%rv>u#_7M_R zDHp>2mDGy2ZoEO3(!p~>)E&VX)Wu79YqydAJ<)9gOvKEYpZWv9z#Y#=-wU7={9D-b zwWrqV%Oc#|k(#L*y0c+WK@R9dvF{m~*Nkw-@U1+7B0GW&OAdAj|okCzC0{dwm*roHcHxqy>Rt_ktf>nyiN=+*G*;CAPA))GDtdmbDq}6f&wtX;FTHmDs%~f(gl(vRgcxmf zLlD0ISHGmn8Vx?kymr(^ERKWfA~yac_ov@y+Y6<+LsC$3#^C<4u`-4ub409Yx#jTG zc+GBC^n7~A?C_JQyu&{%|Ge+D2;hsGu&byd-i}|X5fl_EsAlDO_b^YD+4gkMjhmfsd$22@_6su=(AqyebWuHDI!i$YGDwXw z3mB~*4R*SPs-I^^GDGBg`b0AVdSAzlhxb-CQ_AZ8ldQ{YO7A%=ACX-n$LfOnY#@bb zkdUHF#oq?{7QDlHKNqB7P+4VSsPQT&BxDSi^ZZKSQ(gd;B*v+C;Ck4Xi-?vtdI&F? z&x!K6bvxvIj1}AT-pHMkGi*ChYW%uPF4*ZnyyhFK134dFZ&?LgPr`;&M5iKEKUGa z0&MN@jXA$_B8uGymF*ddc7wIYJkuZm-)ejo76mH2e-q?$?~AN{Nqde0!FFq&!4^LP z4#u%Bx{;tZIsia%d|(*PXp#$X7*IxV)s@E`vR~(aB6S8}b>)L2UQGw&O`i6AruT?_ zO@HPrx33>Cn!@cy^=|NiMs%EyARN-kL|o0YcU=h9E@{^#3ZG8?E1!*$ulg35I+stu zzC=yg2R@y3vbv-Hk`Tw9^7nePoFPHvOo3E29HaonLLMQ>6ciTD>ZYfGtm=0^E6^bu z;zhejfj&Y0E67QqOVaH7-h%pY^&-U|N^XB* z_mPD0>Q+6Tpp{k*(Pb3}J z*ep+Sw0_$xYaOf{63KL*f+Cn^tK$PKDtDU9B2hTn7uWQdz4|r7#b0jozfY?bwT%p8412Yw)W~@e^$&sJpeYNwsJ=a$V`0Ey0)l)G1b*nfn2_8?231!}?NR7@xe{py>=MfhP+MU+s04+@(^L@LX`C>Pr;ZW^0?2Ek-PYT|}i z!o>PFJxFb@AR6OZRyA>~WU?3kx{{3?qo|-2`mW9E6q+cZPpCeI*QeaWAs$}9@Qrs9 z06eRDoZpeC*-=i06L8YC{-fBWx%|4rx= z>FD&zSOsyc6W(2k$e{$VPq2{?&Rj42WD?Yz)AZHBC6+_Y=qUnL(~(OfIy}29yCHg- zG~jXihHioPjy4kcrte1g-ERgX$X+l-VTCHL zB!5N&UiJsICaej9<($E#)IWCyX|Le~?PIyu#OWqtmm$Fw+(&PQc;Id1}NbnW$Hx1YT;dP6g~3szR!3Fj<{uOXrC%} zD(Z#cmxL9efu+GClnmI}vwBr%t858c%dHEZFC@>W0|+FHvf+eu24Ey8R^3_@ot@#E zJ;6^bXL!E9kg?Xys^Si6MyO}5ikpMn=JVk>KYI#RsLM@s^kA5L>fT}KvV}x&)^Nve1Gi;_As2`-Tt$0{u{5Dy;WiRcxhuPwuWh!ewc4C0%}HM{1vuGZJ%2E z`k@koa*?C$)*H(!=x7`Za*3J6+EU9w3tC;t0@4@cAnc242p;^D{?xPB}>Ab!lE#Gh( zP@Rx|SK^+J=x;&z5;g**-~SH%)B+?6(=`rsGG~vGz#I}_tXl7h$)Jbj93j5LCJ&7T zv1OqRV6nGFLvt)A-m=jGYfSn*gP~8rG8+5$n`Oi$$;kqGN+rViMFX5bShjqB71fJg z4{{!Y!Gv%pc&8lcNX8o*2_>)l{N(Op|7M{6sI(PB1GKYYAZuU((6OB2=WuN zZJKaHFk;1+dwUqBfs&7!N>nNZD9v5Ne@(Mnl!J?%tqxdoYQt^|`!~uJyZ6vuSR(N; 
zcJAF_m@jUcBDOaVEui=NWh{-*74N!ag^DFm@hd5F)XIN@R?=4YB+R_Qlvc&7Q1Hud zQVvYpwyz`L2@bvzVskBZqNjZQ6K^T5l!}9gTElol8BuKl4f)F#4bk55p5nn-c0Wn| z&HtSi;t@ncoOsb3nogS~HN4j4+3=BbBCt^F>}+@#+>Q8V3UALV ziRN5ilIf#I3z&1Onie-)EPTHi4NYHY5Sz42V^7|xAG|*dGhn#C?}!um7r4x^6z~9h zSN=5nE`sSiK+Eux(Y?vs*?vQwGQM^4R>d#+fZ|DJmsNPi(So?$B7yY_plU4A)OSqk zMbW2A)mVe!1m*bmvC`0s73v^oA6QGkA4AjMoMg|2p~4OmvEgYyNsdE2b2|gd>mrFH zHmTttGO4Rju?1pcHq1wnK~wl|icklYLMnRt>Z3Jtpk^dYWP7qr)8D9#_1_2`O&mU5 zj|B&Vp2C|xfvo&XFutmX)s$et03LE>oh{(8hC=j@zL}lO9bfSgp1bb|{h8qBaFJV> z&B%`yH|D=^-uA0apiPy+fv_CI$6VLn%AXGVn`b_yA(%sAmhhwN(UBnvAFSY+b_@*z zqnE81&tWpKO3ngcjMBgD(5E*Z1r#67g89^!mWDQ_` z3pA{+y%HTiiSP|yqU` Date: Thu, 21 Nov 2024 17:46:22 -0800 Subject: [PATCH 0825/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e0c5d93562..ab2de2b732 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.1", + "unsloth_zoo>=2024.11.7", "packaging", "tyro", "transformers>=4.46.1", From 3782a5975c1b9ca6e13ff620fab6a5e4bd808cc3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 22 Nov 2024 16:04:53 -0800 Subject: [PATCH 0826/1088] Update loader.py --- unsloth/models/loader.py | 48 ++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 232fe6acff..452ec5ccb3 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -359,30 +359,30 @@ def from_pretrained( old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) - with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - 
compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - ) + # with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + ) pass # First check if it's a normal model via AutoConfig From 80adcd609a7e98f5dd8b39c8e81977a16c6b44cc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 18:48:41 -0800 Subject: [PATCH 0827/1088] kwargs --- unsloth/models/_utils.py | 2 ++ unsloth/models/llama.py | 4 +++- unsloth/models/loader.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 69f36f0d46..0be3bfcc1d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1128,6 +1128,7 @@ def unsloth_compile_transformers( debug = False, import_from_cache = False, disable = False, + return_logits = False, ): if disable: return model_types = get_transformers_model_type( @@ -1158,6 +1159,7 @@ def unsloth_compile_transformers( debug = debug, import_from_cache = import_from_cache, disable = disable, + return_logits = 
return_logits, ) pass return model_types diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0256fc1830..3ec59b339a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -974,13 +974,15 @@ def _CausalLM_fast_forward( logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) + print(kwargs) + if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: - if HAS_CUT_CROSS_ENTROPY and labels is not None: + if "return_logits" not in kwargs and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( hidden_states = hidden_states, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 452ec5ccb3..fb97e74622 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -350,7 +350,7 @@ def from_pretrained( *args, **kwargs, ): if token is None: token = get_token() - + patch_compiled_autograd() patch_compiling_bitsandbytes() if use_gradient_checkpointing == "unsloth": From 2bb066008bb5cda2c697197dfcf45caa3852df6c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 21:31:43 -0800 Subject: [PATCH 0828/1088] logits --- unsloth/models/_utils.py | 25 +++++++++++++++++++++++++ unsloth/models/llama.py | 4 +--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0be3bfcc1d..5326c09b52 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -54,6 +54,7 @@ "unpatch_gradient_checkpointing", "HAS_CUT_CROSS_ENTROPY", + "EMPTY_LOGITS", "fused_linear_cross_entropy", "patch_unsloth_smart_gradient_checkpointing", "unpatch_unsloth_smart_gradient_checkpointing", @@ -1164,3 
+1165,27 @@ def unsloth_compile_transformers( pass return model_types pass + +# We need an empty logits flag to warn people logits will not be returned anymore unless asked ie +# os.environ['UNSLOTH_RETURN_LOGITS'] = '1' +LOGITS_ERROR_STRING = \ + "Unsloth: Logits are empty from 2024.11 onwards. To get raw logits again, please "\ + 'set the environment variable `UNSLOTH_RETURN_LOGITS` to `"1" BEFORE starting to train ie before `trainer.train()`. For example:\n\n'\ + "import os\n"\ + "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ + "... trainer.train() ..." + +def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) +class EmptyLogits(torch.Tensor): + def __init__(self): return + __getitem__ = raise_logits_error + __getattr__ = raise_logits_error + def __repr__(self): return LOGITS_ERROR_STRING + def __str__ (self): return LOGITS_ERROR_STRING +pass +EMPTY_LOGITS = EmptyLogits() +functions = dir(torch.Tensor) +for function in functions: + try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) + except: continue +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3ec59b339a..a17af2dd8a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -974,8 +974,6 @@ def _CausalLM_fast_forward( logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) - print(kwargs) - if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) @@ -997,7 +995,7 @@ def _CausalLM_fast_forward( return CausalLMOutputWithPast( loss=loss, - logits=None, + logits=EMPTY_LOGITS, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, From e17dca45217fe5a63cbeb25a270d565a334aca43 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 21:39:19 -0800 Subject: [PATCH 0829/1088] Update llama.py --- 
unsloth/models/llama.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a17af2dd8a..36fd151f4e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -980,7 +980,8 @@ def _CausalLM_fast_forward( elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: - if "return_logits" not in kwargs and HAS_CUT_CROSS_ENTROPY and labels is not None: + RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" + if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( hidden_states = hidden_states, @@ -995,7 +996,7 @@ def _CausalLM_fast_forward( return CausalLMOutputWithPast( loss=loss, - logits=EMPTY_LOGITS, + logits=None, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, From b93e9cda3bd602b1eeb6a4db18939494266dc955 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 21:46:36 -0800 Subject: [PATCH 0830/1088] Update llama.py --- unsloth/models/llama.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 36fd151f4e..5a48959f3f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -994,13 +994,15 @@ def _CausalLM_fast_forward( output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output - return CausalLMOutputWithPast( + output = CausalLMOutputWithPast( loss=loss, - logits=None, + logits=EMPTY_LOGITS, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + print(output) + return output pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass From f7edb152e76a6e124977b51391fd9b429e861659 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 
Nov 2024 21:52:42 -0800 Subject: [PATCH 0831/1088] Update llama.py --- unsloth/models/llama.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5a48959f3f..e2ce925f4b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1002,6 +1002,7 @@ def _CausalLM_fast_forward( attentions=outputs.attentions, ) print(output) + print(output) return output pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) From f7278b2624d2df7139209dcde9fce65aa4b6c5d0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 21:57:56 -0800 Subject: [PATCH 0832/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5326c09b52..a50c8be2bb 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1176,7 +1176,7 @@ def unsloth_compile_transformers( "... trainer.train() ..." def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) -class EmptyLogits(torch.Tensor): +class EmptyLogits: def __init__(self): return __getitem__ = raise_logits_error __getattr__ = raise_logits_error From 815576e6b1715bf994b7e6f76c3c6e46bc57f958 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 21:59:40 -0800 Subject: [PATCH 0833/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a50c8be2bb..90cbae2131 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1178,8 +1178,8 @@ def unsloth_compile_transformers( def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return - __getitem__ = raise_logits_error - __getattr__ = raise_logits_error + # __getitem__ = raise_logits_error + # __getattr__ = raise_logits_error def __repr__(self): return 
LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass From 608e31bd0ea39ba4fe76455748f53e5c9bf32720 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:03:08 -0800 Subject: [PATCH 0834/1088] Update _utils.py --- unsloth/models/_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 90cbae2131..242e30675f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1178,14 +1178,14 @@ def unsloth_compile_transformers( def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return - # __getitem__ = raise_logits_error - # __getattr__ = raise_logits_error + __getitem__ = raise_logits_error + __getattr__ = raise_logits_error def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) -for function in functions: - try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) - except: continue -pass +# for function in functions: +# try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) +# except: continue +# pass From 659560304473eefc0de53ca717459b9127e5a9e7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:12:22 -0800 Subject: [PATCH 0835/1088] error --- unsloth/models/_utils.py | 10 +++++----- unsloth/models/llama.py | 2 -- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 242e30675f..531adad317 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,7 +1175,7 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
-def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) +def raise_logits_error(*args, **kwargs): print(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return __getitem__ = raise_logits_error @@ -1185,7 +1185,7 @@ def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) -# for function in functions: -# try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) -# except: continue -# pass +for function in functions: + try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) + except: continue +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e2ce925f4b..c0242825d3 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1001,8 +1001,6 @@ def _CausalLM_fast_forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) - print(output) - print(output) return output pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) From bfc1c3e5d8e118eae572611c35769c1251e78a8a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:17:20 -0800 Subject: [PATCH 0836/1088] Update _utils.py --- unsloth/models/_utils.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 531adad317..daf0bdf44d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,17 +1175,19 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
-def raise_logits_error(*args, **kwargs): print(LOGITS_ERROR_STRING) +def raise_logits_error1(*args, **kwargs): print(1000) +def raise_logits_error2(*args, **kwargs): print(2000) class EmptyLogits: def __init__(self): return - __getitem__ = raise_logits_error - __getattr__ = raise_logits_error + __getitem__ = raise_logits_error1 + __getattr__ = raise_logits_error2 def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) -for function in functions: - try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) +for j, function in enumerate(functions): + exec(f"def raise_{j}: print({j})", globals(), locals()) + try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From 7f3adad39756f1e2023a209bb821dc22f7139bd6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:18:33 -0800 Subject: [PATCH 0837/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index daf0bdf44d..82e8f97eb3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1187,7 +1187,7 @@ def __str__ (self): return LOGITS_ERROR_STRING EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) for j, function in enumerate(functions): - exec(f"def raise_{j}: print({j})", globals(), locals()) + exec(f"def raise_{j}(*args, **kwargs): print({j})", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From 5bd41e30361468819b58f271d5054f304add9d36 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:20:16 -0800 Subject: [PATCH 0838/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 82e8f97eb3..a4ded5f55b 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1187,7 +1187,7 @@ def __str__ (self): return LOGITS_ERROR_STRING EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) for j, function in enumerate(functions): - exec(f"def raise_{j}(*args, **kwargs): print({j})", globals(), locals()) + exec(f"def raise_{j}(*args, **kwargs): print(function, {j})", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From 7949ddf05047f3fd00af29eaf2484e9f5e359937 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:27:49 -0800 Subject: [PATCH 0839/1088] Update _utils.py --- unsloth/models/_utils.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a4ded5f55b..036936810d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,19 +1175,22 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
-def raise_logits_error1(*args, **kwargs): print(1000) -def raise_logits_error2(*args, **kwargs): print(2000) +def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) +def warn_logits_error(*args, **kwargs): warnings.warn(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return - __getitem__ = raise_logits_error1 - __getattr__ = raise_logits_error2 + __getitem__ = raise_logits_error + __getattr__ = raise_logits_error def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) -for j, function in enumerate(functions): - exec(f"def raise_{j}(*args, **kwargs): print(function, {j})", globals(), locals()) - try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) - except: continue +for function in functions: + if function.endswith("_") and len(function) > 2 and function[-2] != "_": + try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) + except: continue + else: + try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) + except: continue pass From 2febe57a51c0734399d821f1e7768e245024f652 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:31:09 -0800 Subject: [PATCH 0840/1088] Update _utils.py --- unsloth/models/_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 036936810d..4daac6c4b4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1188,9 +1188,11 @@ def __str__ (self): return LOGITS_ERROR_STRING functions = dir(torch.Tensor) for function in functions: if function.endswith("_") and len(function) > 2 and function[-2] != "_": + print() try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) except: continue else: - try: exec(f"EMPTY_LOGITS.{function} = raise_logits_error", globals(), locals()) + exec(f"def raise_{j}: 
print({function})", globals(), locals()) + try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From 23716bfb80901b2d46987472236e39cd1eb39f9b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:32:27 -0800 Subject: [PATCH 0841/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4daac6c4b4..5024204f20 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1186,7 +1186,7 @@ def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) -for function in functions: +for j, function in enumerate(functions): if function.endswith("_") and len(function) > 2 and function[-2] != "_": print() try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) From 071b29b7c4ae6790e7694d76932f17d0f3642dcb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:33:18 -0800 Subject: [PATCH 0842/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5024204f20..acf411b6c9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1192,7 +1192,7 @@ def __str__ (self): return LOGITS_ERROR_STRING try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) except: continue else: - exec(f"def raise_{j}: print({function})", globals(), locals()) + exec(f"def raise_{j}(*args, **kwargs): print({function})", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From b509681885aca84e1371fe608e871e4e2f34f6eb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:35:55 -0800 Subject: [PATCH 0843/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index acf411b6c9..248efb2acd 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1192,7 +1192,7 @@ def __str__ (self): return LOGITS_ERROR_STRING try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) except: continue else: - exec(f"def raise_{j}(*args, **kwargs): print({function})", globals(), locals()) + exec(f"def raise_{j}(*args, **kwargs): print('{function}'')", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From a6ec1e61b2d34a7d0c52cdaa06fa147c85286341 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:37:02 -0800 Subject: [PATCH 0844/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 248efb2acd..775be91fe5 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1192,7 +1192,7 @@ def __str__ (self): return LOGITS_ERROR_STRING try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) except: continue else: - exec(f"def raise_{j}(*args, **kwargs): print('{function}'')", globals(), locals()) + exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass From 2c38d3d9eaccd59e27af34cc504004a588c5a573 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:52:57 -0800 Subject: [PATCH 0845/1088] Update _utils.py --- unsloth/models/_utils.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 775be91fe5..fbb68b38f4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1187,12 +1187,12 @@ def __str__ (self): return LOGITS_ERROR_STRING EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) 
for j, function in enumerate(functions): - if function.endswith("_") and len(function) > 2 and function[-2] != "_": - print() - try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) - except: continue - else: - exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) - try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) - except: continue + # if function.endswith("_") and len(function) > 2 and function[-2] != "_": + # print() + # try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) + # except: continue + # else: + exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) + try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) + except: continue pass From 7fef1a99b160e109eb761f923e5c8e2ed8c9fe53 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 22:56:24 -0800 Subject: [PATCH 0846/1088] Update _utils.py --- unsloth/models/_utils.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index fbb68b38f4..7259923f7a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1187,11 +1187,7 @@ def __str__ (self): return LOGITS_ERROR_STRING EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) for j, function in enumerate(functions): - # if function.endswith("_") and len(function) > 2 and function[-2] != "_": - # print() - # try: exec(f"EMPTY_LOGITS.{function} = warn_logits_error", globals(), locals()) - # except: continue - # else: + if function == "to": continue exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue From 699091730391ebfbe71bac3a02d52f8ac82d6aa9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 23:03:35 -0800 Subject: [PATCH 0847/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7259923f7a..05f6520ef0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1179,8 +1179,8 @@ def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_ def warn_logits_error(*args, **kwargs): warnings.warn(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return - __getitem__ = raise_logits_error - __getattr__ = raise_logits_error + # __getitem__ = raise_logits_error + # __getattr__ = raise_logits_error def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass From 8f4f2fe4254319738d1ef948a4c884555dcd19a0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 23:33:19 -0800 Subject: [PATCH 0848/1088] Update _utils.py --- unsloth/models/_utils.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 05f6520ef0..5f0b4e0552 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,19 +1175,22 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
-def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) def warn_logits_error(*args, **kwargs): warnings.warn(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return - # __getitem__ = raise_logits_error - # __getattr__ = raise_logits_error + def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) + def return_none(*args, **kwargs): return None + def raise_getattr_error(self, attr): + if attr == "to": return self.return_none + return self.raise_logits_error + __getitem__ = self.raise_logits_error + __getattr__ = self.raise_getattr_error def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) for j, function in enumerate(functions): - if function == "to": continue exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue From 20e182b6068614346cbb38ad26d5933cf802ffef Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 23:35:36 -0800 Subject: [PATCH 0849/1088] Update _utils.py --- unsloth/models/_utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5f0b4e0552..45d32c8bc8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,7 +1175,6 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
-def warn_logits_error(*args, **kwargs): warnings.warn(LOGITS_ERROR_STRING) class EmptyLogits: def __init__(self): return def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) @@ -1183,8 +1182,8 @@ def return_none(*args, **kwargs): return None def raise_getattr_error(self, attr): if attr == "to": return self.return_none return self.raise_logits_error - __getitem__ = self.raise_logits_error - __getattr__ = self.raise_getattr_error + __getitem__ = raise_logits_error + __getattr__ = raise_getattr_error def __repr__(self): return LOGITS_ERROR_STRING def __str__ (self): return LOGITS_ERROR_STRING pass From 0162d22508beb80b264b5e5ceb941ec71228d5f4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 25 Nov 2024 23:39:26 -0800 Subject: [PATCH 0850/1088] Update _utils.py --- unsloth/models/_utils.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 45d32c8bc8..73b415a6c3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1175,13 +1175,11 @@ def unsloth_compile_transformers( "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ "... trainer.train() ..." 
+def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) +def return_none(*args, **kwargs): return None class EmptyLogits: def __init__(self): return - def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) - def return_none(*args, **kwargs): return None - def raise_getattr_error(self, attr): - if attr == "to": return self.return_none - return self.raise_logits_error + def raise_getattr_error(self, attr): return return_none if attr == "to" else raise_logits_error __getitem__ = raise_logits_error __getattr__ = raise_getattr_error def __repr__(self): return LOGITS_ERROR_STRING @@ -1190,7 +1188,8 @@ def __str__ (self): return LOGITS_ERROR_STRING EMPTY_LOGITS = EmptyLogits() functions = dir(torch.Tensor) for j, function in enumerate(functions): - exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) - try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) - except: continue + if function.startswith("__") and function.endswith("__"): + exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) + try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) + except: continue pass From 5d69df679e200d58829502162964cfbe875ba94a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:01:13 -0800 Subject: [PATCH 0851/1088] Update loader.py --- unsloth/models/loader.py | 52 +++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index fb97e74622..377f595280 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -347,10 +347,11 @@ def from_pretrained( use_gradient_checkpointing = "unsloth", resize_model_vocab = None, # [TODO] No effect revision = None, + return_logits = False, # Return logits *args, **kwargs, ): if token is None: token = get_token() - + patch_compiled_autograd() patch_compiling_bitsandbytes() if 
use_gradient_checkpointing == "unsloth": @@ -359,30 +360,31 @@ def from_pretrained( old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) - # with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - ) + with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + return_logits = return_logits, + ) pass # First check if it's a normal model via AutoConfig From 833f64df03de2e05b2be5005bea51ea2a3ebd4be Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:08:15 -0800 Subject: [PATCH 0852/1088] Update llama.py --- unsloth/models/llama.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 
c0242825d3..bb5c841409 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -65,7 +65,7 @@ # Old HF Hub versions <= 0.0.25 from huggingface_hub.utils._token import get_token pass - +from triton import __version__ as triton_version def original_apply_qkv(self, X): Q = self.q_proj(X) @@ -1549,9 +1549,9 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) From 4e245724fbc8cf69136e83da38fcb4676176eea1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:16:33 -0800 Subject: [PATCH 0853/1088] Update vision.py --- unsloth/models/vision.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 69fb3fd986..80c1f82d4d 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -31,6 +31,7 @@ get_peft_regex, merge_and_overwrite_lora, ) +from triton import __version__ as triton_version __all__ = [ "FastBaseVisionModel", @@ -95,9 +96,9 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers = {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers: {transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) From cfb769af324e1b52bdb7d1279b3c3ede14483a17 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:18:00 -0800 Subject: [PATCH 0854/1088] Update loader.py --- unsloth/models/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 377f595280..334f7b76f7 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -465,6 +465,7 @@ def from_pretrained( if is_peft: # Check base model again for PEFT model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + print(model_name) model_config = AutoConfig.from_pretrained( model_name, token = token, From 7321fe9b79c73679bfab7f584aa4ead0b3e0df4f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:26:37 -0800 Subject: [PATCH 0855/1088] Old torch versions --- unsloth/models/_utils.py | 10 ++++++++++ unsloth/models/loader.py | 2 ++ 2 files changed, 12 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 73b415a6c3..ac9c9a98ca 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1131,6 +1131,16 @@ def unsloth_compile_transformers( disable = False, return_logits = False, ): + if Version(torch_version) < Version("2.4.0"): + print( + "="*30 + \ + "Unsloth: Unfortunately Unsloth vision and other newer optimized models need Torch 2.4 or later.\n"\ + f"You have Torch version {torch_version}. Please upgrade your Torch version by visiting https://pytorch.org/\n"\ + "For now your models will not get optimized, but will still work for now!" 
+ ) + return + pass + if disable: return model_types = get_transformers_model_type( model_name = model_name, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 334f7b76f7..198a95b313 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -401,6 +401,7 @@ def from_pretrained( revision = revision, trust_remote_code = trust_remote_code, ) + print("Model config", model_config) is_model = True except Exception as error: autoconfig_error = str(error) @@ -412,6 +413,7 @@ def from_pretrained( revision = revision, trust_remote_code = trust_remote_code, ) + print("PEFT config", peft_config) is_peft = True except Exception as error: peft_error = str(error) From 49caeb244a705029391695478904fceb76437c91 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:34:51 -0800 Subject: [PATCH 0856/1088] Update loader.py --- unsloth/models/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 198a95b313..393eb28dcb 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -392,6 +392,7 @@ def from_pretrained( was_disabled = are_progress_bars_disabled() disable_progress_bars() + print(model_name) autoconfig_error = None peft_error = None try: From ed1c7a9eda5c0c9f2909d3eb8821d39eb77fdb43 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:35:05 -0800 Subject: [PATCH 0857/1088] Update loader.py --- unsloth/models/loader.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 393eb28dcb..b33c1e19db 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -406,6 +406,7 @@ def from_pretrained( is_model = True except Exception as error: autoconfig_error = str(error) + print(autoconfig_error) is_model = False try: peft_config = PeftConfig.from_pretrained( @@ -418,6 +419,7 @@ def from_pretrained( is_peft = True except Exception as error: peft_error = str(error) + print(peft_error) 
is_peft = False pass From 587223b4bf682e0001b4d8f23e3c260a0f23d2dd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 00:47:27 -0800 Subject: [PATCH 0858/1088] prints --- unsloth/models/_utils.py | 2 ++ unsloth/models/loader.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ac9c9a98ca..eed61b30c0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1142,12 +1142,14 @@ def unsloth_compile_transformers( pass if disable: return + print(1) model_types = get_transformers_model_type( model_name = model_name, token = token, revision = revision, trust_remote_code = trust_remote_code, ) + print(model_types) for model_type in model_types: _unsloth_compile_transformers( model_type, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index b33c1e19db..9d9ac31b6e 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -358,7 +358,9 @@ def from_pretrained( patch_unsloth_smart_gradient_checkpointing() old_model_name = model_name + print(model_name) model_name = get_model_name(model_name, load_in_4bit) + print(model_name) with contextlib.redirect_stdout(open(os.devnull, "w")): patch_loss_functions(torch_compile = False) From 6099293e2539d69d80d1453a8fdf728b8a1be524 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 02:52:19 -0800 Subject: [PATCH 0859/1088] recheck --- unsloth/models/_utils.py | 2 +- unsloth/models/loader.py | 52 ++++++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index eed61b30c0..c906ded5a4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1142,7 +1142,7 @@ def unsloth_compile_transformers( pass if disable: return - print(1) + print(model_name) model_types = get_transformers_model_type( model_name = model_name, token = token, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 
9d9ac31b6e..89bbb5e8f0 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -362,32 +362,32 @@ def from_pretrained( model_name = get_model_name(model_name, load_in_4bit) print(model_name) - with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - return_logits = return_logits, - ) - pass + # with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + return_logits = return_logits, + ) + # pass # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled From 207d047acba4b55b58b168812d1a63211f886f17 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 02:58:16 -0800 Subject: [PATCH 0860/1088] 
Update loader.py --- unsloth/models/loader.py | 64 ++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 89bbb5e8f0..0c8c284bb5 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -358,43 +358,13 @@ def from_pretrained( patch_unsloth_smart_gradient_checkpointing() old_model_name = model_name - print(model_name) model_name = get_model_name(model_name, load_in_4bit) - print(model_name) - - # with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - return_logits = return_logits, - ) - # pass # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() disable_progress_bars() - - print(model_name) + autoconfig_error = None peft_error = None try: @@ -404,11 +374,9 @@ def from_pretrained( revision = revision, trust_remote_code = trust_remote_code, ) - print("Model config", model_config) is_model = True except Exception as error: autoconfig_error = str(error) - print(autoconfig_error) is_model = False try: peft_config = PeftConfig.from_pretrained( @@ -417,11 +385,9 @@ def from_pretrained( revision = revision, trust_remote_code = trust_remote_code, ) - print("PEFT config", peft_config) 
is_peft = True except Exception as error: peft_error = str(error) - print(peft_error) is_peft = False pass @@ -472,7 +438,6 @@ def from_pretrained( if is_peft: # Check base model again for PEFT model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) - print(model_name) model_config = AutoConfig.from_pretrained( model_name, token = token, @@ -483,6 +448,33 @@ def from_pretrained( if not was_disabled: enable_progress_bars() + # with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + return_logits = return_logits, + ) + # pass + # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ From 90f79d28630611be287b1cb1e8f18967d6664d86 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:07:50 -0800 Subject: [PATCH 0861/1088] Update loader.py --- unsloth/models/loader.py | 52 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0c8c284bb5..f8ed3a87e6 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -448,32 +448,32 @@ def from_pretrained( if not was_disabled: enable_progress_bars() - # with 
contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - return_logits = return_logits, - ) - # pass + with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + return_logits = return_logits, + ) + pass # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ From d3b147b4918898a84bef43bc4a40a71f6bd492b8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:09:32 -0800 Subject: [PATCH 0862/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c906ded5a4..6118ab926b 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1142,14 +1142,14 @@ def unsloth_compile_transformers( pass if disable: return - print(model_name) + model_types = get_transformers_model_type( model_name = model_name, token = token, revision = revision, trust_remote_code = trust_remote_code, ) - print(model_types) + for model_type in model_types: _unsloth_compile_transformers( model_type, From 4e551685254f8f5f12a101cfbf3fdb55bff17063 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:18:09 -0800 Subject: [PATCH 0863/1088] Update _utils.py --- unsloth/models/_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6118ab926b..3a29352a92 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.9" +__version__ = "2024.11.10" __all__ = [ "prepare_model_for_kbit_training", @@ -1142,7 +1142,7 @@ def unsloth_compile_transformers( pass if disable: return - + model_types = get_transformers_model_type( model_name = model_name, token = token, From 8bf04040dcb011864083fddbdeb7471a4ecd6472 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:20:21 -0800 Subject: [PATCH 0864/1088] Update mapper.py --- unsloth/models/mapper.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index fc1dc8cdb0..b2f73aa6c2 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -492,6 +492,14 @@ "unsloth/llava-v1.6-mistral-7b-hf", "llava-hf/llava-v1.6-mistral-7b-hf", ), + "unsloth/Llama-3.1-Tulu-3-8B-bnb-4bit" : ( + "unsloth/Llama-3.1-Tulu-3-8B", + "allenai/Llama-3.1-Tulu-3-8B", + ), + "unsloth/Llama-3.1-Tulu-3-70B-bnb-4bit" : ( + "unsloth/Llama-3.1-Tulu-3-70B", + "allenai/Llama-3.1-Tulu-3-70B", + ), } INT_TO_FLOAT_MAPPER = {} From 
51b384e90523be76bafbff409f3f50c92dbab155 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:25:55 -0800 Subject: [PATCH 0865/1088] Bug fixes for vision (#1340) * Update __init__.py * Update __init__.py * Patching * Update cross_entropy_loss.py * CE Loss * Update _utils.py * Update _utils.py * CE Loss * Update _utils.py * Update _utils.py * Layernorm * Update _utils.py * Update _utils.py * Post patch * Update _utils.py * Update llama.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * 
Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update 
loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana --- unsloth/models/_utils.py | 45 +++++++++++++++++++++++++++++++- unsloth/models/llama.py | 16 +++++++----- unsloth/models/loader.py | 56 +++++++++++++++++++++------------------- unsloth/models/mapper.py | 8 ++++++ unsloth/models/vision.py | 7 ++--- 5 files changed, 94 insertions(+), 38 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 69f36f0d46..3a29352a92 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.9" +__version__ = "2024.11.10" __all__ = [ "prepare_model_for_kbit_training", @@ -54,6 +54,7 @@ "unpatch_gradient_checkpointing", "HAS_CUT_CROSS_ENTROPY", + "EMPTY_LOGITS", "fused_linear_cross_entropy", "patch_unsloth_smart_gradient_checkpointing", "unpatch_unsloth_smart_gradient_checkpointing", @@ -1128,14 +1129,27 @@ def unsloth_compile_transformers( debug = False, import_from_cache = False, disable = False, + return_logits = False, ): + if Version(torch_version) < Version("2.4.0"): + print( + "="*30 + \ + "Unsloth: Unfortunately Unsloth vision and other newer optimized models need Torch 2.4 or later.\n"\ + f"You have Torch version {torch_version}. Please upgrade your Torch version by visiting https://pytorch.org/\n"\ + "For now your models will not get optimized, but will still work for now!" + ) + return + pass + if disable: return + model_types = get_transformers_model_type( model_name = model_name, token = token, revision = revision, trust_remote_code = trust_remote_code, ) + for model_type in model_types: _unsloth_compile_transformers( model_type, @@ -1158,7 +1172,36 @@ def unsloth_compile_transformers( debug = debug, import_from_cache = import_from_cache, disable = disable, + return_logits = return_logits, ) pass return model_types pass + +# We need an empty logits flag to warn people logits will not be returned anymore unless asked ie +# os.environ['UNSLOTH_RETURN_LOGITS'] = '1' +LOGITS_ERROR_STRING = \ + "Unsloth: Logits are empty from 2024.11 onwards. To get raw logits again, please "\ + 'set the environment variable `UNSLOTH_RETURN_LOGITS` to `"1" BEFORE starting to train ie before `trainer.train()`. For example:\n\n'\ + "import os\n"\ + "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ + "... trainer.train() ..." 
+ +def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) +def return_none(*args, **kwargs): return None +class EmptyLogits: + def __init__(self): return + def raise_getattr_error(self, attr): return return_none if attr == "to" else raise_logits_error + __getitem__ = raise_logits_error + __getattr__ = raise_getattr_error + def __repr__(self): return LOGITS_ERROR_STRING + def __str__ (self): return LOGITS_ERROR_STRING +pass +EMPTY_LOGITS = EmptyLogits() +functions = dir(torch.Tensor) +for j, function in enumerate(functions): + if function.startswith("__") and function.endswith("__"): + exec(f"def raise_{j}(*args, **kwargs): print('{function}')", globals(), locals()) + try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) + except: continue +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0256fc1830..bb5c841409 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -65,7 +65,7 @@ # Old HF Hub versions <= 0.0.25 from huggingface_hub.utils._token import get_token pass - +from triton import __version__ as triton_version def original_apply_qkv(self, X): Q = self.q_proj(X) @@ -980,7 +980,8 @@ def _CausalLM_fast_forward( elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: - if HAS_CUT_CROSS_ENTROPY and labels is not None: + RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" + if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( hidden_states = hidden_states, @@ -993,13 +994,14 @@ def _CausalLM_fast_forward( output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output - return CausalLMOutputWithPast( + output = CausalLMOutputWithPast( loss=loss, - logits=None, + logits=EMPTY_LOGITS, past_key_values=outputs.past_key_values, 
hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + return output pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass @@ -1547,9 +1549,9 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers = {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit = {torch.version.cuda}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 232fe6acff..f8ed3a87e6 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -347,6 +347,7 @@ def from_pretrained( use_gradient_checkpointing = "unsloth", resize_model_vocab = None, # [TODO] No effect revision = None, + return_logits = False, # Return logits *args, **kwargs, ): if token is None: token = get_token() @@ -359,37 +360,11 @@ def from_pretrained( old_model_name = model_name model_name = get_model_name(model_name, load_in_4bit) - with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) - model_types = unsloth_compile_transformers( - model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, - fuse_lm_head = True, - gradient_checkpointing = True, - manual_replacements = True, - epilogue_fusion = True, - max_autotune = False, - shape_padding = True, - cudagraphs = False, - debug = False, - import_from_cache = False, - disable = False, - ) - pass - # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() disable_progress_bars() - + autoconfig_error = None peft_error = None try: @@ -473,6 +448,33 @@ def from_pretrained( if not was_disabled: enable_progress_bars() + with contextlib.redirect_stdout(open(os.devnull, "w")): + patch_loss_functions(torch_compile = False) + model_types = unsloth_compile_transformers( + model_name = model_name, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + 
sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + epilogue_fusion = True, + max_autotune = False, + shape_padding = True, + cudagraphs = False, + debug = False, + import_from_cache = False, + disable = False, + return_logits = return_logits, + ) + pass + # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ os.path.exists(os.path.join(old_model_name, "tokenizer.json")) and \ diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index fc1dc8cdb0..b2f73aa6c2 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -492,6 +492,14 @@ "unsloth/llava-v1.6-mistral-7b-hf", "llava-hf/llava-v1.6-mistral-7b-hf", ), + "unsloth/Llama-3.1-Tulu-3-8B-bnb-4bit" : ( + "unsloth/Llama-3.1-Tulu-3-8B", + "allenai/Llama-3.1-Tulu-3-8B", + ), + "unsloth/Llama-3.1-Tulu-3-70B-bnb-4bit" : ( + "unsloth/Llama-3.1-Tulu-3-70B", + "allenai/Llama-3.1-Tulu-3-70B", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 69fb3fd986..80c1f82d4d 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -31,6 +31,7 @@ get_peft_regex, merge_and_overwrite_lora, ) +from triton import __version__ as triton_version __all__ = [ "FastBaseVisionModel", @@ -95,9 +96,9 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers = {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform = {platform_system}.\n"\ - f"O^O/ \_/ \\ Pytorch: {torch.__version__}. CUDA = {gpu_stats.major}.{gpu_stats.minor}. 
CUDA Toolkit = {torch.version.cuda}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers: {transformers_version}.\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) From 8558bc92b06f9128499484ef737fa71b966ffc23 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Nov 2024 03:29:59 -0800 Subject: [PATCH 0866/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ab2de2b732..51c3037235 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.7", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", "transformers>=4.46.1", @@ -244,7 +244,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.1", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", "transformers>=4.46.1", From 8a6da33bd08b48c83bdb3eaf449aeb2cbc95bbc8 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Wed, 27 Nov 2024 02:22:02 +0400 Subject: [PATCH 0867/1088] Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han --- unsloth/models/__init__.py | 2 +- unsloth/models/dpo.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 3230cdc207..9c032b28bd 100644 --- a/unsloth/models/__init__.py +++ 
b/unsloth/models/__init__.py @@ -16,5 +16,5 @@ from .llama import FastLlamaModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model -from .dpo import PatchDPOTrainer +from .dpo import PatchDPOTrainer, PatchKTOTrainer from ._utils import is_bfloat16_supported diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index e7074350c3..5dc71f920a 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -14,6 +14,7 @@ __all__ = [ "PatchDPOTrainer", + "PatchKTOTrainer", ] try: @@ -127,4 +128,4 @@ def PatchDPOTrainer(): pass pass pass - +PatchKTOTrainer = PatchDPOTrainer From 98a78ddfe4e40feca179d6092ab8ed80382f0251 Mon Sep 17 00:00:00 2001 From: cell-dame <122996026+dame-cell@users.noreply.github.com> Date: Wed, 27 Nov 2024 04:02:06 +0530 Subject: [PATCH 0868/1088] Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han From d4c06c0200265a36c0e4d0cdd677296234244184 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 28 Nov 2024 00:01:25 -0800 Subject: [PATCH 0869/1088] skip modules --- unsloth/models/vision.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 80c1f82d4d..507e19cfe0 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,6 +30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, merge_and_overwrite_lora, + SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version @@ -77,6 +78,7 @@ def from_pretrained( max_seq_length = None, dtype = None, load_in_4bit = True, + load_in_8bit token = None, device_map = "sequential", trust_remote_code = False, @@ -132,6 +134,7 @@ def from_pretrained( bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) pass @@ -424,5 +427,3 @@ def for_training(model, 
use_gradient_checkpointing = True): return model pass pass - - From 2e5393822bfd8a3412319bc4bf5a07403ee8531b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 28 Nov 2024 00:02:13 -0800 Subject: [PATCH 0870/1088] Update vision.py --- unsloth/models/vision.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 507e19cfe0..0f682c6b40 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -78,7 +78,6 @@ def from_pretrained( max_seq_length = None, dtype = None, load_in_4bit = True, - load_in_8bit token = None, device_map = "sequential", trust_remote_code = False, From aad5b1fcee4a47b785b6afbdff9a20a4fda5900e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:02:31 -0800 Subject: [PATCH 0871/1088] Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bb5c841409..2605d941ad 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1551,7 +1551,7 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ - f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ + f"O^O/ \+/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) @@ -1709,7 +1709,7 @@ def from_pretrained( debug_info = """debug_info = \\ f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"O^O/ \\+/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) From 2515d193572d301005d0ada5332c90f8e01f3d65 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:10:43 -0800 Subject: [PATCH 0872/1088] Update llama.py --- unsloth/models/llama.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 2605d941ad..8c4c5e6f68 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1551,7 +1551,7 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ - f"O^O/ \+/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ + f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) @@ -1706,12 +1706,18 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + unsloth_0 = r'==((====))==' + unsloth_1 = r' \\ /| ' + unsloth_2 = r'O^O/ \_/ \ ' + unsloth_3 = r'\ / ' + unsloth_4 = r' "-____-" ' + debug_info = """debug_info = \\ - f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\+/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ - f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' + f"{unsloth_0} Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f"{unsloth_1} Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"{unsloth_2} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"{unsloth_3} Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f'{unsloth_4} Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) import subprocess, re, gc, numpy as np a = np.array([0,]) From 6d468532e127aa096877a062ea68eb2f55445386 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:13:34 -0800 Subject: [PATCH 0873/1088] Update llama.py --- unsloth/models/llama.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8c4c5e6f68..43d0e9f1c7 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py 
@@ -1706,13 +1706,12 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + debug_info = """debug_info = \\ unsloth_0 = r'==((====))==' unsloth_1 = r' \\ /| ' unsloth_2 = r'O^O/ \_/ \ ' unsloth_3 = r'\ / ' unsloth_4 = r' "-____-" ' - - debug_info = """debug_info = \\ f"{unsloth_0} Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ f"{unsloth_1} Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ f"{unsloth_2} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ From a2b7a5ec4573157486a32f4cd2818b9f314b6a0c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:15:21 -0800 Subject: [PATCH 0874/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 43d0e9f1c7..1d413f0acc 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1717,7 +1717,7 @@ def from_pretrained( f"{unsloth_2} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"{unsloth_3} Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f'{unsloth_4} Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - logger.warning(debug_info) + print(debug_info) import subprocess, re, gc, numpy as np a = np.array([0,]) try: From 370e460d8dc7d69ac304bf83fa724603a3044001 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:19:07 -0800 Subject: [PATCH 0875/1088] Update llama.py --- unsloth/models/llama.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1d413f0acc..68aa527990 100644 --- a/unsloth/models/llama.py +++ 
b/unsloth/models/llama.py @@ -1706,18 +1706,19 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) - debug_info = """debug_info = \\ unsloth_0 = r'==((====))==' unsloth_1 = r' \\ /| ' unsloth_2 = r'O^O/ \_/ \ ' unsloth_3 = r'\ / ' unsloth_4 = r' "-____-" ' + + debug_info = """debug_info = \\ f"{unsloth_0} Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ f"{unsloth_1} Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ f"{unsloth_2} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"{unsloth_3} Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f'{unsloth_4} Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' - print(debug_info) + logger.warning(debug_info) import subprocess, re, gc, numpy as np a = np.array([0,]) try: @@ -1733,6 +1734,14 @@ def from_pretrained( gc.collect() torch.cuda.empty_cache()""" + print(debug_info) + debug_info = debug_info\ + .replace("{unsloth_0}", unsloth_0)\ + .replace("{unsloth_1}", unsloth_1)\ + .replace("{unsloth_2}", unsloth_2)\ + .replace("{unsloth_3}", unsloth_3)\ + .replace("{unsloth_4}", unsloth_4) + debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) inner_training_loop = inner_training_loop.replace(original_debug, debug_info) From a1b9d74367012b409c6032634d2447d26838b8b9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:20:54 -0800 Subject: [PATCH 0876/1088] Update llama.py --- unsloth/models/llama.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 68aa527990..8f2e56d6e6 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1736,11 +1736,11 @@ def 
from_pretrained( print(debug_info) debug_info = debug_info\ - .replace("{unsloth_0}", unsloth_0)\ - .replace("{unsloth_1}", unsloth_1)\ - .replace("{unsloth_2}", unsloth_2)\ - .replace("{unsloth_3}", unsloth_3)\ - .replace("{unsloth_4}", unsloth_4) + .replace("{unsloth_0}", re.escape(unsloth_0))\ + .replace("{unsloth_1}", re.escape(unsloth_1))\ + .replace("{unsloth_2}", re.escape(unsloth_2))\ + .replace("{unsloth_3}", re.escape(unsloth_3))\ + .replace("{unsloth_4}", re.escape(unsloth_4)) debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) From 9dd59ae7acbf8d46453e201d069395f46f908046 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:22:21 -0800 Subject: [PATCH 0877/1088] Update llama.py --- unsloth/models/llama.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8f2e56d6e6..67a9d37bc3 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1736,11 +1736,11 @@ def from_pretrained( print(debug_info) debug_info = debug_info\ - .replace("{unsloth_0}", re.escape(unsloth_0))\ - .replace("{unsloth_1}", re.escape(unsloth_1))\ - .replace("{unsloth_2}", re.escape(unsloth_2))\ - .replace("{unsloth_3}", re.escape(unsloth_3))\ - .replace("{unsloth_4}", re.escape(unsloth_4)) + .replace("{unsloth_0}", re.unescape(unsloth_0))\ + .replace("{unsloth_1}", re.unescape(unsloth_1))\ + .replace("{unsloth_2}", re.unescape(unsloth_2))\ + .replace("{unsloth_3}", re.unescape(unsloth_3))\ + .replace("{unsloth_4}", re.unescape(unsloth_4)) debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) From e6aa302e07e903e6602751d1753f51880e545c16 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:25:48 -0800 Subject: [PATCH 0878/1088] Update llama.py --- unsloth/models/llama.py | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 
deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 67a9d37bc3..30ad5bcea0 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1706,18 +1706,12 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) - unsloth_0 = r'==((====))==' - unsloth_1 = r' \\ /| ' - unsloth_2 = r'O^O/ \_/ \ ' - unsloth_3 = r'\ / ' - unsloth_4 = r' "-____-" ' - debug_info = """debug_info = \\ - f"{unsloth_0} Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f"{unsloth_1} Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"{unsloth_2} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"{unsloth_3} Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ - f'{unsloth_4} Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ + f" \\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) import subprocess, re, gc, numpy as np a = np.array([0,]) @@ -1734,14 +1728,6 @@ def from_pretrained( gc.collect() torch.cuda.empty_cache()""" - print(debug_info) - debug_info = debug_info\ - .replace("{unsloth_0}", re.unescape(unsloth_0))\ - .replace("{unsloth_1}", re.unescape(unsloth_1))\ - .replace("{unsloth_2}", re.unescape(unsloth_2))\ - .replace("{unsloth_3}", re.unescape(unsloth_3))\ - .replace("{unsloth_4}", 
re.unescape(unsloth_4)) - debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) inner_training_loop = inner_training_loop.replace(original_debug, debug_info) From 39c01fa4fa4144ddb2826046aec303da43353945 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:29:11 -0800 Subject: [PATCH 0879/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 30ad5bcea0..718f573d2f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1708,9 +1708,9 @@ def from_pretrained( debug_info = """debug_info = \\ f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"{chr(92)} / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) import subprocess, re, gc, numpy as np From f2f6e1dc1fcfef16e064ceb69928af1161a612b9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:30:21 -0800 Subject: [PATCH 0880/1088] Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 718f573d2f..f22cf315a1 100644 --- 
a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1708,8 +1708,8 @@ def from_pretrained( debug_info = """debug_info = \\ f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ f"{chr(92)} / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) From 78160ab787da05dfdec036e8f18de037b6704157 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 1 Dec 2024 02:36:17 -0800 Subject: [PATCH 0881/1088] Update llama.py --- unsloth/models/llama.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f22cf315a1..1bffb0cb16 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1706,6 +1706,8 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + # Cannot use \\ since it will cause a SyntaxWarning in Python 3.12 + # Instead use chr(92) == \\ debug_info = """debug_info = \\ f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ From ae7afa2d3414d6c1c064098df52779689a55ff52 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:03:38 -0800 Subject: [PATCH 0882/1088] 
Fix llama.cpp --- unsloth/save.py | 91 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 22 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index b503b2b47a..a9fb5648b7 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -45,6 +45,9 @@ "create_huggingface_repo", ] +# llama.cpp specific targets - all takes 90s. Below takes 60s +LLAMA_CPP_TARGETS = ["llama-quantize", "llama-export-lora", "llama-cli",] + # Check environments keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames @@ -761,9 +764,21 @@ def install_llama_cpp_make_non_blocking(): # env = { **os.environ, "LLAMA_CUDA": "1", } n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean - os.system("make clean -C llama.cpp") - full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] - + check = os.system("make clean -C llama.cpp") + if check == 0: + # Uses old MAKE + full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] + else: + # Uses new CMAKE + check = os.system("cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON") + if check + commands = [ + "", + f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + "cp llama.cpp/build/bin/llama-* llama.cpp", + "rm -rf llama.cpp/build", + ] + pass # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? 
# run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) @@ -779,6 +794,27 @@ def install_python_non_blocking(packages = []): pass +def try_make(commands): + for command in commands: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: + for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") + if "undefined reference" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + elif "deprecated" in line: + return "CMAKE" + elif "Unknown argument" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + elif "***" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + print(line, flush = True, end = "") + pass + pass + pass + return None +pass + + def install_llama_cpp_old(version = -10): # Download the 10th latest release since the latest might be broken! # FALLBACK mechanism @@ -810,18 +846,25 @@ def install_llama_cpp_old(version = -10): commands = [ "git clone --recursive https://github.com/ggerganov/llama.cpp", f"cd llama.cpp && git reset --hard {version} && git clean -df", + ] + try_make(commands) + + # Try using MAKE + commands = [ "make clean -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] - for command in commands: - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. 
Please report this ASAP!") - print(line, flush = True, end = "") - pass + if try_make(commands) == "CMAKE": + # Instead use CMAKE + commands = [ + "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", + f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + "cp llama.cpp/build/bin/llama-* llama.cpp", + "rm -rf llama.cpp/build", + ] + try_make(commands) pass + # Check if successful if not os.path.exists("llama.cpp/quantize") and not os.path.exists("llama.cpp/llama-quantize"): raise RuntimeError( @@ -839,23 +882,27 @@ def install_llama_cpp_blocking(use_cuda = False): commands = [ "git clone --recursive https://github.com/ggerganov/llama.cpp", + "pip install gguf protobuf", + ] + if os.path.exists("llama.cpp"): return + try_make(commands) + + commands = [ "make clean -C llama.cpp", # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? # f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", - "pip install gguf protobuf", ] - if os.path.exists("llama.cpp"): return - - for command in commands: - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. 
Please report this ASAP!") - print(line, flush = True, end = "") - pass + if try_make(commands) == "CMAKE": + # Instead use CMAKE + commands = [ + "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", + f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + "cp llama.cpp/build/bin/llama-* llama.cpp", + "rm -rf llama.cpp/build", + ] + try_make(commands) pass pass From 56fa57f5675c4c02853e11072eba52df4eff97ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:16:28 -0800 Subject: [PATCH 0883/1088] Update save.py --- unsloth/save.py | 49 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index a9fb5648b7..ee663442db 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -765,25 +765,32 @@ def install_llama_cpp_make_non_blocking(): n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean check = os.system("make clean -C llama.cpp") + IS_CMAKE = False if check == 0: # Uses old MAKE full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] + IS_CMAKE = False else: # Uses new CMAKE check = os.system("cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON") - if check - commands = [ - "", - f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", - "cp llama.cpp/build/bin/llama-* llama.cpp", - "rm -rf llama.cpp/build", + if check != 0: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp using os.system(...) with error {check}. 
Please report this ASAP!") + pass + # f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + full_command = [ + "cmake", "--build", "llama.cpp/build", + "--config", "Release", + "-j"+str(n_jobs), + "--clean-first", + "--target", " ".join(LLAMA_CPP_TARGETS), ] + IS_CMAKE = True pass # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? # run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) - return run_installer + return run_installer, IS_CMAKE pass @@ -1023,13 +1030,26 @@ def save_to_gguf( else: print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") if _run_installer is not None: + _run_installer, IS_CMAKE = _run_installer + error = _run_installer.wait() + if IS_CMAKE: + # CMAKE needs to do some extra steps + check = os.system("cp llama.cpp/build/bin/llama-* llama.cpp") + if check != 0: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") + check = os.system("rm -rf llama.cpp/build") + if check != 0: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") + pass else: error = 0 install_llama_cpp_blocking() pass - # Check if successful. If not install 10th latest release + # Check if successful + if error != 0 or quantize_location is None or convert_location is None: + print(f"Unsloth: llama.cpp error code = {error}.") + install_llama_cpp_old(-10) + pass # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize # and llama.cpp/main changed to llama.cpp/llama-cli @@ -1059,11 +1079,6 @@ def save_to_gguf( "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" 
) pass - - if error != 0 or quantize_location is None or convert_location is None: - print(f"Unsloth: llama.cpp error code = {error}.") - install_llama_cpp_old(-10) - pass pass # Determine maximum first_conversion state @@ -1676,7 +1691,7 @@ def unsloth_save_pretrained_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass @@ -1697,7 +1712,7 @@ def unsloth_save_pretrained_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass @@ -1854,7 +1869,7 @@ def unsloth_push_to_hub_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass @@ -1875,7 +1890,7 @@ def unsloth_push_to_hub_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass From 41a045b2201e0315382308057abde0729b4e3c95 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:24:23 -0800 Subject: [PATCH 0884/1088] Update save.py --- unsloth/save.py | 37 +++++++++++-------------------------- 1 file 
changed, 11 insertions(+), 26 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index ee663442db..9deeb8a4d5 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -801,7 +801,7 @@ def install_python_non_blocking(packages = []): pass -def try_make(commands): +def try_execute(commands, force_complete = False): for command in commands: with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: for line in sp.stdout: @@ -816,6 +816,8 @@ def try_make(commands): raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") print(line, flush = True, end = "") pass + if force_complete and sp.returncode is not None and sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, sp.args) pass pass return None @@ -854,14 +856,14 @@ def install_llama_cpp_old(version = -10): "git clone --recursive https://github.com/ggerganov/llama.cpp", f"cd llama.cpp && git reset --hard {version} && git clean -df", ] - try_make(commands) + try_execute(commands) # Try using MAKE commands = [ "make clean -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] - if try_make(commands) == "CMAKE": + if try_execute(commands) == "CMAKE": # Instead use CMAKE commands = [ "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", @@ -869,7 +871,7 @@ def install_llama_cpp_old(version = -10): "cp llama.cpp/build/bin/llama-* llama.cpp", "rm -rf llama.cpp/build", ] - try_make(commands) + try_execute(commands) pass # Check if successful @@ -892,7 +894,7 @@ def install_llama_cpp_blocking(use_cuda = False): "pip install gguf protobuf", ] if os.path.exists("llama.cpp"): return - try_make(commands) + try_execute(commands) commands = [ "make clean -C llama.cpp", @@ -901,7 +903,7 @@ def install_llama_cpp_blocking(use_cuda = False): # f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] 
- if try_make(commands) == "CMAKE": + if try_execute(commands) == "CMAKE": # Instead use CMAKE commands = [ "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", @@ -909,7 +911,7 @@ def install_llama_cpp_blocking(use_cuda = False): "cp llama.cpp/build/bin/llama-* llama.cpp", "rm -rf llama.cpp/build", ] - try_make(commands) + try_execute(commands) pass pass @@ -1169,15 +1171,7 @@ def save_to_gguf( f"--outtype {first_conversion}" pass - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - if sp.returncode is not None and sp.returncode != 0: - raise subprocess.CalledProcessError(sp.returncode, sp.args) - pass + try_execute([command], force_complete = True) # Check if quantization succeeded! if not os.path.isfile(final_location): @@ -1219,16 +1213,7 @@ def save_to_gguf( command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" - # quantize uses stderr - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - if sp.returncode is not None and sp.returncode != 0: - raise subprocess.CalledProcessError(sp.returncode, sp.args) - pass + try_execute([command,], force_complete = True) # Check if quantization succeeded! 
if not os.path.isfile(final_location): From 1642ded0353347f42865db793c68daee4d3fc1ba Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:25:13 -0800 Subject: [PATCH 0885/1088] Update vision.py --- unsloth/models/vision.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 0f682c6b40..aa4cc09022 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,7 +30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, merge_and_overwrite_lora, - SKIP_QUANTIZATION_MODULES, + # SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version @@ -133,7 +133,7 @@ def from_pretrained( bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, - llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + # llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) pass From cf993d74b6cd4beb1c6ee1be4d2e9bf1de685b20 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:25:40 -0800 Subject: [PATCH 0886/1088] Update save.py --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 9deeb8a4d5..7b20b53704 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1171,7 +1171,7 @@ def save_to_gguf( f"--outtype {first_conversion}" pass - try_execute([command], force_complete = True) + try_execute([command,], force_complete = True) # Check if quantization succeeded! 
if not os.path.isfile(final_location): From 5041f9f1242f77df6f19eb573b36e07133687e4e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:34:15 -0800 Subject: [PATCH 0887/1088] Update save.py --- unsloth/save.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 7b20b53704..c1207d1788 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -782,8 +782,8 @@ def install_llama_cpp_make_non_blocking(): "--config", "Release", "-j"+str(n_jobs), "--clean-first", - "--target", " ".join(LLAMA_CPP_TARGETS), - ] + "--target", + ] + LLAMA_CPP_TARGETS IS_CMAKE = True pass # https://github.com/ggerganov/llama.cpp/issues/7062 From 70893fcd9b32345569877976120e6a6bed6ef824 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:35:38 -0800 Subject: [PATCH 0888/1088] Update save.py --- unsloth/save.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index c1207d1788..1dac6ae385 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -762,16 +762,17 @@ def install_llama_cpp_make_non_blocking(): # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? # env = { **os.environ, "LLAMA_CUDA": "1", } - n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean check = os.system("make clean -C llama.cpp") IS_CMAKE = False if check == 0: # Uses old MAKE + n_jobs = max(int(psutil.cpu_count()*1.5), 1) full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] IS_CMAKE = False else: # Uses new CMAKE + n_jobs = max(int(psutil.cpu_count()), 1) # Use less CPUs since 1.5x faster check = os.system("cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON") if check != 0: raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp using os.system(...) with error {check}. 
Please report this ASAP!") From 4361fdeae644cc14b56db2f4edac5f2acbdb6a22 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:40:36 -0800 Subject: [PATCH 0889/1088] Update save.py --- unsloth/save.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 1dac6ae385..8b06e3801a 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -839,13 +839,13 @@ def install_llama_cpp_old(version = -10): # Check if the llama.cpp exists if os.path.exists("llama.cpp"): print( - "**[WARNING]** You have a llama.cpp old directory which is broken.\n"\ + "**[WARNING]** You have a llama.cpp directory which is broken.\n"\ "Unsloth will DELETE the broken directory and install a new one.\n"\ "Press CTRL + C / cancel this if this is wrong. We shall wait 10 seconds.\n" ) import time - for i in range(10): - print(f"**[WARNING]** Deleting llama.cpp directory... {10-i} seconds left.") + for i in range(30): + print(f"**[WARNING]** Deleting llama.cpp directory... {30-i} seconds left.") time.sleep(1) import shutil shutil.rmtree("llama.cpp", ignore_errors = True) From 4c90b5682b76fd60fe11fff22b1692f3dd1eb8bc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:40:43 -0800 Subject: [PATCH 0890/1088] Update save.py --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 8b06e3801a..1202614232 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -841,7 +841,7 @@ def install_llama_cpp_old(version = -10): print( "**[WARNING]** You have a llama.cpp directory which is broken.\n"\ "Unsloth will DELETE the broken directory and install a new one.\n"\ - "Press CTRL + C / cancel this if this is wrong. We shall wait 10 seconds.\n" + "Press CTRL + C / cancel this if this is wrong. 
We shall wait 30 seconds.\n" ) import time for i in range(30): From 8de7843e213b9384076234f12805b6bcd70b59de Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 3 Dec 2024 16:50:52 -0800 Subject: [PATCH 0891/1088] Update README.md Fixing Qwen links --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 07d4e0abb6..776fc60c39 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | | **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | -| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | +| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) | 2x faster | 63% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | @@ -42,7 +42,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News - 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! 
[Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) -- 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook] +- 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. - 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM!
    From c0c826490d49b62cd2e5417ed3940ea067e7c284 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:57:32 -0800 Subject: [PATCH 0892/1088] Update save.py --- unsloth/save.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unsloth/save.py b/unsloth/save.py index 1202614232..f7098aea12 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1038,6 +1038,8 @@ def save_to_gguf( error = _run_installer.wait() if IS_CMAKE: # CMAKE needs to do some extra steps + print("Unsloth: CMAKE detected. Finalizing some steps for installation.") + check = os.system("cp llama.cpp/build/bin/llama-* llama.cpp") if check != 0: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") check = os.system("rm -rf llama.cpp/build") From 236604fdc2e067d91b57df0e7b546bdfb43726c8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 16:59:56 -0800 Subject: [PATCH 0893/1088] Update save.py --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index f7098aea12..02619b2341 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -1031,7 +1031,7 @@ def save_to_gguf( if quantize_location is not None and convert_location is not None: print("Unsloth: llama.cpp found in the system. We shall skip installation.") else: - print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") + print("Unsloth: Installing llama.cpp. 
This will take 3 minutes...") if _run_installer is not None: _run_installer, IS_CMAKE = _run_installer From 2fbc62b2e0b3365d406269725c262470744f3139 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 17:01:16 -0800 Subject: [PATCH 0894/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3a29352a92..5bc9529e60 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.11.10" +__version__ = "2024.12.1" __all__ = [ "prepare_model_for_kbit_training", From 410cf59c9e98465f936f95ed33ccaced38075553 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 17:11:10 -0800 Subject: [PATCH 0895/1088] Update save.py --- unsloth/save.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 02619b2341..aa90a1f1ac 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -552,6 +552,8 @@ def unsloth_save_model( max_vram = int(torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage) + print("Unsloth: Saving model... This might take 5 minutes ...") + from tqdm import tqdm as ProgressBar for j, layer in enumerate(ProgressBar(internal_model.model.layers)): for item in LLAMA_WEIGHTS: @@ -668,8 +670,6 @@ def unsloth_save_model( print() pass - print("Unsloth: Saving model... 
This might take 5 minutes for Llama-7b...") - # Since merged, edit quantization_config old_config = model.config new_config = model.config.to_dict() @@ -1028,6 +1028,7 @@ def save_to_gguf( quantize_location = get_executable(["llama-quantize", "quantize"]) convert_location = get_executable(["convert-hf-to-gguf.py", "convert_hf_to_gguf.py"]) + error = 0 if quantize_location is not None and convert_location is not None: print("Unsloth: llama.cpp found in the system. We shall skip installation.") else: @@ -1036,6 +1037,12 @@ def save_to_gguf( _run_installer, IS_CMAKE = _run_installer error = _run_installer.wait() + # Check if successful + if error != 0: + print(f"Unsloth: llama.cpp error code = {error}.") + install_llama_cpp_old(-10) + pass + if IS_CMAKE: # CMAKE needs to do some extra steps print("Unsloth: CMAKE detected. Finalizing some steps for installation.") @@ -1050,12 +1057,6 @@ def save_to_gguf( install_llama_cpp_blocking() pass - # Check if successful - if error != 0 or quantize_location is None or convert_location is None: - print(f"Unsloth: llama.cpp error code = {error}.") - install_llama_cpp_old(-10) - pass - # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize # and llama.cpp/main changed to llama.cpp/llama-cli # See https://github.com/ggerganov/llama.cpp/pull/7809 From 6237e2be38a97f7fd70136472894085739c30629 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 17:28:46 -0800 Subject: [PATCH 0896/1088] Update save.py --- unsloth/save.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index aa90a1f1ac..cf78bf5897 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -497,7 +497,7 @@ def unsloth_save_model( elif safe_serialization and (n_cpus <= 2): logger.warning_once( f"Unsloth: You have {n_cpus} CPUs. 
Using `safe_serialization` is 10x slower.\n"\ - f"We shall switch to Pytorch saving, which will take 3 minutes and not 30 minutes.\n"\ + f"We shall switch to Pytorch saving, which might take 3 minutes and not 30 minutes.\n"\ f"To force `safe_serialization`, set it to `None` instead.", ) safe_serialization = False @@ -1007,9 +1007,9 @@ def save_to_gguf( print_info = \ f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ - f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ - f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 10 minutes each.\n"\ + f" \\\ /| [0] Installing llama.cpp might take 3 minutes.\n"\ + f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits might take 3 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to {quantization_method} might take 10 minutes each.\n"\ f' "-____-" In total, you will have to wait at least 16 minutes.\n' print(print_info) @@ -1032,7 +1032,7 @@ def save_to_gguf( if quantize_location is not None and convert_location is not None: print("Unsloth: llama.cpp found in the system. We shall skip installation.") else: - print("Unsloth: Installing llama.cpp. This will take 3 minutes...") + print("Unsloth: Installing llama.cpp. This might take 3 minutes...") if _run_installer is not None: _run_installer, IS_CMAKE = _run_installer @@ -1152,7 +1152,7 @@ def save_to_gguf( print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ - "This will take 3 minutes...") + "This might take 3 minutes...") # We first check if tokenizer.model exists in the model_directory if os.path.exists(f"{model_directory}/tokenizer.model"): @@ -1211,7 +1211,7 @@ def save_to_gguf( # Convert each type! for quant_method in quantization_method: if quant_method != first_conversion: - print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. 
This will take 20 minutes...") + print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. This might take 20 minutes...") final_location = str((Path(model_directory) / f"unsloth.{quant_method.upper()}.gguf").absolute()) command = f"./{quantize_location} {full_precision_location} "\ From a0e58493b40563a05fb6e26cea306ac4ac491789 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 3 Dec 2024 17:29:59 -0800 Subject: [PATCH 0897/1088] Fix llama.cpp GGUF (#1375) * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> --- unsloth/models/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- unsloth/models/dpo.py | 3 +- unsloth/models/llama.py | 8 +- unsloth/models/vision.py | 4 +- unsloth/save.py | 185 +++++++++++++++++++++++-------------- 6 files changed, 129 insertions(+), 75 deletions(-) diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 3230cdc207..9c032b28bd 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -16,5 +16,5 @@ from .llama import FastLlamaModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model -from .dpo import PatchDPOTrainer +from .dpo import PatchDPOTrainer, PatchKTOTrainer from ._utils import is_bfloat16_supported diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3a29352a92..5bc9529e60 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.11.10" +__version__ = "2024.12.1" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index e7074350c3..5dc71f920a 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -14,6 +14,7 @@ __all__ = [ "PatchDPOTrainer", + "PatchKTOTrainer", ] try: @@ -127,4 +128,4 @@ def PatchDPOTrainer(): pass pass pass - +PatchKTOTrainer = PatchDPOTrainer diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bb5c841409..1bffb0cb16 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1706,11 +1706,13 @@ def from_pretrained( spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + # Cannot use \\ since it will cause a SyntaxWarning in Python 3.12 + # Instead use chr(92) == \\ debug_info = """debug_info = \\ f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" \\\\\\ /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ \\_/ \\ Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"\\ / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ + f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ + f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"{chr(92)} / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) import subprocess, re, gc, numpy as np diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 80c1f82d4d..aa4cc09022 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,6 
+30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, merge_and_overwrite_lora, + # SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version @@ -132,6 +133,7 @@ def from_pretrained( bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, + # llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) pass @@ -424,5 +426,3 @@ def for_training(model, use_gradient_checkpointing = True): return model pass pass - - diff --git a/unsloth/save.py b/unsloth/save.py index b503b2b47a..cf78bf5897 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -45,6 +45,9 @@ "create_huggingface_repo", ] +# llama.cpp specific targets - all takes 90s. Below takes 60s +LLAMA_CPP_TARGETS = ["llama-quantize", "llama-export-lora", "llama-cli",] + # Check environments keynames = "\n" + "\n".join(os.environ.keys()) IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames @@ -494,7 +497,7 @@ def unsloth_save_model( elif safe_serialization and (n_cpus <= 2): logger.warning_once( f"Unsloth: You have {n_cpus} CPUs. Using `safe_serialization` is 10x slower.\n"\ - f"We shall switch to Pytorch saving, which will take 3 minutes and not 30 minutes.\n"\ + f"We shall switch to Pytorch saving, which might take 3 minutes and not 30 minutes.\n"\ f"To force `safe_serialization`, set it to `None` instead.", ) safe_serialization = False @@ -549,6 +552,8 @@ def unsloth_save_model( max_vram = int(torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage) + print("Unsloth: Saving model... This might take 5 minutes ...") + from tqdm import tqdm as ProgressBar for j, layer in enumerate(ProgressBar(internal_model.model.layers)): for item in LLAMA_WEIGHTS: @@ -665,8 +670,6 @@ def unsloth_save_model( print() pass - print("Unsloth: Saving model... 
This might take 5 minutes for Llama-7b...") - # Since merged, edit quantization_config old_config = model.config new_config = model.config.to_dict() @@ -759,16 +762,36 @@ def install_llama_cpp_make_non_blocking(): # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? # env = { **os.environ, "LLAMA_CUDA": "1", } - n_jobs = max(int(psutil.cpu_count()*1.5), 1) # Force make clean - os.system("make clean -C llama.cpp") - full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] - + check = os.system("make clean -C llama.cpp") + IS_CMAKE = False + if check == 0: + # Uses old MAKE + n_jobs = max(int(psutil.cpu_count()*1.5), 1) + full_command = ["make", "all", "-j"+str(n_jobs), "-C", "llama.cpp"] + IS_CMAKE = False + else: + # Uses new CMAKE + n_jobs = max(int(psutil.cpu_count()), 1) # Use less CPUs since 1.5x faster + check = os.system("cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON") + if check != 0: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp using os.system(...) with error {check}. Please report this ASAP!") + pass + # f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + full_command = [ + "cmake", "--build", "llama.cpp/build", + "--config", "Release", + "-j"+str(n_jobs), + "--clean-first", + "--target", + ] + LLAMA_CPP_TARGETS + IS_CMAKE = True + pass # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? 
# run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) run_installer = subprocess.Popen(full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT) - return run_installer + return run_installer, IS_CMAKE pass @@ -779,6 +802,29 @@ def install_python_non_blocking(packages = []): pass +def try_execute(commands, force_complete = False): + for command in commands: + with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: + for line in sp.stdout: + line = line.decode("utf-8", errors = "replace") + if "undefined reference" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + elif "deprecated" in line: + return "CMAKE" + elif "Unknown argument" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + elif "***" in line: + raise RuntimeError(f"*** Unsloth: Failed compiling llama.cpp with {line}. Please report this ASAP!") + print(line, flush = True, end = "") + pass + if force_complete and sp.returncode is not None and sp.returncode != 0: + raise subprocess.CalledProcessError(sp.returncode, sp.args) + pass + pass + return None +pass + + def install_llama_cpp_old(version = -10): # Download the 10th latest release since the latest might be broken! # FALLBACK mechanism @@ -793,13 +839,13 @@ def install_llama_cpp_old(version = -10): # Check if the llama.cpp exists if os.path.exists("llama.cpp"): print( - "**[WARNING]** You have a llama.cpp old directory which is broken.\n"\ + "**[WARNING]** You have a llama.cpp directory which is broken.\n"\ "Unsloth will DELETE the broken directory and install a new one.\n"\ - "Press CTRL + C / cancel this if this is wrong. We shall wait 10 seconds.\n" + "Press CTRL + C / cancel this if this is wrong. 
We shall wait 30 seconds.\n" ) import time - for i in range(10): - print(f"**[WARNING]** Deleting llama.cpp directory... {10-i} seconds left.") + for i in range(30): + print(f"**[WARNING]** Deleting llama.cpp directory... {30-i} seconds left.") time.sleep(1) import shutil shutil.rmtree("llama.cpp", ignore_errors = True) @@ -810,18 +856,25 @@ def install_llama_cpp_old(version = -10): commands = [ "git clone --recursive https://github.com/ggerganov/llama.cpp", f"cd llama.cpp && git reset --hard {version} && git clean -df", + ] + try_execute(commands) + + # Try using MAKE + commands = [ "make clean -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", ] - for command in commands: - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - pass + if try_execute(commands) == "CMAKE": + # Instead use CMAKE + commands = [ + "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", + f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + "cp llama.cpp/build/bin/llama-* llama.cpp", + "rm -rf llama.cpp/build", + ] + try_execute(commands) pass + # Check if successful if not os.path.exists("llama.cpp/quantize") and not os.path.exists("llama.cpp/llama-quantize"): raise RuntimeError( @@ -839,23 +892,27 @@ def install_llama_cpp_blocking(use_cuda = False): commands = [ "git clone --recursive https://github.com/ggerganov/llama.cpp", + "pip install gguf protobuf", + ] + if os.path.exists("llama.cpp"): return + try_execute(commands) + + commands = [ "make clean -C llama.cpp", # https://github.com/ggerganov/llama.cpp/issues/7062 # Weirdly GPU conversion for GGUF breaks?? 
# f"{use_cuda} make all -j{psutil.cpu_count()*2} -C llama.cpp", f"make all -j{psutil.cpu_count()*2} -C llama.cpp", - "pip install gguf protobuf", ] - if os.path.exists("llama.cpp"): return - - for command in commands: - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - pass + if try_execute(commands) == "CMAKE": + # Instead use CMAKE + commands = [ + "cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=ON", + f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}", + "cp llama.cpp/build/bin/llama-* llama.cpp", + "rm -rf llama.cpp/build", + ] + try_execute(commands) pass pass @@ -950,9 +1007,9 @@ def save_to_gguf( print_info = \ f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ - f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ - f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits will take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization_method} will take 10 minutes each.\n"\ + f" \\\ /| [0] Installing llama.cpp might take 3 minutes.\n"\ + f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits might take 3 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to {quantization_method} might take 10 minutes each.\n"\ f' "-____-" In total, you will have to wait at least 16 minutes.\n' print(print_info) @@ -971,19 +1028,35 @@ def save_to_gguf( quantize_location = get_executable(["llama-quantize", "quantize"]) convert_location = get_executable(["convert-hf-to-gguf.py", "convert_hf_to_gguf.py"]) + error = 0 if quantize_location is not None and convert_location is not None: print("Unsloth: llama.cpp found in the system. 
We shall skip installation.") else: - print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") + print("Unsloth: Installing llama.cpp. This might take 3 minutes...") if _run_installer is not None: + _run_installer, IS_CMAKE = _run_installer + error = _run_installer.wait() + # Check if successful + if error != 0: + print(f"Unsloth: llama.cpp error code = {error}.") + install_llama_cpp_old(-10) + pass + + if IS_CMAKE: + # CMAKE needs to do some extra steps + print("Unsloth: CMAKE detected. Finalizing some steps for installation.") + + check = os.system("cp llama.cpp/build/bin/llama-* llama.cpp") + if check != 0: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") + check = os.system("rm -rf llama.cpp/build") + if check != 0: raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") + pass else: error = 0 install_llama_cpp_blocking() pass - # Check if successful. If not install 10th latest release - # Careful llama.cpp/quantize changed to llama.cpp/llama-quantize # and llama.cpp/main changed to llama.cpp/llama-cli # See https://github.com/ggerganov/llama.cpp/pull/7809 @@ -1012,11 +1085,6 @@ def save_to_gguf( "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" 
) pass - - if error != 0 or quantize_location is None or convert_location is None: - print(f"Unsloth: llama.cpp error code = {error}.") - install_llama_cpp_old(-10) - pass pass # Determine maximum first_conversion state @@ -1084,7 +1152,7 @@ def save_to_gguf( print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ - "This will take 3 minutes...") + "This might take 3 minutes...") # We first check if tokenizer.model exists in the model_directory if os.path.exists(f"{model_directory}/tokenizer.model"): @@ -1107,15 +1175,7 @@ def save_to_gguf( f"--outtype {first_conversion}" pass - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - if sp.returncode is not None and sp.returncode != 0: - raise subprocess.CalledProcessError(sp.returncode, sp.args) - pass + try_execute([command,], force_complete = True) # Check if quantization succeeded! if not os.path.isfile(final_location): @@ -1151,22 +1211,13 @@ def save_to_gguf( # Convert each type! for quant_method in quantization_method: if quant_method != first_conversion: - print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. This will take 20 minutes...") + print(f"Unsloth: [2] Converting GGUF 16bit into {quant_method}. 
This might take 20 minutes...") final_location = str((Path(model_directory) / f"unsloth.{quant_method.upper()}.gguf").absolute()) command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" - # quantize uses stderr - with subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) as sp: - for line in sp.stdout: - line = line.decode("utf-8", errors = "replace") - if "undefined reference" in line: - raise RuntimeError("Failed compiling llama.cpp. Please report this ASAP!") - print(line, flush = True, end = "") - if sp.returncode is not None and sp.returncode != 0: - raise subprocess.CalledProcessError(sp.returncode, sp.args) - pass + try_execute([command,], force_complete = True) # Check if quantization succeeded! if not os.path.isfile(final_location): @@ -1629,7 +1680,7 @@ def unsloth_save_pretrained_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass @@ -1650,7 +1701,7 @@ def unsloth_save_pretrained_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass @@ -1807,7 +1858,7 @@ def unsloth_push_to_hub_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) 
python_install.wait() pass @@ -1828,7 +1879,7 @@ def unsloth_push_to_hub_gguf( git_clone = install_llama_cpp_clone_non_blocking() python_install = install_python_non_blocking(["gguf", "protobuf"]) git_clone.wait() - makefile = install_llama_cpp_make_non_blocking() + makefile = install_llama_cpp_make_non_blocking() new_save_directory, old_username = unsloth_save_model(**arguments) python_install.wait() pass From 8d675975eaa852eac232da99d866d82d5cb21f15 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 4 Dec 2024 02:36:42 -0800 Subject: [PATCH 0898/1088] Update mapper.py --- unsloth/models/mapper.py | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b2f73aa6c2..0ba03ce01c 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -452,33 +452,45 @@ "unsloth/Llama-3.1-Nemotron-70B-Instruct", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", ), - "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-2B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2-VL-2B-Instruct", "Qwen/Qwen2-VL-2B-Instruct", + "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit", ), - "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-7B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2-VL-7B-Instruct", "Qwen/Qwen2-VL-7B-Instruct", + "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B-Instruct", + "Qwen/Qwen2-VL-72B-Instruct", + "unsloth/Qwen2-VL-72B-Instruct-bnb-4bit", + ), + "unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.2-11B-Vision-Instruct", + "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision-Instruct", 
"meta-llama/Llama-3.2-90B-Vision-Instruct", + "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-11B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision", "meta-llama/Llama-3.2-11B-Vision", + "unsloth/Llama-3.2-11B-Vision-bnb-4bit", ), - "unsloth/Llama-3.2-90B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision", "meta-llama/Llama-3.2-90B-Vision", + "unsloth/Llama-3.2-90B-Vision-bnb-4bit", ), - "unsloth/Pixtral-12B-2409-bnb-4bit" : ( + "unsloth/Pixtral-12B-2409-unsloth-bnb-4bit" : ( "unsloth/Pixtral-12B-2409", "mistralai/Pixtral-12B-2409", + "unsloth/Pixtral-12B-2409-bnb-4bit", ), "unsloth/Pixtral-12B-2409-Base-bnb-4bit" : ( "unsloth/Pixtral-12B-Base-2409", @@ -500,6 +512,10 @@ "unsloth/Llama-3.1-Tulu-3-70B", "allenai/Llama-3.1-Tulu-3-70B", ), + "unsloth/QwQ-32B-Preview-bnb-4bit" : ( + "unsloth/QwQ-32B-Preview", + "Qwen/QwQ-32B-Preview", + ), } INT_TO_FLOAT_MAPPER = {} @@ -519,6 +535,14 @@ MAP_TO_UNSLOTH_16bit[values[1]] = values[0] MAP_TO_UNSLOTH_16bit[values[1].lower()] = values[0] pass + elif len(values) == 3: + # Dynamic Unsloth quantization + if values[0].startswith("unsloth"): + MAP_TO_UNSLOTH_16bit[values[1]] = values[0] + MAP_TO_UNSLOTH_16bit[values[1].lower()] = values[0] + MAP_TO_UNSLOTH_16bit[values[2]] = values[0] + MAP_TO_UNSLOTH_16bit[values[2].lower()] = values[0] + pass pass # Get lowercased From 1cf7965896f9140ee965f85a0af664fde30c1dc3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 4 Dec 2024 04:26:36 -0800 Subject: [PATCH 0899/1088] modules --- unsloth/models/_utils.py | 2 +- unsloth/models/vision.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5bc9529e60..11423f9066 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -__version__ = "2024.12.1" +__version__ = "2024.12.2" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index aa4cc09022..0f682c6b40 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,7 +30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, merge_and_overwrite_lora, - # SKIP_QUANTIZATION_MODULES, + SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version @@ -133,7 +133,7 @@ def from_pretrained( bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, - # llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) pass From 9dc399a6b6625ee40835c5eab361426d3c5d4abb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 4 Dec 2024 05:38:05 -0800 Subject: [PATCH 0900/1088] Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * 
typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update 
loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> --- unsloth/models/_utils.py | 2 +- unsloth/models/mapper.py | 38 +++++++++++++++++++++++++++++++------- unsloth/models/vision.py | 4 ++-- 3 files changed, 34 insertions(+), 10 deletions(-) diff 
--git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5bc9529e60..11423f9066 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.1" +__version__ = "2024.12.2" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b2f73aa6c2..0ba03ce01c 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -452,33 +452,45 @@ "unsloth/Llama-3.1-Nemotron-70B-Instruct", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", ), - "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-2B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2-VL-2B-Instruct", "Qwen/Qwen2-VL-2B-Instruct", + "unsloth/Qwen2-VL-2B-Instruct-bnb-4bit", ), - "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-7B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2-VL-7B-Instruct", "Qwen/Qwen2-VL-7B-Instruct", + "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B-Instruct", + "Qwen/Qwen2-VL-72B-Instruct", + "unsloth/Qwen2-VL-72B-Instruct-bnb-4bit", + ), + "unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.2-11B-Vision-Instruct", + "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision-Instruct", "meta-llama/Llama-3.2-90B-Vision-Instruct", + "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-11B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-11B-Vision-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision", "meta-llama/Llama-3.2-11B-Vision", + "unsloth/Llama-3.2-11B-Vision-bnb-4bit", ), - 
"unsloth/Llama-3.2-90B-Vision-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision", "meta-llama/Llama-3.2-90B-Vision", + "unsloth/Llama-3.2-90B-Vision-bnb-4bit", ), - "unsloth/Pixtral-12B-2409-bnb-4bit" : ( + "unsloth/Pixtral-12B-2409-unsloth-bnb-4bit" : ( "unsloth/Pixtral-12B-2409", "mistralai/Pixtral-12B-2409", + "unsloth/Pixtral-12B-2409-bnb-4bit", ), "unsloth/Pixtral-12B-2409-Base-bnb-4bit" : ( "unsloth/Pixtral-12B-Base-2409", @@ -500,6 +512,10 @@ "unsloth/Llama-3.1-Tulu-3-70B", "allenai/Llama-3.1-Tulu-3-70B", ), + "unsloth/QwQ-32B-Preview-bnb-4bit" : ( + "unsloth/QwQ-32B-Preview", + "Qwen/QwQ-32B-Preview", + ), } INT_TO_FLOAT_MAPPER = {} @@ -519,6 +535,14 @@ MAP_TO_UNSLOTH_16bit[values[1]] = values[0] MAP_TO_UNSLOTH_16bit[values[1].lower()] = values[0] pass + elif len(values) == 3: + # Dynamic Unsloth quantization + if values[0].startswith("unsloth"): + MAP_TO_UNSLOTH_16bit[values[1]] = values[0] + MAP_TO_UNSLOTH_16bit[values[1].lower()] = values[0] + MAP_TO_UNSLOTH_16bit[values[2]] = values[0] + MAP_TO_UNSLOTH_16bit[values[2].lower()] = values[0] + pass pass # Get lowercased diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index aa4cc09022..0f682c6b40 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,7 +30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, merge_and_overwrite_lora, - # SKIP_QUANTIZATION_MODULES, + SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version @@ -133,7 +133,7 @@ def from_pretrained( bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", bnb_4bit_compute_dtype = dtype, - # llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) pass From e6270d37904f16656f04678205faf6eb886f603f Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 4 Dec 2024 21:32:23 -0800 Subject: [PATCH 0901/1088] Update README.md Unsloth 
Dynamic 4-bit Quantization Update --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 776fc60c39..eabd938916 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) - 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. From 9bc260938ce2bc1e818e7e3cc40e84bc312bfb66 Mon Sep 17 00:00:00 2001 From: Zewen Shen Date: Thu, 5 Dec 2024 02:54:18 -0500 Subject: [PATCH 0902/1088] Fix vision model tokenizer padding side. 
(#1384) * Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than 
max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. 
Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update 
_utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame 
<122996026+dame-cell@users.noreply.github.com> * Update README.md Unsloth Dynamic 4-bit Quantization Update * Fix vision model tokenizer padding side. * Update vision.py --------- Co-authored-by: Daniel Han Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/models/vision.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 0f682c6b40..a8ef9c0416 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -164,6 +164,8 @@ def from_pretrained( padding_side = "right", token = token, ) + # Add padding side as well + tokenizer.tokenizer.padding_side = "right" model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) @@ -187,6 +189,7 @@ def from_pretrained( # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference + tokenizer.tokenizer.padding_side = "left" # Force inference internal_model = model while hasattr(internal_model, "model"): internal_model._saved_temp_tokenizer = tokenizer @@ -315,12 +318,12 @@ def patch_peft_model( internal_model = model while hasattr(internal_model, "model"): if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Clear deleted GPU items @@ -361,12 +364,12 @@ def for_inference(model): internal_model = model while 
hasattr(internal_model, "model"): if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" pass # Also disable training for embeddings for NEFTune @@ -405,12 +408,12 @@ def for_training(model, use_gradient_checkpointing = True): internal_model = model while hasattr(internal_model, "model"): if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Also re-enable training for embeddings for NEFTune From a8d8c97d0c48e9f030ab53055f770bbb5b3ad857 Mon Sep 17 00:00:00 2001 From: Edd <68678137+Erland366@users.noreply.github.com> Date: Thu, 5 Dec 2024 11:59:13 +0400 Subject: [PATCH 0903/1088] Add citation section to README.md (#1377) * Add citation section to README.md * Update README.md --------- Co-authored-by: Daniel Han --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index eabd938916..b477419864 100644 --- a/README.md +++ b/README.md @@ -469,6 +469,18 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    +### Citing + +You can cite the Unsloth repo as follows: +```bibtex +@software{unsloth, + author = {Daniel Han, Michael Han and Unsloth team}, + title = {Unsloth}, + url = {http://github.com/unslothai/unsloth}, + year = {2023} +} +``` + ### Thank You to - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support From 15d7fbb30da63ad1912a2179b3b6225b908a1d69 Mon Sep 17 00:00:00 2001 From: Datta Nimmaturi Date: Thu, 5 Dec 2024 13:31:53 +0530 Subject: [PATCH 0904/1088] Granite support (#1218) * [WIP] Support for Granite * Fixup inference * Cleanup flex attention * remove sliding window * Use torch.add for residual multiplier --- unsloth/models/__init__.py | 2 + unsloth/models/_utils.py | 2 +- unsloth/models/gemma2.py | 2 +- unsloth/models/granite.py | 523 +++++++++++++++++++++++++++++++++++++ unsloth/models/llama.py | 30 ++- unsloth/models/loader.py | 6 +- 6 files changed, 558 insertions(+), 7 deletions(-) create mode 100644 unsloth/models/granite.py diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 9c032b28bd..c52d14f402 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ +from .granite import FastGraniteModel from .loader import FastLanguageModel, FastVisionModel from .llama import FastLlamaModel from .mistral import FastMistralModel diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 11423f9066..14e6f52739 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -188,7 +188,7 @@ def patch_mistral_nemo_config(config): from transformers import __version__ as transformers_version from transformers import PretrainedConfig -model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2",] +model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2", "granite"] for model_name in model_architectures: config_filepath = f"transformers.models.{model_name}.configuration_{model_name}" diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 62ecb9690f..e47a7434f2 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -193,7 +193,7 @@ def Gemma2DecoderLayer_fast_forward( output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, - _flag_for_generation=True, + _flag_for_generation=self._flag_for_generation, ) hidden_states = fast_rms_layernorm_inference_gemma(self.post_attention_layernorm, hidden_states, out_weight) hidden_states += residual diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py new file mode 100644 index 0000000000..2229636e9e --- /dev/null +++ b/unsloth/models/granite.py @@ -0,0 +1,523 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .llama import * +import os +from ._utils import __version__ +from .llama import ( + LlamaRotaryEmbedding, + LlamaLinearScalingRotaryEmbedding, +) +from .mistral import * + +try: + from transformers.models.granite.modeling_granite import ( + GraniteAttention, + GraniteDecoderLayer, + GraniteModel, + GraniteForCausalLM, + ) +except: + from packaging.version import Version + + transformers_version = Version(transformers_version) + if not transformers_version >= Version("4.45.0"): + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ + f"The minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + pass +pass + +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) + +# For Pytorch 2.1.1 +try: + from transformers.models.granite.modeling_granite import ( + GraniteSdpaAttention, + GraniteFlashAttention2, + ) +except: + GraniteSdpaAttention = GraniteAttention + GraniteFlashAttention2 = GraniteAttention +pass + +def GraniteAttention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + 
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention + pass + + bsz, q_len, _ = hidden_states.size() + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q, K, V = self.apply_qkv(self, hidden_states) + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + assert position_embeddings is not None + cos, sin = position_embeddings + if position_ids is None: + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + + if past_key_value is not None: + K = torch.cat([past_key_value[0], K], dim = 2) + V = torch.cat([past_key_value[1], V], dim = 2) + pass + past_key_value = (K, V) if use_cache else None + + # Attention module + if (not HAS_FLASH_ATTENTION and attention_mask is None): + # Xformers memory efficient attention + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + K_M = V_M = bsz * kv_seq_len + Q_M = bsz * q_len + + # Group query attention + K = K .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + K = K.reshape(bsz, kv_seq_len, n_heads, head_dim) + V = V.reshape(bsz, 
kv_seq_len, n_heads, head_dim) + else: + # Xformers does support the forward pass though + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + pass + + A = xformers_attention(Q, K, V, attn_bias = causal_mask, scale=self.scaling) + A = A.view(bsz, q_len, n_heads, head_dim) + + elif HAS_FLASH_ATTENTION and attention_mask is None: + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + window = (kv_seq_len, kv_seq_len) + A = flash_attn_func(Q, K, V, causal = True, window_size = window, softmax_scale=self.scaling) + else: + # Grouped query attention + # if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) + # pass + # Must be contiguous or else results are False! + # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! 
+ A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, scale = self.scaling, is_causal = False) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2).contiguous() + pass + + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) + attn_output = self.apply_o(self, attn_output) + attn_weights = None + return attn_output, attn_weights, past_key_value +pass + + +def GraniteDecoderLayer_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + *args, **kwargs, +): + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + position_embeddings = position_embeddings, + _flag_for_generation=self._flag_for_generation, + ) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_swiglu_inference(self.mlp, hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + else: + residual = hidden_states + 
hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + position_embeddings = position_embeddings, + ) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + pass + + outputs = (hidden_states,) + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) + return outputs +pass + + +from math import sqrt as math_sqrt +KV_CACHE_INCREMENT = 256 # KV Cache update size +torch_nn_functional_softmax = torch.nn.functional.softmax +torch_matmul = torch.matmul +torch_tanh = torch.tanh + +def GraniteAttention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]], + position_ids, + do_prefill = False, + attention_mask = None, + use_sliding_window = False, + position_embeddings : Optional[Tuple[torch.Tensor, torch.Tensor]] = None, +): + + assert position_embeddings is not None, f"Granite model requires position embeddings to be specified" + + Xn = hidden_states + bsz, _, hd = hidden_states.size() + K1, V1 = past_key_value + dtype = Xn.dtype + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + attention_size = n_heads*head_dim + # assert(n_kv_heads * n_groups == n_heads) + seq_len = K1.shape[-2] + kv_seq_len = seq_len + 1 + + # Prefill phase + # if not hasattr(self, 
"paged_attention"): + if do_prefill: + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) + self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + # Only for Gemma2 + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + + + self.half_head_dim = head_dim // 2 + elif kv_seq_len >= self.paged_attention.shape[0]: + self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.attention.resize_((bsz, n_heads, 1, self.attention.shape[-1]+KV_CACHE_INCREMENT)) + pass + + Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0]) + Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0]) + Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1]) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + + # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + cos, sin = position_embeddings + cos, sin = cos[position_ids], sin[position_ids] + h = self.half_head_dim + + RH_Q = self.RH_Q + RH_Q[:,:,:,:h] = Qn[:,:,:,h:] + RH_Q[:,:,:,h:] = 
Qn[:,:,:,:h] + torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + Qn *= cos + Qn.addcmul_(RH_Q, sin) + + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + RH_K[:,:,:,:h] = Kn[:,:,:,h:] + RH_K[:,:,:,h:] = Kn[:,:,:,:h] + torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + Kn *= cos + Kn.addcmul_(RH_K, sin) + + # New KV cache + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + + # Grouped query attention + _, _, cached_len, _ = Kn.shape + if n_groups != 1: + Kn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Kn = Kn.reshape(bsz, n_heads, cached_len, head_dim) + Vn = Vn.reshape(bsz, n_heads, cached_len, head_dim) + pass + # else: + # Kn, Vn = Kn, Vn + # pass + + Qn *= self.scaling + A = torch_matmul(Qn, Kn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + + # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched + + A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch_matmul(A, Vn, out = Qn) + # else: + # A = scaled_dot_product_attention(Qn, Kn, Vn, attn_mask = attention_mask, is_causal = False) + # pass + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, attention_size) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) + return A, (Kn, Vn) +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +# @torch.inference_mode +def GraniteModel_fast_forward_inference( + self, + input_ids, + past_key_values, + position_ids, + attention_mask = None, +): + input_ids = 
input_ids[:,:self.max_seq_length] + hidden_states = self.model.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) + hidden_states *= self.model.embedding_multiplier + + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) + else: + attention_mask = None + pass + + position_embeddings = self.model.rotary_emb(hidden_states, position_ids, self.max_seq_length) + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.model.layers): + + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) + hidden_states, present_key_value = GraniteAttention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = attention_mask, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), + position_embeddings = position_embeddings, + ) + + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) + hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + next_decoder_cache.append(present_key_value) + pass + hidden_states = fast_rms_layernorm_inference(self.model.norm, hidden_states) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + +class GraniteRotaryEmbedding(LlamaRotaryEmbedding): + def __init__(self, config): + super().__init__(config = config) + +class FastGraniteModel(FastLlamaModel): + + 
@staticmethod + def pre_patch(): + init_name, function = patch_linear_scaling( + model_name = "granite", + rope_module = GraniteRotaryEmbedding, + scaled_rope_module = LlamaLinearScalingRotaryEmbedding, + attention_module = GraniteAttention, + ) + if init_name is not None: + exec(function, globals()) + GraniteAttention.__init__ = eval(init_name) + pass + GraniteAttention .forward = GraniteAttention_fast_forward + GraniteSdpaAttention .forward = GraniteAttention_fast_forward + GraniteFlashAttention2.forward = GraniteAttention_fast_forward + GraniteDecoderLayer .forward = GraniteDecoderLayer_fast_forward + GraniteModel .forward = LlamaModel_fast_forward + GraniteForCausalLM .forward = CausalLM_fast_forward(GraniteModel_fast_forward_inference) + PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(GraniteForCausalLM) + + import transformers.models.granite.modeling_granite + transformers.models.granite.modeling_granite.GraniteRotaryEmbedding = GraniteRotaryEmbedding + + return + pass + + + @staticmethod + def post_patch(model): + + # Torch.compile fails on embedding matrix?? + # Workaround randomnly fixes it for torch versions < 2.2 + model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + model.config.update({"unsloth_version" : __version__}) + + # We also do this for the lm_head + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.lm_head.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + + # Granite has tied weights! 
This means lm_head == embed_tokens + if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.model.embed_tokens.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + pass + + # Also patch all dtypes - BnB seems to not allocate the correct type? + # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! + module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + # Downcast RoPE embedding to correct data type + if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")): + + if hasattr(module, "cos_cached") and \ + (module.cos_cached.dtype != correct_dtype): + + module.cos_cached = module.cos_cached.to(correct_dtype) + module.sin_cached = module.sin_cached.to(correct_dtype) + + elif hasattr(module, "short_cos_cached") and \ + (module.short_cos_cached.dtype != correct_dtype): + + module.short_cos_cached = module.short_cos_cached.to(correct_dtype) + module.short_sin_cached = module.short_sin_cached.to(correct_dtype) + pass + pass + pass + + # Clear deleted GPU items + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass +pass + diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1bffb0cb16..cc41a8b266 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -616,9 +616,10 @@ def LlamaModel_fast_forward( pass # Normalized from Gemma - IS_GEMMA = self.config.model_type.startswith("gemma") - IS_GEMMA2 = 
self.config.model_type.startswith("gemma2") - IS_COHERE = self.config.model_type.startswith("cohere") + IS_GEMMA = self.config.model_type.startswith("gemma") + IS_GEMMA2 = self.config.model_type.startswith("gemma2") + IS_COHERE = self.config.model_type.startswith("cohere") + IS_GRANITE = self.config.model_type.startswith("granite") train_embed_tokens = self.embed_tokens.weight.requires_grad if IS_GEMMA: @@ -684,6 +685,8 @@ def LlamaModel_fast_forward( pass hidden_states = inputs_embeds + if IS_GRANITE: #granite has embedding multiplier + hidden_states = self.embedding_multiplier * hidden_states if past_key_values is None and self.training: use_cache = False @@ -773,6 +776,12 @@ def LlamaModel_fast_forward( pass pass + + if IS_GRANITE: + position_embeddings = self.rotary_emb(hidden_states, position_ids, self.max_position_embeddings) + else: + position_embeddings = None + # Go through every layer! for idx, decoder_layer in enumerate(self.layers): @@ -797,12 +806,14 @@ def LlamaModel_fast_forward( past_key_values, output_attentions, use_cache, + None, + position_embeddings, )[0] elif gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask) + return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask, position_embeddings = position_embeddings) return custom_forward pass @@ -827,6 +838,7 @@ def custom_forward(*inputs): output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, + position_embeddings = position_embeddings ) hidden_states = layer_outputs[0] pass @@ -1014,6 +1026,15 @@ def _CausalLM_fast_forward( pass loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if self.config.model_type == "granite": + # granite uses logit_scaling as key and they divide by the scale unlike cohere + # notice that for 
granite, logits_scale is 16 and for cohere it is 0.125 (aka 1/8) in their respective configs + # granite: https://github.com/huggingface/transformers/blob/4d1d0f29a493098e6bc6b904b82e29cb331827f5/src/transformers/models/granite/modeling_granite.py#L1103 + # cohere: https://github.com/huggingface/transformers/blob/4d1d0f29a493098e6bc6b904b82e29cb331827f5/src/transformers/models/cohere/modeling_cohere.py#L1176 + logit_scaling = 1 / getattr(self.config, "logits_scaling", 1) + if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): @@ -2245,6 +2266,7 @@ def patch_peft_model( elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu_approx elif model_type == "gemma2": apply_lora_mlp = apply_lora_mlp_geglu_approx elif model_type == "cohere": apply_lora_mlp = apply_lora_mlp_swiglu + elif model_type == "granite": apply_lora_mlp = apply_lora_mlp_swiglu else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f8ed3a87e6..3b2c8ffefe 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -13,6 +13,7 @@ # limitations under the License. 
from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING +from .granite import FastGraniteModel from .llama import FastLlamaModel, logger from .mistral import FastMistralModel from .qwen2 import FastQwen2Model @@ -38,6 +39,7 @@ SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") SUPPORTS_LLAMA32 = transformers_version > Version("4.45.0") +SUPPORTS_GRANITE = transformers_version >= Version("4.46.0") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -175,7 +177,7 @@ def from_pretrained( model_type = model_config.model_type - if model_type == "llama": + if model_type == "llama": scaling_type = None if getattr(model_config, "rope_scaling", None) is not None: scaling_type1 = model_config.rope_scaling.get("type", None) @@ -231,6 +233,8 @@ def from_pretrained( dispatch_model = FastQwen2Model elif model_type == "cohere": dispatch_model = FastCohereModel + elif model_type == "granite": + dispatch_model = FastGraniteModel else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ From 2b5b77102eeec005b8b147013596bdaaefa1de6f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 6 Dec 2024 12:25:08 -0800 Subject: [PATCH 0905/1088] Llama 3.3 --- unsloth/models/_utils.py | 2 +- unsloth/models/mapper.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 14e6f52739..8e9f09df2b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.2" +__version__ = "2024.12.3" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 0ba03ce01c..41f7444643 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -516,6 +516,10 @@ "unsloth/QwQ-32B-Preview", "Qwen/QwQ-32B-Preview", ), + "unsloth/Llama-3.3-70B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.3-70B-Instruct", + "meta-llama/Llama-3.3-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} From 3464b35ae1b1c4e5d5f2de1fcfd6e35f85c99173 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 6 Dec 2024 13:05:15 -0800 Subject: [PATCH 0906/1088] Llama 3.3 (#1393) * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update 
cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules * Fix vision model tokenizer padding side. (#1384) * Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update 
_utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> * Update README.md Unsloth Dynamic 4-bit Quantization Update * Fix vision model tokenizer padding side. * Update vision.py --------- Co-authored-by: Daniel Han Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Add citation section to README.md (#1377) * Add citation section to README.md * Update README.md --------- Co-authored-by: Daniel Han * Granite support (#1218) * [WIP] Support for Granite * Fixup inference * Cleanup flex attention * remove sliding window * Use torch.add for residual multiplier * Llama 3.3 --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Zewen Shen Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 12 + unsloth/models/__init__.py | 2 + unsloth/models/_utils.py | 4 +- unsloth/models/gemma2.py | 2 +- unsloth/models/granite.py | 523 
+++++++++++++++++++++++++++++++++++++ unsloth/models/llama.py | 30 ++- unsloth/models/loader.py | 6 +- unsloth/models/mapper.py | 4 + unsloth/models/vision.py | 15 +- 9 files changed, 584 insertions(+), 14 deletions(-) create mode 100644 unsloth/models/granite.py diff --git a/README.md b/README.md index eabd938916..b477419864 100644 --- a/README.md +++ b/README.md @@ -469,6 +469,18 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    +### Citing + +You can cite the Unsloth repo as follows: +```bibtex +@software{unsloth, + author = {Daniel Han, Michael Han and Unsloth team}, + title = {Unsloth}, + url = {http://github.com/unslothai/unsloth}, + year = {2023} +} +``` + ### Thank You to - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 9c032b28bd..c52d14f402 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. + +from .granite import FastGraniteModel from .loader import FastLanguageModel, FastVisionModel from .llama import FastLlamaModel from .mistral import FastMistralModel diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 11423f9066..8e9f09df2b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.2" +__version__ = "2024.12.3" __all__ = [ "prepare_model_for_kbit_training", @@ -188,7 +188,7 @@ def patch_mistral_nemo_config(config): from transformers import __version__ as transformers_version from transformers import PretrainedConfig -model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2",] +model_architectures = ["llama", "mistral", "gemma", "gemma2", "qwen2", "granite"] for model_name in model_architectures: config_filepath = f"transformers.models.{model_name}.configuration_{model_name}" diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 62ecb9690f..e47a7434f2 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -193,7 +193,7 @@ def Gemma2DecoderLayer_fast_forward( output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, - _flag_for_generation=True, + _flag_for_generation=self._flag_for_generation, ) hidden_states = fast_rms_layernorm_inference_gemma(self.post_attention_layernorm, hidden_states, out_weight) hidden_states += residual diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py new file mode 100644 index 0000000000..2229636e9e --- /dev/null +++ b/unsloth/models/granite.py @@ -0,0 +1,523 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .llama import * +import os +from ._utils import __version__ +from .llama import ( + LlamaRotaryEmbedding, + LlamaLinearScalingRotaryEmbedding, +) +from .mistral import * + +try: + from transformers.models.granite.modeling_granite import ( + GraniteAttention, + GraniteDecoderLayer, + GraniteModel, + GraniteForCausalLM, + ) +except: + from packaging.version import Version + + transformers_version = Version(transformers_version) + if not transformers_version >= Version("4.45.0"): + raise ImportError( + f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"\ + f"The minimum required version is 4.42.3.\n"\ + f'Try `pip install --upgrade "transformers>=4.42.3"`\n'\ + f"to obtain the latest transformers build, then restart this session."\ + ) + pass +pass + +from transformers.modeling_attn_mask_utils import ( + _prepare_4d_causal_attention_mask_for_sdpa, +) + +# For Pytorch 2.1.1 +try: + from transformers.models.granite.modeling_granite import ( + GraniteSdpaAttention, + GraniteFlashAttention2, + ) +except: + GraniteSdpaAttention = GraniteAttention + GraniteFlashAttention2 = GraniteAttention +pass + +def GraniteAttention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + # Clear inference + if hasattr(self, "paged_attention"): + del self.paged_attention_K + del self.paged_attention_V + del self.paged_attention + del self.temp_QA + del self.temp_KV + del self.RH_Q + del self.attention + pass + + bsz, q_len, _ = 
hidden_states.size() + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q, K, V = self.apply_qkv(self, hidden_states) + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + assert position_embeddings is not None + cos, sin = position_embeddings + if position_ids is None: + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + + if past_key_value is not None: + K = torch.cat([past_key_value[0], K], dim = 2) + V = torch.cat([past_key_value[1], V], dim = 2) + pass + past_key_value = (K, V) if use_cache else None + + # Attention module + if (not HAS_FLASH_ATTENTION and attention_mask is None): + # Xformers memory efficient attention + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + K_M = V_M = bsz * kv_seq_len + Q_M = bsz * q_len + + # Group query attention + K = K .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, kv_seq_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, kv_seq_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + K = K.reshape(bsz, kv_seq_len, n_heads, head_dim) + V = V.reshape(bsz, kv_seq_len, n_heads, head_dim) + else: + # Xformers does support the forward pass though + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + pass + + A = xformers_attention(Q, K, V, attn_bias = causal_mask, scale=self.scaling) + A = A.view(bsz, q_len, n_heads, head_dim) + + elif HAS_FLASH_ATTENTION and attention_mask is None: + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + window = (kv_seq_len, 
kv_seq_len) + A = flash_attn_func(Q, K, V, causal = True, window_size = window, softmax_scale=self.scaling) + else: + # Grouped query attention + # if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) + # pass + # Must be contiguous or else results are False! + # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, scale = self.scaling, is_causal = False) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2).contiguous() + pass + + attn_output = A.reshape(bsz, q_len, n_heads*head_dim) + attn_output = self.apply_o(self, attn_output) + attn_weights = None + return attn_output, attn_weights, past_key_value +pass + + +def GraniteDecoderLayer_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + *args, **kwargs, +): + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + 
attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + position_embeddings = position_embeddings, + _flag_for_generation=self._flag_for_generation, + ) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) + hidden_states = fast_swiglu_inference(self.mlp, hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + else: + residual = hidden_states + hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + padding_mask=padding_mask, + position_embeddings = position_embeddings, + ) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + # Fully Connected + residual = hidden_states + hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + pass + + outputs = (hidden_states,) + if output_attentions: outputs += (self_attn_weights,) + if use_cache: outputs += (present_key_value,) + return outputs +pass + + +from math import sqrt as math_sqrt +KV_CACHE_INCREMENT = 256 # KV Cache update size +torch_nn_functional_softmax = torch.nn.functional.softmax +torch_matmul = torch.matmul +torch_tanh = torch.tanh + +def GraniteAttention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: 
Optional[Tuple[torch.Tensor]], + position_ids, + do_prefill = False, + attention_mask = None, + use_sliding_window = False, + position_embeddings : Optional[Tuple[torch.Tensor, torch.Tensor]] = None, +): + + assert position_embeddings is not None, f"Granite model requires position embeddings to be specified" + + Xn = hidden_states + bsz, _, hd = hidden_states.size() + K1, V1 = past_key_value + dtype = Xn.dtype + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + attention_size = n_heads*head_dim + # assert(n_kv_heads * n_groups == n_heads) + seq_len = K1.shape[-2] + kv_seq_len = seq_len + 1 + + # Prefill phase + # if not hasattr(self, "paged_attention"): + if do_prefill: + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) + self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + # Only for Gemma2 + self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + + + self.half_head_dim = head_dim // 2 + elif kv_seq_len >= self.paged_attention.shape[0]: + self.paged_attention.resize_((self.paged_attention.shape[0]+KV_CACHE_INCREMENT, 2, bsz, n_kv_heads, head_dim)) + self.paged_attention_K = self.paged_attention[:,0] + self.paged_attention_V = self.paged_attention[:,1] + self.attention.resize_((bsz, n_heads, 1, 
self.attention.shape[-1]+KV_CACHE_INCREMENT)) + pass + + Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0]) + Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0]) + Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1]) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + + # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + cos, sin = position_embeddings + cos, sin = cos[position_ids], sin[position_ids] + h = self.half_head_dim + + RH_Q = self.RH_Q + RH_Q[:,:,:,:h] = Qn[:,:,:,h:] + RH_Q[:,:,:,h:] = Qn[:,:,:,:h] + torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + Qn *= cos + Qn.addcmul_(RH_Q, sin) + + RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + RH_K[:,:,:,:h] = Kn[:,:,:,h:] + RH_K[:,:,:,h:] = Kn[:,:,:,:h] + torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + Kn *= cos + Kn.addcmul_(RH_K, sin) + + # New KV cache + # Kn = torch.cat([K1, Kn], dim = 2) + # Vn = torch.cat([V1, Vn], dim = 2) + self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3) + self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3) + Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3) + Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3) + + # Grouped query attention + _, _, cached_len, _ = Kn.shape + if n_groups != 1: + Kn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Kn = Kn.reshape(bsz, n_heads, cached_len, head_dim) + Vn = Vn.reshape(bsz, n_heads, cached_len, head_dim) + pass + # else: + # Kn, Vn = Kn, Vn + # pass + + Qn *= self.scaling + A = torch_matmul(Qn, Kn.transpose(2, 3), out = self.attention[:,:,:,:cached_len]) + + # if attention_mask is not None: A += 
attention_mask # Must add attention_mask for batched + + A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) + A = torch_matmul(A, Vn, out = Qn) + # else: + # A = scaled_dot_product_attention(Qn, Kn, Vn, attn_mask = attention_mask, is_causal = False) + # pass + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, attention_size) + A = fast_linear_forward(self.o_proj, A, out = self.temp_O) + return A, (Kn, Vn) +pass + + +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825 +# @torch.inference_mode +def GraniteModel_fast_forward_inference( + self, + input_ids, + past_key_values, + position_ids, + attention_mask = None, +): + input_ids = input_ids[:,:self.max_seq_length] + hidden_states = self.model.embed_tokens(input_ids) + hidden_states = hidden_states.to(self.config.torch_dtype) + hidden_states *= self.model.embedding_multiplier + + bsz, q_len, hd = hidden_states.shape + seq_len = past_key_values[0][0].shape[-2] + if bsz != 1: + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (bsz, q_len), + hidden_states, + seq_len, + ) + else: + attention_mask = None + pass + + position_embeddings = self.model.rotary_emb(hidden_states, position_ids, self.max_seq_length) + + next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.model.layers): + + residual = hidden_states + hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) + hidden_states, present_key_value = GraniteAttention_fast_forward_inference( + decoder_layer.self_attn, + hidden_states = hidden_states, + past_key_value = past_key_values[idx], + position_ids = position_ids, + attention_mask = attention_mask, + do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), + position_embeddings = position_embeddings, + ) + + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + residual = hidden_states + 
hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) + hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) + hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + + next_decoder_cache.append(present_key_value) + pass + hidden_states = fast_rms_layernorm_inference(self.model.norm, hidden_states) + + return BaseModelOutputWithPast( + last_hidden_state = hidden_states, + past_key_values = next_decoder_cache, + hidden_states = [], + attentions = [], + ) +pass + +class GraniteRotaryEmbedding(LlamaRotaryEmbedding): + def __init__(self, config): + super().__init__(config = config) + +class FastGraniteModel(FastLlamaModel): + + @staticmethod + def pre_patch(): + init_name, function = patch_linear_scaling( + model_name = "granite", + rope_module = GraniteRotaryEmbedding, + scaled_rope_module = LlamaLinearScalingRotaryEmbedding, + attention_module = GraniteAttention, + ) + if init_name is not None: + exec(function, globals()) + GraniteAttention.__init__ = eval(init_name) + pass + GraniteAttention .forward = GraniteAttention_fast_forward + GraniteSdpaAttention .forward = GraniteAttention_fast_forward + GraniteFlashAttention2.forward = GraniteAttention_fast_forward + GraniteDecoderLayer .forward = GraniteDecoderLayer_fast_forward + GraniteModel .forward = LlamaModel_fast_forward + GraniteForCausalLM .forward = CausalLM_fast_forward(GraniteModel_fast_forward_inference) + PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward + fix_prepare_inputs_for_generation(GraniteForCausalLM) + + import transformers.models.granite.modeling_granite + transformers.models.granite.modeling_granite.GraniteRotaryEmbedding = GraniteRotaryEmbedding + + return + pass + + + @staticmethod + def post_patch(model): + + # Torch.compile fails on embedding matrix?? 
+ # Workaround randomnly fixes it for torch versions < 2.2 + model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + model.config.update({"unsloth_version" : __version__}) + + # We also do this for the lm_head + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.lm_head.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + + # Granite has tied weights! This means lm_head == embed_tokens + if model.model.embed_tokens.weight.data_ptr() != model.lm_head.weight.data_ptr(): + lm_head = torch.nn.Linear(1, 1, bias = None) + del lm_head.weight + lm_head.weight = model.model.embed_tokens.weight + lm_head.in_features = lm_head.weight.shape[1] + lm_head.out_features = lm_head.weight.shape[0] + model.lm_head = lm_head + pass + + # Also patch all dtypes - BnB seems to not allocate the correct type? + # BnB default dtype seems to be float16! + correct_dtype = lm_head.weight.dtype + + for name, module in model.named_modules(): + if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)): + weight = module.weight + quant_state = weight.quant_state + + if type(quant_state) is list: + # BnB seems to have float16 as default! 
+ module.weight.quant_state[2] = correct_dtype # Cast to correct dtype + else: + # https://github.com/TimDettmers/bitsandbytes/pull/763/files + quant_state.dtype = correct_dtype + pass + pass + # Downcast RoPE embedding to correct data type + if (name.endswith("rotary_emb") or hasattr(module, "cos_cached")): + + if hasattr(module, "cos_cached") and \ + (module.cos_cached.dtype != correct_dtype): + + module.cos_cached = module.cos_cached.to(correct_dtype) + module.sin_cached = module.sin_cached.to(correct_dtype) + + elif hasattr(module, "short_cos_cached") and \ + (module.short_cos_cached.dtype != correct_dtype): + + module.short_cos_cached = module.short_cos_cached.to(correct_dtype) + module.short_sin_cached = module.short_sin_cached.to(correct_dtype) + pass + pass + pass + + # Clear deleted GPU items + import gc + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + return model + pass +pass + diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1bffb0cb16..cc41a8b266 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -616,9 +616,10 @@ def LlamaModel_fast_forward( pass # Normalized from Gemma - IS_GEMMA = self.config.model_type.startswith("gemma") - IS_GEMMA2 = self.config.model_type.startswith("gemma2") - IS_COHERE = self.config.model_type.startswith("cohere") + IS_GEMMA = self.config.model_type.startswith("gemma") + IS_GEMMA2 = self.config.model_type.startswith("gemma2") + IS_COHERE = self.config.model_type.startswith("cohere") + IS_GRANITE = self.config.model_type.startswith("granite") train_embed_tokens = self.embed_tokens.weight.requires_grad if IS_GEMMA: @@ -684,6 +685,8 @@ def LlamaModel_fast_forward( pass hidden_states = inputs_embeds + if IS_GRANITE: #granite has embedding multiplier + hidden_states = self.embedding_multiplier * hidden_states if past_key_values is None and self.training: use_cache = False @@ -773,6 +776,12 @@ def LlamaModel_fast_forward( pass pass + + if IS_GRANITE: + position_embeddings = 
self.rotary_emb(hidden_states, position_ids, self.max_position_embeddings) + else: + position_embeddings = None + # Go through every layer! for idx, decoder_layer in enumerate(self.layers): @@ -797,12 +806,14 @@ def LlamaModel_fast_forward( past_key_values, output_attentions, use_cache, + None, + position_embeddings, )[0] elif gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask) + return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask, position_embeddings = position_embeddings) return custom_forward pass @@ -827,6 +838,7 @@ def custom_forward(*inputs): output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, + position_embeddings = position_embeddings ) hidden_states = layer_outputs[0] pass @@ -1014,6 +1026,15 @@ def _CausalLM_fast_forward( pass loss = None + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + logit_scaling = getattr(self.config, "logit_scale", 0) + if self.config.model_type == "granite": + # granite uses logit_scaling as key and they divide by the scale unlike cohere + # notice that for granite, logits_scale is 16 and for cohere it is 0.125 (aka 1/8) in their respective configs + # granite: https://github.com/huggingface/transformers/blob/4d1d0f29a493098e6bc6b904b82e29cb331827f5/src/transformers/models/granite/modeling_granite.py#L1103 + # cohere: https://github.com/huggingface/transformers/blob/4d1d0f29a493098e6bc6b904b82e29cb331827f5/src/transformers/models/cohere/modeling_cohere.py#L1176 + logit_scaling = 1 / getattr(self.config, "logits_scaling", 1) + if labels is not None: shift_logits = logits if not hasattr(self, "extra_ignored_labels"): @@ -2245,6 +2266,7 @@ def patch_peft_model( elif model_type == "gemma": apply_lora_mlp = apply_lora_mlp_geglu_approx elif model_type == "gemma2": apply_lora_mlp = apply_lora_mlp_geglu_approx elif model_type 
== "cohere": apply_lora_mlp = apply_lora_mlp_swiglu + elif model_type == "granite": apply_lora_mlp = apply_lora_mlp_swiglu else: raise NotImplementedError(f"Unsloth: {model_type} is not yet implemented!") pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f8ed3a87e6..3b2c8ffefe 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -13,6 +13,7 @@ # limitations under the License. from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING +from .granite import FastGraniteModel from .llama import FastLlamaModel, logger from .mistral import FastMistralModel from .qwen2 import FastQwen2Model @@ -38,6 +39,7 @@ SUPPORTS_GEMMA2 = transformers_version >= Version("4.42") SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2") SUPPORTS_LLAMA32 = transformers_version > Version("4.45.0") +SUPPORTS_GRANITE = transformers_version >= Version("4.46.0") if SUPPORTS_GEMMA: from .gemma import FastGemmaModel if SUPPORTS_GEMMA2: @@ -175,7 +177,7 @@ def from_pretrained( model_type = model_config.model_type - if model_type == "llama": + if model_type == "llama": scaling_type = None if getattr(model_config, "rope_scaling", None) is not None: scaling_type1 = model_config.rope_scaling.get("type", None) @@ -231,6 +233,8 @@ def from_pretrained( dispatch_model = FastQwen2Model elif model_type == "cohere": dispatch_model = FastCohereModel + elif model_type == "granite": + dispatch_model = FastGraniteModel else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 0ba03ce01c..41f7444643 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -516,6 +516,10 @@ "unsloth/QwQ-32B-Preview", "Qwen/QwQ-32B-Preview", ), + "unsloth/Llama-3.3-70B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.3-70B-Instruct", + "meta-llama/Llama-3.3-70B-Instruct", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py 
b/unsloth/models/vision.py index 0f682c6b40..a8ef9c0416 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -164,6 +164,8 @@ def from_pretrained( padding_side = "right", token = token, ) + # Add padding side as well + tokenizer.tokenizer.padding_side = "right" model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) @@ -187,6 +189,7 @@ def from_pretrained( # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference + tokenizer.tokenizer.padding_side = "left" # Force inference internal_model = model while hasattr(internal_model, "model"): internal_model._saved_temp_tokenizer = tokenizer @@ -315,12 +318,12 @@ def patch_peft_model( internal_model = model while hasattr(internal_model, "model"): if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Clear deleted GPU items @@ -361,12 +364,12 @@ def for_inference(model): internal_model = model while hasattr(internal_model, "model"): if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" pass # Also disable training for embeddings for NEFTune @@ -405,12 +408,12 @@ def for_training(model, use_gradient_checkpointing = True): internal_model = model while hasattr(internal_model, "model"): if hasattr(internal_model, 
"_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" + internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Also re-enable training for embeddings for NEFTune From c45a0c3e10f1fd487921de285fd33b4cd38f64ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 7 Dec 2024 00:15:48 -0800 Subject: [PATCH 0907/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8e9f09df2b..f1dc15aca4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.3" +__version__ = "2024.12.4" __all__ = [ "prepare_model_for_kbit_training", From 58c0333fb902c43a3215a9739261d88aabf6a640 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sat, 7 Dec 2024 14:24:48 -0800 Subject: [PATCH 0908/1088] Update README.md Llama 3.3 + Reddit --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b477419864..0b3b50583f 100644 --- a/README.md +++ b/README.md @@ -41,15 +41,16 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is now supported. - 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! 
We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) - 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. -- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM!
    Click for more news - + - 📣 Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! +- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! @@ -66,6 +67,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) | 🌐 **Released Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| +|   **Reddit** | [Join our Reddit page](https://reddit.com/r/unsloth)| ## ⭐ Key Features - All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. 
From 7763f0048ac78ea23666bfdbcc1d6834488c45f9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 9 Dec 2024 14:19:56 -0800 Subject: [PATCH 0909/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cc41a8b266..38ad6062db 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1570,7 +1570,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ From 6322cb1385ff468123afe0eebd11353fd284eae9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 10 Dec 2024 02:46:14 -0800 Subject: [PATCH 0910/1088] Update llama.py --- unsloth/models/llama.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 38ad6062db..cfeeab0800 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -993,6 +993,9 @@ def _CausalLM_fast_forward( logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" + # < 1024 Normal Unsloth uses less VRAM! 
+ if bsz*q_len <= 1024: RETURN_LOGITS = True + if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( From 9c9547d4c546a6ab61d74d86270a8a72834a5ed9 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 10 Dec 2024 12:15:03 -0800 Subject: [PATCH 0911/1088] Update README.md Apple ML Cross Entropy --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 0b3b50583f..fcd7d73d13 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ## 🦥 Unsloth.ai News - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is now supported. +- 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. - 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) - 📣 NEW! 
Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) @@ -483,7 +484,20 @@ You can cite the Unsloth repo as follows: } ``` +### Citation +If you would like to cite Unsloth, you can use the following format: +``` +@misc{han2023unsloth, + author = {Daniel Han and Michael Han}, + title = {Unsloth AI: Finetune LLMs 2x faster 2-5x faster with 80\% less memory}, + year = {2023}, + url = {https://github.com/unslothai/unsloth}, + note = {GitHub repository} +} +``` + ### Thank You to +- [Erik](https://github.com/erikwijmans) for his help adding [Apple's ML Cross Entropy](https://github.com/apple/ml-cross-entropy) in Unsloth - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support - [152334H](https://github.com/152334H) for experimental DPO support From 138e9b9a8470974dcf7468fba6145e11ed5b1552 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 10 Dec 2024 14:43:27 -0800 Subject: [PATCH 0912/1088] Update README.md Removing double citation --- README.md | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/README.md b/README.md index fcd7d73d13..6bff98cbda 100644 --- a/README.md +++ b/README.md @@ -472,7 +472,7 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    -### Citing +### Citation You can cite the Unsloth repo as follows: ```bibtex @@ -484,18 +484,6 @@ You can cite the Unsloth repo as follows: } ``` -### Citation -If you would like to cite Unsloth, you can use the following format: -``` -@misc{han2023unsloth, - author = {Daniel Han and Michael Han}, - title = {Unsloth AI: Finetune LLMs 2x faster 2-5x faster with 80\% less memory}, - year = {2023}, - url = {https://github.com/unslothai/unsloth}, - note = {GitHub repository} -} -``` - ### Thank You to - [Erik](https://github.com/erikwijmans) for his help adding [Apple's ML Cross Entropy](https://github.com/apple/ml-cross-entropy) in Unsloth - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) From f64e6987829b059cd35c0e9ac52779acf1405ae3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 12 Dec 2024 01:14:27 -0800 Subject: [PATCH 0913/1088] fullgraph --- unsloth/models/_utils.py | 2 ++ unsloth/models/loader.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f1dc15aca4..805e373447 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1127,6 +1127,7 @@ def unsloth_compile_transformers( shape_padding = True, cudagraphs = False, debug = False, + fullgraph = True, import_from_cache = False, disable = False, return_logits = False, @@ -1170,6 +1171,7 @@ def unsloth_compile_transformers( shape_padding = shape_padding, cudagraphs = cudagraphs, debug = debug, + fullgraph = fullgraph, import_from_cache = import_from_cache, disable = disable, return_logits = return_logits, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 3b2c8ffefe..c2b68fdc4b 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -352,6 +352,7 @@ def from_pretrained( resize_model_vocab = None, # [TODO] No effect revision = None, return_logits = False, # Return logits + fullgraph = True, # No graph 
breaks *args, **kwargs, ): if token is None: token = get_token() @@ -473,6 +474,7 @@ def from_pretrained( shape_padding = True, cudagraphs = False, debug = False, + fullgraph = fullgraph, import_from_cache = False, disable = False, return_logits = return_logits, From c7bf9d55ff4093443290dd326b1662f4c16a0a0e Mon Sep 17 00:00:00 2001 From: Scott Phillips Date: Fri, 20 Dec 2024 05:20:15 -0500 Subject: [PATCH 0914/1088] Fix loader.py to work on Windows (#1453) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Fix loader.py to work on Windows --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 10 +++++++--- unsloth/models/loader.py | 3 ++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b477419864..6bff98cbda 100644 --- a/README.md +++ b/README.md @@ -41,15 +41,17 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is now supported. +- 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. - 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 NEW! 
[Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) - 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) - 📣 NEW! We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. -- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM!
    Click for more news - + - 📣 Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! +- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! - 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. - 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! @@ -66,6 +68,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) | 🌐 **Released Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| +|   **Reddit** | [Join our Reddit page](https://reddit.com/r/unsloth)| ## ⭐ Key Features - All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. @@ -469,7 +472,7 @@ Two Tesla T4s on Kaggle ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    -### Citing +### Citation You can cite the Unsloth repo as follows: ```bibtex @@ -482,6 +485,7 @@ You can cite the Unsloth repo as follows: ``` ### Thank You to +- [Erik](https://github.com/erikwijmans) for his help adding [Apple's ML Cross Entropy](https://github.com/apple/ml-cross-entropy) in Unsloth - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support - [152334H](https://github.com/152334H) for experimental DPO support diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index c2b68fdc4b..5ecd667f51 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -131,7 +131,8 @@ def from_pretrained( exist_config = os.path.exists(os.path.join(model_name, "config.json")) both_exist = exist_adapter_config and exist_config else: - files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + # Because HfFileSystem assumes linux paths, we need to set the path with forward slashes, even on Windows. 
+ files = HfFileSystem(token = token).glob(f"{model_name}/*.json") files = (os.path.split(x)[-1] for x in files) if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: both_exist = True From e49e5e1e7d6e0c387f2d1a83bdd8a46968bec7f4 Mon Sep 17 00:00:00 2001 From: qingy1337 Date: Fri, 20 Dec 2024 02:22:27 -0800 Subject: [PATCH 0915/1088] Update save.py warning message (#1425) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Update save.py warning message --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index cf78bf5897..027d88b2fc 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -577,7 +577,7 @@ def unsloth_save_model( # max_ram = max(max_ram - W.nbytes, 0) else: # Save to Disk - logger.warning_once("We will save to Disk and not RAM now.") + logger.warning_once("\nWe will save to Disk and not RAM now.") filename = os.path.join(temporary_location, f"{name}.pt") torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) # weights_only = True weirdly fails? 
From 9ef3c7a45541b690b0074218e0255cc68018613d Mon Sep 17 00:00:00 2001 From: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:23:30 +0100 Subject: [PATCH 0916/1088] Change _fix_chat_template in case a template has both endif and endfor (#1388) --- unsloth/tokenizer_utils.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 302017d566..384b4bbca5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -585,26 +585,43 @@ def load_correct_tokenizer( pass +def _find_end_position(template, endfor, endif): + where_endfor = template.find(endfor) + where_endif = template.find(endif) + if where_endfor == where_endif == -1: + return None + elif where_endfor > where_endif: + return endfor + else: + return endif + pass +pass + + def _fix_chat_template(chat_template): - endfor = "{% endif %}" - where = chat_template.find(endfor) - if where == -1: - endfor = "{%- endif %}" - where = chat_template.find(endfor) - if where == -1: + endfor = "{% endfor %}" + endif = "{% endif %}" + chosen_end = _find_end_position(chat_template, endfor, endif) + if chosen_end is None: + endfor = "{%- endfor %}" + endif = "{%- endif %}" + chosen_end = _find_end_position(chat_template, endfor, endif) + if chosen_end is None: return chat_template + + where = chat_template.find(chosen_end) - after_endfor = chat_template[where + len(endfor):] + after_endfor = chat_template[where + len(chosen_end):] - dash = "-" if endfor.startswith("{%-") else "" + dash = "-" if chosen_end.startswith("{%-") else "" if "{%" + dash + " if" not in after_endfor and "{%" + dash + " set " not in after_endfor and \ after_endfor.startswith("{{") and after_endfor.endswith("}}") and \ after_endfor.count("{{") == 1 and after_endfor.count("}}") == 1: - after_endfor = "{%" + dash + " if add_generation_prompt %}" + after_endfor + endfor + after_endfor = "{%" 
+ dash + " if add_generation_prompt %}" + after_endfor + endif - chat_template = chat_template[:where + len(endfor)] + after_endfor + chat_template = chat_template[:where + len(chosen_end)] + after_endfor pass return chat_template pass From 0671dbdc9270c7473f36a0c65a5526835365fbba Mon Sep 17 00:00:00 2001 From: Datta Nimmaturi Date: Fri, 20 Dec 2024 16:05:42 +0530 Subject: [PATCH 0917/1088] Update llama and derivatives to pass position embeddings explicitly for transformers v4.47+ (#1442) --- unsloth/models/cohere.py | 7 ++++--- unsloth/models/llama.py | 41 +++++++++++++++++++++++++-------------- unsloth/models/mistral.py | 1 + 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py index aa0bcb55ee..cbbebeec5c 100644 --- a/unsloth/models/cohere.py +++ b/unsloth/models/cohere.py @@ -75,6 +75,7 @@ def CohereAttention_fast_forward( output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -112,12 +113,11 @@ def CohereAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + cos, sin = position_embeddings if position_ids is None: - cos = self.rotary_emb.cos_cached - sin = self.rotary_emb.sin_cached Q, K = fast_rope_embedding(Q, K, cos, sin) else: - cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + cos, sin = cos[position_ids], sin[position_ids] Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) pass @@ -190,6 +190,7 @@ def CohereDecoderLayer_fast_forward( output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ): if use_cache and hasattr(self, "_flag_for_generation"): 
#past_key_value is not None: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cfeeab0800..03336c134a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -337,6 +337,7 @@ def LlamaAttention_fast_forward( output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -368,20 +369,24 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] - # Extend RoPE dynamically to fit in VRAM - rotary_emb = self.rotary_emb - rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) - - if position_ids is None: - # Useful for LongRoPE - cos, sin = rotary_emb.get_cached(kv_seq_len) - # cos = self.rotary_emb.cos_cached - # sin = self.rotary_emb.sin_cached - Q, K = fast_rope_embedding(Q, K, cos, sin) + if position_embeddings: + cos, sin = position_embeddings else: - cos, sin = rotary_emb(V, seq_len = kv_seq_len) - Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) - pass + # Extend RoPE dynamically to fit in VRA + rotary_emb = self.rotary_emb + rotary_emb.extend_rope_embedding(V, seq_len=kv_seq_len) + + if position_ids is None: + # Useful for LongRoPE + cos, sin = rotary_emb.get_cached(kv_seq_len) + else: + cos, sin = rotary_emb(V, seq_len=kv_seq_len) + + Q, K = ( + fast_rope_embedding(Q, K, cos, sin) + if position_ids is None + else inplace_rope_embedding(Q, K, cos, sin, position_ids) + ) if past_key_value is not None: K = torch.cat([past_key_value[0], K], dim = 2) @@ -452,6 +457,7 @@ def LlamaDecoderLayer_fast_forward( output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.FloatTensor, 
Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ @@ -479,6 +485,7 @@ def LlamaDecoderLayer_fast_forward( output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, + position_embeddings = position_embeddings, ) hidden_states += residual @@ -499,6 +506,7 @@ def LlamaDecoderLayer_fast_forward( output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, + position_embeddings = position_embeddings, ) hidden_states = residual + hidden_states @@ -777,8 +785,11 @@ def LlamaModel_fast_forward( pass - if IS_GRANITE: - position_embeddings = self.rotary_emb(hidden_states, position_ids, self.max_position_embeddings) + if transformers_version > "4.47.1" and hasattr(self,'rotary_emb'): + # Transformers main has made it mandatory to pass position_embeddings + # https://github.com/huggingface/transformers/pull/34858 + position_embeddings = self.rotary_emb(hidden_states, position_ids, self.config.max_position_embeddings) + print(f'position_embeddings: {position_embeddings}') else: position_embeddings = None diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 00dcc5cd1d..cdc73c7610 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -47,6 +47,7 @@ def MistralAttention_fast_forward( output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: From 1a9e6d25bcaf3715fca0eb06c20d3459d4af5260 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:40:42 -0800 Subject: [PATCH 0918/1088] Update save.py --- unsloth/save.py | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index 027d88b2fc..ce5ee5d38e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2129,17 +2129,31 @@ 
def unsloth_generic_save( ): if token is None and push_to_hub: token = get_token() - merge_and_overwrite_lora( - get_model_name, - create_huggingface_repo, - model, - save_location = save_directory, - push_to_hub = push_to_hub, - token = token, - upload_location = save_directory if push_to_hub else None, - low_disk_space_usage = True, - private = private, - ) + import unsloth_zoo + if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): + merge_and_overwrite_lora( + get_model_name, + create_huggingface_repo, + model, + save_location = save_directory, + push_to_hub = push_to_hub, + token = token, + upload_location = save_directory if push_to_hub else None, + low_disk_space_usage = True, + private = private, + ) + else: + merge_and_overwrite_lora( + get_model_name, + model, + save_directory = save_directory, + push_to_hub = push_to_hub, + private = private, + token = token, + low_disk_space_usage = False, + use_temp_file = False, + ) + pass return pass From 541018e33a3fcfa0c2393ffdd59c643fb4df438a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:45:43 -0800 Subject: [PATCH 0919/1088] Update llama.py --- unsloth/models/llama.py | 79 ++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 03336c134a..169bfca07a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -329,14 +329,14 @@ def fast_layernorm_compiled(layernorm, X): # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, - padding_mask: 
Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -449,14 +449,14 @@ def LlamaAttention_fast_forward( # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 def LlamaDecoderLayer_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - padding_mask: Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: @@ -477,14 +477,14 @@ def LlamaDecoderLayer_fast_forward( residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - 
position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states = hidden_states, + causal_mask = causal_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, position_embeddings = position_embeddings, ) hidden_states += residual @@ -498,14 +498,14 @@ def LlamaDecoderLayer_fast_forward( residual = hidden_states hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states = hidden_states, + causal_mask = causal_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, position_embeddings = position_embeddings, ) hidden_states = residual + hidden_states @@ -785,11 +785,10 @@ def LlamaModel_fast_forward( pass - if transformers_version > "4.47.1" and hasattr(self,'rotary_emb'): + if transformers_version > "4.47.1" and hasattr(self, "rotary_emb"): # Transformers main has made it mandatory to pass position_embeddings # https://github.com/huggingface/transformers/pull/34858 position_embeddings = self.rotary_emb(hidden_states, position_ids, self.config.max_position_embeddings) - print(f'position_embeddings: {position_embeddings}') else: position_embeddings = None @@ -843,12 +842,12 @@ def custom_forward(*inputs): layer_outputs = decoder_layer( hidden_states, causal_mask=mask, - attention_mask=attention_mask, - position_ids=position_ids, - 
past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, position_embeddings = position_embeddings ) hidden_states = layer_outputs[0] From 044cb39ec4dfef8a2aa3e8744fcab082230d3349 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:46:29 -0800 Subject: [PATCH 0920/1088] Update mistral.py --- unsloth/models/mistral.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index cdc73c7610..dda430459a 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -39,14 +39,14 @@ def MistralAttention_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: From eb562fef8c5daa431631adfa4c6d5c669057beff Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:47:32 -0800 Subject: [PATCH 0921/1088] Update llama.py --- 
unsloth/models/llama.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 169bfca07a..26b9e07a27 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1055,6 +1055,7 @@ def _CausalLM_fast_forward( self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass + print(kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None)) shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, From a41e434691e8964f7c890c37fcbcdf71910d8f0c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:50:23 -0800 Subject: [PATCH 0922/1088] Update llama.py --- unsloth/models/llama.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 26b9e07a27..58b572d9b8 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1055,7 +1055,10 @@ def _CausalLM_fast_forward( self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass - print(kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None)) + print(locals()) + print(args) + print(kwargs) + raise shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, From dd2a1a13b18505dad521dff10b5815b24ff38706 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:53:45 -0800 Subject: [PATCH 0923/1088] Update llama.py --- unsloth/models/llama.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 58b572d9b8..d536ced541 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1054,11 +1054,6 @@ def _CausalLM_fast_forward( # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = 
torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass - - print(locals()) - print(args) - print(kwargs) - raise shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, @@ -1119,6 +1114,8 @@ def PeftModelForCausalLM_fast_forward( num_logits_to_keep=0, **kwargs, ): + print(kwargs) + raise return self.base_model( input_ids=input_ids, causal_mask=causal_mask, From 197c253416e9f5902b1c05586238b3f624aa45fa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 02:59:27 -0800 Subject: [PATCH 0924/1088] Update llama.py --- unsloth/models/llama.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index d536ced541..b88ed637c2 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1114,8 +1114,6 @@ def PeftModelForCausalLM_fast_forward( num_logits_to_keep=0, **kwargs, ): - print(kwargs) - raise return self.base_model( input_ids=input_ids, causal_mask=causal_mask, From 7425bbbd88d5bf6ff5e3efe66f490359662242e8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:01:28 -0800 Subject: [PATCH 0925/1088] Update llama.py --- unsloth/models/llama.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b88ed637c2..dc78547ddc 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1008,6 +1008,7 @@ def _CausalLM_fast_forward( if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + print(n_items) loss = fused_linear_cross_entropy( hidden_states = hidden_states, lm_weight = lm_head, From ba4a6cc27795cc2b968d8c2e7f7addf074a73945 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:03:26 -0800 Subject: [PATCH 0926/1088] Update llama.py --- unsloth/models/llama.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index dc78547ddc..3e67b4e89a 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -784,7 +784,6 @@ def LlamaModel_fast_forward( pass pass - if transformers_version > "4.47.1" and hasattr(self, "rotary_emb"): # Transformers main has made it mandatory to pass position_embeddings # https://github.com/huggingface/transformers/pull/34858 @@ -848,7 +847,7 @@ def custom_forward(*inputs): output_attentions = output_attentions, use_cache = use_cache, padding_mask = padding_mask, - position_embeddings = position_embeddings + position_embeddings = position_embeddings, ) hidden_states = layer_outputs[0] pass @@ -1008,7 +1007,6 @@ def _CausalLM_fast_forward( if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) - print(n_items) loss = fused_linear_cross_entropy( hidden_states = hidden_states, lm_weight = lm_head, @@ -1056,6 +1054,7 @@ def _CausalLM_fast_forward( self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + print(kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None)) loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, From c55191bc724e24b9c5bad8dd2427fbb446e673ad Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:07:04 -0800 Subject: [PATCH 0927/1088] Temp fix --- pyproject.toml | 4 ++-- unsloth/models/llama.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 51c3037235..49347c8ab0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ huggingface = [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -247,7 +247,7 @@ colab-new 
= [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3e67b4e89a..f8fb7d945d 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1054,7 +1054,6 @@ def _CausalLM_fast_forward( self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - print(kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None)) loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, From f7ab1ca028a458e5cd7dbde3d4568bc47136edca Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:08:59 -0800 Subject: [PATCH 0928/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 805e373447..5b5ceefc66 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.4" +__version__ = "2024.12.5" __all__ = [ "prepare_model_for_kbit_training", From 135c25aeb701ba000b19f731902a818d85b62d2e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:09:59 -0800 Subject: [PATCH 0929/1088] Bug fixes (#1458) * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * 
Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update 
loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules * Fix vision model tokenizer padding side. 
(#1384) * Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than 
max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. 
Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update 
_utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame 
<122996026+dame-cell@users.noreply.github.com> * Update README.md Unsloth Dynamic 4-bit Quantization Update * Fix vision model tokenizer padding side. * Update vision.py --------- Co-authored-by: Daniel Han Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Add citation section to README.md (#1377) * Add citation section to README.md * Update README.md --------- Co-authored-by: Daniel Han * Granite support (#1218) * [WIP] Support for Granite * Fixup inference * Cleanup flex attention * remove sliding window * Use torch.add for residual multiplier * Llama 3.3 * Update llama.py * Update llama.py * fullgraph * Fix loader.py to work on Windows (#1453) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Fix loader.py to work on Windows --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update save.py warning message (#1425) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Update save.py warning message --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Change _fix_chat_template in case a template has both endif and endfor (#1388) * Update llama and derivatives to pass position embeddings explicitly for transformers v4.47+ (#1442) * Update save.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Temp fix * Update _utils.py --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell 
Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Zewen Shen Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Scott Phillips Co-authored-by: qingy1337 Co-authored-by: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> --- pyproject.toml | 4 +- unsloth/models/_utils.py | 4 +- unsloth/models/cohere.py | 7 ++- unsloth/models/llama.py | 125 ++++++++++++++++++++----------------- unsloth/models/loader.py | 5 +- unsloth/models/mistral.py | 17 ++--- unsloth/save.py | 38 +++++++---- unsloth/tokenizer_utils.py | 37 ++++++++--- 8 files changed, 143 insertions(+), 94 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 51c3037235..49347c8ab0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ huggingface = [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -247,7 +247,7 @@ colab-new = [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f1dc15aca4..5b5ceefc66 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.4" +__version__ = "2024.12.5" __all__ = [ "prepare_model_for_kbit_training", @@ -1127,6 +1127,7 @@ def unsloth_compile_transformers( shape_padding = True, cudagraphs = False, debug = False, + fullgraph = True, import_from_cache = False, disable = False, return_logits = False, @@ -1170,6 +1171,7 @@ def unsloth_compile_transformers( shape_padding = shape_padding, cudagraphs = cudagraphs, debug = debug, + fullgraph = fullgraph, import_from_cache = import_from_cache, disable = disable, return_logits = return_logits, diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py index aa0bcb55ee..cbbebeec5c 100644 --- a/unsloth/models/cohere.py +++ b/unsloth/models/cohere.py @@ -75,6 +75,7 @@ def CohereAttention_fast_forward( output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -112,12 +113,11 @@ def CohereAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] + cos, sin = position_embeddings if position_ids is None: - cos = self.rotary_emb.cos_cached - sin = self.rotary_emb.sin_cached Q, K = fast_rope_embedding(Q, K, cos, sin) else: - cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + cos, sin = cos[position_ids], sin[position_ids] Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) pass @@ -190,6 +190,7 @@ def CohereDecoderLayer_fast_forward( output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ): if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cc41a8b266..f8fb7d945d 100644 --- 
a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -329,14 +329,15 @@ def fast_layernorm_compiled(layernorm, X): # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -368,20 +369,24 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] - # Extend RoPE dynamically to fit in VRAM - rotary_emb = self.rotary_emb - rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) - - if position_ids is None: - # Useful for LongRoPE - cos, sin = rotary_emb.get_cached(kv_seq_len) - # cos = self.rotary_emb.cos_cached - # sin = self.rotary_emb.sin_cached - Q, K = fast_rope_embedding(Q, K, cos, sin) + if position_embeddings: + cos, sin = position_embeddings else: - cos, sin = rotary_emb(V, seq_len = kv_seq_len) - Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) - pass + # Extend RoPE dynamically to fit in VRA + rotary_emb = self.rotary_emb + rotary_emb.extend_rope_embedding(V, seq_len=kv_seq_len) + + if 
position_ids is None: + # Useful for LongRoPE + cos, sin = rotary_emb.get_cached(kv_seq_len) + else: + cos, sin = rotary_emb(V, seq_len=kv_seq_len) + + Q, K = ( + fast_rope_embedding(Q, K, cos, sin) + if position_ids is None + else inplace_rope_embedding(Q, K, cos, sin, position_ids) + ) if past_key_value is not None: K = torch.cat([past_key_value[0], K], dim = 2) @@ -444,14 +449,15 @@ def LlamaAttention_fast_forward( # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590 def LlamaDecoderLayer_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: Optional[bool] = False, - use_cache: Optional[bool] = False, - padding_mask: Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ @@ -471,14 +477,15 @@ def LlamaDecoderLayer_fast_forward( residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states = hidden_states, + causal_mask = causal_mask, + attention_mask = 
attention_mask, + position_ids = position_ids, + past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, + position_embeddings = position_embeddings, ) hidden_states += residual @@ -491,14 +498,15 @@ def LlamaDecoderLayer_fast_forward( residual = hidden_states hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, + hidden_states = hidden_states, + causal_mask = causal_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, + position_embeddings = position_embeddings, ) hidden_states = residual + hidden_states @@ -776,9 +784,10 @@ def LlamaModel_fast_forward( pass pass - - if IS_GRANITE: - position_embeddings = self.rotary_emb(hidden_states, position_ids, self.max_position_embeddings) + if transformers_version > "4.47.1" and hasattr(self, "rotary_emb"): + # Transformers main has made it mandatory to pass position_embeddings + # https://github.com/huggingface/transformers/pull/34858 + position_embeddings = self.rotary_emb(hidden_states, position_ids, self.config.max_position_embeddings) else: position_embeddings = None @@ -832,13 +841,13 @@ def custom_forward(*inputs): layer_outputs = decoder_layer( hidden_states, causal_mask=mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_value=past_key_value, - output_attentions=output_attentions, - use_cache=use_cache, - padding_mask=padding_mask, - position_embeddings = position_embeddings + attention_mask = attention_mask, + position_ids = position_ids, + 
past_key_value = past_key_value, + output_attentions = output_attentions, + use_cache = use_cache, + padding_mask = padding_mask, + position_embeddings = position_embeddings, ) hidden_states = layer_outputs[0] pass @@ -993,6 +1002,9 @@ def _CausalLM_fast_forward( logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" + # < 1024 Normal Unsloth uses less VRAM! + if bsz*q_len <= 1024: RETURN_LOGITS = True + if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( @@ -1041,7 +1053,6 @@ def _CausalLM_fast_forward( # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, @@ -1570,7 +1581,7 @@ def from_pretrained( max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers:{transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 3b2c8ffefe..5ecd667f51 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -131,7 +131,8 @@ def from_pretrained( exist_config = os.path.exists(os.path.join(model_name, "config.json")) both_exist = exist_adapter_config and exist_config else: - files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + # Because HfFileSystem assumes linux paths, we need to set the path with forward slashes, even on Windows. + files = HfFileSystem(token = token).glob(f"{model_name}/*.json") files = (os.path.split(x)[-1] for x in files) if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: both_exist = True @@ -352,6 +353,7 @@ def from_pretrained( resize_model_vocab = None, # [TODO] No effect revision = None, return_logits = False, # Return logits + fullgraph = True, # No graph breaks *args, **kwargs, ): if token is None: token = get_token() @@ -473,6 +475,7 @@ def from_pretrained( shape_padding = True, cudagraphs = False, debug = False, + fullgraph = fullgraph, import_from_cache = False, disable = False, return_logits = return_logits, diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 00dcc5cd1d..dda430459a 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -39,14 +39,15 @@ def MistralAttention_fast_forward( self, - hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: 
Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: diff --git a/unsloth/save.py b/unsloth/save.py index cf78bf5897..ce5ee5d38e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -577,7 +577,7 @@ def unsloth_save_model( # max_ram = max(max_ram - W.nbytes, 0) else: # Save to Disk - logger.warning_once("We will save to Disk and not RAM now.") + logger.warning_once("\nWe will save to Disk and not RAM now.") filename = os.path.join(temporary_location, f"{name}.pt") torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) # weights_only = True weirdly fails? @@ -2129,17 +2129,31 @@ def unsloth_generic_save( ): if token is None and push_to_hub: token = get_token() - merge_and_overwrite_lora( - get_model_name, - create_huggingface_repo, - model, - save_location = save_directory, - push_to_hub = push_to_hub, - token = token, - upload_location = save_directory if push_to_hub else None, - low_disk_space_usage = True, - private = private, - ) + import unsloth_zoo + if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): + merge_and_overwrite_lora( + get_model_name, + create_huggingface_repo, + model, + save_location = save_directory, + push_to_hub = push_to_hub, + token = token, + upload_location = save_directory if push_to_hub else None, + low_disk_space_usage = True, + private = private, + ) + else: + merge_and_overwrite_lora( + get_model_name, + model, + save_directory = save_directory, + push_to_hub = push_to_hub, + private = private, + token = token, + low_disk_space_usage = False, + use_temp_file = False, + ) + pass return pass diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 
302017d566..384b4bbca5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -585,26 +585,43 @@ def load_correct_tokenizer( pass +def _find_end_position(template, endfor, endif): + where_endfor = template.find(endfor) + where_endif = template.find(endif) + if where_endfor == where_endif == -1: + return None + elif where_endfor > where_endif: + return endfor + else: + return endif + pass +pass + + def _fix_chat_template(chat_template): - endfor = "{% endif %}" - where = chat_template.find(endfor) - if where == -1: - endfor = "{%- endif %}" - where = chat_template.find(endfor) - if where == -1: + endfor = "{% endfor %}" + endif = "{% endif %}" + chosen_end = _find_end_position(chat_template, endfor, endif) + if chosen_end is None: + endfor = "{%- endfor %}" + endif = "{%- endif %}" + chosen_end = _find_end_position(chat_template, endfor, endif) + if chosen_end is None: return chat_template + + where = chat_template.find(chosen_end) - after_endfor = chat_template[where + len(endfor):] + after_endfor = chat_template[where + len(chosen_end):] - dash = "-" if endfor.startswith("{%-") else "" + dash = "-" if chosen_end.startswith("{%-") else "" if "{%" + dash + " if" not in after_endfor and "{%" + dash + " set " not in after_endfor and \ after_endfor.startswith("{{") and after_endfor.endswith("}}") and \ after_endfor.count("{{") == 1 and after_endfor.count("}}") == 1: - after_endfor = "{%" + dash + " if add_generation_prompt %}" + after_endfor + endfor + after_endfor = "{%" + dash + " if add_generation_prompt %}" + after_endfor + endif - chat_template = chat_template[:where + len(endfor)] + after_endfor + chat_template = chat_template[:where + len(chosen_end)] + after_endfor pass return chat_template pass From 1df4df41c2e2989ddba1fc33b7a5212242c20924 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:24:17 -0800 Subject: [PATCH 0930/1088] Typo --- unsloth/models/_utils.py | 2 +- unsloth/save.py | 3 ++- 2 files changed, 3 
insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5b5ceefc66..19d1f61490 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.5" +__version__ = "2024.12.6" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/save.py b/unsloth/save.py index ce5ee5d38e..e76cb0ccbd 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2095,7 +2095,6 @@ def unsloth_convert_lora_to_ggml_and_save_locally( pass -from unsloth_zoo.peft_utils import merge_and_overwrite_lora from .models.loader_utils import get_model_name @torch.inference_mode @@ -2131,6 +2130,7 @@ def unsloth_generic_save( import unsloth_zoo if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): + from unsloth_zoo.peft_utils import merge_and_overwrite_lora merge_and_overwrite_lora( get_model_name, create_huggingface_repo, @@ -2143,6 +2143,7 @@ def unsloth_generic_save( private = private, ) else: + from unsloth_zoo.saving_utils import merge_and_overwrite_lora merge_and_overwrite_lora( get_model_name, model, From ae3b6c7c9108cdb1cd9fbeacca3d72fcf50b5077 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:26:55 -0800 Subject: [PATCH 0931/1088] Typo --- unsloth/models/_utils.py | 2 +- unsloth/save.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 19d1f61490..6b029c67ae 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.6" +__version__ = "2024.12.7" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/save.py b/unsloth/save.py index e76cb0ccbd..ea2d30972e 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2130,7 +2130,7 @@ def unsloth_generic_save( import unsloth_zoo if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): - from unsloth_zoo.peft_utils import merge_and_overwrite_lora + from unsloth_zoo.peft_utils import merge_and_overwrite_lora merge_and_overwrite_lora( get_model_name, create_huggingface_repo, From 79db7130734056928fc25e38a0fde64b72d5615e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 20 Dec 2024 03:30:01 -0800 Subject: [PATCH 0932/1088] Bug fix --- unsloth/models/_utils.py | 2 +- unsloth/models/vision.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6b029c67ae..a02cadd08a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.7" +__version__ = "2024.12.8" __all__ = [ "prepare_model_for_kbit_training", diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index a8ef9c0416..709cd1cb5c 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -29,7 +29,6 @@ from transformers import set_seed as transformers_set_seed from unsloth_zoo.peft_utils import ( get_peft_regex, - merge_and_overwrite_lora, SKIP_QUANTIZATION_MODULES, ) from triton import __version__ as triton_version From 73980fab8bd434bd2f150bcd72d7f5ba7598262e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 21 Dec 2024 01:11:30 -0800 Subject: [PATCH 0933/1088] Update _utils.py --- unsloth/models/_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a02cadd08a..d2bc4af2fc 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1086,6 +1086,14 @@ def patch_gradient_accumulation_fix(Trainer): "if num_items_in_batch is not None: loss *= self.args.gradient_accumulation_steps", ) function = function.replace("def training_step", "def _unsloth_training_step", 1) + + # Fix 4.47.0 issue where num_items_in_batch was removed + # See https://github.com/huggingface/transformers/pull/35121 + function = function.replace( + "if self.model_accepts_loss_kwargs:", + "if False:", + ) + exec(function, globals()) Trainer.training_step = _unsloth_training_step pass From b753a805edb8b2f1940657116e4872d9840995cf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 21 Dec 2024 01:24:09 -0800 Subject: [PATCH 0934/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 49347c8ab0..51c3037235 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ huggingface = [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", 
"tqdm", @@ -247,7 +247,7 @@ colab-new = [ "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", From 3e8f935177d732f57b213743a769f795a4346bf1 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 23 Dec 2024 16:22:36 +0900 Subject: [PATCH 0935/1088] Name Error Bug Fix - import from packaging.version import Version (#1468) --- unsloth/save.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/save.py b/unsloth/save.py index ea2d30972e..11e1e7a3fb 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from packaging.version import Version from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit from peft.tuners.lora import Linear4bit as Peft_Linear4bit from peft.tuners.lora import Linear as Peft_Linear From 2678ad22fd8596b83fb247460274d4e2a82f8028 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:07:51 -0800 Subject: [PATCH 0936/1088] Version --- pyproject.toml | 166 +++++++++++++++++++++++++-------------- unsloth/models/_utils.py | 3 +- unsloth/models/loader.py | 2 +- unsloth/save.py | 41 +++------- 4 files changed, 123 insertions(+), 89 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 51c3037235..6b15ce5659 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,86 +51,110 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", 
- "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers 
@ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system 
== "Linux"', ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', ] cu124onlytorch250 = [ - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', +] +cu121onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; 
python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', +] +cu124onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', ] cu118 = [ "unsloth[huggingface]", @@ -207,6 +231,16 @@ cu124-torch250 = [ 
"bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] +cu121-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", +] +cu124-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", +] kaggle = [ "unsloth[huggingface]", ] @@ -244,10 +278,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -263,8 +297,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers<0.0.27", - "bitsandbytes>=0.43.3", + "xformers", + "bitsandbytes>=0.46.1", "protobuf<4.0.0", ] colab = [ @@ -380,6 +414,22 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] +cu121-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d2bc4af2fc..417633ee6e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -72,7 +72,7 @@ platform_system = platform_system() import numpy as np import warnings, subprocess, re, inspect, psutil, os, math -from packaging.version import Version +from unsloth_zoo.utils import Version from unsloth_zoo.tokenizer_utils import ( patch_tokenizer as _patch_tokenizer, @@ -403,6 +403,7 @@ def _is_openai_available(): return False # Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout' accelerate_old_send_to_device = None accelerate_new_send_to_device = None +print(406, xformers_version) if 
Version(xformers_version) >= Version("0.0.27"): import accelerate.utils.operations if hasattr(accelerate.utils.operations, "send_to_device") and \ diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 5ecd667f51..9c5ea5baca 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -32,7 +32,7 @@ from huggingface_hub import HfFileSystem # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! -from packaging.version import Version +from unsloth_zoo.utils import Version transformers_version = Version(transformers_version) SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") diff --git a/unsloth/save.py b/unsloth/save.py index 11e1e7a3fb..a63225e649 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from packaging.version import Version +from unsloth_zoo.utils import Version from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit from peft.tuners.lora import Linear4bit as Peft_Linear4bit from peft.tuners.lora import Linear as Peft_Linear @@ -2097,6 +2097,7 @@ def unsloth_convert_lora_to_ggml_and_save_locally( from .models.loader_utils import get_model_name +from unsloth_zoo.saving_utils import merge_and_overwrite_lora @torch.inference_mode def unsloth_generic_save( @@ -2128,34 +2129,16 @@ def unsloth_generic_save( maximum_memory_usage : float = 0.9, ): if token is None and push_to_hub: token = get_token() - - import unsloth_zoo - if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): - from unsloth_zoo.peft_utils import merge_and_overwrite_lora - merge_and_overwrite_lora( - get_model_name, - create_huggingface_repo, - model, - save_location = save_directory, - push_to_hub = push_to_hub, - token = token, - upload_location = save_directory if push_to_hub else None, - low_disk_space_usage = True, - private = private, - ) - else: - 
from unsloth_zoo.saving_utils import merge_and_overwrite_lora - merge_and_overwrite_lora( - get_model_name, - model, - save_directory = save_directory, - push_to_hub = push_to_hub, - private = private, - token = token, - low_disk_space_usage = False, - use_temp_file = False, - ) - pass + merge_and_overwrite_lora( + get_model_name, + model, + save_directory = save_directory, + push_to_hub = push_to_hub, + private = private, + token = token, + low_disk_space_usage = False, + use_temp_file = False, + ) return pass From db0953d4ab78bf2156cf428d31dd29b6dd76bdbf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:14:01 -0800 Subject: [PATCH 0937/1088] Update pyproject.toml --- pyproject.toml | 144 ++++++++++++++++++++++++------------------------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6b15ce5659..9c52244e75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,110 +51,110 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; 
platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == 
"Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; 
python_version=='3.10'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; 
python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; platform_system == "Windows"', - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", ] cu121onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", ] cu124onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'" ; platform_system == "Linux"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9'" ; 
platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11'" ; platform_system == "Windows"', - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12'" ; platform_system == "Windows"', + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", ] cu118 = [ "unsloth[huggingface]", From 68d055db9530b1668e08cb3f632256d677a76384 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:16:30 -0800 Subject: [PATCH 
0938/1088] Update pyproject.toml --- pyproject.toml | 168 +++++++++++++++++-------------------------------- 1 file changed, 59 insertions(+), 109 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9c52244e75..17ad03bf94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,110 +51,86 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; 
platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers 
@ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; 
platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; 
python_version=='3.12' ; platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", -] -cu121onlytorch251 = [ - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", -] -cu124onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system == 'Windows'", + 
"xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu118 = [ "unsloth[huggingface]", @@ -231,16 +207,6 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] -cu121-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", -] -cu124-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", -] kaggle = [ "unsloth[huggingface]", ] @@ -278,10 +244,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -297,8 +263,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers", - "bitsandbytes>=0.46.1", + "xformers<0.0.27", + "bitsandbytes>=0.43.3", "protobuf<4.0.0", ] colab = [ @@ -414,24 +380,8 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] -cu121-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", -] -cu124-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", -] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = 
"https://github.com/unslothai/unsloth" +repository = "https://github.com/unslothai/unsloth" \ No newline at end of file From 82570deaa716f6f78cf028f62fab4568670909b5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:43:32 -0800 Subject: [PATCH 0939/1088] Version --- pyproject.toml | 168 +++++++++++++++++++++++++-------------- unsloth/models/_utils.py | 3 +- 2 files changed, 110 insertions(+), 61 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 17ad03bf94..96ee98ce2a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,86 +51,110 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; 
python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; 
python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; 
python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", +] +cu121onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", +] +cu124onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", ] cu118 = [ "unsloth[huggingface]", @@ -207,6 +231,16 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] +cu121-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", +] +cu124-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", +] kaggle = [ "unsloth[huggingface]", ] @@ -244,10 +278,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -263,8 +297,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers<0.0.27", - "bitsandbytes>=0.43.3", + "xformers", + "bitsandbytes>=0.46.1", "protobuf<4.0.0", ] colab = [ @@ -380,8 +414,24 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] +cu121-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch251 = [ + "unsloth[huggingface]", + 
"bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" \ No newline at end of file +repository = "https://github.com/unslothai/unsloth" diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 417633ee6e..cfb137d4eb 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -403,8 +403,7 @@ def _is_openai_available(): return False # Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout' accelerate_old_send_to_device = None accelerate_new_send_to_device = None -print(406, xformers_version) -if Version(xformers_version) >= Version("0.0.27"): +if xformers_version is not None and Version(xformers_version) >= Version("0.0.27"): import accelerate.utils.operations if hasattr(accelerate.utils.operations, "send_to_device") and \ accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": From f9e59ff45d8cd21f0e4f81f1f84b4fbc0ebb22cc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:52:50 -0800 Subject: [PATCH 0940/1088] Update pyproject.toml --- pyproject.toml | 162 +++++++++++++++++-------------------------------- 1 file changed, 56 insertions(+), 106 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 96ee98ce2a..b11039b2b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -56,105 +56,81 @@ cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; 
platform_system=='Linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch211 = [ - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; 
platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch230 = [ - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; 
python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; 
platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", -] -cu121onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", -] -cu124onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; platform_system=='Windows'", - 
"xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; platform_system=='Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; platform_system=='Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu118 = [ "unsloth[huggingface]", @@ -231,16 +207,6 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] -cu121-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", -] -cu124-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", -] kaggle = [ "unsloth[huggingface]", ] @@ -278,10 +244,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -297,8 +263,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers", - "bitsandbytes>=0.46.1", + "xformers<0.0.27", + "bitsandbytes>=0.43.3", "protobuf<4.0.0", ] colab = [ @@ -414,24 +380,8 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] -cu121-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", - "packaging", - "ninja", - 
"flash-attn>=2.6.3", -] -cu124-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", -] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" +repository = "https://github.com/unslothai/unsloth" \ No newline at end of file From c37377382335fffc4521c82484cddb7e3cb28e69 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 21:53:43 -0800 Subject: [PATCH 0941/1088] Update pyproject.toml --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b11039b2b6..17ad03bf94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,9 +51,9 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121only = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", From 
1f67a9498a7a1fc9cac37dcbefb7e85ee37d04b8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 22:27:35 -0800 Subject: [PATCH 0942/1088] dependencies --- pyproject.toml | 168 ++++++++++++++++++++++++++-------------- unsloth/models/llama.py | 8 +- 2 files changed, 114 insertions(+), 62 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 17ad03bf94..af212891a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,86 +51,110 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + 
"xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + 
"xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; 
python_version=='3.12' ; sys_platform=='linux'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' 
; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; 
python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", +] +cu121onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", +] +cu124onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", ] cu118 = [ "unsloth[huggingface]", @@ -207,6 +231,16 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] +cu121-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", +] +cu124-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", +] kaggle = [ "unsloth[huggingface]", ] @@ -244,10 +278,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -263,8 +297,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers<0.0.27", - "bitsandbytes>=0.43.3", + "xformers", + "bitsandbytes>=0.46.1", "protobuf<4.0.0", ] colab = [ @@ -380,8 +414,24 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] +cu121-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" \ No newline at end of file +repository = "https://github.com/unslothai/unsloth" diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f8fb7d945d..c94514966f 100644 --- a/unsloth/models/llama.py +++ 
b/unsloth/models/llama.py @@ -66,6 +66,8 @@ from huggingface_hub.utils._token import get_token pass from triton import __version__ as triton_version +BlockDiagonalCausalMask = xformers.attn_bias.BlockDiagonalCausalMask if xformers is not None else None + def original_apply_qkv(self, X): Q = self.q_proj(X) @@ -330,7 +332,7 @@ def fast_layernorm_compiled(layernorm, X): def LlamaAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -538,7 +540,7 @@ def LlamaDecoderLayer_fast_forward( def LlamaModel_fast_forward( self, input_ids: torch.LongTensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, @@ -942,7 +944,7 @@ def CausalLM_fast_forward(fast_forward_inference): def _CausalLM_fast_forward( self, input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, From 7bc9bd3a8b927db6910b32fd32a9a5a65dbb7dcb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 22:47:11 -0800 Subject: [PATCH 0943/1088] Update pyproject.toml --- pyproject.toml | 168 +++++++++++++++++-------------------------------- 1 file changed, 59 insertions(+), 109 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index af212891a1..41c665d760 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 
@@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,110 +51,86 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; 
python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; 
sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu124onlytorch250 = [ - 
"xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", -] -cu121onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", -] -cu124onlytorch251 = [ - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' ; sys_platform=='linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' ; sys_platform=='windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' ; sys_platform=='windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", ] cu118 = [ "unsloth[huggingface]", @@ -231,16 +207,6 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] -cu121-torch251 = [ - "unsloth[huggingface]", - 
"bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", -] -cu124-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", -] kaggle = [ "unsloth[huggingface]", ] @@ -278,10 +244,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.11.8", "packaging", "tyro", - "transformers>=4.46.1,!=4.47.0", + "transformers>=4.46.1,<=4.46.3", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -297,8 +263,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers", - "bitsandbytes>=0.46.1", + "xformers<0.0.27", + "bitsandbytes>=0.43.3", "protobuf<4.0.0", ] colab = [ @@ -414,24 +380,8 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] -cu121-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", -] -cu124-ampere-torch251 = [ - "unsloth[huggingface]", - "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", -] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" +repository = "https://github.com/unslothai/unsloth" \ No newline at end of file From e3af8f9c4d8e96b4053362875b9bd0187c7b1249 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 22:47:20 -0800 Subject: [PATCH 0944/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 41c665d760..361661897a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'; platform_system=='Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] From e74fc34464a3c065168ba270b8f48c9bb79ebca0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 23 Dec 2024 22:49:16 -0800 Subject: [PATCH 0945/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 361661897a..45035db2bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' ; platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'; sys_platform =='linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] From 61917ccfcdf0d409b7ac84b14d3570e021f048bf Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:02:30 -0800 Subject: [PATCH 0946/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 45035db2bf..f27067f41c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'; sys_platform =='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; sys_platform =='linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] From 00af89272cdbdc4917c3d470b57d18a6d1bc4296 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:04:36 -0800 Subject: [PATCH 0947/1088] Update mistral.py --- unsloth/models/mistral.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index dda430459a..d6c6946664 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -40,7 +40,7 @@ def MistralAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -172,7 +172,7 @@ def MistralAttention_fast_forward( def MistralForCausalLM_fast_forward( self, input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, From e280cb766c4aabdcaf2fb481098b215491f868b8 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:07:37 -0800 Subject: [PATCH 0948/1088] Update pyproject.toml --- 
pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f27067f41c..9f508f0d95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; sys_platform =='linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' & platform_system=='Linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] From 326610b118c478eabec6a4a3ec6d608bd7dce16f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:08:03 -0800 Subject: [PATCH 0949/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9f508f0d95..df7e2c83fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' & platform_system=='Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] From cc98e0a50fc9e7d0f993239580630eac99931aa1 Mon Sep 17 
00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:08:49 -0800 Subject: [PATCH 0950/1088] Update pyproject.toml --- pyproject.toml | 168 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 109 insertions(+), 59 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index df7e2c83fa..ae6a649bef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,10 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -51,86 +51,110 @@ huggingface = [ "hf_transfer", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system=='Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", 
+ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; 
python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", +] +cu121onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", +] +cu124onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu118 = [ "unsloth[huggingface]", @@ -207,6 +231,16 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] +cu121-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", +] +cu124-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", +] kaggle = [ "unsloth[huggingface]", ] @@ -244,10 +278,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -263,8 +297,8 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers<0.0.27", - "bitsandbytes>=0.43.3", + "xformers", + "bitsandbytes>=0.46.1", "protobuf<4.0.0", ] colab = [ @@ -380,8 +414,24 @@ cu124-ampere-torch250 = [ "ninja", "flash-attn>=2.6.3", ] +cu121-ampere-torch251 = [ + 
"unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] +cu124-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", + "packaging", + "ninja", + "flash-attn>=2.6.3", +] [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" \ No newline at end of file +repository = "https://github.com/unslothai/unsloth" From 052621631a4a6adeb75da7cad9d16cf56ebeda08 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:09:23 -0800 Subject: [PATCH 0951/1088] Update granite.py --- unsloth/models/granite.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index 2229636e9e..9466a8d6c1 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -60,7 +60,7 @@ def GraniteAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -171,7 +171,7 @@ def GraniteAttention_fast_forward( def GraniteDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, From 2e08a4b023c6c7f6cff57296d7c1c082f9ec09a2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:11:17 -0800 Subject: [PATCH 0952/1088] Update cohere.py --- unsloth/models/cohere.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py index cbbebeec5c..1610949f64 100644 --- a/unsloth/models/cohere.py +++ b/unsloth/models/cohere.py @@ -68,7 +68,7 @@ def fast_layernorm_inference(self, X, out_weight = None): def CohereAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -183,7 +183,7 @@ def CohereAttention_fast_forward( def CohereDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, From 95ff42da631c683b3f268cb8f4d0cd3fd8d421f3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 00:17:33 -0800 Subject: [PATCH 0953/1088] Triton windows --- pyproject.toml | 7 +++++++ unsloth/models/gemma.py | 2 +- unsloth/models/gemma2.py | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ae6a649bef..fd88925c49 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,12 @@ include-package-data = false exclude = ["images*"] [project.optional-dependencies] +triton = [ + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "triton @ 
https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", +] huggingface = [ "unsloth_zoo>=2024.12.3", "packaging", @@ -49,6 +55,7 @@ huggingface = [ "protobuf<4.0.0", "huggingface_hub", "hf_transfer", + "unsloth[triton]", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 1d9a0c1334..c654343282 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -77,7 +77,7 @@ def fast_geglu_inference(self, X): def GemmaDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index e47a7434f2..4a6f3e97cc 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -75,7 +75,7 @@ def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): def Gemma2Attention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, From d44bb3c2aa9d838ff313147b8e60408f8af234cc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 
Dec 2024 00:18:47 -0800 Subject: [PATCH 0954/1088] Update gemma2.py --- unsloth/models/gemma2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 4a6f3e97cc..0f0a020717 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -169,7 +169,7 @@ def Gemma2Attention_fast_forward( def Gemma2DecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, From 8337b90e97350c4c44df1e7cf381a308f97cd8e4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 01:55:04 -0800 Subject: [PATCH 0955/1088] Update pyproject.toml --- pyproject.toml | 69 +++++++++++++++----------------------------------- 1 file changed, 21 insertions(+), 48 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fd88925c49..5acc4fd586 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -311,131 +311,104 @@ colab-no-deps = [ colab = [ "unsloth[cu121]", ] +flashattention = [ + "packaging ; platform_system == 'Linux'", + "ninja ; platform_system == 'Linux'", + "flash-attn>=2.6.3 ; platform_system == 'Linux'", +] colab-ampere = [ "unsloth[colab-ampere-torch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118only]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121only]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch211]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + 
"unsloth[flashattention]", ] cu121-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch230 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch230]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch230 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch230]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch250]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu124-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu124-ampere-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch251 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch251]", - "packaging", - "ninja", 
- "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu124-ampere-torch251 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch251]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] [project.urls] From 1c7bc4ea0bf3a169033dee9301c563a971394ba1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 03:34:54 -0800 Subject: [PATCH 0956/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cfb137d4eb..ce644f8540 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.8" +__version__ = "2024.12.9" __all__ = [ "prepare_model_for_kbit_training", From 3d76fdbd87cd1a4aa5a85e9535acb04bf50873be Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 03:35:04 -0800 Subject: [PATCH 0957/1088] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5acc4fd586..d14a392b8d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.12.4", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", From a2407835534747d2421f58cbdeeb5a49482e7235 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 24 Dec 2024 03:37:03 -0800 Subject: [PATCH 0958/1088] Bug Fixes (#1470) * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * 
Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules * Fix vision model tokenizer padding side. (#1384) * Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update 
_utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. 
* Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * 
Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> * Update README.md Unsloth Dynamic 4-bit Quantization Update * Fix vision model tokenizer padding side. * Update vision.py --------- Co-authored-by: Daniel Han Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Add citation section to README.md (#1377) * Add citation section to README.md * Update README.md --------- Co-authored-by: Daniel Han * Granite support (#1218) * [WIP] Support for Granite * Fixup inference * Cleanup flex attention * remove sliding window * Use torch.add for residual multiplier * Llama 3.3 * Update llama.py * Update llama.py * fullgraph * Fix loader.py to work on Windows (#1453) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Fix loader.py to work on Windows --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update save.py warning message (#1425) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Update save.py warning message 
--------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Change _fix_chat_template in case a template has both endif and endfor (#1388) * Update llama and derivatives to pass position embeddings explicitly for transformers v4.47+ (#1442) * Update save.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Temp fix * Update _utils.py * Update _utils.py * Update pyproject.toml * Name Error Bug Fix - import from packaging.version import Version (#1468) * Version * Update pyproject.toml * Update pyproject.toml * Version * Update pyproject.toml * Update pyproject.toml * dependencies * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update mistral.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update granite.py * Update cohere.py * Triton windows * Update gemma2.py * Update pyproject.toml * Update _utils.py * Update pyproject.toml --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Zewen Shen Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Scott Phillips Co-authored-by: qingy1337 Co-authored-by: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> Co-authored-by: Yonghye Kwon --- pyproject.toml | 230 +++++++++++++++++++++----------------- unsloth/models/_utils.py | 14 ++- unsloth/models/cohere.py | 4 +- unsloth/models/gemma.py | 2 +- unsloth/models/gemma2.py | 4 +- unsloth/models/granite.py | 4 +- unsloth/models/llama.py | 8 +- unsloth/models/loader.py | 2 +- unsloth/models/mistral.py | 4 +- unsloth/save.py | 40 ++----- 10 files changed, 168 insertions(+), 144 deletions(-) diff 
--git a/pyproject.toml b/pyproject.toml index 49347c8ab0..d14a392b8d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,11 +32,17 @@ include-package-data = false exclude = ["images*"] [project.optional-dependencies] +triton = [ + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", +] huggingface = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.4", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", "datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -49,88 +55,113 @@ huggingface = [ "protobuf<4.0.0", "huggingface_hub", "hf_transfer", + "unsloth[triton]", ] cu118only = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch211 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23.post1%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch212 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23.post1-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.24%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu121onlytorch220 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.24-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", ] cu118onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + 
"xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu121onlytorch230 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.27-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu118onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; 
python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp312-cp312-manylinux2014_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu121onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch240 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl 
; python_version=='3.12' and platform_system == 'Windows'", ] cu121onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch250 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", +] +cu121onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and 
platform_system == 'Linux'", +] +cu124onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu118 = [ "unsloth[huggingface]", @@ -207,6 +238,16 @@ cu124-torch250 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", ] +cu121-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", +] +cu124-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", +] kaggle = [ "unsloth[huggingface]", ] @@ -244,10 +285,10 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.11.8", + "unsloth_zoo>=2024.12.3", "packaging", "tyro", - "transformers>=4.46.1,<=4.46.3", + "transformers>=4.46.1,!=4.47.0", 
"datasets>=2.16.0", "sentencepiece>=0.2.0", "tqdm", @@ -263,122 +304,111 @@ colab-no-deps = [ "accelerate>=0.34.1", "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", - "xformers<0.0.27", - "bitsandbytes>=0.43.3", + "xformers", + "bitsandbytes>=0.46.1", "protobuf<4.0.0", ] colab = [ "unsloth[cu121]", ] +flashattention = [ + "packaging ; platform_system == 'Linux'", + "ninja ; platform_system == 'Linux'", + "flash-attn>=2.6.3 ; platform_system == 'Linux'", +] colab-ampere = [ "unsloth[colab-ampere-torch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118only]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121only]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch211]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch211 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch211]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch220 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch220]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch230 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch230]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch230 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch230]", - "packaging", - "ninja", - 
"flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu118-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu118onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu121-ampere-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch250]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu124-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch240]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", ] cu124-ampere-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch250]", - "packaging", - "ninja", - "flash-attn>=2.6.3", + "unsloth[flashattention]", +] +cu121-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch251]", + "unsloth[flashattention]", +] +cu124-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu124onlytorch251]", + "unsloth[flashattention]", ] [project.urls] diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a02cadd08a..ce644f8540 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.8" +__version__ = "2024.12.9" __all__ = [ "prepare_model_for_kbit_training", @@ -72,7 +72,7 @@ platform_system = platform_system() import numpy as np import warnings, subprocess, re, inspect, psutil, os, math -from packaging.version import Version +from unsloth_zoo.utils import Version from unsloth_zoo.tokenizer_utils import ( patch_tokenizer as _patch_tokenizer, @@ -403,7 +403,7 @@ def _is_openai_available(): return False # Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout' accelerate_old_send_to_device = None accelerate_new_send_to_device = None -if Version(xformers_version) >= Version("0.0.27"): +if xformers_version is not None and Version(xformers_version) >= Version("0.0.27"): import accelerate.utils.operations if hasattr(accelerate.utils.operations, "send_to_device") and \ accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": @@ -1086,6 +1086,14 @@ def patch_gradient_accumulation_fix(Trainer): "if num_items_in_batch is not None: loss *= self.args.gradient_accumulation_steps", ) function = function.replace("def training_step", "def _unsloth_training_step", 1) + + # Fix 4.47.0 issue where num_items_in_batch was removed + # See https://github.com/huggingface/transformers/pull/35121 + function = function.replace( + "if self.model_accepts_loss_kwargs:", + "if False:", + ) + exec(function, globals()) Trainer.training_step = _unsloth_training_step pass diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py index cbbebeec5c..1610949f64 100644 --- a/unsloth/models/cohere.py +++ b/unsloth/models/cohere.py @@ -68,7 +68,7 @@ def fast_layernorm_inference(self, X, out_weight = None): def CohereAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: 
Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -183,7 +183,7 @@ def CohereAttention_fast_forward( def CohereDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index 1d9a0c1334..c654343282 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -77,7 +77,7 @@ def fast_geglu_inference(self, X): def GemmaDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index e47a7434f2..0f0a020717 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -75,7 +75,7 @@ def fast_rms_layernorm_gemma2_compiled(layernorm, X, gemma = True): def Gemma2Attention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -169,7 +169,7 @@ def Gemma2Attention_fast_forward( def Gemma2DecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: 
Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index 2229636e9e..9466a8d6c1 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -60,7 +60,7 @@ def GraniteAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -171,7 +171,7 @@ def GraniteAttention_fast_forward( def GraniteDecoderLayer_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f8fb7d945d..c94514966f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -66,6 +66,8 @@ from huggingface_hub.utils._token import get_token pass from triton import __version__ as triton_version +BlockDiagonalCausalMask = xformers.attn_bias.BlockDiagonalCausalMask if xformers is not None else None + def original_apply_qkv(self, X): Q = self.q_proj(X) @@ -330,7 +332,7 @@ def fast_layernorm_compiled(layernorm, X): def LlamaAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -538,7 +540,7 @@ def LlamaDecoderLayer_fast_forward( def LlamaModel_fast_forward( self, input_ids: 
torch.LongTensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, @@ -942,7 +944,7 @@ def CausalLM_fast_forward(fast_forward_inference): def _CausalLM_fast_forward( self, input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 5ecd667f51..9c5ea5baca 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -32,7 +32,7 @@ from huggingface_hub import HfFileSystem # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! 
-from packaging.version import Version +from unsloth_zoo.utils import Version transformers_version = Version(transformers_version) SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index dda430459a..d6c6946664 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -40,7 +40,7 @@ def MistralAttention_fast_forward( self, hidden_states: torch.Tensor, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, @@ -172,7 +172,7 @@ def MistralAttention_fast_forward( def MistralForCausalLM_fast_forward( self, input_ids: torch.LongTensor = None, - causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + causal_mask: Optional[BlockDiagonalCausalMask] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, diff --git a/unsloth/save.py b/unsloth/save.py index ea2d30972e..a63225e649 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from unsloth_zoo.utils import Version from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit from peft.tuners.lora import Linear4bit as Peft_Linear4bit from peft.tuners.lora import Linear as Peft_Linear @@ -2096,6 +2097,7 @@ def unsloth_convert_lora_to_ggml_and_save_locally( from .models.loader_utils import get_model_name +from unsloth_zoo.saving_utils import merge_and_overwrite_lora @torch.inference_mode def unsloth_generic_save( @@ -2127,34 +2129,16 @@ def unsloth_generic_save( maximum_memory_usage : float = 0.9, ): if token is None and push_to_hub: token = get_token() - - import unsloth_zoo - if Version(unsloth_zoo.__version__) <= Version("2024.12.1"): - from unsloth_zoo.peft_utils import merge_and_overwrite_lora - merge_and_overwrite_lora( - get_model_name, - create_huggingface_repo, - model, - save_location = save_directory, - push_to_hub = push_to_hub, - token = token, - upload_location = save_directory if push_to_hub else None, - low_disk_space_usage = True, - private = private, - ) - else: - from unsloth_zoo.saving_utils import merge_and_overwrite_lora - merge_and_overwrite_lora( - get_model_name, - model, - save_directory = save_directory, - push_to_hub = push_to_hub, - private = private, - token = token, - low_disk_space_usage = False, - use_temp_file = False, - ) - pass + merge_and_overwrite_lora( + get_model_name, + model, + save_directory = save_directory, + push_to_hub = push_to_hub, + private = private, + token = token, + low_disk_space_usage = False, + use_temp_file = False, + ) return pass From cebdfcf930d899525d9dc3fdcafc1a6518fc6a16 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Dec 2024 23:01:34 -0800 Subject: [PATCH 0959/1088] Residual & LoRA --- unsloth/models/_utils.py | 4 ++++ unsloth/models/loader.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ce644f8540..7cbecfa76a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1130,6 +1130,8 @@ def 
unsloth_compile_transformers( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, + fast_lora_forwards = True, + fast_residual_stream = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, @@ -1174,6 +1176,8 @@ def unsloth_compile_transformers( fuse_lm_head = fuse_lm_head, gradient_checkpointing = gradient_checkpointing, manual_replacements = manual_replacements, + fast_lora_forwards = fast_lora_forwards, + fast_residual_stream = fast_residual_stream, epilogue_fusion = epilogue_fusion, max_autotune = max_autotune, shape_padding = shape_padding, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 9c5ea5baca..d1aae95a52 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -470,6 +470,8 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, + fast_lora_forwards = True, + fast_residual_stream = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, From a4f7b8898fe6704ce9160f625411f2bfc042057d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Dec 2024 23:01:54 -0800 Subject: [PATCH 0960/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d1aae95a52..993f50455c 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -454,7 +454,7 @@ def from_pretrained( if not was_disabled: enable_progress_bars() - with contextlib.redirect_stdout(open(os.devnull, "w")): + if True: # with contextlib.redirect_stdout(open(os.devnull, "w")): patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, From 2616a9c80d51c6afa6479d5d8d49f13656176326 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Dec 2024 23:49:40 -0800 Subject: [PATCH 0961/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 993f50455c..4274389a99 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -467,7 +467,7 @@ def from_pretrained( compile_torch_modules = True, compile_custom_modules = True, compile_function_calls = True, - fuse_lm_head = True, + fuse_lm_head = False, gradient_checkpointing = True, manual_replacements = True, fast_lora_forwards = True, From c43d969127cb741a71cd5789fa6b95957c04ed50 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 25 Dec 2024 23:57:42 -0800 Subject: [PATCH 0962/1088] Update loader.py --- unsloth/models/loader.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 4274389a99..09cd87384a 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -455,24 +455,24 @@ def from_pretrained( if not was_disabled: enable_progress_bars() if True: # with contextlib.redirect_stdout(open(os.devnull, "w")): - patch_loss_functions(torch_compile = False) + # patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, - sdpa_dynamic_mask = True, - sdpa_bool_masks = True, - sdpa_gqa_replace = True, - sdpa_dynamic_compile = True, - compile_attention = True, - disable_causal_masks = True, - compile_torch_modules = True, - compile_custom_modules = True, - compile_function_calls = True, + sdpa_dynamic_mask = False, + sdpa_bool_masks = False, + sdpa_gqa_replace = False, + sdpa_dynamic_compile = False, + compile_attention = False, + disable_causal_masks = False, + compile_torch_modules = False, + compile_custom_modules = False, + compile_function_calls = False, fuse_lm_head = False, - gradient_checkpointing = True, - manual_replacements = True, - fast_lora_forwards = True, - fast_residual_stream = True, - epilogue_fusion = True, + gradient_checkpointing = False, + manual_replacements = False, + 
fast_lora_forwards = False, + fast_residual_stream = False, + epilogue_fusion = False, max_autotune = False, shape_padding = True, cudagraphs = False, @@ -480,7 +480,7 @@ def from_pretrained( fullgraph = fullgraph, import_from_cache = False, disable = False, - return_logits = return_logits, + return_logits = True, ) pass From 8cfee4501afa6615d7797f4aa98faae8551718ce Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 00:33:15 -0800 Subject: [PATCH 0963/1088] Update loader.py --- unsloth/models/loader.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 09cd87384a..993f50455c 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -455,24 +455,24 @@ def from_pretrained( if not was_disabled: enable_progress_bars() if True: # with contextlib.redirect_stdout(open(os.devnull, "w")): - # patch_loss_functions(torch_compile = False) + patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, - sdpa_dynamic_mask = False, - sdpa_bool_masks = False, - sdpa_gqa_replace = False, - sdpa_dynamic_compile = False, - compile_attention = False, - disable_causal_masks = False, - compile_torch_modules = False, - compile_custom_modules = False, - compile_function_calls = False, - fuse_lm_head = False, - gradient_checkpointing = False, - manual_replacements = False, - fast_lora_forwards = False, - fast_residual_stream = False, - epilogue_fusion = False, + sdpa_dynamic_mask = True, + sdpa_bool_masks = True, + sdpa_gqa_replace = True, + sdpa_dynamic_compile = True, + compile_attention = True, + disable_causal_masks = True, + compile_torch_modules = True, + compile_custom_modules = True, + compile_function_calls = True, + fuse_lm_head = True, + gradient_checkpointing = True, + manual_replacements = True, + fast_lora_forwards = True, + fast_residual_stream = True, + epilogue_fusion = True, 
max_autotune = False, shape_padding = True, cudagraphs = False, @@ -480,7 +480,7 @@ def from_pretrained( fullgraph = fullgraph, import_from_cache = False, disable = False, - return_logits = True, + return_logits = return_logits, ) pass From ece0c858dbf657991605e8860d67ff59b56e506f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:22:06 -0800 Subject: [PATCH 0964/1088] Bug fix --- unsloth/models/_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7cbecfa76a..2af59f9a48 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1093,6 +1093,22 @@ def patch_gradient_accumulation_fix(Trainer): "if self.model_accepts_loss_kwargs:", "if False:", ) + + # Fix when num_items_in_batch is nothing + # https://github.com/huggingface/transformers/pull/35207 + function = re.sub( + r"else:\n"\ + r"([\s]{4,})self\.accelerator\.backward\(loss, \*\*kwargs\)\n"\ + r"(.+?)if num_items_in_batch is None\:\n"\ + r"(.+?)return loss\.detach\(\) \/ self\.args\.gradient_accumulation_steps", + + "else:\n"\ + "\2if num_items_in_batch is None:\n"\ + "\3loss /= self.args.gradient_accumulation_steps\n"\ + "\1self.accelerator.backward(loss, **kwargs)", + + function, + ) exec(function, globals()) Trainer.training_step = _unsloth_training_step From 6dfcebb3ef66b2dfd4043a10448c7933c065ff07 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:23:02 -0800 Subject: [PATCH 0965/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 993f50455c..d1aae95a52 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -454,7 +454,7 @@ def from_pretrained( if not was_disabled: enable_progress_bars() - if True: # with contextlib.redirect_stdout(open(os.devnull, "w")): + with contextlib.redirect_stdout(open(os.devnull, "w")): 
patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, From cb2dd07545b45c41f92b194eba6d7439200fd784 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:26:27 -0800 Subject: [PATCH 0966/1088] Update loader.py --- unsloth/models/loader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d1aae95a52..d1c8b1e07b 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -470,8 +470,8 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, - fast_lora_forwards = True, - fast_residual_stream = True, + fast_lora_forwards = False, + fast_residual_stream = False, epilogue_fusion = True, max_autotune = False, shape_padding = True, From e0dd0bd34ed448b177faf7657f91e84b8df13403 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:29:07 -0800 Subject: [PATCH 0967/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d1c8b1e07b..a75696a994 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -470,7 +470,7 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, - fast_lora_forwards = False, + fast_lora_forwards = True, fast_residual_stream = False, epilogue_fusion = True, max_autotune = False, From 355139f0183ceac10edaed79edbb5dcc45da8513 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:32:00 -0800 Subject: [PATCH 0968/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2af59f9a48..af61cc373a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -__version__ = "2024.12.9" +__version__ = "2024.12.10" __all__ = [ "prepare_model_for_kbit_training", From fb99991119c0f019c4786ac107a939bfc17a6b2c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:32:56 -0800 Subject: [PATCH 0969/1088] Update loader.py --- unsloth/models/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index a75696a994..d1c8b1e07b 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -470,7 +470,7 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, - fast_lora_forwards = True, + fast_lora_forwards = False, fast_residual_stream = False, epilogue_fusion = True, max_autotune = False, From 4fbbd7e2afc307483bbbb95065dc4932ddc38692 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:35:19 -0800 Subject: [PATCH 0970/1088] Bug fixes (#1473) * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update 
cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. 
It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update _utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update 
loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules * Fix vision model tokenizer padding side. 
(#1384) * Dynamic quants (#1379) * typing * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * int64 * Update _utils.py * Update cross_entropy_loss.py * constexpr * constexpr * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update _utils.py * Update _utils.py * Update _utils.py * CE * Update cross_entropy_loss.py * Update _utils.py * Update llama.py * Update _utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update utils.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * Update rms_layernorm.py * typing * Update rope_embedding.py * types * Disable compiling * Update _utils.py * Update _utils.py * Forward hook * Update _utils.py * Update llama.py * Update _utils.py * Update llama.py * Update llama.py * Update _utils.py * Update pyproject.toml * Update _utils.py * Update llama.py * CE Loss * Update cross_entropy_loss.py * Update _utils.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update cross_entropy_loss.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix: cast logits to float32 in cross_entropy_forward to prevent errors (#1254) * Fix: cast logits to float32 in cross_entropy_forward to prevent errors * Update cross_entropy_loss.py --------- Co-authored-by: Daniel Han * Throw error when inferencing longer than max_popsition_embeddings (#1236) * Throw error when inferencing longer than 
max_popsition_embeddings without rope scaling * Update llama.py --------- Co-authored-by: Daniel Han * CLI now handles user input strings for dtype correctly (#1235) Co-authored-by: root * Update flex_attention.py * Update _utils.py * Update _utils.py * Update flex_attention.py * Update flex_attention.py * Update loader.py * Update loader.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update flex_attention.py * Update _utils.py * Update cross_entropy_loss.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * triton_cast * Update utils.py * Qwen 2.5 Coder * Fix/export mistral (#1281) * Enhance install_python_non_blocking to handle protobuf installation and process management * Revert "Enhance install_python_non_blocking to handle protobuf installation and process management" This reverts commit f09974b151df1a6ce4708bc4cf75e5eb6b024aed. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Revert "Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266" This reverts commit 9fc130785dac65e9469306f71c666c155add53f1. * Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'python' to address issue #1266 * Update __init__.py --------- Co-authored-by: Daniel Han * DOC Update - Update README.md with os.environ in example (#1269) * Update README.md with os.environ in example Added OS Environ in example to avoid device conflicts , for a user at least in jupyter notebook this allows to select GPU in a multi GPU setup. As currently the unsloth init checks all GPU's and takes the first in the order which can be a issue when some GPU's are in use and the list still shows them. So to manually avoid this, this os config is required. 
Small change but a bit time saver for those who straight away copies the tutorials * Update README.md --------- Co-authored-by: Daniel Han * fix/get_chat_template (#1246) * Refactor `get_chat_template` to now support system message instead. It supposed to fix ollama tokenizer chattemplate to * Remove type hinting * Update chat_templates.py --------- Co-authored-by: Daniel Han * fix/sft-trainer (#1276) * Add patch for SFTTrainer to maintain backward compatibility with TRL changes * Update trainer.py * Update trainer.py * Refactor trainer patch to maintain backward compatibility with TRL changes * Update trainer.py * Refactor trainer.py to exclude non-convertible trainers from backward compatibility patch --------- Co-authored-by: Daniel Han * Update __init__.py * Update trainer.py * Update trainer.py * Update trainer.py * Update tokenizer_utils.py * Update llama.py * Fix #853 * fix/sfttrainer-compatibility (#1293) * Refactor trainer.py to import SFTConfig directly and update UnslothTrainingArguments class inheritance * Update trainer.py * Update trainer.py --------- Co-authored-by: Daniel Han * Update rms_layernorm.py * Update rms_layernorm.py * Gemma * Update rms_layernorm.py * Update gemma2.py * Cut Cross Entropy * Update llama.py * Cut Cross Entropy * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update mapper.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * patch_fast_lora * vision * Update fast_lora.py * Update _utils.py * Update _utils.py * Vision * Update trainer.py * Update save.py * FastBaseVisionModel * Update loader_utils.py * Update vision.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update 
_utils.py * tokenizer_name * Update loader.py * Update vision.py * Update save.py * Update save.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update _utils.py * Update loader.py * kwargs * logits * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * error * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * Update llama.py * Update vision.py * Update loader.py * Old torch versions * Update loader.py * Update loader.py * prints * recheck * Update loader.py * Update loader.py * Update _utils.py * Update _utils.py * Update mapper.py * Feat/kto (#1316) * Add PatchKTOTrainer and update model imports * Update dpo.py * Update __init__.py * Delete unsloth/models/kto.py --------- Co-authored-by: Daniel Han * Fix orpo/dpo trainer (#1286) * change the colab notebook for dpo zephyr and orpo * use original tokenizer * Update README.md * Update README.md --------- Co-authored-by: Daniel Han * skip modules * Update vision.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Fix llama.cpp * Update save.py * Update save.py * Update vision.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update save.py * Update _utils.py * Update save.py * Update save.py * Update mapper.py * modules --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame 
<122996026+dame-cell@users.noreply.github.com> * Update README.md Unsloth Dynamic 4-bit Quantization Update * Fix vision model tokenizer padding side. * Update vision.py --------- Co-authored-by: Daniel Han Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Add citation section to README.md (#1377) * Add citation section to README.md * Update README.md --------- Co-authored-by: Daniel Han * Granite support (#1218) * [WIP] Support for Granite * Fixup inference * Cleanup flex attention * remove sliding window * Use torch.add for residual multiplier * Llama 3.3 * Update llama.py * Update llama.py * fullgraph * Fix loader.py to work on Windows (#1453) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Fix loader.py to work on Windows --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Update save.py warning message (#1425) * Update README.md Llama 3.3 + Reddit * Update README.md Apple ML Cross Entropy * Update README.md Removing double citation * Update save.py warning message --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> * Change _fix_chat_template in case a template has both endif and endfor (#1388) * Update llama and derivatives to pass position embeddings explicitly for transformers v4.47+ (#1442) * Update save.py * Update llama.py * Update mistral.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Temp fix * Update _utils.py * Update _utils.py * Update pyproject.toml * Name Error Bug Fix - import from packaging.version import Version (#1468) * Version * Update 
pyproject.toml * Update pyproject.toml * Version * Update pyproject.toml * Update pyproject.toml * dependencies * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update mistral.py * Update pyproject.toml * Update pyproject.toml * Update pyproject.toml * Update granite.py * Update cohere.py * Triton windows * Update gemma2.py * Update pyproject.toml * Update _utils.py * Update pyproject.toml * Residual & LoRA * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Bug fix * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * Update loader.py --------- Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Edwin Fennell Co-authored-by: root Co-authored-by: Uday Girish Maradana Co-authored-by: cell-dame <122996026+dame-cell@users.noreply.github.com> Co-authored-by: Zewen Shen Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Scott Phillips Co-authored-by: qingy1337 Co-authored-by: Giulia Baldini <44327645+giuliabaldini@users.noreply.github.com> Co-authored-by: Yonghye Kwon --- unsloth/models/_utils.py | 22 +++++++++++++++++++++- unsloth/models/loader.py | 2 ++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ce644f8540..af61cc373a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2024.12.9" +__version__ = "2024.12.10" __all__ = [ "prepare_model_for_kbit_training", @@ -1093,6 +1093,22 @@ def patch_gradient_accumulation_fix(Trainer): "if self.model_accepts_loss_kwargs:", "if False:", ) + + # Fix when num_items_in_batch is nothing + # https://github.com/huggingface/transformers/pull/35207 + function = re.sub( + r"else:\n"\ + r"([\s]{4,})self\.accelerator\.backward\(loss, \*\*kwargs\)\n"\ + r"(.+?)if num_items_in_batch is None\:\n"\ + r"(.+?)return loss\.detach\(\) \/ self\.args\.gradient_accumulation_steps", + + "else:\n"\ + "\2if num_items_in_batch is None:\n"\ + "\3loss /= self.args.gradient_accumulation_steps\n"\ + "\1self.accelerator.backward(loss, **kwargs)", + + function, + ) exec(function, globals()) Trainer.training_step = _unsloth_training_step @@ -1130,6 +1146,8 @@ def unsloth_compile_transformers( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, + fast_lora_forwards = True, + fast_residual_stream = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, @@ -1174,6 +1192,8 @@ def unsloth_compile_transformers( fuse_lm_head = fuse_lm_head, gradient_checkpointing = gradient_checkpointing, manual_replacements = manual_replacements, + fast_lora_forwards = fast_lora_forwards, + fast_residual_stream = fast_residual_stream, epilogue_fusion = epilogue_fusion, max_autotune = max_autotune, shape_padding = shape_padding, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 9c5ea5baca..d1c8b1e07b 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -470,6 +470,8 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, + fast_lora_forwards = False, + fast_residual_stream = False, epilogue_fusion = True, max_autotune = False, shape_padding = True, From 4f0c904d0da626073cce3712e666f52e4427a715 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 01:44:12 -0800 Subject: [PATCH 0971/1088] 
Update save.py --- unsloth/save.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/save.py b/unsloth/save.py index a63225e649..8db3b6dc35 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2136,6 +2136,7 @@ def unsloth_generic_save( push_to_hub = push_to_hub, private = private, token = token, + output_dtype = None, low_disk_space_usage = False, use_temp_file = False, ) From 58448ba4b2aac4a98a2813a5cc78863aef7b0fab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 03:26:01 -0800 Subject: [PATCH 0972/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d14a392b8d..9f52aeeaaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2024.12.4", + "unsloth_zoo>=2024.12.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.3", + "unsloth_zoo>=2024.12.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", From 802a0087811722412c2a1c572eee6e94afb25190 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 04:04:23 -0800 Subject: [PATCH 0973/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9f52aeeaaa..8b1a6b67a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2024.12.5", + "unsloth_zoo>=2024.12.6", "packaging", "tyro", 
"transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.5", + "unsloth_zoo>=2024.12.6", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", From f69f88e84131280f860f88b8ce524d424ddc254f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 04:05:07 -0800 Subject: [PATCH 0974/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index af61cc373a..4f1b40884a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.10" +__version__ = "2024.12.11" __all__ = [ "prepare_model_for_kbit_training", From bc5f726a3cba3dbacda604a288dbc352c0baa737 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 26 Dec 2024 04:12:46 -0800 Subject: [PATCH 0975/1088] Update pyproject.toml --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 8b1a6b67a2..9abe7a5d88 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -299,6 +299,7 @@ colab-new = [ "huggingface_hub", "hf_transfer", "bitsandbytes>=0.43.3", + "unsloth[triton]", ] colab-no-deps = [ "accelerate>=0.34.1", From 746182153b453fae40c458dff3bfca22e1df484c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Dec 2024 03:57:31 -0800 Subject: [PATCH 0976/1088] Bug fixes (#1484) * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * 
Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml --- pyproject.toml | 4 ++-- unsloth/models/_utils.py | 50 ++++++++++++++++++++++++++++++++++------ unsloth/models/loader.py | 1 + unsloth/models/vision.py | 4 ++++ unsloth/save.py | 3 ++- 5 files changed, 52 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9abe7a5d88..ce3301547b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2024.12.6", + "unsloth_zoo>=2024.12.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.6", + "unsloth_zoo>=2024.12.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4f1b40884a..86346d7e2e 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1003,28 +1003,62 @@ def test_mask_creation(): def _unsloth_get_batch_samples(self, epoch_iterator, num_batches): batch_samples = [] num_items_in_batch = None + + # Check if model allows **kwargs + model = self.model + f = model.base_model.model.forward if hasattr(model, "base_model") else model.forward + has_kwargs = tuple(inspect.signature(f).parameters.values())[-1].kind == inspect._VAR_KEYWORD + + # Iterate to find all batches for _ in range(num_batches): try: batch_samples += [next(epoch_iterator)] except StopIteration: break - if len(batch_samples) > 0 and "labels" in batch_samples[0]: + pass + + # Get num_items_in_batch + if has_kwargs and len(batch_samples) > 0 and "labels" in batch_samples[0]: try: num_items_in_batch = sum( - [torch.count_nonzero(x["labels"][..., 1:] != -100) for x in batch_samples] + 
[(x["labels"][..., 1:] != -100).sum() for x in batch_samples] ) - except TypeError: - pass + + if self.args.average_tokens_across_devices: + num_items_in_batch = self.accelerator.gather(num_items_in_batch).sum().item() + + if torch.is_tensor(num_items_in_batch): + num_items_in_batch = num_items_in_batch.item() + + except Exception as exception: + logger.warning_once(exception) + pass + return batch_samples, num_items_in_batch pass def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): + num_items_in_batch = None + if "num_items_in_batch" in kwargs: - if "num_items_in_batch" not in inputs: - inputs["num_items_in_batch"] = kwargs["num_items_in_batch"] + num_items_in_batch = kwargs["num_items_in_batch"] + if num_items_in_batch is None: + # Remove it since the model does not support it! + kwargs.pop("num_items_in_batch") + elif "num_items_in_batch" not in inputs: + inputs["num_items_in_batch"] = num_items_in_batch pass pass + + if num_items_in_batch is None: + name = (model.base_model.model if hasattr(model, "base_model") else model).__class__.__name__ + logger.warning_once( + f"Unsloth: Not an error, but {name} does not accept `num_items_in_batch`.\n"\ + "Using gradient accumulation will be very slightly less accurate.\n"\ + "Read more on gradient accumulation issues here: https://unsloth.ai/blog/gradient" + ) + pass return self._old_compute_loss(model, inputs, *args, **kwargs) pass @@ -1104,7 +1138,7 @@ def patch_gradient_accumulation_fix(Trainer): "else:\n"\ "\2if num_items_in_batch is None:\n"\ - "\3loss /= self.args.gradient_accumulation_steps\n"\ + "\3loss = loss / self.args.gradient_accumulation_steps\n"\ "\1self.accelerator.backward(loss, **kwargs)", function, @@ -1148,6 +1182,7 @@ def unsloth_compile_transformers( manual_replacements = True, fast_lora_forwards = True, fast_residual_stream = True, + accurate_accumulation = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, @@ -1194,6 +1229,7 @@ def 
unsloth_compile_transformers( manual_replacements = manual_replacements, fast_lora_forwards = fast_lora_forwards, fast_residual_stream = fast_residual_stream, + accurate_accumulation = accurate_accumulation, epilogue_fusion = epilogue_fusion, max_autotune = max_autotune, shape_padding = shape_padding, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index d1c8b1e07b..113c4fbc70 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -472,6 +472,7 @@ def from_pretrained( manual_replacements = True, fast_lora_forwards = False, fast_residual_stream = False, + accurate_accumulation = True, epilogue_fusion = True, max_autotune = False, shape_padding = True, diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 709cd1cb5c..2dc4b88dfa 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -186,6 +186,10 @@ def from_pretrained( patch_saving_functions(model, vision = True) patch_saving_functions(tokenizer, vision = True) + # Fix gradient accumulation + from transformers.trainer import Trainer + patch_gradient_accumulation_fix(Trainer) + # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference tokenizer.tokenizer.padding_side = "left" # Force inference diff --git a/unsloth/save.py b/unsloth/save.py index 8db3b6dc35..d3ba1928c4 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2131,7 +2131,8 @@ def unsloth_generic_save( if token is None and push_to_hub: token = get_token() merge_and_overwrite_lora( get_model_name, - model, + model = model, + tokenizer = tokenizer, save_directory = save_directory, push_to_hub = push_to_hub, private = private, From 87f5bffc45a8af7f23a41650b30858e097b86418 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 29 Dec 2024 03:57:58 -0800 Subject: [PATCH 0977/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 
86346d7e2e..3cb6ffb8f3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.11" +__version__ = "2024.12.12" __all__ = [ "prepare_model_for_kbit_training", From f48455529ff8f13f45bf27b2392fba8872203643 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sat, 4 Jan 2025 22:09:25 -0800 Subject: [PATCH 0978/1088] Create CONTRIBUTING.md (#1472) Creating contributing guidelines --- CONTRIBUTING.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..cdccbb3eac --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,29 @@ +# 🦥 Contributing to Unsloth + +Thank you for not only using Unsloth but also for being interested in helping out! We value all contributions, whether they come in the form of code, ideas, support for others or just by simply spreading the word of Unsloth! 💕 + +- **[Support the Community](https://github.com/unslothai/unsloth/issues)**: Answer questions, review pull requests, or assist others in discussions. +- **Fix Bugs**: Identify and resolve issues with the existing codebase. +- **Submit Ideas**: Request new features or share enhancements you'd like to see. +- **Develop Features**: Implement new functionality or improve existing tools which can do via PRs. +- **[Improve Documentation](https://docs.unsloth.ai/)**: Help by creating guides, FAQs, or enhancing clarity. + +One of the best ways to support us is by spreading the word about Unsloth! Share how it’s powering your amazing projects in blog posts or social media, and inspire others to explore its potential. Even a simple star on our repo goes a long way in showing your support and helping the community grow. 
🌟 + +## Submitting Issues +If you find a bug or have a feature idea, we’d love to hear from you! Here’s how to make your submission stand out: + +### Reporting Bugs +1. **Search First**: Check if the issue has already been reported using GitHub’s search bar under Issues. +2. **Details Matter**: Is this on Google Colab, Kaggle, or on another platform service? Are you using Unsloth's official notebook? Include your OS, Python version, and other relevant details. For bugs, a concise code snippet that reproduces the issue is incredibly helpful. +3. **Be Thorough**: Attach screenshots, traceback logs, or any additional information that might speed up resolution. + +## Spread the Word +Your support extends beyond code: +- Spread the word by writing about Unsloth in blogs or social media. +- Share how Unsloth powers your projects. +- Star our repository to show your appreciation. + +Finally, please be mindful of our [Code of Conduct](https://github.com/unslothai/unsloth/tree/main/unsloth/CODE_OF_CONDUCT.md) to ensure a welcoming and inclusive environment for everyone. + +Thank you so much for reading and we hope you have lots of fun using Unsloth! 🦥 From 3a0eb2bbf42016dfeec924b320c420dabbce168b Mon Sep 17 00:00:00 2001 From: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Date: Sun, 5 Jan 2025 09:24:10 +0100 Subject: [PATCH 0979/1088] Update CONTRIBUTING.md improved sentence --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cdccbb3eac..58a2652b5e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ Thank you for not only using Unsloth but also for being interested in helping ou - **[Support the Community](https://github.com/unslothai/unsloth/issues)**: Answer questions, review pull requests, or assist others in discussions. - **Fix Bugs**: Identify and resolve issues with the existing codebase. 
- **Submit Ideas**: Request new features or share enhancements you'd like to see. -- **Develop Features**: Implement new functionality or improve existing tools which can do via PRs. +- **Develop Features**: Implement new functionality or improve existing tools which can be done via PRs. - **[Improve Documentation](https://docs.unsloth.ai/)**: Help by creating guides, FAQs, or enhancing clarity. One of the best ways to support us is by spreading the word about Unsloth! Share how it’s powering your amazing projects in blog posts or social media, and inspire others to explore its potential. Even a simple star on our repo goes a long way in showing your support and helping the community grow. 🌟 From 62acf65cc09a517e270b2405c50452cf24d14976 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 7 Jan 2025 02:02:59 -0800 Subject: [PATCH 0980/1088] Update README.md Notebook links --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 6bff98cbda..cdb48d2c26 100644 --- a/README.md +++ b/README.md @@ -22,22 +22,22 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing) | 2x faster | 60% less | -| **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing) | 2x faster | 40% less | -| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) | 2x faster | 60% less | -| **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/drive/1lN6hPQveB_mHSnTOYifygFcrO8C1bxq4?usp=sharing) | 2x faster | 50% less | -| **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/drive/1vIrqH5uYDQwsJ4-OO3DErvuv4pBgVwk4?usp=sharing) | 2x faster | 63% less | -| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1Kose-ucXO1IBaZq5BvbwWieuubP7hxvQ?usp=sharing) | 2x faster | 63% less | -| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/drive/1_yNCks4BTD5zOnjozppphh5GzMFaMKq_?usp=sharing) | 2.2x faster | 73% less | +| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 60% less | +| **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 40% less | +| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 60% less | +| **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_3.5_Mini-Conversational.ipynb) | 2x faster | 50% less | +| **Gemma 2 (9B)** | [▶️ Start 
for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 63% less | +| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 63% less | +| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 73% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | -| **ORPO** | [▶️ Start for free](https://colab.research.google.com/drive/11t4njE3c4Lxl-07OD8lJSMKkfyJml3Tn?usp=sharing) | 1.9x faster | 43% less | -| **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 43% less | +| **ORPO** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) | 1.9x faster | 43% less | +| **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 43% less | - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) - **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing), [Llama 3.1 
conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) +- Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb), [Llama 3.1 conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text -- This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language +- This [continued pretraining notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) is for learning another language - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. 
## 🦥 Unsloth.ai News From c14046ea4a68f73dc3de29223b0d805382320e9f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 7 Jan 2025 04:23:14 -0800 Subject: [PATCH 0981/1088] Bug fixes (#1516) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han --------- Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> --- README.md | 3 + pyproject.toml | 24 +++--- unsloth-cli.py | 12 ++- unsloth/__init__.py | 
58 +++++++++++--- unsloth/kernels/cross_entropy_loss.py | 37 +++++---- unsloth/kernels/flex_attention.py | 10 +-- unsloth/kernels/rms_layernorm.py | 8 +- unsloth/kernels/rope_embedding.py | 8 +- unsloth/models/_utils.py | 13 +-- unsloth/models/cohere.py | 18 +++-- unsloth/models/gemma2.py | 14 ++-- unsloth/models/granite.py | 67 +++++++++++----- unsloth/models/llama.py | 111 ++++++++++++++++++-------- unsloth/models/loader.py | 69 +++++++++------- unsloth/models/mistral.py | 12 +-- unsloth/models/vision.py | 3 + 16 files changed, 307 insertions(+), 160 deletions(-) diff --git a/README.md b/README.md index 6bff98cbda..f658e6cebd 100644 --- a/README.md +++ b/README.md @@ -212,6 +212,9 @@ For **advanced installation instructions** or if you see weird errors during ins - Go to our official [Documentation](https://docs.unsloth.ai) for saving to GGUF, checkpointing, evaluation and more! - We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! - We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! +- If you want to download models from the ModelScope community, please use an environment variable: `UNSLOTH_USE_MODELSCOPE=1`, and install the modelscope library by: `pip install modelscope -U`. + +> unsloth_cli.py also supports `UNSLOTH_USE_MODELSCOPE=1` to download models and datasets. please remember to use the model and dataset id in the ModelScope community. 
```python from unsloth import FastLanguageModel diff --git a/pyproject.toml b/pyproject.toml index ce3301547b..bf4c995285 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,20 +148,20 @@ cu124onlytorch250 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu121onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch251 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", - "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu118 = [ "unsloth[huggingface]", diff --git a/unsloth-cli.py b/unsloth-cli.py index ddb0ac8b7b..b7613f92df 100644 --- a/unsloth-cli.py +++ b/unsloth-cli.py @@ -30,11 +30,14 @@ """ import argparse +import os + def run(args): import torch from unsloth import FastLanguageModel from datasets import load_dataset + from transformers.utils import strtobool from trl import SFTTrainer from transformers import TrainingArguments from unsloth import is_bfloat16_supported @@ -86,8 +89,13 @@ def formatting_prompts_func(examples): texts.append(text) return {"text": texts} - # Load and format dataset - dataset = load_dataset(args.dataset, split="train") + use_modelscope = strtobool(os.environ.get('UNSLOTH_USE_MODELSCOPE', 'False')) + if use_modelscope: + from modelscope import MsDataset + dataset = MsDataset.load(args.dataset, split="train") + else: + # Load and format dataset + dataset = load_dataset(args.dataset, split="train") dataset = dataset.map(formatting_prompts_func, batched=True) print("Data is formatted and ready!") diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 980425e1f1..bbeded9fc6 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -17,16 +17,6 @@ import os, re, subprocess, inspect import numpy as np -# # Define a list of modules to check -# MODULES_TO_CHECK = ["bitsandbytes"] - -# # Check if any of the modules in the list have been imported -# for module in MODULES_TO_CHECK: -# if module in sys.modules: -# raise ImportError(f"Unsloth: Please import Unsloth before {module}.") -# 
pass -# pass - # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! @@ -55,7 +45,12 @@ pass # Reduce VRAM usage by reducing fragmentation -os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,roundup_power2_divisions:[64:128,256:64,>:32]" +# And optimize pinning of memory +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = \ + "expandable_segments:True,"\ + "roundup_power2_divisions:[32:256,64:128,256:64,>:32],"\ + "pinned_use_cuda_host_register:True,"\ + "pinned_num_register_threads:8" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: @@ -89,6 +84,36 @@ del os.environ["PYTORCH_CUDA_ALLOC_CONF"] pass +# Fix Xformers performance issues since 0.0.25 +import importlib.util +from pathlib import Path +from importlib.metadata import version as importlib_version +from packaging.version import Version +try: + xformers_version = importlib_version("xformers") + if Version(xformers_version) < Version("0.0.29"): + xformers_location = importlib.util.find_spec("xformers").origin + xformers_location = os.path.split(xformers_location)[0] + cutlass = Path(xformers_location) / "ops" / "fmha" / "cutlass.py" + + if cutlass.exists(): + with open(cutlass, "r+") as f: + text = f.read() + # See https://github.com/facebookresearch/xformers/issues/1176#issuecomment-2545829591 + if "num_splits_key=-1," in text: + text = text.replace("num_splits_key=-1,", "num_splits_key=None,") + f.seek(0) + f.write(text) + f.truncate() + print("Unsloth: Patching Xformers to fix some performance issues.") + pass + pass + pass + pass +except: + pass +pass + # Torch 2.4 has including_emulation major_version, minor_version = torch.cuda.get_device_capability() SUPPORTS_BFLOAT16 = (major_version >= 8) @@ -166,9 +191,18 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo 
try: + unsloth_zoo_version = importlib_version("unsloth_zoo") + if Version(unsloth_zoo_version) < Version("2025.1.1"): + try: + os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") + except: + try: + os.system("pip install --upgrade --no-cache-dir --no-deps --user unsloth_zoo") + except: + raise ImportError("Unsloth: Please update unsloth_zoo via `pip install --upgrade --no-cache-dir --no-deps unsloth_zoo`") import unsloth_zoo except: - raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth-zoo`") + raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo`") pass from .models import * diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index d347cd1878..fcba2eb6d4 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -25,11 +25,6 @@ ) -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), - "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), -}) -@triton.jit def _cross_entropy_forward( logits_ptr , logits_row_stride , @@ -95,13 +90,15 @@ def _cross_entropy_forward( tl.store(logsumexp_ptr, logsumexp) tl.store(loss_ptr, loss) pass +_cross_entropy_forward = triton.jit(_cross_entropy_forward) +_cross_entropy_forward = triton.heuristics( + { + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), + } +)(_cross_entropy_forward) -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), - "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), -}) -@triton.jit def _chunked_cross_entropy_forward( logits_ptr , logits_row_stride , @@ -177,13 +174,15 @@ def _chunked_cross_entropy_forward( pass tl.store(logsumexp_ptr, logsumexp) pass +_chunked_cross_entropy_forward = triton.jit(_chunked_cross_entropy_forward) +_chunked_cross_entropy_forward = triton.heuristics( + { + 
"DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), + } +)(_chunked_cross_entropy_forward) -@triton.heuristics({ - "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), - "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), -}) -@triton.jit def _cross_entropy_backward( logits_ptr , logits_row_stride , @@ -264,10 +263,16 @@ def _cross_entropy_backward( # If y == 0: dC/dx = 0 ==> we already masked it to be = 0, so dloss = 0. tl.store(logits_ptr + col_offsets, dloss * y, mask = mask) pass +_cross_entropy_backward = triton.jit(_cross_entropy_backward) +_cross_entropy_backward = triton.heuristics( + { + "DO_SOFTCAPPING": lambda args: bool(args["DO_SOFTCAPPING" ]), + "DO_LOGIT_SCALING": lambda args: bool(args["DO_LOGIT_SCALING"]), + } +)(_cross_entropy_backward) MAX_FUSED_SIZE = 65536 # 2**16 - class Fast_CrossEntropyLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : float = 0): diff --git a/unsloth/kernels/flex_attention.py b/unsloth/kernels/flex_attention.py index 887ffca1b7..6f82394228 100644 --- a/unsloth/kernels/flex_attention.py +++ b/unsloth/kernels/flex_attention.py @@ -43,9 +43,9 @@ # Logit softcapping @torch.compile(fullgraph = True, dynamic = True, options = torch_compile_options) def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): - n_heads = self.num_heads + n_heads = self.config.num_attention_heads head_dim = self.head_dim - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads n_groups = self.num_key_value_groups # Grouped query attention @@ -130,7 +130,7 @@ def flex_attention(s, t): pass def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): - n_heads = self.num_heads + n_heads = self.config.num_attention_heads head_dim = self.head_dim s = self.config.query_pre_attn_scalar t = self.config.attn_logit_softcapping @@ -147,9 
+147,9 @@ def slow_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): torch_tanh = torch.tanh torch_nn_functional_softmax = torch.nn.functional.softmax def slow_inference_attention_softcapping(Q, K, V, causal_mask, self, bsz, q_len): - n_heads = self.num_heads + n_heads = self.config.num_attention_heads head_dim = self.head_dim - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads n_groups = self.num_key_value_groups # Grouped query attention diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index b74d636c63..6310f7f392 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -53,8 +53,6 @@ def _rms_layernorm_forward( pass -@triton.heuristics({"GEMMA": lambda args: bool(args["GEMMA"]),}) -@triton.jit def _rms_layernorm_backward( dY, dY_row_stride, dX, dX_row_stride, @@ -97,6 +95,12 @@ def _rms_layernorm_backward( output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) tl.store(dX + col_offsets, output, mask = mask) pass +_rms_layernorm_backward = triton.jit(_rms_layernorm_backward) +_rms_layernorm_backward = triton.heuristics( + { + "GEMMA": lambda args: bool(args["GEMMA"]), + } +)(_rms_layernorm_backward) @triton.jit diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 7fe15d0e3b..88b9ccadb4 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -18,8 +18,6 @@ from .utils import calculate_settings ROPE_GROUP_SIZE : int = 4 -@triton.heuristics({"BACKWARD_PASS": lambda args: bool(args["BACKWARD_PASS"]),}) -@triton.jit def _rope_embedding( Q, Q_row_stride, cos, cos_row_stride, @@ -69,6 +67,12 @@ def _rope_embedding( tl.store(Q + offs_q2, Q2*cos1 + Q1*sin1, mask = mask) pass pass +_rope_embedding = triton.jit(_rope_embedding) +_rope_embedding = triton.heuristics( + { + "BACKWARD_PASS": lambda args: bool(args["BACKWARD_PASS"]), + } +)(_rope_embedding) class 
Fast_RoPE_Embedding(torch.autograd.Function): diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 3cb6ffb8f3..86adc0e634 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,9 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2024.12.12" +__version__ = "2025.1.1" __all__ = [ + "SUPPORTS_BFLOAT16", + "is_bfloat16_supported", + "prepare_model_for_kbit_training", "xformers", "xformers_attention", @@ -30,7 +33,6 @@ "offload_to_disk", "offload_input_embeddings", "offload_output_embeddings", - "is_bfloat16_supported", "unsloth_offloaded_gradient_checkpoint", "torch_compile_options", "patch_linear_scaling", @@ -58,7 +60,6 @@ "fused_linear_cross_entropy", "patch_unsloth_smart_gradient_checkpointing", "unpatch_unsloth_smart_gradient_checkpointing", - "create_gradient_checkpointing_buffer", "patch_compiled_autograd", "process_vision_info", @@ -97,7 +98,6 @@ patch_unsloth_smart_gradient_checkpointing, unpatch_unsloth_smart_gradient_checkpointing, - create_gradient_checkpointing_buffer, ) from unsloth_zoo.loss_utils import ( HAS_CUT_CROSS_ENTROPY, @@ -556,6 +556,7 @@ def prepare_model_for_kbit_training( def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + pass return model pass @@ -1203,8 +1204,6 @@ def unsloth_compile_transformers( return pass - if disable: return - model_types = get_transformers_model_type( model_name = model_name, token = token, @@ -1212,6 +1211,8 @@ def unsloth_compile_transformers( trust_remote_code = trust_remote_code, ) + if disable: return + for model_type in model_types: _unsloth_compile_transformers( model_type, diff --git a/unsloth/models/cohere.py b/unsloth/models/cohere.py index 1610949f64..0c36abf681 100644 --- a/unsloth/models/cohere.py +++ b/unsloth/models/cohere.py @@ -94,9 +94,9 @@ def CohereAttention_fast_forward( 
bsz, q_len, _ = hidden_states.size() - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) @@ -259,12 +259,14 @@ def CohereAttention_fast_forward_inference( K1, V1 = past_key_value dtype = Xn.dtype - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim - attention_size = n_heads*head_dim # assert(n_kv_heads * n_groups == n_heads) + + hidden_size = self.config.hidden_size + attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 @@ -281,10 +283,10 @@ def CohereAttention_fast_forward_inference( self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") # Mistral Nemo 12b has weird dimensions - if attention_size != self.hidden_size: - self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + if attention_size != hidden_size: + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") else: - self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] + self.temp_O = self.temp_QA[1][:,:,:hidden_size] pass self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index 0f0a020717..be6b0469d9 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -98,9 +98,9 @@ def Gemma2Attention_fast_forward( bsz, q_len, _ = hidden_states.size() - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * 
n_groups == n_heads) @@ -255,12 +255,14 @@ def Gemma2Attention_fast_forward_inference( K1, V1 = past_key_value dtype = Xn.dtype - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim - attention_size = n_heads*head_dim # assert(n_kv_heads * n_groups == n_heads) + + hidden_size = self.config.hidden_size + attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 @@ -276,7 +278,7 @@ def Gemma2Attention_fast_forward_inference( self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") # Only for Gemma2 - self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index 9466a8d6c1..497a357fe2 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -20,7 +20,8 @@ LlamaLinearScalingRotaryEmbedding, ) from .mistral import * - +from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit +from peft.tuners.lora import Linear4bit as Peft_Linear4bit try: from transformers.models.granite.modeling_granite import ( GraniteAttention, @@ -84,9 +85,9 @@ def GraniteAttention_fast_forward( bsz, q_len, _ = hidden_states.size() - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) @@ 
-181,6 +182,11 @@ def GraniteDecoderLayer_fast_forward( position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ): + residual_multiplier = \ + self.residual_multiplier \ + if hasattr(self, "residual_multiplier") else \ + self.config.residual_multiplier + if use_cache and hasattr(self, "_flag_for_generation"): #past_key_value is not None: residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.input_layernorm, hidden_states) @@ -196,13 +202,13 @@ def GraniteDecoderLayer_fast_forward( position_embeddings = position_embeddings, _flag_for_generation=self._flag_for_generation, ) - hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm_inference(self.post_attention_layernorm, hidden_states) hidden_states = fast_swiglu_inference(self.mlp, hidden_states) - hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) else: residual = hidden_states hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) @@ -217,13 +223,13 @@ def GraniteDecoderLayer_fast_forward( padding_mask=padding_mask, position_embeddings = position_embeddings, ) - hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) # Fully Connected residual = hidden_states hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) hidden_states = self.mlp(hidden_states) - hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) pass outputs = (hidden_states,) @@ -257,12 +263,14 @@ def 
GraniteAttention_fast_forward_inference( K1, V1 = past_key_value dtype = Xn.dtype - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim - attention_size = n_heads*head_dim # assert(n_kv_heads * n_groups == n_heads) + + hidden_size = self.config.hidden_size + attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 @@ -278,7 +286,7 @@ def GraniteAttention_fast_forward_inference( self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") # Only for Gemma2 - self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") @@ -367,6 +375,10 @@ def GraniteModel_fast_forward_inference( hidden_states = self.model.embed_tokens(input_ids) hidden_states = hidden_states.to(self.config.torch_dtype) hidden_states *= self.model.embedding_multiplier + residual_multiplier = \ + self.residual_multiplier \ + if hasattr(self, "residual_multiplier") else \ + self.config.residual_multiplier bsz, q_len, hd = hidden_states.shape seq_len = past_key_values[0][0].shape[-2] @@ -398,12 +410,12 @@ def GraniteModel_fast_forward_inference( position_embeddings = position_embeddings, ) - hidden_states = torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) residual = hidden_states hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) - hidden_states = 
torch.add(residual, hidden_states, alpha = self.config.residual_multiplier) + hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier) next_decoder_cache.append(present_key_value) pass @@ -421,6 +433,18 @@ class GraniteRotaryEmbedding(LlamaRotaryEmbedding): def __init__(self, config): super().__init__(config = config) +def patched_init(original_init): + def new_init(self, *args, **kwargs): + # we can use self.residual_multiplier arg in GraniteDecoderLayer_fast_forward as mentioned here + # https://github.com/huggingface/transformers/blob/e5fd865ebae062b7cf03a81b8c6affeb39f30bec/src/transformers/models/granite/modeling_granite.py#L243 + # The problem is, we don't have access to either the value or config in GraniteModel_fast_forward_inference + # So we need a way to pass this value around. It is probably better to pass on entire config just in case we need it later + config = kwargs.get("config", args[0] if args else None) + if config is not None: + self.config = config + original_init(self, *args, **kwargs) + return new_init + class FastGraniteModel(FastLlamaModel): @staticmethod @@ -435,12 +459,13 @@ def pre_patch(): exec(function, globals()) GraniteAttention.__init__ = eval(init_name) pass - GraniteAttention .forward = GraniteAttention_fast_forward - GraniteSdpaAttention .forward = GraniteAttention_fast_forward - GraniteFlashAttention2.forward = GraniteAttention_fast_forward - GraniteDecoderLayer .forward = GraniteDecoderLayer_fast_forward - GraniteModel .forward = LlamaModel_fast_forward - GraniteForCausalLM .forward = CausalLM_fast_forward(GraniteModel_fast_forward_inference) + GraniteAttention .forward = GraniteAttention_fast_forward + GraniteSdpaAttention .forward = GraniteAttention_fast_forward + GraniteFlashAttention2.forward = GraniteAttention_fast_forward + GraniteDecoderLayer .forward = GraniteDecoderLayer_fast_forward + GraniteModel .forward = LlamaModel_fast_forward + GraniteForCausalLM .forward = 
CausalLM_fast_forward(GraniteModel_fast_forward_inference) + GraniteForCausalLM .__init__ = patched_init(GraniteForCausalLM.__init__) PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward fix_prepare_inputs_for_generation(GraniteForCausalLM) @@ -452,7 +477,7 @@ def pre_patch(): @staticmethod - def post_patch(model): + def post_patch(model, tokenizer): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2.2 @@ -517,7 +542,7 @@ def post_patch(model): for _ in range(3): gc.collect() torch.cuda.empty_cache() - return model + return model, tokenizer pass pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index c94514966f..edd3ddf94f 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -20,6 +20,10 @@ from ._utils import __version__ from torch.nn.functional import scaled_dot_product_attention from transformers import __version__ as transformers_version +from unsloth_zoo.utils import Version +transformers_version = Version(transformers_version) +# Transformers moved rotary embeddings out of all attention layers +IS_ATTENTION_REFACTOR = transformers_version > Version("4.47.1") from transformers.models.llama.modeling_llama import ( logger, BaseModelOutputWithPast, @@ -146,12 +150,14 @@ def LlamaAttention_fast_forward_inference( K1, V1 = past_key_value dtype = Xn.dtype - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim - attention_size = n_heads*head_dim # assert(n_kv_heads * n_groups == n_heads) + + hidden_size = self.config.hidden_size + attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 @@ -168,10 +174,10 @@ def LlamaAttention_fast_forward_inference( self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") # Mistral Nemo 12b has weird dimensions - if 
attention_size != self.hidden_size: - self.temp_O = torch.empty((1, bsz, self.hidden_size), dtype = dtype, device = "cuda:0") + if attention_size != hidden_size: + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") else: - self.temp_O = self.temp_QA[1][:,:,:self.hidden_size] + self.temp_O = self.temp_QA[1][:,:,:hidden_size] pass self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") @@ -356,9 +362,9 @@ def LlamaAttention_fast_forward( bsz, q_len, _ = hidden_states.size() - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) @@ -786,7 +792,7 @@ def LlamaModel_fast_forward( pass pass - if transformers_version > "4.47.1" and hasattr(self, "rotary_emb"): + if IS_ATTENTION_REFACTOR and not hasattr(self.layers[0].self_attn, "rotary_emb"): # Transformers main has made it mandatory to pass position_embeddings # https://github.com/huggingface/transformers/pull/34858 position_embeddings = self.rotary_emb(hidden_states, position_ids, self.config.max_position_embeddings) @@ -996,18 +1002,20 @@ def _CausalLM_fast_forward( lm_head = self.lm_head.weight logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) + dtype = lm_head.dtype if bsz == 1 and q_len == 1: - logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) + logits = torch.mv(lm_head, hidden_states.ravel().to(dtype)) logits = logits.unsqueeze(0).unsqueeze(0) elif num_logits_to_keep != 0: - logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) + logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(dtype)) else: RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" # < 1024 Normal Unsloth uses 
less VRAM! if bsz*q_len <= 1024: RETURN_LOGITS = True if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) loss = fused_linear_cross_entropy( hidden_states = hidden_states, @@ -1029,7 +1037,7 @@ def _CausalLM_fast_forward( ) return output pass - logits = self.lm_head(hidden_states.to(lm_head.dtype)) + logits = self.lm_head(hidden_states.to(dtype)) pass torch_dtype = __DTYPE_MAP.get(self.config.torch_dtype, None) @@ -1607,6 +1615,9 @@ def from_pretrained( elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: logger.warning_once("Device does not support bfloat16. Will change to float16.") dtype = torch.float16 + elif dtype == torch.float16 and SUPPORTS_BFLOAT16: + logger.warning_once("Device supports bfloat16 but you selected float16. Will change to bfloat16.") + dtype = torch.bfloat16 assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) @@ -1879,6 +1890,13 @@ def from_pretrained( internal_model = internal_model.model pass internal_model._saved_temp_tokenizer = tokenizer + + # For transformers > 4.47.1, we need to add rotary_emb to all attention layers + if IS_ATTENTION_REFACTOR or hasattr(model.model, "rotary_emb"): + rotary_emb = model.model.rotary_emb + for layer in model.model.layers: + layer.self_attn.rotary_emb = rotary_emb + pass return model, tokenizer pass @@ -1967,29 +1985,41 @@ def get_peft_model( if "embed_tokens" in new_target_modules: print("Unsloth: Training embed_tokens in mixed precision to save VRAM") - dtype = model.model.model.embed_tokens.modules_to_save.default.weight.dtype - model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) - model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) + new_dtype = model.get_input_embeddings().modules_to_save.default.weight.dtype + if new_dtype == 
torch.float16: + # See https://github.com/unslothai/unsloth/pull/1200 + # Tesla T4 must use float32 and not float16 + new_dtype = torch.float32 + pass + + model.get_input_embeddings().modules_to_save.default\ + .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + model.get_input_embeddings().modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! - model.model.model.embed_tokens.original_module\ + model.get_input_embeddings().original_module\ .to(device = "cpu", non_blocking = True) - model.model.model.embed_tokens.original_module.requires_grad_(False) + model.get_input_embeddings().original_module.requires_grad_(False) pass if "lm_head" in new_target_modules: print("Unsloth: Training lm_head in mixed precision to save VRAM") - dtype = model.model.model.lm_head.modules_to_save.default.weight.dtype - model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) - model.model.lm_head.modules_to_save.default.requires_grad_(True) + new_dtype = model.get_output_embeddings().modules_to_save.default.weight.dtype + if new_dtype == torch.float16: + # See https://github.com/unslothai/unsloth/pull/1200 + # Tesla T4 must use float32 and not float16 + new_dtype = torch.float32 + pass + + model.get_output_embeddings().modules_to_save.default\ + .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + model.get_output_embeddings().modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! 
- model.model.lm_head.original_module\ + model.get_output_embeddings().original_module\ .to(device = "cpu", non_blocking = True) - model.model.lm_head.original_module.requires_grad_(False) + model.get_output_embeddings().original_module.requires_grad_(False) pass return model @@ -2216,25 +2246,36 @@ def get_peft_model( model = FastLlamaModel.patch_peft_model(model, use_gradient_checkpointing) - # Now patch lm_head and embed_tokens if train_embed_tokens: print("Unsloth: Training embed_tokens in mixed precision to save VRAM") - assert(hasattr(model.model.model.embed_tokens, "modules_to_save")) + assert(hasattr(model.get_input_embeddings(), "modules_to_save")) - dtype = model.model.model.embed_tokens.modules_to_save.default.weight.dtype - model.model.model.embed_tokens.modules_to_save.default\ - .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) - model.model.model.embed_tokens.modules_to_save.default.requires_grad_(True) + new_dtype = model.get_input_embeddings().modules_to_save.default.weight.dtype + if new_dtype == torch.float16: + # See https://github.com/unslothai/unsloth/pull/1200 + # Tesla T4 must use float32 and not float16 + new_dtype = torch.float32 + pass + + model.get_input_embeddings().modules_to_save.default\ + .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + model.get_input_embeddings().modules_to_save.default.requires_grad_(True) pass if train_lm_head: print("Unsloth: Training lm_head in mixed precision to save VRAM") - assert(hasattr(model.model.lm_head, "modules_to_save")) + assert(hasattr(model.get_output_embeddings(), "modules_to_save")) + + new_dtype = model.get_output_embeddings().modules_to_save.default.weight.dtype + if new_dtype == torch.float16: + # See https://github.com/unslothai/unsloth/pull/1200 + # Tesla T4 must use float32 and not float16 + new_dtype = torch.float32 + pass - dtype = model.model.lm_head.modules_to_save.default.weight.dtype - 
model.model.lm_head.modules_to_save.default\ - .to(device = "cuda:0", dtype=(dtype if (dtype != torch.float16) else torch.float32), non_blocking = True) - model.model.lm_head.modules_to_save.default.requires_grad_(True) + model.get_output_embeddings().modules_to_save.default\ + .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + model.get_output_embeddings().modules_to_save.default.requires_grad_(True) pass # Patch tokenizer to pad to the right diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 113c4fbc70..e9caad0e60 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -31,8 +31,17 @@ pass from huggingface_hub import HfFileSystem +# [TODO] Move USE_MODELSCOPE to utils +USE_MODELSCOPE = os.environ.get("UNSLOTH_USE_MODELSCOPE", "0") == "1" +if USE_MODELSCOPE: + import importlib + if importlib.util.find_spec("modelscope") is None: + raise ImportError(f'You are using the modelscope hub, please install modelscope by `pip install modelscope -U`') + pass +pass + # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! 
-from unsloth_zoo.utils import Version +from unsloth_zoo.utils import Version, _get_dtype transformers_version = Version(transformers_version) SUPPORTS_FOURBIT = transformers_version >= Version("4.37") SUPPORTS_GEMMA = transformers_version >= Version("4.38") @@ -47,28 +56,11 @@ pass import torch -def _get_dtype(dtype): - __DTYPE_MAP = { - "float32": torch.float32, - torch.float32: torch.float32, - "float16": torch.float16, - torch.float16: torch.float16, - "bfloat16": torch.bfloat16, - torch.bfloat16: torch.bfloat16, - } - if dtype is None or dtype == None: return None - elif dtype in __DTYPE_MAP: return __DTYPE_MAP[dtype] - else: - print(f"Unsloth: {dtype} is not recognized, so we'll default to None") - return None - pass -pass - class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( - model_name = "unsloth/llama-3-8b-bnb-4bit", + model_name = "unsloth/Llama-3.2-1B-Instruct", max_seq_length = None, dtype = None, load_in_4bit = True, @@ -80,12 +72,19 @@ def from_pretrained( use_gradient_checkpointing = "unsloth", resize_model_vocab = None, revision = None, + use_exact_model_name = False, *args, **kwargs, ): if token is None: token = get_token() old_model_name = model_name - model_name = get_model_name(model_name, load_in_4bit) + if not use_exact_model_name: + model_name = get_model_name(model_name, load_in_4bit) + + if USE_MODELSCOPE and not os.path.exists(model_name): + from modelscope import snapshot_download + model_name = snapshot_download(model_name) + pass # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled @@ -165,7 +164,9 @@ def from_pretrained( # Get base model for PEFT: if is_peft: # Check base model again for PEFT - model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_name = peft_config.base_model_name_or_path + if not use_exact_model_name: + model_name = get_model_name(model_name, load_in_4bit) 
model_config = AutoConfig.from_pretrained( model_name, token = token, @@ -354,6 +355,7 @@ def from_pretrained( revision = None, return_logits = False, # Return logits fullgraph = True, # No graph breaks + use_exact_model_name = False, *args, **kwargs, ): if token is None: token = get_token() @@ -361,10 +363,16 @@ def from_pretrained( patch_compiled_autograd() patch_compiling_bitsandbytes() if use_gradient_checkpointing == "unsloth": - patch_unsloth_smart_gradient_checkpointing() + patch_unsloth_smart_gradient_checkpointing(dtype = dtype) old_model_name = model_name - model_name = get_model_name(model_name, load_in_4bit) + if not use_exact_model_name: + model_name = get_model_name(model_name, load_in_4bit) + + if USE_MODELSCOPE and not os.path.exists(model_name): + from modelscope import snapshot_download + model_name = snapshot_download(model_name) + pass # First check if it's a normal model via AutoConfig from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled @@ -410,7 +418,7 @@ def from_pretrained( exist_config = os.path.exists(os.path.join(model_name, "config.json")) both_exist = exist_adapter_config and exist_config else: - files = HfFileSystem(token = token).glob(os.path.join(model_name, "*.json")) + files = HfFileSystem(token = token).glob(f"{model_name}/*.json") files = (os.path.split(x)[-1] for x in files) if sum(x == "adapter_config.json" or x == "config.json" for x in files) >= 2: both_exist = True @@ -443,7 +451,10 @@ def from_pretrained( # Get base model for PEFT: if is_peft: # Check base model again for PEFT - model_name = get_model_name(peft_config.base_model_name_or_path, load_in_4bit) + model_name = peft_config.base_model_name_or_path + if not use_exact_model_name: + model_name = get_model_name(model_name, load_in_4bit) + model_config = AutoConfig.from_pretrained( model_name, token = token, @@ -454,7 +465,10 @@ def from_pretrained( if not was_disabled: enable_progress_bars() - with 
contextlib.redirect_stdout(open(os.devnull, "w")): + do_logging = os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") == "1" + redirector = sys.stdout if do_logging else open(os.devnull, "w") + + with contextlib.redirect_stdout(redirector): patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, @@ -470,7 +484,7 @@ def from_pretrained( fuse_lm_head = True, gradient_checkpointing = True, manual_replacements = True, - fast_lora_forwards = False, + fast_lora_forwards = True, fast_residual_stream = False, accurate_accumulation = True, epilogue_fusion = True, @@ -484,6 +498,7 @@ def from_pretrained( return_logits = return_logits, ) pass + if do_logging: redirector.close() # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index d6c6946664..9a97015f9b 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -64,9 +64,9 @@ def MistralAttention_fast_forward( bsz, q_len, _ = hidden_states.size() - n_heads = self.num_heads + n_heads = self.config.num_attention_heads n_groups = self.num_key_value_groups - n_kv_heads = self.num_key_value_heads + n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim assert(n_kv_heads * n_groups == n_heads) @@ -278,16 +278,16 @@ def MistralForCausalLM_fast_forward( # Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now. 
def patch_mistral_nemo_attention(function): function = function.replace( - "(self.head_dim * self.num_heads) != self.hidden_size", + "(self.head_dim * self.config.num_attention_heads) != self.config.hidden_size", "False", ) function = function.replace( - "self.head_dim = self.hidden_size // self.num_heads", + "self.head_dim = self.config.hidden_size // self.config.num_attention_heads", "self.head_dim = config.head_dim", ) function = function.replace( - "self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)", - "self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)", + "self.o_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False)", + "self.o_proj = nn.Linear(self.config.num_attention_heads * self.head_dim, self.config.hidden_size, bias=False)", ) return function pass diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 2dc4b88dfa..51450aa0d9 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -30,6 +30,7 @@ from unsloth_zoo.peft_utils import ( get_peft_regex, SKIP_QUANTIZATION_MODULES, + requires_grad_for_gradient_checkpointing, ) from triton import __version__ as triton_version @@ -275,6 +276,8 @@ def get_peft_model( use_gradient_checkpointing = use_gradient_checkpointing, ) model = get_peft_model(model, lora_config) + # Enable gradients on modules which are trainable + requires_grad_for_gradient_checkpointing(model) model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) From 0507bef4a4ff30e001651e96f8a4c98c3041b788 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 7 Jan 2025 04:29:09 -0800 Subject: [PATCH 0982/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bf4c995285..43ec13fd1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ 
https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2024.12.7", + "unsloth_zoo>=2025.1.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2024.12.7", + "unsloth_zoo>=2025.1.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", From c83153ffc0344c6df9cdb466c25adc97ac2f2210 Mon Sep 17 00:00:00 2001 From: sebaxakerhtc <32651506+sebaxakerhtc@users.noreply.github.com> Date: Wed, 8 Jan 2025 00:51:17 +0200 Subject: [PATCH 0983/1088] Update __init__.py (#1520) * Update __init__.py This PR is solving the (issue)[https://github.com/unslothai/unsloth/issues/1518] with some GPUs * Update __init__.py --------- Co-authored-by: Daniel Han --- unsloth/__init__.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index bbeded9fc6..d460432bbb 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -48,9 +48,11 @@ # And optimize pinning of memory os.environ["PYTORCH_CUDA_ALLOC_CONF"] = \ "expandable_segments:True,"\ - "roundup_power2_divisions:[32:256,64:128,256:64,>:32],"\ - "pinned_use_cuda_host_register:True,"\ - "pinned_num_register_threads:8" + "roundup_power2_divisions:[32:256,64:128,256:64,>:32]" + +# [TODO] Check why some GPUs don't work +# "pinned_use_cuda_host_register:True,"\ +# "pinned_num_register_threads:8" # Hugging Face Hub faster downloads if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ: From 562cfd33971814d98c5a68c09b183756a23da427 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 Jan 2025 15:10:46 -0800 Subject: [PATCH 0984/1088] Phi-4 (#1523) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py 
* Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han * Merge branch 'main' into nightly * Phi 4 --------- Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> --- unsloth/chat_templates.py | 40 
+++++++++++++++++++++++++++++++++++++++ unsloth/models/_utils.py | 2 +- unsloth/models/mapper.py | 5 +++++ 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index da10f7e003..d8dc385223 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -890,6 +890,46 @@ DEFAULT_SYSTEM_MESSAGE["qwen2.5"] = qwen25_default_system_message # No system message in Qwen 2.5 pass +# =========================================== Phi-4 +# "{{ bos_token }}"\ # Phi-4 removes BOS? +phi4_template = \ + "{% for message in messages %}"\ + "{% if (message['role'] == 'system') %}"\ + "{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}"\ + "{% elif (message['role'] == 'user') %}"\ + "{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|>'}}"\ + "{% elif (message['role'] == 'assistant') %}"\ + "{{'<|im_start|>assistant<|im_sep|>' + message['content'] + '<|im_end|>'}}"\ + "{% endif %}"\ + "{% endfor %}"\ + "{% if add_generation_prompt %}"\ + "{{ '<|im_start|>assistant<|im_sep|>' }}"\ + "{% endif %}" +pass + +_phi4_ollama_template = \ + "{{ if .System }}<|im_start|><|system|><|im_sep|>{{ .System }}<|im_end|>{{ end }}"\ + "{{ if .Prompt }}<|im_start|><|user|><|im_sep|>{{ .Prompt }}<|im_end|>{{ end }}"\ + "<|im_start|><|assistant|><|im_sep|>{{ .Response }}<|im_end|>" + +# Ollama from https://www.ollama.com/library/phi4 is different +phi4_ollama = \ +f''' +FROM {{__FILE_LOCATION__}} +TEMPLATE """{_phi4_ollama_template}""" +PARAMETER stop "<|im_end|>" +PARAMETER stop "<|im_start|>" +PARAMETER stop "<|im_sep|>" +PARAMETER temperature 1.5 +PARAMETER min_p 0.1 +''' + +phi4_template_eos_token = "<|im_end|>" +CHAT_TEMPLATES["phi-4"] = (phi4_template, phi4_template_eos_token, False, phi4_ollama,) +DEFAULT_SYSTEM_MESSAGE["phi-4"] = None # No system message in Phi-4 +pass + + def _change_system_message(template: str, type_chat_template: str, system_message: str = None): system_message_pattern = 
r"\{system_message\}" diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 86adc0e634..a93f18cd41 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.1" +__version__ = "2025.1.2" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 41f7444643..b7b24b5ccf 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -520,6 +520,11 @@ "unsloth/Llama-3.3-70B-Instruct", "meta-llama/Llama-3.3-70B-Instruct", ), + "unsloth/phi-4-unsloth-bnb-4bit" : ( + "unsloth/phi-4", + "microsoft/phi-4", + "unsloth/phi-4-bnb-4bit", + ), } INT_TO_FLOAT_MAPPER = {} From 8a5cff16de5b8c6b126f8987acf5831bd7aa79dc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 Jan 2025 15:40:27 -0800 Subject: [PATCH 0985/1088] Phi-4 bug fix --- unsloth/models/_utils.py | 2 +- unsloth/tokenizer_utils.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a93f18cd41..ab752f9123 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.1.2" +__version__ = "2025.1.3" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 384b4bbca5..67a4c663b5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -265,6 +265,10 @@ def assert_same_tokenization(slow_tokenizer, fast_tokenizer): ))) all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens)) + # Remove replacement char for false positive + replacement_char = b"\xc3\xaf\xc2\xbf\xc2\xbd".decode("utf-8") + all_special_tokens = [x for x in all_special_tokens if x != replacement_char] + # Check if chat template is enabled! check_chat_template1 = True check_chat_template2 = True From bed5e06627c3d0e7678c9c2bd4b1be268e53a142 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 Jan 2025 15:48:11 -0800 Subject: [PATCH 0986/1088] Update tokenizer_utils.py --- unsloth/tokenizer_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 67a4c663b5..f2b0da8600 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -523,6 +523,9 @@ def _load_correct_tokenizer( # Ignore Mistral ones - they're a bit weird to handle! 
elif "mistral" in tokenizer_name.lower(): return fast_tokenizer + # Ignore Phi-4 ones as well + elif "phi-4" in tokenizer_name.lower(): + return fast_tokenizer elif slow_tokenizer is not None: if hasattr(fast_tokenizer, "add_bos_token") and hasattr(slow_tokenizer, "add_bos_token"): fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token From c05d145640981e8a6e69ccf9fea38642b770e7db Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 Jan 2025 15:48:40 -0800 Subject: [PATCH 0987/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index ab752f9123..086c51e344 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.3" +__version__ = "2025.1.4" __all__ = [ "SUPPORTS_BFLOAT16", From 0e0bd1ad393972f629b624a24aadddaf47bb0a00 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 8 Jan 2025 16:46:04 -0800 Subject: [PATCH 0988/1088] Update Unsloth-Zoo --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 43ec13fd1c..b24abd3559 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.1.1", + "unsloth_zoo>=2025.1.2", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.1.1", + "unsloth_zoo>=2025.1.2", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 
d460432bbb..8002fbaefd 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -194,7 +194,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.1.1"): + if Version(unsloth_zoo_version) < Version("2025.1.2"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 086c51e344..0036a18c4b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.4" +__version__ = "2025.1.5" __all__ = [ "SUPPORTS_BFLOAT16", From f920ddd39c7badddc616098e1b4d0889ed176713 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 8 Jan 2025 23:02:27 -0800 Subject: [PATCH 0989/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cdb48d2c26..7f25e5ecc2 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ unsloth logo - + From 2bf47e2a8a48ce2e24aacccb938272f9c81d8ee9 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Thu, 9 Jan 2025 16:59:43 -0800 Subject: [PATCH 0990/1088] Update README.md --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7f25e5ecc2..56eefaf1d1 100644 --- a/README.md +++ b/README.md @@ -23,13 +23,13 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 60% less | +| **Phi-4** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 50% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 40% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 60% less | -| **Phi-3.5 (mini)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_3.5_Mini-Conversational.ipynb) | 2x faster | 50% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 63% less | | **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 63% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 73% less | -| **Ollama** | [▶️ Start for free](https://colab.research.google.com/drive/1WZDi7APtQ9VsvOrQSSC5DDtxq159j8iZ?usp=sharing) | 1.9x faster | 43% less | +| **Ollama** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb) | 1.9x faster | 43% less | | **ORPO** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) | 1.9x 
faster | 43% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 43% less | @@ -41,7 +41,8 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is now supported. +- 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft is now supported. We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) +- 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. - 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. - 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! 
[Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) From dcb700a0ac7e7d0bcb101a9cab1b7391942c9480 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 10 Jan 2025 04:34:23 -0800 Subject: [PATCH 0991/1088] Update mapper.py --- unsloth/models/mapper.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b7b24b5ccf..c1113f5294 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -462,10 +462,9 @@ "Qwen/Qwen2-VL-7B-Instruct", "unsloth/Qwen2-VL-7B-Instruct-bnb-4bit", ), - "unsloth/Qwen2-VL-72B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B-Instruct-bnb-4bit" : ( "unsloth/Qwen2-VL-72B-Instruct", "Qwen/Qwen2-VL-72B-Instruct", - "unsloth/Qwen2-VL-72B-Instruct-bnb-4bit", ), "unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision-Instruct", From 248750895a8361b27954c58bb0061395835ba5cc Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sat, 11 Jan 2025 17:34:51 -0800 Subject: [PATCH 0992/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9e288ad2d5..7c84c5ef94 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3.2, Mistral, Phi-3.5, Qwen 2.5 & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3.3, Mistral, Phi-4, Qwen 2.5 & Gemma 2-5x faster with 80% less memory! 
![](https://i.ibb.co/sJ7RhGG/image-41.png) From 08ff7883e512b98b31e9fa8e17f5f95283d33ca1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 14 Jan 2025 03:10:29 -0800 Subject: [PATCH 0993/1088] Update issue templates --- .github/ISSUE_TEMPLATE/bug_report.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..754c7ab11d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,24 @@ +--- +name: Bug report +about: Reporting a bug +title: "[BUG]" +labels: '' +assignees: '' + +--- + +1. Have you tried uninstall Unsloth and upgrading? +```bash +pip uninstall unsloth unsloth_zoo -y +pip install --upgrade --no-deps --no-cache-dir unsloth unsloth_zoo +``` +2. If there's a bug, please print out your Unsloth info: +```python +🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning. +==((====))== Unsloth 2024.8: Fast Llama patching. Transformers = 4.44.2. + \\ /| GPU: Tesla T4. Max memory: 14.748 GB. Platform = Linux. +O^O/ \_/ \ Pytorch: 2.4.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1. +\ / Bfloat16 = FALSE. FA [Xformers = 0.0.27.post2. FA2 = False] + "-____-" Free Apache license: http://github.com/unslothai/unsloth +``` +3. 
Otherwise, please describe your problem below: From 09df3ddeb64fb08dac71eea570b09a101906f99b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 14 Jan 2025 03:12:17 -0800 Subject: [PATCH 0994/1088] Update bug_report.md (#1538) --- .github/ISSUE_TEMPLATE/bug_report.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 754c7ab11d..24c708003f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ assignees: '' pip uninstall unsloth unsloth_zoo -y pip install --upgrade --no-deps --no-cache-dir unsloth unsloth_zoo ``` -2. If there's a bug, please print out your Unsloth info: +2. If there's a bug, please print out your Unsloth info (or do a screenshot): ```python 🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning. ==((====))== Unsloth 2024.8: Fast Llama patching. Transformers = 4.44.2. @@ -21,4 +21,4 @@ O^O/ \_/ \ Pytorch: 2.4.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1. \ / Bfloat16 = FALSE. FA [Xformers = 0.0.27.post2. FA2 = False] "-____-" Free Apache license: http://github.com/unslothai/unsloth ``` -3. Otherwise, please describe your problem below: +3. 
Otherwise, describe your problem: From d8ad96b018bbe90861144818c2b3e1b229287fc7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 14 Jan 2025 03:13:35 -0800 Subject: [PATCH 0995/1088] Update issue templates --- .github/ISSUE_TEMPLATE/{bug_report.md => new-issue.md} | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename .github/ISSUE_TEMPLATE/{bug_report.md => new-issue.md} (86%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/new-issue.md similarity index 86% rename from .github/ISSUE_TEMPLATE/bug_report.md rename to .github/ISSUE_TEMPLATE/new-issue.md index 24c708003f..f0f4e98c46 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/new-issue.md @@ -1,7 +1,7 @@ --- -name: Bug report -about: Reporting a bug -title: "[BUG]" +name: New Issue +about: Bug / Feature Request +title: '' labels: '' assignees: '' @@ -21,4 +21,4 @@ O^O/ \_/ \ Pytorch: 2.4.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1. \ / Bfloat16 = FALSE. FA [Xformers = 0.0.27.post2. FA2 = False] "-____-" Free Apache license: http://github.com/unslothai/unsloth ``` -3. Otherwise, describe your problem: +3. Otherwise, describe your problem or **feature request**: From 3e9ed3cc80ad7a4122efac61ab9560f6e3ff67aa Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 14 Jan 2025 23:20:07 -0800 Subject: [PATCH 0996/1088] Update README.md Update to benchmark tables --- README.md | 157 +++++++++++------------------------------------------- 1 file changed, 30 insertions(+), 127 deletions(-) diff --git a/README.md b/README.md index 7c84c5ef94..dc2136f6fa 100644 --- a/README.md +++ b/README.md @@ -82,23 +82,17 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and ## 🥇 Performance Benchmarking -- For the full list of **reproducible** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) +- For our most detailed benchmarks, read our [Llama 3.3 Blog](https://unsloth.ai/blog/llama3-3). +- Benchmarking of Unsloth was also conducted by [🤗Hugging Face](https://huggingface.co/blog/unsloth-trl). -| 1 A100 40GB | 🤗Hugging Face | Flash Attention | 🦥Unsloth Open Source | 🦥[Unsloth Pro](https://unsloth.ai/pricing) | -|--------------|--------------|-----------------|---------------------|-----------------| -| Alpaca | 1x | 1.04x | 1.98x | **15.64x** | -| LAION Chip2 | 1x | 0.92x | 1.61x | **20.73x** | -| OASST | 1x | 1.19x | 2.17x | **14.83x** | -| Slim Orca | 1x | 1.18x | 2.22x | **14.82x** | - -- Benchmarking table below was conducted by [🤗Hugging Face](https://huggingface.co/blog/unsloth-trl). +We tested using the Alpaca Dataset, a batch size of 2, gradient accumulation steps of 4, rank = 32, and applied QLoRA on all linear layers (q, k, v, o, gate, up, down): + +| Model | VRAM | 🦥 Unsloth speed | 🦥 VRAM reduction | 🦥 Longer context | 😊 Hugging Face + FA2 | +|----------------|-------|-----------------|----------------|----------------|--------------------| +| Llama 3.3 (70B)| 80GB | 2x | >75% | 13x longer | 1x | +| Llama 3.1 (8B) | 80GB | 2x | >70% | 12x longer | 1x | -| Free Colab T4 | Dataset | 🤗Hugging Face | Pytorch 2.1.1 | 🦥Unsloth | 🦥 VRAM reduction | -| --- | --- | --- | --- | --- | --- | -| Llama-2 7b | OASST | 1x | 1.19x | 1.95x | -43.3% | -| Mistral 7b | Alpaca | 1x | 1.07x | 1.56x | -13.7% | -| Tiny Llama 1.1b | Alpaca | 1x | 2.06x | 3.87x | -73.8% | -| DPO with Zephyr | Ultra Chat | 1x | 1.09x | 1.55x | -18.6% | +
    ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -359,119 +353,28 @@ dpo_trainer.train() ``` ## 🥇 Detailed Benchmarking Tables -- Click "Code" for fully reproducible examples -- "Unsloth Equal" is a preview of our PRO version, with code stripped out. All settings and the loss curve remains identical. -- For the full list of benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables) - -| 1 A100 40GB | 🤗Hugging Face | Flash Attention 2 | 🦥Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | -| code | [Code](https://colab.research.google.com/drive/1u4dBeM-0vGNVmmO6X7cScAut-Hyt4KDF?usp=sharing) | [Code](https://colab.research.google.com/drive/1fgTOxpMbVjloQBvZyz4lF4BacKSZOB2A?usp=sharing) | [Code](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Code](https://colab.research.google.com/drive/1ANW8EFL3LVyTD7Gq4TkheC1Z7Rxw-rHp?usp=sharing) | | | -| seconds| 1040 | 1001 | 525 | 419 | 196 | 67 | -| memory MB| 18235 | 15365 | 9631 | 8525 | | | -| % saved| | 15.74 | 47.18 | 53.25 | | | | - -### Llama-Factory 3rd party benchmarking -- [Link to performance table.](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-Comparison) TGS: tokens per GPU per second. Model: LLaMA2-7B. GPU: NVIDIA A100 * 1. Batch size: 4. Gradient accumulation: 2. LoRA rank: 8. Max length: 1024. - -| Method | Bits | TGS | GRAM | Speed | -| --- | --- | --- | --- | --- | -| HF | 16 | 2392 | 18GB | 100% | -| HF+FA2 | 16 | 2954 | 17GB | 123% | -| Unsloth+FA2 | 16 | 4007 | 16GB | **168%** | -| HF | 4 | 2415 | 9GB | 101% | -| Unsloth+FA2 | 4 | 3726 | 7GB | **160%** | - -### Performance comparisons between popular models -
    - Click for specific model benchmarking tables (Mistral 7b, CodeLlama 34b etc.) - -### Mistral 7b -| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Mistral 7B Slim Orca | 1x | 1.15x | 2.15x | 2.53x | 4.61x | **13.69x** | -| code | [Code](https://colab.research.google.com/drive/1mePk3KzwTD81hr5mcNcs_AX3Kbg_Ha0x?usp=sharing) | [Code](https://colab.research.google.com/drive/1dgHxjvTmX6hb0bPcLp26RXSE6_n9DKj7?usp=sharing) | [Code](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | [Code](https://colab.research.google.com/drive/18yOiyX0T81mTwZqOALFSCX_tSAqju6aD?usp=sharing) | | -| seconds | 1813 | 1571 | 842 | 718 | 393 | 132 | -| memory MB | 32853 | 19385 | 12465 | 10271 | | | -| % saved| | 40.99 | 62.06 | 68.74 | | | - -### CodeLlama 34b -| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Code Llama 34B | OOM ❌ | 0.99x | 1.87x | 2.61x | 4.27x | 12.82x | -| code | [▶️ Code](https://colab.research.google.com/drive/1ykfz3BqrtC_AUFegCzUQjjfUNlxp6Otc?usp=sharing) | [Code](https://colab.research.google.com/drive/12ZypxQh7OC6kBXvWZI-5d05I4m-B_hoR?usp=sharing) | [Code](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Code](https://colab.research.google.com/drive/1fm7wqx9MJ0kRrwKOfmLkK1Rmw-pySahB?usp=sharing) | | -| seconds | 1953 | 1982 | 1043 | 748 | 458 | 152 | -| memory MB | 40000 | 33217 | 27413 | 22161 | | | -| % saved| | 16.96| 31.47 | 44.60 | | | | - -### 1 Tesla T4 - -| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | 
-|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| -| Alpaca | 1x | 1.09x | 1.69x | 1.79x | 2.93x | **8.3x** | -| code | [▶️ Code](https://colab.research.google.com/drive/1XpLIV4s8Bj5uryB-X2gqM88oRGHEGdaB?usp=sharing) | [Code](https://colab.research.google.com/drive/1LyXu6CjuymQg6ddHX8g1dpUvrMa1nn4L?usp=sharing) | [Code](https://colab.research.google.com/drive/1gsv4LpY7C32otl1rgRo5wXTk4HIitXoM?usp=sharing) | [Code](https://colab.research.google.com/drive/1VtULwRQwhEnVdNryjm27zXfdSM1tNfFK?usp=sharing) | | | -| seconds | 1599 | 1468 | 942 | 894 | 545 | 193 | -| memory MB | 7199 | 7059 | 6459 | 5443 | | | -| % saved | | 1.94 | 10.28 | 24.39 | | | - -### 2 Tesla T4s via DDP - - | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|----------|-------------|-----------------|--------------|---------------|-------------| -| Alpaca | 1x | 0.99x | 4.95x | 4.44x | 7.28x | **20.61x** | -| code | [▶️ Code](https://www.kaggle.com/danielhanchen/hf-original-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | | -| seconds | 9882 | 9946 | 1996 | 2227 | 1357 | 480 | -| memory MB| 9176 | 9128 | 6904 | 6782 | | | -| % saved | | 0.52 | 24.76 | 26.09 | | | | -
    - -### Performance comparisons on 1 Tesla T4 GPU: -
    - Click for Time taken for 1 epoch - -One Tesla T4 on Google Colab -`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 1 T4 | 23h 15m | 56h 28m | 8h 38m | 391h 41m | -| Unsloth Open | 1 T4 | 13h 7m (1.8x) | 31h 47m (1.8x) | 4h 27m (1.9x) | 240h 4m (1.6x) | -| Unsloth Pro | 1 T4 | 3h 6m (7.5x) | 5h 17m (10.7x) | 1h 7m (7.7x) | 59h 53m (6.5x) | -| Unsloth Max | 1 T4 | 2h 39m (8.8x) | 4h 31m (12.5x) | 0h 58m (8.9x) | 51h 30m (7.6x) | - -**Peak Memory Usage** - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 1 T4 | 7.3GB | 5.9GB | 14.0GB | 13.3GB | -| Unsloth Open | 1 T4 | 6.8GB | 5.7GB | 7.8GB | 7.7GB | -| Unsloth Pro | 1 T4 | 6.4GB | 6.4GB | 6.4GB | 6.4GB | -| Unsloth Max | 1 T4 | 11.4GB | 12.4GB | 11.9GB | 14.4GB | -
    - -
    - Click for Performance Comparisons on 2 Tesla T4 GPUs via DDP: -**Time taken for 1 epoch** - -Two Tesla T4s on Kaggle -`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m * | -| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) * | -| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) * | +### Context length benchmarks +#### Llama 3.1 (8B) max. context length +We tested Llama 3.1 (8B) Instruct and did 4bit QLoRA on all linear layers (Q, K, V, O, gate, up and down) with rank = 32 with a batch size of 1. We padded all sequences to a certain maximum sequence length to mimic long context finetuning workloads. +| GPU VRAM | 🦥Unsloth context length | Hugging Face + FA2 | +|----------|-----------------------|-----------------| +| 8 GB | 2,972 | OOM | +| 12 GB | 21,848 | 932 | +| 16 GB | 40,724 | 2,551 | +| 24 GB | 78,475 | 5,789 | +| 40 GB | 153,977 | 12,264 | +| 48 GB | 191,728 | 15,502 | +| 80 GB | 342,733 | 28,454 | + +#### Llama 3.3 (70B) max. context length +We tested Llama 3.3 (70B) Instruct on a 80GB A100 and did 4bit QLoRA on all linear layers (Q, K, V, O, gate, up and down) with rank = 32 with a batch size of 1. We padded all sequences to a certain maximum sequence length to mimic long context finetuning workloads. 
+ +| GPU VRAM | 🦥Unsloth context length | Hugging Face + FA2 | +|----------|------------------------|------------------| +| 48 GB | 12,106 | OOM | +| 80 GB | 89,389 | 6,916 | -**Peak Memory Usage on a Multi GPU System (2 GPUs)** - -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | -| --- | --- | --- | --- | --- | --- | -| Huggingface | 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB * | -| Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB * | -| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB * | - -* Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. -
    +
    ![](https://i.ibb.co/sJ7RhGG/image-41.png)
    From d6982c1fe6b814874f2ff989a69e485e6c13ab52 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 17 Jan 2025 00:43:11 -0800 Subject: [PATCH 0997/1088] Update issue templates --- .github/ISSUE_TEMPLATE/new-issue.md | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/new-issue.md diff --git a/.github/ISSUE_TEMPLATE/new-issue.md b/.github/ISSUE_TEMPLATE/new-issue.md deleted file mode 100644 index f0f4e98c46..0000000000 --- a/.github/ISSUE_TEMPLATE/new-issue.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: New Issue -about: Bug / Feature Request -title: '' -labels: '' -assignees: '' - ---- - -1. Have you tried uninstall Unsloth and upgrading? -```bash -pip uninstall unsloth unsloth_zoo -y -pip install --upgrade --no-deps --no-cache-dir unsloth unsloth_zoo -``` -2. If there's a bug, please print out your Unsloth info (or do a screenshot): -```python -🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning. -==((====))== Unsloth 2024.8: Fast Llama patching. Transformers = 4.44.2. - \\ /| GPU: Tesla T4. Max memory: 14.748 GB. Platform = Linux. -O^O/ \_/ \ Pytorch: 2.4.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1. -\ / Bfloat16 = FALSE. FA [Xformers = 0.0.27.post2. FA2 = False] - "-____-" Free Apache license: http://github.com/unslothai/unsloth -``` -3. 
Otherwise, describe your problem or **feature request**: From d8c58fbbb7d59dfa13edba89c13301e60ccdbaf6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 20 Jan 2025 01:27:24 -0800 Subject: [PATCH 0998/1088] Fix Mistral, Qwen (#1565) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han * Merge branch 'main' into nightly * Phi 4 * Update llama.py * Torch.Cuda Is Available Condition and Warning (#1545) * check for torch.cuda and triton if available on my machine(mac m3) the cuda were not available * Update pyproject.toml * Update __init__.py --------- Co-authored-by: Daniel Han * Update mistral.py * Update mistral.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix * Bug fixes * Update mapper.py * Add dropout to granite to match HF's implementation (#1557) Signed-off-by: datta0 * Update llama.py * Update llama.py * 
Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han --------- Signed-off-by: datta0 Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Co-authored-by: AminWhat <88392440+aminwhat@users.noreply.github.com> Co-authored-by: Zhe Zhang <2631992879@qq.com> --- pyproject.toml | 4 ++-- unsloth/__init__.py | 6 +++++- unsloth/models/_utils.py | 12 +++++++++--- unsloth/models/granite.py | 7 ++++--- unsloth/models/llama.py | 8 ++++++-- unsloth/models/mapper.py | 6 ++---- unsloth/models/mistral.py | 2 +- 7 files changed, 29 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b24abd3559..d9df119a16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.1.2", + "unsloth_zoo>=2025.1.4", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -285,7 +285,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.1.2", + "unsloth_zoo>=2025.1.4", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 8002fbaefd..4882eaf635 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -86,6 +86,10 @@ del os.environ["PYTORCH_CUDA_ALLOC_CONF"] pass +# First check if CUDA is available ie a NVIDIA GPU is 
seen +if not torch.cuda.is_available(): + raise NotImplementedError("Unsloth: No NVIDIA GPU found? Unsloth currently only supports GPUs!") + # Fix Xformers performance issues since 0.0.25 import importlib.util from pathlib import Path @@ -194,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.1.2"): + if Version(unsloth_zoo_version) < Version("2025.1.4"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0036a18c4b..bfb1786ee7 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.5" +__version__ = "2025.1.6" __all__ = [ "SUPPORTS_BFLOAT16", @@ -285,7 +285,11 @@ def _is_openai_available(): return False if _is_package_available("flash_attn"): # Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl" try: - from flash_attn.flash_attn_interface import flash_attn_cuda + try: + # See https://github.com/unslothai/unsloth/issues/1437 + from flash_attn.flash_attn_interface import flash_attn_gpu + except: + from flash_attn.flash_attn_interface import flash_attn_cuda HAS_FLASH_ATTENTION = True # Also check for softcapping @@ -843,7 +847,9 @@ def patch_linear_scaling( "self.rotary_emb = .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) - if len(rotary_emb) == 0: return None, function + if len(rotary_emb) == 0: + return None, exec_code + "\n\n" + function + rotary_emb = rotary_emb[0] function = function.replace(rotary_emb, fix_rope_function, 1) function = exec_code + "\n\n" + function diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index 497a357fe2..fb7e96d8d2 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -89,6 
+89,7 @@ def GraniteAttention_fast_forward( n_groups = self.num_key_value_groups n_kv_heads = self.config.num_key_value_heads head_dim = self.head_dim + dropout_p = self.config.attention_dropout if self.training else 0 assert(n_kv_heads * n_groups == n_heads) Q, K, V = self.apply_qkv(self, hidden_states) @@ -135,7 +136,7 @@ def GraniteAttention_fast_forward( Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) pass - A = xformers_attention(Q, K, V, attn_bias = causal_mask, scale=self.scaling) + A = xformers_attention(Q, K, V, attn_bias = causal_mask, scale=self.scaling, p=dropout_p) A = A.view(bsz, q_len, n_heads, head_dim) elif HAS_FLASH_ATTENTION and attention_mask is None: @@ -143,7 +144,7 @@ def GraniteAttention_fast_forward( K = K.transpose(1, 2) V = V.transpose(1, 2) window = (kv_seq_len, kv_seq_len) - A = flash_attn_func(Q, K, V, causal = True, window_size = window, softmax_scale=self.scaling) + A = flash_attn_func(Q, K, V, causal = True, window_size = window, softmax_scale=self.scaling, dropout_p=dropout_p) else: # Grouped query attention # if n_groups != 1: @@ -157,7 +158,7 @@ def GraniteAttention_fast_forward( Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! 
- A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, scale = self.scaling, is_causal = False) + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, scale = self.scaling, is_causal = False, dropout_p=dropout_p) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2).contiguous() pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index edd3ddf94f..da3295adfd 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -636,6 +636,7 @@ def LlamaModel_fast_forward( IS_GEMMA2 = self.config.model_type.startswith("gemma2") IS_COHERE = self.config.model_type.startswith("cohere") IS_GRANITE = self.config.model_type.startswith("granite") + train_embed_tokens = self.embed_tokens.weight.requires_grad if IS_GEMMA: @@ -664,7 +665,7 @@ def LlamaModel_fast_forward( # Fix up attention mask by setting elements to 0 # Specifically for DPO - if self._has_no_labels and (attention_mask is not None) and (past_key_values is None) and \ + if getattr(self, "_has_no_labels", False) is True and (attention_mask is not None) and (past_key_values is None) and \ (not train_embed_tokens): # Careful for inference the attention_mask is size (1, kv_seq_len) # Whilst the input_embeds is size (1, 1, 4096) @@ -792,9 +793,12 @@ def LlamaModel_fast_forward( pass pass - if IS_ATTENTION_REFACTOR and not hasattr(self.layers[0].self_attn, "rotary_emb"): + if (IS_ATTENTION_REFACTOR and (hasattr(self, "rotary_emb") or not hasattr(self.layers[0].self_attn, "rotary_emb"))) or IS_GRANITE: # Transformers main has made it mandatory to pass position_embeddings # https://github.com/huggingface/transformers/pull/34858 + # Also, transformers 4.45.0 supports granite but with the attention refactor (it always had the refactor) + # unsloth's check for granite too has "version >= 4.45.0 (rightly so)". + # so let granite always use the attention refactor implementation. 
position_embeddings = self.rotary_emb(hidden_states, position_ids, self.config.max_position_embeddings) else: position_embeddings = None diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index c1113f5294..b7df6668bb 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -471,20 +471,18 @@ "meta-llama/Llama-3.2-11B-Vision-Instruct", "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-90B-Vision-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision-Instruct", "meta-llama/Llama-3.2-90B-Vision-Instruct", - "unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit", ), "unsloth/Llama-3.2-11B-Vision-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision", "meta-llama/Llama-3.2-11B-Vision", "unsloth/Llama-3.2-11B-Vision-bnb-4bit", ), - "unsloth/Llama-3.2-90B-Vision-unsloth-bnb-4bit" : ( + "unsloth/Llama-3.2-90B-Vision-bnb-4bit" : ( "unsloth/Llama-3.2-90B-Vision", "meta-llama/Llama-3.2-90B-Vision", - "unsloth/Llama-3.2-90B-Vision-bnb-4bit", ), "unsloth/Pixtral-12B-2409-unsloth-bnb-4bit" : ( "unsloth/Pixtral-12B-2409", diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 9a97015f9b..784ca9cb41 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -304,7 +304,7 @@ def pre_patch(): attention_module = MistralAttention, ) # Just for Mistral Nemo models! 
- if function is not None: + if function is not None and init_name is not None: function = patch_mistral_nemo_attention(function) # if True:#init_name is not None: exec(function, globals()) From fde26db11de0dc6b76a9c6ac21ce9c71d3593323 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 20 Jan 2025 08:10:20 -0800 Subject: [PATCH 0999/1088] Update mapper.py --- unsloth/models/mapper.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b7df6668bb..72619cf05d 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -522,6 +522,34 @@ "microsoft/phi-4", "unsloth/phi-4-bnb-4bit", ), + "unsloth/DeepSeek-R1-Distill-Qwen-32B-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Qwen-32B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", + ), + "unsloth/DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Qwen-14B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit", + ), + "unsloth/DeepSeek-R1-Distill-Qwen-7B-unsloth-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Qwen-7B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", + "unsloth/DeepSeek-R1-Distill-Qwen-7B-bnb-4bit", + ), + "unsloth/DeepSeek-R1-Distill-Qwen-1.5B-unsloth-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Qwen-1.5B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "unsloth/DeepSeek-R1-Distill-Qwen-1.5B-bnb-4bit", + ), + "unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Llama-8B", + "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", + "unsloth/DeepSeek-R1-Distill-Llama-8B-bnb-4bit", + ), + "unsloth/DeepSeek-R1-Distill-Llama-70B-bnb-4bit" : ( + "unsloth/DeepSeek-R1-Distill-Llama-70B", + "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + ), } INT_TO_FLOAT_MAPPER = {} From 0546d6793a45aff68c5a83c38bf6be003dd57e00 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Mon, 20 Jan 2025 
22:13:07 -0800 Subject: [PATCH 1000/1088] Update README.md --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index dc2136f6fa..1c32d3aa4e 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 60% less | -| **Phi-4** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 50% less | +| **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 50% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 40% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 60% less | | **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 63% less | @@ -36,27 +36,27 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) - **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb), [Llama 3.1 conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) -- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text +- This [text completion notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_(7B)-Text_Completion.ipynb) is for continued pretraining / raw text - This [continued pretraining notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) is for learning another language - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now! More details: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). - 📣 NEW! 
[Phi-4](https://unsloth.ai/blog/phi4) by Microsoft is now supported. We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. - 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. -- 📣 NEW! Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) -- 📣 NEW! [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/drive/1j0N4XTY1zXXy7mPAhOC1_gMYZ2F2EBlk?usp=sharing), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/drive/1K9ZrdwvZRE96qGkCq_e88FgV3MLnymQq?usp=sharing) -- 📣 NEW! Qwen-2.5 including [Coder](https://colab.research.google.com/drive/18sN803sU23XuJV9Q8On2xgqHSer6-UZF?usp=sharing) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/drive/1qN1CEalC70EO1wGKhNxs1go1W9So61R5?usp=sharing) -- 📣 NEW! 
We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. +- 📣 Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) +- 📣 [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb)
    Click for more news -- 📣 Try out [Chat interface](https://colab.research.google.com/drive/1i-8ESvtLRGNkkUQQr_-z_rcSAIo9c3lM?usp=sharing)! -- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/drive/1oCEHcED15DzL8xXGU1VTx5ZfOJM8WY01?usp=sharing) finetuning fits in under 16GB of VRAM! -- 📣 NEW! [Llama 3.1 8b, 70b](https://colab.research.google.com/drive/1Ys44kVvmeZtnICzWz0xgpRnrIOjZAuxp?usp=sharing) & [Mistral Nemo-12b](https://colab.research.google.com/drive/17d3U-CAIwzmbDRqbZ9NnpHxCkmXB6LZ0?usp=sharing) both Base and Instruct are now supported +- 📣 We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. +- 📣 Try out [Chat interface](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Unsloth_Studio.ipynb)! +- 📣 NEW! Qwen-2.5 including [Coder](https://unsloth.ai/blog/qwen-coder) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_Coder_(14B)-Conversational.ipynb) +- 📣 NEW! [Mistral Small 22b notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_Small_(22B)-Alpaca.ipynb) finetuning fits in under 16GB of VRAM! - 📣 NEW! `pip install unsloth` now works! Head over to [pypi](https://pypi.org/project/unsloth/) to check it out! This allows non git pull installs. Use `pip install unsloth[colab-new]` for non dependency installs. -- 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean! -- 📣 [2x faster inference](https://colab.research.google.com/drive/1aqlNQi7MMJbynFDyOQteD2t0yVfjb9Zh?usp=sharing) added for all our models +- 📣 NEW! 
Continued Pretraining [notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) for other languages like Korean! +- 📣 [2x faster inference](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Inference.ipynb) added for all our models - 📣 We cut memory usage by a [further 30%](https://unsloth.ai/blog/long-context) and now support [4x longer context windows](https://unsloth.ai/blog/long-context)!
    From c1195d76e828db1da18671d03503153559c37d66 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 16:44:48 -0800 Subject: [PATCH 1001/1088] Fix triton.ops missing Triton 3.2 --- unsloth/__init__.py | 114 ++++++++------- unsloth/kernels/matmul_perf_model.py | 211 +++++++++++++++++++++++++++ unsloth/models/_utils.py | 2 +- 3 files changed, 277 insertions(+), 50 deletions(-) create mode 100644 unsloth/kernels/matmul_perf_model.py diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 4882eaf635..2986ca492f 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -134,65 +134,81 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 torch.cuda.is_bf16_supported = is_bf16_supported pass + +# For Gradio HF Spaces? +# if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ: +import triton +libcuda_dirs = lambda: None +if Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass +else: from triton.common.build import libcuda_dirs + +def fix_triton_ops(): + # Check if triton.ops exists + try: + import triton.ops + except: + # Triton 3.2 removed triton.ops + from .kernels.matmul_perf_model import ( + early_config_prune, + estimate_matmul_time, + ) + triton.ops.early_config_prune = early_config_prune + triton.ops.estimate_matmul_time = estimate_matmul_time + pass +pass +fix_triton_ops() + # Try loading bitsandbytes and triton import bitsandbytes as bnb +try: + cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 + libcuda_dirs() +except: + warnings.warn( + "Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ + ) -if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ: - - import triton - libcuda_dirs = lambda: None - if Version(triton.__version__) >= Version("3.0.0"): - try: from triton.backends.nvidia.driver import libcuda_dirs - except: pass - else: from triton.common.build import libcuda_dirs + if 
os.path.exists("/usr/lib64-nvidia"): + os.system("ldconfig /usr/lib64-nvidia") + elif os.path.exists("/usr/local"): + # Sometimes bitsandbytes cannot be linked properly in Runpod for example + possible_cudas = subprocess.check_output(["ls", "-al", "/usr/local"]).decode("utf-8").split("\n") + find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$") + possible_cudas = [find_cuda.search(x) for x in possible_cudas] + possible_cudas = [x.group(1) for x in possible_cudas if x is not None] + + # Try linking cuda folder, or everything in local + if len(possible_cudas) == 0: + os.system("ldconfig /usr/local/") + else: + find_number = re.compile(r"([\d\.]{2,})") + latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] + latest_cuda = possible_cudas[latest_cuda] + os.system(f"ldconfig /usr/local/{latest_cuda}") + pass + importlib.reload(bnb) + importlib.reload(triton) try: + libcuda_dirs = lambda: None + if Version(triton.__version__) >= Version("3.0.0"): + try: from triton.backends.nvidia.driver import libcuda_dirs + except: pass + else: from triton.common.build import libcuda_dirs cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() + fix_triton_ops() except: warnings.warn( - "Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA."\ + "Unsloth: CUDA is not linked properly.\n"\ + "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"\ + "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ + "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ + "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\ + "Unsloth will still run for now, but maybe it might crash - let's hope it works!" 
) - - if os.path.exists("/usr/lib64-nvidia"): - os.system("ldconfig /usr/lib64-nvidia") - elif os.path.exists("/usr/local"): - # Sometimes bitsandbytes cannot be linked properly in Runpod for example - possible_cudas = subprocess.check_output(["ls", "-al", "/usr/local"]).decode("utf-8").split("\n") - find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$") - possible_cudas = [find_cuda.search(x) for x in possible_cudas] - possible_cudas = [x.group(1) for x in possible_cudas if x is not None] - - # Try linking cuda folder, or everything in local - if len(possible_cudas) == 0: - os.system("ldconfig /usr/local/") - else: - find_number = re.compile(r"([\d\.]{2,})") - latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0] - latest_cuda = possible_cudas[latest_cuda] - os.system(f"ldconfig /usr/local/{latest_cuda}") - pass - - importlib.reload(bnb) - importlib.reload(triton) - try: - libcuda_dirs = lambda: None - if Version(triton.__version__) >= Version("3.0.0"): - try: from triton.backends.nvidia.driver import libcuda_dirs - except: pass - else: from triton.common.build import libcuda_dirs - cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 - libcuda_dirs() - except: - warnings.warn( - "Unsloth: CUDA is not linked properly.\n"\ - "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"\ - "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"\ - "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"\ - "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\ - "Unsloth will still run for now, but maybe it might crash - let's hope it works!" 
- ) - pass pass # Check for unsloth_zoo diff --git a/unsloth/kernels/matmul_perf_model.py b/unsloth/kernels/matmul_perf_model.py new file mode 100644 index 0000000000..53b59d808e --- /dev/null +++ b/unsloth/kernels/matmul_perf_model.py @@ -0,0 +1,211 @@ +# Adapted from https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/kernels/matmul_perf_model.py +# https://github.com/triton-lang/kernels is licensed under the MIT License. + +import functools +import heapq + +import torch + +from triton import cdiv +from triton.runtime import driver +from triton.testing import ( + get_dram_gbps, + get_max_simd_tflops, + get_max_tensorcore_tflops, + nvsmi, +) + + +@functools.lru_cache +def get_clock_rate_in_khz(): + try: + return nvsmi(["clocks.max.sm"])[0] * 1e3 + except FileNotFoundError: + import pynvml + + pynvml.nvmlInit() + handle = pynvml.nvmlDeviceGetHandleByIndex(0) + return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3 + + +def get_tensorcore_tflops(device, num_ctas, num_warps, dtype): + """return compute throughput in TOPS""" + total_warps = num_ctas * min(num_warps, 4) + num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs + tflops = ( + min(num_subcores, total_warps) + / num_subcores + * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device) + ) + return tflops + + +def get_simd_tflops(device, num_ctas, num_warps, dtype): + """return compute throughput in TOPS""" + total_warps = num_ctas * min(num_warps, 4) + num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs + tflops = ( + min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device) + ) + return tflops + + +def get_tflops(device, num_ctas, num_warps, dtype): + capability = torch.cuda.get_device_capability(device) + if capability[0] < 8 and dtype == torch.float32: + return 
get_simd_tflops(device, num_ctas, num_warps, dtype) + return get_tensorcore_tflops(device, num_ctas, num_warps, dtype) + + +def estimate_matmul_time( + # backend, device, + num_warps, + num_stages, # + A, + B, + C, # + M, + N, + K, # + BLOCK_M, + BLOCK_N, + BLOCK_K, + SPLIT_K, # + debug=False, + **kwargs, # +): + """return estimated running time in ms + = max(compute, loading) + store""" + device = torch.cuda.current_device() + dtype = A.dtype + dtsize = A.element_size() + + num_cta_m = cdiv(M, BLOCK_M) + num_cta_n = cdiv(N, BLOCK_N) + num_cta_k = SPLIT_K + num_ctas = num_cta_m * num_cta_n * num_cta_k + + # If the input is smaller than the block size + M, N = max(M, BLOCK_M), max(N, BLOCK_N) + + # time to compute + total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS + tput = get_tflops(device, num_ctas, num_warps, dtype) + compute_ms = total_ops / tput + + # time to load data + num_sm = driver.active.utils.get_device_properties(device)["multiprocessor_count"] + active_cta_ratio = min(1, num_ctas / num_sm) + active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate + active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% + dram_bw = get_dram_gbps(device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s + l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?) 
+ # assume 80% of (following) loads are in L2 cache + load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1)) + load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1) + load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1)) + load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1) + # total + total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB + total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024) + # loading time in ms + load_ms = total_dram / dram_bw + total_l2 / l2_bw + + # estimate storing time + store_bw = dram_bw * 0.6 # :o + store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB + if SPLIT_K == 1: + store_ms = store_c_dram / store_bw + else: + reduce_bw = store_bw + store_ms = store_c_dram / reduce_bw + # c.zero_() + zero_ms = M * N * 2 / (1024 * 1024) / store_bw + store_ms += zero_ms + + total_time_ms = max(compute_ms, load_ms) + store_ms + if debug: + print( + f"Total time: {total_time_ms}ms, compute time: {compute_ms}ms, " + f"loading time: {load_ms}ms, store time: {store_ms}ms, " + f"Activate CTAs: {active_cta_ratio*100}%" + ) + return total_time_ms + + +def early_config_prune(configs, named_args, **kwargs): + device = torch.cuda.current_device() + capability = torch.cuda.get_device_capability() + # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages + dtsize = named_args["A"].element_size() + dtype = named_args["A"].dtype + + # 1. 
make sure we have enough smem + pruned_configs = [] + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_stages = ( + kw["BLOCK_M"], + kw["BLOCK_N"], + kw["BLOCK_K"], + config.num_stages, + ) + + max_shared_memory = driver.active.utils.get_device_properties(device)["max_shared_mem"] + required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize + if required_shared_memory <= max_shared_memory: + pruned_configs.append(config) + configs = pruned_configs + + # Some dtypes do not allow atomic_add + if dtype not in [torch.float16, torch.float32]: + configs = [config for config in configs if config.kwargs["SPLIT_K"] == 1] + + # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps) + configs_map = {} + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = ( + kw["BLOCK_M"], + kw["BLOCK_N"], + kw["BLOCK_K"], + kw["SPLIT_K"], + config.num_warps, + config.num_stages, + ) + + key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps) + if key in configs_map: + configs_map[key].append((config, num_stages)) + else: + configs_map[key] = [(config, num_stages)] + + pruned_configs = [] + for k, v in configs_map.items(): + BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k + if capability[0] >= 8: + # compute cycles (only works for ampere GPUs) + mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) + mma_cycles = mmas / min(4, num_warps) * 8 + + ldgsts_latency = 300 # Does this matter? 
+ optimal_num_stages = ldgsts_latency / mma_cycles + + # nearest stages, prefer large #stages + nearest = heapq.nsmallest( + 2, + v, + key=lambda x: ( + 10 + abs(x[1] - optimal_num_stages) + if (x[1] - optimal_num_stages) < 0 + else x[1] - optimal_num_stages + ), + ) + + for n in nearest: + pruned_configs.append(n[0]) + else: # Volta & Turing only supports num_stages <= 2 + random_config = v[0][0] + random_config.num_stages = 2 + pruned_configs.append(random_config) + return pruned_configs \ No newline at end of file diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index bfb1786ee7..42b74cba19 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.6" +__version__ = "2025.1.7" __all__ = [ "SUPPORTS_BFLOAT16", From f291a4c12b8bd0c7dd5f00c6d13efe4b738ab1cb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 16:45:41 -0800 Subject: [PATCH 1002/1088] Update __init__.py --- unsloth/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 2986ca492f..8c949b0585 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -134,7 +134,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 torch.cuda.is_bf16_supported = is_bf16_supported pass - # For Gradio HF Spaces? 
# if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ: import triton From 436fe17ada89d3fa0d9b3bb6ac90d46cecf649d0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 16:46:54 -0800 Subject: [PATCH 1003/1088] Update __init__.py --- unsloth/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 8c949b0585..9068df99b4 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -153,8 +153,8 @@ def fix_triton_ops(): early_config_prune, estimate_matmul_time, ) - triton.ops.early_config_prune = early_config_prune - triton.ops.estimate_matmul_time = estimate_matmul_time + triton.ops.matmul_perf_model.early_config_prune = early_config_prune + triton.ops.matmul_perf_model.estimate_matmul_time = estimate_matmul_time pass pass fix_triton_ops() From f2973dc0ff466b47f8ac831b225c26bbe38404fa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 16:53:35 -0800 Subject: [PATCH 1004/1088] triton.ops error --- unsloth/__init__.py | 22 +++++++++++++++++----- unsloth/{kernels => }/matmul_perf_model.py | 0 2 files changed, 17 insertions(+), 5 deletions(-) rename unsloth/{kernels => }/matmul_perf_model.py (100%) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 9068df99b4..74a0adce57 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -149,12 +149,24 @@ def fix_triton_ops(): import triton.ops except: # Triton 3.2 removed triton.ops - from .kernels.matmul_perf_model import ( - early_config_prune, - estimate_matmul_time, + from .matmul_perf_model import ( + early_config_prune as _early_config_prune, + estimate_matmul_time as _estimate_matmul_time, ) - triton.ops.matmul_perf_model.early_config_prune = early_config_prune - triton.ops.matmul_perf_model.estimate_matmul_time = estimate_matmul_time + class PerfOps: + def __init__(self): return + @staticmethod + def early_config_prune(*args, **kwargs): + return _early_config_prune(*args, 
**kwargs) + @staticmethod + def estimate_matmul_time(*args, **kwargs): + return _estimate_matmul_time(*args, **kwargs) + pass + class TritonOps: + __slots__ = "matmul_perf_model", + def __init__(self): self.matmul_perf_model = PerfOps() + pass + triton.ops = TritonOps() pass pass fix_triton_ops() diff --git a/unsloth/kernels/matmul_perf_model.py b/unsloth/matmul_perf_model.py similarity index 100% rename from unsloth/kernels/matmul_perf_model.py rename to unsloth/matmul_perf_model.py From 0fd3c11f19f882d034d5666ed99eda2ad34f99a0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 16:56:01 -0800 Subject: [PATCH 1005/1088] move TritonOps --- unsloth/__init__.py | 35 +++++++---------------------------- unsloth/matmul_perf_model.py | 17 ++++++++++++++++- 2 files changed, 23 insertions(+), 29 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 74a0adce57..d1c9ab9ef6 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -143,33 +143,10 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 except: pass else: from triton.common.build import libcuda_dirs -def fix_triton_ops(): - # Check if triton.ops exists - try: - import triton.ops - except: - # Triton 3.2 removed triton.ops - from .matmul_perf_model import ( - early_config_prune as _early_config_prune, - estimate_matmul_time as _estimate_matmul_time, - ) - class PerfOps: - def __init__(self): return - @staticmethod - def early_config_prune(*args, **kwargs): - return _early_config_prune(*args, **kwargs) - @staticmethod - def estimate_matmul_time(*args, **kwargs): - return _estimate_matmul_time(*args, **kwargs) - pass - class TritonOps: - __slots__ = "matmul_perf_model", - def __init__(self): self.matmul_perf_model = PerfOps() - pass - triton.ops = TritonOps() - pass -pass -fix_triton_ops() +# Triton 3.2 removed triton.ops, so we shall fix it! 
+from .matmul_perf_model import TritonOps +try: import triton.ops +except: triton.ops = TritonOps() # Try loading bitsandbytes and triton import bitsandbytes as bnb @@ -210,7 +187,9 @@ def __init__(self): self.matmul_perf_model = PerfOps() else: from triton.common.build import libcuda_dirs cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() - fix_triton_ops() + # Triton 3.2 removed triton.ops, so we shall fix it! + try: import triton.ops + except: triton.ops = TritonOps() except: warnings.warn( "Unsloth: CUDA is not linked properly.\n"\ diff --git a/unsloth/matmul_perf_model.py b/unsloth/matmul_perf_model.py index 53b59d808e..6a86c29bd4 100644 --- a/unsloth/matmul_perf_model.py +++ b/unsloth/matmul_perf_model.py @@ -208,4 +208,19 @@ def early_config_prune(configs, named_args, **kwargs): random_config = v[0][0] random_config.num_stages = 2 pruned_configs.append(random_config) - return pruned_configs \ No newline at end of file + return pruned_configs + +class PerfOps: + def __init__(self): return + @staticmethod + def early_config_prune(*args, **kwargs): + return _early_config_prune(*args, **kwargs) + @staticmethod + def estimate_matmul_time(*args, **kwargs): + return _estimate_matmul_time(*args, **kwargs) +pass + +class TritonOps: + __slots__ = "matmul_perf_model", + def __init__(self): self.matmul_perf_model = PerfOps() +pass From 1ef71e8b9c570d59470ba5a40ed49064db295f43 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 22 Jan 2025 17:49:20 -0800 Subject: [PATCH 1006/1088] Fix triton.ops --- unsloth/__init__.py | 8 -- unsloth/matmul_perf_model.py | 226 ----------------------------------- 2 files changed, 234 deletions(-) delete mode 100644 unsloth/matmul_perf_model.py diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d1c9ab9ef6..1f82dd8b52 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -143,11 +143,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 except: pass else: from triton.common.build 
import libcuda_dirs -# Triton 3.2 removed triton.ops, so we shall fix it! -from .matmul_perf_model import TritonOps -try: import triton.ops -except: triton.ops = TritonOps() - # Try loading bitsandbytes and triton import bitsandbytes as bnb try: @@ -187,9 +182,6 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 else: from triton.common.build import libcuda_dirs cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 libcuda_dirs() - # Triton 3.2 removed triton.ops, so we shall fix it! - try: import triton.ops - except: triton.ops = TritonOps() except: warnings.warn( "Unsloth: CUDA is not linked properly.\n"\ diff --git a/unsloth/matmul_perf_model.py b/unsloth/matmul_perf_model.py deleted file mode 100644 index 6a86c29bd4..0000000000 --- a/unsloth/matmul_perf_model.py +++ /dev/null @@ -1,226 +0,0 @@ -# Adapted from https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/kernels/matmul_perf_model.py -# https://github.com/triton-lang/kernels is licensed under the MIT License. 
- -import functools -import heapq - -import torch - -from triton import cdiv -from triton.runtime import driver -from triton.testing import ( - get_dram_gbps, - get_max_simd_tflops, - get_max_tensorcore_tflops, - nvsmi, -) - - -@functools.lru_cache -def get_clock_rate_in_khz(): - try: - return nvsmi(["clocks.max.sm"])[0] * 1e3 - except FileNotFoundError: - import pynvml - - pynvml.nvmlInit() - handle = pynvml.nvmlDeviceGetHandleByIndex(0) - return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3 - - -def get_tensorcore_tflops(device, num_ctas, num_warps, dtype): - """return compute throughput in TOPS""" - total_warps = num_ctas * min(num_warps, 4) - num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs - tflops = ( - min(num_subcores, total_warps) - / num_subcores - * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device) - ) - return tflops - - -def get_simd_tflops(device, num_ctas, num_warps, dtype): - """return compute throughput in TOPS""" - total_warps = num_ctas * min(num_warps, 4) - num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs - tflops = ( - min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device) - ) - return tflops - - -def get_tflops(device, num_ctas, num_warps, dtype): - capability = torch.cuda.get_device_capability(device) - if capability[0] < 8 and dtype == torch.float32: - return get_simd_tflops(device, num_ctas, num_warps, dtype) - return get_tensorcore_tflops(device, num_ctas, num_warps, dtype) - - -def estimate_matmul_time( - # backend, device, - num_warps, - num_stages, # - A, - B, - C, # - M, - N, - K, # - BLOCK_M, - BLOCK_N, - BLOCK_K, - SPLIT_K, # - debug=False, - **kwargs, # -): - """return estimated running time in ms - = max(compute, loading) + store""" - device = torch.cuda.current_device() - dtype = A.dtype - dtsize = A.element_size() - - 
num_cta_m = cdiv(M, BLOCK_M) - num_cta_n = cdiv(N, BLOCK_N) - num_cta_k = SPLIT_K - num_ctas = num_cta_m * num_cta_n * num_cta_k - - # If the input is smaller than the block size - M, N = max(M, BLOCK_M), max(N, BLOCK_N) - - # time to compute - total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS - tput = get_tflops(device, num_ctas, num_warps, dtype) - compute_ms = total_ops / tput - - # time to load data - num_sm = driver.active.utils.get_device_properties(device)["multiprocessor_count"] - active_cta_ratio = min(1, num_ctas / num_sm) - active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate - active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% - dram_bw = get_dram_gbps(device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s - l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?) - # assume 80% of (following) loads are in L2 cache - load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1)) - load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1) - load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1)) - load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1) - # total - total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB - total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024) - # loading time in ms - load_ms = total_dram / dram_bw + total_l2 / l2_bw - - # estimate storing time - store_bw = dram_bw * 0.6 # :o - store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB - if SPLIT_K == 1: - store_ms = store_c_dram / store_bw - else: - reduce_bw = store_bw - store_ms = store_c_dram / reduce_bw - # c.zero_() - zero_ms = M * N * 2 / (1024 * 1024) / store_bw - store_ms += zero_ms - - total_time_ms = max(compute_ms, load_ms) + store_ms - if debug: - print( - f"Total time: {total_time_ms}ms, compute time: {compute_ms}ms, " - f"loading time: {load_ms}ms, store time: {store_ms}ms, " - f"Activate CTAs: {active_cta_ratio*100}%" - ) - return total_time_ms - - -def 
early_config_prune(configs, named_args, **kwargs): - device = torch.cuda.current_device() - capability = torch.cuda.get_device_capability() - # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages - dtsize = named_args["A"].element_size() - dtype = named_args["A"].dtype - - # 1. make sure we have enough smem - pruned_configs = [] - for config in configs: - kw = config.kwargs - BLOCK_M, BLOCK_N, BLOCK_K, num_stages = ( - kw["BLOCK_M"], - kw["BLOCK_N"], - kw["BLOCK_K"], - config.num_stages, - ) - - max_shared_memory = driver.active.utils.get_device_properties(device)["max_shared_mem"] - required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize - if required_shared_memory <= max_shared_memory: - pruned_configs.append(config) - configs = pruned_configs - - # Some dtypes do not allow atomic_add - if dtype not in [torch.float16, torch.float32]: - configs = [config for config in configs if config.kwargs["SPLIT_K"] == 1] - - # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps) - configs_map = {} - for config in configs: - kw = config.kwargs - BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = ( - kw["BLOCK_M"], - kw["BLOCK_N"], - kw["BLOCK_K"], - kw["SPLIT_K"], - config.num_warps, - config.num_stages, - ) - - key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps) - if key in configs_map: - configs_map[key].append((config, num_stages)) - else: - configs_map[key] = [(config, num_stages)] - - pruned_configs = [] - for k, v in configs_map.items(): - BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k - if capability[0] >= 8: - # compute cycles (only works for ampere GPUs) - mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) - mma_cycles = mmas / min(4, num_warps) * 8 - - ldgsts_latency = 300 # Does this matter? 
- optimal_num_stages = ldgsts_latency / mma_cycles - - # nearest stages, prefer large #stages - nearest = heapq.nsmallest( - 2, - v, - key=lambda x: ( - 10 + abs(x[1] - optimal_num_stages) - if (x[1] - optimal_num_stages) < 0 - else x[1] - optimal_num_stages - ), - ) - - for n in nearest: - pruned_configs.append(n[0]) - else: # Volta & Turing only supports num_stages <= 2 - random_config = v[0][0] - random_config.num_stages = 2 - pruned_configs.append(random_config) - return pruned_configs - -class PerfOps: - def __init__(self): return - @staticmethod - def early_config_prune(*args, **kwargs): - return _early_config_prune(*args, **kwargs) - @staticmethod - def estimate_matmul_time(*args, **kwargs): - return _estimate_matmul_time(*args, **kwargs) -pass - -class TritonOps: - __slots__ = "matmul_perf_model", - def __init__(self): self.matmul_perf_model = PerfOps() -pass From 04f514c7a1b34f2a25d36c8e2d2496510e15a7ba Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sun, 26 Jan 2025 14:11:58 -0800 Subject: [PATCH 1007/1088] Update README.md Updating super old benchmarks --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 1c32d3aa4e..883ac6b287 100644 --- a/README.md +++ b/README.md @@ -22,16 +22,16 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 60% less | -| **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 50% less | -| **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 40% less | -| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 60% less | -| **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 63% less | -| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 63% less | -| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 73% less | -| **Ollama** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb) | 1.9x faster | 43% less | -| **ORPO** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) | 1.9x faster | 43% less | -| **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 43% less | +| **Llama 3.2 (3B)** | [▶️ Start for 
free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | +| **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 70% less | +| **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 50% less | +| **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 70% less | +| **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 70% less | +| **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 70% less | +| **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 75% less | +| **Ollama** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb) | 1.9x faster | 60% less | +| **ORPO** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) | 1.9x faster | 50% less | +| **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 50% less | - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) - **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 
(8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) From dd1a220e0944815ab6cc12d4755006d963c76898 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Thu, 30 Jan 2025 21:05:45 -0800 Subject: [PATCH 1008/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 883ac6b287..3e635dabe3 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. - 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. - 📣 Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) -- 📣 [Vision models](https://unsloth.ai/blog/vision) now supported! 
[Llama 3.2 Vision (11B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb), [Qwen 2.5 VL (7B)](https://colab.research.google.com/drive/1whHb54GNZMrNxIsi2wm2EY_-Pvo2QyKh?usp=sharing) and [Pixtral (12B) 2409](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb) +- 📣 [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb), [Qwen 2.5 VL (7B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb) and [Pixtral (12B) 2409](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Pixtral_(12B)-Vision.ipynb)
    Click for more news From ed14d37e0d7edde5e27afef8700a0b646261b15a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 31 Jan 2025 03:33:24 -0800 Subject: [PATCH 1009/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 42b74cba19..b0d51a8607 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.1.7" +__version__ = "2025.1.8" __all__ = [ "SUPPORTS_BFLOAT16", From 038e6d4c8d40207a87297ab3aaf787c19b1006d1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 31 Jan 2025 03:34:36 -0800 Subject: [PATCH 1010/1088] Mistral 24B, Qwen 2.5 VL support (#1598) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update 
CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han * Merge branch 'main' into nightly * Phi 4 * Update llama.py * Torch.Cuda Is Available 
Condition and Warning (#1545) * check for torch.cuda and triton if available on my machine(mac m3) the cuda were not available * Update pyproject.toml * Update __init__.py --------- Co-authored-by: Daniel Han * Update mistral.py * Update mistral.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix * Bug fixes * Update mapper.py * Add dropout to granite to match HF's implementation (#1557) Signed-off-by: datta0 * Update llama.py * Update llama.py * Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han * Update mapper.py --------- Signed-off-by: datta0 Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Co-authored-by: AminWhat <88392440+aminwhat@users.noreply.github.com> Co-authored-by: Zhe Zhang <2631992879@qq.com> --- unsloth/models/mapper.py | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 72619cf05d..bc01c28583 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -432,21 +432,25 @@ "unsloth/Qwen2.5-Coder-32B-Instruct", "Qwen/Qwen2.5-Coder-32B-Instruct", ), - "unsloth/Llama-3.2-1B-bnb-4bit" : ( + "unsloth/Llama-3.2-1B-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-1B", "meta-llama/Llama-3.2-1B", + "unsloth/Llama-3.2-1B-bnb-4bit", ), - "unsloth/Llama-3.2-3B-bnb-4bit" : ( + "unsloth/Llama-3.2-3B-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-3B", "meta-llama/Llama-3.2-3B", + 
"unsloth/Llama-3.2-3B-bnb-4bit", ), - "unsloth/Llama-3.2-1B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-1B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-1B-Instruct", "meta-llama/Llama-3.2-1B-Instruct", + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", ), - "unsloth/Llama-3.2-3B-Instruct-bnb-4bit" : ( + "unsloth/Llama-3.2-3B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-3B-Instruct", + "unsloth/Llama-3.2-3B-Instruct-bnb-4bit", ), "unsloth/Llama-3.1-Nemotron-70B-Instruct-bnb-4bit" : ( "unsloth/Llama-3.1-Nemotron-70B-Instruct", @@ -550,6 +554,31 @@ "unsloth/DeepSeek-R1-Distill-Llama-70B", "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", ), + "unsloth/Mistral-Small-24B-Base-2501-unsloth-bnb-4bit" : ( + "unsloth/Mistral-Small-24B-Base", + "mistralai/Mistral-Small-24B-Base-2501", + "unsloth/Mistral-Small-24B-Base-2501-bnb-4bit", + ), + "unsloth/Mistral-Small-24B-Instruct-2501-unsloth-bnb-4bit" : ( + "unsloth/Mistral-Small-24B-Instruct", + "mistralai/Mistral-Small-24B-Instruct-2501", + "unsloth/Mistral-Small-24B-Instruct-2501-bnb-4bit", + ), + "unsloth/Qwen2.5-VL-3B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2.5-VL-3B-Instruct", + "Qwen/Qwen2.5-VL-3B-Instruct", + "unsloth/Qwen2.5-VL-3B-Instruct-bnb-4bit", + ), + "unsloth/Qwen2.5-VL-7B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2.5-VL-7B-Instruct", + "Qwen/Qwen2.5-VL-7B-Instruct", + "unsloth/Qwen2.5-VL-7B-Instruct-bnb-4bit", + ), + "unsloth/Qwen2.5-VL-72B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Qwen2.5-VL-72B-Instruct", + "Qwen/Qwen2.5-VL-72B-Instruct", + "unsloth/Qwen2.5-VL-72B-Instruct-bnb-4bit", + ), } INT_TO_FLOAT_MAPPER = {} From 512fec6a7b77a930b85a5b5685bf056fbb29ff5e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Feb 2025 02:41:12 -0800 Subject: [PATCH 1011/1088] GRPO, vLLM, Bug Fixes, Reinforcement Learning (#1620) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update 
_utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han * Merge branch 'main' into nightly * Phi 4 * Update llama.py * Torch.Cuda Is Available Condition and Warning (#1545) * check for torch.cuda and triton if available on my machine(mac m3) the cuda were not available * Update pyproject.toml * Update __init__.py --------- Co-authored-by: Daniel Han * Update mistral.py * Update mistral.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix * Bug fixes * Update mapper.py * Add dropout to granite to match HF's implementation (#1557) Signed-off-by: datta0 * Update llama.py * Update llama.py * 
Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han * Update mapper.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * dim fix * Update _utils.py * Torch 2.6 support * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Faster inference? * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py --------- Signed-off-by: datta0 Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> 
Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Co-authored-by: AminWhat <88392440+aminwhat@users.noreply.github.com> Co-authored-by: Zhe Zhang <2631992879@qq.com> --- pyproject.toml | 105 ++++++++- unsloth/__init__.py | 2 +- unsloth/_auto_install.py | 6 +- unsloth/chat_templates.py | 4 + unsloth/kernels/utils.py | 142 ++++++++----- unsloth/models/__init__.py | 1 + unsloth/models/_utils.py | 2 +- unsloth/models/dpo.py | 113 +--------- unsloth/models/gemma.py | 10 +- unsloth/models/llama.py | 316 ++++++++++++++++++--------- unsloth/models/loader.py | 43 +++- unsloth/models/mapper.py | 34 +-- unsloth/models/rl.py | 423 +++++++++++++++++++++++++++++++++++++ 13 files changed, 918 insertions(+), 283 deletions(-) create mode 100644 unsloth/models/rl.py diff --git a/pyproject.toml b/pyproject.toml index d9df119a16..88c757b333 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -131,6 +131,12 @@ cu124onlytorch240 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] +cu118onlytorch250 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.28.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.28.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", +] cu121onlytorch250 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.28.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", @@ -147,6 +153,12 @@ cu124onlytorch250 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.28.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] +cu118onlytorch251 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post1-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post1-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", +] cu121onlytorch251 = [ "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", "xformers @ 
https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", @@ -163,6 +175,28 @@ cu124onlytorch251 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] +cu118onlytorch260 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", +] +cu124onlytorch260 = [ + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", +] +cu126onlytorch260 = [ + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", +] cu118 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", @@ -223,21 +257,31 @@ cu121-torch240 = [ "bitsandbytes>=0.43.3", "unsloth[cu121onlytorch240]", ] -cu121-torch250 = [ +cu124-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch250]", + "unsloth[cu124onlytorch240]", ] -cu124-torch240 = [ +cu118-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch240]", + "unsloth[cu118onlytorch250]", +] +cu121-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch250]", ] cu124-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", 
"unsloth[cu124onlytorch250]", ] +cu118-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu118onlytorch251]", +] cu121-torch251 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", @@ -248,6 +292,21 @@ cu124-torch251 = [ "bitsandbytes>=0.43.3", "unsloth[cu124onlytorch251]", ] +cu118-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + "unsloth[cu118onlytorch260]", +] +cu124-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + "unsloth[cu124onlytorch260]", +] +cu126-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + "unsloth[cu126onlytorch260]", +] kaggle = [ "unsloth[huggingface]", ] @@ -381,16 +440,22 @@ cu121-ampere-torch240 = [ "unsloth[cu121onlytorch240]", "unsloth[flashattention]", ] -cu121-ampere-torch250 = [ +cu124-ampere-torch240 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", - "unsloth[cu121onlytorch250]", + "unsloth[cu124onlytorch240]", "unsloth[flashattention]", ] -cu124-ampere-torch240 = [ +cu118-ampere-torch250 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", - "unsloth[cu124onlytorch240]", + "unsloth[cu118onlytorch250]", + "unsloth[flashattention]", +] +cu121-ampere-torch250 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu121onlytorch250]", "unsloth[flashattention]", ] cu124-ampere-torch250 = [ @@ -399,6 +464,12 @@ cu124-ampere-torch250 = [ "unsloth[cu124onlytorch250]", "unsloth[flashattention]", ] +cu118-ampere-torch251 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.43.3", + "unsloth[cu118onlytorch251]", + "unsloth[flashattention]", +] cu121-ampere-torch251 = [ "unsloth[huggingface]", "bitsandbytes>=0.43.3", @@ -411,6 +482,24 @@ cu124-ampere-torch251 = [ "unsloth[cu124onlytorch251]", "unsloth[flashattention]", ] +cu118-ampere-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + "unsloth[cu118onlytorch260]", + "unsloth[flashattention]", +] +cu124-ampere-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + 
"unsloth[cu124onlytorch260]", + "unsloth[flashattention]", +] +cu126-ampere-torch260 = [ + "unsloth[huggingface]", + "bitsandbytes>=0.45.1", + "unsloth[cu126onlytorch260]", + "unsloth[flashattention]", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 1f82dd8b52..c89fd0f1fd 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -196,7 +196,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.1.4"): + if Version(unsloth_zoo_version) < Version("2025.2.1"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/_auto_install.py b/unsloth/_auto_install.py index c3b94c6706..8bb5485192 100644 --- a/unsloth/_auto_install.py +++ b/unsloth/_auto_install.py @@ -18,14 +18,16 @@ v = V(torch.__version__) cuda = str(torch.version.cuda) is_ampere = torch.cuda.get_device_capability()[0] >= 8 -if cuda != "12.1" and cuda != "11.8" and cuda != "12.4": raise RuntimeError(f"CUDA = {cuda} not supported!") +if cuda != "12.1" and cuda != "11.8" and cuda != "12.4" and cuda != "12.6": raise RuntimeError(f"CUDA = {cuda} not supported!") if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!") elif v <= V('2.1.1'): x = 'cu{}{}-torch211' elif v <= V('2.1.2'): x = 'cu{}{}-torch212' elif v < V('2.3.0'): x = 'cu{}{}-torch220' elif v < V('2.4.0'): x = 'cu{}{}-torch230' elif v < V('2.5.0'): x = 'cu{}{}-torch240' -elif v < V('2.6.0'): x = 'cu{}{}-torch250' +elif v < V('2.5.1'): x = 'cu{}{}-torch250' +elif v <= V('2.5.1'): x = 'cu{}{}-torch251' +elif v < V('2.7.0'): x = 'cu{}{}-torch260' else: raise RuntimeError(f"Torch = {v} too new!") x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') \ No newline at end of 
file diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index d8dc385223..c401393234 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -759,6 +759,10 @@ CHAT_TEMPLATES["llama-31"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,) DEFAULT_SYSTEM_MESSAGE["llama-31"] = "" # Llama3.1 default system message is empty + the dates + +for version in ("llama-3.2", "llama-3.3", "llama-32", "llama-33"): + CHAT_TEMPLATES[version] = CHAT_TEMPLATES["llama-3.1"] + DEFAULT_SYSTEM_MESSAGE[version] = "" pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index de543962ef..f052914f98 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -15,6 +15,7 @@ import triton MAX_FUSED_SIZE : int = 65536 next_power_of_2 = triton.next_power_of_2 +import functools # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch @@ -66,6 +67,8 @@ def calculate_settings(n : int) -> (int, int,): CUDA_STREAM = None get_ptr = bnb.functional.get_ptr import ctypes +ctypes_c_int = ctypes.c_int +ctypes_c_int32 = ctypes.c_int32 cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 cdequantize_blockwise_fp16_nf4 = bnb.functional.lib.cdequantize_blockwise_fp16_nf4 cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 @@ -98,25 +101,31 @@ def get_lora_parameters(proj): def get_lora_parameters_bias(proj): # For DPO or disabled adapters - base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) + base_layer = getattr(proj, "base_layer", proj) # (proj.base_layer if hasattr(proj, "base_layer") else proj) W = base_layer.weight bias = base_layer.bias - if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: + # if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: + if getattr(proj, "disable_adapters", True) or proj.merged: return W, QUANT_STATE(W), None, None, None, bias pass active_adapter = 
proj.active_adapters[0] if \ - hasattr(proj, "active_adapters") else proj.active_adapter + getattr(proj, "active_adapters", ) else proj.active_adapter A = proj.lora_A [active_adapter].weight B = proj.lora_B [active_adapter].weight s = proj.scaling[active_adapter] return W, QUANT_STATE(W), A, B, s, bias pass +global WEIGHT_BUFFER +WEIGHT_BUFFER = None +global ABSMAX_BUFFER +ABSMAX_BUFFER = None if HAS_CUDA_STREAM: - def fast_dequantize(W, quant_state = None, out = None): + @torch.inference_mode + def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False): if quant_state is None: return W if type(quant_state) is not list: # New quant_state as a class @@ -139,36 +148,54 @@ def fast_dequantize(W, quant_state = None, out = None): global CUDA_STREAM if CUDA_STREAM is None: CUDA_STREAM = torch.cuda.current_stream("cuda:0") + n_elements_absmax = absmax.numel() + # Create weight matrix - if out is None: - out = torch.empty(shape, dtype = dtype, device = "cuda:0") + if use_global_buffer: + + # Use same buffers for faster inference + size = shape[0]*shape[1] + global WEIGHT_BUFFER + global ABSMAX_BUFFER + if WEIGHT_BUFFER is None: + WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = "cuda:0", requires_grad = False) + ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) + + if size > WEIGHT_BUFFER.numel(): WEIGHT_BUFFER.resize_(size) + if n_elements_absmax > ABSMAX_BUFFER.numel(): ABSMAX_BUFFER.resize_(n_elements_absmax) + + out = WEIGHT_BUFFER[:size].view(shape) + out_absmax = ABSMAX_BUFFER[:n_elements_absmax] else: - assert(out.shape == shape) - assert(out.dtype == dtype) + if out is None: + out = torch.empty(shape, dtype = dtype, device = "cuda:0", requires_grad = False) + else: + assert(out.shape == shape) + assert(out.dtype == dtype) + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) + pass # NF4 dequantization of statistics 
- n_elements_absmax = absmax.numel() - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") - - # Do dequantization ptr_out_absmax = get_ptr(out_absmax) cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, - ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax), CUDA_STREAM, + ctypes_c_int(blocksize2), ctypes_c_int(n_elements_absmax), CUDA_STREAM, ) out_absmax += offset + # Dequantize W fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ cdequantize_blockwise_bf16_nf4 fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), - ctypes.c_int(blocksize), ctypes.c_int(out.numel()), CUDA_STREAM,) + ctypes_c_int(blocksize), ctypes_c_int(out.numel()), CUDA_STREAM,) # Careful returning transposed data is_transposed = (True if W.shape[0] == 1 else False) return out.t() if is_transposed else out pass else: - def fast_dequantize(W, quant_state = None, out = None): + @torch.inference_mode + def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False): if quant_state is None: return W if type(quant_state) is not list: # New quant_state as a class @@ -189,29 +216,45 @@ def fast_dequantize(W, quant_state = None, out = None): absmax2, code2, blocksize2, _, _, _, _ = state2 pass + n_elements_absmax = absmax.numel() + # Create weight matrix - if out is None: - out = torch.empty(shape, dtype = dtype, device = "cuda:0") - else: - assert(out.shape == shape) - assert(out.dtype == dtype) + if use_global_buffer: - # NF4 dequantization of statistics - n_elements_absmax = absmax.numel() - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0") + # Use same buffers for faster inference + size = shape[0]*shape[1] + global WEIGHT_BUFFER + global ABSMAX_BUFFER + if WEIGHT_BUFFER is None: + WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = "cuda:0", requires_grad = False) + ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = dtype, device 
= "cuda:0", requires_grad = False) + + if size > WEIGHT_BUFFER.numel(): WEIGHT_BUFFER.resize_(size) + if n_elements_absmax > ABSMAX_BUFFER.numel(): ABSMAX_BUFFER.resize_(n_elements_absmax) + + out = WEIGHT_BUFFER[:size].view(shape) + out_absmax = ABSMAX_BUFFER[:n_elements_absmax] + else: + if out is None: + out = torch.empty(shape, dtype = dtype, device = "cuda:0", requires_grad = False) + else: + assert(out.shape == shape) + assert(out.dtype == dtype) + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) + pass # Do dequantization ptr_out_absmax = get_ptr(out_absmax) cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, - ctypes.c_int(blocksize2), ctypes.c_int(n_elements_absmax), + ctypes_c_int(blocksize2), ctypes_c_int(n_elements_absmax), ) out_absmax += offset fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ cdequantize_blockwise_bf16_nf4 fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), - ctypes.c_int(blocksize), ctypes.c_int(out.numel()),) + ctypes_c_int(blocksize), ctypes_c_int(out.numel()),) # Careful returning transposed data is_transposed = (True if W.shape[0] == 1 else False) @@ -263,17 +306,17 @@ def fast_gemv(X, W, quant_state, out = None): lda = shape[0] ldc = shape[0] ldb = (hd+1)//2 - m = ctypes.c_int32(m) - n = ctypes.c_int32(n) - k = ctypes.c_int32(k) - lda = ctypes.c_int32(lda) - ldb = ctypes.c_int32(ldb) - ldc = ctypes.c_int32(ldc) + m = ctypes_c_int32(m) + n = ctypes_c_int32(n) + k = ctypes_c_int32(k) + lda = ctypes_c_int32(lda) + ldb = ctypes_c_int32(ldb) + ldc = ctypes_c_int32(ldc) df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), - ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), CUDA_STREAM, + ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), CUDA_STREAM, ) df += offset absmax = df @@ -281,7 +324,7 
@@ def fast_gemv(X, W, quant_state, out = None): fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ cgemm_4bit_inference_naive_bf16 - blocksize = ctypes.c_int32(blocksize) + blocksize = ctypes_c_int32(blocksize) fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), lda, ldb, ldc, blocksize, CUDA_STREAM,) @@ -327,17 +370,17 @@ def fast_gemv(X, W, quant_state, out = None): lda = shape[0] ldc = shape[0] ldb = (hd+1)//2 - m = ctypes.c_int32(m) - n = ctypes.c_int32(n) - k = ctypes.c_int32(k) - lda = ctypes.c_int32(lda) - ldb = ctypes.c_int32(ldb) - ldc = ctypes.c_int32(ldc) + m = ctypes_c_int32(m) + n = ctypes_c_int32(n) + k = ctypes_c_int32(k) + lda = ctypes_c_int32(lda) + ldb = ctypes_c_int32(ldb) + ldc = ctypes_c_int32(ldc) df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), - ctypes.c_int(blocksize2), ctypes.c_int(df.numel()), + ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), ) df += offset absmax = df @@ -345,7 +388,7 @@ def fast_gemv(X, W, quant_state, out = None): fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ cgemm_4bit_inference_naive_bf16 - blocksize = ctypes.c_int32(blocksize) + blocksize = ctypes_c_int32(blocksize) fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), lda, ldb, ldc, blocksize,) @@ -354,6 +397,9 @@ def fast_gemv(X, W, quant_state, out = None): pass +torch_mm = torch.mm +torch_mv = torch.mv +torch_matmul = torch.matmul def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S, bias = get_lora_parameters_bias(proj) @@ -361,12 +407,12 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): if q_len != 1: return matmul_lora(X, W, W_quant, lora_A, lora_B, lora_S) if W_quant is None: - out = torch.matmul(X, W.t(), out = out) + out = torch_matmul(X, W.t(), out = out) elif bsz == 1 and 
q_len == 1: out = fast_gemv(X, W, W_quant, out = out) else: - W = fast_dequantize(W.t(), W_quant) - out = torch.matmul(X, W, out = out) + W = fast_dequantize(W.t(), W_quant, use_global_buffer = True) + out = torch_matmul(X, W, out = out) pass # Add in LoRA weights @@ -381,11 +427,11 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): if bsz == 1: out = out.view(out_dim) - temp_lora = torch.mv(lora_A._fast_lora, X.ravel(), out = temp_lora) + temp_lora = torch_mv(lora_A._fast_lora, X.ravel(), out = temp_lora) out.addmv_(lora_B._fast_lora, temp_lora, alpha = lora_S) else: out = out.view(bsz, out_dim) - temp_lora = torch.mm(X.view(bsz, in_dim), lora_A._fast_lora.t(), out = temp_lora) + temp_lora = torch_mm(X.view(bsz, in_dim), lora_A._fast_lora.t(), out = temp_lora) out.addmm_(temp_lora, lora_B._fast_lora.t(), alpha = lora_S) pass out = out.view(bsz, 1, out_dim) @@ -399,7 +445,7 @@ def fast_linear_forward(proj, X, temp_lora = None, out = None): def matmul_lora(X, W, W_quant, A, B, s, out = None): dtype = X.dtype - W = fast_dequantize(W.t(), W_quant) + W = fast_dequantize(W.t(), W_quant, use_global_buffer = True) if X.dim() == 3: batch, seq_len, d = X.shape @@ -409,7 +455,7 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): reshape = False pass - out = torch.matmul(X, W, out = out) + out = torch_matmul(X, W, out = out) if W_quant is not None: del W if A is not None: diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index c52d14f402..b15e04ab74 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -20,3 +20,4 @@ from .qwen2 import FastQwen2Model from .dpo import PatchDPOTrainer, PatchKTOTrainer from ._utils import is_bfloat16_supported +from .rl import PatchFastRL diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index b0d51a8607..017b5b5533 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -__version__ = "2025.1.8" +__version__ = "2025.2.1" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 5dc71f920a..9c12abb98f 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -17,115 +17,8 @@ "PatchKTOTrainer", ] -try: - from transformers.utils.notebook import ( - IntervalStrategy, - NotebookTrainingTracker, - NotebookProgressCallback, - ) - HAS_NOTEBOOK = True -except: - HAS_NOTEBOOK = False -pass -import torch -from ._utils import torch_compile_options -import inspect -import torch.nn as nn -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union +from .rl import PatchFastRL +def PatchDPOTrainer(): PatchFastRL("DPO") -DPOTrainer_metrics = [ - "rewards/chosen", - "rewards/rejected", - "rewards/accuracies", - "rewards/margins", - "logps/rejected", - "logps/chosen", - "logits/rejected", - "logits/chosen", -] -set_DPOTrainer_metrics = frozenset(DPOTrainer_metrics) - - -def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): - self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step" - self.training_loss = 0 - self.last_log = 0 - column_names = [self.first_column] + ["Training Loss"] - if args.eval_strategy != IntervalStrategy.NO: - column_names.append("Validation Loss") - column_names += [x.replace("/", " / ") for x in DPOTrainer_metrics] - self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) -pass - - -def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwargs): - # Only for when there is no evaluation - if args.eval_strategy == IntervalStrategy.NO and "loss" in logs: - values = {"Training Loss": logs["loss"]} - for metric in DPOTrainer_metrics: - values[metric.replace("/", " / ")] = logs[metric] - pass - # First column is necessarily Step since we're not in epoch eval strategy - values["Step"] = state.global_step - 
self.training_tracker.write_line(values) - pass -pass - - -def NotebookTrainingTracker_write_line(self, values): - """ - Write the values in the inner table. - - Args: - values (`Dict[str, float]`): The values to display. - """ - if self.inner_table is None: - self.inner_table = [list(values.keys()), list(values.values())] - else: - columns = self.inner_table[0] - new_values = {} - for key, value in values.items(): - lowered = key.lower() - if lowered in set_DPOTrainer_metrics: - new_values[lowered.replace("/", " / ")] = value - else: - new_values[key] = value - pass - values = new_values - - self.inner_table[0] = columns - if len(self.inner_table) > 1: - last_values = self.inner_table[-1] - first_column = self.inner_table[0][0] - if last_values[0] != values[first_column]: - # write new line - self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) - else: - # update last line - new_values = values - for c in columns: - if c not in new_values.keys(): - new_values[c] = last_values[columns.index(c)] - self.inner_table[-1] = [new_values[c] for c in columns] - else: - # Edit for evaluation purposes - self.inner_table.append([values[c] if c in values else 0 for c in columns]) - pass - pass -pass - - -def PatchDPOTrainer(): - if HAS_NOTEBOOK: - from transformers.trainer import is_in_notebook - if is_in_notebook(): - # Patch DPO notebook printing - NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line - from transformers.trainer import DEFAULT_PROGRESS_CALLBACK - DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin - DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log - pass - pass -pass -PatchKTOTrainer = PatchDPOTrainer +def PatchKTOTrainer(): PatchFastRL("KTO") diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index c654343282..bc29c46abc 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -210,7 +210,15 @@ def __init__(self, dim = None, 
max_position_embeddings=2048, base=10000, device= config = None, # [TODO] Hack to pass in config - need to remove later ): super().__init__() - if config is not None: return # [TODO] Hack to pass in config - need to remove later + if config is not None: + # [TODO] Hack to pass in config - need to remove later + base = config.rope_theta + partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 + dim = getattr(config, "head_dim", None) + if dim is None: dim = int((config.hidden_size // config.num_attention_heads)) + device = "cuda" + max_position_embeddings = config.max_position_embeddings + pass self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index da3295adfd..a337472a3e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -20,7 +20,7 @@ from ._utils import __version__ from torch.nn.functional import scaled_dot_product_attention from transformers import __version__ as transformers_version -from unsloth_zoo.utils import Version +from unsloth_zoo.utils import Version, _get_dtype transformers_version = Version(transformers_version) # Transformers moved rotary embeddings out of all attention layers IS_ATTENTION_REFACTOR = transformers_version > Version("4.47.1") @@ -70,7 +70,8 @@ from huggingface_hub.utils._token import get_token pass from triton import __version__ as triton_version -BlockDiagonalCausalMask = xformers.attn_bias.BlockDiagonalCausalMask if xformers is not None else None +HAS_XFORMERS = xformers is not None +BlockDiagonalCausalMask = xformers.attn_bias.BlockDiagonalCausalMask if HAS_XFORMERS else None def original_apply_qkv(self, X): @@ -89,6 +90,8 @@ def original_apply_o(self, X): from math import sqrt as math_sqrt KV_CACHE_INCREMENT = 256 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax +# SDPA has GQA internally +SDPA_HAS_GQA = "enable_gqa" in 
scaled_dot_product_attention.__doc__ # Fix new HF's inference code def _fast_prepare_inputs_for_generation(self, input_ids, **kwargs,): @@ -243,7 +246,7 @@ def LlamaAttention_fast_forward_inference( # Grouped query attention _, _, cached_len, _ = Knn.shape - if n_groups != 1: + if bsz == 1 or not SDPA_HAS_GQA and n_groups != 1: Knn = Knn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Vnn = Vnn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) @@ -262,7 +265,10 @@ def LlamaAttention_fast_forward_inference( A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)#.to(A.dtype) A = torch_matmul(A, Vnn, out = Qn) else: - A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) + if SDPA_HAS_GQA: + A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False, enable_gqa = True) + else: + A = scaled_dot_product_attention(Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False) pass A = A.transpose(1, 2) A = A.reshape(bsz, 1, attention_size) @@ -272,15 +278,15 @@ def LlamaAttention_fast_forward_inference( torch_nn_functional_silu = torch.nn.functional.silu -def fast_swiglu_inference(self, X): +def fast_swiglu_inference(self, X, temp_gate = None, temp_up = None): # gate = self.gate_proj(X) # up = self.up_proj(X) bsz, _, hd = X.shape # mlp_size = self.config.intermediate_size # temp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda:0") - gate = fast_linear_forward(self.gate_proj, X)#, out = temp[0]) - up = fast_linear_forward(self. up_proj, X)#, out = temp[1]) + gate = fast_linear_forward(self.gate_proj, X, out = temp_gate) + up = fast_linear_forward(self. 
up_proj, X, out = temp_up) gate = torch_nn_functional_silu(gate, inplace = True) gate *= up @@ -289,14 +295,23 @@ def fast_swiglu_inference(self, X): return down pass - -def fast_rms_layernorm_inference(self, X): +torch_square = torch.square +torch_mean = torch.mean +def fast_rms_layernorm_inference(self, X, XX = None, XX2 = None, variance = None): old_dtype = X.dtype - XX = X.to(torch.float32) - variance = XX.square().mean(-1, keepdim = True) + if XX is None: + XX = X.to(torch.float32) + variance = XX.square().mean(-1, keepdim = True) + else: + XX.copy_(X) + torch_mean(torch_square(XX, out = XX2), -1, keepdim = True, out = variance) + pass variance += self.variance_epsilon XX *= variance.rsqrt_() - X = XX.to(old_dtype) # Must preserve due to residual + + if XX is None: X = XX.to(old_dtype) + else: X.copy_(XX) + X *= self.weight return X pass @@ -403,7 +418,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - if (not HAS_FLASH_ATTENTION and attention_mask is None): + if (not HAS_FLASH_ATTENTION and HAS_XFORMERS and attention_mask is None): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching Q = Q.transpose(1, 2) @@ -902,15 +917,29 @@ def LlamaModel_fast_forward_inference( attention_mask = None, ): input_ids = input_ids[:,:self.max_seq_length] - hidden_states = self.model.embed_tokens(input_ids) - hidden_states = hidden_states.to(self.config.torch_dtype) - bsz, q_len, hd = hidden_states.shape + bsz, q_len = input_ids.shape + hd = self.config.hidden_size + mlp_size = self.config.intermediate_size + + X = self.model.embed_tokens(input_ids) + X = X.to(self.config.torch_dtype) + bsz, q_len, hd = X.shape + assert(q_len == 1) + + # Get saved buffers to reduce memory movement + residual = torch.empty((bsz, q_len, hd), dtype = torch.float32, device = "cuda:0") + _XX = torch.empty((2, bsz, q_len, hd), dtype = torch.float32, device = "cuda:0") + XX, XX2 = _XX[0], _XX[1] + variance = 
torch.empty((bsz, q_len, 1), dtype = torch.float32, device = "cuda:0") + temp_mlp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda:0") + temp_gate, temp_up = temp_mlp[0], temp_mlp[1] + seq_len = past_key_values[0][0].shape[-2] if bsz != 1: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (bsz, q_len), - hidden_states, + X, seq_len, sliding_window = getattr(self.config, "sliding_window", None), ) @@ -919,30 +948,54 @@ def LlamaModel_fast_forward_inference( pass next_decoder_cache = [] + for idx, decoder_layer in enumerate(self.model.layers): - residual = hidden_states - hidden_states = fast_rms_layernorm_inference(decoder_layer.input_layernorm, hidden_states) - hidden_states, present_key_value = LlamaAttention_fast_forward_inference( + residual.copy_(X) # residual = X + X = fast_rms_layernorm_inference( + decoder_layer.input_layernorm, + X, + XX = XX, + XX2 = XX2, + variance = variance, + ) + X, present_key_value = LlamaAttention_fast_forward_inference( decoder_layer.self_attn, - hidden_states = hidden_states, + hidden_states = X, past_key_value = past_key_values[idx], position_ids = position_ids, attention_mask = attention_mask, do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"), ) - hidden_states += residual - - residual = hidden_states - hidden_states = fast_rms_layernorm_inference(decoder_layer.post_attention_layernorm, hidden_states) - hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states) - hidden_states += residual + X += residual + + residual.copy_(X) # residual = X + X = fast_rms_layernorm_inference( + decoder_layer.post_attention_layernorm, + X, + XX = XX, + XX2 = XX2, + variance = variance, + ) + X = fast_swiglu_inference( + decoder_layer.mlp, + X, + temp_gate = temp_gate, + temp_up = temp_up, + ) + X += residual next_decoder_cache.append(present_key_value) pass - hidden_states = fast_rms_layernorm_inference(self.model.norm, hidden_states) + X = 
fast_rms_layernorm_inference( + self.model.norm, + X, + XX = XX, + XX2 = XX2, + variance = variance, + ) return BaseModelOutputWithPast( - last_hidden_state = hidden_states, + last_hidden_state = X, past_key_values = next_decoder_cache, hidden_states = [], attentions = [], @@ -977,7 +1030,7 @@ def _CausalLM_fast_forward( attention_mask = attention_mask, ) else: - causal_mask = xformers.attn_bias.LowerTriangularMask() + causal_mask = xformers.attn_bias.LowerTriangularMask() if HAS_XFORMERS else None output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( @@ -1159,7 +1212,8 @@ def __init__(self, dim = None, max_position_embeddings=2048, base=10000, device= # [TODO] Hack to pass in config - need to remove later base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, "partial_rotary_factor") else 1.0 - dim = int((config.hidden_size // config.num_attention_heads)) + dim = getattr(config, "head_dim", None) + if dim is None: dim = int((config.hidden_size // config.num_attention_heads)) device = "cuda" max_position_embeddings = config.max_position_embeddings pass @@ -1580,9 +1634,18 @@ def from_pretrained( model_patcher = None, tokenizer_name = None, trust_remote_code = False, + + fast_inference = False, # uses vLLM + gpu_memory_utilization = 0.5, + float8_kv_cache = False, + random_state = 3407, + max_lora_rank = 16, + disable_log_stats = False, **kwargs, ): if trust_remote_code: + if fast_inference: + raise NotImplementedError("Unsloth: Fast inference does not support `trust_remote_code` yet.") print( "Unsloth: WARNING `trust_remote_code` is True.\n"\ "Are you certain you want to do remote code execution?" @@ -1596,9 +1659,9 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. 
Platform: {platform_system}.\n"\ - f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ - f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ + f" {chr(92)}{chr(92)} /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ + f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) @@ -1626,7 +1689,11 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) # RoPE Scaling - model_config = AutoConfig.from_pretrained(model_name, token = token) + model_config = AutoConfig.from_pretrained( + model_name, + token = token, + attn_implementation = "sdpa", + ) model_max_seq_length = model_config.max_position_embeddings # Check if RoPE Scaling is even allowed @@ -1647,6 +1714,9 @@ def from_pretrained( rope_scaling = max_seq_length / model_max_seq_length + if fast_inference: + raise NotImplementedError("Unsloth: Fast inference does not yet work with RoPE Scaling.") + logger.warning_once( f"Unsloth: {model_name} can only handle sequence lengths of at most "\ f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ @@ -1688,17 +1758,54 @@ def from_pretrained( # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config - model = AutoModelForCausalLM.from_pretrained( - model_name, - device_map = device_map, - torch_dtype = dtype, - # quantization_config = bnb_config, - token = token, - max_position_embeddings = max_position_embeddings, - trust_remote_code = 
trust_remote_code, - attn_implementation = "eager", - **kwargs, - ) + if not fast_inference: + model = AutoModelForCausalLM.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + # quantization_config = bnb_config, + token = token, + max_position_embeddings = max_position_embeddings, + trust_remote_code = trust_remote_code, + attn_implementation = "eager", + **kwargs, + ) + else: + from unsloth_zoo.vllm_utils import ( + load_vllm, + get_vllm_state_dict, + convert_vllm_to_huggingface, + generate_batches, + ) + allowed_args = inspect.getfullargspec(load_vllm).args + load_vllm_kwargs = dict( + model_name = model_name, + config = model_config, + gpu_memory_utilization = gpu_memory_utilization, + max_seq_length = max_seq_length, + dtype = dtype, + float8_kv_cache = float8_kv_cache, + enable_lora = True, + max_lora_rank = max_lora_rank, + disable_log_stats = disable_log_stats, + ) + for allowed_arg in allowed_args: + if allowed_arg not in load_vllm_kwargs and allowed_arg in kwargs: + load_vllm_kwargs[allowed_arg] = kwargs[allowed_arg] + pass + + # Load vLLM first + llm = load_vllm(**load_vllm_kwargs) + + # Convert to HF format + _, quant_state_dict = get_vllm_state_dict(llm, config = model_config) + model = convert_vllm_to_huggingface(quant_state_dict, model_config, dtype) + model.vllm_engine = llm + model.fast_generate = model.vllm_engine.generate + + from functools import partial + model.fast_generate_batches = partial(generate_batches, model.vllm_engine) + pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! @@ -2194,6 +2301,20 @@ def get_peft_model( modules_to_save = list(set(modules_to_save)) pass + vllm_engine = None + if hasattr(model, "vllm_engine"): + # Fast inference! 
+ vllm_engine = model.vllm_engine + vllm_fast_generate = model.fast_generate + vllm_fast_generate_batches = model.fast_generate_batches + + if modules_to_save is not None: + raise NotImplementedError("Unsloth: Currently fast inference does not work with training embeddings or lm_head.") + + if bias != "none": + raise NotImplementedError("Unsloth: Currently fast inference does not work with using biases for LoRA.") + pass + # Get LoRA arguments = dict( r = r, @@ -2300,6 +2421,19 @@ def get_peft_model( torch.cuda.empty_cache() pass + # Patch for fast inference + if vllm_engine is not None: + model.vllm_engine = vllm_engine + model.fast_generate = vllm_fast_generate + model.fast_generate_batches = vllm_fast_generate_batches + + # Also saving and loading LoRA + from functools import partial + from unsloth_zoo.vllm_utils import save_lora, load_lora + model.save_lora = partial(save_lora, model) + model.load_lora = partial(load_lora, model) + pass + return model pass @@ -2509,18 +2643,24 @@ def for_inference(model): # return # pass - internal_model = model - internal_model.gradient_checkpointing = False - internal_model.training = False - - while hasattr(internal_model, "model"): - internal_model = internal_model.model - internal_model.gradient_checkpointing = False - internal_model.training = False - pass - if hasattr(internal_model, "training"): - internal_model.training = False - pass + m = model + while hasattr(m, "model"): + if hasattr(m, "gradient_checkpointing"): + m.gradient_checkpointing = False + if hasattr(m, "training"): + m.training = False + # Pad tokenizer to the left + if hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.padding_side = "left" + m = m.model + pass + if hasattr(m, "gradient_checkpointing"): + m.gradient_checkpointing = False + if hasattr(m, "training"): + m.training = False + # Pad tokenizer to the left + if hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.padding_side = "left" # Also check if lm_head / embeddings 
are trained internal_model = model @@ -2529,30 +2669,13 @@ def for_inference(model): pass lm_head = internal_model.lm_head.weight device_type = lm_head.device.type - dtype = model.config.torch_dtype - - if type(dtype) is str: - if dtype == "float16": dtype = torch.float16 - elif dtype == "bfloat16": dtype = torch.bfloat16 - pass + dtype = _get_dtype(model.config.torch_dtype) # Wrap model.generate if model.generate.__name__ != "_fast_generate": model._unwrapped_old_generate = model.generate model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) pass - - # Patch tokenizer to pad to the left - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "left" - pass # Also disable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -2570,9 +2693,6 @@ def for_inference(model): @staticmethod def for_training(model, use_gradient_checkpointing = True): - internal_model = model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True # Delete all fast inference loras for param in model.parameters(): @@ -2580,14 +2700,24 @@ def for_training(model, use_gradient_checkpointing = True): del param._fast_lora pass - while hasattr(internal_model, "model"): - internal_model = internal_model.model - internal_model.gradient_checkpointing = use_gradient_checkpointing - internal_model.training = True - pass - if hasattr(internal_model, "training"): - internal_model.training = True - pass + m = model + while hasattr(m, "model"): + if hasattr(m, "gradient_checkpointing"): + m.gradient_checkpointing = use_gradient_checkpointing + if hasattr(m, "training"): + m.training = True + # Pad tokenizer to the right + if 
hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.padding_side = "right" + m = m.model + pass + if hasattr(m, "gradient_checkpointing"): + m.gradient_checkpointing = use_gradient_checkpointing + if hasattr(m, "training"): + m.training = True + # Pad tokenizer to the right + if hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.padding_side = "right" # Also revert model.generate if hasattr(model, "_unwrapped_old_generate"): @@ -2595,18 +2725,6 @@ def for_training(model, use_gradient_checkpointing = True): del model._unwrapped_old_generate pass - # Patch tokenizer to pad to the right - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.padding_side = "right" - pass - # Also re-enable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): embeddings = model.get_input_embeddings() diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index e9caad0e60..39b367e275 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -30,11 +30,11 @@ from huggingface_hub.utils._token import get_token pass from huggingface_hub import HfFileSystem +import importlib.util # [TODO] Move USE_MODELSCOPE to utils USE_MODELSCOPE = os.environ.get("UNSLOTH_USE_MODELSCOPE", "0") == "1" if USE_MODELSCOPE: - import importlib if importlib.util.find_spec("modelscope") is None: raise ImportError(f'You are using the modelscope hub, please install modelscope by `pip install modelscope -U`') pass @@ -73,9 +73,25 @@ def from_pretrained( resize_model_vocab = None, revision = None, use_exact_model_name = False, + + fast_inference = False, # uses vLLM + gpu_memory_utilization = 0.5, + float8_kv_cache = False, + random_state = 3407, + max_lora_rank = 64, + 
disable_log_stats = True, *args, **kwargs, ): if token is None: token = get_token() + + if fast_inference: + if importlib.util.find_spec("vllm") is None: + raise ImportError( + "Unsloth: Please install vLLM before enabling `fast_inference`!\n"\ + "You can do this in a terminal via `pip install vllm`" + ) + pass + pass old_model_name = model_name if not use_exact_model_name: @@ -255,6 +271,24 @@ def from_pretrained( tokenizer_name = None pass + if fast_inference: + from unsloth_zoo.vllm_utils import ( + patch_vllm, + vllm_dynamic_quant_supported, + ) + patch_vllm() + if model_name.endswith("unsloth-bnb-4bit"): + if not vllm_dynamic_quant_supported(model_name, model_config): + # Instead use -bnb-4bit variant + print( + f"Unsloth: Switching from Unsloth dynamic quant to normal quant since\n"\ + f"we do not yet support fast inference for {model_name}" + ) + model_name = model_name[:-len("unsloth-bnb-4bit")] + "bnb-4bit" + pass + pass + pass + model, tokenizer = dispatch_model.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, @@ -268,6 +302,13 @@ def from_pretrained( tokenizer_name = tokenizer_name, trust_remote_code = trust_remote_code, revision = revision if not is_peft else None, + + fast_inference = fast_inference, + gpu_memory_utilization = gpu_memory_utilization, + float8_kv_cache = float8_kv_cache, + random_state = random_state, + max_lora_rank = max_lora_rank, + disable_log_stats = disable_log_stats, *args, **kwargs, ) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index bc01c28583..c81290b662 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -304,25 +304,30 @@ "unsloth/Mistral-Small-Instruct-2409", "mistralai/Mistral-Small-Instruct-2409", ), - "unsloth/Qwen2.5-0.5B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-0.5B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-0.5B-Instruct", "Qwen/Qwen2.5-0.5B-Instruct", + "unsloth/Qwen2.5-0.5B-Instruct-bnb-4bit", ), - "unsloth/Qwen2.5-1.5B-Instruct-bnb-4bit" : ( + 
"unsloth/Qwen2.5-1.5B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-1.5B-Instruct", "Qwen/Qwen2.5-1.5B-Instruct", + "unsloth/Qwen2.5-1.5B-Instruct-bnb-4bit", ), - "unsloth/Qwen2.5-3B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-3B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-3B-Instruct", + "unsloth/Qwen2.5-3B-Instruct-bnb-4bit", ), - "unsloth/Qwen2.5-7B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-7B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-7B-Instruct", + "unsloth/Qwen2.5-7B-Instruct-bnb-4bit", ), - "unsloth/Qwen2.5-14B-Instruct-bnb-4bit" : ( + "unsloth/Qwen2.5-14B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-14B-Instruct", "Qwen/Qwen2.5-14B-Instruct", + "unsloth/Qwen2.5-14B-Instruct-bnb-4bit", ), "unsloth/Qwen2.5-32B-Instruct-bnb-4bit" : ( "unsloth/Qwen2.5-32B-Instruct", @@ -332,25 +337,30 @@ "unsloth/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-72B-Instruct", ), - "unsloth/Qwen2.5-0.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-0.5B-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-0.5B", "Qwen/Qwen2.5-0.5B", + "unsloth/Qwen2.5-0.5B-bnb-4bit", ), - "unsloth/Qwen2.5-1.5B-bnb-4bit" : ( + "unsloth/Qwen2.5-1.5B-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-1.5B", "Qwen/Qwen2.5-1.5B", + "unsloth/Qwen2.5-1.5B-bnb-4bit", ), - "unsloth/Qwen2.5-3B-bnb-4bit" : ( + "unsloth/Qwen2.5-3B-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-3B", "Qwen/Qwen2.5-3B", + "unsloth/Qwen2.5-3B-bnb-4bit", ), - "unsloth/Qwen2.5-7B-bnb-4bit" : ( + "unsloth/Qwen2.5-7B-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-7B", "Qwen/Qwen2.5-7B", + "unsloth/Qwen2.5-7B-bnb-4bit", ), - "unsloth/Qwen2.5-14B-bnb-4bit" : ( + "unsloth/Qwen2.5-14B-unsloth-bnb-4bit" : ( "unsloth/Qwen2.5-14B", "Qwen/Qwen2.5-14B", + "unsloth/Qwen2.5-14B-bnb-4bit", ), "unsloth/Qwen2.5-32B-bnb-4bit" : ( "unsloth/Qwen2.5-32B", @@ -555,12 +565,12 @@ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", ), "unsloth/Mistral-Small-24B-Base-2501-unsloth-bnb-4bit" : ( - "unsloth/Mistral-Small-24B-Base", + 
"unsloth/Mistral-Small-24B-Base-2501", "mistralai/Mistral-Small-24B-Base-2501", "unsloth/Mistral-Small-24B-Base-2501-bnb-4bit", ), "unsloth/Mistral-Small-24B-Instruct-2501-unsloth-bnb-4bit" : ( - "unsloth/Mistral-Small-24B-Instruct", + "unsloth/Mistral-Small-24B-Instruct-2501", "mistralai/Mistral-Small-24B-Instruct-2501", "unsloth/Mistral-Small-24B-Instruct-2501-bnb-4bit", ), diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py new file mode 100644 index 0000000000..22e1e0f6cb --- /dev/null +++ b/unsloth/models/rl.py @@ -0,0 +1,423 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + "PatchFastRL", +] + +METRICS_MOVE_TO_END = [ + "nll", + "aux", + "beta", + "alpha", +] +import torch +try: + from transformers.utils.notebook import ( + IntervalStrategy, + NotebookTrainingTracker, + NotebookProgressCallback, + ) + HAS_NOTEBOOK = True +except: + HAS_NOTEBOOK = False +pass +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union +import inspect +import os +import re +import functools +from unsloth_zoo.compiler import create_new_function + + +def PatchRL(FastLanguageModel): + + from trl.models.utils import unwrap_model_for_generation + from contextlib import contextmanager + + @contextmanager + def unsloth_unwrap_model_for_generation(model, accelerator): + with unwrap_model_for_generation(model, accelerator) as unwrapped_model: + # Put the model in inference mode. + FastLanguageModel.for_inference(unwrapped_model) + + # We must use .clone for Unsloth since we force inference_mode + # Rather we should have used no_grad + original_generate = unwrapped_model.generate + def generate_with_clone(*args, **kwargs): + out = original_generate(*args, **kwargs) + if isinstance(out, torch.Tensor): + return out.clone() + return out + pass + unwrapped_model.generate = generate_with_clone + + try: + yield unwrapped_model + finally: + # Restore generate and return + unwrapped_model.generate = original_generate + FastLanguageModel.for_training(model) + pass + pass + pass + + import trl.trainer + trainers = dir(trl.trainer) + trainers = [x for x in trainers if x.endswith("_trainer")] + unwrap = "unwrap_model_for_generation" + for trainer in trainers: + if hasattr(eval(f"trl.trainer.{trainer}"), unwrap): + exec(f"trl.trainer.{trainer}.{unwrap} = unsloth_{unwrap}") + pass +pass + + +def NotebookProgressCallback_on_train_begin(Trainer_metrics): + def _NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): + self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step" + 
self.training_loss = 0 + self.last_log = 0 + column_names = [self.first_column] + ["Training Loss"] + if args.eval_strategy != IntervalStrategy.NO: + column_names.append("Validation Loss") + column_names += [x.replace("/", " / ") for x in Trainer_metrics] + self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) + pass + return _NotebookProgressCallback_on_train_begin +pass + + +def NotebookProgressCallback_on_log(Trainer_metrics): + def _NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwargs): + # Only for when there is no evaluation + if args.eval_strategy == IntervalStrategy.NO and "loss" in logs: + values = {"Training Loss": logs["loss"]} + for metric in Trainer_metrics: + # Sometimes metric is not inside logs + try: values[metric.replace("/", " / ")] = logs[metric] + except: pass + pass + # First column is necessarily Step since we're not in epoch eval strategy + values["Step"] = state.global_step + self.training_tracker.write_line(values) + pass + pass + return _NotebookProgressCallback_on_log +pass + + +def NotebookTrainingTracker_write_line(Trainer_metrics): + set_Trainer_metrics = set(Trainer_metrics) + def _NotebookTrainingTracker_write_line(self, values): + """ + Write the values in the inner table. + + Args: + values (`Dict[str, float]`): The values to display. 
+ """ + if self.inner_table is None: + self.inner_table = [list(values.keys()), list(values.values())] + else: + columns = self.inner_table[0] + new_values = {} + for key, value in values.items(): + lowered = key.lower() + if lowered in set_Trainer_metrics: + new_values[lowered.replace("/", " / ")] = value + else: + new_values[key] = value + pass + values = new_values + + self.inner_table[0] = columns + if len(self.inner_table) > 1: + last_values = self.inner_table[-1] + first_column = self.inner_table[0][0] + if last_values[0] != values[first_column]: + # write new line + self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) + else: + # update last line + new_values = values + for c in columns: + if c not in new_values.keys(): + new_values[c] = last_values[columns.index(c)] + self.inner_table[-1] = [new_values[c] for c in columns] + else: + # Edit for evaluation purposes + self.inner_table.append([values[c] if c in values else 0 for c in columns]) + pass + pass + pass + return _NotebookTrainingTracker_write_line +pass + + +def _PatchRLStatistics(metrics, algorithm): + if HAS_NOTEBOOK: + if len(metrics) == 0: + raise RuntimeError(f"Unsloth: RL statistics for {algorithm} failed with no metrics seen?") + from transformers.trainer import is_in_notebook + if is_in_notebook(): + # Patch DPO notebook printing + NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line(metrics) + from transformers.trainer import DEFAULT_PROGRESS_CALLBACK + DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin(metrics) + DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log(metrics) + pass + pass +pass + + +@functools.cache +def get_trl_metrics(): + # Gets metrics so we can output them in notebooks + + import trl.trainer + trainers = dir(trl.trainer) + trainers = [x for x in trainers if x.endswith("_trainer")] + filepath = inspect.getfile(trl.trainer) + filepath = os.path.split(filepath)[0] + + all_metrics 
= dict() + for trainer in trainers: + filename = os.path.join(filepath, f"{trainer}.py") + if not os.path.exists(filename): continue + with open(filename, "r") as file: file = file.read() + + # Get metrics['kl'] or stats['kl'] + metrics = re.findall(r"metrics\[[\"\']([^\"\']{1,})[\"\']\]", file) + stats = re.findall(r"stats\[[\"\']([^\"\']{1,})[\"\']\]", file) + metrics = metrics + stats + + # Get optional f-strings + metrics_f = re.findall(r"metrics\[f[\"\']\{[^\}]{1,}\}([^\"\']{1,})[\"\']\]", file) + stats_f = re.findall(r"stats\[f[\"\']\{[^\}]{1,}\}([^\"\']{1,})[\"\']\]", file) + metrics_f = metrics_f + stats_f + # Filter out prefixes if seen + # metrics[f"{prefix}rewards/chosen"] + left_prefix = 'prefix = "eval_" if train_eval == "eval" else ""' in file + if left_prefix: metrics += metrics_f + + # Move all eval_ things to the end and reward to the front + beginning = [] + middle = [] + end = [] + for x in metrics: + lowered = x.lower() + if "reward" in lowered: + beginning.append(x) + elif x.lower().startswith("eval"): + end.append(x) + else: + # Check if we want to move to the end + moved = False + for move_end in METRICS_MOVE_TO_END: + if move_end in lowered: + end.append(x) + moved = True + break + if not moved: + middle.append(x) + pass + pass + metrics = beginning + middle + end + + all_metrics[trainer[:trainer.find("_")].upper()] = metrics + pass + return all_metrics +pass + + +def PatchRLStatistics(algorithm = "GRPO"): + # Get notebook statistics columns to show up + algorithm = algorithm.upper() + all_metrics = get_trl_metrics() + if algorithm not in all_metrics: + print( + f"Unsloth for {algorithm.upper()} is not yet implemented! 
Just ignore this function.\n"\ + f"We support: `{list(all_metrics.keys())}`" + ) + pass + _PatchRLStatistics(all_metrics[algorithm], algorithm) +pass + + +def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): + # Patch for vLLM and Unsloth PEFT + import trl + import trl.trainer + + trainer = eval(f"trl.trainer.{trainer_file}") + name = [x for x in dir(trainer) if x.endswith("Trainer") and x != "Trainer" and trainer_file.split("_")[0] in x.lower()] + assert(len(name) == 1) + RLTrainer_name = name[0] + RLTrainer = eval(f"trl.trainer.{trainer_file}.{RLTrainer_name}") + + try: + __init__ = inspect.getsource(RLTrainer.__init__) + except: + # Already patched most likely! + return + old__init__ = __init__ + all_imports = dir(trainer) + assert("Union" in all_imports) + imports = [x for x in all_imports if not x.startswith("_")] + imports += ["Trainer"] + + spaces = __init__.find("def") + __init__ = __init__.split("\n") + __init__ = "\n".join(x[spaces:] for x in __init__) + + # Replace vLLM sections since we already have it done! 
+ vllm_part = re.findall( + r"(\n[\s]{4}"\ + r"if (self|args)\.use_vllm\:.+?"\ + r"\n[\s]{4,}"\ + "else:\n)", + __init__, + flags = re.MULTILINE | re.DOTALL, + ) + if (len(vllm_part) != 1): return + + vllm_part, args = vllm_part[0][0], vllm_part[0][1] + # Strip all comments + new_vllm_part = re.sub(r"\#[^\n]{1,}\n", "", vllm_part) + + # Get SamplingParams + sampling_params = re.findall( + r"\n[\s]{4,}(self\.[^\s]{1,}[\s]{0,}\=[\s]{0,}"\ + r"SamplingParams\(.+?\))", + new_vllm_part, + flags = re.MULTILINE | re.DOTALL, + ) + if len(sampling_params) != 1: return + + sampling_params = sampling_params[0] + # Replace with our vLLM engine + sampling_params = \ + " "*8 + "self.llm = model.vllm_engine; self._last_loaded_step = 0; " + \ + sampling_params # Add spaces + new_vllm_part = f"\n if {args}.use_vllm:\n{sampling_params}\n else:\n" + __init__ = __init__.replace(vllm_part, new_vllm_part) + + # Remove peft_config + __init__ = __init__.replace("elif peft_config is None:", "elif False:") + __init__ = __init__.replace("elif peft_config is not None:", "elif False:") + __init__ = __init__.replace("if peft_config is None:", "if False:") + __init__ = __init__.replace("if peft_config is not None:", "if False:") + __init__ = __init__.replace("get_peft_model(model, peft_config)", "model") + + # Add spaces back into __init__ + __init__ = __init__.split("\n") + __init__ = "\n".join(' '*spaces + x for x in __init__) + + # Search for vLLM calling in all child functions + functions = dir(RLTrainer) + RLTrainer_source = inspect.getsource(RLTrainer) + functions = [x for x in functions if f"def {x}" in RLTrainer_source] + + changed = {"__init__" : (old__init__, __init__,)} + for function in functions: + if not hasattr(RLTrainer, function): continue + fx = getattr(RLTrainer, function) + try: + source = inspect.getsource(fx) + except: + continue + original_source = source + + # llm_model = self.llm.llm_engine.model_executor.driver_worker.model_runner.model + source = re.sub( + 
r"(\n[\s]{4,}).+?model_executor\.driver_worker.+?\n", + r"\n\1pass\n", + source, + ) + + # llm_model.load_weights(model.state_dict().items()) + source = re.sub( + r"(\n[\s]{4,}).+?load_weights\(.+?\n", + r"\n\1pass\n", + source, + ) + + # .state_dict() + source = re.sub( + r"\.state_dict\(\)", + r"", + source, + ) + + # Replace self.llm.generate and self.llm.chat + lora_name = trainer_file + "_lora_model" + source = re.sub( + r"(self\.llm\.(?:generate|chat)\([^\)]{1,})\)", + r"\1, lora_request = model.load_lora('" + lora_name + r"', load_tensors = True))", + source + ) + + # Skip if no changes done + if source == original_source: continue + + # Find all imports + imports += [x for x in all_imports if not x.startswith("_") and x in source] + + changed[function] = (original_source, source,) + pass + + # Import all functions + imports = list(set(imports)) + + # Patch all functions + for function in changed: + old, new = changed[function] + RLTrainer_source = RLTrainer_source.replace(old, new) + pass + RLTrainer_source = RLTrainer_source.replace( + f"class {RLTrainer_name}", f"class Unsloth{RLTrainer_name}", 1 + ) + + # Create new class in compiled cache and import it + module = create_new_function( + RLTrainer_name, + RLTrainer_source, + f"trl.trainer.{trainer_file}", + imports, + ) + + # Patch over modules + exec(f"trl.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) + exec(f"trl.trainer.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) + exec(f"trl.trainer.{trainer_file}.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) + return module +pass + + +def patch_trl_rl_trainers(): + # Patch all TRL modules if they have vLLM or PEFT + import trl.trainer + all_trainers = dir(trl.trainer) + all_trainers = [x for x in all_trainers if x.islower() and x.endswith("_trainer")] + for trainer in all_trainers: + _patch_trl_rl_trainers(trainer) + return +pass + + +def PatchFastRL(algorithm = "GRPO", 
FastLanguageModel = None): + if FastLanguageModel is not None: PatchRL(FastLanguageModel) + patch_trl_rl_trainers() + PatchRLStatistics(algorithm) +pass From 3fc3894a8f28515653097bee0c79332388269717 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Feb 2025 02:44:23 -0800 Subject: [PATCH 1012/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 88c757b333..406351ef18 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.1.4", + "unsloth_zoo>=2025.2.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -344,7 +344,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.1.4", + "unsloth_zoo>=2025.2.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", From f46d3dd80c0f4e9772e2020f95fa361feb14f172 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Feb 2025 02:44:46 -0800 Subject: [PATCH 1013/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 017b5b5533..0ebe9fe10a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.1" +__version__ = "2025.2.2" __all__ = [ "SUPPORTS_BFLOAT16", From 18e1bb545784aa6075ad356d62e49f05cef084ed Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Feb 2025 03:23:54 -0800 Subject: [PATCH 1014/1088] Update --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 406351ef18..d89ea2c4d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.2.1", + "unsloth_zoo>=2025.2.2", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -344,7 +344,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.2.1", + "unsloth_zoo>=2025.2.2", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c89fd0f1fd..bdde33c507 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -196,7 +196,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.2.1"): + if Version(unsloth_zoo_version) < Version("2025.2.2"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0ebe9fe10a..be7d2214a2 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.2" +__version__ = "2025.2.3" __all__ = [ "SUPPORTS_BFLOAT16", From 6bdaef3eebb117470f6ab263b23bc725080fe66e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Feb 2025 05:08:22 -0800 Subject: [PATCH 1015/1088] GRPO Bug fixes (#1623) * use exact model name * Update save.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * print * Update _utils.py * Update _utils.py * Update llama.py * Update _utils.py * Update vision.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update loader.py * accurate_accumulation * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update pyproject.toml * Update __init__.py * Update pyproject.toml * Update __init__.py * Update __init__.py * Fix Triton heuristics https://github.com/triton-lang/triton/issues/5224 * Update __init__.py * Update __init__.py * Update __init__.py * Update __init__.py * Xformers * Update loader.py * Update loader.py * Rewind * Update _utils.py * Update _utils.py * requires grad * Update loader.py * Update _utils.py * Update loader.py * changing model to base_model if peft model is already used * Improve debugging experience (#1512) * Create CONTRIBUTING.md (#1472) Creating contributing guidelines * Update CONTRIBUTING.md improved sentence * Improve logging control in `unsloth_compile_transformers` by conditionally redirecting stdout based on UNSLOTH_DISABLE_LOGGER environment variable --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> * Update loader.py * Update llama.py * Update llama.py * Revert "Update llama.py" This reverts commit b7ddf962d2f398be0286602d0fbb5b11e317887b. 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Auto change is_bfloat16_supported * Update llama.py * Force data-type * Update llama.py * All attention refactor fix (#1491) * change initilization of n_heads, n_kv_heads, hidden_size in llama.py * do the same for cohere, mistral, gemma2, granite * do the same for flexattention,cohere, mistral, granite * Update llama.py * Update llama.py * Update granite to work with latest post_patch methods (#1502) * Update granite to work with latest post_patch methods * Pass position_embeddings for granite even if transformers<4.47 * Update llama.py --------- Co-authored-by: Daniel Han * Minor fixes for granite models (#1503) * Update granite.py Grab residual multiplier directly from layer * Update llama.py Version should read >= 4.47.1 as that is the version requiring the changes * Update granite.py * Update llama.py --------- Co-authored-by: Daniel Han * support modelscope models and datasets (#1481) * support modelscope * change modelscope args * remove useless import * remove useless import * fix * wip * fix * remove useless code * add readme * add some comments * change print to raise error * update comment * Update loader.py --------- Co-authored-by: Daniel Han * Merge branch 'main' into nightly * Phi 4 * Update llama.py * Torch.Cuda Is Available Condition and Warning (#1545) * check for torch.cuda and triton if available on my machine(mac m3) the cuda were not available * Update pyproject.toml * Update __init__.py --------- Co-authored-by: Daniel Han * Update mistral.py * Update mistral.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix * Bug fixes * Update mapper.py * Add dropout to granite to match HF's implementation (#1557) Signed-off-by: datta0 * Update llama.py * Update llama.py * 
Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han * Update mapper.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * dim fix * Update _utils.py * Torch 2.6 support * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Faster inference? * Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py --------- Signed-off-by: datta0 Co-authored-by: Itsuro Tajima Co-authored-by: Muhammad Osama Co-authored-by: Edd 
<68678137+Erland366@users.noreply.github.com> Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Z Co-authored-by: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Co-authored-by: AminWhat <88392440+aminwhat@users.noreply.github.com> Co-authored-by: Zhe Zhang <2631992879@qq.com> --- unsloth/models/_utils.py | 2 +- unsloth/models/rl.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index be7d2214a2..2ec4adaa11 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.3" +__version__ = "2025.2.4" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 22e1e0f6cb..515c6587f7 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -47,8 +47,8 @@ def PatchRL(FastLanguageModel): from contextlib import contextmanager @contextmanager - def unsloth_unwrap_model_for_generation(model, accelerator): - with unwrap_model_for_generation(model, accelerator) as unwrapped_model: + def unsloth_unwrap_model_for_generation(model, *args, **kwargs): + with unwrap_model_for_generation(model, *args, **kwargs) as unwrapped_model: # Put the model in inference mode. 
FastLanguageModel.for_inference(unwrapped_model) @@ -364,7 +364,7 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): lora_name = trainer_file + "_lora_model" source = re.sub( r"(self\.llm\.(?:generate|chat)\([^\)]{1,})\)", - r"\1, lora_request = model.load_lora('" + lora_name + r"', load_tensors = True))", + r"\1, lora_request = self.model.load_lora('" + lora_name + r"', load_tensors = True))", source ) From 53a773e4fbc53a1d96c7ba107e5fe75dab07027b Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Thu, 6 Feb 2025 17:20:19 -0800 Subject: [PATCH 1016/1088] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3e635dabe3..ca8fe0e473 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | +| **GRPO (reasoning)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb) | 2x faster | 80% less | | **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 70% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 50% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 70% less | @@ -30,7 +31,6 @@ All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and | **Qwen 2.5 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 70% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 75% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb) | 1.9x faster | 60% less | -| **ORPO** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) | 1.9x faster | 50% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 50% less | - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) @@ -41,6 +41,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News +- 📣 NEW! Introducing [Reasoning](https://unsloth.ai/blog/r1-reasoning) in Unsloth. You can now reproduce DeepSeek-R1's "aha" moment with just 7GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! - 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now! More details: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). - 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft is now supported. 
We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. From 2023b28caa0b0b8d172e2e88f92cc13bff537018 Mon Sep 17 00:00:00 2001 From: Diogo Neves Date: Sun, 9 Feb 2025 03:41:39 +0000 Subject: [PATCH 1017/1088] Fixed Triton url (#1607) Triton's link was pointing to the old research url --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ca8fe0e473..1a29480de9 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and |   **Reddit** | [Join our Reddit page](https://reddit.com/r/unsloth)| ## ⭐ Key Features -- All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. +- All kernels written in [OpenAI's Triton](https://openai.com/index/triton/) language. **Manual backprop engine**. - **0% loss in accuracy** - no approximation methods - all exact. - No change of hardware. Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. - Works on **Linux** and **Windows** via WSL. 
From 87ce049846d57c34dd363207b46a6a49ab81dcef Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sun, 9 Feb 2025 19:57:15 -0800 Subject: [PATCH 1018/1088] Update README.md --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 1a29480de9..16de4f29ee 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,10 @@ - + -### Finetune Llama 3.3, Mistral, Phi-4, Qwen 2.5 & Gemma 2-5x faster with 80% less memory! +### Finetune Llama 3.3, Mistral, Phi-4, Qwen 2.5 & Gemma 2x faster with 80% less memory! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -36,7 +36,6 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) - **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) - Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb), [Llama 3.1 conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) -- This [text completion notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_(7B)-Text_Completion.ipynb) is for continued pretraining / raw text - This [continued pretraining 
notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) is for learning another language - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. From f9e28fc6cfa3eeb6d87281c5e8313b46f8c141ec Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Thu, 13 Feb 2025 01:14:06 -0800 Subject: [PATCH 1019/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 16de4f29ee..0038355aa5 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | -| **GRPO (reasoning)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb) | 2x faster | 80% less | +| **GRPO (R1 reasoning)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb) | 2x faster | 80% less | | **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 70% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 50% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 70% less | From 016315eb0495135c78235fff9684cb8759ff0b64 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 14:50:44 -0800 
Subject: [PATCH 1020/1088] Fix bugs (#1701) * Phi 4 * Update llama.py * Torch.Cuda Is Available Condition and Warning (#1545) * check for torch.cuda and triton if available on my machine(mac m3) the cuda were not available * Update pyproject.toml * Update __init__.py --------- Co-authored-by: Daniel Han * Update mistral.py * Update mistral.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Fix * Bug fixes * Update mapper.py * Add dropout to granite to match HF's implementation (#1557) Signed-off-by: datta0 * Update llama.py * Update llama.py * Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han * Update mapper.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * dim fix * Update _utils.py * Torch 2.6 support * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Faster inference? 
* Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Better TRL handling * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update 
rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py --------- Signed-off-by: datta0 Co-authored-by: AminWhat <88392440+aminwhat@users.noreply.github.com> Co-authored-by: Datta Nimmaturi Co-authored-by: Zhe Zhang <2631992879@qq.com> --- pyproject.toml | 6 +- 
unsloth/models/_utils.py | 5 +- unsloth/models/llama.py | 55 ++- unsloth/models/loader_utils.py | 5 + unsloth/models/rl.py | 646 ++++++++++++++++++------------ unsloth/models/rl_replacements.py | 186 +++++++++ unsloth/tokenizer_utils.py | 90 ++--- 7 files changed, 670 insertions(+), 323 deletions(-) create mode 100644 unsloth/models/rl_replacements.py diff --git a/pyproject.toml b/pyproject.toml index d89ea2c4d2..5bdf3c4dc3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -187,9 +187,9 @@ cu124onlytorch260 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu126onlytorch260 
= [ "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2ec4adaa11..656096b70c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.4" +__version__ = "2025.2.5" __all__ = [ "SUPPORTS_BFLOAT16", @@ -131,6 +131,7 @@ # Ignore logging messages class HideLoggingMessage(logging.Filter): + __slots__ = "text", def __init__(self, text): self.text = text def filter(self, x): return not (self.text in x.getMessage()) pass @@ -138,6 +139,8 @@ def filter(self, x): return not (self.text in x.getMessage()) # The speedups for torchdynamo mostly come wih GPU Ampere or higher and which is not detected here. from transformers.training_args import logger as transformers_training_args_logger transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups")) +# torch.distributed process group is initialized, but parallel_mode != ParallelMode.DISTRIBUTED. +transformers_training_args_logger.addFilter(HideLoggingMessage("torch.distributed")) del transformers_training_args_logger # Using the default loss: `ForCausalLMLoss`. 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a337472a3e..ec6706e515 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -15,6 +15,7 @@ import torch import gc import math +from functools import partial from typing import Optional, Tuple, List, Union from ._utils import * from ._utils import __version__ @@ -447,20 +448,28 @@ def LlamaAttention_fast_forward( A = flash_attn_func(Q, K, V, causal = True) else: # Grouped query attention - if n_groups != 1: - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) - K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) - V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) - pass - # Must be contiguous or else results are False! - # https://github.com/pytorch/pytorch/issues/112577 - Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() - # Needs (batch_size, n_heads, seq_len, head_dim) - # is_casual and attention_mask must not be both set! - A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) - # Go back to (batch_size, seq_len, n_heads, head_dim) - A = A.transpose(1, 2).contiguous() + if SDPA_HAS_GQA: + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False, enable_gqa = n_groups != 1) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2)#.contiguous() + else: + if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, kv_seq_len, head_dim) + K = K.reshape(bsz, n_heads, kv_seq_len, head_dim) + V = V.reshape(bsz, n_heads, kv_seq_len, head_dim) + pass + # Must be contiguous or else results are False! 
+ # https://github.com/pytorch/pytorch/issues/112577 + Q, K, V = Q.contiguous(), K.contiguous(), V.contiguous() + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2).contiguous() + pass pass attn_output = A.reshape(bsz, q_len, n_heads*head_dim) attn_output = self.apply_o(self, attn_output) @@ -699,6 +708,7 @@ def LlamaModel_fast_forward( if attention_mask is None: padding_mask = None elif self.training: + # elif attention_mask is not None and self.training: attention_mask = None padding_mask = None else: @@ -714,6 +724,7 @@ def LlamaModel_fast_forward( past_key_values_length, sliding_window = getattr(self.config, "sliding_window", None), ) + attention_mask = attention_mask.to(torch.bool) pass hidden_states = inputs_embeds @@ -1802,8 +1813,6 @@ def from_pretrained( model = convert_vllm_to_huggingface(quant_state_dict, model_config, dtype) model.vllm_engine = llm model.fast_generate = model.vllm_engine.generate - - from functools import partial model.fast_generate_batches = partial(generate_batches, model.vllm_engine) pass # Return old flag @@ -1952,13 +1961,13 @@ def from_pretrained( Trainer._inner_training_loop = _fast_inner_training_loop # Save max_seq_length - model.max_seq_length = max_position_embeddings + model.max_seq_length = max_seq_length internal_model = model while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_position_embeddings + internal_model.max_seq_length = max_seq_length internal_model = internal_model.model pass - internal_model.max_seq_length = max_position_embeddings + internal_model.max_seq_length = max_seq_length # We check the tokenizer first for errors if fix_tokenizer: @@ -2146,8 +2155,6 @@ def get_peft_model( signature = str(inspect.signature(LoraConfig)) SUPPORTS_LOFTQ = "loftq_config" in 
signature SUPPORTS_RSLORA = "use_rslora" in signature - - assert(max_seq_length <= model.max_seq_length) if lora_dropout != 0: logger.warning_once( @@ -2632,6 +2639,10 @@ def patch_peft_model( gc.collect() torch.cuda.empty_cache() pass + + # Add for_inference and for_training + model.for_training = partial(FastLlamaModel.for_training, model) + model.for_inference = partial(FastLlamaModel.for_inference, model) return model pass @@ -2739,3 +2750,5 @@ def for_training(model, use_gradient_checkpointing = True): pass pass +from .rl import PatchFastRL +PatchFastRL(FastLanguageModel = FastLlamaModel) diff --git a/unsloth/models/loader_utils.py b/unsloth/models/loader_utils.py index b778b7e95b..e3eadd8c0f 100644 --- a/unsloth/models/loader_utils.py +++ b/unsloth/models/loader_utils.py @@ -58,6 +58,11 @@ def __get_model_name( elif load_in_4bit and SUPPORTS_FOURBIT and lower_model_name in FLOAT_TO_INT_MAPPER: + # Support returning original full -bnb-4bit name if specified specifically + # since we'll map it to the dynamic version instead + if lower_model_name.endswith("-bnb-4bit"): + return lower_model_name + new_model_name = FLOAT_TO_INT_MAPPER[lower_model_name] # logger.warning_once( # f"Unsloth: You passed in `{model_name}` and `load_in_4bit = True`.\n"\ diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 515c6587f7..466101d16c 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -16,30 +16,17 @@ "PatchFastRL", ] -METRICS_MOVE_TO_END = [ - "nll", - "aux", - "beta", - "alpha", -] import torch -try: - from transformers.utils.notebook import ( - IntervalStrategy, - NotebookTrainingTracker, - NotebookProgressCallback, - ) - HAS_NOTEBOOK = True -except: - HAS_NOTEBOOK = False -pass from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import inspect import os import re -import functools from unsloth_zoo.compiler import create_new_function - +from unsloth_zoo.logging_utils import PatchRLStatistics +from .rl_replacements import ( + 
RL_EXTRA_ARGS, + RL_FUNCTIONS, +) def PatchRL(FastLanguageModel): @@ -78,267 +65,441 @@ def generate_with_clone(*args, **kwargs): trainers = [x for x in trainers if x.endswith("_trainer")] unwrap = "unwrap_model_for_generation" for trainer in trainers: - if hasattr(eval(f"trl.trainer.{trainer}"), unwrap): - exec(f"trl.trainer.{trainer}.{unwrap} = unsloth_{unwrap}") + try: current_trainer = eval(f"trl.trainer.{trainer}") + except: continue + if hasattr(current_trainer, unwrap): + try: exec(f"trl.trainer.{trainer}.{unwrap} = unsloth_{unwrap}") + except: continue pass pass -def NotebookProgressCallback_on_train_begin(Trainer_metrics): - def _NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): - self.first_column = "Epoch" if args.eval_strategy == IntervalStrategy.EPOCH else "Step" - self.training_loss = 0 - self.last_log = 0 - column_names = [self.first_column] + ["Training Loss"] - if args.eval_strategy != IntervalStrategy.NO: - column_names.append("Validation Loss") - column_names += [x.replace("/", " / ") for x in Trainer_metrics] - self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) - pass - return _NotebookProgressCallback_on_train_begin +RLTrainer_replacement = ''' +import os +from typing import * +from dataclasses import dataclass, field +from packaging.version import Version +import torch +from contextlib import nullcontext + +@dataclass +class Unsloth{RLConfig_name}({RLConfig_name}): + """ + {__RLConfig_doc__} + """ + sampling_params: Optional[Any] = field( + default = None, + metadata = {{'help': 'vLLM SamplingParams'}}, + ) + def __init__({RLConfig_arguments}, + sampling_params = None, + **kwargs, + ): +{RLConfig_extra_args} + super().__init__({RLConfig_call_args}{RLConfig_kwargs}) pass - -def NotebookProgressCallback_on_log(Trainer_metrics): - def _NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwargs): - # Only for when there is no evaluation - if args.eval_strategy == 
IntervalStrategy.NO and "loss" in logs: - values = {"Training Loss": logs["loss"]} - for metric in Trainer_metrics: - # Sometimes metric is not inside logs - try: values[metric.replace("/", " / ")] = logs[metric] - except: pass - pass - # First column is necessarily Step since we're not in epoch eval strategy - values["Step"] = state.global_step - self.training_tracker.write_line(values) - pass - pass - return _NotebookProgressCallback_on_log +{RLTrainer_extras} + +class Unsloth{RLTrainer_name}(_Unsloth{RLTrainer_name}): + """ + {__RLTrainer_doc__} + """ + def __init__({RLTrainer_arguments}, + **kwargs + ): + if args is None: args = Unsloth{RLConfig_name}() +{RLTrainer_extra_args} + super().__init__({RLTrainer_call_args}{RLTrainer_kwargs}) +{RLTrainer_post} pass +''' +def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): + # Patch for vLLM and Unsloth PEFT + import trl + import trl.trainer + try: + trainer = eval(f"trl.trainer.{trainer_file}") + except Exception as error: + return + + # Get SFTTrainer and SFTConfig names + name = [x for x in dir(trainer) if x.endswith("Trainer") and x != "Trainer" and trainer_file.split("_")[0] in x.lower()] + config = [x for x in dir(trainer) if x.endswith("Config") and x != "Config" and trainer_file.split("_")[0] in x.lower()] + if len(name) != 1: return + if len(config) != 1: return + + # Get SFTTrainer, SFTConfig + RLTrainer_name = name[0] + RLConfig_name = config[0] + try: RLTrainer = eval(f"trl.trainer.{trainer_file}.{RLTrainer_name}") + except: return + try: RLConfig = eval(f"trl.trainer.{trainer_file}.{RLConfig_name}" ) + except: return -def NotebookTrainingTracker_write_line(Trainer_metrics): - set_Trainer_metrics = set(Trainer_metrics) - def _NotebookTrainingTracker_write_line(self, values): - """ - Write the values in the inner table. - - Args: - values (`Dict[str, float]`): The values to display. 
- """ - if self.inner_table is None: - self.inner_table = [list(values.keys()), list(values.values())] - else: - columns = self.inner_table[0] - new_values = {} - for key, value in values.items(): - lowered = key.lower() - if lowered in set_Trainer_metrics: - new_values[lowered.replace("/", " / ")] = value - else: - new_values[key] = value - pass - values = new_values - - self.inner_table[0] = columns - if len(self.inner_table) > 1: - last_values = self.inner_table[-1] - first_column = self.inner_table[0][0] - if last_values[0] != values[first_column]: - # write new line - self.inner_table.append([values[c] if c in values else "No Log" for c in columns]) - else: - # update last line - new_values = values - for c in columns: - if c not in new_values.keys(): - new_values[c] = last_values[columns.index(c)] - self.inner_table[-1] = [new_values[c] for c in columns] - else: - # Edit for evaluation purposes - self.inner_table.append([values[c] if c in values else 0 for c in columns]) - pass - pass - pass - return _NotebookTrainingTracker_write_line -pass + # Check name + if RLTrainer.__name__.startswith("Unsloth"): return + if RLConfig .__name__.startswith("Unsloth"): return + all_imports = dir(trainer) + imports = [x for x in all_imports if not x.startswith("_")] -def _PatchRLStatistics(metrics, algorithm): - if HAS_NOTEBOOK: - if len(metrics) == 0: - raise RuntimeError(f"Unsloth: RL statistics for {algorithm} failed with no metrics seen?") - from transformers.trainer import is_in_notebook - if is_in_notebook(): - # Patch DPO notebook printing - NotebookTrainingTracker.write_line = NotebookTrainingTracker_write_line(metrics) - from transformers.trainer import DEFAULT_PROGRESS_CALLBACK - DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin(metrics) - DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log(metrics) + # Get default arguments + EMPTY = inspect.Parameter.empty + processed = [] + for RLobject in [RLTrainer, RLConfig]: + 
parameters = inspect.signature(RLobject.__init__).parameters + types = (bool, type(None), int, float, str,) + arguments = ["self"] + call_args = [] + for k, v in parameters.items(): + if k == "self": continue + v = v.default + if v == "\n": v = re.escape("\n") + if v is EMPTY: arguments.append(k) + elif type(v) is str: arguments.append(f"{k} = '{v}'") + elif type(v) in types: arguments.append(f"{k} = {v}") + else: continue + call_args.append(f"{k} = {k}") pass + arguments = f"\n{' '*8}" + f",\n{' '*8}".join(arguments) + call_args = f"\n{' '*12}" + f",\n{' '*12}".join(call_args) + processed.append((arguments, call_args,)) pass -pass + # Process RLTrainer first + arguments, call_args = processed[0] + RLTrainer_post = "" -@functools.cache -def get_trl_metrics(): - # Gets metrics so we can output them in notebooks + # Add tokenizer if not seen + if "tokenizer" not in parameters and "processing_class" in parameters: + arguments += f",\n{' '*8}tokenizer = None" + call_args = call_args.replace( + "processing_class = processing_class", + "processing_class = tokenizer if tokenizer is not None else processing_class", + ) + pass - import trl.trainer - trainers = dir(trl.trainer) - trainers = [x for x in trainers if x.endswith("_trainer")] - filepath = inspect.getfile(trl.trainer) - filepath = os.path.split(filepath)[0] + # Edit bf16, fp16 by checking model's torch_dtype directly + extra_args = "" + if "args" in call_args and "model" in call_args: + mixed_precision = \ + "use_bf16 = getattr(args, 'bf16', False)\n"\ + "use_fp16 = getattr(args, 'fp16', False)\n"\ + "dtype = getattr(model.config, 'torch_dtype', None)\n"\ + "if dtype is None: dtype = model.get_input_embeddings().dtype\n"\ + "from unsloth_zoo.utils import _get_dtype\n"\ + "dtype = _get_dtype(dtype)\n"\ + "float16 = dtype == torch.float16\n"\ + "if float16 and use_bf16: raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. 
Set fp16 to `True` and bf16 to `False`')\n"\ + "if not float16 and use_fp16: raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')\n"\ + "if not use_bf16 and not use_fp16:\n"\ + " args.fp16 = float16\n"\ + " args.bf16 = not float16\n"\ + " os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'\n" + extra_args += mixed_precision + pass - all_metrics = dict() - for trainer in trainers: - filename = os.path.join(filepath, f"{trainer}.py") - if not os.path.exists(filename): continue - with open(filename, "r") as file: file = file.read() - - # Get metrics['kl'] or stats['kl'] - metrics = re.findall(r"metrics\[[\"\']([^\"\']{1,})[\"\']\]", file) - stats = re.findall(r"stats\[[\"\']([^\"\']{1,})[\"\']\]", file) - metrics = metrics + stats - - # Get optional f-strings - metrics_f = re.findall(r"metrics\[f[\"\']\{[^\}]{1,}\}([^\"\']{1,})[\"\']\]", file) - stats_f = re.findall(r"stats\[f[\"\']\{[^\}]{1,}\}([^\"\']{1,})[\"\']\]", file) - metrics_f = metrics_f + stats_f - # Filter out prefixes if seen - # metrics[f"{prefix}rewards/chosen"] - left_prefix = 'prefix = "eval_" if train_eval == "eval" else ""' in file - if left_prefix: metrics += metrics_f - - # Move all eval_ things to the end and reward to the front - beginning = [] - middle = [] - end = [] - for x in metrics: - lowered = x.lower() - if "reward" in lowered: - beginning.append(x) - elif x.lower().startswith("eval"): - end.append(x) - else: - # Check if we want to move to the end - moved = False - for move_end in METRICS_MOVE_TO_END: - if move_end in lowered: - end.append(x) - moved = True - break - if not moved: - middle.append(x) - pass + # Check if per_device_eval_batch_size (default 8) bigger than bsz + # Also use FP16 / BF16 evaluation + if "args" in call_args: + # Check eval_dataset first + if "eval_dataset" in call_args: + check_eval_dataset = \ + "if getattr(args, 'eval_dataset', None) is not None and "\ + 
"getattr(args, 'eval_strategy', 'no') == 'no':\n"\ + " args.eval_strategy = 'steps'\n"\ + " if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1\n" + extra_args += check_eval_dataset pass - metrics = beginning + middle + end - all_metrics[trainer[:trainer.find("_")].upper()] = metrics + # Check if gradient accumulation bug fix is applied + check_ga = \ + "ga_steps = getattr(args, 'gradient_accumulation_steps', None)\n"\ + "if ga_steps is not None and ga_steps > 1:\n"\ + " from transformers import __version__ as transformers_version\n"\ + " if Version(transformers_version) <= Version('4.45.2'):\n"\ + " print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\n"\ + " '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')\n" + extra_args += check_ga + + eval_changes = \ + "if getattr(args, 'eval_strategy', 'no') != 'no':\n"\ + " eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)\n"\ + " if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size\n"\ + " if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps\n"\ + "fp16_full_eval = getattr(args, 'fp16_full_eval', False)\n"\ + "bf16_full_eval = getattr(args, 'bf16_full_eval', False)\n"\ + "if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True\n"\ + "if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False\n"\ + "if not bf16_full_eval and not fp16_full_eval: args.bf16_full_eval = args.bf16; args.fp16_full_eval = args.fp16\n" + extra_args += eval_changes pass - return all_metrics -pass + # Check max_seq_length + if "model" in call_args: + length_check = \ + "if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):\n"\ + " pass\n"\ + "else:\n"\ + " 
model_max_seq_length = getattr(model, 'max_seq_length', None)\n"\ + " args_max_seq_length = getattr(args, 'max_seq_length', None)\n"\ + " if args_max_seq_length is None and model_max_seq_length is not None:\n"\ + " max_seq_length = model.max_seq_length\n"\ + " if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length\n" + " elif args_max_seq_length is not None and model_max_seq_length is not None:\n"\ + " if args_max_seq_length > model_max_seq_length:\n"\ + " print('Unsloth: You set `max_seq_length` as ' + str(args_max_seq_length) + ' but \n"\ + " the maximum the model supports is ' + str(model_max_seq_length) + '. We shall reduce it.')\n"\ + " args.max_seq_length = model_max_seq_length\n" + extra_args += length_check + pass -def PatchRLStatistics(algorithm = "GRPO"): - # Get notebook statistics columns to show up - algorithm = algorithm.upper() - all_metrics = get_trl_metrics() - if algorithm not in all_metrics: - print( - f"Unsloth for {algorithm.upper()} is not yet implemented! 
Just ignore this function.\n"\ - f"We support: `{list(all_metrics.keys())}`" - ) + # Enable for training and move padding side of tokenizer to right + if "model" in call_args: + training_check = \ + "if model is not None and hasattr(model, 'for_training'):\n"\ + " model.for_training()\n"\ + "if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'\n"\ + "if 'processing_class' in locals():\n"\ + " if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'\n"\ + " if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): "\ + "processing_class.tokenizer.padding_side = 'right'\n" + extra_args += training_check pass - _PatchRLStatistics(all_metrics[algorithm], algorithm) -pass + # Check NEFTune + if "model" in call_args: + neftune_check = \ + "if hasattr(self, 'neftune_hook_handle'):\n"\ + " self.neftune_hook_handle.remove()\n"\ + " if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"\ + "if getattr(args, 'neftune_noise_alpha', None) is not None:\n"\ + " model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"\ + "pass\n" + RLTrainer_post += neftune_check + pass -def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): - # Patch for vLLM and Unsloth PEFT - import trl - import trl.trainer + # Add statistics as well! 
+ extra_args += \ + "from unsloth_zoo.logging_utils import PatchRLStatistics\n"\ + f"PatchRLStatistics('{trainer_file}')\n" - trainer = eval(f"trl.trainer.{trainer_file}") - name = [x for x in dir(trainer) if x.endswith("Trainer") and x != "Trainer" and trainer_file.split("_")[0] in x.lower()] - assert(len(name) == 1) - RLTrainer_name = name[0] - RLTrainer = eval(f"trl.trainer.{trainer_file}.{RLTrainer_name}") + # Patch optional args + if trainer_file in RL_EXTRA_ARGS: + process_extra_args = RL_EXTRA_ARGS[trainer_file] + for process_extra_arg in process_extra_args: + extra_args += process_extra_arg(call_args, extra_args) + pass - try: - __init__ = inspect.getsource(RLTrainer.__init__) - except: - # Already patched most likely! - return - old__init__ = __init__ - all_imports = dir(trainer) - assert("Union" in all_imports) - imports = [x for x in all_imports if not x.startswith("_")] - imports += ["Trainer"] + # Create RLTrainer args + extra_args = extra_args.split("\n") + extra_args = "\n".join(" "*8 + x for x in extra_args) + RLTrainer_post = RLTrainer_post.split("\n") + RLTrainer_post = "\n".join(" "*8 + x for x in RLTrainer_post) + RLTrainer_arguments = arguments + RLTrainer_extra_args = extra_args + RLTrainer_call_args = call_args + + # Fix RLConfig next + arguments, call_args = processed[1] + extra_args = "" + + # Edit GA / bsz and weight_decay + replacements = { + "output_dir" : None, + "logging_nan_inf_filter" : False, + "per_device_train_batch_size" : 4, + "gradient_accumulation_steps" : 2, + "weight_decay" : 0.01, + "warmup_ratio" : 0.1, + "seed" : 3407, + "optim" : "adamw_8bit", + "learning_rate" : 5e-05, + "per_device_eval_batch_size" : 4, + "eval_accumulation_steps" : 2, + "torch_empty_cache_steps" : 250, + "logging_steps" : 1, + } + for k, v in replacements.items(): + x = f"{k}( = [^,\n]{{1,}})?,\n" + y = f"'{v}'" if type(v) is str else f"{v}" + y = f"{k} = {y},\n" + arguments = re.sub(x, y, arguments) + pass - spaces = __init__.find("def") - __init__ = 
__init__.split("\n") - __init__ = "\n".join(x[spaces:] for x in __init__) + # Warn on too large or too small learning rate + if " learning_rate" in call_args: + learning_rate_check = \ + "if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! "\ + "Consider increasing it, otherwise gradient updates will be close to 0!')\n"\ + "if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too larger > 1! "\ + "Consider decreasing it to 1e-1, otherwise gradient updates will explode!')\n" + extra_args += learning_rate_check + pass - # Replace vLLM sections since we already have it done! - vllm_part = re.findall( - r"(\n[\s]{4}"\ - r"if (self|args)\.use_vllm\:.+?"\ - r"\n[\s]{4,}"\ - "else:\n)", - __init__, - flags = re.MULTILINE | re.DOTALL, - ) - if (len(vllm_part) != 1): return + # Add output_dir saving + if "output_dir" in call_args: + # Default checks + saving_check = \ + "if output_dir is None and save_strategy == 'steps' and save_steps == 500:\n"\ + " output_dir = 'unsloth_training_checkpoints'\n"\ + " save_strategy = 'no'\n" + extra_args += saving_check + pass - vllm_part, args = vllm_part[0][0], vllm_part[0][1] - # Strip all comments - new_vllm_part = re.sub(r"\#[^\n]{1,}\n", "", vllm_part) + # Edit dataset_num_proc + if "dataset_num_proc" in call_args: + num_proc_check = \ + "if dataset_num_proc is None:\n"\ + " from multiprocessing import cpu_count\n"\ + " dataset_num_proc = cpu_count()\n" + extra_args += num_proc_check + pass - # Get SamplingParams - sampling_params = re.findall( - r"\n[\s]{4,}(self\.[^\s]{1,}[\s]{0,}\=[\s]{0,}"\ - r"SamplingParams\(.+?\))", - new_vllm_part, - flags = re.MULTILINE | re.DOTALL, + # Edit report_to and default it to nothing if max_steps is like 60 + + # Create RLConfig args + extra_args = extra_args.split("\n") + extra_args = "\n".join(" "*8 + x for x in extra_args) + RLConfig_arguments = arguments + 
RLConfig_extra_args = extra_args + RLConfig_call_args = call_args + + # Patch vLLM and other functions + RLTrainer_extras = patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, imports) + if RLTrainer_extras is None: + RLTrainer_extras = f"_Unsloth{RLTrainer_name} = {RLTrainer_name}" + + # Create full module + exec(f"from trl.trainer import ({RLTrainer_name}, {RLConfig_name},)") + __RLTrainer_doc__ = eval(f"trl.trainer.{RLTrainer_name}").__doc__ + __RLConfig_doc__ = eval(f"trl.trainer.{RLConfig_name}") .__doc__ + + RLTrainer_source = RLTrainer_replacement.format( + RLTrainer_name = RLTrainer_name, + __RLTrainer_doc__ = __RLTrainer_doc__, + RLTrainer_arguments = RLTrainer_arguments, + RLTrainer_extra_args = RLTrainer_extra_args, + RLTrainer_call_args = RLTrainer_call_args, + RLTrainer_kwargs = ",**kwargs"[1 if RLTrainer_call_args.endswith(",") else 0:], + + RLConfig_name = RLConfig_name, + __RLConfig_doc__ = __RLConfig_doc__, + RLConfig_arguments = RLConfig_arguments, + RLConfig_extra_args = RLConfig_extra_args, + RLConfig_call_args = RLConfig_call_args, + RLConfig_kwargs = ",**kwargs"[1 if RLConfig_call_args .endswith(",") else 0:], + + RLTrainer_extras = RLTrainer_extras, + RLTrainer_post = RLTrainer_post, + ) + + # Create new function + created_module = create_new_function( + f"Unsloth{RLTrainer_name}", + RLTrainer_source, + f"trl.trainer.{trainer_file}", + imports, + overwrite = False, ) - if len(sampling_params) != 1: return + + # Patch Trainer + exec(f"trl.{RLTrainer_name} = created_module.Unsloth{RLTrainer_name}", locals(), globals()) + exec(f"trl.trainer.{RLTrainer_name} = created_module.Unsloth{RLTrainer_name}", locals(), globals()) + exec(f"trl.trainer.{trainer_file}.{RLTrainer_name} = created_module.Unsloth{RLTrainer_name}", locals(), globals()) + + # Patch Config + exec(f"trl.{RLConfig_name} = created_module.Unsloth{RLConfig_name}", locals(), globals()) + exec(f"trl.trainer.{RLConfig_name} = created_module.Unsloth{RLConfig_name}", 
locals(), globals()) + exec(f"trl.trainer.{trainer_file}.{RLConfig_name} = created_module.Unsloth{RLConfig_name}", locals(), globals()) +pass + - sampling_params = sampling_params[0] - # Replace with our vLLM engine - sampling_params = \ - " "*8 + "self.llm = model.vllm_engine; self._last_loaded_step = 0; " + \ - sampling_params # Add spaces - new_vllm_part = f"\n if {args}.use_vllm:\n{sampling_params}\n else:\n" - __init__ = __init__.replace(vllm_part, new_vllm_part) +def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, imports): + init = inspect.getsource(RLTrainer.__init__) + old_init = init # Remove peft_config - __init__ = __init__.replace("elif peft_config is None:", "elif False:") - __init__ = __init__.replace("elif peft_config is not None:", "elif False:") - __init__ = __init__.replace("if peft_config is None:", "if False:") - __init__ = __init__.replace("if peft_config is not None:", "if False:") - __init__ = __init__.replace("get_peft_model(model, peft_config)", "model") + init = init.replace("elif peft_config is None:", "elif False:") + init = init.replace("elif peft_config is not None:", "elif False:") + init = init.replace("if peft_config is None:", "if False:") + init = init.replace("if peft_config is not None:", "if False:") + init = init.replace("get_peft_model(model, peft_config)", "model") + + # Set use_vllm if not set + if "args.use_vllm" in init and "model" in init and "args" in init: + # .*? matches first match. .+? matches final match. 
+ replacer = re.findall( + "def __init__\(.*?\).*?\:\n", + init, + flags = re.MULTILINE | re.DOTALL, + ) + if len(replacer) != 0: + replacer = replacer[0] + vllm_setter = "\n" + " "*8 + \ + "if hasattr(model, 'vllm_engine') and "\ + "getattr(args, 'use_vllm') and getattr(args, 'use_vllm', False): "\ + "args.use_vllm = True\n" + init = init.replace(replacer, replacer + vllm_setter) + pass + pass - # Add spaces back into __init__ - __init__ = __init__.split("\n") - __init__ = "\n".join(' '*spaces + x for x in __init__) + vllm_part = re.findall( + r"(\n[\s]{8}"\ + r"if (self|args)\.use_vllm\:.*?"\ + r"\n[\s]{8}"\ + "else:\n)", + init, + flags = re.MULTILINE | re.DOTALL, + ) + if len(vllm_part) == 1: + vllm_part, args = vllm_part[0][0], vllm_part[0][1] + # Strip all comments + new_vllm_part = re.sub(r"\#[^\n]{1,}\n", "", vllm_part) + + # Get SamplingParams + sampling_params = re.findall( + r"\n[\s]{4,}(self\.[^\s]{1,}[\s]{0,}\=[\s]{0,}"\ + r"SamplingParams\(.+?\))", + new_vllm_part, + flags = re.MULTILINE | re.DOTALL, + ) + if len(sampling_params) == 1: + sampling_params = sampling_params[0] + # Replace with our vLLM engine + sampling_params = \ + " "*12 + "self.llm = model.vllm_engine; self._last_loaded_step = 0; " + \ + sampling_params # Add spaces + new_vllm_part = \ + f"\n{' '*8}if {args}.use_vllm:\n{sampling_params} "\ + f"if getattr(args, 'sampling_params', None) is None else "\ + f"getattr(args, 'sampling_params', None)\n{' '*8}else:\n" + init = init.replace(vllm_part, new_vllm_part) + pass + pass # Search for vLLM calling in all child functions functions = dir(RLTrainer) RLTrainer_source = inspect.getsource(RLTrainer) functions = [x for x in functions if f"def {x}" in RLTrainer_source] - changed = {"__init__" : (old__init__, __init__,)} + changed = {"__init__" : (old_init, init,)} + edit_functions = RL_FUNCTIONS.get(trainer_file, []) + for function in functions: if not hasattr(RLTrainer, function): continue fx = getattr(RLTrainer, function) - try: - source = 
inspect.getsource(fx) - except: - continue + try: source = inspect.getsource(fx) + except: continue original_source = source + # Check for function + for edit_function in edit_functions: + source = edit_function(function, source) + pass + # llm_model = self.llm.llm_engine.model_executor.driver_worker.model_runner.model source = re.sub( r"(\n[\s]{4,}).+?model_executor\.driver_worker.+?\n", @@ -386,22 +547,9 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RLTrainer_source = RLTrainer_source.replace(old, new) pass RLTrainer_source = RLTrainer_source.replace( - f"class {RLTrainer_name}", f"class Unsloth{RLTrainer_name}", 1 - ) - - # Create new class in compiled cache and import it - module = create_new_function( - RLTrainer_name, - RLTrainer_source, - f"trl.trainer.{trainer_file}", - imports, + f"class {RLTrainer_name}", f"class _Unsloth{RLTrainer_name}", 1 ) - - # Patch over modules - exec(f"trl.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) - exec(f"trl.trainer.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) - exec(f"trl.trainer.{trainer_file}.{RLTrainer_name} = module.Unsloth{RLTrainer_name}", locals(), globals()) - return module + return RLTrainer_source pass @@ -416,8 +564,8 @@ def patch_trl_rl_trainers(): pass -def PatchFastRL(algorithm = "GRPO", FastLanguageModel = None): +def PatchFastRL(algorithm = None, FastLanguageModel = None): if FastLanguageModel is not None: PatchRL(FastLanguageModel) patch_trl_rl_trainers() - PatchRLStatistics(algorithm) + if algorithm is not None: PatchRLStatistics(algorithm) pass diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py new file mode 100644 index 0000000000..4d7a4dbe09 --- /dev/null +++ b/unsloth/models/rl_replacements.py @@ -0,0 +1,186 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + "RL_EXTRA_ARGS", + "RL_FUNCTIONS", +] + +import re +import inspect +from collections import defaultdict +RL_EXTRA_ARGS = defaultdict(list) +RL_FUNCTIONS = defaultdict(list) + + +# Check untrained tokens +def sft_trainer_fix_untraiend_tokens(call_args, extra_args): + if "model" in call_args and "train_dataset" in call_args: + fix_tokenizer = \ + "IGNORED_TOKENIZER_NAMES = os.environ.get('UNSLOTH_IGNORED_TOKENIZER_NAMES', '').split('\\n')\n"\ + "from unsloth_zoo.tokenizer_utils import fix_untrained_tokens\n"\ + "from unsloth_zoo.training_utils import fix_zero_training_loss\n"\ + "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ + "fix_untrained_tokens(model, tokenizer, train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)\n"\ + "fix_zero_training_loss(model, tokenizer, train_dataset)\n" + return fix_tokenizer + return "" +pass +RL_EXTRA_ARGS["sft_trainer"].append(sft_trainer_fix_untraiend_tokens) + + +# Remove DPO columns which might randomnly be tokenized +def dpo_trainer_fix_columns(call_args, extra_args): + if "model" in call_args and "train_dataset" in call_args: + fix_dpo = \ + "if hasattr(train_dataset, 'column_names'):\n"\ + " column_names = set(train_dataset.column_names)\n"\ + " check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n"\ + " 'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n"\ + " 'prompt_input_ids', 
'prompt_attention_mask']\n"\ + " if all(x in column_names for x in check):\n"\ + " train_dataset = train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n"\ + " del check, column_names\n" + return fix_dpo + return "" +pass +RL_EXTRA_ARGS["dpo_trainer"].append(dpo_trainer_fix_columns) + + +# Fix tokenizer double BOS +def sft_trainer_prepare_dataset(function_name, function): + if function_name != "_prepare_non_packed_dataloader" and \ + function_name != "_prepare_dataset": return function + + check_text = \ + "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ + "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"\ + "if 'dataset_text_field' not in locals() and 'args' in locals(): dataset_text_field = args.dataset_text_field\n"\ + "if 'dataset_text_field' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `dataset_text_field` does not exist!')\n"\ + "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"\ + "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ + "chat_template = '' if chat_template is None else chat_template\n"\ + "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ + "if getattr(tokenizer, 'bos_token', None) is not None else False\n"\ + "if 'add_special_tokens' not in locals() and has_bos_token_already:\n"\ + " from functools import partial\n"\ + " tokenizer = partial(tokenizer, add_special_tokens = False)\n"\ + " processing_class = tokenizer\n"\ + "else:\n"\ + " add_special_tokens = False if has_bos_token_already else add_special_tokens\n" + + check_text = check_text.split("\n") + check_text = "\n".join(" "*8 + x for x in check_text) + check_text = check_text.rstrip() + "\n" + + # .*? matches first match. .+? matches final match. 
+ replacer = re.findall( + r"def {function_name}\(.*?\).*?\:\n", + function, + flags = re.MULTILINE | re.DOTALL, + ) + if len(replacer) != 0: + replacer = replacer[0] + function = function.replace(replacer, replacer + check_text) + pass + return function +pass +RL_FUNCTIONS["sft_trainer"].append(sft_trainer_prepare_dataset) + + +# Ignore mean_token_accuracy since it needs logits +# We override it directly with our version +def _sft_trainer_compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None): + (loss, outputs) = super().compute_loss( + model, + inputs, + return_outputs = return_outputs, + num_items_in_batch = num_items_in_batch, + ) + return (loss, outputs) if return_outputs else loss +pass + +def sft_trainer_compute_loss(function_name, function): + if function_name != "compute_loss": return function + + function = inspect.getsource(_sft_trainer_compute_loss) + function = function.replace("def _sft_trainer_compute_loss", "def compute_loss") + function = function.split("\n") + function = "\n".join(" "*4+x for x in function) + return function +pass +RL_FUNCTIONS["sft_trainer"].append(sft_trainer_compute_loss) + + +# Autocast precision for GRPO +def grpo_trainer__prepare_inputs(function_name, function): + if function_name != "_prepare_inputs": return function + + if "with torch.inference_mode()" not in function: return function + + # Add mixed precision training + function = function.replace( + "with torch.inference_mode():", + + "with torch.inference_mode(), "\ + "torch.amp.autocast(device_type = 'cuda', "\ + "dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16) "\ + "if not torch.is_autocast_enabled('cuda') else nullcontext():", + ) + + # Disable attaching a float32 conversion hook which upcasts logits to FP32 + function = function.replace( + "self.accelerator.unwrap_model(self.model)", + "self.accelerator.unwrap_model(self.model, keep_fp32_wrapper = False)", + ) + return 
function +pass +RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__prepare_inputs) + + +# Remove _move_model_to_vllm +def grpo_trainer__move_model_to_vllm(function_name, function): + if function_name != "_move_model_to_vllm": return function + + # .*? matches first match. .+? matches final match. + replacement = "def _move_model_to_vllm(self, *args, **kwargs): return None\n" + return " "*function.find("def") + replacement +pass +RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__move_model_to_vllm) + + +# Edit _get_per_token_logps to handle mixed precision +def grpo_trainer__get_per_token_logps(function_name, function): + if function_name != "_get_per_token_logps": return function + + # Edit model to autocast it + # .*? matches first match. .+? matches final match. + original = re.findall( + r"\n([ ]{4,})(logits = model\(.*?\))", + function, + flags = re.MULTILINE | re.DOTALL, + ) + if len(original) != 0: + spaces, original = original[0] + spaces = len(spaces) + replacer = \ + "if not hasattr(self, '_autocast_dtype'):\n" + \ + " "*(spaces + 4) + "self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16\n" + \ + " "*(spaces + 0) + "with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype):\n" + \ + " "*(spaces + 4) + original + function = function.replace(original, replacer) + pass + return function +pass +RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index f2b0da8600..404fce319f 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -59,6 +59,7 @@ [x.lower() for x in IGNORED_TOKENIZER_NAMES] + \ [x.lower()+"-bnb-4bit" for x in IGNORED_TOKENIZER_NAMES] ) +os.environ["UNSLOTH_IGNORED_TOKENIZER_NAMES"] = "\n".join(IGNORED_TOKENIZER_NAMES) # Check environments keynames = "\n" + "\n".join(os.environ.keys()) @@ -907,44 +908,25 @@ def neftune_post_forward_hook(module, input, 
output): pass -def patch_trl_tokenizer_processing_class(trainer_name): - # New TRL removes tokenizer! - # We return it back! - exec(f"from trl import {trainer_name}", globals()) - if str(eval(f"{trainer_name}").__name__).startswith("Unsloth"): return None - parameters = eval(f"inspect.signature({trainer_name}).parameters") - if "tokenizer" in parameters: return None - - args = { - key : \ - value.default \ - if type(value.default) is not str else \ - f"'{value.default}'" \ - for key, value in parameters.items() - } - args["tokenizer"] = None - new_args = args.copy() - del new_args["tokenizer"] - del new_args["processing_class"] - new_args = ",\n".join(f"{' '*12}{key} = {key}" for key in new_args) + \ - f",\n{' '*12}processing_class = tokenizer if tokenizer else processing_class" - args = ",\n".join(f"{' '*8}{key} = {value}" for key, value in args.items()) - args = f"def __init__(\n" + f"{' '*8}self,\n" + args + "):" - args += f"\n{' '*8}\n{' '*8}super().__init__(\n{new_args}\n{' '*8})" - new_class = f"""class Unsloth{trainer_name}({trainer_name}):\n{' '*4}{args}\n""" - return new_class -pass - - def patch_sft_trainer_tokenizer(): """ Patches the trainer with changes """ - for function_name, replacer in ( - ("_prepare_non_packed_dataloader", "def tokenize(element):",), + try: + sft_trainer = eval(f"trl.trainer.sft_trainer.SFTTrainer") + except: + return + all_imports = dir(trl.trainer.sft_trainer) + + for (function_name, replacer,) in ( + # ("_prepare_non_packed_dataloader", "def tokenize(element):",), + ("_prepare_non_packed_dataloader", None,), + ("_prepare_dataset", None,), # ("_prepare_packed_dataloader", "if dataset_text_field is not None",), ): - function = getsource(eval(f"trl.trainer.sft_trainer.SFTTrainer.{function_name}")) + if not hasattr(sft_trainer, function_name): continue + + function = getsource(eval(f"sft_trainer.{function_name}")) where = function.find("def") function = function.split("\n") function = "\n".join(x[where:] for x in function) @@ 
-953,20 +935,41 @@ def patch_sft_trainer_tokenizer(): "\n"\ "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"\ + "if 'dataset_text_field' not in locals() and 'args' in locals(): dataset_text_field = args.dataset_text_field\n"\ "if 'dataset_text_field' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `dataset_text_field` does not exist!')\n"\ "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"\ "chat_template = getattr(tokenizer, 'chat_template', None)\n"\ "chat_template = '' if chat_template is None else chat_template\n"\ "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "\ "if getattr(tokenizer, 'bos_token', None) is not None else False\n"\ - "add_special_tokens = False if has_bos_token_already else add_special_tokens\n\n" + "if 'add_special_tokens' not in locals() and has_bos_token_already:\n"\ + " from functools import partial\n"\ + " tokenizer = partial(tokenizer, add_special_tokens = False)\n"\ + " processing_class = tokenizer\n"\ + "else:\n"\ + " add_special_tokens = False if has_bos_token_already else add_special_tokens\n\n" check_text = check_text.split("\n") check_text = "\n".join(" "*where + x for x in check_text) + check_text = check_text.rstrip() + "\n" + + if replacer is None: + # .*? matches first match. .+? matches final match. 
+ replacer = re.findall( + f"def {function_name}\(.*?\).*?\:\n", + function, + flags = re.MULTILINE | re.DOTALL, + ) + if len(replacer) == 0: continue + replacer = replacer[0] + function = function.replace(replacer, replacer + check_text) + else: + function = function.replace(replacer, check_text + replacer) + pass - function = function.replace(replacer, check_text + replacer) - exec(function, globals()) - + x = [x for x in all_imports if x in function] + exec(f"from trl.trainer.sft_trainer import ({','.join(x)})", locals()) + exec(function, locals(), globals()) exec(f"trl.trainer.sft_trainer.SFTTrainer.{function_name} = {function_name}", globals()) pass @@ -1053,16 +1056,5 @@ def patch_sft_trainer_tokenizer(): pass pass -# Fix TRL trainers with removed tokenizer args (got replaced with processing_class) -for trainer_name in ("SFTTrainer", "DPOTrainer", "KTOTrainer"): - trainer_text = patch_trl_tokenizer_processing_class(trainer_name) - if trainer_text is None: continue - try: - exec(trainer_text, globals()) - except: - raise RuntimeError(f"Unsloth: Please file a bug report! Error patching {trainer_name}") - exec(f"trl.trainer.{trainer_name} = Unsloth{trainer_name}", globals()) -pass - -# FInally patch TRL tokenizer things -patch_sft_trainer_tokenizer() +# Finally patch TRL tokenizer things -> moved to RL +# patch_sft_trainer_tokenizer() From 6fdb54b0abf46ccfd4606a3668439a228d332849 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 14:54:49 -0800 Subject: [PATCH 1021/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 656096b70c..276d1d0db9 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.5" +__version__ = "2025.2.6" __all__ = [ "SUPPORTS_BFLOAT16", From 57029ab7757d95103a2fec14b86b7246b078787b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 14:55:14 -0800 Subject: [PATCH 1022/1088] Update __init__.py --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index bdde33c507..f0600f3328 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -196,7 +196,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.2.2"): + if Version(unsloth_zoo_version) < Version("2025.2.4"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: From 3622d537e70a8123bb5de8dc0fc7e313b9e412b6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 14:59:42 -0800 Subject: [PATCH 1023/1088] Update dpo.py --- unsloth/models/dpo.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 9c12abb98f..7fc02fcf7f 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -17,8 +17,6 @@ "PatchKTOTrainer", ] -from .rl import PatchFastRL +def PatchDPOTrainer(): return -def PatchDPOTrainer(): PatchFastRL("DPO") - -def PatchKTOTrainer(): PatchFastRL("KTO") +def PatchKTOTrainer(): return From fd3bfa99453bdf7384f2aed3324343f08130d3f3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 14:59:52 -0800 Subject: [PATCH 1024/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 276d1d0db9..d668460eaa 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.6" +__version__ = "2025.2.7" __all__ = [ "SUPPORTS_BFLOAT16", From 7f0511b0cd7eda80e01c67c81dfa134fa41ff6d3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 15:02:50 -0800 Subject: [PATCH 1025/1088] Update rl.py --- unsloth/models/rl.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 466101d16c..fc094b0839 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -565,7 +565,8 @@ def patch_trl_rl_trainers(): def PatchFastRL(algorithm = None, FastLanguageModel = None): - if FastLanguageModel is not None: PatchRL(FastLanguageModel) - patch_trl_rl_trainers() - if algorithm is not None: PatchRLStatistics(algorithm) + return + # if FastLanguageModel is not None: PatchRL(FastLanguageModel) + # patch_trl_rl_trainers() + # if algorithm is not None: PatchRLStatistics(algorithm) pass From 5b1bb7782a4321f74a62c87c624e6410575517f0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 15:13:55 -0800 Subject: [PATCH 1026/1088] Update pyproject.toml --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5bdf3c4dc3..2a6e31dcae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,7 +50,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<0.15.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -362,7 +362,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<0.15.0", "peft>=0.7.1", "xformers", "bitsandbytes>=0.46.1", From a41cdffa559dd6cf673db0e4d3802b2693cbeccc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 15:14:17 -0800 Subject: [PATCH 1027/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d668460eaa..f5d00eab22 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.7" +__version__ = "2025.2.8" __all__ = [ "SUPPORTS_BFLOAT16", From 179840d3a7b49188c372b56c67c4290d53c29ed6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Feb 2025 19:12:19 -0800 Subject: [PATCH 1028/1088] Fix bugs (#1706) * Bug fixes * fix: flash_attn_detection_error (#1556) * fix: flash_attn_detection_error * Update _utils.py --------- Co-authored-by: Daniel Han * Update mapper.py * Update gemma.py * Update gemma.py * Update gemma.py * Update gemma.py * dim fix * Update _utils.py * Torch 2.6 support * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Faster inference? 
* Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Better TRL handling * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update 
rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --------- Co-authored-by: Zhe Zhang <2631992879@qq.com> --- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 5 +++-- unsloth/models/rl.py | 8 ++++---- unsloth/models/rl_replacements.py | 25 +++++++++++-------------- 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index f5d00eab22..8d0eadb968 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.8" +__version__ = "2025.2.9" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ec6706e515..1eae97ff1c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -708,7 +708,7 @@ def LlamaModel_fast_forward( if attention_mask is None: padding_mask = None elif self.training: - # elif attention_mask is not None and self.training: + # elif attention_mask is None: attention_mask = None padding_mask = None else: @@ -724,7 +724,8 @@ def LlamaModel_fast_forward( past_key_values_length, sliding_window = getattr(self.config, "sliding_window", None), ) - attention_mask = attention_mask.to(torch.bool) + if attention_mask is not None: + attention_mask = attention_mask.to(torch.bool) pass hidden_states = inputs_embeds diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index fc094b0839..3d601b0af1 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -565,8 +565,8 @@ def patch_trl_rl_trainers(): def PatchFastRL(algorithm = None, FastLanguageModel = None): - return - # if FastLanguageModel is not None: PatchRL(FastLanguageModel) - # patch_trl_rl_trainers() - # if algorithm is not None: PatchRLStatistics(algorithm) + if FastLanguageModel is not None: PatchRL(FastLanguageModel) + patch_trl_rl_trainers() + if type(algorithm) is str and 
algorithm.islower(): + PatchRLStatistics(algorithm) pass diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 4d7a4dbe09..82fd3f8d3c 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -101,23 +101,20 @@ def sft_trainer_prepare_dataset(function_name, function): # Ignore mean_token_accuracy since it needs logits # We override it directly with our version -def _sft_trainer_compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None): - (loss, outputs) = super().compute_loss( - model, - inputs, - return_outputs = return_outputs, - num_items_in_batch = num_items_in_batch, - ) - return (loss, outputs) if return_outputs else loss -pass - def sft_trainer_compute_loss(function_name, function): if function_name != "compute_loss": return function - function = inspect.getsource(_sft_trainer_compute_loss) - function = function.replace("def _sft_trainer_compute_loss", "def compute_loss") - function = function.split("\n") - function = "\n".join(" "*4+x for x in function) + def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None): + outputs = super().compute_loss( + model, + inputs, + return_outputs = return_outputs, + num_items_in_batch = num_items_in_batch, + ) + return outputs + pass + + function = inspect.getsource(compute_loss) return function pass RL_FUNCTIONS["sft_trainer"].append(sft_trainer_compute_loss) From f6003b000f8c0d81168266937e5e3c7ba9bf6637 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 15 Feb 2025 01:19:39 -0800 Subject: [PATCH 1029/1088] Memory efficient GRPO, DPO etc (#1716) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Faster inference? 
* Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Better TRL handling * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update 
rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL --- pyproject.toml | 34 ++++----- unsloth/models/_utils.py | 2 +- unsloth/models/rl.py | 54 +++++++++++++- unsloth/models/rl_replacements.py | 115 +++++++++++++++++++++++++----- 4 files changed, 167 insertions(+), 38 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2a6e31dcae..59a7c44737 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.2.2", + "unsloth_zoo>=2025.2.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -50,7 +50,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<0.15.0", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -176,26 +176,26 @@ cu124onlytorch251 = [ "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post1-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu118onlytorch260 = [ - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system 
== 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.29.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu124onlytorch260 = [ - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post2-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ 
https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", "xformers @ https://download.pytorch.org/whl/cu124/xformers-0.0.29.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu126onlytorch260 = [ - "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", - "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post2-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 
'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", ] cu118 = [ "unsloth[huggingface]", @@ -344,7 +344,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.2.2", + "unsloth_zoo>=2025.2.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -362,7 +362,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,<0.15.0", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", "peft>=0.7.1", "xformers", "bitsandbytes>=0.46.1", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 8d0eadb968..df925d746b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.9" +__version__ = "2025.2.10" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 3d601b0af1..2875ff64a5 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -21,12 +21,25 @@ import inspect import os import re +import torch from unsloth_zoo.compiler import create_new_function from unsloth_zoo.logging_utils import PatchRLStatistics +from unsloth_zoo.rl_replacements import RL_REPLACEMENTS from .rl_replacements import ( RL_EXTRA_ARGS, RL_FUNCTIONS, + RL_PRE_ITEMS, + RL_CONFIG_CHANGES, ) +selective_log_softmax = RL_REPLACEMENTS["selective_log_softmax"] + +torch_compile_options = { + "epilogue_fusion" : True, + "max_autotune" : True, + "shape_padding" : True, + "trace.enabled" : False, + "triton.cudagraphs" : False, +} def PatchRL(FastLanguageModel): @@ -81,6 +94,17 @@ def generate_with_clone(*args, **kwargs): from packaging.version import Version import torch from contextlib import nullcontext +from torch.nn import functional as F +torch_compile_options = {{ + "epilogue_fusion" : True, + "max_autotune" : True, + "shape_padding" : True, + "trace.enabled" : False, + "triton.cudagraphs" : False, +}} + +{selective_log_softmax_code} +{RL_pre} @dataclass class Unsloth{RLConfig_name}({RLConfig_name}): @@ -142,8 +166,13 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): if RLTrainer.__name__.startswith("Unsloth"): return if RLConfig .__name__.startswith("Unsloth"): return + # Get old source + old_RLTrainer_source = inspect.getsource(RLTrainer) + old_RLConfig_source = inspect.getsource(RLConfig) + all_imports = dir(trainer) - imports = [x for x in all_imports if not x.startswith("_")] + # Fix _deprecate_arguments not getting imported so stop __ but not _ + imports = [x for x in all_imports if not x.startswith("__")] # Get default arguments EMPTY = inspect.Parameter.empty @@ -358,6 +387,13 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): extra_args += num_proc_check pass + # 
Edit config with anything extra + if trainer_file in RL_CONFIG_CHANGES: + process_extra_args = RL_CONFIG_CHANGES[trainer_file] + for process_extra_arg in process_extra_args: + extra_args += process_extra_arg(old_RLTrainer_source, old_RLConfig_source) + pass + # Edit report_to and default it to nothing if max_steps is like 60 # Create RLConfig args @@ -377,6 +413,17 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): __RLTrainer_doc__ = eval(f"trl.trainer.{RLTrainer_name}").__doc__ __RLConfig_doc__ = eval(f"trl.trainer.{RLConfig_name}") .__doc__ + # Get all pre-modules + if trainer_file in RL_PRE_ITEMS: + RL_pre = "\n".join(RL_PRE_ITEMS[trainer_file]) + else: + RL_pre = "" + pass + + # Selective log softmax + selective_log_softmax_code = inspect.getsource(selective_log_softmax) + + # Get final source code RLTrainer_source = RLTrainer_replacement.format( RLTrainer_name = RLTrainer_name, __RLTrainer_doc__ = __RLTrainer_doc__, @@ -394,6 +441,9 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RLTrainer_extras = RLTrainer_extras, RLTrainer_post = RLTrainer_post, + RL_pre = RL_pre, + + selective_log_softmax_code = selective_log_softmax_code, ) # Create new function @@ -402,7 +452,7 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RLTrainer_source, f"trl.trainer.{trainer_file}", imports, - overwrite = False, + overwrite = True, ) # Patch Trainer diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 82fd3f8d3c..63fe243595 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -15,14 +15,27 @@ __all__ = [ "RL_EXTRA_ARGS", "RL_FUNCTIONS", + "RL_PRE_ITEMS", + "RL_CONFIG_CHANGES", ] import re +import torch import inspect from collections import defaultdict -RL_EXTRA_ARGS = defaultdict(list) -RL_FUNCTIONS = defaultdict(list) +from unsloth_zoo.rl_replacements import RL_REPLACEMENTS +RL_EXTRA_ARGS = defaultdict(list) +RL_FUNCTIONS = defaultdict(list) +RL_PRE_ITEMS = 
defaultdict(list) +RL_CONFIG_CHANGES = defaultdict(list) +torch_compile_options = { + "epilogue_fusion" : True, + "max_autotune" : True, + "shape_padding" : True, + "trace.enabled" : False, + "triton.cudagraphs" : False, +} # Check untrained tokens def sft_trainer_fix_untraiend_tokens(call_args, extra_args): @@ -161,23 +174,89 @@ def grpo_trainer__move_model_to_vllm(function_name, function): def grpo_trainer__get_per_token_logps(function_name, function): if function_name != "_get_per_token_logps": return function - # Edit model to autocast it - # .*? matches first match. .+? matches final match. - original = re.findall( - r"\n([ ]{4,})(logits = model\(.*?\))", - function, - flags = re.MULTILINE | re.DOTALL, - ) - if len(original) != 0: - spaces, original = original[0] - spaces = len(spaces) - replacer = \ - "if not hasattr(self, '_autocast_dtype'):\n" + \ - " "*(spaces + 4) + "self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16\n" + \ - " "*(spaces + 0) + "with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype):\n" + \ - " "*(spaces + 4) + original - function = function.replace(original, replacer) + def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep): + if not hasattr(self, '_autocast_dtype'): + self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16 + with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype): + # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded + logits = model(input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1).logits + logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred + + input_ids = input_ids[:, -logits_to_keep:] + # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves. 
+ # See https://github.com/huggingface/trl/issues/2770 + logits = logits[:, -logits_to_keep:] + return logits + # return selective_log_softmax(logits, input_ids) # compute logprobs for the input tokens + pass pass + + function = inspect.getsource(_get_per_token_logps) return function pass RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps) + +grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"] +RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_compute_loss)) + +# Edit _get_per_token_logps to handle mixed precision +def grpo_trainer_compute_loss(function_name, function): + if function_name != "compute_loss": return function + + def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None): + if return_outputs: + raise ValueError("The GRPOTrainer does not support returning outputs") + # Compute the per-token log probabilities for the model + + prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"] + completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"] + input_ids = torch.cat([prompt_ids, completion_ids], dim=1) + # attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) + attention_mask = None + logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens + + per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep) + + # Compute the KL divergence between the model and the reference model + ref_per_token_logps = inputs["ref_per_token_logps"] + # per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1 + + # x - x.detach() allows for preserving gradients from x + advantages = inputs["advantages"] + # per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1) + # per_token_loss = -(per_token_loss - self.beta * per_token_kl) + # loss = ((per_token_loss * completion_mask).sum(dim=1) / 
completion_mask.sum(dim=1)).mean() + input_ids = input_ids[:, -logits_to_keep:] + loss, completion_length, mean_kl = grpo_compute_loss( + ref_per_token_logps, per_token_logps, input_ids, completion_mask, self.beta, advantages, + ) + # Log the metrics + # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item() + self._metrics["completion_length"].append(completion_length.item()) + + # mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() + # self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item()) + self._metrics["kl"].append(mean_kl.item()) + return loss + pass + + function = inspect.getsource(compute_loss) + return function +pass +RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer_compute_loss) + +# https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L356 +# TRL warns if batch size is not a multiple of num_generations -> fix this. +def grpo_trainer_fix_batch_size(RLTrainer_source, RLConfig_source): + if "divisible by the number of generations" not in RLTrainer_source: return "" + if "num_generations" not in RLConfig_source: return "" + + check_batch_size = \ + "div = per_device_train_batch_size // num_generations\n"\ + "if div * num_generations != per_device_train_batch_size:\n"\ + " print('Unsloth: We know expect `per_device_train_batch_size` to be a multiple of `num_generations`.\\n"\ + "We will change the batch size of ' + str(per_device_train_batch_size) + ' to the `num_generations` of ' + str(num_generations))\n"\ + " per_device_train_batch_size = num_generations\n" + return check_batch_size +pass +RL_CONFIG_CHANGES["grpo_trainer"].append(grpo_trainer_fix_batch_size) From 1c9661913532984223f27008982e5ac11b3fdcfe Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 15 Feb 2025 02:24:01 -0800 Subject: [PATCH 1030/1088] Add GRPO metrics (#1718) * Update llama.py * Update llama.py * Faster inference? 
* Update llama.py * Update llama.py * Update utils.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update mapper.py * Fast Inference via vLLM * Update llama.py * Update llama.py * Update utils.py * Create rl.py * PatchRL * Update rl.py * Update rl.py * Update rl.py * PatchRLStatistics * Update rl.py * Update rl.py * Update rl.py * Update utils.py * Update utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * RL metrics * Update rl.py * RL metrics * Update __init__.py * Update rl.py * Update rl.py * Update rl.py * Update chat_templates.py * Update mapper.py * Fp8 cache * Update llama.py * Update llama.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Better TRL handling * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update 
rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update 
rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py --- unsloth/models/_utils.py | 2 +- unsloth/models/rl.py | 13 ++++++++++++- unsloth/models/rl_replacements.py | 27 +++++++++++++++++++++++---- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index df925d746b..2a5b71d399 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.10" +__version__ = "2025.2.11" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 2875ff64a5..7b363d8fc1 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -30,6 +30,7 @@ RL_FUNCTIONS, RL_PRE_ITEMS, RL_CONFIG_CHANGES, + RL_METRICS_CHANGES, ) selective_log_softmax = RL_REPLACEMENTS["selective_log_softmax"] @@ -310,10 +311,20 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RLTrainer_post += neftune_check pass + # Edit optional metrics + other_metrics_processor = "" + if trainer_file in RL_METRICS_CHANGES: + process_extra_args = RL_METRICS_CHANGES[trainer_file] + for process_extra_arg in process_extra_args: + other_metrics_processor += process_extra_arg(call_args, extra_args) + pass + # Add statistics as well! 
extra_args += \ + "other_metrics = []\n"\ + f"{other_metrics_processor}\n"\ "from unsloth_zoo.logging_utils import PatchRLStatistics\n"\ - f"PatchRLStatistics('{trainer_file}')\n" + f"PatchRLStatistics('{trainer_file}', other_metrics)\n" # Patch optional args if trainer_file in RL_EXTRA_ARGS: diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 63fe243595..b2501c94fc 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -17,6 +17,7 @@ "RL_FUNCTIONS", "RL_PRE_ITEMS", "RL_CONFIG_CHANGES", + "RL_METRICS_CHANGES", ] import re @@ -24,10 +25,11 @@ import inspect from collections import defaultdict from unsloth_zoo.rl_replacements import RL_REPLACEMENTS -RL_EXTRA_ARGS = defaultdict(list) -RL_FUNCTIONS = defaultdict(list) -RL_PRE_ITEMS = defaultdict(list) -RL_CONFIG_CHANGES = defaultdict(list) +RL_EXTRA_ARGS = defaultdict(list) +RL_FUNCTIONS = defaultdict(list) +RL_PRE_ITEMS = defaultdict(list) +RL_CONFIG_CHANGES = defaultdict(list) +RL_METRICS_CHANGES = defaultdict(list) torch_compile_options = { "epilogue_fusion" : True, @@ -260,3 +262,20 @@ def grpo_trainer_fix_batch_size(RLTrainer_source, RLConfig_source): return check_batch_size pass RL_CONFIG_CHANGES["grpo_trainer"].append(grpo_trainer_fix_batch_size) + + +# Add other reward function names +def grpo_trainer_metrics(RLTrainer_source, RLConfig_source): + if "reward_funcs" not in RLTrainer_source: return "" + + log_metrics = \ + "if not isinstance(reward_funcs, list): _reward_funcs = [reward_funcs]\n"\ + "else: _reward_funcs = reward_funcs\n"\ + "for reward_func in _reward_funcs:\n"\ + " try:\n"\ + " reward_func_name = reward_func.__name__\n"\ + " other_metrics.append(f'rewards/{reward_func_name}')\n"\ + " except: pass\n" + return log_metrics +pass +RL_METRICS_CHANGES["grpo_trainer"].append(grpo_trainer_metrics) From a54690f6401987096e353c6995678b901b72a3ed Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 15 Feb 2025 02:48:36 -0800 Subject: 
[PATCH 1031/1088] Update mapper.py --- unsloth/models/mapper.py | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index c81290b662..2e85d30145 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -218,13 +218,25 @@ "unsloth/Mistral-Nemo-Base-2407", "mistralai/Mistral-Nemo-Base-2407", ), - "unsloth/Meta-Llama-3.1-8B-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B-unsloth-bnb-4bit" : ( "unsloth/Meta-Llama-3.1-8B", "meta-llama/Meta-Llama-3.1-8B", + "unsloth/Meta-Llama-3.1-8B-bnb-4bit", ), - "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit" : ( + "unsloth/Meta-Llama-3.1-8B-Instruct-unsloth-bnb-4bit" : ( "unsloth/Meta-Llama-3.1-8B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct", + "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit", + ), + "unsloth/Llama-3.1-8B-unsloth-bnb-4bit" : ( + "unsloth/Llama-3.1-8B", + "meta-llama/Llama-3.1-8B", + "unsloth/Llama-3.1-8B-bnb-4bit", + ), + "unsloth/Llama-3.1-8B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/Llama-3.1-8B-Instruct", + "meta-llama/Llama-3.1-8B-Instruct", + "unsloth/Llama-3.1-8B-Instruct-bnb-4bit", ), "unsloth/Meta-Llama-3.1-70B-bnb-4bit" : ( "unsloth/Meta-Llama-3.1-70B", @@ -589,6 +601,21 @@ "Qwen/Qwen2.5-VL-72B-Instruct", "unsloth/Qwen2.5-VL-72B-Instruct-bnb-4bit", ), + "unsloth/DeepHermes-3-Llama-3-8B-Preview-unsloth-bnb-4bit" : ( + "unsloth/DeepHermes-3-Llama-3-8B-Preview", + "NousResearch/DeepHermes-3-Llama-3-8B-Preview", + "unsloth/DeepHermes-3-Llama-3-8B-Preview-bnb-4bit", + ), + "unsloth/DeepScaleR-1.5B-Preview-unsloth-bnb-4bit" : ( + "unsloth/DeepHermes-3-Llama-3-8B-Preview", + "agentica-org/DeepScaleR-1.5B-Preview", + "unsloth/DeepScaleR-1.5B-Preview-bnb-4bit", + ), + "unsloth/OpenThinker-7B-unsloth-bnb-4bit" : ( + "unsloth/OpenThinker-7B", + "open-thoughts/OpenThinker-7B", + "unsloth/OpenThinker-7B-bnb-4bit", + ), } INT_TO_FLOAT_MAPPER = {} From d1d15f1d14f1168837d29b9c08e9b6d63945d469 Mon Sep 17 00:00:00 
2001 From: Daniel Han Date: Sat, 15 Feb 2025 03:12:43 -0800 Subject: [PATCH 1032/1088] Fix weird tokenizer issue --- unsloth/models/_utils.py | 2 +- unsloth/tokenizer_utils.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2a5b71d399..0c51c174f0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.11" +__version__ = "2025.2.12" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 404fce319f..048bee7797 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -259,6 +259,7 @@ def convert_to_fast_tokenizer( def assert_same_tokenization(slow_tokenizer, fast_tokenizer): # Get eos_token, bos_token etc + if not hasattr(slow_tokenizer, "all_special_tokens"): return True dir_names = dir(slow_tokenizer) special_tokens = list(filter(None, ( getattr(slow_tokenizer, x) for x in dir_names @@ -503,12 +504,14 @@ def _load_correct_tokenizer( cache_dir = cache_dir, ) except: - pass + slow_tokenizer = None # print( # f"Unsloth: {tokenizer_name} has no tokenizer.model file.\n"\ # "Just informing you about this - this is not a critical error." # ) pass + # Unsure why this occurs! 
+ if type(slow_tokenizer) is bool: slow_tokenizer = None fast_tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, From 67d34405ea926708e6c45f9005086be6bdc21885 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 19 Feb 2025 23:24:05 -0800 Subject: [PATCH 1033/1088] Update README.md (#1768) --- README.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 0038355aa5..45312a43d1 100644 --- a/README.md +++ b/README.md @@ -98,9 +98,16 @@ We tested using the Alpaca Dataset, a batch size of 2, gradient accumulation st ## 💾 Installation Instructions -For stable releases, use `pip install unsloth`. We recommend `pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"` for most installations though. +Simply use pip install on Linux machines. Windows instructions are below. +
    +
      + +

      pip install unsloth

      +
      +
    +
    -### Conda Installation +### Conda Installation (Optional) `⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. We support `python=3.10,3.11,3.12`. ```bash conda create --name unsloth_env \ @@ -110,8 +117,7 @@ conda create --name unsloth_env \ -y conda activate unsloth_env -pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" -pip install --no-deps trl peft accelerate bitsandbytes +pip install unsloth ```
    From f29da34fabc19e37badb08fe05bb43ad9f20e6bc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Feb 2025 04:23:28 -0800 Subject: [PATCH 1034/1088] Memory Efficient GRPO (#1773) * Update __init__.py * Update loader.py * Update rl.py * Update rl.py * Update _utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Better TRL handling * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update 
llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. 
* add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning --------- Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> --- README.md | 7 ++- pyproject.toml | 8 ++- unsloth/__init__.py | 2 +- unsloth/models/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 97 +++++++++++++++++++------------ unsloth/models/loader.py | 10 +++- unsloth/models/mapper.py | 5 -- unsloth/models/rl.py | 67 ++++++++++++++++++--- unsloth/models/rl_replacements.py | 30 +++++++--- unsloth/save.py | 77 ++++++++++++++---------- 11 files changed, 206 insertions(+), 101 deletions(-) diff --git a/README.md b/README.md index 45312a43d1..4bdd7e2893 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://git ### Windows Installation To run Unsloth directly on Windows: -- Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows +- Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows (be aware that the Windows fork requires PyTorch >= 2.4 and CUDA 12) - In 
the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: ```python trainer = SFTTrainer( @@ -202,12 +202,15 @@ trainer = SFTTrainer( ) ``` +### Advanced/Troubleshooting + For **advanced installation instructions** or if you see weird errors during installations: 1. Install `torch` and `triton`. Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` 2. Confirm if CUDA is installated correctly. Try `nvcc`. If that fails, you need to install `cudatoolkit` or CUDA drivers. 3. Install `xformers` manually. You can try installing `vllm` and seeing if `vllm` succeeds. Check if `xformers` succeeded with `python -m xformers.info` Go to https://github.com/facebookresearch/xformers. Another option is to install `flash-attn` for Ampere GPUs. -4. Finally, install `bitsandbytes` and check it with `python -m bitsandbytes` +4. Double check that your versions of Python, CUDA, CUDNN, `torch`, `triton`, and `xformers` are compatible with one another. The [PyTorch Compatibility Matrix](https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix) may be useful. +5. Finally, install `bitsandbytes` and check it with `python -m bitsandbytes` ## 📜 [Documentation](https://docs.unsloth.ai) - Go to our official [Documentation](https://docs.unsloth.ai) for saving to GGUF, checkpointing, evaluation and more! 
diff --git a/pyproject.toml b/pyproject.toml index 59a7c44737..96aa0696fb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.2.5", + "unsloth_zoo>=2025.2.6", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -196,6 +196,10 @@ cu126onlytorch260 = [ "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp311-cp311-manylinux_2_28_x86_64.whl ; python_version=='3.11' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp312-cp312-manylinux_2_28_x86_64.whl ; python_version=='3.12' and platform_system == 'Linux'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "xformers @ https://download.pytorch.org/whl/cu126/xformers-0.0.29.post3-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] cu118 = [ "unsloth[huggingface]", @@ -344,7 +348,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.2.5", + "unsloth_zoo>=2025.2.6", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index f0600f3328..a3b3e68b2d 100644 --- 
a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -196,7 +196,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.2.4"): + if Version(unsloth_zoo_version) < Version("2025.2.6"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index b15e04ab74..29ad78dae2 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -20,4 +20,4 @@ from .qwen2 import FastQwen2Model from .dpo import PatchDPOTrainer, PatchKTOTrainer from ._utils import is_bfloat16_supported -from .rl import PatchFastRL +from .rl import PatchFastRL, vLLMSamplingParams diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0c51c174f0..52b3710916 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.12" +__version__ = "2025.2.13" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1eae97ff1c..909dfc339b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -700,6 +700,7 @@ def LlamaModel_fast_forward( elif inputs_requires_grad: inputs_embeds.requires_grad_(False) pass + attention_mask = attention_mask[:,:self.max_seq_length] # Must resize! inputs_embeds *= attention_mask.unsqueeze(0).transpose(0, 1).transpose(1, 2) if inputs_requires_grad: inputs_embeds.requires_grad_(True) pass @@ -774,9 +775,12 @@ def LlamaModel_fast_forward( self.SWA_mask = True self.GA_mask = False elif attention_mask is not None: - # Fixes https://github.com/unslothai/unsloth/issues/853 # Unsloth needs a 2D mask, not a [2, 1, n, n] mask! 
+ + # https://github.com/pytorch/pytorch/issues/103749 + # Need to convert to float and not using bool + attention_mask = (1.0 - attention_mask.float()) * torch.finfo(inputs_embeds.dtype).min dynamic_SWA_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), @@ -1030,6 +1034,7 @@ def _CausalLM_fast_forward( output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, num_logits_to_keep: Optional[int] = 0, + logits_to_keep: Optional[int] = 0, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: @@ -1053,16 +1058,16 @@ def _CausalLM_fast_forward( # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) self.model._has_no_labels = labels is None outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + input_ids = input_ids, + causal_mask = causal_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_values = past_key_values, + inputs_embeds = inputs_embeds, + use_cache = use_cache, + output_attentions = output_attentions, + output_hidden_states = output_hidden_states, + return_dict = return_dict, ) pass hidden_states = outputs[0] @@ -1072,6 +1077,20 @@ def _CausalLM_fast_forward( logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) dtype = lm_head.dtype + num_logits_to_keep = max(num_logits_to_keep, logits_to_keep) + + # Output last hidden states without logits if asked + if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": + if num_logits_to_keep != 0: + hidden_states = hidden_states[:, -num_logits_to_keep:, :] + return CausalLMOutputWithPast( + loss = None, + logits = hidden_states, + 
past_key_values = outputs.past_key_values, + hidden_states = outputs.hidden_states, + attentions= outputs.attentions, + ) + pass if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(dtype)) @@ -1166,11 +1185,11 @@ def _CausalLM_fast_forward( return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, + loss = loss, + logits = logits, + past_key_values = outputs.past_key_values, + hidden_states = outputs.hidden_states, + attentions= outputs.attentions, ) pass return _CausalLM_fast_forward @@ -1180,28 +1199,30 @@ def _CausalLM_fast_forward( @torch._disable_dynamo def PeftModelForCausalLM_fast_forward( self, - input_ids=None, - causal_mask=None, - attention_mask=None, - inputs_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - task_ids=None, - num_logits_to_keep=0, + input_ids = None, + causal_mask = None, + attention_mask = None, + inputs_embeds = None, + labels = None, + output_attentions = None, + output_hidden_states = None, + return_dict = None, + task_ids = None, + num_logits_to_keep = 0, + logits_to_keep = 0, **kwargs, ): return self.base_model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - inputs_embeds=inputs_embeds, - labels=labels, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - num_logits_to_keep=num_logits_to_keep, + input_ids = input_ids, + causal_mask = causal_mask, + attention_mask = attention_mask, + inputs_embeds = inputs_embeds, + labels = labels, + output_attentions = output_attentions, + output_hidden_states = output_hidden_states, + return_dict = return_dict, + num_logits_to_keep = num_logits_to_keep, + logits_to_keep = logits_to_keep, **kwargs, ) pass @@ -1694,9 +1715,9 @@ def from_pretrained( 
elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: logger.warning_once("Device does not support bfloat16. Will change to float16.") dtype = torch.float16 - elif dtype == torch.float16 and SUPPORTS_BFLOAT16: - logger.warning_once("Device supports bfloat16 but you selected float16. Will change to bfloat16.") - dtype = torch.bfloat16 + # elif dtype == torch.float16 and SUPPORTS_BFLOAT16: + # logger.warning_once("Device supports bfloat16 but you selected float16. Will change to bfloat16.") + # dtype = torch.bfloat16 assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 39b367e275..186545cf0c 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -24,10 +24,14 @@ from .loader_utils import get_model_name import os, contextlib, sys try: - from huggingface_hub.utils import get_token + from huggingface_hub import get_token except: - # Old HF Hub versions <= 0.0.25 - from huggingface_hub.utils._token import get_token + try: + from huggingface_hub.utils import get_token + except: + # For older versions of huggingface_hub + from huggingface_hub.utils._token import get_token + pass pass from huggingface_hub import HfFileSystem import importlib.util diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 2e85d30145..da7f449bb4 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -601,11 +601,6 @@ "Qwen/Qwen2.5-VL-72B-Instruct", "unsloth/Qwen2.5-VL-72B-Instruct-bnb-4bit", ), - "unsloth/DeepHermes-3-Llama-3-8B-Preview-unsloth-bnb-4bit" : ( - "unsloth/DeepHermes-3-Llama-3-8B-Preview", - "NousResearch/DeepHermes-3-Llama-3-8B-Preview", - "unsloth/DeepHermes-3-Llama-3-8B-Preview-bnb-4bit", - ), "unsloth/DeepScaleR-1.5B-Preview-unsloth-bnb-4bit" : ( "unsloth/DeepHermes-3-Llama-3-8B-Preview", "agentica-org/DeepScaleR-1.5B-Preview", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 7b363d8fc1..f6b3fdbf32 100644 --- 
a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -14,6 +14,7 @@ __all__ = [ "PatchFastRL", + "vLLMSamplingParams", ] import torch @@ -36,12 +37,20 @@ torch_compile_options = { "epilogue_fusion" : True, - "max_autotune" : True, + "max_autotune" : False, # Disable Triton mm kernels "shape_padding" : True, "trace.enabled" : False, "triton.cudagraphs" : False, } + +def vLLMSamplingParams(**kwargs): + from vllm import SamplingParams + sampling_params = SamplingParams(**kwargs) + sampling_params._set_kwargs = kwargs + return sampling_params +pass + def PatchRL(FastLanguageModel): from trl.models.utils import unwrap_model_for_generation @@ -94,11 +103,12 @@ def generate_with_clone(*args, **kwargs): from dataclasses import dataclass, field from packaging.version import Version import torch +import numpy as np from contextlib import nullcontext from torch.nn import functional as F torch_compile_options = {{ "epilogue_fusion" : True, - "max_autotune" : True, + "max_autotune" : False, "shape_padding" : True, "trace.enabled" : False, "triton.cudagraphs" : False, @@ -112,16 +122,24 @@ class Unsloth{RLConfig_name}({RLConfig_name}): """ {__RLConfig_doc__} """ - sampling_params: Optional[Any] = field( + vllm_sampling_params: Optional[Any] = field( default = None, metadata = {{'help': 'vLLM SamplingParams'}}, ) + unsloth_num_chunks : Optional[int] = field( + default = -1, + metadata = {{'help': 'Chunk size to reduce memory usage. 
-1 is most efficient.'}}, + ) def __init__({RLConfig_arguments}, - sampling_params = None, + vllm_sampling_params = None, + unsloth_num_chunks = -1, **kwargs, ): {RLConfig_extra_args} super().__init__({RLConfig_call_args}{RLConfig_kwargs}) + assert(hasattr(vllm_sampling_params, '_set_kwargs')) + self.vllm_sampling_params = vllm_sampling_params + self.unsloth_num_chunks = unsloth_num_chunks pass {RLTrainer_extras} @@ -422,7 +440,9 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): # Create full module exec(f"from trl.trainer import ({RLTrainer_name}, {RLConfig_name},)") __RLTrainer_doc__ = eval(f"trl.trainer.{RLTrainer_name}").__doc__ + if __RLTrainer_doc__ is None: __RLTrainer_doc__ = "" __RLConfig_doc__ = eval(f"trl.trainer.{RLConfig_name}") .__doc__ + if __RLConfig_doc__ is None: __RLConfig_doc__ = "" # Get all pre-modules if trainer_file in RL_PRE_ITEMS: @@ -431,6 +451,11 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RL_pre = "" pass + # Check if SamplingParams is in there + if "SamplingParams" in old_RLTrainer_source: + RL_pre = RL_pre + "\n" + inspect.getsource(vLLMSamplingParams) + pass + # Selective log softmax selective_log_softmax_code = inspect.getsource(selective_log_softmax) @@ -457,6 +482,14 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): selective_log_softmax_code = selective_log_softmax_code, ) + # Remove multiple doc strings + if __RLConfig_doc__ != "" and RLTrainer_source.count(__RLTrainer_doc__) == 2: + RLTrainer_source = RLTrainer_source.replace(__RLTrainer_doc__, "", 1) + pass + + # Remove multiple newlines + RLTrainer_source = re.sub(r"[\n]{3,}", "\n", RLTrainer_source) + # Create new function created_module = create_new_function( f"Unsloth{RLTrainer_name}", @@ -501,7 +534,7 @@ def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, import replacer = replacer[0] vllm_setter = "\n" + " "*8 + \ "if hasattr(model, 'vllm_engine') and "\ - "getattr(args, 'use_vllm') and getattr(args, 
'use_vllm', False): "\ + "hasattr(args, 'use_vllm') and (getattr(args, 'use_vllm', False) == False): "\ "args.use_vllm = True\n" init = init.replace(replacer, replacer + vllm_setter) pass @@ -529,14 +562,31 @@ def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, import ) if len(sampling_params) == 1: sampling_params = sampling_params[0] + + # Fix guided_decoding + sampling_params = sampling_params.replace( + "guided_decoding=guided_decoding,", + 'guided_decoding='\ + 'GuidedDecodingParams(backend="outlines", regex=args.vllm_guided_decoding_regex) '\ + 'if getattr(args, "vllm_guided_decoding_regex", None) is not None else None,', + ) # Replace with our vLLM engine sampling_params = \ " "*12 + "self.llm = model.vllm_engine; self._last_loaded_step = 0; " + \ sampling_params # Add spaces + + # Add extra arguments to SamplingParams + extra = "**getattr(getattr(args, 'vllm_sampling_params', vLLMSamplingParams()), '_set_kwargs', {})" + # Backwards replace + to_replace = "," + extra + "," + ")" + sampling_params = to_replace.join(sampling_params.rsplit(")", 1)) + # Strip multiple commas + sampling_params = re.sub(r"[\,][\s]{0,}\,", ",", sampling_params) + new_vllm_part = \ - f"\n{' '*8}if {args}.use_vllm:\n{sampling_params} "\ - f"if getattr(args, 'sampling_params', None) is None else "\ - f"getattr(args, 'sampling_params', None)\n{' '*8}else:\n" + f"\n{' '*8}if {args}.use_vllm:\n{sampling_params}"\ + f"\n{' '*8}else:\n" + init = init.replace(vllm_part, new_vllm_part) pass pass @@ -607,6 +657,7 @@ def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, import old, new = changed[function] RLTrainer_source = RLTrainer_source.replace(old, new) pass + RLTrainer_source = RLTrainer_source.replace( f"class {RLTrainer_name}", f"class _Unsloth{RLTrainer_name}", 1 ) diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index b2501c94fc..23b31172fd 100644 --- a/unsloth/models/rl_replacements.py +++ 
b/unsloth/models/rl_replacements.py @@ -40,7 +40,7 @@ } # Check untrained tokens -def sft_trainer_fix_untraiend_tokens(call_args, extra_args): +def sft_trainer_fix_untrained_tokens(call_args, extra_args): if "model" in call_args and "train_dataset" in call_args: fix_tokenizer = \ "IGNORED_TOKENIZER_NAMES = os.environ.get('UNSLOTH_IGNORED_TOKENIZER_NAMES', '').split('\\n')\n"\ @@ -52,7 +52,7 @@ def sft_trainer_fix_untraiend_tokens(call_args, extra_args): return fix_tokenizer return "" pass -RL_EXTRA_ARGS["sft_trainer"].append(sft_trainer_fix_untraiend_tokens) +RL_EXTRA_ARGS["sft_trainer"].append(sft_trainer_fix_untrained_tokens) # Remove DPO columns which might randomnly be tokenized @@ -177,6 +177,7 @@ def grpo_trainer__get_per_token_logps(function_name, function): if function_name != "_get_per_token_logps": return function def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep): + return None # Unsloth efficient GRPO if not hasattr(self, '_autocast_dtype'): self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16 with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype): @@ -198,8 +199,12 @@ def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep) pass RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps) -grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"] +grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"] +UnslothEfficientGRPO = RL_REPLACEMENTS["UnslothEfficientGRPO"] +grpo_accumulated_loss = RL_REPLACEMENTS["grpo_accumulated_loss"] RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_compute_loss)) +RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(UnslothEfficientGRPO)) +RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_accumulated_loss)) # Edit _get_per_token_logps to handle mixed precision def grpo_trainer_compute_loss(function_name, function): @@ -213,10 +218,12 @@ def 
compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"] completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"] input_ids = torch.cat([prompt_ids, completion_ids], dim=1) + bsz, qlen = input_ids.shape # attention_mask = torch.cat([prompt_mask, completion_mask], dim=1) attention_mask = None logits_to_keep = completion_ids.size(1) # we only need to compute the logits for the completion tokens - + _input_ids = input_ids + _logits_to_keep = logits_to_keep per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep) # Compute the KL divergence between the model and the reference model @@ -229,9 +236,16 @@ def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch # per_token_loss = -(per_token_loss - self.beta * per_token_kl) # loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() input_ids = input_ids[:, -logits_to_keep:] - loss, completion_length, mean_kl = grpo_compute_loss( - ref_per_token_logps, per_token_logps, input_ids, completion_mask, self.beta, advantages, - ) + if False:#per_token_logps is not None: + loss, completion_length, mean_kl = grpo_compute_loss( + ref_per_token_logps, per_token_logps, input_ids, completion_mask, self.beta, advantages, + ) + else: + loss, completion_length, mean_kl = grpo_accumulated_loss( + self, _input_ids, logits_to_keep, completion_mask, advantages, + n_chunks = self.args.unsloth_num_chunks, + ) + # Log the metrics # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item() self._metrics["completion_length"].append(completion_length.item()) @@ -256,7 +270,7 @@ def grpo_trainer_fix_batch_size(RLTrainer_source, RLConfig_source): check_batch_size = \ "div = per_device_train_batch_size // num_generations\n"\ "if div * num_generations != per_device_train_batch_size:\n"\ - " 
print('Unsloth: We know expect `per_device_train_batch_size` to be a multiple of `num_generations`.\\n"\ + " print('Unsloth: We now expect `per_device_train_batch_size` to be a multiple of `num_generations`.\\n"\ "We will change the batch size of ' + str(per_device_train_batch_size) + ' to the `num_generations` of ' + str(num_generations))\n"\ " per_device_train_batch_size = num_generations\n" return check_batch_size diff --git a/unsloth/save.py b/unsloth/save.py index d3ba1928c4..eaddfa05c5 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -31,10 +31,14 @@ from .tokenizer_utils import fix_sentencepiece_gguf from huggingface_hub import HfApi try: - from huggingface_hub.utils import get_token + from huggingface_hub import get_token except: - # Old HF Hub versions <= 0.0.25 - from huggingface_hub.utils._token import get_token + try: + from huggingface_hub.utils import get_token + except: + # For older versions of huggingface_hub + from huggingface_hub.utils._token import get_token + pass pass from pathlib import Path @@ -254,7 +258,7 @@ def unsloth_save_model( # First check for a token! if push_to_hub: from huggingface_hub import whoami - try: + try: username = whoami(token = token)["name"] except: raise RuntimeError( @@ -385,7 +389,7 @@ def unsloth_save_model( else: internal_model = model pass - + # Cannot be converted properly! 
if (save_method == "merged_4bit") or (save_method == "lora") or ( not hasattr(model, "model") or \ @@ -481,7 +485,7 @@ def unsloth_save_model( gb_found = re.match("([0-9]{1,})[\s]{0,}GB", max_shard_size, flags = re.IGNORECASE) mb_found = re.match("([0-9]{1,})[\s]{0,}MB", max_shard_size, flags = re.IGNORECASE) if gb_found: sharded_ram_usage = int(gb_found.group(1)) * 1024 * 1024 * 1024 - elif mb_found: sharded_ram_usage = int(mb_found.group(1)) * 1024 * 1024 + elif mb_found: sharded_ram_usage = int(mb_found.group(1)) * 1024 * 1024 elif type(max_shard_size) is int: sharded_ram_usage = sharded_ram_usage pass @@ -612,7 +616,7 @@ def unsloth_save_model( # Edit save_pretrained_settings # [TODO] _create_repo has errors due to **kwargs getting accepted save_pretrained_settings["state_dict"] = state_dict - + # commit_description does not seem to work? what_to_delete = ("use_temp_dir", "commit_message", "create_pr", "revision", "commit_description", "tags",) \ if not push_to_hub else \ @@ -665,7 +669,7 @@ def unsloth_save_model( # Revert back padding side tokenizer.padding_side = old_padding_side - + print(" Done.") else: print() @@ -877,10 +881,15 @@ def install_llama_cpp_old(version = -10): pass # Check if successful - if not os.path.exists("llama.cpp/quantize") and not os.path.exists("llama.cpp/llama-quantize"): + if not ( + os.path.exists("llama.cpp/llama-quantize.exe") or + os.path.exists("llama.cpp/llama-quantize") or + os.path.exists("llama.cpp/quantize.exe") or + os.path.exists("llama.cpp/quantize") + ): raise RuntimeError( "Unsloth: The file 'llama.cpp/llama-quantize' or `llama.cpp/quantize` does not exist.\n"\ - "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" + "But we expect this file to exist! Maybe the llama.cpp developers changed the name or check extension of the llama-quantize file." 
) pass pass @@ -957,7 +966,7 @@ def save_to_gguf( else: raise TypeError("Unsloth: quantization_method can only be a string or a list of strings") pass - + # Check if bfloat16 is supported if model_dtype == "bf16" and not torch.cuda.is_bf16_supported(): logger.warning( @@ -973,7 +982,7 @@ def save_to_gguf( pass # Check I quants - for quant_method in quantization_method: + for quant_method in quantization_method: if quant_method.startswith("iq2"): raise RuntimeError("Unsloth: Currently iq2 type quantizations aren't supported yet - sorry!") pass @@ -1026,9 +1035,9 @@ def save_to_gguf( pass # Determine whether the system already has llama.cpp installed and the scripts are executable - quantize_location = get_executable(["llama-quantize", "quantize"]) + quantize_location = get_executable(["llama-quantize", "quantize", "llama-quantize.exe", "quantize.exe"]) convert_location = get_executable(["convert-hf-to-gguf.py", "convert_hf_to_gguf.py"]) - + error = 0 if quantize_location is not None and convert_location is not None: print("Unsloth: llama.cpp found in the system. We shall skip installation.") @@ -1062,14 +1071,18 @@ def save_to_gguf( # and llama.cpp/main changed to llama.cpp/llama-cli # See https://github.com/ggerganov/llama.cpp/pull/7809 quantize_location = None - if os.path.exists("llama.cpp/quantize"): + if os.path.exists("llama.cpp/quantize.exe"): + quantize_location = "llama.cpp/quantize.exe" + elif os.path.exists("llama.cpp/quantize"): quantize_location = "llama.cpp/quantize" + elif os.path.exists("llama.cpp/llama-quantize.exe"): + quantize_location = "llama.cpp/llama-quantize.exe" elif os.path.exists("llama.cpp/llama-quantize"): quantize_location = "llama.cpp/llama-quantize" else: raise RuntimeError( - "Unsloth: The file 'llama.cpp/llama-quantize' or 'llama.cpp/quantize' does not exist.\n"\ - "But we expect this file to exist! Maybe the llama.cpp developers changed the name?" 
+ "Unsloth: The file ('llama.cpp/llama-quantize' or 'llama.cpp/llama-quantize.exe' if you are on Windows WSL) or 'llama.cpp/quantize' does not exist.\n"\ + "But we expect this file to exist! Maybe the llama.cpp developers changed the name or check extension of the llama-quantize file." ) pass @@ -1150,7 +1163,7 @@ def save_to_gguf( # Concurrency from https://rentry.org/llama-cpp-conversions#merging-loras-into-a-model final_location = str((Path(model_directory) / f"unsloth.{first_conversion.upper()}.gguf").absolute()) - + print(f"Unsloth: [1] Converting model at {model_directory} into {first_conversion} GGUF format.\n"\ f"The output location will be {final_location}\n"\ "This might take 3 minutes...") @@ -1217,7 +1230,7 @@ def save_to_gguf( command = f"./{quantize_location} {full_precision_location} "\ f"{final_location} {quant_method} {n_cpus}" - + try_execute([command,], force_complete = True) # Check if quantization succeeded! @@ -1378,7 +1391,7 @@ def _determine_username(save_directory, old_username, token): save_directory = save_directory.lstrip("./") if "/" not in save_directory: from huggingface_hub import whoami - try: + try: username = whoami(token = token)["name"] if type(old_username) is str and username != old_username: username = old_username @@ -1412,7 +1425,7 @@ def create_huggingface_repo( repo_type = "model", exist_ok = False, private = private, - ) + ) # Create model card from huggingface_hub import ModelCard @@ -1453,7 +1466,7 @@ def upload_to_huggingface( repo_type = "model", exist_ok = False, private = private, - ) + ) # Create model card from huggingface_hub import ModelCard @@ -1527,7 +1540,7 @@ def fix_tokenizer_bos_token(tokenizer): # Check if BOS added already, then warn fix_bos_token = False chat_template = getattr(tokenizer, "chat_template", None) - + if (tokenizer("A").input_ids[0] == getattr(tokenizer, "bos_token_id", None)): if chat_template is not None and \ ( @@ -1546,7 +1559,7 @@ def fix_tokenizer_bos_token(tokenizer): 
new_chat_template = re.sub(r"\{[\s]{0,}\{[\s]{0,}bos\_token[\s]{0,}\}[\s]{0,}\}", "", chat_template) # Remove {{bos_token + new_chat_template = re.sub(r"\{[\s]{0,}\{[\s]{0,}bos\_token[\s]{0,}\+[\s]{0,}", "", new_chat_template) - + tokenizer.chat_template = new_chat_template pass @@ -1580,7 +1593,7 @@ def create_ollama_modelfile(tokenizer, gguf_location): modelfile = modelfile\ .replace(FILE_LOCATION_REPLACER, "{__FILE_LOCATION__}")\ .replace(EOS_TOKEN_REPLACER, "{__EOS_TOKEN__}") - + if "__EOS_TOKEN__" in modelfile: modelfile = modelfile.format( __FILE_LOCATION__ = gguf_location, @@ -1591,7 +1604,7 @@ def create_ollama_modelfile(tokenizer, gguf_location): __FILE_LOCATION__ = gguf_location, ) pass - + modelfile = modelfile\ .replace("⚫@✅#🦥", "{")\ .replace("⚡@🦥#⛵", "}")\ @@ -1733,7 +1746,7 @@ def unsloth_save_pretrained_gguf( # Save to GGUF all_file_locations, want_full_precision = save_to_gguf( - model_type, model_dtype, is_sentencepiece_model, + model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1911,7 +1924,7 @@ def unsloth_push_to_hub_gguf( # Save to GGUF all_file_locations, want_full_precision = save_to_gguf( - model_type, model_dtype, is_sentencepiece_model, + model_type, model_dtype, is_sentencepiece_model, new_save_directory, quantization_method, first_conversion, makefile, ) @@ -1928,7 +1941,7 @@ def unsloth_push_to_hub_gguf( # If not needing full precision, skip the first if not want_full_precision: all_file_locations = all_file_locations[1:] - + for file_location in all_file_locations: print("Unsloth: Uploading GGUF to Huggingface Hub...") username = upload_to_huggingface( @@ -2044,8 +2057,8 @@ def unsloth_convert_lora_to_ggml_and_push_to_hub( def unsloth_convert_lora_to_ggml_and_save_locally( self, - save_directory: str, # Added parameter for the folder name - tokenizer, + save_directory: str, # Added parameter for the folder name + tokenizer, temporary_location: str = 
"_unsloth_temporary_saved_buffers", maximum_memory_usage: float = 0.85, ): @@ -2162,7 +2175,7 @@ def unsloth_generic_save_pretrained_merged( tags : List[str] = None, temporary_location : str = "_unsloth_temporary_saved_buffers", maximum_memory_usage : float = 0.75, -): +): """ Same as .push_to_hub(...) except 4bit weights are auto converted to float16 with as few overhead as possible. From ed7673b35097ada1a5e66afc7d0c9cf22441bf6c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Feb 2025 04:47:12 -0800 Subject: [PATCH 1035/1088] bug fix --- unsloth/models/_utils.py | 2 +- unsloth/models/rl.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 52b3710916..382024512d 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.2.13" +__version__ = "2025.2.14" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index f6b3fdbf32..8f346073bf 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -137,7 +137,6 @@ def __init__({RLConfig_arguments}, ): {RLConfig_extra_args} super().__init__({RLConfig_call_args}{RLConfig_kwargs}) - assert(hasattr(vllm_sampling_params, '_set_kwargs')) self.vllm_sampling_params = vllm_sampling_params self.unsloth_num_chunks = unsloth_num_chunks pass From ca33f4b206cc0286b03c9aee5770333a4b1a5728 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Feb 2025 09:20:49 -0800 Subject: [PATCH 1036/1088] Bug Fixes (#1774) * Update rl.py * Update tokenizer_utils.py * Auto patching * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update 
tokenizer_utils.py * Update tokenizer_utils.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update tokenizer_utils.py * Update rl.py * Update rl.py * Update rl.py * max seq length * Update rl.py * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py 
* Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. 
* add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml --------- Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> --- pyproject.toml | 8 ++++---- unsloth/models/_utils.py | 5 +++++ unsloth/models/llama.py | 5 +++-- unsloth/models/rl_replacements.py | 7 ++++--- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 96aa0696fb..14797c8fa7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.2.6", + "unsloth_zoo>=2025.2.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -50,7 +50,7 @@ huggingface = [ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + 
"trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", @@ -348,7 +348,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.2.6", + "unsloth_zoo>=2025.2.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -366,7 +366,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", "peft>=0.7.1", "xformers", "bitsandbytes>=0.46.1", diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 382024512d..e1259af3ae 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -143,6 +143,11 @@ def filter(self, x): return not (self.text in x.getMessage()) transformers_training_args_logger.addFilter(HideLoggingMessage("torch.distributed")) del transformers_training_args_logger +# No label_names provided for model class +from transformers.trainer import logger as transformers_trainer_logger +transformers_trainer_logger.addFilter(HideLoggingMessage("No label_names")) +del transformers_trainer_logger + # Using the default loss: `ForCausalLMLoss`. try: from transformers.modeling_utils import logger as transformers_modeling_utils_logger diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 909dfc339b..3e0717a872 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -725,8 +725,9 @@ def LlamaModel_fast_forward( past_key_values_length, sliding_window = getattr(self.config, "sliding_window", None), ) - if attention_mask is not None: - attention_mask = attention_mask.to(torch.bool) + # Must NOT convert to bool - weirdly this causes stuff to error out! 
+ # if attention_mask is not None: + # attention_mask = attention_mask.to(torch.bool) pass hidden_states = inputs_embeds diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 23b31172fd..06ae82140b 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -164,10 +164,11 @@ def grpo_trainer__prepare_inputs(function_name, function): # Remove _move_model_to_vllm def grpo_trainer__move_model_to_vllm(function_name, function): if function_name != "_move_model_to_vllm": return function + + def _move_model_to_vllm(self, *args, **kwargs): return None - # .*? matches first match. .+? matches final match. - replacement = "def _move_model_to_vllm(self, *args, **kwargs): return None\n" - return " "*function.find("def") + replacement + function = inspect.getsource(_move_model_to_vllm) + return function pass RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__move_model_to_vllm) From 14c9be1d7160162a90ce7a9a6cae36965563a0e6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 20 Feb 2025 09:24:07 -0800 Subject: [PATCH 1037/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index e1259af3ae..6a3effb3c8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.14" +__version__ = "2025.2.15" __all__ = [ "SUPPORTS_BFLOAT16", From 575ef4d9c094d1c6d13f5414df4e7580347529ee Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Fri, 21 Feb 2025 22:59:19 -0800 Subject: [PATCH 1038/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4bdd7e2893..d4fe6678b0 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. ## 🦥 Unsloth.ai News -- 📣 NEW! Introducing [Reasoning](https://unsloth.ai/blog/r1-reasoning) in Unsloth. You can now reproduce DeepSeek-R1's "aha" moment with just 7GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! +- 📣 NEW! Introducing Long-context [Reasoning (GRPO)](https://unsloth.ai/blog/grpo) in Unsloth. You can now reproduce DeepSeek-R1's "aha" moment with just 5GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! - 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now! More details: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). - 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft is now supported. We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) - 📣 NEW! 
[Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. From 42cbe1f5659fd7f8e143a04a20c19aff87b0c07d Mon Sep 17 00:00:00 2001 From: Igor Kilbas Date: Wed, 26 Feb 2025 03:21:26 +0400 Subject: [PATCH 1039/1088] Fix: GRPO with Mistral and importing (#1831) * fix: mistral and importing * minor change * Style :) * Update mistral.py * Update mistral.py * Update mistral.py --------- Co-authored-by: Daniel Han --- unsloth/__init__.py | 21 ++++++++++ unsloth/models/mistral.py | 80 ++++++++++++++++++++++++++++++--------- 2 files changed, 83 insertions(+), 18 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index a3b3e68b2d..d18aaac0a0 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -17,6 +17,27 @@ import os, re, subprocess, inspect import numpy as np +# Check if modules that need patching are already imported +critical_modules = ['trl', 'transformers', 'peft'] +already_imported = [mod for mod in critical_modules if mod in sys.modules] + +# This check is critical because Unsloth optimizes these libraries by modifying +# their code at import time. If they're imported first, the original (slower, +# more memory-intensive) implementations will be used instead of Unsloth's +# optimized versions, potentially causing OOM errors or slower training. + +if already_imported: + # stacklevel=2 makes warning point to user's import line rather than this library code, + # showing them exactly where to fix the import order in their script + warnings.warn( + f"WARNING: Unsloth should be imported before {', '.join(already_imported)} " + f"to ensure all optimizations are applied. 
Your code may run slower or encounter " + f"memory issues without these optimizations.\n\n" + f"Please restructure your imports with 'import unsloth' at the top of your file.", + stacklevel = 2, + ) +pass + # Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so # enabling it will require much more work, so we have to prioritize. Please understand! # We do have a beta version, which you can contact us about! diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 784ca9cb41..779ff83496 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -35,6 +35,7 @@ MistralSdpaAttention = MistralAttention MistralFlashAttention2 = MistralAttention pass +from unsloth_zoo.utils import Version, _get_dtype def MistralAttention_fast_forward( @@ -183,6 +184,7 @@ def MistralForCausalLM_fast_forward( output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, num_logits_to_keep: Optional[int] = 0, + logits_to_keep: Optional[int] = 0, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: @@ -194,7 +196,6 @@ def MistralForCausalLM_fast_forward( elif q_len <= sliding_window: causal_mask = xformers.attn_bias.LowerTriangularMask() else: - # Fix from https://github.com/Rypo causal_mask = xformers.attn_bias.BlockDiagonalCausalMask\ .from_seqlens([q_len]*bsz)\ .make_local_attention(window_size = sliding_window) @@ -219,20 +220,35 @@ def MistralForCausalLM_fast_forward( ) else: outputs = self.model( - input_ids=input_ids, - causal_mask=causal_mask, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, + input_ids = input_ids, + causal_mask = causal_mask, + attention_mask = attention_mask, + position_ids = position_ids, + past_key_values = past_key_values, + inputs_embeds = inputs_embeds, + 
use_cache = use_cache, + output_attentions = output_attentions, + output_hidden_states = output_hidden_states, + return_dict = return_dict, ) pass hidden_states = outputs[0] + + # If we are in GRPO mode, return raw hidden states + if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": + num_logits_to_keep = max(num_logits_to_keep, logits_to_keep) + if num_logits_to_keep != 0: + hidden_states = hidden_states[:, -num_logits_to_keep:, :] + return CausalLMOutputWithPast( + loss = None, + logits = hidden_states, + past_key_values = outputs.past_key_values, + hidden_states = outputs.hidden_states, + attentions = outputs.attentions, + ) + pass + bsz, q_len, hd = hidden_states.shape lm_head = self.lm_head.weight if bsz == 1 and q_len == 1: @@ -241,9 +257,37 @@ def MistralForCausalLM_fast_forward( elif num_logits_to_keep != 0: logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)) else: + RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1" + # < 1024 Normal Unsloth uses less VRAM! 
+ if bsz * q_len <= 1024: RETURN_LOGITS = True + + if not RETURN_LOGITS and HAS_CUT_CROSS_ENTROPY and labels is not None: + n_items = kwargs.get("num_items_in_batch", None) or kwargs.get("n_items", None) + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) + loss = fused_linear_cross_entropy( + hidden_states = hidden_states, + lm_weight = lm_head, + labels = labels, + num_items_in_batch = n_items, + logit_softcapping = logit_softcapping, + ) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + output = CausalLMOutputWithPast( + loss = loss, + logits = EMPTY_LOGITS, + past_key_values = outputs.past_key_values, + hidden_states = outputs.hidden_states, + attentions = outputs.attentions, + ) + return output + pass logits = self.lm_head(hidden_states.to(lm_head.dtype)) pass - logits = logits.to(self.config.torch_dtype) + logits = logits.to(_get_dtype(self.config.torch_dtype)) loss = None if labels is not None: @@ -252,7 +296,7 @@ def MistralForCausalLM_fast_forward( # Fixes https://github.com/unslothai/unsloth/issues/10 self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") pass - + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, @@ -266,11 +310,11 @@ def MistralForCausalLM_fast_forward( return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, + loss = loss, + logits = logits, + past_key_values = outputs.past_key_values, + hidden_states = outputs.hidden_states, + attentions = outputs.attentions, ) pass From 2c0f50160e227936e0011d67e3bc2472c2089629 Mon Sep 17 00:00:00 2001 From: Charles London <36036324+le-big-mac@users.noreply.github.com> Date: Tue, 25 Feb 2025 23:22:35 +0000 
Subject: [PATCH 1040/1088] Fix key error in GRPOTrainer (#1818) * fix keyerror in GRPOTrainer * check for train in _metrics --- unsloth/models/rl_replacements.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 06ae82140b..f88e362beb 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -164,7 +164,7 @@ def grpo_trainer__prepare_inputs(function_name, function): # Remove _move_model_to_vllm def grpo_trainer__move_model_to_vllm(function_name, function): if function_name != "_move_model_to_vllm": return function - + def _move_model_to_vllm(self, *args, **kwargs): return None function = inspect.getsource(_move_model_to_vllm) @@ -246,14 +246,20 @@ def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch self, _input_ids, logits_to_keep, completion_mask, advantages, n_chunks = self.args.unsloth_num_chunks, ) - + # Log the metrics # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item() - self._metrics["completion_length"].append(completion_length.item()) # mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() # self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item()) - self._metrics["kl"].append(mean_kl.item()) + + if "train" in self._metrics: + mode = "eval" if self.control.should_evaluate else "train" + self._metrics[mode]["completion_length"].append(completion_length.item()) + self._metrics[mode]["kl"].append(mean_kl.item()) + else: + self._metrics["completion_length"].append(completion_length.item()) + self._metrics["kl"].append(mean_kl.item()) return loss pass From 7feb2ff8817a1dd602b1a7a142952da01cfcf52c Mon Sep 17 00:00:00 2001 From: Kareem <81531392+KareemMusleh@users.noreply.github.com> Date: Wed, 26 Feb 2025 18:50:37 +0700 Subject: [PATCH 1041/1088] fixed syntax warnings (#1522) * 
fixed most of syntax warnings * all syntaxwarnings fixed * Syntax fixes --------- Co-authored-by: Daniel Han --- unsloth/chat_templates.py | 4 ++-- unsloth/models/_utils.py | 6 +++--- unsloth/models/llama.py | 6 +++--- unsloth/models/vision.py | 6 +++--- unsloth/save.py | 10 +++++----- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index c401393234..5785894a23 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1684,7 +1684,7 @@ def construct_chat_template( \ for j in range(1, len(response_part)): try_find = re.escape(response_part[:j]) - try: found = next(re.finditer("(" + try_find + ").+?\{INPUT\}", chat_template, flags = re.DOTALL | re.MULTILINE)) + try: found = next(re.finditer("(" + try_find + ").+?\\{INPUT\\}", chat_template, flags = re.DOTALL | re.MULTILINE)) except: break pass separator = found.group(1) @@ -2125,7 +2125,7 @@ def test_hf_gguf_equivalence(tokenizer, gguf_model = "./model-unsloth.F16.gguf") gguf_tokens = "".join(datas) # Now extract GGUF tokenization attempt - gguf_tokenized = re.findall("([\d]{1,}) \-\> \'([^\']{1,})\'", gguf_tokens, flags = re.MULTILINE) + gguf_tokenized = re.findall(r"([\d]{1,}) \-\> \'([^\']{1,})\'", gguf_tokens, flags = re.MULTILINE) gguf_tokenized = [(int(x[0]), x[1],) for x in gguf_tokenized] input_ids = tokenizer(prompt).input_ids diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 6a3effb3c8..19b09e803c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -589,7 +589,7 @@ def make_inputs_require_grad(module, input, output): spaces = len(re.match(r"[\s]{1,}", source).group(0)) lines = source.split("\n") source = "\n".join(x[spaces:] for x in lines) - source = re.sub("([^\.])nn\.", r"\1torch.nn.", source) + source = re.sub(r"([^\.])nn\.", r"\1torch.nn.", source) source = source.replace("def update_layer", "def LoraLayer_update_layer") exec(source, globals()) @@ -852,7 +852,7 @@ def 
patch_linear_scaling( scaled_rope_function = scaled_rope_module.__name__, ) rotary_emb = re.findall( - "self.rotary_emb = .+?\)", function, + r"self\.rotary\_emb \= .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) if len(rotary_emb) == 0: @@ -952,7 +952,7 @@ def patch_llama_rope_scaling( (longrope_module if longrope_module is not None else rope_module).__name__ ) rotary_emb = re.findall( - "self.rotary_emb = .+?\)", function, + r"self\.rotary\_emb \= .+?\)", function, flags = re.DOTALL | re.MULTILINE, ) if len(rotary_emb) == 0: return None, function diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3e0717a872..9515a41cd9 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1888,11 +1888,11 @@ def from_pretrained( pass exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) - start = re.search('logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] + start = re.search(r'logger\.info\([\"\'].+?Running training', inner_training_loop).span(0)[0] end = inner_training_loop.find("\n\n", start) original_debug = inner_training_loop[start:end] - spaces = re.search('\n([\s\t]{1,})', original_debug).group(0)[1:] - front_spaces = re.match('([\s\t]{1,})', inner_training_loop).group(0) + spaces = re.search(r'\n([\s\t]{1,})', original_debug).group(0)[1:] + front_spaces = re.match(r'([\s\t]{1,})', inner_training_loop).group(0) # Cannot use \\ since it will cause a SyntaxWarning in Python 3.12 # Instead use chr(92) == \\ diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 51450aa0d9..31c6394f5d 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -98,9 +98,9 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers: {transformers_version}.\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. 
Platform: {platform_system}.\n"\ - f"O^O/ \_/ \\ Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ - f"\ / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ + f" {chr(92)}{chr(92)} /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ + f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' print(statistics) diff --git a/unsloth/save.py b/unsloth/save.py index eaddfa05c5..af95de07e7 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -482,8 +482,8 @@ def unsloth_save_model( max_ram = psutil.virtual_memory().available sharded_ram_usage = 5 * 1024 * 1024 * 1024 if type(max_shard_size) is str: - gb_found = re.match("([0-9]{1,})[\s]{0,}GB", max_shard_size, flags = re.IGNORECASE) - mb_found = re.match("([0-9]{1,})[\s]{0,}MB", max_shard_size, flags = re.IGNORECASE) + gb_found = re.match(r"([0-9]{1,})[\s]{0,}GB", max_shard_size, flags = re.IGNORECASE) + mb_found = re.match(r"([0-9]{1,})[\s]{0,}MB", max_shard_size, flags = re.IGNORECASE) if gb_found: sharded_ram_usage = int(gb_found.group(1)) * 1024 * 1024 * 1024 elif mb_found: sharded_ram_usage = int(mb_found.group(1)) * 1024 * 1024 elif type(max_shard_size) is int: @@ -1017,9 +1017,9 @@ def save_to_gguf( print_info = \ f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ - f" \\\ /| [0] Installing llama.cpp might take 3 minutes.\n"\ - f"O^O/ \_/ \\ [1] Converting HF to GGUF 16bits might take 3 minutes.\n"\ - f"\ / [2] Converting GGUF 16bits to {quantization_method} might take 10 minutes each.\n"\ + f" {chr(92)}{chr(92)} 
/| [0] Installing llama.cpp might take 3 minutes.\n"\ + f"O^O/ {chr(92)}_/ {chr(92)} [1] Converting HF to GGUF 16bits might take 3 minutes.\n"\ + f"{chr(92)} / [2] Converting GGUF 16bits to {quantization_method} might take 10 minutes each.\n"\ f' "-____-" In total, you will have to wait at least 16 minutes.\n' print(print_info) From 034af99dc271c03d2dd4357ba375fe7f266381a2 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 26 Feb 2025 16:58:32 -0800 Subject: [PATCH 1042/1088] Update README.md --- README.md | 57 ++++++++++++++++++++++++------------------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index d4fe6678b0..9046fa0ba0 100644 --- a/README.md +++ b/README.md @@ -65,9 +65,8 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and | ------------------------------- | --------------------------------------- | | 📚 **Documentation & Wiki** | [Read Our Docs](https://docs.unsloth.ai) | |   **Twitter (aka X)** | [Follow us on X](https://twitter.com/unslothai)| -| 💾 **Installation** | [unsloth/README.md](https://github.com/unslothai/unsloth/tree/main#-installation-instructions)| -| 🥇 **Benchmarking** | [Performance Tables](https://github.com/unslothai/unsloth/tree/main#-performance-benchmarking) -| 🌐 **Released Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| +| 💾 **Installation** | [Pip install](https://github.com/unslothai/unsloth/edit/main/README.md#-install-unsloth)| +| 🔮 **Our Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| |   **Reddit** | [Join our Reddit page](https://reddit.com/r/unsloth)| @@ -77,36 +76,15 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - No change of hardware. Supports NVIDIA GPUs since 2018+. 
Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. - Works on **Linux** and **Windows** via WSL. - Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -- Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for up to **30x faster training**! - If you trained a model with 🦥Unsloth, you can use this cool sticker!   +## 💾 Install Unsloth -## 🥇 Performance Benchmarking -- For our most detailed benchmarks, read our [Llama 3.3 Blog](https://unsloth.ai/blog/llama3-3). -- Benchmarking of Unsloth was also conducted by [🤗Hugging Face](https://huggingface.co/blog/unsloth-trl). - -We tested using the Alpaca Dataset, a batch size of 2, gradient accumulation steps of 4, rank = 32, and applied QLoRA on all linear layers (q, k, v, o, gate, up, down): - -| Model | VRAM | 🦥 Unsloth speed | 🦥 VRAM reduction | 🦥 Longer context | 😊 Hugging Face + FA2 | -|----------------|-------|-----------------|----------------|----------------|--------------------| -| Llama 3.3 (70B)| 80GB | 2x | >75% | 13x longer | 1x | -| Llama 3.1 (8B) | 80GB | 2x | >70% | 12x longer | 1x | - -
    - -![](https://i.ibb.co/sJ7RhGG/image-41.png) - -## 💾 Installation Instructions - -Simply use pip install on Linux machines. Windows instructions are below. -
    -
      - -

      pip install unsloth

      -
      -
    -
    - +- **Install with pip (recommended)** for Linux devices: +``` +pip install unsloth +``` +See below for Windows install instructions: ### Conda Installation (Optional) `⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. We support `python=3.10,3.11,3.12`. ```bash @@ -298,11 +276,14 @@ trainer.train() ``` -## DPO Support +## DPO + GRPO Support DPO (Direct Preference Optimization), PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! +
    + Click for DPO code + ```python import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Optional set GPU device ID @@ -360,9 +341,21 @@ dpo_trainer = DPOTrainer( ) dpo_trainer.train() ``` +
    + +## 🥇 Performance Benchmarking +- For our most detailed benchmarks, read our [Llama 3.3 Blog](https://unsloth.ai/blog/llama3-3). +- Benchmarking of Unsloth was also conducted by [🤗Hugging Face](https://huggingface.co/blog/unsloth-trl). + +We tested using the Alpaca Dataset, a batch size of 2, gradient accumulation steps of 4, rank = 32, and applied QLoRA on all linear layers (q, k, v, o, gate, up, down): + +| Model | VRAM | 🦥 Unsloth speed | 🦥 VRAM reduction | 🦥 Longer context | 😊 Hugging Face + FA2 | +|----------------|-------|-----------------|----------------|----------------|--------------------| +| Llama 3.3 (70B)| 80GB | 2x | >75% | 13x longer | 1x | +| Llama 3.1 (8B) | 80GB | 2x | >70% | 12x longer | 1x | -## 🥇 Detailed Benchmarking Tables ### Context length benchmarks + #### Llama 3.1 (8B) max. context length We tested Llama 3.1 (8B) Instruct and did 4bit QLoRA on all linear layers (Q, K, V, O, gate, up and down) with rank = 32 with a batch size of 1. We padded all sequences to a certain maximum sequence length to mimic long context finetuning workloads. | GPU VRAM | 🦥Unsloth context length | Hugging Face + FA2 | From 34ed3830154c02f9e78aaf88767712479863e3ab Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 26 Feb 2025 17:03:47 -0800 Subject: [PATCH 1043/1088] Update README.md --- README.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9046fa0ba0..665cbd5ea4 100644 --- a/README.md +++ b/README.md @@ -275,11 +275,14 @@ trainer.train() # (4) Customized chat templates ``` - -## DPO + GRPO Support -DPO (Direct Preference Optimization), PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). 
We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). - -We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! + +## 💡 Reinforcement Learning +RL including DPO, GRPO, PPO, Reward Modelling, Online DPO all work with Unsloth. We're in 🤗Hugging Face's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! List of RL notebooks: + +- ORPO notebook: [Link](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-ORPO.ipynb) +- DPO Zephyr notebook: [Link](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) +- KTO notebook: [Link](https://colab.research.google.com/drive/1a2b3c4d5e6f7g8h9i0j) +- SimPO notebook: [Link](https://colab.research.google.com/drive/1a2b3c4d5e6f7g8h9i0j)
    Click for DPO code From 2f15de5670da68a8feb72ea66c6885497cabf074 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 27 Feb 2025 03:42:03 -0800 Subject: [PATCH 1044/1088] Update rl_replacements.py --- unsloth/models/rl_replacements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index f88e362beb..ec311a7a83 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -101,7 +101,7 @@ def sft_trainer_prepare_dataset(function_name, function): # .*? matches first match. .+? matches final match. replacer = re.findall( - r"def {function_name}\(.*?\).*?\:\n", + r"def " + function_name + r"\(.*?\).*?\:\n", function, flags = re.MULTILINE | re.DOTALL, ) From 32f86a05c27858ca9e38e5c1e4db8dd9ec110db0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 27 Feb 2025 03:46:47 -0800 Subject: [PATCH 1045/1088] Update rl_replacements.py --- unsloth/models/rl_replacements.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index ec311a7a83..fe7f4accee 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -93,7 +93,7 @@ def sft_trainer_prepare_dataset(function_name, function): " tokenizer = partial(tokenizer, add_special_tokens = False)\n"\ " processing_class = tokenizer\n"\ "else:\n"\ - " add_special_tokens = False if has_bos_token_already else add_special_tokens\n" + " add_special_tokens = False if has_bos_token_already else locals().get('add_special_tokens', False)\n" check_text = check_text.split("\n") check_text = "\n".join(" "*8 + x for x in check_text) From 088765042786ede9e62dd888d9956424293232dd Mon Sep 17 00:00:00 2001 From: Aditya Ghai <119144814+adityaghai07@users.noreply.github.com> Date: Fri, 28 Feb 2025 09:55:46 +0530 Subject: [PATCH 1046/1088] Direct windows support for unsloth (#1841) * Direct Windows Support(main) * Update 
pyproject.toml * Update README.md Added the suggested changes to README --- README.md | 42 +++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 30 ++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 665cbd5ea4..0a13ac47c4 100644 --- a/README.md +++ b/README.md @@ -168,8 +168,48 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` -### Windows Installation +## Windows Installation +### Step 1: NVIDIA Video Driver + +You should install the latest version of your GPUs driver. You can download drivers here: + - [NVIDIA GPU Drive Download](https://www.nvidia.com/Download/index.aspx) + +### Step 2: Visual Studio C++ +You will need Visual Studio, with C++ installed. By default, C++ is not installed with Visual Studio, so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. + - [Visual Studio Community Edition](https://visualstudio.microsoft.com/vs/community/) + + + + + +
    + VSCode C++ Ref Image + +
    +

    Steps to configure VS C++

    +
    +
      +
    1. Launch the Installer downloaded from the link above.
    2. +
    3. In the installer, navigate to Individual components and select all the options mentioned in the image.
    4. +
    5. Click on install now.
    6. +
    +
    + +### Step 3: CUDA Toolkit + + - [Download CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive) + +### Step 4: Install PyTorch + +You will need the correct version of PyTorch that is compatibile with your CUDA drivers, so make sure to select them carefully + - [Install PyTorch](https://pytorch.org/get-started/locally/) + +### Step 5: Install Unsloth +```python +pip install "unsloth[windows] @ git+https://github.com/unslothai/unsloth.git" +``` +### Side note To run Unsloth directly on Windows: - Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows (be aware that the Windows fork requires PyTorch >= 2.4 and CUDA 12) - In the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: diff --git a/pyproject.toml b/pyproject.toml index 14797c8fa7..de1583e9e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,10 +33,32 @@ exclude = ["images*"] [project.optional-dependencies] triton = [ - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.1.0-windows.post5/triton-3.1.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", + "triton @ 
https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", + "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'" +] + +windows=[ + "unsloth_zoo>=2025.2.7", + "packaging", + "tyro", + "transformers>=4.46.1,!=4.47.0", + "datasets>=2.16.0", + "sentencepiece>=0.2.0", + "tqdm", + "psutil", + "wheel>=0.42.0", + "numpy", + "accelerate>=0.34.1", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", + "peft>=0.7.1,!=0.11.0", + "protobuf<4.0.0", + "huggingface_hub", + "hf_transfer", + "unsloth[triton]", + "bitsandbytes>=0.41.1 ; platform_system == 'Windows'", + "xformers>=0.0.22.post7 ; platform_system == 'Windows'", ] huggingface = [ "unsloth_zoo>=2025.2.7", From 996dca380c98387baec07c0786c05cc153dfddbb Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 00:13:11 -0800 Subject: [PATCH 1047/1088] Prelim release --- unsloth/__init__.py | 19 ------ unsloth/kernels/layernorm.py | 8 +-- unsloth/kernels/rms_layernorm.py | 7 +- unsloth/kernels/swiglu.py | 2 +- unsloth/kernels/utils.py | 95 ++++++++++++++------------ unsloth/models/_utils.py | 64 +++++++---------- unsloth/models/gemma.py | 10 +-- unsloth/models/gemma2.py | 13 ++-- unsloth/models/granite.py | 13 ++-- unsloth/models/llama.py | 113 ++++++++++--------------------- unsloth/models/mistral.py | 24 ++++--- unsloth/tokenizer_utils.py | 15 ---- 12 files changed, 152 insertions(+), 231 deletions(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index d18aaac0a0..e33d16577a 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -46,25 +46,6 @@ # 
Fixes https://github.com/unslothai/unsloth/issues/1266 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" -if "CUDA_VISIBLE_DEVICES" in os.environ: - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - devices = os.environ["CUDA_VISIBLE_DEVICES"] - # Check if there are multiple cuda devices set in env - if not devices.isdigit(): - first_id = devices.split(",")[0] - warnings.warn( - f"Unsloth: 'CUDA_VISIBLE_DEVICES' is currently {devices} \n"\ - "Unsloth currently does not support multi GPU setups - but we are working on it!\n"\ - "Multiple CUDA devices detected but we require a single device.\n"\ - f"We will override CUDA_VISIBLE_DEVICES to first device: {first_id}." - ) - os.environ["CUDA_VISIBLE_DEVICES"] = str(first_id) -else: - # warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "0" -pass - # Reduce VRAM usage by reducing fragmentation # And optimize pinning of memory os.environ["PYTORCH_CUDA_ALLOC_CONF"] = \ diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index a5f7926e2e..11f82b8ff3 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -105,10 +105,10 @@ def forward(ctx, X, W, b, eps): X = X.view(-1, dim) n_rows, n_cols = X.shape BLOCK_SIZE, num_warps = calculate_settings(n_cols) - - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") - r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - mu = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + device = X.device + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = device) + r = torch.empty(n_rows, dtype = torch.float32, device = device) + mu = torch.empty(n_rows, dtype = torch.float32, device = device) layernorm_forward[(n_rows,)]( Y, Y.stride(0), diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 6310f7f392..7487c10eeb 100644 --- 
a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -148,9 +148,10 @@ def forward(ctx, X : torch.Tensor, W : torch.Tensor, eps : float, gemma : bool = BLOCK_SIZE : int num_warps : int BLOCK_SIZE, num_warps = calculate_settings(n_cols) + device = X.device - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda:0") - r = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = device) + r = torch.empty(n_rows, dtype = torch.float32, device = device) fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward fx[(n_rows,)]( @@ -180,7 +181,7 @@ def backward(ctx, dY : torch.Tensor): n_cols : int n_rows, n_cols = dY.shape # dW = X - dX = torch.empty_like(dY, device = "cuda:0") if ctx.GEMMA else dY + dX = torch.empty_like(dY) if ctx.GEMMA else dY _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index f81b7aae9b..688e9f9a48 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ -41,7 +41,7 @@ def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def swiglu_fg_kernel(e, g): batch, seq_len, hd = e.shape n_elements = e.numel() - h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda:0") + h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = e.device) grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,) return h diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index f052914f98..f743e12f59 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -61,12 +61,29 @@ def calculate_settings(n : int) -> (int, int,): import bitsandbytes as bnb +import ctypes + # https://github.com/bitsandbytes-foundation/bitsandbytes/pull/1330/files HAS_CUDA_STREAM = Version(bnb.__version__) > Version("0.43.3") -global CUDA_STREAM -CUDA_STREAM = None get_ptr = 
bnb.functional.get_ptr -import ctypes + +# Get array of CUDA streams and other buffers +global CUDA_STREAMS +global WEIGHT_BUFFERS +global ABSMAX_BUFFERS + +_CUDA_STREAMS = { + (index := torch.cuda.device(i).idx) : ctypes.c_void_p(torch._C._cuda_getCurrentRawStream(index)) + for i in range(torch.cuda.device_count()) +} +CUDA_STREAMS = [None] * (max(_CUDA_STREAMS.keys()) + 1) +WEIGHT_BUFFERS = [None] * (max(_CUDA_STREAMS.keys()) + 1) +ABSMAX_BUFFERS = [None] * (max(_CUDA_STREAMS.keys()) + 1) +for k, v in _CUDA_STREAMS.items(): CUDA_STREAMS[k] = v +CUDA_STREAMS = tuple(CUDA_STREAMS) +del _CUDA_STREAMS + +# Bitsandbytes operations ctypes_c_int = ctypes.c_int ctypes_c_int32 = ctypes.c_int32 cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32 @@ -118,11 +135,6 @@ def get_lora_parameters_bias(proj): return W, QUANT_STATE(W), A, B, s, bias pass -global WEIGHT_BUFFER -WEIGHT_BUFFER = None -global ABSMAX_BUFFER -ABSMAX_BUFFER = None - if HAS_CUDA_STREAM: @torch.inference_mode def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False): @@ -145,8 +157,10 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass - global CUDA_STREAM - if CUDA_STREAM is None: CUDA_STREAM = torch.cuda.current_stream("cuda:0") + global CUDA_STREAMS + device = W.device + device_index = device.index + CUDA_STREAM = CUDA_STREAMS[device_index] n_elements_absmax = absmax.numel() @@ -155,11 +169,13 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False # Use same buffers for faster inference size = shape[0]*shape[1] - global WEIGHT_BUFFER - global ABSMAX_BUFFER + global WEIGHT_BUFFERS + global ABSMAX_BUFFERS + WEIGHT_BUFFER = WEIGHT_BUFFERS[device_index] + ABSMAX_BUFFER = ABSMAX_BUFFERS[device_index] if WEIGHT_BUFFER is None: - WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = "cuda:0", requires_grad = False) 
- ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) + WEIGHT_BUFFERS[device_index] = WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = device, requires_grad = False) + ABSMAX_BUFFERS[device_index] = ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) if size > WEIGHT_BUFFER.numel(): WEIGHT_BUFFER.resize_(size) if n_elements_absmax > ABSMAX_BUFFER.numel(): ABSMAX_BUFFER.resize_(n_elements_absmax) @@ -168,11 +184,11 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False out_absmax = ABSMAX_BUFFER[:n_elements_absmax] else: if out is None: - out = torch.empty(shape, dtype = dtype, device = "cuda:0", requires_grad = False) + out = torch.empty(shape, dtype = dtype, device = device, requires_grad = False) else: assert(out.shape == shape) assert(out.dtype == dtype) - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) pass # NF4 dequantization of statistics @@ -217,31 +233,15 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False pass n_elements_absmax = absmax.numel() + device = W.device # Create weight matrix - if use_global_buffer: - - # Use same buffers for faster inference - size = shape[0]*shape[1] - global WEIGHT_BUFFER - global ABSMAX_BUFFER - if WEIGHT_BUFFER is None: - WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = "cuda:0", requires_grad = False) - ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = dtype, device = "cuda:0", requires_grad = False) - - if size > WEIGHT_BUFFER.numel(): WEIGHT_BUFFER.resize_(size) - if n_elements_absmax > ABSMAX_BUFFER.numel(): ABSMAX_BUFFER.resize_(n_elements_absmax) - - out = WEIGHT_BUFFER[:size].view(shape) - out_absmax = ABSMAX_BUFFER[:n_elements_absmax] + if out is 
None: + out = torch.empty(shape, dtype = dtype, device = device, requires_grad = False) else: - if out is None: - out = torch.empty(shape, dtype = dtype, device = "cuda:0", requires_grad = False) - else: - assert(out.shape == shape) - assert(out.dtype == dtype) - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = "cuda:0", requires_grad = False) - pass + assert(out.shape == shape) + assert(out.dtype == dtype) + out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) # Do dequantization ptr_out_absmax = get_ptr(out_absmax) @@ -288,14 +288,16 @@ def fast_gemv(X, W, quant_state, out = None): offset, state2 = compressed_stats absmax2, code2, blocksize2, _, _, _, _ = state2 pass - global CUDA_STREAM - if CUDA_STREAM is None: CUDA_STREAM = torch.cuda.current_stream("cuda:0") + global CUDA_STREAMS + device = W.device + device_index = device.index + CUDA_STREAM = CUDA_STREAMS[device_index] # assert(dtype == X.dtype) bout = shape[0] if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") + out = torch.empty((1, 1, bout,), dtype = dtype, device = device) # else: # assert(out.shape == (1, 1, bout,)) # pass @@ -313,7 +315,7 @@ def fast_gemv(X, W, quant_state, out = None): ldb = ctypes_c_int32(ldb) ldc = ctypes_c_int32(ldc) - df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") + df = torch.empty(absmax.shape, dtype = torch.float32, device = device) cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), CUDA_STREAM, @@ -357,9 +359,10 @@ def fast_gemv(X, W, quant_state, out = None): pass # assert(dtype == X.dtype) bout = shape[0] + device = W.device if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = "cuda:0") + out = torch.empty((1, 1, bout,), dtype = dtype, device = device) # else: # assert(out.shape == (1, 1, bout,)) # pass @@ -377,7 +380,7 @@ 
def fast_gemv(X, W, quant_state, out = None): ldb = ctypes_c_int32(ldb) ldc = ctypes_c_int32(ldc) - df = torch.empty(absmax.shape, dtype = torch.float32, device = "cuda:0") + df = torch.empty(absmax.shape, dtype = torch.float32, device = device) cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), @@ -400,6 +403,7 @@ def fast_gemv(X, W, quant_state, out = None): torch_mm = torch.mm torch_mv = torch.mv torch_matmul = torch.matmul +torch_addmm = torch.addmm def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S, bias = get_lora_parameters_bias(proj) @@ -461,7 +465,8 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): if A is not None: # LoRA is enabled A, B = A.t(), B.t() - out += (X @ A.to(dtype)) @ (s * B.to(dtype)) + out = torch_addmm(X @ A.to(dtype), B.to(dtype), alpha = s, beta = 1.0, out = out) + # out += (X @ A.to(dtype)) @ (s * B.to(dtype)) pass return out.view(batch, seq_len, -1) if reshape else out diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 19b09e803c..5088b79d23 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.2.15" +__version__ = "2025.3.1" __all__ = [ "SUPPORTS_BFLOAT16", @@ -37,7 +37,6 @@ "torch_compile_options", "patch_linear_scaling", "patch_llama_rope_scaling", - "check_nvidia", "create_boolean_mask", "torch_amp_custom_fwd", "torch_amp_custom_bwd", @@ -703,9 +702,7 @@ def get_statistics(): # ============================================= # Fixes Bitsandbytes to remove missing warnings from transformers.utils.quantization_config import BitsAndBytesConfig, QuantizationMethod -from inspect import getsource -from accelerate.utils.dataclasses import DistributedType -BitsAndBytesConfig__init__ = getsource(BitsAndBytesConfig.__init__) +BitsAndBytesConfig__init__ = inspect.getsource(BitsAndBytesConfig.__init__) BitsAndBytesConfig__init__ = re.sub( r"if[\s]{1,}kwargs\:[\s]{1,}.+?\n", "", @@ -719,27 +716,29 @@ def get_statistics(): "__init__", "_BitsAndBytesConfig__init__", ) +exec(BitsAndBytesConfig__init__, globals()) -def _prepare_backend( - self, cpu = False, sagemaker_dp = False, backend: str = None, -) -> tuple[str, DistributedType]: - return None, DistributedType.NO +if torch.cuda.device_count() == 1: + from accelerate.utils.dataclasses import DistributedType + def _prepare_backend( + self, cpu = False, sagemaker_dp = False, backend: str = None, + ) -> tuple[str, DistributedType]: + return None, DistributedType.NO + pass + import accelerate.state + accelerate.state.PartialState._prepare_backend = _prepare_backend + + import accelerate.accelerator + prepare = inspect.getsource(accelerate.accelerator.Accelerator.prepare) + prepare = prepare.split("\n") + spaces = prepare[0].find("def") + prepare = "\n".join(x[spaces:] for x in prepare) + x = "for obj in args:" + s = " "*spaces + prepare = prepare.replace(x, f'self.state.distributed_type = DistributedType.NO\n{s}{x}', 1) + exec(prepare, globals()) + accelerate.accelerator.Accelerator.prepare = prepare pass -import accelerate.state -accelerate.state.PartialState._prepare_backend = _prepare_backend - 
-import accelerate.accelerator -prepare = inspect.getsource(accelerate.accelerator.Accelerator.prepare) -prepare = prepare.split("\n") -spaces = prepare[0].find("def") -prepare = "\n".join(x[spaces:] for x in prepare) -x = "for obj in args:" -s = " "*spaces -prepare = prepare.replace(x, f'self.state.distributed_type = DistributedType.NO\n{s}{x}', 1) -exec(prepare, globals()) -accelerate.accelerator.Accelerator.prepare = prepare - -exec(BitsAndBytesConfig__init__, globals()) import transformers.utils.quantization_config transformers.utils.quantization_config.BitsAndBytesConfig.__init__ = _BitsAndBytesConfig__init__ @@ -963,21 +962,6 @@ def patch_llama_rope_scaling( pass -def check_nvidia(): - # Unsloth doesn't work yet on AMD devices - we're working on it! - output = np.array([0,]) - try: - output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) - output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) - output = np.array([int(x.decode('utf-8'))/1024 for x in output]) - except: - if not torch.cuda.is_available(): - raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") - return output -pass -PRE_CHECK = check_nvidia() - - def create_boolean_mask(n = 4096, sliding_window = 2048): # Creates a boolean mask for attention mask = torch.ones(n, n, dtype = torch.bool) @@ -1122,8 +1106,6 @@ def patch_gradient_accumulation_fix(Trainer): items_in_trainer = dir(transformers.trainer) good_items = [] for item in items_in_trainer: - # TODO: Support Deepspeed - if item.startswith(("deepspeed", "xm", "met", "smp")): continue if item in function: good_items.append(item) pass exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) diff --git a/unsloth/models/gemma.py b/unsloth/models/gemma.py index bc29c46abc..873bdcf2eb 100644 --- a/unsloth/models/gemma.py +++ b/unsloth/models/gemma.py @@ -245,8 +245,8 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): 
emb = torch.cat((radians_new, radians_new), dim = -1) # We must do RoPE in float32! - cos = emb.cos().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) - sin = emb.sin().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) + cos = emb.cos().to(device = "cuda", non_blocking = True)#, dtype = dtype) + sin = emb.sin().to(device = "cuda", non_blocking = True)#, dtype = dtype) self.register_buffer("cos_cached", cos, persistent = False) self.register_buffer("sin_cached", sin, persistent = False) pass @@ -270,7 +270,7 @@ def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 self.current_rope_size = math.ceil(seq_len / 8192) * 8192 - self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + self._set_cos_sin_cache(self.current_rope_size, device = "cuda", dtype = x.dtype) pass pass @@ -304,8 +304,8 @@ def _set_cos_sin_cache(self, seq_len, device, dtype): emb = torch.cat((radians_new, radians_new), dim = -1) # We must do RoPE in float32! 
- cos = emb.cos().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) - sin = emb.sin().to(device = "cuda:0", non_blocking = True)#, dtype = dtype) + cos = emb.cos().to(device = "cuda", non_blocking = True)#, dtype = dtype) + sin = emb.sin().to(device = "cuda", non_blocking = True)#, dtype = dtype) self.register_buffer("cos_cached", cos, persistent = False) self.register_buffer("sin_cached", sin, persistent = False) pass diff --git a/unsloth/models/gemma2.py b/unsloth/models/gemma2.py index be6b0469d9..316b4e8f0e 100644 --- a/unsloth/models/gemma2.py +++ b/unsloth/models/gemma2.py @@ -265,21 +265,22 @@ def Gemma2Attention_fast_forward_inference( attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 + device = hidden_states.device # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") - self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") - self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device) + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) # Only for Gemma2 - self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") - 
self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device) + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = device) # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index fb7e96d8d2..bfaea9a555 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -274,21 +274,22 @@ def GraniteAttention_fast_forward_inference( attention_size = n_heads*head_dim seq_len = K1.shape[-2] kv_seq_len = seq_len + 1 + device = hidden_states.device # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") - self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") - self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) # Only for Gemma2 - self.temp_O = torch.empty((1, bsz, hidden_size), dtype = 
dtype, device = "cuda:0") - self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device) + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = device) self.half_head_dim = head_dim // 2 diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9515a41cd9..815bad8a3e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -167,24 +167,25 @@ def LlamaAttention_fast_forward_inference( # Prefill phase # if not hasattr(self, "paged_attention"): + device = hidden_states.device if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = "cuda:0") + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0") - self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = "cuda:0") - self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0") + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device) + self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) + self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) # Mistral Nemo 12b has weird dimensions if attention_size != hidden_size: - self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = "cuda:0") + self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device) else: self.temp_O = 
self.temp_QA[1][:,:,:hidden_size] pass - self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = "cuda:0") + self.attention = torch.empty((bsz, n_heads, 1, KV_CACHE_INCREMENT+seq_len), dtype = dtype, device = device) self.scalar = 1.0 / math_sqrt(self.head_dim) self.half_head_dim = head_dim // 2 elif kv_seq_len >= self.paged_attention.shape[0]: @@ -813,13 +814,13 @@ def LlamaModel_fast_forward( is_causal = True, sliding_window = self.config.sliding_window, )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda",)\ .squeeze(0).squeeze(0) self.GA_mask = AttentionMaskConverter( is_causal = True, )\ - .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda:0",)\ + .to_causal_4d(1, n, n, dtype = inputs_embeds.dtype, device = "cuda",)\ .squeeze(0).squeeze(0) pass pass @@ -1075,10 +1076,16 @@ def _CausalLM_fast_forward( bsz, q_len, hd = hidden_states.shape lm_head = self.lm_head.weight + lm_head_device = lm_head.device + logit_softcapping = getattr(self.config, "final_logit_softcapping", 0) logit_scaling = getattr(self.config, "logit_scale", 0) dtype = lm_head.dtype num_logits_to_keep = max(num_logits_to_keep, logits_to_keep) + + # Move items to same device as lm_head + hidden_states = hidden_states.to(lm_head_device) + if labels is not None: labels = labels.to(lm_head_device) # Output last hidden states without logits if asked if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": @@ -1148,11 +1155,14 @@ def _CausalLM_fast_forward( if labels is not None: shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - pass - shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + # if not hasattr(self, "extra_ignored_labels"): + 
# # Fixes https://github.com/unslothai/unsloth/issues/10 + # self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + # pass + shift_labels = torch.empty_like(labels) + shift_labels[..., :-1] = labels[..., 1:] + shift_labels[..., -1] = -100 + # shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, @@ -1297,7 +1307,7 @@ def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 - self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + self._set_cos_sin_cache(self.current_rope_size, device = "cuda", dtype = x.dtype) pass pass @@ -1423,7 +1433,7 @@ def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 - self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + self._set_cos_sin_cache(self.current_rope_size, device = "cuda", dtype = x.dtype) pass pass @@ -1538,7 +1548,7 @@ def extend_rope_embedding(self, x, seq_len): if seq_len <= self.current_rope_size: return # Iteratively grow by increments of 8192 self.current_rope_size = ((seq_len // 8192) + ((seq_len % 8192) != 0)) * 8192 - self._set_cos_sin_cache(self.current_rope_size, device = "cuda:0", dtype = x.dtype) + self._set_cos_sin_cache(self.current_rope_size, device = "cuda", dtype = x.dtype) pass pass @@ -1771,8 +1781,6 @@ def from_pretrained( # Add to kwargs kwargs["rope_scaling"] = rope_scaling pass - # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
- pre_check = check_nvidia() bnb_config = None if load_in_4bit: @@ -1840,8 +1848,6 @@ def from_pretrained( pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer - # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! - post_check = check_nvidia() # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name @@ -1882,8 +1888,6 @@ def from_pretrained( items_in_trainer = dir(transformers.trainer) good_items = [] for item in items_in_trainer: - # TODO: Support Deepspeed - if item.startswith(("deepspeed", "xm", "met", "smp")): continue if item in inner_training_loop: good_items.append(item) pass exec("from transformers.trainer import (" + ", ".join(x for x in good_items) + ")", globals()) @@ -1903,17 +1907,7 @@ def from_pretrained( f"{chr(92)} / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' logger.warning(debug_info) - import subprocess, re, gc, numpy as np - a = np.array([0,]) - try: - a = subprocess.check_output('nvidia-smi --query-gpu=memory.used --format=csv', shell = True) - a = re.findall(rb'([\\d]{1,})[\\s]{1,}M', a) - a = np.array([int(x.decode('utf-8'))/1024 for x in a]) - except: - if not torch.cuda.is_available(): - raise RuntimeError('Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!') - if ((a - PRE_CHECK) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + import subprocess, re, gc for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1925,7 +1919,7 @@ def from_pretrained( debug_info = """n_total_devices = total_train_batch_size // \\ args.gradient_accumulation_steps // self._train_batch_size if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') + 
logger.warning_once('Unsloth is running with multi GPUs - the effective batch size is multiplied by ' + str(n_total_devices)) debug_info =""" debug_info = debug_info.split('\n') debug_info = "\n".join([debug_info[0]] + [spaces + x[8:] for x in debug_info[1:]]) @@ -1937,31 +1931,6 @@ def from_pretrained( "train_dataloader = tpu_spmd_dataloader(train_dataloader)", "raise RuntimeError('Unsloth: TPUs are not yet supported!')" ) - inner_training_loop = inner_training_loop.replace( - "self.accelerator.free_memory()", - "self.accelerator.free_memory()\n" + \ - front_spaces + "if self.is_deepspeed_enabled:"\ - "raise RuntimeError('Unsloth: Deepspeed is not yet supported!')\n", 1, - ) - - check_batches = """train_dataloader = self.get_train_dataloader() - ga = args.gradient_accumulation_steps - bsz = self._train_batch_size - total_batches = bsz * ga * args.world_size - n_total_devices = total_batches // ga // bsz - if n_total_devices > 1: - logger.warning_once('Unsloth currently does not support multi GPU setups - but we are working on it!') - divisor = n_total_devices / 1 - bsz = self._train_batch_size = max(int(bsz / divisor), 1) - if total_batches // ga // bsz > 1: - divisor = n_total_devices / 1 - ga = args.gradient_accumulation_steps = max(int(ga / divisor), 1)""" - check_batches = check_batches.split('\n') - check_batches = "\n".join([check_batches[0]] + [front_spaces + x[8:] for x in check_batches[1:]]) - inner_training_loop = inner_training_loop.replace( - "train_dataloader = self.get_train_dataloader()", - check_batches, 1, - ) inner_training_loop = inner_training_loop.replace( "_inner_training_loop", "_fast_inner_training_loop", 1, @@ -1973,13 +1942,6 @@ def from_pretrained( "is_torch_tpu_available()", "False", ) - if "n_total_devices >" not in inner_training_loop: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - pass - inner_training_loop = inner_training_loop.replace( - "is_sagemaker_mp_enabled()", - 
"False", - ) exec(inner_training_loop, globals()) Trainer._inner_training_loop = _fast_inner_training_loop @@ -2136,7 +2098,7 @@ def get_peft_model( pass model.get_input_embeddings().modules_to_save.default\ - .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + .to(device = "cuda", dtype = new_dtype, non_blocking = True) model.get_input_embeddings().modules_to_save.default.requires_grad_(True) # [TODO] Move old embed_tokens to CPU - should be disk! @@ -2156,7 +2118,7 @@ def get_peft_model( pass model.get_output_embeddings().modules_to_save.default\ - .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + .to(device = "cuda", dtype = new_dtype, non_blocking = True) model.get_output_embeddings().modules_to_save.default.requires_grad_(True) # [TODO] Move old lm_head to CPU - should be disk! @@ -2413,7 +2375,7 @@ def get_peft_model( pass model.get_input_embeddings().modules_to_save.default\ - .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + .to(device = "cuda", dtype = new_dtype, non_blocking = True) model.get_input_embeddings().modules_to_save.default.requires_grad_(True) pass @@ -2429,7 +2391,7 @@ def get_peft_model( pass model.get_output_embeddings().modules_to_save.default\ - .to(device = "cuda:0", dtype = new_dtype, non_blocking = True) + .to(device = "cuda", dtype = new_dtype, non_blocking = True) model.get_output_embeddings().modules_to_save.default.requires_grad_(True) pass @@ -2515,12 +2477,7 @@ def patch_peft_model( from transformers.trainer import Trainer if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - raise RuntimeError( - 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ - 'enabling it will require much more work, so we have to prioritize. Please understand!\n'\ - 'We do have a separate beta version, which you can contact us about!\n'\ - 'Thank you for your understanding and we appreciate it immensely!' 
- ) + raise RuntimeError("Unsloth: Unsuccessfully patched Trainer! Please file a bug report!") pass # Fix loftq issues @@ -2636,8 +2593,8 @@ def patch_peft_model( # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 max_seq_length = model.max_seq_length - extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") - model.model.extra_ignored_labels = extra_ignored_labels + # extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda:0") + # model.model.extra_ignored_labels = extra_ignored_labels internal_model = model while hasattr(internal_model, "model"): internal_model.max_seq_length = max_seq_length diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 779ff83496..303c3d9589 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -235,6 +235,14 @@ def MistralForCausalLM_fast_forward( hidden_states = outputs[0] + bsz, q_len, hd = hidden_states.shape + lm_head = self.lm_head.weight + lm_head_device = lm_head.device + + # Move items to same device as lm_head + hidden_states = hidden_states.to(lm_head_device) + if labels is not None: labels = labels.to(lm_head_device) + # If we are in GRPO mode, return raw hidden states if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": num_logits_to_keep = max(num_logits_to_keep, logits_to_keep) @@ -249,8 +257,6 @@ def MistralForCausalLM_fast_forward( ) pass - bsz, q_len, hd = hidden_states.shape - lm_head = self.lm_head.weight if bsz == 1 and q_len == 1: logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype)) logits = logits.unsqueeze(0).unsqueeze(0) @@ -292,12 +298,14 @@ def MistralForCausalLM_fast_forward( loss = None if labels is not None: shift_logits = logits - if not hasattr(self, "extra_ignored_labels"): - # Fixes https://github.com/unslothai/unsloth/issues/10 - self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") - pass - - shift_labels = 
torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + # if not hasattr(self, "extra_ignored_labels"): + # # Fixes https://github.com/unslothai/unsloth/issues/10 + # self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda:0") + # pass + # shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + shift_labels = torch.empty_like(labels) + shift_labels[..., :-1] = labels[..., 1:] + shift_labels[..., -1] = -100 loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 048bee7797..9c5f825a0c 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -857,21 +857,6 @@ def check_tokenizer( pass -def check_nvidia(): - # Unsloth doesn't work yet on AMD devices - we're working on it! - output = np.array([0,]) - try: - output = subprocess.check_output("nvidia-smi --query-gpu=memory.used --format=csv", shell = True) - output = re.findall(rb'([\d]{1,})[\s]{1,}M', output) - output = np.array([int(x.decode('utf-8'))/1024 for x in output]) - except: - if not torch.cuda.is_available(): - raise RuntimeError("Unsloth: We do not support AMD / Intel machines yet - it is a work in progress!") - return output -pass -PRE_CHECK = check_nvidia() - - import inspect from inspect import getsource import trl.trainer.sft_trainer From a777cd4a8f9c9e4082534d2d4a1f311614aa4fcd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 00:19:12 -0800 Subject: [PATCH 1048/1088] Update granite.py --- unsloth/models/granite.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index bfaea9a555..b268d744aa 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -279,12 +279,12 @@ def GraniteAttention_fast_forward_inference( # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - 
self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device)) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3) - self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device + self.temp_QA = torch.empty((2, bsz, 1, attention_size), dtype = dtype, device = device) self.temp_KV = torch.empty((2, bsz, 1, n_kv_heads*head_dim), dtype = dtype, device = device) self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device) # Only for Gemma2 From deff934f4673b4b278185544f6964e79d05d1291 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 00:20:34 -0800 Subject: [PATCH 1049/1088] Update granite.py --- unsloth/models/granite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/granite.py b/unsloth/models/granite.py index b268d744aa..df498d18ba 100644 --- a/unsloth/models/granite.py +++ b/unsloth/models/granite.py @@ -279,7 +279,7 @@ def GraniteAttention_fast_forward_inference( # Prefill phase # if not hasattr(self, "paged_attention"): if do_prefill: - self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device)) + self.paged_attention = torch.empty((KV_CACHE_INCREMENT+seq_len+1, 2, bsz, n_kv_heads, head_dim), dtype = dtype, device = device) self.paged_attention_K = self.paged_attention[:,0] self.paged_attention_V = self.paged_attention[:,1] self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3) From c4ac03cf4c43f9b50540b2b3ea4f4ccc0a36c7d7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 00:21:28 -0800 Subject: [PATCH 1050/1088] Update 
_utils.py --- unsloth/models/_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 5088b79d23..cca77bb60b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -25,7 +25,6 @@ "__version__", "HAS_FLASH_ATTENTION", "HAS_FLASH_ATTENTION_SOFTCAPPING", - "PRE_CHECK", "platform_system", "patch_tokenizer", "get_statistics", From 60d173322b676035fcc280d648c01d37bad534bd Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 00:23:20 -0800 Subject: [PATCH 1051/1088] Update llama.py --- unsloth/models/llama.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 815bad8a3e..fe0627f8d7 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1880,10 +1880,7 @@ def from_pretrained( except: raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') pass - - if ((post_check - pre_check) >= 1).sum() > 1: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') - + import transformers.trainer items_in_trainer = dir(transformers.trainer) good_items = [] From 91598a6ee8ecda6dbaa2c9fd1ea9c75719da54a6 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 02:52:20 -0800 Subject: [PATCH 1052/1088] LoRA --- unsloth/kernels/fast_lora.py | 137 +++++++++++++++++++++++------------ unsloth/kernels/utils.py | 3 +- 2 files changed, 94 insertions(+), 46 deletions(-) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index c2b7929a29..a4fb2a89b6 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -98,9 +98,6 @@ def backward(ctx, dY : torch.Tensor): gateA, gateB, upA, upB, downA, downB, \ X, e, g = ctx.saved_tensors - gateA, gateB, upA, upB, downA, downB = \ - gateA.t(), gateB.t(), upA.t(), upB.t(), downA.t(), downB.t() - batch, seq_len, hd = X.shape dY = dY.view(-1, 
dY.shape[-1]) X = X .view(-1, X .shape[-1]) @@ -108,39 +105,61 @@ def backward(ctx, dY : torch.Tensor): g = g .view(-1, g .shape[-1]) dtype = X.dtype + gateA, gateB, upA, upB, downA, downB = \ + gateA.to(dtype), gateB.to(dtype), upA.to(dtype), upB.to(dtype), downA.to(dtype), downB.to(dtype) + + gateA, gateB, upA, upB, downA, downB = \ + gateA.t(), gateB.t(), upA.t(), upB.t(), downA.t(), downB.t() + DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) DW, e, g = _backward_function(DW, e, g) h, df, de = DW, e, g + d_downA = torch.empty_like(downA) + d_downB = torch.empty_like(downB) + d_gateA = torch.empty_like(gateA) + d_gateB = torch.empty_like(gateB) + d_upA = torch.empty_like(upA) + d_upB = torch.empty_like(upB) + # Down projection LoRA weights - d_downA = h.t() @ (dY @ downB.t()) - d_downB = (downA.t() @ h.t()) @ dY - d_downA *= downS - d_downB *= downS + # d_downA = h.t() @ (dY @ downB.t()) + # d_downB = (downA.t() @ h.t()) @ dY + # d_downA *= downS + # d_downB *= downS + d_downA.addmm_(h.t(), dY @ downB.t(), alpha = downS, beta = 0) + d_downB.addmm_(downA.t() @ h.t(), dY, alpha = downS, beta = 0) # Up projection LoRA weights - d_upA = X.t() @ (df @ upB.t()) - d_upB = (upA.t() @ X.t()) @ df - d_upA *= upS - d_upB *= upS + # d_upA = X.t() @ (df @ upB.t()) + # d_upB = (upA.t() @ X.t()) @ df + # d_upA *= upS + # d_upB *= upS + d_upA.addmm_(X.t(), df @ upB.t(), alpha = upS, beta = 0) + d_upB.addmm_(upA.t() @ X.t(), df, alpha = upS, beta = 0) # Gate projection LoRA weights - d_gateA = X.t() @ (de @ gateB.t()) - d_gateB = (gateA.t() @ X.t()) @ de - d_gateA *= gateS - d_gateB *= gateS + # d_gateA = X.t() @ (de @ gateB.t()) + # d_gateB = (gateA.t() @ X.t()) @ de + # d_gateA *= gateS + # d_gateB *= gateS + d_gateA.addmm_(X.t(), de @ gateB.t(), alpha = gateS, beta = 0) + d_gateB.addmm_(gateA.t() @ X.t(), de, alpha = gateS, beta = 0) # dX = matmul_lora(df, upW.t(), upW_quant, upB, upA, upS) # dX += matmul_lora(de, gateW.t(), gateW_quant, gateB, gateA, gateS) 
upW = fast_dequantize(upW.t(), upW_quant) dX = torch.matmul(df, upW.t(), out = X if ctx.inplace else None) del upW - dX += df @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) + # dX += df @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) + dX.addmm_(df @ upB.t(), upA.t(), alpha = upS) gateW = fast_dequantize(gateW.t(), gateW_quant) - dX += de @ gateW.t() + # dX += de @ gateW.t() + dX.addmm_(de, gateW.t()) del gateW - dX += de @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) + # dX += de @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) + dX.addmm_(de @ gateB.t(), gateA.t(), alpha = gateS) # gateW, gateW_quant, gateA, gateB, gateS, # upW, upW_quant, upA, upB, upS, @@ -258,9 +277,6 @@ def backward(ctx, dQ, dK, dV): ctx.custom_saved_tensors X, QA, QB, KA, KB, VA, VB, = ctx.saved_tensors - QA, QB, KA, KB, VA, VB = \ - QA.t(), QB.t(), KA.t(), KB.t(), VA.t(), VB.t() - batch, seq_len, hd = X.shape dQ = dQ.view(-1, dQ.shape[-1]) dK = dK.reshape(-1, dK.shape[-1]) # view doesn't work on K.T @@ -268,45 +284,68 @@ def backward(ctx, dQ, dK, dV): X = X .view(-1, X .shape[-1]) dtype = X.dtype + QA, QB, KA, KB, VA, VB = \ + QA.to(dtype), QB.to(dtype), KA.to(dtype), KB.to(dtype), VA.to(dtype), VB.to(dtype) + + QA, QB, KA, KB, VA, VB = \ + QA.t(), QB.t(), KA.t(), KB.t(), VA.t(), VB.t() + ### Weight projection LoRA weights # See our blogpost for more details. 
+ d_QA = torch.empty_like(QA) + d_QB = torch.empty_like(QB) + d_KA = torch.empty_like(KA) + d_KB = torch.empty_like(KB) + d_VA = torch.empty_like(VA) + d_VB = torch.empty_like(VB) # Q Projection - d_QA = X.t() @ (dQ @ QB.t()) - d_QB = (QA.t() @ X.t()) @ dQ - d_QA *= QS - d_QB *= QS + # d_QA = X.t() @ (dQ @ QB.t()) + # d_QB = (QA.t() @ X.t()) @ dQ + # d_QA *= QS + # d_QB *= QS + d_QA.addmm_(X.t(), dQ @ QB.t(), alpha = QS, beta = 0) + d_QB.addmm_(QA.t() @ X.t(), dQ, alpha = QS, beta = 0) # K Projection - d_KA = X.t() @ (dK @ KB.t()) - d_KB = (KA.t() @ X.t()) @ dK - d_KA *= KS - d_KB *= KS + # d_KA = X.t() @ (dK @ KB.t()) + # d_KB = (KA.t() @ X.t()) @ dK + # d_KA *= KS + # d_KB *= KS + d_KA.addmm_(X.t(), dK @ KB.t(), alpha = KS, beta = 0) + d_KB.addmm_(KA.t() @ X.t(), dK, alpha = KS, beta = 0) # V Projection - d_VA = X.t() @ (dV @ VB.t()) - d_VB = (VA.t() @ X.t()) @ dV - d_VA *= VS - d_VB *= VS + # d_VA = X.t() @ (dV @ VB.t()) + # d_VB = (VA.t() @ X.t()) @ dV + # d_VA *= VS + # d_VB *= VS + d_VA.addmm_(X.t(), dV @ VB.t(), alpha = VS, beta = 0) + d_VB.addmm_(VA.t() @ X.t(), dV, alpha = VS, beta = 0) # Combine derivatives to find dX # dQ QW = fast_dequantize(QW.t(), QW_quant) dX = torch.matmul(dQ, QW.t(), out = X if ctx.inplace else None) del QW - dX += (dQ @ QB.to(dtype).t() @ (QS * QA.to(dtype).t())) + # dX += (dQ @ QB.to(dtype).t() @ (QS * QA.to(dtype).t())) + dX.addmm_(dQ @ QB.t(), QA.t(), alpha = QS) # dK KW = fast_dequantize(KW.t(), KW_quant) - dX += dK @ KW.t() + # dX += dK @ KW.t() + dX.addmm_(dK, KW.t()) del KW - dX += dK @ KB.to(dtype).t() @ (KS * KA.to(dtype).t()) + # dX += dK @ KB.to(dtype).t() @ (KS * KA.to(dtype).t()) + dX.addmm_(dK @ KB.t(), KA.t(), alpha = KS) # dV VW = fast_dequantize(VW.t(), VW_quant) - dX += dV @ VW.t() + # dX += dV @ VW.t() + dX.addmm_(dV, VW.t()) del VW - dX += dV @ VB.to(dtype).t() @ (VS * VA.to(dtype).t()) + # dX += dV @ VB.to(dtype).t() @ (VS * VA.to(dtype).t()) + dX.addmm_(dV @ VB.t(), VA.t(), alpha = VS) # QW, QW_quant, QA, QB, 
QS, # KW, KW_quant, KA, KB, KS, @@ -378,25 +417,33 @@ def backward(ctx, dY : torch.Tensor): W, W_quant, S = ctx.custom_saved_tensors A, B, X = ctx.saved_tensors - A, B = A.t(), B.t() - batch, seq_len, hd = X.shape dY = dY.reshape(-1, dY.shape[-1]) # Must be reshape X = X .reshape(-1, X .shape[-1]) # Must be reshape dtype = X.dtype + A, B = A.to(dtype), B.to(dtype) + + A, B = A.t(), B.t() + + d_A = torch.empty_like(A) + d_B = torch.empty_like(B) + ### Weight projection LoRA weights # Weight projection - d_A = X.t() @ (dY @ B.t()) - d_B = (A.t() @ X.t()) @ dY - d_A *= S - d_B *= S + # d_A = X.t() @ (dY @ B.t()) + # d_B = (A.t() @ X.t()) @ dY + # d_A *= S + # d_B *= S + d_A.addmm_(X.t(), dY @ B.t(), alpha = S, beta = 0) + d_B.addmm_(A.t() @ X.t(), dY, alpha = S, beta = 0) # Get derivative for dX W = fast_dequantize(W.t(), W_quant) dX = dY @ W.t() del W - dX += dY @ B.to(dtype).t() @ (S * A.to(dtype).t()) + # dX += dY @ B.to(dtype).t() @ (S * A.to(dtype).t()) + dX.addmm_(dY @ B.t(), A.t(), alpha = S) # W, W_quant, A, B, S return dX.view(batch, seq_len, hd), \ diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index f743e12f59..985adaaa44 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -465,7 +465,8 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): if A is not None: # LoRA is enabled A, B = A.t(), B.t() - out = torch_addmm(X @ A.to(dtype), B.to(dtype), alpha = s, beta = 1.0, out = out) + XA = torch_matmul(X, A.to(dtype)) + out.addmm_(XA, B.to(dtype), alpha = s) # out += (X @ A.to(dtype)) @ (s * B.to(dtype)) pass From 1e82600104141c5f4f7b938fb63c106c93305f1b Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 1 Mar 2025 13:47:01 -0800 Subject: [PATCH 1053/1088] Update vision.py --- unsloth/models/vision.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 31c6394f5d..d13d394669 100644 --- a/unsloth/models/vision.py +++ 
b/unsloth/models/vision.py @@ -1,18 +1,16 @@ -# Unsloth Zoo - Utilities for Unsloth # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# http://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import torch from transformers import ( @@ -123,9 +121,6 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! - pre_check = check_nvidia() - bnb_config = None if load_in_4bit: bnb_config = BitsAndBytesConfig( @@ -154,8 +149,6 @@ def from_pretrained( ) # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer - # We currently only support NVIDIA GPUs - AMD / Intel is a work in progress! 
- post_check = check_nvidia() # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name From bda3978287da90c452ba18c3345228829ad09dcc Mon Sep 17 00:00:00 2001 From: Mohamed Mekkouri <93391238+MekkCyber@users.noreply.github.com> Date: Sat, 1 Mar 2025 23:23:22 +0100 Subject: [PATCH 1054/1088] Fix Layernorm when num_cols not a power of 2 (#1867) * fix * Update layernorm.py --------- Co-authored-by: Daniel Han --- unsloth/kernels/layernorm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 11f82b8ff3..ffcc5cc13c 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -49,7 +49,8 @@ def layernorm_forward( b_row = tl.load(b + col_offsets, mask = mask, other = 0).to(tl.float32) mean_X = tl.sum(X_row, axis = 0) / n_cols - XX = X_row - mean_X + # (X[0] - mean) == -mean so we need to mask it out + XX = tl.where(mask, X_row - mean_X, 0) row_var = tl.sum(XX * XX, axis = 0) / n_cols inv_var = tl.math.rsqrt(row_var + eps) tl.store (r, inv_var) From 3a0d3d58d2c31d9d04bab2d712b59bb75a1e2e3b Mon Sep 17 00:00:00 2001 From: "J. M Areeb Uzair" <142584764+areebuzair@users.noreply.github.com> Date: Sun, 2 Mar 2025 17:48:21 +0600 Subject: [PATCH 1055/1088] Added Python version warning to Windows Install Section (#1872) I spent half a day on the wrong Python version, so I am adding this big, red sign. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0a13ac47c4..5b2dd6f129 100644 --- a/README.md +++ b/README.md @@ -169,6 +169,8 @@ print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://git ``` ## Windows Installation +> [!warning] +> Python 3.13 does not support Unsloth. Use 3.12, 3.11 or 3.10 ### Step 1: NVIDIA Video Driver You should install the latest version of your GPUs driver. 
You can download drivers here: From eb04ec464b6dd5a42cac7db9a26cf1b04342cdc4 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sun, 2 Mar 2025 20:34:36 -0800 Subject: [PATCH 1056/1088] Update README.md --- README.md | 130 +++++++++++++++++++++++++----------------------------- 1 file changed, 60 insertions(+), 70 deletions(-) diff --git a/README.md b/README.md index 5b2dd6f129..a291abb765 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,14 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - This [continued pretraining notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) is for learning another language - Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. +## ⚡ Quickstart + +- **Install with pip (recommended)** for Linux devices: +``` +pip install unsloth +``` +For Windows install instructions, see [here](https://github.com/unslothai/unsloth/edit/main/README.md#windows-installation). + ## 🦥 Unsloth.ai News - 📣 NEW! Introducing Long-context [Reasoning (GRPO)](https://unsloth.ai/blog/grpo) in Unsloth. You can now reproduce DeepSeek-R1's "aha" moment with just 5GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! - 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now! More details: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). @@ -74,17 +82,63 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and - All kernels written in [OpenAI's Triton](https://openai.com/index/triton/) language. **Manual backprop engine**. - **0% loss in accuracy** - no approximation methods - all exact. - No change of hardware. 
Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. -- Works on **Linux** and **Windows** via WSL. +- Works on **Linux** and **Windows** - Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). - If you trained a model with 🦥Unsloth, you can use this cool sticker!   ## 💾 Install Unsloth +You can also see our documentation for more detailed installation and updating instructions [here](https://docs.unsloth.ai/get-started/installing-+-updating). -- **Install with pip (recommended)** for Linux devices: +### Pip Installation +**Install with pip (recommended) for Linux devices:** ``` pip install unsloth ``` -See below for Windows install instructions: +See [here](https://github.com/unslothai/unsloth/edit/main/README.md#advanced-pip-installation) for advanced pip install instructions. +### Windows Installation +> [!warning] +> Python 3.13 does not support Unsloth. Use 3.12, 3.11 or 3.10 + +1. **Install NVIDIA Video Driver:**: + You should install the latest version of your GPUs driver. Download drivers here: [NVIDIA GPU Drive](https://www.nvidia.com/Download/index.aspx) + +3. **Install Visual Studio C++:** + You will need Visual Studio, with C++ installed. By default, C++ is not installed with Visual Studio, so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. [Visual Studio Community Edition](https://visualstudio.microsoft.com/vs/community/) + +5. **Install CUDA Toolkit:** + Follow the instructions to install [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive) + +6. **Install PyTorch:** + You will need the correct version of PyTorch that is compatibile with your CUDA drivers, so make sure to select them carefully. + [Install PyTorch](https://pytorch.org/get-started/locally/) + +7. 
**Install Unsloth:** + +```python +pip install "unsloth[windows] @ git+https://github.com/unslothai/unsloth.git" +``` + +#### Notes +To run Unsloth directly on Windows: +- Install Triton from this Windows fork and follow the instructions [here](https://github.com/woct0rdho/triton-windows) (be aware that the Windows fork requires PyTorch >= 2.4 and CUDA 12) +- In the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: +```python +trainer = SFTTrainer( + dataset_num_proc=1, + ... +) +``` + +#### Advanced/Troubleshooting + +For **advanced installation instructions** or if you see weird errors during installations: + +1. Install `torch` and `triton`. Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` +2. Confirm if CUDA is installated correctly. Try `nvcc`. If that fails, you need to install `cudatoolkit` or CUDA drivers. +3. Install `xformers` manually. You can try installing `vllm` and seeing if `vllm` succeeds. Check if `xformers` succeeded with `python -m xformers.info` Go to https://github.com/facebookresearch/xformers. Another option is to install `flash-attn` for Ampere GPUs. +4. Double check that your versions of Python, CUDA, CUDNN, `torch`, `triton`, and `xformers` are compatible with one another. The [PyTorch Compatibility Matrix](https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix) may be useful. +5. Finally, install `bitsandbytes` and check it with `python -m bitsandbytes` + ### Conda Installation (Optional) `⚠️Only use Conda if you have it. If not, use Pip`. Select either `pytorch-cuda=11.8,12.1` for CUDA 11.8 or CUDA 12.1. We support `python=3.10,3.11,3.12`. ```bash @@ -111,7 +165,7 @@ pip install unsloth ```
    -### Pip Installation +### Advanced Pip Installation `⚠️Do **NOT** use this if you have Conda.` Pip is a bit more complex since there are dependency issues. The pip command is different for `torch 2.2,2.3,2.4,2.5` and CUDA versions. For other torch versions, we support `torch211`, `torch212`, `torch220`, `torch230`, `torch240` and for CUDA versions, we support `cu118` and `cu121` and `cu124`. For Ampere devices (A100, H100, RTX3090) and above, use `cu118-ampere` or `cu121-ampere` or `cu124-ampere`. @@ -168,71 +222,7 @@ x = x.format(cuda.replace(".", ""), "-ampere" if is_ampere else "") print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git"') ``` -## Windows Installation -> [!warning] -> Python 3.13 does not support Unsloth. Use 3.12, 3.11 or 3.10 -### Step 1: NVIDIA Video Driver - -You should install the latest version of your GPUs driver. You can download drivers here: - - [NVIDIA GPU Drive Download](https://www.nvidia.com/Download/index.aspx) - -### Step 2: Visual Studio C++ -You will need Visual Studio, with C++ installed. By default, C++ is not installed with Visual Studio, so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. - - [Visual Studio Community Edition](https://visualstudio.microsoft.com/vs/community/) - - - - - -
    - VSCode C++ Ref Image - -
    -

    Steps to configure VS C++

    -
    -
      -
    1. Launch the Installer downloaded from the link above.
    2. -
    3. In the installer, navigate to Individual components and select all the options mentioned in the image.
    4. -
    5. Click on install now.
    6. -
    -
    - -### Step 3: CUDA Toolkit - - - [Download CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive) - -### Step 4: Install PyTorch - -You will need the correct version of PyTorch that is compatibile with your CUDA drivers, so make sure to select them carefully - - [Install PyTorch](https://pytorch.org/get-started/locally/) - -### Step 5: Install Unsloth -```python -pip install "unsloth[windows] @ git+https://github.com/unslothai/unsloth.git" -``` - -### Side note -To run Unsloth directly on Windows: -- Install Triton from this Windows fork and follow the instructions: https://github.com/woct0rdho/triton-windows (be aware that the Windows fork requires PyTorch >= 2.4 and CUDA 12) -- In the SFTTrainer, set `dataset_num_proc=1` to avoid a crashing issue: -```python -trainer = SFTTrainer( - dataset_num_proc=1, - ... -) -``` - -### Advanced/Troubleshooting - -For **advanced installation instructions** or if you see weird errors during installations: - -1. Install `torch` and `triton`. Go to https://pytorch.org to install it. For example `pip install torch torchvision torchaudio triton` -2. Confirm if CUDA is installated correctly. Try `nvcc`. If that fails, you need to install `cudatoolkit` or CUDA drivers. -3. Install `xformers` manually. You can try installing `vllm` and seeing if `vllm` succeeds. Check if `xformers` succeeded with `python -m xformers.info` Go to https://github.com/facebookresearch/xformers. Another option is to install `flash-attn` for Ampere GPUs. -4. Double check that your versions of Python, CUDA, CUDNN, `torch`, `triton`, and `xformers` are compatible with one another. The [PyTorch Compatibility Matrix](https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix) may be useful. -5. 
Finally, install `bitsandbytes` and check it with `python -m bitsandbytes` - -## 📜 [Documentation](https://docs.unsloth.ai) +## 📜 Documentation - Go to our official [Documentation](https://docs.unsloth.ai) for saving to GGUF, checkpointing, evaluation and more! - We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! - We're in 🤗Hugging Face's official docs! Check out the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! @@ -439,8 +429,8 @@ You can cite the Unsloth repo as follows: ``` ### Thank You to +- Hugging Face's [TRL library](https://github.com/huggingface/trl) which serves as the basis foundation for Unsloth - [Erik](https://github.com/erikwijmans) for his help adding [Apple's ML Cross Entropy](https://github.com/apple/ml-cross-entropy) in Unsloth - [HuyNguyen-hust](https://github.com/HuyNguyen-hust) for making [RoPE Embeddings 28% faster](https://github.com/unslothai/unsloth/pull/238) - [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support - [152334H](https://github.com/152334H) for experimental DPO support -- [atgctg](https://github.com/atgctg) for syntax highlighting From ceb2ca864ee69c04bbc1e0f97d1593f68ef1ef5e Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sun, 2 Mar 2025 20:35:27 -0800 Subject: [PATCH 1057/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a291abb765..67de3df60d 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ For Windows install instructions, see [here](https://github.com/unslothai/unslot | ------------------------------- | --------------------------------------- | | 📚 **Documentation & Wiki** | [Read Our Docs](https://docs.unsloth.ai) | |   **Twitter (aka X)** | [Follow us on 
X](https://twitter.com/unslothai)| -| 💾 **Installation** | [Pip install](https://github.com/unslothai/unsloth/edit/main/README.md#-install-unsloth)| +| 💾 **Installation** | [Pip install](https://docs.unsloth.ai/get-started/installing-+-updating)| | 🔮 **Our Models** | [Unsloth Releases](https://docs.unsloth.ai/get-started/all-our-models)| | ✍️ **Blog** | [Read our Blogs](https://unsloth.ai/blog)| |   **Reddit** | [Join our Reddit page](https://reddit.com/r/unsloth)| From 5d0ee525c1b6a3522f64ad9722249ae34b584555 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sun, 2 Mar 2025 20:44:26 -0800 Subject: [PATCH 1058/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 67de3df60d..a352042a70 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ See [here](https://github.com/unslothai/unsloth/edit/main/README.md#advanced-pip > [!warning] > Python 3.13 does not support Unsloth. Use 3.12, 3.11 or 3.10 -1. **Install NVIDIA Video Driver:**: +1. **Install NVIDIA Video Driver:** You should install the latest version of your GPUs driver. Download drivers here: [NVIDIA GPU Drive](https://www.nvidia.com/Download/index.aspx) 3. **Install Visual Studio C++:** From b512a600938bcb2592d59410583b72eb5e44e062 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Mon, 3 Mar 2025 21:27:20 -0800 Subject: [PATCH 1059/1088] Update README.md --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a352042a70..f8c7bd0fa0 100644 --- a/README.md +++ b/README.md @@ -100,17 +100,17 @@ See [here](https://github.com/unslothai/unsloth/edit/main/README.md#advanced-pip > Python 3.13 does not support Unsloth. Use 3.12, 3.11 or 3.10 1. **Install NVIDIA Video Driver:** - You should install the latest version of your GPUs driver. 
Download drivers here: [NVIDIA GPU Drive](https://www.nvidia.com/Download/index.aspx) + You should install the latest version of your GPUs driver. Download drivers here: [NVIDIA GPU Drive](https://www.nvidia.com/Download/index.aspx). 3. **Install Visual Studio C++:** - You will need Visual Studio, with C++ installed. By default, C++ is not installed with Visual Studio, so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. [Visual Studio Community Edition](https://visualstudio.microsoft.com/vs/community/) + You will need Visual Studio, with C++ installed. By default, C++ is not installed with [Visual Studio](https://visualstudio.microsoft.com/vs/community/), so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. For more detailed instructions, see [here](https://docs.unsloth.ai/get-started/installing-+-updating). 5. **Install CUDA Toolkit:** - Follow the instructions to install [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive) + Follow the instructions to install [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive). 6. **Install PyTorch:** You will need the correct version of PyTorch that is compatibile with your CUDA drivers, so make sure to select them carefully. - [Install PyTorch](https://pytorch.org/get-started/locally/) + [Install PyTorch](https://pytorch.org/get-started/locally/). 7. 
**Install Unsloth:** From 362a9ac1ac2d19bfd2a5bcc4ab344a1a38f1cea3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 4 Mar 2025 03:55:49 -0800 Subject: [PATCH 1060/1088] Bug fixes (#1891) * Update rl.py * Patching * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * NEFTune * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Extra replacements * Update rl_replacements.py * Update rl.py * extra RL replacements * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update _utils.py * Update loader_utils.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * 
Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. * add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py 
* Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. * Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update 
utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> --- README.md | 62 +++++++------- pyproject.toml | 4 +- unsloth/__init__.py | 3 +- unsloth/kernels/cross_entropy_loss.py | 102 ++++++++++++---------- unsloth/kernels/geglu.py | 24 ++++-- unsloth/kernels/layernorm.py | 48 ++++++----- unsloth/kernels/rms_layernorm.py | 47 +++++----- unsloth/kernels/rope_embedding.py | 42 ++++----- unsloth/kernels/swiglu.py | 8 +- unsloth/kernels/utils.py | 118 +++++++++++++++----------- unsloth/models/__init__.py | 2 +- unsloth/models/_utils.py | 3 +- unsloth/models/llama.py | 49 +++++------ unsloth/models/loader.py | 24 +++--- unsloth/models/rl.py | 2 +- unsloth/save.py | 108 +++++++++++++++++++++++ 16 files changed, 400 insertions(+), 246 deletions(-) diff --git a/README.md b/README.md index f8c7bd0fa0..1f85647f94 100644 --- a/README.md +++ b/README.md @@ -232,10 +232,8 @@ print(f'pip install --upgrade pip && pip install "unsloth[{x}] @ git+https://git ```python from unsloth import FastLanguageModel -from unsloth import is_bfloat16_supported import torch -from trl import SFTTrainer -from transformers import TrainingArguments +from trl import SFTTrainer, SFTConfig from datasets import load_dataset max_seq_length = 2048 # Supports RoPE Scaling interally, so choose any! # Get LAION dataset @@ -244,21 +242,28 @@ dataset = load_dataset("json", data_files = {"train" : url}, split = "train") # 4bit pre quantized models we support for 4x faster downloading + no OOMs. 
fourbit_models = [ - "unsloth/mistral-7b-v0.3-bnb-4bit", # New Mistral v3 2x faster! + "unsloth/Meta-Llama-3.1-8B-bnb-4bit", # Llama-3.1 2x faster + "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit", + "unsloth/Meta-Llama-3.1-70B-bnb-4bit", + "unsloth/Meta-Llama-3.1-405B-bnb-4bit", # 4bit for 405b! + "unsloth/Mistral-Small-Instruct-2409", # Mistral 22b 2x faster! "unsloth/mistral-7b-instruct-v0.3-bnb-4bit", - "unsloth/llama-3-8b-bnb-4bit", # Llama-3 15 trillion tokens model 2x faster! - "unsloth/llama-3-8b-Instruct-bnb-4bit", - "unsloth/llama-3-70b-bnb-4bit", - "unsloth/Phi-3-mini-4k-instruct", # Phi-3 2x faster! + "unsloth/Phi-3.5-mini-instruct", # Phi-3.5 2x faster! "unsloth/Phi-3-medium-4k-instruct", - "unsloth/mistral-7b-bnb-4bit", - "unsloth/gemma-7b-bnb-4bit", # Gemma 2.2x faster! + "unsloth/gemma-2-9b-bnb-4bit", + "unsloth/gemma-2-27b-bnb-4bit", # Gemma 2x faster! + + "unsloth/Llama-3.2-1B-bnb-4bit", # NEW! Llama 3.2 models + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", + "unsloth/Llama-3.2-3B-bnb-4bit", + "unsloth/Llama-3.2-3B-Instruct-bnb-4bit", + + "unsloth/Llama-3.3-70B-Instruct-bnb-4bit" # NEW! Llama 3.3 70B! 
] # More models at https://huggingface.co/unsloth model, tokenizer = FastLanguageModel.from_pretrained( - model_name = "unsloth/llama-3-8b-bnb-4bit", + model_name = "unsloth/Llama-3.2-1B", max_seq_length = max_seq_length, - dtype = None, load_in_4bit = True, ) @@ -282,16 +287,14 @@ model = FastLanguageModel.get_peft_model( trainer = SFTTrainer( model = model, train_dataset = dataset, - dataset_text_field = "text", - max_seq_length = max_seq_length, tokenizer = tokenizer, - args = TrainingArguments( + args = SFTConfig( + dataset_text_field = "text", + max_seq_length = max_seq_length, per_device_train_batch_size = 2, gradient_accumulation_steps = 4, warmup_steps = 10, max_steps = 60, - fp16 = not is_bfloat16_supported(), - bf16 = is_bfloat16_supported(), logging_steps = 1, output_dir = "outputs", optim = "adamw_8bit", @@ -323,17 +326,14 @@ RL including DPO, GRPO, PPO, Reward Modelling, Online DPO all work with Unsloth. import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Optional set GPU device ID -from unsloth import FastLanguageModel, PatchDPOTrainer -from unsloth import is_bfloat16_supported -PatchDPOTrainer() +from unsloth import FastLanguageModel import torch -from transformers import TrainingArguments -from trl import DPOTrainer +from trl import DPOTrainer, DPOConfig +max_seq_length = 2048 model, tokenizer = FastLanguageModel.from_pretrained( model_name = "unsloth/zephyr-sft-bnb-4bit", max_seq_length = max_seq_length, - dtype = None, load_in_4bit = True, ) @@ -355,24 +355,22 @@ model = FastLanguageModel.get_peft_model( dpo_trainer = DPOTrainer( model = model, ref_model = None, - args = TrainingArguments( + train_dataset = YOUR_DATASET_HERE, + # eval_dataset = YOUR_DATASET_HERE, + tokenizer = tokenizer, + args = DPOConfig( per_device_train_batch_size = 4, gradient_accumulation_steps = 8, warmup_ratio = 0.1, num_train_epochs = 3, - fp16 = not is_bfloat16_supported(), - bf16 = is_bfloat16_supported(), logging_steps = 1, optim = "adamw_8bit", seed = 42, output_dir 
= "outputs", + max_length = 1024, + max_prompt_length = 512, + beta = 0.1, ), - beta = 0.1, - train_dataset = YOUR_DATASET_HERE, - # eval_dataset = YOUR_DATASET_HERE, - tokenizer = tokenizer, - max_length = 1024, - max_prompt_length = 512, ) dpo_trainer.train() ``` diff --git a/pyproject.toml b/pyproject.toml index de1583e9e3..73e69dcd4a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] windows=[ - "unsloth_zoo>=2025.2.7", + "unsloth_zoo>=2025.3.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -61,7 +61,7 @@ windows=[ "xformers>=0.0.22.post7 ; platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.2.7", + "unsloth_zoo>=2025.3.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index e33d16577a..c8f2926985 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.2.6"): + if Version(unsloth_zoo_version) < Version("2025.3.1"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: @@ -212,6 +212,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 pass from .models import * +from .models import __version__ from .save import * from .chat_templates import * from .tokenizer_utils import * diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index fcba2eb6d4..006dfff631 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -15,7 +15,13 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings, MAX_FUSED_SIZE, triton_tanh, triton_cast +from .utils import ( + calculate_settings, + MAX_FUSED_SIZE, + triton_tanh, + triton_cast, + torch_cuda_device, +) from transformers.models.llama.modeling_llama import 
logger from packaging.version import Version @@ -279,10 +285,11 @@ def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : n_rows : int vocab_size : int n_rows, vocab_size = logits.shape + device = logits.device div, mod = divmod(vocab_size, MAX_FUSED_SIZE) n_chunks : int = div + (mod != 0) - losses = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") + losses = torch.empty(n_rows, dtype = torch.float32, device = device) DO_SOFTCAPPING : bool = bool(logit_softcapping != 0) DO_LOGIT_SCALING : bool = bool(logit_scaling != 0) @@ -292,39 +299,41 @@ def forward(ctx, logits, labels, logit_softcapping : float = 0, logit_scaling : if n_chunks == 1: # For small vocabs <= 65336 like Llama, Mistral BLOCK_SIZE, num_warps = calculate_settings(vocab_size) - logsumexp = torch.empty(n_rows, dtype = torch.float32, device = "cuda:0") - - _cross_entropy_forward[(n_rows,)]( - logits, logits.stride(0), - losses, - logsumexp, - labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, - SOFTCAP = logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, - LOGIT_SCALE = logit_scaling, - num_warps = num_warps, - ) + logsumexp = torch.empty(n_rows, dtype = torch.float32, device = device) + + with torch_cuda_device(device): + _cross_entropy_forward[(n_rows,)]( + logits, logits.stride(0), + losses, + logsumexp, + labels, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, + LOGIT_SCALE = logit_scaling, + num_warps = num_warps, + ) else: # For large vocabs > 65336 like Gemma 256K - logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = "cuda:0") - - _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( - logits, logits.stride(0), - losses, - logsumexp, - labels, - VOCAB_SIZE = vocab_size, - N_CHUNKS = n_chunks, - BLOCK_SIZE = MAX_FUSED_SIZE, - DO_SOFTCAPPING = DO_SOFTCAPPING, - SOFTCAP = 
logit_softcapping, - DO_LOGIT_SCALING = DO_LOGIT_SCALING, - LOGIT_SCALE = logit_scaling, - num_warps = 32, - ) + logsumexp = torch.empty((n_rows, n_chunks,), dtype = torch.float32, device = device) + + with torch_cuda_device(device): + _chunked_cross_entropy_forward[(n_rows, n_chunks,)]( + logits, logits.stride(0), + losses, + logsumexp, + labels, + VOCAB_SIZE = vocab_size, + N_CHUNKS = n_chunks, + BLOCK_SIZE = MAX_FUSED_SIZE, + DO_SOFTCAPPING = DO_SOFTCAPPING, + SOFTCAP = logit_softcapping, + DO_LOGIT_SCALING = DO_LOGIT_SCALING, + LOGIT_SCALE = logit_scaling, + num_warps = 32, + ) # logsumexp(chunked_logsumexp) - x # Do the -x separately logsumexp = torch.logsumexp(logsumexp, dim = 1) # Row sum @@ -354,19 +363,20 @@ def backward(ctx, dlosses): div, mod = divmod(vocab_size, BLOCK_SIZE) n_blocks : int = div + (mod != 0) - _cross_entropy_backward[(n_rows, n_blocks,)]( - logits, logits.stride(0), - dlosses, dlosses.stride(0), - logsumexp, - labels, - VOCAB_SIZE = vocab_size, - BLOCK_SIZE = BLOCK_SIZE, - DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, - SOFTCAP = ctx.logit_softcapping, - DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, - LOGIT_SCALE = ctx.logit_scaling, - num_warps = 8, - ) + with torch_cuda_device(dlosses.device): + _cross_entropy_backward[(n_rows, n_blocks,)]( + logits, logits.stride(0), + dlosses, dlosses.stride(0), + logsumexp, + labels, + VOCAB_SIZE = vocab_size, + BLOCK_SIZE = BLOCK_SIZE, + DO_SOFTCAPPING = ctx.DO_SOFTCAPPING, + SOFTCAP = ctx.logit_softcapping, + DO_LOGIT_SCALING = ctx.DO_LOGIT_SCALING, + LOGIT_SCALE = ctx.logit_scaling, + num_warps = 8, + ) return logits, None, None, None, pass pass diff --git a/unsloth/kernels/geglu.py b/unsloth/kernels/geglu.py index 9fedae769e..1ece87c080 100644 --- a/unsloth/kernels/geglu.py +++ b/unsloth/kernels/geglu.py @@ -15,7 +15,11 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings, triton_tanh +from .utils import ( + calculate_settings, + triton_tanh, + torch_cuda_device, 
+) @triton.jit @@ -41,9 +45,11 @@ def _exact_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def geglu_exact_forward_kernel(gate, up): batch, seq_len, hd = gate.shape n_elements = gate.numel() - out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda:0") + device = gate.device + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = device) grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _exact_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(device): + _exact_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) return out pass @@ -99,7 +105,8 @@ def geglu_exact_backward_kernel(DW, e, g): batch_seq_len, hd = e.shape n_elements = e.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _exact_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(e.device): + _exact_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) return DW, e, g pass @@ -133,9 +140,11 @@ def _approx_forward_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): def geglu_approx_forward_kernel(gate, up): batch, seq_len, hd = gate.shape n_elements = gate.numel() - out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = "cuda:0") + device = gate.device + out = torch.empty((batch, seq_len, hd), dtype = gate.dtype, device = device) grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _approx_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(device): + _approx_forward_kernel[grid](gate, up, out, n_elements, BLOCK_SIZE = 1024,) return out pass @@ -198,6 +207,7 @@ def geglu_approx_backward_kernel(DW, e, g): batch_seq_len, hd = e.shape n_elements = e.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _approx_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(e.device): + 
_approx_backward_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) return DW, e, g pass diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index ffcc5cc13c..26a77f03a0 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -16,7 +16,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings +from .utils import calculate_settings, torch_cuda_device from unsloth_zoo.patching_utils import ( patch_layernorm, ) @@ -111,17 +111,18 @@ def forward(ctx, X, W, b, eps): r = torch.empty(n_rows, dtype = torch.float32, device = device) mu = torch.empty(n_rows, dtype = torch.float32, device = device) - layernorm_forward[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, - b, - r, - mu, - n_cols, eps, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + with torch_cuda_device(device): + layernorm_forward[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, + b, + r, + mu, + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps @@ -137,17 +138,18 @@ def backward(ctx, dY): X, W, b, r, mu = ctx.saved_tensors n_rows, n_cols = dY.shape - layernorm_backward[(n_rows,)]( - dY, dY.stride(0), - X, X .stride(0), - W, - b, - r, - mu, - n_cols, ctx.eps, - BLOCK_SIZE = ctx.BLOCK_SIZE, - num_warps = ctx.num_warps, - ) + with torch_cuda_device(dY.device): + layernorm_backward[(n_rows,)]( + dY, dY.stride(0), + X, X .stride(0), + W, + b, + r, + mu, + n_cols, ctx.eps, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) dX = dY.view(*shape) return dX, None, None, None, None pass diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 7487c10eeb..1cde6388ea 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -15,8 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings - +from .utils import 
calculate_settings, torch_cuda_device @triton.jit def _rms_layernorm_forward( @@ -154,15 +153,16 @@ def forward(ctx, X : torch.Tensor, W : torch.Tensor, eps : float, gemma : bool = r = torch.empty(n_rows, dtype = torch.float32, device = device) fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward - fx[(n_rows,)]( - Y, Y.stride(0), - X, X.stride(0), - W, W.stride(0), - r, r.stride(0), - n_cols, eps, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + with torch_cuda_device(device): + fx[(n_rows,)]( + Y, Y.stride(0), + X, X.stride(0), + W, W.stride(0), + r, r.stride(0), + n_cols, eps, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps @@ -183,18 +183,19 @@ def backward(ctx, dY : torch.Tensor): # dW = X dX = torch.empty_like(dY) if ctx.GEMMA else dY - _rms_layernorm_backward[(n_rows,)]( - dY, dY.stride(0), - dX, dX.stride(0), - X, X .stride(0), - W, W .stride(0), - r, r .stride(0), - # dW, dW.stride(0), - n_cols, ctx.eps, - GEMMA = ctx.GEMMA, - BLOCK_SIZE = ctx.BLOCK_SIZE, - num_warps = ctx.num_warps, - ) + with torch_cuda_device(dY.device): + _rms_layernorm_backward[(n_rows,)]( + dY, dY.stride(0), + dX, dX.stride(0), + X, X .stride(0), + W, W .stride(0), + r, r .stride(0), + # dW, dW.stride(0), + n_cols, ctx.eps, + GEMMA = ctx.GEMMA, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) dX = dX.view(*shape) return dX, None, None, None pass diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 88b9ccadb4..a14a485352 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings +from .utils import calculate_settings, torch_cuda_device ROPE_GROUP_SIZE : int = 4 def _rope_embedding( @@ -100,16 +100,17 @@ def forward(ctx, Q, cos, sin): div, mod = divmod(n_heads, ROPE_GROUP_SIZE) n_groups : int = 
div + (mod != 0) - _rope_embedding[(n_rows, n_groups, )]( - Q, Q.stride(0), - cos, cos.stride(0), - sin, sin.stride(0), - seq_len, - head_dim, n_heads, - BACKWARD_PASS = False, - BLOCK_SIZE = BLOCK_SIZE, - num_warps = num_warps, - ) + with torch_cuda_device(Q.device): + _rope_embedding[(n_rows, n_groups, )]( + Q, Q.stride(0), + cos, cos.stride(0), + sin, sin.stride(0), + seq_len, + head_dim, n_heads, + BACKWARD_PASS = False, + BLOCK_SIZE = BLOCK_SIZE, + num_warps = num_warps, + ) ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps ctx.n_groups = n_groups @@ -134,15 +135,16 @@ def backward(ctx, dY): cos = ctx.cos sin = ctx.sin - _rope_embedding[(n_rows, ctx.n_groups, )]( - dY, dY .stride(0), - cos, cos.stride(0), - sin, sin.stride(0), - seq_len, head_dim, n_heads, - BACKWARD_PASS = True, - BLOCK_SIZE = ctx.BLOCK_SIZE, - num_warps = ctx.num_warps, - ) + with torch_cuda_device(dY.device): + _rope_embedding[(n_rows, ctx.n_groups, )]( + dY, dY .stride(0), + cos, cos.stride(0), + sin, sin.stride(0), + seq_len, head_dim, n_heads, + BACKWARD_PASS = True, + BLOCK_SIZE = ctx.BLOCK_SIZE, + num_warps = ctx.num_warps, + ) dY = dY.view(batch, seq_len, n_heads, head_dim) return dY, None, None, pass diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index 688e9f9a48..12f1f5e063 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ -15,7 +15,7 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings +from .utils import calculate_settings, torch_cuda_device @triton.jit @@ -43,7 +43,8 @@ def swiglu_fg_kernel(e, g): n_elements = e.numel() h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = e.device) grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(e.device): + _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,) return h pass @@ -94,6 +95,7 @@ def swiglu_DWf_DW_dfg_kernel(DW, e, g): 
batch_seq_len, hd = e.shape n_elements = e.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) - _DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) + with torch_cuda_device(e.device): + _DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) return DW, e, g pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 985adaaa44..5eb9b8f5ce 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -19,6 +19,7 @@ # torch.cuda.amp.custom_fwd is deprecated >= 2.4 import torch +torch_Tensor = torch.Tensor from packaging.version import Version if Version(torch.__version__) < Version("2.4.0"): torch_amp_custom_fwd = torch.cuda.amp.custom_fwd @@ -67,6 +68,18 @@ def calculate_settings(n : int) -> (int, int,): HAS_CUDA_STREAM = Version(bnb.__version__) > Version("0.43.3") get_ptr = bnb.functional.get_ptr +if torch.cuda.device_count() > 1: + torch_cuda_device = torch.cuda.device +else: + from contextlib import nullcontext + def torch_cuda_device(device): return nullcontext() +pass +_cuda_getCurrentRawStream = torch._C._cuda_getCurrentRawStream +c_void_p = ctypes.c_void_p +def _get_tensor_stream(tensor: torch_Tensor) -> c_void_p: + return c_void_p(_cuda_getCurrentRawStream(tensor.device.index)) +pass + # Get array of CUDA streams and other buffers global CUDA_STREAMS global WEIGHT_BUFFERS @@ -92,27 +105,29 @@ def calculate_settings(n : int) -> (int, int,): cgemm_4bit_inference_naive_fp16 = bnb.functional.lib.cgemm_4bit_inference_naive_fp16 cgemm_4bit_inference_naive_bf16 = bnb.functional.lib.cgemm_4bit_inference_naive_bf16 - -def QUANT_STATE(W): - return getattr(W, "quant_state", None) -pass - +def QUANT_STATE(W): return getattr(W, "quant_state", None) def get_lora_parameters(proj): # For DPO or disabled adapters - base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) + base_layer = getattr(proj, "base_layer", proj) # (proj.base_layer if hasattr(proj, "base_layer") else proj) W = 
base_layer.weight - if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: - return W, QUANT_STATE(W), None, None, None + # if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: + if getattr(proj, "disable_adapters", True) or proj.merged: + return W, getattr(W, "quant_state", None), None, None, None pass - active_adapter = proj.active_adapters[0] if \ - hasattr(proj, "active_adapters") else proj.active_adapter - A = proj.lora_A [active_adapter].weight - B = proj.lora_B [active_adapter].weight - s = proj.scaling[active_adapter] - return W, QUANT_STATE(W), A, B, s + adapter = getattr(proj, "active_adapters", None) + if adapter is None: adapter = getattr(proj, "active_adapter", ("default")) + adapter = adapter[0] + + return ( + W, + getattr(W, "quant_state", None), + proj.lora_A [adapter].weight, + proj.lora_B [adapter].weight, + proj.scaling[adapter], + ) pass @@ -120,19 +135,24 @@ def get_lora_parameters_bias(proj): # For DPO or disabled adapters base_layer = getattr(proj, "base_layer", proj) # (proj.base_layer if hasattr(proj, "base_layer") else proj) W = base_layer.weight - bias = base_layer.bias # if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: if getattr(proj, "disable_adapters", True) or proj.merged: - return W, QUANT_STATE(W), None, None, None, bias + return W, getattr(W, "quant_state", None), None, None, None, bias pass - active_adapter = proj.active_adapters[0] if \ - getattr(proj, "active_adapters", ) else proj.active_adapter - A = proj.lora_A [active_adapter].weight - B = proj.lora_B [active_adapter].weight - s = proj.scaling[active_adapter] - return W, QUANT_STATE(W), A, B, s, bias + adapter = getattr(proj, "active_adapters", None) + if adapter is None: adapter = getattr(proj, "active_adapter", ("default")) + adapter = adapter[0] + + return ( + W, + getattr(W, "quant_state", None), + proj.lora_A [adapter].weight, + proj.lora_B [adapter].weight, + 
proj.scaling[adapter], + base_layer.bias, + ) pass if HAS_CUDA_STREAM: @@ -193,18 +213,19 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False # NF4 dequantization of statistics ptr_out_absmax = get_ptr(out_absmax) - cdequantize_blockwise_fp32( - get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, - ctypes_c_int(blocksize2), ctypes_c_int(n_elements_absmax), CUDA_STREAM, - ) - out_absmax += offset - - # Dequantize W - fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ - cdequantize_blockwise_bf16_nf4 - fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), - ctypes_c_int(blocksize), ctypes_c_int(out.numel()), CUDA_STREAM,) - + with torch_cuda_device(device): + cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), ptr_out_absmax, + ctypes_c_int(blocksize2), ctypes_c_int(n_elements_absmax), CUDA_STREAM + ) + out_absmax += offset + + # Dequantize W + fx = cdequantize_blockwise_fp16_nf4 if dtype == torch.float16 else \ + cdequantize_blockwise_bf16_nf4 + fx(get_ptr(None), get_ptr(W), ptr_out_absmax, get_ptr(out), + ctypes_c_int(blocksize), ctypes_c_int(out.numel()), CUDA_STREAM,) + pass # Careful returning transposed data is_transposed = (True if W.shape[0] == 1 else False) return out.t() if is_transposed else out @@ -316,19 +337,21 @@ def fast_gemv(X, W, quant_state, out = None): ldc = ctypes_c_int32(ldc) df = torch.empty(absmax.shape, dtype = torch.float32, device = device) - cdequantize_blockwise_fp32( - get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), - ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), CUDA_STREAM, - ) - df += offset - absmax = df - - fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ - cgemm_4bit_inference_naive_bf16 - - blocksize = ctypes_c_int32(blocksize) - fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), - lda, ldb, ldc, blocksize, CUDA_STREAM,) + with torch_cuda_device(device): + 
cdequantize_blockwise_fp32( + get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), + ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), CUDA_STREAM, + ) + df += offset + absmax = df + + fx = cgemm_4bit_inference_naive_fp16 if dtype == torch.float16 else \ + cgemm_4bit_inference_naive_bf16 + + blocksize = ctypes_c_int32(blocksize) + fx(m, n, k, get_ptr(X), get_ptr(W), get_ptr(absmax), get_ptr(stats), get_ptr(out), + lda, ldb, ldc, blocksize, CUDA_STREAM,) + pass return out pass @@ -458,7 +481,6 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): else: reshape = False pass - out = torch_matmul(X, W, out = out) if W_quant is not None: del W diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 29ad78dae2..e11cd54417 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -19,5 +19,5 @@ from .mistral import FastMistralModel from .qwen2 import FastQwen2Model from .dpo import PatchDPOTrainer, PatchKTOTrainer -from ._utils import is_bfloat16_supported +from ._utils import is_bfloat16_supported, __version__ from .rl import PatchFastRL, vLLMSamplingParams diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index cca77bb60b..0f0d4c159f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -755,7 +755,8 @@ def offload_to_disk(W, model, name, temporary_location : str = "_unsloth_tempora filename = os.path.join(file_location, f"{name}.pt") W = W.weight if hasattr(W, "weight") else W torch.save(W, filename, pickle_module = pickle, pickle_protocol = pickle.HIGHEST_PROTOCOL,) - offloaded_W = torch.load(filename, map_location = "cpu", mmap = True) + # We must use weights_only = False due to pickling + offloaded_W = torch.load(filename, map_location = "cpu", mmap = True, weights_only = False) offloaded_W._offloaded_file_location = filename return offloaded_W pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index fe0627f8d7..bcabbd5125 100644 --- a/unsloth/models/llama.py +++ 
b/unsloth/models/llama.py @@ -18,6 +18,7 @@ from functools import partial from typing import Optional, Tuple, List, Union from ._utils import * +from ._utils import patch_unsloth_smart_gradient_checkpointing from ._utils import __version__ from torch.nn.functional import scaled_dot_product_attention from transformers import __version__ as transformers_version @@ -758,14 +759,9 @@ def LlamaModel_fast_forward( # Check checkpointing method gradient_checkpointing = False - offloaded_gradient_checkpointing = False if (self.gradient_checkpointing and self.training and not use_cache): - gradient_checkpointing = True - - if output_attentions is False and hasattr(self, "_offloaded_gradient_checkpointing"): - offloaded_gradient_checkpointing = True pass # Gemma2 has alternating SWA and global attn @@ -850,27 +846,12 @@ def LlamaModel_fast_forward( mask = self. GA_mask if use_static_mask else dynamic_GA_mask pass - if offloaded_gradient_checkpointing: - hidden_states = Unsloth_Offloaded_Gradient_Checkpointer.apply( - decoder_layer, - hidden_states, - mask, - attention_mask, - position_ids, - past_key_values, - output_attentions, - use_cache, - None, - position_embeddings, - )[0] - - elif gradient_checkpointing: + if gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions, padding_mask = padding_mask, position_embeddings = position_embeddings) return custom_forward pass - layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, @@ -1703,10 +1684,10 @@ def from_pretrained( statistics = \ f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.\n"\ - f" {chr(92)}{chr(92)} /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f" {chr(92)}{chr(92)} /| {gpu_stats.name}. Num GPUs = {torch.cuda.device_count()}. Max memory: {max_memory} GB. 
Platform: {platform_system}.\n"\ f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ - f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' + f' "-____-" Free license: http://github.com/unslothai/unsloth' print(statistics) # Warn about fast transfers @@ -1898,11 +1879,11 @@ def from_pretrained( # Cannot use \\ since it will cause a SyntaxWarning in Python 3.12 # Instead use chr(92) == \\ debug_info = """debug_info = \\ - f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs = {args.world_size}\\n"\\ - f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,}\\n"\\ - f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient Accumulation steps = {args.gradient_accumulation_steps}\\n"\\ - f"{chr(92)} / Total batch size = {total_train_batch_size:,} | Total steps = {max_steps:,}\\n"\\ - f' "-____-" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}' + f"==((====))== Unsloth - 2x faster free finetuning | Num GPUs used = {len(set(p.device for p in model.parameters()))}\\n"\\ + f" {chr(92)}{chr(92)} /| Num examples = {num_examples:,} | Num Epochs = {num_train_epochs:,} | Total steps = {max_steps:,}\\n"\\ + f"O^O/ {chr(92)}_/ {chr(92)} Batch size per device = {self._train_batch_size:,} | Gradient accumulation steps = {args.gradient_accumulation_steps}\\n"\\ + f"{chr(92)} / Data Parallel GPUs = {args.world_size} | Total batch size ({self._train_batch_size} x {args.gradient_accumulation_steps} x {args.world_size}) = {total_train_batch_size:,}\\n"\\ + f' "-____-" Trainable parameters = {get_model_param_count(model, trainable_only=True):,}/{get_model_param_count(model):,} ({get_model_param_count(model, 
trainable_only=True)/get_model_param_count(model)*100:.2f}% trained)' logger.warning(debug_info) import subprocess, re, gc for _ in range(3): @@ -1989,9 +1970,14 @@ def from_pretrained( internal_model = model while hasattr(internal_model, "model"): internal_model._saved_temp_tokenizer = tokenizer + # Also set is_loaded_in_8bit to disable incorrect DDP + internal_model.is_loaded_in_8bit = True + internal_model = internal_model.model pass internal_model._saved_temp_tokenizer = tokenizer + # Also set is_loaded_in_8bit to disable incorrect DDP + internal_model.is_loaded_in_8bit = True # For transformers > 4.47.1, we need to add rotary_emb to all attention layers if IS_ATTENTION_REFACTOR or hasattr(model.model, "rotary_emb"): @@ -2034,6 +2020,9 @@ def get_peft_model( ): transformers_set_seed(random_state) + if use_gradient_checkpointing == "unsloth": + patch_unsloth_smart_gradient_checkpointing(dtype = model.get_input_embeddings().weight.dtype) + if type(r) is not int: raise TypeError(f"Unsloth: Rank of {str(r)} must be an integer.") if r <= 0: @@ -2398,11 +2387,15 @@ def get_peft_model( if hasattr(internal_model, "_saved_temp_tokenizer"): internal_model._saved_temp_tokenizer.padding_side = "right" pass + # Also set is_loaded_in_8bit to disable incorrect DDP + internal_model.is_loaded_in_8bit = True internal_model = internal_model.model pass if hasattr(internal_model, "_saved_temp_tokenizer"): internal_model._saved_temp_tokenizer.padding_side = "right" pass + # Also set is_loaded_in_8bit to disable incorrect DDP + internal_model.is_loaded_in_8bit = True # Clear deleted GPU items for _ in range(3): diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 186545cf0c..30128cd134 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -59,7 +59,15 @@ from .gemma2 import FastGemma2Model pass import torch - +from ._utils import ( + patch_compiling_bitsandbytes, + patch_model_and_tokenizer, + prepare_model_for_kbit_training, + 
patch_unsloth_smart_gradient_checkpointing, + patch_compiled_autograd, + process_vision_info, + unsloth_compile_transformers, +) class FastLanguageModel(FastLlamaModel): @staticmethod @@ -87,6 +95,10 @@ def from_pretrained( *args, **kwargs, ): if token is None: token = get_token() + assert (dtype is None or dtype == torch.float16 or dtype == torch.bfloat16) + + if use_gradient_checkpointing == "unsloth": + patch_unsloth_smart_gradient_checkpointing(dtype = dtype) if fast_inference: if importlib.util.find_spec("vllm") is None: @@ -367,15 +379,6 @@ def from_pretrained( pass -from ._utils import ( - patch_compiling_bitsandbytes, - patch_model_and_tokenizer, - prepare_model_for_kbit_training, - patch_unsloth_smart_gradient_checkpointing, - patch_compiled_autograd, - process_vision_info, - unsloth_compile_transformers, -) from ..kernels import ( patch_loss_functions, post_patch_loss_function, @@ -404,6 +407,7 @@ def from_pretrained( *args, **kwargs, ): if token is None: token = get_token() + assert (dtype is None or dtype == torch.float16 or dtype == torch.bfloat16) patch_compiled_autograd() patch_compiling_bitsandbytes() diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 8f346073bf..3a9d651d11 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -495,7 +495,7 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): RLTrainer_source, f"trl.trainer.{trainer_file}", imports, - overwrite = True, + overwrite = False, ) # Patch Trainer diff --git a/unsloth/save.py b/unsloth/save.py index af95de07e7..d03f47e874 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -17,6 +17,8 @@ from peft.tuners.lora import Linear4bit as Peft_Linear4bit from peft.tuners.lora import Linear as Peft_Linear from typing import Optional, Callable, Union, List +import sys +import requests import torch import os import shutil @@ -1613,6 +1615,112 @@ def create_ollama_modelfile(tokenizer, gguf_location): return modelfile pass +def create_ollama_model( + username: str, + 
model_name: str, + tag: str, + modelfile_path: str +): + try: + init_check = subprocess.run( + ['curl', 'http://localhost:11434'], capture_output=True, text=True, timeout=3 + ) + if init_check.returncode == 0: + print(init_check.stdout.strip()) + else: + print("Ollama Server is not Running") + except subprocess.TimeoutExpired: + return "Ollama Request Timeout" + + process = subprocess.Popen( + ['ollama', 'create', f'{username}/{model_name}:{tag}', '-f', f'{modelfile_path}'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + universal_newlines=True + ) + + for line in iter(process.stdout.readline, ''): + print(line, end='') + sys.stdout.flush() + + return_code = process.wait() + + if return_code != 0: + print(f"\nMODEL CREATED FAILED WITH RETURN CODE {return_code}") + else: + print("\nMODEL CREATED SUCCESSFULLY") +pass + + +def push_to_ollama_hub(username: str, model_name: str, tag: str): + try: + init_check = subprocess.run( + ['curl', 'http://localhost:11434'], capture_output=True, text=True, timeout=3 + ) + if init_check.returncode == 0: + print(init_check.stdout.strip()) + else: + print("Ollama Server is not Running") + except subprocess.TimeoutExpired: + return "Ollama Request Timeout" + + process = subprocess.Popen( + ['ollama', 'push', f'{username}/{model_name}:{tag}'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + universal_newlines=True + ) + + for line in iter(process.stdout.readline, ''): + print(line, end='') + sys.stdout.flush() + + return_code = process.wait() + + if return_code != 0: + print(f"\nMODEL PUBLISHED FAILED WITH RETURN CODE {return_code}") + else: + print("\nMODEL PUBLISHED SUCCESSFULLY") + + +def push_to_ollama( + tokenizer, + gguf_location, + username: str, + model_name: str, + tag: str +): + model_file = create_ollama_modelfile( + tokenizer=tokenizer, + gguf_location=gguf_location + ) + + with open(f"Modelfile_{model_name}", "w") as f: + f.write(model_file) + f.close() + 
+ create_ollama_model( + username=username, + model_name=model_name, + tag=tag, + modelfile_path=f"Modelfile_{model_name}" + ) + + push_to_ollama_hub( + username=username, + model_name=model_name, + tag=tag + ) + + print("Succesfully pushed to ollama") + + + + def unsloth_save_pretrained_gguf( self, From ff18cb3a7436b3ad7f7c189834dff81f7526ee61 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 4 Mar 2025 04:12:38 -0800 Subject: [PATCH 1061/1088] Bug fix --- unsloth/models/_utils.py | 2 +- unsloth/tokenizer_utils.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0f0d4c159f..36e4f51a27 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.1" +__version__ = "2025.3.2" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 9c5f825a0c..de9f73d7a5 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -859,6 +859,7 @@ def check_tokenizer( import inspect from inspect import getsource +import trl import trl.trainer.sft_trainer from trl.trainer.sft_trainer import * from transformers.trainer import * @@ -1046,3 +1047,9 @@ def patch_sft_trainer_tokenizer(): # Finally patch TRL tokenizer things -> moved to RL # patch_sft_trainer_tokenizer() + +# Temporary measure to stop tokenizing data twice +if hasattr(trl, "data_utils"): + def maybe_apply_chat_template(example, *args, **kwargs): return example + trl.data_utils.maybe_apply_chat_template = maybe_apply_chat_template +pass From 1aeb21b10ece8350a41a9542f51e68dbcdcdda59 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 4 Mar 2025 04:22:23 -0800 Subject: [PATCH 1062/1088] Bug fix --- unsloth/models/rl_replacements.py | 12 +++++++++++- unsloth/tokenizer_utils.py | 6 ------ 2 files changed, 11 insertions(+), 7 
deletions(-) diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index fe7f4accee..5ea61cb9b3 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -90,9 +90,11 @@ def sft_trainer_prepare_dataset(function_name, function): "if getattr(tokenizer, 'bos_token', None) is not None else False\n"\ "if 'add_special_tokens' not in locals() and has_bos_token_already:\n"\ " from functools import partial\n"\ - " tokenizer = partial(tokenizer, add_special_tokens = False)\n"\ + " tokenizer_call = tokenizer.__call__\n"\ + " tokenizer.__call__ = partial(tokenizer_call, add_special_tokens = False)\n"\ " processing_class = tokenizer\n"\ "else:\n"\ + " tokenizer_call = None\n"\ " add_special_tokens = False if has_bos_token_already else locals().get('add_special_tokens', False)\n" check_text = check_text.split("\n") @@ -109,6 +111,14 @@ def sft_trainer_prepare_dataset(function_name, function): replacer = replacer[0] function = function.replace(replacer, replacer + check_text) pass + + # Return tokenizer's original state + return_state = "if tokenizer_call is not None: tokenizer.__call__ = tokenizer_call\n" + function = re.sub( + r"\n([ ]{4,})(return .*?[\s]{0,})$", + rf"\1{return_state}\1\2", + function, + ) return function pass RL_FUNCTIONS["sft_trainer"].append(sft_trainer_prepare_dataset) diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index de9f73d7a5..91bb0202ff 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -1047,9 +1047,3 @@ def patch_sft_trainer_tokenizer(): # Finally patch TRL tokenizer things -> moved to RL # patch_sft_trainer_tokenizer() - -# Temporary measure to stop tokenizing data twice -if hasattr(trl, "data_utils"): - def maybe_apply_chat_template(example, *args, **kwargs): return example - trl.data_utils.maybe_apply_chat_template = maybe_apply_chat_template -pass From 840988b12b7e7ba5fa103f42cafccc3d61f6ed3e Mon Sep 17 00:00:00 2001 From: Daniel Han 
Date: Tue, 4 Mar 2025 13:26:47 -0800 Subject: [PATCH 1063/1088] Bug fix --- unsloth/kernels/utils.py | 2 +- unsloth/models/_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 5eb9b8f5ce..8da152bcb3 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -138,7 +138,7 @@ def get_lora_parameters_bias(proj): # if not hasattr(proj, "disable_adapters") or proj.disable_adapters or proj.merged: if getattr(proj, "disable_adapters", True) or proj.merged: - return W, getattr(W, "quant_state", None), None, None, None, bias + return W, getattr(W, "quant_state", None), None, None, None, base_layer.bias pass adapter = getattr(proj, "active_adapters", None) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 36e4f51a27..43828a358a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.2" +__version__ = "2025.3.3" __all__ = [ "SUPPORTS_BFLOAT16", From f48cb4133980650f32627c58666ef44d0f408b36 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 5 Mar 2025 05:13:32 -0800 Subject: [PATCH 1064/1088] Many bug fixes (#1900) * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * autocast * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update 
rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. * add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> --- pyproject.toml | 34 ++--- unsloth/__init__.py | 2 +- unsloth/kernels/utils.py | 31 ++-- unsloth/models/_utils.py | 80 +++++----- unsloth/models/llama.py | 244 +++++++++++++----------------- unsloth/models/rl.py | 2 +- unsloth/models/rl_replacements.py | 19 +++ 7 files changed, 189 insertions(+), 223 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 73e69dcd4a..5a9d92202a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "unsloth" dynamic = ["version"] description = "2-5X faster LLM finetuning" readme = "README.md" -requires-python = ">=3.9" +requires-python = ">=3.9,<=3.12" license = {file = "LICENSE"} keywords = ["ai", "llm",] authors = [ @@ -39,8 +39,8 @@ triton = [ "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'" ] -windows=[ - "unsloth_zoo>=2025.3.1", +huggingface = [ + "unsloth_zoo>=2025.3.2", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -51,34 +51,18 @@ windows=[ "wheel>=0.42.0", "numpy", "accelerate>=0.34.1", - 
"trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0,<=0.15.2", "peft>=0.7.1,!=0.11.0", "protobuf<4.0.0", "huggingface_hub", "hf_transfer", "unsloth[triton]", +] +windows=[ + "unsloth[huggingface]", "bitsandbytes>=0.41.1 ; platform_system == 'Windows'", "xformers>=0.0.22.post7 ; platform_system == 'Windows'", ] -huggingface = [ - "unsloth_zoo>=2025.3.1", - "packaging", - "tyro", - "transformers>=4.46.1,!=4.47.0", - "datasets>=2.16.0", - "sentencepiece>=0.2.0", - "tqdm", - "psutil", - "wheel>=0.42.0", - "numpy", - "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", - "peft>=0.7.1,!=0.11.0", - "protobuf<4.0.0", - "huggingface_hub", - "hf_transfer", - "unsloth[triton]", -] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9' and platform_system == 'Linux'", "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10' and platform_system == 'Linux'", @@ -370,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.2.7", + "unsloth_zoo>=2025.3.1", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -388,7 +372,7 @@ colab-new = [ ] colab-no-deps = [ "accelerate>=0.34.1", - "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0", + "trl>=0.7.9,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.15.0,<=0.15.2", "peft>=0.7.1", "xformers", "bitsandbytes>=0.46.1", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index c8f2926985..8439ab8212 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.1"): + if Version(unsloth_zoo_version) < Version("2025.3.2"): try: os.system("pip 
install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 8da152bcb3..db1d73c340 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -104,6 +104,11 @@ def _get_tensor_stream(tensor: torch_Tensor) -> c_void_p: cdequantize_blockwise_bf16_nf4 = bnb.functional.lib.cdequantize_blockwise_bf16_nf4 cgemm_4bit_inference_naive_fp16 = bnb.functional.lib.cgemm_4bit_inference_naive_fp16 cgemm_4bit_inference_naive_bf16 = bnb.functional.lib.cgemm_4bit_inference_naive_bf16 +torch_mm = torch.mm +torch_mv = torch.mv +torch_matmul = torch.matmul +torch_addmm = torch.addmm +torch_empty = torch.empty def QUANT_STATE(W): return getattr(W, "quant_state", None) @@ -194,8 +199,8 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False WEIGHT_BUFFER = WEIGHT_BUFFERS[device_index] ABSMAX_BUFFER = ABSMAX_BUFFERS[device_index] if WEIGHT_BUFFER is None: - WEIGHT_BUFFERS[device_index] = WEIGHT_BUFFER = torch.empty(size, dtype = dtype, device = device, requires_grad = False) - ABSMAX_BUFFERS[device_index] = ABSMAX_BUFFER = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) + WEIGHT_BUFFERS[device_index] = WEIGHT_BUFFER = torch_empty(size, dtype = dtype, device = device, requires_grad = False) + ABSMAX_BUFFERS[device_index] = ABSMAX_BUFFER = torch_empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) if size > WEIGHT_BUFFER.numel(): WEIGHT_BUFFER.resize_(size) if n_elements_absmax > ABSMAX_BUFFER.numel(): ABSMAX_BUFFER.resize_(n_elements_absmax) @@ -204,11 +209,11 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False out_absmax = ABSMAX_BUFFER[:n_elements_absmax] else: if out is None: - out = torch.empty(shape, dtype = dtype, device = device, requires_grad = False) + out = torch_empty(shape, dtype = dtype, device = device, requires_grad = False) else: assert(out.shape 
== shape) assert(out.dtype == dtype) - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) + out_absmax = torch_empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) pass # NF4 dequantization of statistics @@ -258,11 +263,11 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False # Create weight matrix if out is None: - out = torch.empty(shape, dtype = dtype, device = device, requires_grad = False) + out = torch_empty(shape, dtype = dtype, device = device, requires_grad = False) else: assert(out.shape == shape) assert(out.dtype == dtype) - out_absmax = torch.empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) + out_absmax = torch_empty(n_elements_absmax, dtype = torch.float32, device = device, requires_grad = False) # Do dequantization ptr_out_absmax = get_ptr(out_absmax) @@ -286,7 +291,7 @@ def fast_dequantize(W, quant_state = None, out = None, use_global_buffer = False if HAS_CUDA_STREAM: def fast_gemv(X, W, quant_state, out = None): - if quant_state is None: return torch.matmul(X, W, out = out) + if quant_state is None: return torch_matmul(X, W, out = out) # For fast X @ W where seq_len == 1 # From https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L1469 _, q_len, hd = X.shape @@ -318,7 +323,7 @@ def fast_gemv(X, W, quant_state, out = None): bout = shape[0] if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = device) + out = torch_empty((1, 1, bout,), dtype = dtype, device = device) # else: # assert(out.shape == (1, 1, bout,)) # pass @@ -336,7 +341,7 @@ def fast_gemv(X, W, quant_state, out = None): ldb = ctypes_c_int32(ldb) ldc = ctypes_c_int32(ldc) - df = torch.empty(absmax.shape, dtype = torch.float32, device = device) + df = torch_empty(absmax.shape, dtype = torch.float32, device = device) with torch_cuda_device(device): cdequantize_blockwise_fp32( 
get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), @@ -385,7 +390,7 @@ def fast_gemv(X, W, quant_state, out = None): device = W.device if out is None: - out = torch.empty((1, 1, bout,), dtype = dtype, device = device) + out = torch_empty((1, 1, bout,), dtype = dtype, device = device) # else: # assert(out.shape == (1, 1, bout,)) # pass @@ -403,7 +408,7 @@ def fast_gemv(X, W, quant_state, out = None): ldb = ctypes_c_int32(ldb) ldc = ctypes_c_int32(ldc) - df = torch.empty(absmax.shape, dtype = torch.float32, device = device) + df = torch_empty(absmax.shape, dtype = torch.float32, device = device) cdequantize_blockwise_fp32( get_ptr(code2), get_ptr(absmax), get_ptr(absmax2), get_ptr(df), ctypes_c_int(blocksize2), ctypes_c_int(df.numel()), @@ -423,10 +428,6 @@ def fast_gemv(X, W, quant_state, out = None): pass -torch_mm = torch.mm -torch_mv = torch.mv -torch_matmul = torch.matmul -torch_addmm = torch.addmm def fast_linear_forward(proj, X, temp_lora = None, out = None): W, W_quant, lora_A, lora_B, lora_S, bias = get_lora_parameters_bias(proj) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 43828a358a..66926bca10 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.3" +__version__ = "2025.3.4" __all__ = [ "SUPPORTS_BFLOAT16", @@ -39,8 +39,8 @@ "create_boolean_mask", "torch_amp_custom_fwd", "torch_amp_custom_bwd", - "accelerate_old_send_to_device", - "accelerate_new_send_to_device", + # "accelerate_old_send_to_device", + # "accelerate_new_send_to_device", "patch_gradient_accumulation_fix", "patch_compiling_bitsandbytes", "patch_regional_compilation", @@ -241,24 +241,24 @@ def patch_mistral_nemo_config(config): # ============================================= # Fix KeyError: 'Cache only has 0 layers, attempted to access layer with index 0' -import transformers.cache_utils -if hasattr(transformers.cache_utils, "DynamicCache") and \ - transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": - - source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) - start = source.find("def") - spaces = start*" " - source = source.split("\n") - source = "\n".join(x[start:] for x in source) - where = source.find("raise KeyError") - source = source[:where] + \ - f"if len(self) == 0:\n{spaces}{spaces}"\ - " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ - f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] - source = source.replace("__getitem__", "__cache_utils_getitem__", 1) - exec(source) - transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ -pass +# import transformers.cache_utils +# if hasattr(transformers.cache_utils, "DynamicCache") and \ +# transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__": + +# source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__) +# start = source.find("def") +# spaces = start*" " +# source = source.split("\n") +# source = "\n".join(x[start:] for x in source) +# where = source.find("raise KeyError") +# source = source[:where] + \ +# f"if len(self) == 
0:\n{spaces}{spaces}"\ +# " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \ +# f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:] +# source = source.replace("__getitem__", "__cache_utils_getitem__", 1) +# exec(source) +# transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__ +# pass # ============================================= # ============================================= @@ -411,25 +411,25 @@ def _is_openai_available(): return False # ============================================= # Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout' -accelerate_old_send_to_device = None -accelerate_new_send_to_device = None -if xformers_version is not None and Version(xformers_version) >= Version("0.0.27"): - import accelerate.utils.operations - if hasattr(accelerate.utils.operations, "send_to_device") and \ - accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": - accelerate_old_send_to_device = accelerate.utils.operations.send_to_device - from accelerate.utils.operations import * - send_to_device = inspect.getsource(accelerate.utils.operations.send_to_device) - send_to_device = re.sub( - r"([ ]{4,})return tensor\.to\(device\)", - r"\1try: return tensor.to(device)\n\1except: return tensor", - send_to_device, - ).replace("def send_to_device", "def _fixed_send_to_device") - exec(send_to_device) - # accelerate.utils.operations.send_to_device = _fixed_send_to_device - accelerate_new_send_to_device = _fixed_send_to_device - pass -pass +# accelerate_old_send_to_device = None +# accelerate_new_send_to_device = None +# if xformers_version is not None and Version(xformers_version) >= Version("0.0.27"): +# import accelerate.utils.operations +# if hasattr(accelerate.utils.operations, "send_to_device") and \ +# accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device": 
+# accelerate_old_send_to_device = accelerate.utils.operations.send_to_device +# from accelerate.utils.operations import * +# send_to_device = inspect.getsource(accelerate.utils.operations.send_to_device) +# send_to_device = re.sub( +# r"([ ]{4,})return tensor\.to\(device\)", +# r"\1try: return tensor.to(device)\n\1except: return tensor", +# send_to_device, +# ).replace("def send_to_device", "def _fixed_send_to_device") +# exec(send_to_device) +# # accelerate.utils.operations.send_to_device = _fixed_send_to_device +# accelerate_new_send_to_device = _fixed_send_to_device +# pass +# pass # Transformers 4.46 breaks dynamic caching. This is a hack import transformers.generation.configuration_utils diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bcabbd5125..3dacf5cdd5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -15,7 +15,7 @@ import torch import gc import math -from functools import partial +import functools from typing import Optional, Tuple, List, Union from ._utils import * from ._utils import patch_unsloth_smart_gradient_checkpointing @@ -65,6 +65,7 @@ from peft import PeftModelForCausalLM from ..save import patch_saving_functions import re, os, inspect, math, sys +import types try: from huggingface_hub.utils import get_token except: @@ -217,14 +218,14 @@ def LlamaAttention_fast_forward_inference( RH_Q = self.RH_Q RH_Q[:,:,:,:h] = Qn[:,:,:,h:] RH_Q[:,:,:,h:] = Qn[:,:,:,:h] - torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) + RH_Q[:,:,:,:h].neg_() # torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h]) Qn *= cos Qn.addcmul_(RH_Q, sin) RH_K = RH_Q[:,:n_kv_heads,:,:] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0") RH_K[:,:,:,:h] = Kn[:,:,:,h:] RH_K[:,:,:,h:] = Kn[:,:,:,:h] - torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) + RH_K[:,:,:,:h].neg_() #torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h]) Kn *= cos Kn.addcmul_(RH_K, sin) @@ -400,19 +401,20 @@ def LlamaAttention_fast_forward( else: # Extend RoPE 
dynamically to fit in VRA rotary_emb = self.rotary_emb - rotary_emb.extend_rope_embedding(V, seq_len=kv_seq_len) + rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len) if position_ids is None: # Useful for LongRoPE cos, sin = rotary_emb.get_cached(kv_seq_len) else: - cos, sin = rotary_emb(V, seq_len=kv_seq_len) + cos, sin = rotary_emb(V, seq_len = kv_seq_len) - Q, K = ( - fast_rope_embedding(Q, K, cos, sin) - if position_ids is None - else inplace_rope_embedding(Q, K, cos, sin, position_ids) - ) + # Q, K = ( + # fast_rope_embedding(Q, K, cos, sin) + # if position_ids is None + # else inplace_rope_embedding(Q, K, cos, sin, position_ids) + # ) + Q, K = fast_rope_embedding(Q, K, cos, sin) if past_key_value is not None: K = torch.cat([past_key_value[0], K], dim = 2) @@ -924,7 +926,6 @@ def LlamaModel_fast_forward_inference( X = X.to(self.config.torch_dtype) bsz, q_len, hd = X.shape assert(q_len == 1) - # Get saved buffers to reduce memory movement residual = torch.empty((bsz, q_len, hd), dtype = torch.float32, device = "cuda:0") _XX = torch.empty((2, bsz, q_len, hd), dtype = torch.float32, device = "cuda:0") @@ -1020,7 +1021,6 @@ def _CausalLM_fast_forward( logits_to_keep: Optional[int] = 0, *args, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: - if past_key_values is not None: outputs = fast_forward_inference( self, @@ -1069,7 +1069,7 @@ def _CausalLM_fast_forward( if labels is not None: labels = labels.to(lm_head_device) # Output last hidden states without logits if asked - if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": + if self.training and os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1": if num_logits_to_keep != 0: hidden_states = hidden_states[:, -num_logits_to_keep:, :] return CausalLMOutputWithPast( @@ -1534,78 +1534,58 @@ def extend_rope_embedding(self, x, seq_len): pass -def _wrap_fast_inference(generate, device_type, dtype, model): - # Wraps inference with bfloat16 / float16 - @torch.inference_mode - def 
_fast_generate(*args, **kwargs): - - if hasattr(model, "config") and hasattr(model.config, "max_position_embeddings"): - if "input_ids" in kwargs and kwargs["input_ids"] is not None and "max_new_tokens" in kwargs: - if kwargs["input_ids"].shape[-1] + kwargs["max_new_tokens"] > model.config.max_position_embeddings: - raise ValueError( - f'Unsloth: input length {kwargs["input_ids"].shape[-1]} + max_new_tokens {kwargs["max_new_tokens"]} exceeds the maximum sequence length of {model.config.max_position_embeddings}!\n'\ - 'You will need to do long context extension by increasing the `max_seq_length` in `FastLanguageModel.from_pretrained`.' - ) - pass - - # Set a flag for generation! - internal_model = model - while hasattr(internal_model, "model"): - internal_model._flag_for_generation = True - internal_model = internal_model.model - pass - internal_model._flag_for_generation = True +def unsloth_fast_generate( + self, + *args, + **kwargs, +): + FastLlamaModel.for_inference(self) - # Must patch accelerate for Xformers - if accelerate_new_send_to_device is not None: - import accelerate.utils.operations - accelerate.utils.operations.send_to_device = accelerate_new_send_to_device - pass + dtype = _get_dtype(self.config.torch_dtype) - # For newer HF - kwargs["cache_implementation"] = "dynamic" - # For num_logits_to_keep - kwargs["num_logits_to_keep"] = 1 + if hasattr(self, "config") and hasattr(self.config, "max_position_embeddings"): + if "input_ids" in kwargs and kwargs["input_ids"] is not None and "max_new_tokens" in kwargs: + if kwargs["input_ids"].shape[-1] + kwargs["max_new_tokens"] > self.config.max_position_embeddings: + raise ValueError( + f'Unsloth: input length {kwargs["input_ids"].shape[-1]} + max_new_tokens {kwargs["max_new_tokens"]} exceeds the maximum sequence length of {model.config.max_position_embeddings}!\n'\ + 'You will need to do long context extension by increasing the `max_seq_length` in `FastLanguageModel.from_pretrained`.' 
+ ) + pass - # Remove token_type_ids - kwargs.pop("token_type_ids", None) + # Must patch accelerate for Xformers + # if accelerate_new_send_to_device is not None: + # import accelerate.utils.operations + # accelerate.utils.operations.send_to_device = accelerate_new_send_to_device + # pass - # Check pad_token - model_eos_token_id = getattr(model.config, "eos_token_id", None) - if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): - model_eos_token_id = model_eos_token_id[0] + # For newer HF + kwargs["cache_implementation"] = "dynamic" + # For num_logits_to_keep + kwargs["num_logits_to_keep"] = 1 - kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + # Remove token_type_ids + kwargs.pop("token_type_ids", None) - # Set pad token - # old_pad_token_id = getattr(model.config, "pad_token_id", None) - # old_eos_token_id = getattr(model.config, "eos_token_id", None) - # model.config.pad_token_id = old_eos_token_id + # Check pad_token + model_eos_token_id = getattr(self.config, "eos_token_id", None) + if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): + model_eos_token_id = model_eos_token_id[0] - # Autocasted - with torch.autocast(device_type = device_type, dtype = dtype): - output = generate(*args, **kwargs) - pass + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) - # Revert - # model.config.pad_token_id = old_pad_token_id + # Mixed precision autocast + with torch.inference_mode(), torch.autocast(device_type = "cuda", dtype = dtype): + output = self._old_generate(*args, **kwargs) + pass - # Unset a flag for generation! 
- internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation - internal_model = internal_model.model - pass - if hasattr(internal_model, "_flag_for_generation"): del internal_model._flag_for_generation + # Return accelerate back + # if accelerate_new_send_to_device is not None: + # accelerate.utils.operations.send_to_device = accelerate_old_send_to_device + # pass - # Return accelerate back - if accelerate_new_send_to_device is not None: - accelerate.utils.operations.send_to_device = accelerate_old_send_to_device - pass + FastLlamaModel.for_training(self) - return output - pass - return _fast_generate + return output pass @@ -1682,8 +1662,12 @@ def from_pretrained( gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + from importlib.metadata import version as importlib_version + try: vllm_version = f" vLLM: {importlib_version('vllm')}." + except: vllm_version = "" + statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_patcher.__name__[4:-5]} patching. Transformers: {transformers_version}.{vllm_version}\n"\ f" {chr(92)}{chr(92)} /| {gpu_stats.name}. Num GPUs = {torch.cuda.device_count()}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ @@ -1825,7 +1809,7 @@ def from_pretrained( model = convert_vllm_to_huggingface(quant_state_dict, model_config, dtype) model.vllm_engine = llm model.fast_generate = model.vllm_engine.generate - model.fast_generate_batches = partial(generate_batches, model.vllm_engine) + model.fast_generate_batches = functools.partial(generate_batches, model.vllm_engine) pass # Return old flag os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer @@ -1986,6 +1970,11 @@ def from_pretrained( layer.self_attn.rotary_emb = rotary_emb pass + # Patch generate + if model.generate.__name__ != "unsloth_fast_generate": + model._old_generate = model.generate + unsloth_fast_generate.__doc__ = model._old_generate.__doc__ + model.generate = types.MethodType(unsloth_fast_generate, model) return model, tokenizer pass @@ -2410,12 +2399,20 @@ def get_peft_model( model.fast_generate_batches = vllm_fast_generate_batches # Also saving and loading LoRA - from functools import partial from unsloth_zoo.vllm_utils import save_lora, load_lora - model.save_lora = partial(save_lora, model) - model.load_lora = partial(load_lora, model) + model.save_lora = functools.partial(save_lora, model) + model.load_lora = functools.partial(load_lora, model) pass + # Add for_inference and for_training + model.for_training = functools.partial(FastLlamaModel.for_training, model) + model.for_inference = functools.partial(FastLlamaModel.for_inference, model) + + # Patch generate + if model.generate.__name__ != "unsloth_fast_generate": + model._old_generate = model.generate + unsloth_fast_generate.__doc__ = model._old_generate.__doc__ + model.generate = types.MethodType(unsloth_fast_generate, model) return model pass @@ -2486,7 +2483,6 @@ def patch_peft_model( n_mlp = 0 n_qkv = 0 n_o = 0 - import types active_adapter = model.active_adapters[0] if \ hasattr(model, "active_adapters") else model.active_adapter @@ -2496,9 +2492,8 @@ def patch_peft_model( bias = 
model.peft_config[active_adapter].bias # We also do not inplace edit QKV for Cohere! - from functools import partial _apply_lora_mlp = \ - partial(apply_lora_mlp, inplace = False) \ + functools.partial(apply_lora_mlp, inplace = False) \ if model_type == "cohere" else \ apply_lora_mlp pass @@ -2611,52 +2606,30 @@ def patch_peft_model( pass # Add for_inference and for_training - model.for_training = partial(FastLlamaModel.for_training, model) - model.for_inference = partial(FastLlamaModel.for_inference, model) + model.for_training = functools.partial(FastLlamaModel.for_training, model) + model.for_inference = functools.partial(FastLlamaModel.for_inference, model) return model pass @staticmethod def for_inference(model): - # if model.config.model_type == "qwen2": - # FastLlamaModel.for_training(model) - # return - # pass + if not hasattr(model, "parameters"): + raise TypeError("Unsloth: I think you're passing a tokenizer, not the model to for_inference!") + def _for_inference(m): + if hasattr(m, "gradient_checkpointing"): m.gradient_checkpointing = False + if hasattr(m, "training"): m.training = False + # Pad tokenizer to the left + if hasattr(m, "_saved_temp_tokenizer"): m._saved_temp_tokenizer.padding_side = "left" + # Set a flag for generation! 
+ m._flag_for_generation = True + pass m = model while hasattr(m, "model"): - if hasattr(m, "gradient_checkpointing"): - m.gradient_checkpointing = False - if hasattr(m, "training"): - m.training = False - # Pad tokenizer to the left - if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.padding_side = "left" + _for_inference(m) m = m.model - pass - if hasattr(m, "gradient_checkpointing"): - m.gradient_checkpointing = False - if hasattr(m, "training"): - m.training = False - # Pad tokenizer to the left - if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.padding_side = "left" - - # Also check if lm_head / embeddings are trained - internal_model = model - while not hasattr(internal_model, "lm_head"): - internal_model = internal_model.model - pass - lm_head = internal_model.lm_head.weight - device_type = lm_head.device.type - dtype = _get_dtype(model.config.torch_dtype) - - # Wrap model.generate - if model.generate.__name__ != "_fast_generate": - model._unwrapped_old_generate = model.generate - model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) - pass + _for_inference(m) # Also disable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -2667,13 +2640,14 @@ def for_inference(model): embeddings = model.get_output_embeddings() if hasattr(embeddings, "training"): embeddings.training = False pass - return model pass @staticmethod def for_training(model, use_gradient_checkpointing = True): + if not hasattr(model, "parameters"): + raise TypeError("Unsloth: I think you're passing a tokenizer, not the model to for_training!") # Delete all fast inference loras for param in model.parameters(): @@ -2681,30 +2655,19 @@ def for_training(model, use_gradient_checkpointing = True): del param._fast_lora pass + def _for_training(m): + if hasattr(m, "gradient_checkpointing"): m.gradient_checkpointing = use_gradient_checkpointing + if hasattr(m, "training"): m.training = True + # Pad tokenizer to the 
left + if hasattr(m, "_saved_temp_tokenizer"): m._saved_temp_tokenizer.padding_side = "right" + # Set a flag for generation! + if hasattr(m, "_flag_for_generation"): del m._flag_for_generation + pass m = model while hasattr(m, "model"): - if hasattr(m, "gradient_checkpointing"): - m.gradient_checkpointing = use_gradient_checkpointing - if hasattr(m, "training"): - m.training = True - # Pad tokenizer to the right - if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.padding_side = "right" + _for_training(m) m = m.model - pass - if hasattr(m, "gradient_checkpointing"): - m.gradient_checkpointing = use_gradient_checkpointing - if hasattr(m, "training"): - m.training = True - # Pad tokenizer to the right - if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.padding_side = "right" - - # Also revert model.generate - if hasattr(model, "_unwrapped_old_generate"): - model.generate = model._unwrapped_old_generate - del model._unwrapped_old_generate - pass + _for_training(m) # Also re-enable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -2715,7 +2678,6 @@ def for_training(model, use_gradient_checkpointing = True): embeddings = model.get_output_embeddings() if hasattr(embeddings, "training"): embeddings.training = True pass - return model pass pass diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 3a9d651d11..c9ea922272 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -60,7 +60,7 @@ def PatchRL(FastLanguageModel): def unsloth_unwrap_model_for_generation(model, *args, **kwargs): with unwrap_model_for_generation(model, *args, **kwargs) as unwrapped_model: # Put the model in inference mode. 
- FastLanguageModel.for_inference(unwrapped_model) + FastLanguageModel.for_inference(model) # We must use .clone for Unsloth since we force inference_mode # Rather we should have used no_grad diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 5ea61cb9b3..7462d55944 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -78,6 +78,25 @@ def sft_trainer_prepare_dataset(function_name, function): if function_name != "_prepare_non_packed_dataloader" and \ function_name != "_prepare_dataset": return function + fast_sft_prepare_dataset = RL_REPLACEMENTS.get("sft_prepare_dataset", None) + if fast_sft_prepare_dataset is not None and "pack_examples" in function: + params = inspect.signature(fast_sft_prepare_dataset).parameters.keys() + params = ".*?".join(params) + matched = re.match( + r"[\s]{0,}def _prepare_dataset\(.*?" + params + r".*?\)", + function, + flags = re.MULTILINE | re.DOTALL, + ) + if matched: + # Use fast version! 
+ function = inspect.getsource(fast_sft_prepare_dataset) + function = function.split("\n") + function = "\n".join(" "*4 + x for x in function) + function = function.replace("def sft_prepare_dataset", "def _prepare_dataset") + return function + pass + pass + check_text = \ "if 'tokenizer' not in locals(): tokenizer = processing_class\n"\ "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"\ From 53202efb55166bd933f0f93d4a07443e03878d2f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 5 Mar 2025 12:58:24 -0800 Subject: [PATCH 1065/1088] Python 3.12 fix --- pyproject.toml | 2 +- unsloth/models/_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5a9d92202a..6bf403849d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "unsloth" dynamic = ["version"] description = "2-5X faster LLM finetuning" readme = "README.md" -requires-python = ">=3.9,<=3.12" +requires-python = ">=3.9,<3.13" license = {file = "LICENSE"} keywords = ["ai", "llm",] authors = [ diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 66926bca10..7d6bbfb78b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.4" +__version__ = "2025.3.5" __all__ = [ "SUPPORTS_BFLOAT16", From 8a675d86c218318bc499fcb53d0aeb5061f88875 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Mar 2025 02:32:32 -0800 Subject: [PATCH 1066/1088] Logits fixes (#1916) * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * 
Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. * add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/__init__.py | 5 ++--- unsloth/models/_utils.py | 14 +++++++++----- unsloth/models/rl.py | 11 +++++++++++ 5 files changed, 25 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6bf403849d..1d206913a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.2", + "unsloth_zoo>=2025.3.4", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.1", + "unsloth_zoo>=2025.3.4", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 8439ab8212..4336ec494b 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: 
unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.2"): + if Version(unsloth_zoo_version) < Version("2025.3.4"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index e11cd54417..a187ee577a 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from .granite import FastGraniteModel -from .loader import FastLanguageModel, FastVisionModel from .llama import FastLlamaModel +from .loader import FastLanguageModel, FastVisionModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model +from .granite import FastGraniteModel from .dpo import PatchDPOTrainer, PatchKTOTrainer from ._utils import is_bfloat16_supported, __version__ from .rl import PatchFastRL, vLLMSamplingParams diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7d6bbfb78b..c01e0ccc82 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.5" +__version__ = "2025.3.6" __all__ = [ "SUPPORTS_BFLOAT16", @@ -1050,7 +1050,10 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): pass pass - if num_items_in_batch is None: + # Get gradient accumulation steps if possible + if num_items_in_batch is None and \ + getattr(self, "args", {}).get("gradient_accumulation_steps", 1) != 1: + name = (model.base_model.model if hasattr(model, "base_model") else model).__class__.__name__ logger.warning_once( f"Unsloth: Not an error, but {name} does not accept `num_items_in_batch`.\n"\ @@ -1245,10 +1248,11 @@ def unsloth_compile_transformers( # os.environ['UNSLOTH_RETURN_LOGITS'] = '1' LOGITS_ERROR_STRING = \ "Unsloth: Logits are empty from 2024.11 onwards. To get raw logits again, please "\ - 'set the environment variable `UNSLOTH_RETURN_LOGITS` to `"1" BEFORE starting to train ie before `trainer.train()`. For example:\n\n'\ - "import os\n"\ + 'set the environment variable `UNSLOTH_RETURN_LOGITS` to `"1" BEFORE starting to train ie before `trainer.train()`. For example:\n'\ + "```\nimport os\n"\ "os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"\ - "... trainer.train() ..." + "trainer.train()\n```\n"\ + "No need to restart your console - just add `os.environ['UNSLOTH_RETURN_LOGITS'] = '1'` before trainer.train() and re-run the cell!" 
def raise_logits_error(*args, **kwargs): raise NotImplementedError(LOGITS_ERROR_STRING) def return_none(*args, **kwargs): return None diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index c9ea922272..cf9c16514e 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -284,6 +284,17 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): extra_args += eval_changes pass + # Force logits to be produced if preprocess_logits_for_metrics or compute_metrics is used + if "model" in call_args: + logits_check = \ + "_output_logits = False\n"\ + "if locals().get('compute_metrics', None) is not None: _output_logits = True\n"\ + "if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True\n"\ + "if _output_logits:\n"\ + " os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n" + extra_args += logits_check + pass + # Check max_seq_length if "model" in call_args: length_check = \ From 5efb1a76efe41db1057a8c06dcb0f0d5b6fa6ff2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Mar 2025 05:16:15 -0800 Subject: [PATCH 1067/1088] Bug fixes (#1920) * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update pyproject.toml * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update 
llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. * add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) 
Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. * Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update 
_utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/_utils.py | 5 ++--- unsloth/models/llama.py | 7 ++----- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1d206913a9..01636e75f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.4", + "unsloth_zoo>=2025.3.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.4", + "unsloth_zoo>=2025.3.5", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 
4336ec494b..9ed356db54 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.4"): + if Version(unsloth_zoo_version) < Version("2025.3.5"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c01e0ccc82..7ac35d71b4 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.6" +__version__ = "2025.3.7" __all__ = [ "SUPPORTS_BFLOAT16", @@ -1052,8 +1052,7 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): # Get gradient accumulation steps if possible if num_items_in_batch is None and \ - getattr(self, "args", {}).get("gradient_accumulation_steps", 1) != 1: - + getattr(getattr(self, "args", self), "gradient_accumulation_steps", 1) != 1: name = (model.base_model.model if hasattr(model, "base_model") else model).__class__.__name__ logger.warning_once( f"Unsloth: Not an error, but {name} does not accept `num_items_in_batch`.\n"\ diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3dacf5cdd5..a490fb8ab4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1843,7 +1843,7 @@ def from_pretrained( else: inner_training_loop = Trainer._original_training_loop except: - raise RuntimeError('Unsloth currently does not support multi GPU setups - but we are working on it!') + raise RuntimeError('Unsloth: Unsuccessfully patched inner_training_loop') pass import transformers.trainer @@ -1869,7 +1869,7 @@ def from_pretrained( f"{chr(92)} / Data Parallel GPUs = {args.world_size} | Total batch size ({self._train_batch_size} x {args.gradient_accumulation_steps} x 
{args.world_size}) = {total_train_batch_size:,}\\n"\\ f' "-____-" Trainable parameters = {get_model_param_count(model, trainable_only=True):,}/{get_model_param_count(model):,} ({get_model_param_count(model, trainable_only=True)/get_model_param_count(model)*100:.2f}% trained)' logger.warning(debug_info) - import subprocess, re, gc + import gc for _ in range(3): gc.collect() torch.cuda.empty_cache()""" @@ -1897,9 +1897,6 @@ def from_pretrained( "_inner_training_loop", "_fast_inner_training_loop", 1, ) - exec(inner_training_loop, globals()) - - Trainer._inner_training_loop = _fast_inner_training_loop inner_training_loop = inner_training_loop.replace( "is_torch_tpu_available()", "False", From ee60e4ea95f1452b6bc4ab32dfd5f868e3086c89 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Mar 2025 14:39:04 -0800 Subject: [PATCH 1068/1088] Bug fixes --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 01636e75f1..7dfca63faa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.5", + "unsloth_zoo>=2025.3.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.5", + "unsloth_zoo>=2025.3.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 9ed356db54..38453f3614 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.5"): + if Version(unsloth_zoo_version) < Version("2025.3.7"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py 
b/unsloth/models/_utils.py index 7ac35d71b4..37c69ef877 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.7" +__version__ = "2025.3.8" __all__ = [ "SUPPORTS_BFLOAT16", From 81c967b9a50335bb1a49ed572f11c42128533adc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 6 Mar 2025 14:39:04 -0800 Subject: [PATCH 1069/1088] Big bug fixes Fixes: 1. #1932 2. #1931 3. #1928 4. #1925 5. #1921 6. #1918 7. #1923 8. #1922 9. #1921 Please do: `pip install --upgrade --force-reinstall --no-deps unsloth unsloth_zoo` for local machines. Colab / Kaggle please restart and delete / disconnect runtime and redo Apologies on the issues! --- pyproject.toml | 4 ++-- unsloth/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 01636e75f1..7dfca63faa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.5", + "unsloth_zoo>=2025.3.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.5", + "unsloth_zoo>=2025.3.7", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 9ed356db54..38453f3614 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.5"): + if Version(unsloth_zoo_version) < Version("2025.3.7"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 7ac35d71b4..37c69ef877 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.7" +__version__ = "2025.3.8" __all__ = [ "SUPPORTS_BFLOAT16", From 2b5d81d75281c02480927cf3ca0dea7c8e98d484 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 8 Mar 2025 04:34:55 -0800 Subject: [PATCH 1070/1088] Bug fixes (#1951) * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update _utils.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) 
(#1649) * edit save.py to fix gguf saving breaks. * add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py * Bug fixes * FastModel * __doc__ * Update vision.py * Update loader.py * Update loader.py * Update loader.py * version --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> --- pyproject.toml | 4 +- unsloth/__init__.py | 2 +- unsloth/models/__init__.py | 2 +- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 18 ++- unsloth/models/loader.py | 39 ++++-- unsloth/models/mapper.py | 15 +++ unsloth/models/vision.py | 248 ++++++++++++++++++++----------------- 8 files changed, 196 insertions(+), 134 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7dfca63faa..5b9dc8bb57 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.7", + "unsloth_zoo>=2025.3.8", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.7", + 
"unsloth_zoo>=2025.3.8", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 38453f3614..5bbb85d520 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.7"): + if Version(unsloth_zoo_version) < Version("2025.3.8"): try: os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index a187ee577a..317525c793 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. from .llama import FastLlamaModel -from .loader import FastLanguageModel, FastVisionModel +from .loader import FastLanguageModel, FastVisionModel, FastTextModel, FastModel from .mistral import FastMistralModel from .qwen2 import FastQwen2Model from .granite import FastGraniteModel diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 37c69ef877..03eb21f4eb 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.8" +__version__ = "2025.3.9" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a490fb8ab4..3504037b66 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -91,7 +91,7 @@ def original_apply_o(self, X): pass from math import sqrt as math_sqrt -KV_CACHE_INCREMENT = 256 # KV Cache update size +KV_CACHE_INCREMENT = 512 # KV Cache update size torch_nn_functional_softmax = torch.nn.functional.softmax # SDPA has GQA internally SDPA_HAS_GQA = "enable_gqa" in scaled_dot_product_attention.__doc__ @@ -1656,6 +1656,13 @@ def from_pretrained( "Are you certain you want to do remote code execution?" ) pass + if fast_inference: + import platform + if platform.system().lower() == 'windows': + print("Unsloth: vLLM does not work in Windows! Will use Unsloth inference!") + fast_inference = False + pass + if token is None: token = get_token() if model_patcher is None: model_patcher = FastLlamaModel SUPPORTS_BFLOAT16 = is_bfloat16_supported() @@ -1966,12 +1973,17 @@ def from_pretrained( for layer in model.model.layers: layer.self_attn.rotary_emb = rotary_emb pass - + + # Add for_inference and for_training + model.for_training = functools.partial(FastLlamaModel.for_training, model) + model.for_inference = functools.partial(FastLlamaModel.for_inference, model) + # Patch generate if model.generate.__name__ != "unsloth_fast_generate": model._old_generate = model.generate unsloth_fast_generate.__doc__ = model._old_generate.__doc__ model.generate = types.MethodType(unsloth_fast_generate, model) + pass return model, tokenizer pass @@ -2404,7 +2416,7 @@ def get_peft_model( # Add for_inference and for_training model.for_training = functools.partial(FastLlamaModel.for_training, model) model.for_inference = functools.partial(FastLlamaModel.for_inference, model) - + # Patch generate if model.generate.__name__ != "unsloth_fast_generate": model._old_generate = model.generate diff --git 
a/unsloth/models/loader.py b/unsloth/models/loader.py index 30128cd134..800c016cc8 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -383,10 +383,13 @@ def from_pretrained( patch_loss_functions, post_patch_loss_function, ) -from .vision import FastBaseVisionModel - +from .vision import FastBaseModel +from transformers import ( + AutoModelForVision2Seq, + AutoModelForCausalLM, +) -class FastVisionModel(FastBaseVisionModel): +class FastModel(FastBaseModel): @staticmethod def from_pretrained( model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", @@ -413,7 +416,7 @@ def from_pretrained( patch_compiling_bitsandbytes() if use_gradient_checkpointing == "unsloth": patch_unsloth_smart_gradient_checkpointing(dtype = dtype) - + old_model_name = model_name if not use_exact_model_name: model_name = get_model_name(model_name, load_in_4bit) @@ -427,7 +430,7 @@ def from_pretrained( from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, are_progress_bars_disabled was_disabled = are_progress_bars_disabled() disable_progress_bars() - + autoconfig_error = None peft_error = None try: @@ -458,7 +461,7 @@ def from_pretrained( # Old transformers versions check both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 - + # New transformers need to check manually. 
if SUPPORTS_LLAMA32: # Check if folder exists locally @@ -515,9 +518,12 @@ def from_pretrained( if not was_disabled: enable_progress_bars() do_logging = os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") == "1" - redirector = sys.stdout if do_logging else open(os.devnull, "w") + if do_logging: + redirector = contextlib.nullcontext() + else: + redirector = contextlib.redirect_stdout(open(os.devnull, "w")) - with contextlib.redirect_stdout(redirector): + with redirector: patch_loss_functions(torch_compile = False) model_types = unsloth_compile_transformers( model_name = model_name, @@ -547,7 +553,6 @@ def from_pretrained( return_logits = return_logits, ) pass - if do_logging: redirector.close() # Check if this is local model since the tokenizer gets overwritten if os.path.exists(os.path.join(old_model_name, "tokenizer_config.json")) and \ @@ -559,7 +564,12 @@ def from_pretrained( tokenizer_name = None pass - model, tokenizer = FastBaseVisionModel.from_pretrained( + # Check if VLM + is_vlm = (x.endswith("ForConditionalGeneration") for x in model_config.architectures) + is_vlm = is_vlm or hasattr(model_config, "vision_config") + auto_model = AutoModelForVision2Seq if is_vlm else AutoModelForCausalLM + + model, tokenizer = FastBaseModel.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, dtype = _get_dtype(dtype), @@ -570,6 +580,7 @@ def from_pretrained( revision = revision if not is_peft else None, model_types = model_types, tokenizer_name = tokenizer_name, + auto_model = auto_model, *args, **kwargs, ) @@ -617,8 +628,14 @@ def from_pretrained( trust_remote_code = trust_remote_code, ) # Patch it as well! 
- model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseModel.patch_peft_model(model, use_gradient_checkpointing) pass return model, tokenizer pass pass + +class FastVisionModel(FastModel): + pass + +class FastTextModel(FastModel): + pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index da7f449bb4..a2e609f203 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -611,6 +611,21 @@ "open-thoughts/OpenThinker-7B", "unsloth/OpenThinker-7B-bnb-4bit", ), + "unsloth/granite-3.2-2b-instruct-unsloth-bnb-4bit" : ( + "unsloth/granite-3.2-2b-instruct", + "ibm-granite/granite-3.2-2b-instruct", + "unsloth/granite-3.2-2b-instruct-bnb-4bit", + ), + "unsloth/granite-3.2-8b-instruct-unsloth-bnb-4bit" : ( + "unsloth/granite-3.2-8b-instruct", + "ibm-granite/granite-3.2-8b-instruct", + "unsloth/granite-3.2-8b-instruct-bnb-4bit", + ), + "unsloth/QwQ-32B-unsloth-bnb-4bit" : ( + "unsloth/QwQ-32B", + "Qwen/QwQ-32B", + "unsloth/QwQ-32B-bnb-4bit", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index d13d394669..ff07ef6917 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -17,6 +17,8 @@ BitsAndBytesConfig, AutoModelForVision2Seq, AutoProcessor, + AutoTokenizer, + AutoModelForCausalLM, ) from .llama import * from ..kernels import ( @@ -31,48 +33,60 @@ requires_grad_for_gradient_checkpointing, ) from triton import __version__ as triton_version +from unsloth_zoo.utils import _get_dtype +from unsloth_zoo.patching_utils import patch_model_and_tokenizer +import types +import functools __all__ = [ - "FastBaseVisionModel", + "FastBaseModel", ] -def _wrap_fast_inference(generate, device_type, dtype, model): - # Wraps inference with bfloat16 / float16 - @torch.inference_mode - def _fast_generate(*args, **kwargs): - # For num_logits_to_keep - # kwargs["num_logits_to_keep"] = 1 - # Remove token_type_ids - kwargs.pop("token_type_ids", None) +def 
unsloth_base_fast_generate( + self, + *args, + **kwargs, +): + FastBaseModel.for_inference(self) + dtype = _get_dtype(self.config.torch_dtype) - # Check pad_token - model_eos_token_id = getattr(model.config, "eos_token_id", None) - if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): - model_eos_token_id = model_eos_token_id[0] + # Check if VLM + is_vlm = (x.endswith("ForConditionalGeneration") for x in self.config.architectures) + is_vlm = is_vlm or hasattr(self.config, "vision_config") - kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + # Remove token_type_ids + kwargs.pop("token_type_ids", None) - try: - kwargs["pixel_values"] = kwargs["pixel_values"].to(model.dtype) - except: - pass + # VLMs do not allow logits_to_keep + if not is_vlm: kwargs["logits_to_keep"] = 1 - # Autocasted - with torch.autocast(device_type = device_type, dtype = dtype): - output = generate(*args, **kwargs) - pass - return output + # Check pad_token + model_eos_token_id = getattr(self.config, "eos_token_id", None) + if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"): + model_eos_token_id = model_eos_token_id[0] + + kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id) + + # Get pixel values for VLMs + try: kwargs["pixel_values"] = kwargs["pixel_values"].to(dtype) + except: pass + + # Mixed precision autocast + with torch.inference_mode(), torch.autocast(device_type = "cuda", dtype = dtype): + output = self._old_generate(*args, **kwargs) pass - return _fast_generate + + FastBaseModel.for_training(self) + return output pass -class FastBaseVisionModel: +class FastBaseModel: @staticmethod def from_pretrained( - model_name = "unsloth/llama-3-8b-bnb-4bit", + model_name = "unsloth/Llama-3.2-1B-Instruct", max_seq_length = None, dtype = None, load_in_4bit = True, @@ -81,6 +95,7 @@ def from_pretrained( trust_remote_code = False, model_types = None, tokenizer_name = None, + auto_model = 
AutoModelForVision2Seq, **kwargs, ): if trust_remote_code: @@ -94,12 +109,16 @@ def from_pretrained( gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + from importlib.metadata import version as importlib_version + try: vllm_version = f" vLLM: {importlib_version('vllm')}." + except: vllm_version = "" + statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} vision patching. Transformers: {transformers_version}.\n"\ - f" {chr(92)}{chr(92)} /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} patching. Transformers: {transformers_version}.{vllm_version}\n"\ + f" {chr(92)}{chr(92)} /| {gpu_stats.name}. Num GPUs = {torch.cuda.device_count()}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. 
FA2 = {HAS_FLASH_ATTENTION}]\n"\ - f' "-____-" Free Apache license: http://github.com/unslothai/unsloth' + f' "-____-" Free license: http://github.com/unslothai/unsloth' print(statistics) # Warn about fast transfers @@ -136,8 +155,8 @@ def from_pretrained( # Cannot be None, since HF now checks for the config if load_in_4bit: kwargs["quantization_config"] = bnb_config - - model = AutoModelForVision2Seq.from_pretrained( + + model = auto_model.from_pretrained( model_name, device_map = device_map, torch_dtype = dtype, @@ -152,26 +171,25 @@ def from_pretrained( # Counteract saved tokenizers tokenizer_name = model_name if tokenizer_name is None else tokenizer_name - tokenizer = AutoProcessor.from_pretrained( + auto_processor = AutoProcessor if auto_model is AutoModelForVision2Seq else AutoTokenizer + tokenizer = auto_processor.from_pretrained( tokenizer_name, padding_side = "right", token = token, ) # Add padding side as well - tokenizer.tokenizer.padding_side = "right" + if hasattr(tokenizer, "tokenizer"): + tokenizer.tokenizer.padding_side = "right" model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) - - # Fix up config for transformers uploading PEFT - # Not necessary anymore since we require transformers>=4.37! 
- if False: - name = model.config._name_or_path - if name.startswith("unsloth/") and name.endswith("-bnb-4bit"): - name = name[:len(name) - len("-bnb-4bit")] - model.config.update({"_name_or_path" : name}) - pass - pass + # Fix other stuff like BnB compute data types + model, tokenizer = patch_model_and_tokenizer( + model, + tokenizer, + downcast_rope = False, + fix_embeddings = False, + ) # Log Unsloth version for future fastpaths for inference if hasattr(model, "config"): @@ -187,13 +205,22 @@ def from_pretrained( # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference tokenizer.tokenizer.padding_side = "left" # Force inference - internal_model = model - while hasattr(internal_model, "model"): - internal_model._saved_temp_tokenizer = tokenizer - internal_model = internal_model.model + m = model + while hasattr(m, "model"): + m._saved_temp_tokenizer = tokenizer + # Also set is_loaded_in_8bit to disable incorrect DDP + m.is_loaded_in_8bit = True + m = m.model pass - internal_model._saved_temp_tokenizer = tokenizer - + m._saved_temp_tokenizer = tokenizer + # Also set is_loaded_in_8bit to disable incorrect DDP + m.is_loaded_in_8bit = True + + # Patch generate + if model.generate.__name__ != "unsloth_base_fast_generate": + model._old_generate = model.generate + unsloth_base_fast_generate.__doc__ = model._old_generate.__doc__ + model.generate = types.MethodType(unsloth_base_fast_generate, model) return model, tokenizer pass @@ -272,7 +299,7 @@ def get_peft_model( # Enable gradients on modules which are trainable requires_grad_for_gradient_checkpointing(model) - model = FastBaseVisionModel.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseModel.patch_peft_model(model, use_gradient_checkpointing) # Clear deleted GPU items for _ in range(3): @@ -281,6 +308,9 @@ def get_peft_model( pass patch_saving_functions(model, vision = True) + # Add for_inference and for_training + model.for_training = 
functools.partial(FastBaseModel.for_training, model) + model.for_inference = functools.partial(FastBaseModel.for_inference, model) return model pass @@ -314,62 +344,57 @@ def patch_peft_model( patch_saving_functions(model, vision = True) # Patch tokenizer to pad to the right - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" + m = model + while hasattr(m, "model"): + if hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.tokenizer.padding_side = "right" pass - internal_model = internal_model.model + # Also set is_loaded_in_8bit to disable incorrect DDP + m.is_loaded_in_8bit = True + m = m.model pass - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" + if hasattr(m, "_saved_temp_tokenizer"): + m._saved_temp_tokenizer.tokenizer.padding_side = "right" pass + # Also set is_loaded_in_8bit to disable incorrect DDP + m.is_loaded_in_8bit = True # Clear deleted GPU items for _ in range(3): gc.collect() torch.cuda.empty_cache() pass + # Add for_inference and for_training + model.for_training = functools.partial(FastBaseModel.for_training, model) + model.for_inference = functools.partial(FastBaseModel.for_inference, model) + + # Patch generate + if model.generate.__name__ != "unsloth_base_fast_generate": + model._old_generate = model.generate + unsloth_base_fast_generate.__doc__ = model._old_generate.__doc__ + model.generate = types.MethodType(unsloth_base_fast_generate, model) return model pass @staticmethod def for_inference(model): - model.gradient_checkpointing = False - model.training = False - - for name, module in model.named_modules(): - if hasattr(module, "gradient_checkpointing"): - module.gradient_checkpointing = False - if hasattr(module, "training"): - module.training = False - pass - - dtype = model.config.torch_dtype - if type(dtype) 
is str: - if dtype == "float16": dtype = torch.float16 - elif dtype == "bfloat16": dtype = torch.bfloat16 - pass - device_type = model.device.type - - # Wrap model.generate - if model.generate.__name__ != "_fast_generate": - model._unwrapped_old_generate = model.generate - model.generate = _wrap_fast_inference(model.generate, device_type, dtype, model) - pass - - # Patch tokenizer to pad to the left - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "left" + if not hasattr(model, "parameters"): + raise TypeError("Unsloth: I think you're passing a tokenizer, not the model to for_inference!") + + def _for_inference(m): + if hasattr(m, "gradient_checkpointing"): m.gradient_checkpointing = False + if hasattr(m, "training"): m.training = False + # Pad tokenizer to the left + if hasattr(m, "_saved_temp_tokenizer"): m._saved_temp_tokenizer.padding_side = "left" + # Set a flag for generation! 
+ m._flag_for_generation = True pass + m = model + while hasattr(m, "model"): + _for_inference(m) + m = m.model + _for_inference(m) # Also disable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -380,40 +405,34 @@ def for_inference(model): embeddings = model.get_output_embeddings() if hasattr(embeddings, "training"): embeddings.training = False pass - return model pass @staticmethod def for_training(model, use_gradient_checkpointing = True): - model.gradient_checkpointing = use_gradient_checkpointing - model.training = True - - for name, module in model.named_modules(): - if hasattr(module, "gradient_checkpointing"): - module.gradient_checkpointing = use_gradient_checkpointing - if hasattr(module, "training"): - module.training = True - pass + if not hasattr(model, "parameters"): + raise TypeError("Unsloth: I think you're passing a tokenizer, not the model to for_training!") - # Also revert model.generate - if hasattr(model, "_unwrapped_old_generate"): - model.generate = model._unwrapped_old_generate - del model._unwrapped_old_generate + # Delete all fast inference loras + for param in model.parameters(): + if hasattr(param, "_fast_lora"): + del param._fast_lora pass - # Patch tokenizer to pad to the right - internal_model = model - while hasattr(internal_model, "model"): - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" - pass - internal_model = internal_model.model - pass - if hasattr(internal_model, "_saved_temp_tokenizer"): - internal_model._saved_temp_tokenizer.tokenizer.padding_side = "right" + def _for_training(m): + if hasattr(m, "gradient_checkpointing"): m.gradient_checkpointing = use_gradient_checkpointing + if hasattr(m, "training"): m.training = True + # Pad tokenizer to the left + if hasattr(m, "_saved_temp_tokenizer"): m._saved_temp_tokenizer.padding_side = "right" + # Set a flag for generation! 
+ if hasattr(m, "_flag_for_generation"): del m._flag_for_generation pass + m = model + while hasattr(m, "model"): + _for_training(m) + m = m.model + _for_training(m) # Also re-enable training for embeddings for NEFTune if hasattr(model, "get_input_embeddings"): @@ -424,7 +443,6 @@ def for_training(model, use_gradient_checkpointing = True): embeddings = model.get_output_embeddings() if hasattr(embeddings, "training"): embeddings.training = True pass - return model pass pass From 78374adccd9eaeed63fe0aa27ddad67b09477a82 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 12 Mar 2025 01:23:34 -0700 Subject: [PATCH 1071/1088] Gemma 3 (#1986) * Update llama.py * GRPO optimized * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Selective Log softmax * Fix GRPO bsz * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Fix TRL * Metrics GRPO * Update rl_replacements.py * Update rl_replacements.py * No compile * Update rl.py * Remove docs * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. 
* add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py * Bug fixes * FastModel * __doc__ * Update vision.py * Update loader.py * Update loader.py * Update loader.py * version * move use_modelscope to _utils (#1938) * move use_modelscope to _utils * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Don't use revision when loading model_config and is_peft=True (#1949) * More syntax warnings (#1944) * move use_modelscope to _utils * fix * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Update loader.py * Full finetuning and other fixes * UNSLOTH_ENABLE_FULL_FINETUNING * Update loader.py * Update loader.py * Update loader.py * Update vision.py * Update vision.py * full finetuning * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * max_seq_length * Update rl.py * Update rl.py * Update rl.py * Update pyproject.toml * AutoModelForImageTextToText * Update mapper.py * Update pyproject.toml * Update _utils.py * Update _utils.py * Update _utils.py * Batch samples * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Update vision.py * Update mapper.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos 
<105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Wilson Wu <140025193+wiwu2390@users.noreply.github.com> --- pyproject.toml | 4 +- unsloth/__init__.py | 17 +++-- unsloth/models/_utils.py | 130 +++++++++---------------------------- unsloth/models/llama.py | 14 ++-- unsloth/models/loader.py | 129 +++++++++++++++++++++++++++++------- unsloth/models/mapper.py | 44 +++++++++++++ unsloth/models/rl.py | 32 ++++++++- unsloth/models/vision.py | 120 +++++++++++++++++++++++++++------- unsloth/tokenizer_utils.py | 2 +- 9 files changed, 329 insertions(+), 163 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5b9dc8bb57..667901e76f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ triton = [ ] huggingface = [ - "unsloth_zoo>=2025.3.8", + "unsloth_zoo>=2025.3.9", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", @@ -354,7 +354,7 @@ colab-ampere-torch220 = [ "flash-attn>=2.6.3", ] colab-new = [ - "unsloth_zoo>=2025.3.8", + "unsloth_zoo>=2025.3.9", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 5bbb85d520..9bcdd5cf64 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,14 +198,19 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.8"): - try: - os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") - except: + if Version(unsloth_zoo_version) < Version("2025.3.9"): + print( + "Unsloth: Updating Unsloth-Zoo utilies 
to the latest version.\n"\ + "To disable this, set os.environ['UNSLOTH_DISABLE_AUTO_UPDATES'] = '1'" + ) + if os.environ.get("UNSLOTH_DISABLE_AUTO_UPDATES", "0") == "0": try: - os.system("pip install --upgrade --no-cache-dir --no-deps --user unsloth_zoo") + os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo") except: - raise ImportError("Unsloth: Please update unsloth_zoo via `pip install --upgrade --no-cache-dir --no-deps unsloth_zoo`") + try: + os.system("pip install --upgrade --no-cache-dir --no-deps --user unsloth_zoo") + except: + raise ImportError("Unsloth: Please update unsloth_zoo via `pip install --upgrade --no-cache-dir --no-deps unsloth_zoo`") import unsloth_zoo except: raise ImportError("Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo`") diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 03eb21f4eb..c79d702b15 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__version__ = "2025.3.9" +__version__ = "2025.3.10" __all__ = [ "SUPPORTS_BFLOAT16", @@ -25,6 +25,7 @@ "__version__", "HAS_FLASH_ATTENTION", "HAS_FLASH_ATTENTION_SOFTCAPPING", + "USE_MODELSCOPE", "platform_system", "patch_tokenizer", "get_statistics", @@ -100,6 +101,7 @@ from unsloth_zoo.loss_utils import ( HAS_CUT_CROSS_ENTROPY, fused_linear_cross_entropy, + _unsloth_get_batch_samples, ) from unsloth_zoo.vision_utils import ( process_vision_info, @@ -108,6 +110,9 @@ get_transformers_model_type, unsloth_compile_transformers as _unsloth_compile_transformers, ) +from unsloth_zoo.training_utils import ( + prepare_model_for_training, +) # ============================================= # Disable some warnings which can get annoying @@ -508,67 +513,16 @@ def prepare_model_for_kbit_training( use_gradient_checkpointing : Optional = True, use_reentrant : Optional[bool] = True, ) -> Any: - """ - Calculates where to place the gradient checkpoints given n_layers. - We also freeze all other layers's gradients - - Args: - model: Any LlamaModel with layers. - use_gradient_checkpointing (`bool`, *optional*): - Default enabled. Provides memory savings by not saving all activations, - but only some. - use_reentrant (`bool`, *optional*): - https://github.com/pytorch/pytorch/blob/main/torch/utils/checkpoint.py#L354 - Optimal gradient checkpointing algorithm which will be the default in - future Pytorch versions. - """ - - # Freeze all parameters except LoRA - with torch.no_grad(): - for name, param in model.named_parameters(): - if ".lora_A." in name or ".lora_B." in name or ".lora_magnitude_vector" in name: - param.requires_grad_(True) - # Also must be in float32! 
- if param.dtype != torch.float32: - name = name.replace("base_model", "model", 1) - layer_number = re.search(r"\.[\d]{1,}\.", name).group(0) - name = name.replace(layer_number, f"[{layer_number[1:-1]}].") - name = name.replace(".weight", "", 1) - exec(f"{name}.to(torch.float32)") - pass - else: - param.requires_grad_(False) - pass - pass - - # Gradient checkpointing! - if use_gradient_checkpointing == "unsloth": - - # Saves VRAM! - original_model = model - while hasattr(original_model, "model"): - original_model._offloaded_gradient_checkpointing = True - original_model = original_model.model - pass - original_model._offloaded_gradient_checkpointing = True - - model.gradient_checkpointing_enable() - - elif use_gradient_checkpointing == True: - model.gradient_checkpointing_enable() - pass - - # If use_reentrant = True which is the Pytorch default, we just make the input requires_grad. - if use_reentrant: - if hasattr(model, "enable_input_require_grads"): - model.enable_input_require_grads() - else: - def make_inputs_require_grad(module, input, output): - output.requires_grad_(True) - model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) - pass - - return model + return prepare_model_for_training( + model = model, + use_gradient_checkpointing = use_gradient_checkpointing, + use_reentrant = use_reentrant, + full_finetuning = False, + train_layernorms = False, + train_embedding = False, + train_lm_head = False, + float32_mixed_precision = True, + ) pass # ============================================= @@ -999,44 +953,6 @@ def test_mask_creation(): pass -def _unsloth_get_batch_samples(self, epoch_iterator, num_batches): - batch_samples = [] - num_items_in_batch = None - - # Check if model allows **kwargs - model = self.model - f = model.base_model.model.forward if hasattr(model, "base_model") else model.forward - has_kwargs = tuple(inspect.signature(f).parameters.values())[-1].kind == inspect._VAR_KEYWORD - - # Iterate to find all batches - for _ 
in range(num_batches): - try: - batch_samples += [next(epoch_iterator)] - except StopIteration: - break - pass - - # Get num_items_in_batch - if has_kwargs and len(batch_samples) > 0 and "labels" in batch_samples[0]: - try: - num_items_in_batch = sum( - [(x["labels"][..., 1:] != -100).sum() for x in batch_samples] - ) - - if self.args.average_tokens_across_devices: - num_items_in_batch = self.accelerator.gather(num_items_in_batch).sum().item() - - if torch.is_tensor(num_items_in_batch): - num_items_in_batch = num_items_in_batch.item() - - except Exception as exception: - logger.warning_once(exception) - pass - - return batch_samples, num_items_in_batch -pass - - def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): num_items_in_batch = None @@ -1053,7 +969,12 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): # Get gradient accumulation steps if possible if num_items_in_batch is None and \ getattr(getattr(self, "args", self), "gradient_accumulation_steps", 1) != 1: - name = (model.base_model.model if hasattr(model, "base_model") else model).__class__.__name__ + + inner_model = model + if hasattr(inner_model, "base_model"): inner_model = inner_model. 
base_model + if hasattr(inner_model, "model"): inner_model = inner_model.model + name = inner_model.__class__.__name__ + logger.warning_once( f"Unsloth: Not an error, but {name} does not accept `num_items_in_batch`.\n"\ "Using gradient accumulation will be very slightly less accurate.\n"\ @@ -1271,3 +1192,10 @@ def __str__ (self): return LOGITS_ERROR_STRING try: exec(f"EMPTY_LOGITS.{function} = raise_{j}", globals(), locals()) except: continue pass + +USE_MODELSCOPE = os.environ.get("UNSLOTH_USE_MODELSCOPE", "0") == "1" +if USE_MODELSCOPE: + if importlib.util.find_spec("modelscope") is None: + raise ImportError(f'You are using the modelscope hub, please install modelscope by `pip install modelscope -U`') + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3504037b66..7ae6e92d11 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1913,12 +1913,12 @@ def from_pretrained( # Save max_seq_length model.max_seq_length = max_seq_length - internal_model = model - while hasattr(internal_model, "model"): - internal_model.max_seq_length = max_seq_length - internal_model = internal_model.model + m = model + while hasattr(m, "model"): + m.max_seq_length = max_seq_length + m = m.model pass - internal_model.max_seq_length = max_seq_length + m.max_seq_length = max_seq_length # We check the tokenizer first for errors if fix_tokenizer: @@ -2016,6 +2016,10 @@ def get_peft_model( temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): + if os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1": + print("Unsloth: Full finetuning is enabled, so .get_peft_model has no effect") + return model + pass transformers_set_seed(random_state) if use_gradient_checkpointing == "unsloth": diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 800c016cc8..92a166f69a 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -12,7 +12,12 @@ # See the License for the specific language governing permissions 
and # limitations under the License. -from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING +from ._utils import ( + is_bfloat16_supported, + HAS_FLASH_ATTENTION, + HAS_FLASH_ATTENTION_SOFTCAPPING, + USE_MODELSCOPE, +) from .granite import FastGraniteModel from .llama import FastLlamaModel, logger from .mistral import FastMistralModel @@ -36,14 +41,6 @@ from huggingface_hub import HfFileSystem import importlib.util -# [TODO] Move USE_MODELSCOPE to utils -USE_MODELSCOPE = os.environ.get("UNSLOTH_USE_MODELSCOPE", "0") == "1" -if USE_MODELSCOPE: - if importlib.util.find_spec("modelscope") is None: - raise ImportError(f'You are using the modelscope hub, please install modelscope by `pip install modelscope -U`') - pass -pass - # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! from unsloth_zoo.utils import Version, _get_dtype transformers_version = Version(transformers_version) @@ -76,6 +73,8 @@ def from_pretrained( max_seq_length = None, dtype = None, load_in_4bit = True, + load_in_8bit = False, + full_finetuning = False, token = None, device_map = "sequential", rope_scaling = None, @@ -94,6 +93,29 @@ def from_pretrained( disable_log_stats = True, *args, **kwargs, ): + if load_in_8bit or full_finetuning: + return FastModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, # [TODO] No effect + dtype = dtype, + load_in_4bit = load_in_4bit, + load_in_8bit = load_in_8bit, + full_finetuning = full_finetuning, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, # [TODO] No effect + fix_tokenizer = fix_tokenizer, # [TODO] No effect + trust_remote_code = trust_remote_code, + use_gradient_checkpointing = use_gradient_checkpointing, + resize_model_vocab = resize_model_vocab, # [TODO] No effect + revision = revision, + return_logits = False, # Return logits + fullgraph = True, # No graph breaks + use_exact_model_name = use_exact_model_name, + *args, **kwargs, 
+ ) + pass + if token is None: token = get_token() assert (dtype is None or dtype == torch.float16 or dtype == torch.bfloat16) @@ -153,7 +175,7 @@ def from_pretrained( # Old transformers versions check both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32 - + # New transformers need to check manually. if SUPPORTS_LLAMA32: # Check if folder exists locally @@ -202,7 +224,6 @@ def from_pretrained( model_config = AutoConfig.from_pretrained( model_name, token = token, - revision = revision, trust_remote_code = trust_remote_code, ) pass @@ -265,15 +286,32 @@ def from_pretrained( dispatch_model = FastGemma2Model elif model_type == "qwen2": dispatch_model = FastQwen2Model - elif model_type == "cohere": - dispatch_model = FastCohereModel - elif model_type == "granite": - dispatch_model = FastGraniteModel + # Temporary disable optimized Cohere until errors match + # elif model_type == "cohere": + # dispatch_model = FastCohereModel + # Temporary disable optimized Granite until errors match + # elif model_type == "granite": + # dispatch_model = FastGraniteModel else: - raise NotImplementedError( - f"Unsloth: {model_name} not supported yet!\n"\ - "Maybe you're doing vision finetuning? 
Please use FastVisionModel instead!\n"\ - "Otherwise, make an issue to https://github.com/unslothai/unsloth!", + return FastModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, # [TODO] No effect + dtype = dtype, + load_in_4bit = load_in_4bit, + load_in_8bit = load_in_8bit, + full_finetuning = full_finetuning, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, # [TODO] No effect + fix_tokenizer = fix_tokenizer, # [TODO] No effect + trust_remote_code = trust_remote_code, + use_gradient_checkpointing = use_gradient_checkpointing, + resize_model_vocab = resize_model_vocab, # [TODO] No effect + revision = revision, + return_logits = False, # Return logits + fullgraph = True, # No graph breaks + use_exact_model_name = use_exact_model_name, + *args, **kwargs, ) pass @@ -288,6 +326,11 @@ def from_pretrained( pass if fast_inference: + import platform + if platform.system().lower() == 'windows': + print("Unsloth: vLLM does not work in Windows! 
Will use Unsloth inference!") + fast_inference = False + pass from unsloth_zoo.vllm_utils import ( patch_vllm, vllm_dynamic_quant_supported, @@ -385,9 +428,15 @@ def from_pretrained( ) from .vision import FastBaseModel from transformers import ( - AutoModelForVision2Seq, AutoModelForCausalLM, ) +try: + from transformers import AutoModelForImageTextToText + AutoModelForVision2Seq = AutoModelForImageTextToText +except: + from transformers import AutoModelForVision2Seq +pass + class FastModel(FastBaseModel): @staticmethod @@ -396,6 +445,8 @@ def from_pretrained( max_seq_length = None, # [TODO] No effect dtype = None, load_in_4bit = True, + load_in_8bit = False, + full_finetuning = False, token = None, device_map = "sequential", rope_scaling = None, # [TODO] No effect @@ -417,10 +468,40 @@ def from_pretrained( if use_gradient_checkpointing == "unsloth": patch_unsloth_smart_gradient_checkpointing(dtype = dtype) + if full_finetuning and (load_in_4bit or load_in_8bit): + print("Unsloth: You selected full finetuning support, but 4bit / 8bit is enabled - disabling LoRA / QLoRA.") + load_in_4bit = False + load_in_8bit = False + pass + + if load_in_4bit and load_in_8bit: + raise RuntimeError( + "Unsloth: Can only load in 4bit or 8bit, not both!\n"\ + "Also, we by default set `load_in_4bit = True`.\n"\ + "If you want 8bit finetuning, set both `load_in_4bit = False` and `load_in_8bit = True`" + ) + if load_in_4bit: pass + elif load_in_8bit: pass + elif not load_in_4bit and not load_in_8bit and not full_finetuning: + print("Unsloth: LoRA, QLoRA and full finetuning all not selected. 
Switching to QLoRA.") + load_in_4bit = True + pass + old_model_name = model_name if not use_exact_model_name: model_name = get_model_name(model_name, load_in_4bit) + # Check versions + LATEST = '\nPlease use transformers via `pip install --no-deps git+https://github.com/huggingface/transformers.git`' + NIGHTLY = '\nPlease use nightly transformers via pip install --upgrade "transformers>=4.49.0"`' + if "pixtral" in model_name.lower() and transformers_version < Version("4.49.0"): + raise RuntimeError("Unsloth: Pixtral only works on transformers >= 4.49.0." + LATEST) + elif "qwen2.5" in model_name.lower() and transformers_version < Version("4.49.0"): + raise RuntimeError("Unsloth: Qwen 2.5 only works on transformers >= 4.49.0." + LATEST) + elif "aya-vision" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: Aya Vision only works on transformers >= 4.50.0." + NIGHTLY) + pass + if USE_MODELSCOPE and not os.path.exists(model_name): from modelscope import snapshot_download model_name = snapshot_download(model_name) @@ -510,7 +591,6 @@ def from_pretrained( model_config = AutoConfig.from_pretrained( model_name, token = token, - revision = revision, trust_remote_code = trust_remote_code, ) pass @@ -565,7 +645,7 @@ def from_pretrained( pass # Check if VLM - is_vlm = (x.endswith("ForConditionalGeneration") for x in model_config.architectures) + is_vlm = any(x.endswith("ForConditionalGeneration") for x in model_config.architectures) is_vlm = is_vlm or hasattr(model_config, "vision_config") auto_model = AutoModelForVision2Seq if is_vlm else AutoModelForCausalLM @@ -574,6 +654,8 @@ def from_pretrained( max_seq_length = max_seq_length, dtype = _get_dtype(dtype), load_in_4bit = load_in_4bit, + load_in_8bit = load_in_8bit, + full_finetuning = full_finetuning, token = token, device_map = device_map, trust_remote_code = trust_remote_code, @@ -581,6 +663,7 @@ def from_pretrained( model_types = model_types, tokenizer_name = 
tokenizer_name, auto_model = auto_model, + use_gradient_checkpointing = use_gradient_checkpointing, *args, **kwargs, ) @@ -628,7 +711,7 @@ def from_pretrained( trust_remote_code = trust_remote_code, ) # Patch it as well! - model = FastBaseModel.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseModel.post_patch_model(model, use_gradient_checkpointing) pass return model, tokenizer pass diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index a2e609f203..47dbb325ef 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -492,6 +492,18 @@ "unsloth/Qwen2-VL-72B-Instruct", "Qwen/Qwen2-VL-72B-Instruct", ), + "unsloth/Qwen2-VL-2B-bnb-4bit" : ( + "unsloth/Qwen2-VL-2B", + "Qwen/Qwen2-VL-2B", + ), + "unsloth/Qwen2-VL-7B-bnb-4bit" : ( + "unsloth/Qwen2-VL-7B", + "Qwen/Qwen2-VL-7B", + ), + "unsloth/Qwen2-VL-72B-bnb-4bit" : ( + "unsloth/Qwen2-VL-72B", + "Qwen/Qwen2-VL-72B", + ), "unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit" : ( "unsloth/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.2-11B-Vision-Instruct", @@ -626,6 +638,38 @@ "Qwen/QwQ-32B", "unsloth/QwQ-32B-bnb-4bit", ), + "unsloth/gemma-3-1b-it" : ( + "unsloth/gemma-3-1b-it", + "google/gemma-3-1b-it", + ), + "unsloth/gemma-3-4b-it" : ( + "unsloth/gemma-3-4b-it", + "google/gemma-3-4b-it", + ), + "unsloth/gemma-3-12b-it" : ( + "unsloth/gemma-3-12b-it", + "google/gemma-3-12b-it", + ), + "unsloth/gemma-3-27b-it" : ( + "unsloth/gemma-3-27b-it", + "google/gemma-3-27b-it", + ), + "unsloth/gemma-3-1b-pt" : ( + "unsloth/gemma-3-1b-pt", + "google/gemma-3-1b-pt", + ), + "unsloth/gemma-3-4b-pt" : ( + "unsloth/gemma-3-4b-pt", + "google/gemma-3-4b-pt", + ), + "unsloth/gemma-3-12b-pt" : ( + "unsloth/gemma-3-12b-pt", + "google/gemma-3-12b-pt", + ), + "unsloth/gemma-3-27b-pt" : ( + "unsloth/gemma-3-27b-pt", + "google/gemma-3-27b-pt", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index cf9c16514e..86a174ebfe 100644 --- 
a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -106,6 +106,8 @@ def generate_with_clone(*args, **kwargs): import numpy as np from contextlib import nullcontext from torch.nn import functional as F +from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling + torch_compile_options = {{ "epilogue_fusion" : True, "max_autotune" : False, @@ -234,6 +236,7 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): mixed_precision = \ "use_bf16 = getattr(args, 'bf16', False)\n"\ "use_fp16 = getattr(args, 'fp16', False)\n"\ + "mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')\n"\ "dtype = getattr(model.config, 'torch_dtype', None)\n"\ "if dtype is None: dtype = model.get_input_embeddings().dtype\n"\ "from unsloth_zoo.utils import _get_dtype\n"\ @@ -241,10 +244,14 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): "float16 = dtype == torch.float16\n"\ "if float16 and use_bf16: raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')\n"\ "if not float16 and use_fp16: raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. 
Set fp16 to `False` and bf16 to `True`')\n"\ - "if not use_bf16 and not use_fp16:\n"\ + "if (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':\n"\ " args.fp16 = float16\n"\ " args.bf16 = not float16\n"\ " os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'\n" + "elif mixed_precision_dtype == 'bfloat16':\n"\ + " args.fp16 = False\n"\ + " args.bf16 = False\n"\ + " os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'\n" extra_args += mixed_precision pass @@ -280,7 +287,12 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): "bf16_full_eval = getattr(args, 'bf16_full_eval', False)\n"\ "if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True\n"\ "if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False\n"\ - "if not bf16_full_eval and not fp16_full_eval: args.bf16_full_eval = args.bf16; args.fp16_full_eval = args.fp16\n" + "if os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':\n"\ + " args.bf16_full_eval = True\n"\ + " args.fp16_full_eval = False\n"\ + "elif not bf16_full_eval and not fp16_full_eval:\n"\ + " args.bf16_full_eval = args.bf16\n"\ + " args.fp16_full_eval = args.fp16\n" extra_args += eval_changes pass @@ -327,6 +339,20 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): extra_args += training_check pass + # Check data collator if it's correct! 
+ if "data_collator" in call_args and "train_dataset" in call_args: + data_collator_check = \ + "if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:\n"\ + " print('Unsloth: Changing data collator to `DataCollatorForLanguageModeling` since `labels` not found.')\n"\ + " data_collator = DataCollatorForLanguageModeling("\ + "tokenizer = processing_class if 'processing_class' in locals() else tokenizer, mlm = False)\n"\ + "elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:\n"\ + " print('Unsloth: Changing data collator to `DataCollatorForSeq2Seq` since `labels` found.')\n"\ + " data_collator = DataCollatorForSeq2Seq("\ + "tokenizer = processing_class if 'processing_class' in locals() else tokenizer)\n" + extra_args += data_collator_check + pass + # Check NEFTune if "model" in call_args: neftune_check = \ @@ -536,7 +562,7 @@ def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, import if "args.use_vllm" in init and "model" in init and "args" in init: # .*? matches first match. .+? matches final match. 
replacer = re.findall( - "def __init__\(.*?\).*?\:\n", + r"def __init__\(.*?\).*?\:\n", init, flags = re.MULTILINE | re.DOTALL, ) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index ff07ef6917..fa5547ec55 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -15,17 +15,22 @@ import torch from transformers import ( BitsAndBytesConfig, - AutoModelForVision2Seq, AutoProcessor, AutoTokenizer, AutoModelForCausalLM, ) +try: + from transformers import AutoModelForImageTextToText + AutoModelForVision2Seq = AutoModelForImageTextToText +except: + from transformers import AutoModelForVision2Seq +pass from .llama import * from ..kernels import ( post_patch_loss_function, ) from ._utils import __version__ -from peft import LoraConfig, TaskType, get_peft_model +from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model from transformers import set_seed as transformers_set_seed from unsloth_zoo.peft_utils import ( get_peft_regex, @@ -35,6 +40,7 @@ from triton import __version__ as triton_version from unsloth_zoo.utils import _get_dtype from unsloth_zoo.patching_utils import patch_model_and_tokenizer +from unsloth_zoo.training_utils import prepare_model_for_training import types import functools @@ -52,14 +58,21 @@ def unsloth_base_fast_generate( dtype = _get_dtype(self.config.torch_dtype) # Check if VLM - is_vlm = (x.endswith("ForConditionalGeneration") for x in self.config.architectures) + is_vlm = ( + x.endswith(("ForConditionalGeneration", "ForVisionText2Text")) + for x in self.config.architectures + ) is_vlm = is_vlm or hasattr(self.config, "vision_config") # Remove token_type_ids kwargs.pop("token_type_ids", None) # VLMs do not allow logits_to_keep - if not is_vlm: kwargs["logits_to_keep"] = 1 + if not is_vlm: + kwargs["logits_to_keep"] = 1 + else: + kwargs.pop("logits_to_keep", None) + kwargs.pop("num_logits_to_keep", None) # Check pad_token model_eos_token_id = getattr(self.config, "eos_token_id", None) @@ -90,12 +103,15 
@@ def from_pretrained( max_seq_length = None, dtype = None, load_in_4bit = True, + load_in_8bit = False, + full_finetuning = False, token = None, device_map = "sequential", trust_remote_code = False, model_types = None, tokenizer_name = None, auto_model = AutoModelForVision2Seq, + use_gradient_checkpointing = "unsloth", **kwargs, ): if trust_remote_code: @@ -141,6 +157,14 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) bnb_config = None + if full_finetuning and (load_in_4bit or load_in_8bit): + print("Unsloth: You selected full finetuning support, but 4bit / 8bit is enabled - disabling LoRA / QLoRA.") + load_in_4bit = False + load_in_8bit = False + pass + + if load_in_4bit and load_in_8bit: + raise RuntimeError("Unsloth: Can only load in 4bit or 8bit, not both!") if load_in_4bit: bnb_config = BitsAndBytesConfig( load_in_4bit = True, @@ -149,6 +173,31 @@ def from_pretrained( bnb_4bit_compute_dtype = dtype, llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, ) + elif load_in_8bit: + bnb_config = BitsAndBytesConfig( + load_in_8bit = True, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + ) + elif not load_in_4bit and not load_in_8bit and not full_finetuning: + print("Unsloth: LoRA, QLoRA and full finetuning all not selected. 
Switching to QLoRA.") + load_in_4bit = True + bnb_config = BitsAndBytesConfig( + load_in_4bit = True, + bnb_4bit_use_double_quant = True, + bnb_4bit_quant_type = "nf4", + bnb_4bit_compute_dtype = dtype, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + ) + pass + + if full_finetuning: + os.environ["UNSLOTH_ENABLE_FULL_FINETUNING"] = "1" + if dtype == torch.bfloat16: + print("Unsloth: Using bfloat16 full finetuning which cuts memory usage by 50%.") + else: + print("Unsloth: Float16 full finetuning uses more memory since we upcast weights to float32.") + else: + os.environ["UNSLOTH_ENABLE_FULL_FINETUNING"] = "0" pass kwargs.pop("attn_implementation", None); # No need since we auto call it @@ -204,23 +253,37 @@ def from_pretrained( # Save tokenizer for inference purposes tokenizer.padding_side = "left" # Force inference - tokenizer.tokenizer.padding_side = "left" # Force inference + if hasattr(tokenizer, "tokenizer"): + tokenizer.tokenizer.padding_side = "left" # Force inference m = model while hasattr(m, "model"): + m.max_seq_length = max_seq_length m._saved_temp_tokenizer = tokenizer # Also set is_loaded_in_8bit to disable incorrect DDP - m.is_loaded_in_8bit = True + m.is_loaded_in_8bit = True if not full_finetuning else False m = m.model pass + m.max_seq_length = max_seq_length m._saved_temp_tokenizer = tokenizer # Also set is_loaded_in_8bit to disable incorrect DDP - m.is_loaded_in_8bit = True + m.is_loaded_in_8bit = True if not full_finetuning else False # Patch generate if model.generate.__name__ != "unsloth_base_fast_generate": model._old_generate = model.generate unsloth_base_fast_generate.__doc__ = model._old_generate.__doc__ model.generate = types.MethodType(unsloth_base_fast_generate, model) + + # Post patches + model = FastBaseModel.post_patch_model( + model, + use_gradient_checkpointing = use_gradient_checkpointing, + ) + # Clear deleted GPU items + for _ in range(3): + gc.collect() + torch.cuda.empty_cache() + pass return model, tokenizer pass @@ 
-249,6 +312,10 @@ def get_peft_model( temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): + if os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1": + print("Unsloth: Full finetuning is enabled, so .get_peft_model has no effect") + return model + pass transformers_set_seed(random_state) if type(r) is not int: @@ -282,7 +349,7 @@ def get_peft_model( gc.collect() torch.cuda.empty_cache() pass - + max_seq_length = model.max_seq_length lora_config = LoraConfig( r = r, lora_alpha = lora_alpha, @@ -295,11 +362,12 @@ def get_peft_model( model, use_gradient_checkpointing = use_gradient_checkpointing, ) - model = get_peft_model(model, lora_config) + model = _get_peft_model(model, lora_config) # Enable gradients on modules which are trainable requires_grad_for_gradient_checkpointing(model) - model = FastBaseModel.patch_peft_model(model, use_gradient_checkpointing) + model = FastBaseModel.post_patch_model(model, use_gradient_checkpointing) + model.max_seq_length = max_seq_length # Clear deleted GPU items for _ in range(3): @@ -316,20 +384,26 @@ def get_peft_model( @staticmethod - def patch_peft_model( + def post_patch_model( model, use_gradient_checkpointing = True, ): - if not isinstance(model, PeftModelForCausalLM): - raise TypeError( - "Unsloth: Your model needs to call `.get_peft_model` first!" 
- ) - pass + full_finetuning = os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1" - model = prepare_model_for_kbit_training( + float32_mixed_precision = True + if _get_dtype(model.config.torch_dtype) == torch.bfloat16: + # Use bfloat16 precision for full finetuning + float32_mixed_precision = False + + model = prepare_model_for_training( model, use_gradient_checkpointing = use_gradient_checkpointing, - use_reentrant = True, + use_reentrant = True, + full_finetuning = full_finetuning, + train_layernorms = full_finetuning, + train_embedding = full_finetuning, + train_lm_head = full_finetuning, + float32_mixed_precision = float32_mixed_precision, ) from transformers.trainer import Trainer @@ -347,17 +421,19 @@ def patch_peft_model( m = model while hasattr(m, "model"): if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.tokenizer.padding_side = "right" + if hasattr(m._saved_temp_tokenizer, "tokenizer"): + m._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Also set is_loaded_in_8bit to disable incorrect DDP - m.is_loaded_in_8bit = True + m.is_loaded_in_8bit = True if not full_finetuning else False m = m.model pass if hasattr(m, "_saved_temp_tokenizer"): - m._saved_temp_tokenizer.tokenizer.padding_side = "right" + if hasattr(m._saved_temp_tokenizer, "tokenizer"): + m._saved_temp_tokenizer.tokenizer.padding_side = "right" pass # Also set is_loaded_in_8bit to disable incorrect DDP - m.is_loaded_in_8bit = True + m.is_loaded_in_8bit = True if not full_finetuning else False # Clear deleted GPU items for _ in range(3): diff --git a/unsloth/tokenizer_utils.py b/unsloth/tokenizer_utils.py index 91bb0202ff..26669127d7 100644 --- a/unsloth/tokenizer_utils.py +++ b/unsloth/tokenizer_utils.py @@ -945,7 +945,7 @@ def patch_sft_trainer_tokenizer(): if replacer is None: # .*? matches first match. .+? matches final match. 
replacer = re.findall( - f"def {function_name}\(.*?\).*?\:\n", + f"def {function_name}" + r"\(.*?\).*?\:\n", function, flags = re.MULTILINE | re.DOTALL, ) From 060f931edf3976789149fedb3fa5da8809aa54d7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 12 Mar 2025 04:07:45 -0700 Subject: [PATCH 1072/1088] Update mapper.py --- unsloth/models/mapper.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index 47dbb325ef..b4facf729c 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -638,35 +638,35 @@ "Qwen/QwQ-32B", "unsloth/QwQ-32B-bnb-4bit", ), - "unsloth/gemma-3-1b-it" : ( + "unsloth/gemma-3-1b-it-bnb-4bit" : ( "unsloth/gemma-3-1b-it", "google/gemma-3-1b-it", ), - "unsloth/gemma-3-4b-it" : ( + "unsloth/gemma-3-4b-it-bnb-4bit" : ( "unsloth/gemma-3-4b-it", "google/gemma-3-4b-it", ), - "unsloth/gemma-3-12b-it" : ( + "unsloth/gemma-3-12b-it-bnb-4bit" : ( "unsloth/gemma-3-12b-it", "google/gemma-3-12b-it", ), - "unsloth/gemma-3-27b-it" : ( + "unsloth/gemma-3-27b-it-bnb-4bit" : ( "unsloth/gemma-3-27b-it", "google/gemma-3-27b-it", ), - "unsloth/gemma-3-1b-pt" : ( + "unsloth/gemma-3-1b-pt-bnb-4bit" : ( "unsloth/gemma-3-1b-pt", "google/gemma-3-1b-pt", ), - "unsloth/gemma-3-4b-pt" : ( + "unsloth/gemma-3-4b-pt-bnb-4bit" : ( "unsloth/gemma-3-4b-pt", "google/gemma-3-4b-pt", ), - "unsloth/gemma-3-12b-pt" : ( + "unsloth/gemma-3-12b-pt-bnb-4bit" : ( "unsloth/gemma-3-12b-pt", "google/gemma-3-12b-pt", ), - "unsloth/gemma-3-27b-pt" : ( + "unsloth/gemma-3-27b-pt-bnb-4bit" : ( "unsloth/gemma-3-27b-pt", "google/gemma-3-27b-pt", ), From 3edacc50bc2277ec331040a38ea9bbabe40a6c31 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 12 Mar 2025 04:46:34 -0700 Subject: [PATCH 1073/1088] Update _utils.py --- unsloth/models/_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index c79d702b15..0698d5d29c 100644 --- 
a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1129,6 +1129,7 @@ def unsloth_compile_transformers( revision = revision, trust_remote_code = trust_remote_code, ) + model_types += ["siglip"] if disable: return From 71039cb1ce88034f12855476f2a0c5ff63ad59a7 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 12 Mar 2025 04:52:07 -0700 Subject: [PATCH 1074/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0698d5d29c..77bfa8762a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -1129,7 +1129,7 @@ def unsloth_compile_transformers( revision = revision, trust_remote_code = trust_remote_code, ) - model_types += ["siglip"] + model_types = ["siglip"] + model_types if disable: return From fe04c014c22616cba56c9ddf7782be8ea8ed117e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 13 Mar 2025 06:41:42 -0700 Subject: [PATCH 1075/1088] Gemma 3 bug fixes (#2005) * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * llama-quantize on WINDOWS WSL error fix - edit save.py (gguf saving breaks) (#1649) * edit save.py to fix gguf saving breaks. 
* add check for .exe or not exe file extension for linux and windows * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update llama.py * Update llama.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * unsloth_num_chunks * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py (#1754) Fix typo in comment: know -> now. This was printed when running the Llama3.1_(8B)-GRPO.ipynb example notebook, so I'd expect others to run into it as well. 
* Optional logits * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py 
* Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py * Bug fixes * FastModel * __doc__ * Update vision.py * Update loader.py * Update loader.py * Update loader.py * version * move use_modelscope to _utils (#1938) * move use_modelscope to _utils * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Don't use revision when loading model_config and is_peft=True (#1949) * More syntax warnings (#1944) * move use_modelscope to _utils * fix * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Update loader.py * Full finetuning and other fixes * UNSLOTH_ENABLE_FULL_FINETUNING * Update loader.py * Update loader.py * Update loader.py * Update vision.py * Update vision.py * full finetuning * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * max_seq_length * Update rl.py * Update rl.py * Update rl.py * Update pyproject.toml * AutoModelForImageTextToText * Update mapper.py * Update pyproject.toml * Update _utils.py * Update _utils.py * Update _utils.py * Batch samples * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Update vision.py * Update mapper.py * Update vision.py * Temporary patches * Update loader.py * model names * Gemma 3 chat 
template * Bug fixes * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update llama.py * Update llama.py * Update rl.py * Update chat_templates.py * Update chat_templates.py * Update vision.py * Update vision.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Revert * Update _utils.py * forced precision * Autocast * Update vision.py * Update vision.py * Update rl.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Gennadii Manzhos <105049664+everythingisc00l@users.noreply.github.com> Co-authored-by: Seth Weidman Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Wilson Wu <140025193+wiwu2390@users.noreply.github.com> --- unsloth/chat_templates.py | 173 ++++++++++++++++++++------------------ unsloth/models/_utils.py | 15 +++- unsloth/models/llama.py | 33 ++++++++ unsloth/models/loader.py | 10 ++- unsloth/models/mapper.py | 24 ++++-- unsloth/models/rl.py | 22 +++-- unsloth/models/vision.py | 62 ++++++++++---- 7 files changed, 224 insertions(+), 115 deletions(-) diff --git a/unsloth/chat_templates.py b/unsloth/chat_templates.py index 5785894a23..2c2e36182d 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -20,6 +20,7 @@ "to_sharegpt", "standardize_sharegpt", + "standardize_data_formats", "apply_chat_template", "train_on_responses_only", @@ -37,7 +38,9 @@ import re from unsloth_zoo.dataset_utils import ( train_on_responses_only, + standardize_data_formats, ) +standardize_sharegpt = standardize_data_formats CHAT_TEMPLATES = {} DEFAULT_SYSTEM_MESSAGE = {} 
@@ -934,6 +937,84 @@ pass +# =========================================== Gemma-3 +# Obtained via +# print(tokenizer.chat_template.replace("}\n", "####").replace("\n", "\\n").replace("####", "}\n")) +gemma3_template = \ +"""{{ bos_token }} +{%- if messages[0]['role'] == 'system' -%} + {%- if messages[0]['content'] is string -%} + {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%} + {%- else -%} + {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%} + {%- endif -%} + {%- set loop_messages = messages[1:] -%} +{%- else -%} + {%- set first_user_prefix = "" -%} + {%- set loop_messages = messages -%} +{%- endif -%} +{%- for message in loop_messages -%} + {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%} + {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }} + {%- endif -%} + {%- if (message['role'] == 'assistant') -%} + {%- set role = "model" -%} + {%- else -%} + {%- set role = message['role'] -%} + {%- endif -%} + {{ '' + role + '\n' + (first_user_prefix if loop.first else "") }} + {%- if message['content'] is string -%} + {{ message['content'] | trim }} + {%- elif message['content'] is iterable -%} + {%- for item in message['content'] -%} + {%- if item['type'] == 'image' -%} + {{ '' }} + {%- elif item['type'] == 'text' -%} + {{ item['text'] | trim }} + {%- endif -%} + {%- endfor -%} + {%- else -%} + {{ raise_exception("Invalid content type") }} + {%- endif -%} + {{ '\n' }} +{%- endfor -%} +{%- if add_generation_prompt -%} + {{ 'model\n' }} +{%- endif -%} +""" + +# Ollama from https://ollama.com/library/gemma3/blobs/e0a42594d802 +gemma3_ollama = \ +''' +FROM {__FILE_LOCATION__} +TEMPLATE """{{- range $i, $_ := .Messages }} +{{- $last := eq (len (slice $.Messages $i)) 1 }} +{{- if or (eq .Role "user") (eq .Role "system") }}user +{{ .Content }} +{{ if $last }}model +{{ end }} +{{- else if eq .Role "assistant" }}model +{{ .Content }}{{ if not $last }} +{{ end }} +{{- end }} +{{- end 
}}""" +PARAMETER stop "" +PARAMETER stop "" +PARAMETER temperature 0.1 +PARAMETER min_p 0.0 +PARAMETER top_k 64 +PARAMETER top_p 0.95 +PARAMETER num_predict 32768 +''' + +gemma3_template_eos_token = "" +CHAT_TEMPLATES["gemma-3"] = (gemma3_template, gemma3_template_eos_token, False, gemma3_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma-3"] = None # No system message in Gemma-3 + +CHAT_TEMPLATES["gemma3"] = (gemma3_template, gemma3_template_eos_token, False, gemma3_ollama,) +DEFAULT_SYSTEM_MESSAGE["gemma3"] = None # No system message in Gemma-3 +pass + def _change_system_message(template: str, type_chat_template: str, system_message: str = None): system_message_pattern = r"\{system_message\}" @@ -1033,11 +1114,12 @@ def get_chat_template( # Check fast tokenizer if not is_fast_tokenizer: - print( - "Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ - "Please log a Github issue if you want this as a new feature!\n"\ - "Your chat template will still work, but it won't add or edit tokens." - ) + pass + # print( + # "Unsloth: Not a fast tokenizer, so can't process it as of yet :(\n"\ + # "Please log a Github issue if you want this as a new feature!\n"\ + # "Your chat template will still work, but it won't add or edit tokens." + # ) elif token_mapping is not None: # token_mapping = {"" : "<|im_start|>", "" : "<|im_end|>"} @@ -1396,82 +1478,6 @@ def __convert_to_sharegpt__(examples): pass -def standardize_sharegpt( - dataset, - aliases_for_system = ["system",], - aliases_for_user = ["user", "human", "input",], - aliases_for_assistant = ["gpt", "assistant", "output",], -): - """ - Standardizes ShareGPT and other formats to user/assistant Hugging Face format. - - Get aliases for the system, user and assistant roles. - These shall map to "system", "user" and "assistant" respectively. 
- - aliases_for_system = ["system",], - aliases_for_user = ["user", "human", "input",], - aliases_for_assistant = ["gpt", "assistant", "output",], - """ - import collections - import itertools - - convos = dataset[:10]["conversations"] - uniques = collections.defaultdict(list) - for convo in convos: - for message in convo: - for key, value in message.items(): - uniques[key].append(value) - pass - - # Must be only 2 entries - assert(len(uniques.keys()) == 2) - - keys = list(uniques.keys()) - length_first = len(set(uniques[keys[0]])) - length_second = len(set(uniques[keys[1]])) - - if length_first < length_second: - # Role is assigned to the first element - role_key = keys[0] - content_key = keys[1] - else: - role_key = keys[1] - content_key = keys[0] - pass - - # Check roles are in aliases - all_aliases = set(aliases_for_system + aliases_for_user + aliases_for_assistant) - roles = set(uniques[role_key]) - leftover_aliases = (all_aliases | roles) - all_aliases - if len(leftover_aliases) != 0: - raise TypeError( - f"Unsloth: {list(leftover_aliases)} are not in aliases. Please update aliases." 
- ) - pass - - # Mapping for aliases - aliases_mapping = {} - for x in aliases_for_system: aliases_mapping[x] = "system" - for x in aliases_for_user: aliases_mapping[x] = "user" - for x in aliases_for_assistant: aliases_mapping[x] = "assistant" - - def _standardize_dataset(examples): - convos = examples["conversations"] - all_convos = [] - for convo in convos: - new_convo = [ - { "role" : aliases_mapping[message[role_key]], "content" : message[content_key], } - for message in convo - ] - all_convos.append(new_convo) - pass - return { "conversations" : all_convos, } - pass - - return dataset.map(_standardize_dataset, batched = True, desc = "Standardizing format") -pass - - def get_ollama_eos_tokens(tokenizer, extra_eos_tokens = []): added_tokens_decoder = tokenizer.added_tokens_decoder.values() added_tokens_decoder = [str(x) for x in added_tokens_decoder] @@ -1934,6 +1940,11 @@ def formatting_prompts_func(examples): tokenizer._ollama_modelfile = modelfile tokenizer._unsloth_input_part = input_part tokenizer._unsloth_output_part = output_part + if hasattr(tokenizer, "tokenizer"): + tokenizer.tokenizer.chat_template = jinja_template + tokenizer.tokenizer._ollama_modelfile = modelfile + tokenizer.tokenizer._unsloth_input_part = input_part + tokenizer.tokenizer._unsloth_output_part = output_part return dataset.map(formatting_prompts_func, batched = True,) pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 77bfa8762a..a3fc12f6d0 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -71,6 +71,7 @@ from platform import system as platform_system platform_system = platform_system() import numpy as np +import contextlib import warnings, subprocess, re, inspect, psutil, os, math from unsloth_zoo.utils import Version @@ -113,6 +114,11 @@ from unsloth_zoo.training_utils import ( prepare_model_for_training, ) +from unsloth_zoo.temporary_patches import ( + TEMPORARY_PATCHES, +) +for temporary_patch in TEMPORARY_PATCHES: + 
temporary_patch() # ============================================= # Disable some warnings which can get annoying @@ -981,7 +987,14 @@ def _unsloth_pre_compute_loss(self, model, inputs, *args, **kwargs): "Read more on gradient accumulation issues here: https://unsloth.ai/blog/gradient" ) pass - return self._old_compute_loss(model, inputs, *args, **kwargs) + + if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "0": + autocaster = contextlib.nullcontext() + else: + autocaster = torch.autocast(device_type = "cuda", dtype = torch.float32) + with autocaster: + outputs = self._old_compute_loss(model, inputs, *args, **kwargs) + return outputs pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7ae6e92d11..7000739850 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -38,6 +38,7 @@ from ..tokenizer_utils import * if HAS_FLASH_ATTENTION: from flash_attn import flash_attn_func +from .vision import FastBaseModel # Final patching code from transformers.models.llama.modeling_llama import ( @@ -1648,6 +1649,7 @@ def from_pretrained( disable_log_stats = False, **kwargs, ): + os.environ["UNSLOTH_USE_NEW_MODEL"] = "0" if trust_remote_code: if fast_inference: raise NotImplementedError("Unsloth: Fast inference does not support `trust_remote_code` yet.") @@ -2016,6 +2018,31 @@ def get_peft_model( temporary_location = "_unsloth_temporary_saved_buffers", **kwargs, ): + if os.environ.get("UNSLOTH_USE_NEW_MODEL", "0") == "1": + return FastBaseModel.get_peft_model( + model = model, + r = r, + target_modules = target_modules, + lora_alpha = lora_alpha, + lora_dropout = lora_dropout, + bias = bias, + finetune_vision_layers = False, + finetune_language_layers = True, + finetune_attention_modules = True, + finetune_mlp_modules = True, + layers_to_transform = layers_to_transform, + layers_pattern = layers_pattern, + use_gradient_checkpointing = use_gradient_checkpointing, + random_state = random_state, + max_seq_length = max_seq_length, + use_rslora = 
use_rslora, + modules_to_save = modules_to_save, + init_lora_weights = init_lora_weights, + loftq_config = loftq_config, + temporary_location = temporary_location, + **kwargs, + ) + pass if os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1": print("Unsloth: Full finetuning is enabled, so .get_peft_model has no effect") return model @@ -2435,6 +2462,12 @@ def patch_peft_model( model, use_gradient_checkpointing = True, ): + if os.environ.get("UNSLOTH_USE_NEW_MODEL", "0") == "1": + return FastBaseModel.patch_peft_model( + model = model, + use_gradient_checkpointing = use_gradient_checkpointing, + ) + pass if not isinstance(model, PeftModelForCausalLM): raise TypeError( "Unsloth: Your model needs to call `.get_peft_model` first!" diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 92a166f69a..1b54c8c7fc 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -70,7 +70,7 @@ class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( model_name = "unsloth/Llama-3.2-1B-Instruct", - max_seq_length = None, + max_seq_length = 2048, dtype = None, load_in_4bit = True, load_in_8bit = False, @@ -96,7 +96,7 @@ def from_pretrained( if load_in_8bit or full_finetuning: return FastModel.from_pretrained( model_name = model_name, - max_seq_length = max_seq_length, # [TODO] No effect + max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit, load_in_8bit = load_in_8bit, @@ -295,7 +295,7 @@ def from_pretrained( else: return FastModel.from_pretrained( model_name = model_name, - max_seq_length = max_seq_length, # [TODO] No effect + max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit, load_in_8bit = load_in_8bit, @@ -442,7 +442,7 @@ class FastModel(FastBaseModel): @staticmethod def from_pretrained( model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit", - max_seq_length = None, # [TODO] No effect + max_seq_length = 2048, dtype = None, load_in_4bit = True, load_in_8bit = False, @@ 
-500,6 +500,8 @@ def from_pretrained( raise RuntimeError("Unsloth: Qwen 2.5 only works on transformers >= 4.49.0." + LATEST) elif "aya-vision" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): raise RuntimeError("Unsloth: Aya Vision only works on transformers >= 4.50.0." + NIGHTLY) + elif "gemma-3" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: Gemma 3 only works on transformers >= 4.50.0." + NIGHTLY) pass if USE_MODELSCOPE and not os.path.exists(model_name): diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index b4facf729c..cb0d73c590 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -638,37 +638,45 @@ "Qwen/QwQ-32B", "unsloth/QwQ-32B-bnb-4bit", ), - "unsloth/gemma-3-1b-it-bnb-4bit" : ( + "unsloth/gemma-3-1b-it-unsloth-bnb-4bit" : ( "unsloth/gemma-3-1b-it", "google/gemma-3-1b-it", + "unsloth/gemma-3-1b-it-bnb-4bit", ), - "unsloth/gemma-3-4b-it-bnb-4bit" : ( + "unsloth/gemma-3-4b-it-unsloth-bnb-4bit" : ( "unsloth/gemma-3-4b-it", "google/gemma-3-4b-it", + "unsloth/gemma-3-4b-it-bnb-4bit", ), - "unsloth/gemma-3-12b-it-bnb-4bit" : ( + "unsloth/gemma-3-12b-it-unsloth-bnb-4bit" : ( "unsloth/gemma-3-12b-it", "google/gemma-3-12b-it", + "unsloth/gemma-3-12b-it-bnb-4bit", ), - "unsloth/gemma-3-27b-it-bnb-4bit" : ( + "unsloth/gemma-3-27b-it-unsloth-bnb-4bit" : ( "unsloth/gemma-3-27b-it", "google/gemma-3-27b-it", + "unsloth/gemma-3-27b-it-bnb-4bit", ), - "unsloth/gemma-3-1b-pt-bnb-4bit" : ( + "unsloth/gemma-3-1b-pt-unsloth-bnb-4bit" : ( "unsloth/gemma-3-1b-pt", "google/gemma-3-1b-pt", + "unsloth/gemma-3-1b-pt-bnb-4bit", ), - "unsloth/gemma-3-4b-pt-bnb-4bit" : ( + "unsloth/gemma-3-4b-pt-unsloth-bnb-4bit" : ( "unsloth/gemma-3-4b-pt", "google/gemma-3-4b-pt", + "unsloth/gemma-3-4b-pt-bnb-4bit", ), - "unsloth/gemma-3-12b-pt-bnb-4bit" : ( + "unsloth/gemma-3-12b-pt-unsloth-bnb-4bit" : ( "unsloth/gemma-3-12b-pt", "google/gemma-3-12b-pt", + 
"unsloth/gemma-3-12b-pt-bnb-4bit", ), - "unsloth/gemma-3-27b-pt-bnb-4bit" : ( + "unsloth/gemma-3-27b-pt-unsloth-bnb-4bit" : ( "unsloth/gemma-3-27b-pt", "google/gemma-3-27b-pt", + "unsloth/gemma-3-27b-pt-bnb-4bit", ), } diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 86a174ebfe..4e158f58b3 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -236,15 +236,24 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): mixed_precision = \ "use_bf16 = getattr(args, 'bf16', False)\n"\ "use_fp16 = getattr(args, 'fp16', False)\n"\ + "force_float32 = False\n"\ + "if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':\n"\ + " if use_bf16 or use_fp16:\n"\ + " print('Unsloth: Switching to float32 training since model cannot work with float16')\n"\ + " force_float32 = True\n"\ "mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')\n"\ "dtype = getattr(model.config, 'torch_dtype', None)\n"\ "if dtype is None: dtype = model.get_input_embeddings().dtype\n"\ "from unsloth_zoo.utils import _get_dtype\n"\ "dtype = _get_dtype(dtype)\n"\ "float16 = dtype == torch.float16\n"\ - "if float16 and use_bf16: raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')\n"\ - "if not float16 and use_fp16: raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')\n"\ - "if (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':\n"\ + "if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')\n"\ + "if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. 
Set fp16 to `False` and bf16 to `True`')\n"\ + "if force_float32:\n"\ + " args.fp16 = False\n"\ + " args.bf16 = False\n"\ + " os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'\n"\ + "elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':\n"\ " args.fp16 = float16\n"\ " args.bf16 = not float16\n"\ " os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'\n" @@ -287,7 +296,10 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): "bf16_full_eval = getattr(args, 'bf16_full_eval', False)\n"\ "if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True\n"\ "if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False\n"\ - "if os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':\n"\ + "if force_float32:\n"\ + " args.bf16_full_eval = False\n"\ + " args.fp16_full_eval = False\n"\ + "elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':\n"\ " args.bf16_full_eval = True\n"\ " args.fp16_full_eval = False\n"\ "elif not bf16_full_eval and not fp16_full_eval:\n"\ @@ -343,11 +355,9 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): if "data_collator" in call_args and "train_dataset" in call_args: data_collator_check = \ "if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:\n"\ - " print('Unsloth: Changing data collator to `DataCollatorForLanguageModeling` since `labels` not found.')\n"\ " data_collator = DataCollatorForLanguageModeling("\ "tokenizer = processing_class if 'processing_class' in locals() else tokenizer, mlm = False)\n"\ "elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:\n"\ - " print('Unsloth: Changing data collator to `DataCollatorForSeq2Seq` since `labels` found.')\n"\ " data_collator = DataCollatorForSeq2Seq("\ "tokenizer = processing_class if 'processing_class' in locals() else tokenizer)\n" extra_args += 
data_collator_check diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index fa5547ec55..2ef9d2ee99 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -25,29 +25,49 @@ except: from transformers import AutoModelForVision2Seq pass -from .llama import * from ..kernels import ( post_patch_loss_function, ) from ._utils import __version__ +from ._utils import * +from ..save import patch_saving_functions from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model +from peft import PeftModelForCausalLM from transformers import set_seed as transformers_set_seed from unsloth_zoo.peft_utils import ( get_peft_regex, SKIP_QUANTIZATION_MODULES, requires_grad_for_gradient_checkpointing, ) +from transformers.models.llama.modeling_llama import logger +from transformers import __version__ as transformers_version from triton import __version__ as triton_version from unsloth_zoo.utils import _get_dtype from unsloth_zoo.patching_utils import patch_model_and_tokenizer from unsloth_zoo.training_utils import prepare_model_for_training import types import functools +import os +import gc +import math +import functools +from typing import Optional, Tuple, List, Union +import re, inspect, sys +import types +try: + from huggingface_hub.utils import get_token +except: + # Old HF Hub versions <= 0.0.25 + from huggingface_hub.utils._token import get_token +pass __all__ = [ "FastBaseModel", ] +global FORCE_FLOAT32 +FORCE_FLOAT32 = ["gemma3"] + def unsloth_base_fast_generate( self, @@ -86,6 +106,7 @@ def unsloth_base_fast_generate( except: pass # Mixed precision autocast + if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1": dtype = torch.float32 with torch.inference_mode(), torch.autocast(device_type = "cuda", dtype = dtype): output = self._old_generate(*args, **kwargs) pass @@ -100,7 +121,7 @@ class FastBaseModel: @staticmethod def from_pretrained( model_name = "unsloth/Llama-3.2-1B-Instruct", - max_seq_length = None, + max_seq_length = 2048, 
dtype = None, load_in_4bit = True, load_in_8bit = False, @@ -114,6 +135,7 @@ def from_pretrained( use_gradient_checkpointing = "unsloth", **kwargs, ): + os.environ["UNSLOTH_USE_NEW_MODEL"] = "1" if trust_remote_code: print( "Unsloth: WARNING `trust_remote_code` is True.\n"\ @@ -129,8 +151,12 @@ def from_pretrained( try: vllm_version = f" vLLM: {importlib_version('vllm')}." except: vllm_version = "" + model_type_arch = model_types[0] + if model_type_arch == "siglip" and len(model_types) != 1: + model_type_arch = model_types[1] + statistics = \ - f"==((====))== Unsloth {__version__}: Fast {model_types[0].title()} patching. Transformers: {transformers_version}.{vllm_version}\n"\ + f"==((====))== Unsloth {__version__}: Fast {model_type_arch.title()} patching. Transformers: {transformers_version}.{vllm_version}\n"\ f" {chr(92)}{chr(92)} /| {gpu_stats.name}. Num GPUs = {torch.cuda.device_count()}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"\ f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {torch.version.cuda}. Triton: {triton_version}\n"\ f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"\ @@ -156,6 +182,17 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) + global FORCE_FLOAT32 + os.environ["UNSLOTH_FORCE_FLOAT32"] = "0" + bnb_compute_dtype = dtype + for disable_name in FORCE_FLOAT32: + if disable_name.lower() == model_type_arch.lower() and dtype == torch.float16: + print(f"Unsloth: Using float16 precision for {model_type_arch} won't work! 
Using float32.") + os.environ["UNSLOTH_FORCE_FLOAT32"] = "1" + bnb_compute_dtype = torch.float32 + break + pass + bnb_config = None if full_finetuning and (load_in_4bit or load_in_8bit): print("Unsloth: You selected full finetuning support, but 4bit / 8bit is enabled - disabling LoRA / QLoRA.") @@ -170,13 +207,13 @@ def from_pretrained( load_in_4bit = True, bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", - bnb_4bit_compute_dtype = dtype, - llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + bnb_4bit_compute_dtype = bnb_compute_dtype, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES.copy(), ) elif load_in_8bit: bnb_config = BitsAndBytesConfig( load_in_8bit = True, - llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES.copy(), ) elif not load_in_4bit and not load_in_8bit and not full_finetuning: print("Unsloth: LoRA, QLoRA and full finetuning all not selected. Switching to QLoRA.") @@ -185,8 +222,8 @@ def from_pretrained( load_in_4bit = True, bnb_4bit_use_double_quant = True, bnb_4bit_quant_type = "nf4", - bnb_4bit_compute_dtype = dtype, - llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES, + bnb_4bit_compute_dtype = bnb_compute_dtype, + llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES.copy(), ) pass @@ -212,7 +249,7 @@ def from_pretrained( # quantization_config = bnb_config, token = token, trust_remote_code = trust_remote_code, - # attn_implementation = "sdpa", [TODO] Pixtral for eg fails + attn_implementation = "sdpa", #[TODO] Pixtral for eg fails **kwargs, ) # Return old flag @@ -408,12 +445,7 @@ def post_patch_model( from transformers.trainer import Trainer if Trainer._inner_training_loop.__name__ != "_fast_inner_training_loop": - raise RuntimeError( - 'Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so '\ - 'enabling it will require much more work, so we have to prioritize. 
Please understand!\n'\ - 'We do have a separate beta version, which you can contact us about!\n'\ - 'Thank you for your understanding and we appreciate it immensely!' - ) + raise RuntimeError('Unsloth: Unsuccessfully patched inner_training_loop') pass patch_saving_functions(model, vision = True) From 029461a8c4338c304cf1c2bf8b634a0b00407eab Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 06:42:44 -0700 Subject: [PATCH 1076/1088] Gemma 3, bug fixes (#2014) * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * 
Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py * Bug fixes * FastModel * __doc__ * Update vision.py * Update loader.py * Update loader.py * Update loader.py * version * move use_modelscope to _utils (#1938) * move use_modelscope to _utils * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Don't use revision when loading model_config and is_peft=True (#1949) * More syntax warnings (#1944) * move use_modelscope to _utils * fix * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Update loader.py * Full finetuning and other fixes * UNSLOTH_ENABLE_FULL_FINETUNING * Update loader.py * Update loader.py * Update loader.py * Update vision.py * Update vision.py * full finetuning * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * max_seq_length * Update rl.py * Update rl.py * Update rl.py * Update pyproject.toml * AutoModelForImageTextToText * Update mapper.py * 
Update pyproject.toml * Update _utils.py * Update _utils.py * Update _utils.py * Batch samples * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Update vision.py * Update mapper.py * Update vision.py * Temporary patches * Update loader.py * model names * Gemma 3 chat template * Bug fixes * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update llama.py * Update llama.py * Update rl.py * Update chat_templates.py * Update chat_templates.py * Update vision.py * Update vision.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Revert * Update _utils.py * forced precision * Autocast * Update vision.py * Update vision.py * Update rl.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update rl.py * vLLM fixes * constexpr * Update vision.py * Update vision.py * Update vision.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update save.py * New models * Triton windows update (#1976) * Update pyproject.toml * Update README.md * Update RMS LayerNorm implementation, and list compr. 
change in chat templates (#1974) * Update RMS LayerNorm implementation with optimizations and testing suite * perf: optimize list comprehension in get_ollama_eos_tokens * Update Zoo * Update llama.py * Update llama.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update rl_replacements.py * Update vision.py * grpo fix * Update rl_replacements.py * Update vision.py * Update rl_replacements.py * Update vision.py * Update mapper.py * Update vision.py * Update vision.py * Update loader.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Wilson Wu <140025193+wiwu2390@users.noreply.github.com> Co-authored-by: Akshay Behl <126911424+Captain-T2004@users.noreply.github.com> --- README.md | 2 +- pyproject.toml | 7 +-- unsloth/__init__.py | 2 +- unsloth/chat_templates.py | 5 +- unsloth/kernels/cross_entropy_loss.py | 32 +++++------ unsloth/kernels/layernorm.py | 6 ++- unsloth/kernels/rms_layernorm.py | 18 ++++--- unsloth/models/_utils.py | 31 ++++++++++- unsloth/models/llama.py | 20 +++++++ unsloth/models/loader.py | 20 +++++-- unsloth/models/mapper.py | 40 ++++++++++++++ unsloth/models/rl.py | 27 +++++++--- unsloth/models/rl_replacements.py | 17 +++--- unsloth/models/vision.py | 77 +++++++++++++++++++++++---- unsloth/save.py | 4 ++ 15 files changed, 243 insertions(+), 65 deletions(-) diff --git a/README.md b/README.md index 1f85647f94..e6098cbebb 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ See 
[here](https://github.com/unslothai/unsloth/edit/main/README.md#advanced-pip 7. **Install Unsloth:** ```python -pip install "unsloth[windows] @ git+https://github.com/unslothai/unsloth.git" +pip install unsloth ``` #### Notes diff --git a/pyproject.toml b/pyproject.toml index 667901e76f..7b1d2efda4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,14 +33,11 @@ exclude = ["images*"] [project.optional-dependencies] triton = [ - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp39-cp39-win_amd64.whl ; python_version=='3.9' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp310-cp310-win_amd64.whl ; python_version=='3.10' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp311-cp311-win_amd64.whl ; python_version=='3.11' and platform_system == 'Windows'", - "triton @ https://github.com/woct0rdho/triton-windows/releases/download/v3.2.0-windows.post10/triton-3.2.0-cp312-cp312-win_amd64.whl ; python_version=='3.12' and platform_system == 'Windows'" + "triton-windows ; platform_system == 'Windows'", ] huggingface = [ - "unsloth_zoo>=2025.3.9", + "unsloth_zoo>=2025.3.11", "packaging", "tyro", "transformers>=4.46.1,!=4.47.0", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 9bcdd5cf64..7ffddde9b0 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -198,7 +198,7 @@ def is_bf16_supported(): return SUPPORTS_BFLOAT16 # Check for unsloth_zoo try: unsloth_zoo_version = importlib_version("unsloth_zoo") - if Version(unsloth_zoo_version) < Version("2025.3.9"): + if Version(unsloth_zoo_version) < Version("2025.3.11"): print( "Unsloth: Updating Unsloth-Zoo utilies to the latest version.\n"\ "To disable this, set os.environ['UNSLOTH_DISABLE_AUTO_UPDATES'] = '1'" diff --git a/unsloth/chat_templates.py 
b/unsloth/chat_templates.py index 2c2e36182d..c10b2641a4 100644 --- a/unsloth/chat_templates.py +++ b/unsloth/chat_templates.py @@ -1512,10 +1512,7 @@ def get_ollama_eos_tokens(tokenizer, extra_eos_tokens = []): # Remove duplicates splitted = joined_text.split("\x01\x00") - final_eos_tokens = [] - for old, new in zip(added_tokens_decoder, splitted): - if old == new: final_eos_tokens.append(old) - pass + final_eos_tokens = [old for old, new in zip(added_tokens_decoder, splitted) if old == new] final_eos_tokens += extra_eos_tokens final_eos_tokens += repeatted_tokens diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 006dfff631..834a74c66d 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -37,12 +37,12 @@ def _cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ Cross Entropy Loss = 1/n sum [ -yi log(Pi) ] @@ -111,13 +111,13 @@ def _chunked_cross_entropy_forward( loss_ptr , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , - N_CHUNKS , + VOCAB_SIZE : tl.constexpr, + N_CHUNKS : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ 256K vocab divided in 4 chunks @@ -196,12 +196,12 @@ def _cross_entropy_backward( dloss_row_stride , logsumexp_ptr , labels_ptr , - VOCAB_SIZE , + VOCAB_SIZE : tl.constexpr, BLOCK_SIZE : tl.constexpr, - DO_SOFTCAPPING , - SOFTCAP , - DO_LOGIT_SCALING , - LOGIT_SCALE , + DO_SOFTCAPPING : tl.constexpr, + SOFTCAP : tl.constexpr, + DO_LOGIT_SCALING : tl.constexpr, + LOGIT_SCALE : tl.constexpr, ): """ 
CE_i = -y log(P) = y * (log[sum(exp(x))] - x) diff --git a/unsloth/kernels/layernorm.py b/unsloth/kernels/layernorm.py index 26a77f03a0..ed8182014e 100644 --- a/unsloth/kernels/layernorm.py +++ b/unsloth/kernels/layernorm.py @@ -30,7 +30,8 @@ def layernorm_forward( b, r, mu, - n_cols, eps, + n_cols : tl.constexpr, + eps : tl.constexpr, BLOCK_SIZE : tl.constexpr ): row_idx = tl.program_id(0) @@ -68,7 +69,8 @@ def layernorm_backward( b, r, mu, - n_cols, eps, + n_cols : tl.constexpr, + eps : tl.constexpr, BLOCK_SIZE : tl.constexpr ): # Approximately follows https://github.com/karpathy/llm.c/blob/master/doc/layernorm/layernorm.md diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index 1cde6388ea..8f54e74908 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -22,9 +22,10 @@ def _rms_layernorm_forward( Y, Y_row_stride, X, X_row_stride, W, W_row_stride, - r, r_row_stride, - n_cols, eps, - BLOCK_SIZE : tl.constexpr + r, r_row_stride : tl.constexpr, + n_cols : tl.constexpr, + eps : tl.constexpr, + BLOCK_SIZE : tl.constexpr, ): """ Fast RMS Layernorm kernel @@ -57,9 +58,10 @@ def _rms_layernorm_backward( dX, dX_row_stride, X, X_row_stride, W, W_row_stride, - r, r_row_stride, + r, r_row_stride : tl.constexpr, # dW, dW_row_stride, - n_cols, eps, + n_cols : tl.constexpr, + eps : tl.constexpr, GEMMA : tl.constexpr, BLOCK_SIZE : tl.constexpr, ): @@ -107,8 +109,9 @@ def _gemma_rms_layernorm_forward( Y, Y_row_stride, X, X_row_stride, W, W_row_stride, - r, r_row_stride, - n_cols, eps, + r, r_row_stride : tl.constexpr, + n_cols : tl.constexpr, + eps : tl.constexpr, BLOCK_SIZE : tl.constexpr, ): # Copies https://github.com/google-deepmind/gemma/blob/main/gemma/layers.py#L31 @@ -253,7 +256,6 @@ def unpatch_rms_layernorm(): except: pass return - return pass diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index a3fc12f6d0..06a76b19d8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ 
-12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.10" +__version__ = "2025.3.11" __all__ = [ "SUPPORTS_BFLOAT16", @@ -72,6 +72,7 @@ platform_system = platform_system() import numpy as np import contextlib +import re import warnings, subprocess, re, inspect, psutil, os, math from unsloth_zoo.utils import Version @@ -181,6 +182,34 @@ def filter(self, x): return not (self.text in x.getMessage()) except: pass +# Patch get_model_param_count to record correct 4bit / 8bit +from transformers.trainer_pt_utils import is_deepspeed_zero3_enabled +def get_model_param_count(model, trainable_only = False): + """ + Calculate model's total param count. If trainable_only is True then count only those requiring grads + """ + if is_deepspeed_zero3_enabled(): + def numel(p): + return p.ds_numel if hasattr(p, "ds_numel") else p.numel() + else: + def numel(p): + return p.numel() + s = sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad) + if (not trainable_only) and \ + hasattr(model, "config") and \ + hasattr(model.config, "quantization_config"): + + billions = re.findall(r"([0-9]{1,})(?:b|B)", model.config.name_or_path) + if len(billions) != 0: + billions = int(billions[0]) + s = 1_000_000_000 * billions + pass + return s +pass +import transformers.trainer_pt_utils +transformers.trainer_pt_utils.get_model_param_count = get_model_param_count +import transformers.trainer +transformers.trainer.get_model_param_count = get_model_param_count # ============================================= # ============================================= diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7000739850..893a09dd14 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -1663,6 +1663,10 @@ def from_pretrained( if platform.system().lower() == 'windows': print("Unsloth: vLLM does not work in Windows! 
Will use Unsloth inference!") fast_inference = False + major_version, minor_version = torch.cuda.get_device_capability() + if major_version < 7: + print("Unsloth: vLLM does not work on older GPUs - will switch to Unsloth inference!") + fast_inference = False pass if token is None: token = get_token() @@ -1786,6 +1790,8 @@ def from_pretrained( attn_implementation = "eager", **kwargs, ) + model.fast_generate = model.generate + model.fast_generate_batches = None else: from unsloth_zoo.vllm_utils import ( load_vllm, @@ -1804,6 +1810,7 @@ def from_pretrained( enable_lora = True, max_lora_rank = max_lora_rank, disable_log_stats = disable_log_stats, + use_bitsandbytes = load_in_4bit, ) for allowed_arg in allowed_args: if allowed_arg not in load_vllm_kwargs and allowed_arg in kwargs: @@ -2651,6 +2658,19 @@ def patch_peft_model( torch.cuda.empty_cache() pass + # Patch for fast inference + vllm_engine = getattr(model.model, "vllm_engine", None) + if vllm_engine is not None: + model.vllm_engine = model.model.vllm_engine + model.fast_generate = model.model.fast_generate + model.fast_generate_batches = model.model.fast_generate_batches + + # Also saving and loading LoRA + from unsloth_zoo.vllm_utils import save_lora, load_lora + model.save_lora = functools.partial(save_lora, model) + model.load_lora = functools.partial(load_lora, model) + pass + # Add for_inference and for_training model.for_training = functools.partial(FastLlamaModel.for_training, model) model.for_inference = functools.partial(FastLlamaModel.for_inference, model) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 1b54c8c7fc..44475780af 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -405,7 +405,6 @@ def from_pretrained( if is_peft: # From https://github.com/huggingface/peft/issues/184 # Now add PEFT adapters - model.enable_input_require_grads() model = PeftModel.from_pretrained( model, old_model_name, @@ -498,10 +497,22 @@ def from_pretrained( raise 
RuntimeError("Unsloth: Pixtral only works on transformers >= 4.49.0." + LATEST) elif "qwen2.5" in model_name.lower() and transformers_version < Version("4.49.0"): raise RuntimeError("Unsloth: Qwen 2.5 only works on transformers >= 4.49.0." + LATEST) - elif "aya-vision" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): - raise RuntimeError("Unsloth: Aya Vision only works on transformers >= 4.50.0." + NIGHTLY) + elif "aya-vision" in model_name.lower(): + # Disable compiling for now - errors out! + os.environ["UNSLOTH_COMPILE_DISABLE"] = "1" + if transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: Aya Vision only works on transformers >= 4.50.0." + NIGHTLY) elif "gemma-3" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): raise RuntimeError("Unsloth: Gemma 3 only works on transformers >= 4.50.0." + NIGHTLY) + elif "c4ai-command-a-03-2025" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: Cohere's Command model only works on transformers >= 4.50.0." + NIGHTLY) + elif "granite-vision" in model_name.lower(): + # Disable compiling for now - errors out! + os.environ["UNSLOTH_COMPILE_DISABLE"] = "1" + if transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: Granite Vision only works on transformers >= 4.50.0." + NIGHTLY) + elif "olmo-2" in model_name.lower() and transformers_version < Version("4.50.0.dev0"): + raise RuntimeError("Unsloth: OLMo-2 only works on transformers >= 4.50.0." 
+ NIGHTLY) pass if USE_MODELSCOPE and not os.path.exists(model_name): @@ -668,7 +679,7 @@ def from_pretrained( use_gradient_checkpointing = use_gradient_checkpointing, *args, **kwargs, ) - + if resize_model_vocab is not None: model.resize_token_embeddings(resize_model_vocab) pass @@ -703,7 +714,6 @@ def from_pretrained( if is_peft: # From https://github.com/huggingface/peft/issues/184 # Now add PEFT adapters - model.enable_input_require_grads() model = PeftModel.from_pretrained( model, old_model_name, diff --git a/unsloth/models/mapper.py b/unsloth/models/mapper.py index cb0d73c590..9af5317986 100644 --- a/unsloth/models/mapper.py +++ b/unsloth/models/mapper.py @@ -62,6 +62,16 @@ "unsloth/llama-2-7b-chat", "meta-llama/Llama-2-7b-chat-hf", ), + "unsloth/Mixtral-8x7B-v0.1-unsloth-bnb-4bit" : ( + "unsloth/Mixtral-8x7B-v0.1", + "mistralai/Mixtral-8x7B-v0.1", + "unsloth/Mixtral-8x7B-v0.1-bnb-4bit", + ), + "unsloth/Mixtral-8x7B-Instruct-v0.1-unsloth-bnb-4bit" : ( + "unsloth/Mixtral-8x7B-Instruct-v0.1", + "mistralai/Mixtral-8x7B-Instruct-v0.1", + "unsloth/Mixtral-8x7B-Instruct-v0.1-bnb-4bit", + ), "unsloth/codellama-7b-bnb-4bit" : ( "unsloth/codellama-7b", "codellama/CodeLlama-7b-hf", @@ -678,6 +688,36 @@ "google/gemma-3-27b-pt", "unsloth/gemma-3-27b-pt-bnb-4bit", ), + "unsloth/reka-flash-3-unsloth-bnb-4bit" : ( + "unsloth/reka-flash-3", + "RekaAI/reka-flash-3", + "unsloth/reka-flash-3-bnb-4bit", + ), + "unsloth/c4ai-command-a-03-2025-unsloth-bnb-4bit" : ( + "unsloth/c4ai-command-a-03-2025", + "CohereForAI/c4ai-command-a-03-2025", + "unsloth/c4ai-command-a-03-2025-bnb-4bit", + ), + "unsloth/aya-vision-32b-unsloth-bnb-4bit" : ( + "unsloth/aya-vision-32b", + "CohereForAI/aya-vision-32b", + "unsloth/aya-vision-32b-bnb-4bit", + ), + "unsloth/aya-vision-8b-unsloth-bnb-4bit" : ( + "unsloth/aya-vision-8b", + "CohereForAI/aya-vision-8b", + "unsloth/aya-vision-8b-bnb-4bit", + ), + "unsloth/granite-vision-3.2-2b-unsloth-bnb-4bit" : ( + "unsloth/granite-vision-3.2-2b", + 
"ibm-granite/granite-vision-3.2-2b", + "unsloth/granite-vision-3.2-2b-bnb-4bit", + ), + "unsloth/OLMo-2-0325-32B-Instruct-unsloth-bnb-4bit" : ( + "unsloth/OLMo-2-0325-32B-Instruct", + "allenai/OLMo-2-0325-32B-Instruct", + "unsloth/OLMo-2-0325-32B-Instruct-bnb-4bit", + ), } INT_TO_FLOAT_MAPPER = {} diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index 4e158f58b3..e412c3a5a0 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -354,13 +354,28 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): # Check data collator if it's correct! if "data_collator" in call_args and "train_dataset" in call_args: data_collator_check = \ - "if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:\n"\ - " data_collator = DataCollatorForLanguageModeling("\ - "tokenizer = processing_class if 'processing_class' in locals() else tokenizer, mlm = False)\n"\ - "elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:\n"\ - " data_collator = DataCollatorForSeq2Seq("\ - "tokenizer = processing_class if 'processing_class' in locals() else tokenizer)\n" + "__tokenizer = processing_class if 'processing_class' in locals() else tokenizer\n"\ + "from unsloth_zoo.vision_utils import UnslothVisionDataCollator\n"\ + "if not isinstance(data_collator, UnslothVisionDataCollator):\n"\ + " if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:\n"\ + " data_collator = DataCollatorForLanguageModeling(__tokenizer, mlm = False)\n"\ + " elif isinstance(data_collator, DataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:\n"\ + " data_collator = DataCollatorForSeq2Seq(__tokenizer)\n"\ + "else:\n"\ + " if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False\n"\ + " if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''\n"\ + " if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = 
{'skip_prepare_dataset': True}\n" extra_args += data_collator_check + + # Also check if .pad exists -> if not, and is VLM, then change it! + pad_check = \ + "if not isinstance(data_collator, UnslothVisionDataCollator):\n"\ + " if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):\n"\ + " if isinstance(data_collator, DataCollatorForSeq2Seq):\n"\ + " data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)\n"\ + " else:\n"\ + " data_collator = DataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False)\n" + extra_args += pad_check pass # Check NEFTune diff --git a/unsloth/models/rl_replacements.py b/unsloth/models/rl_replacements.py index 7462d55944..4071ef835a 100644 --- a/unsloth/models/rl_replacements.py +++ b/unsloth/models/rl_replacements.py @@ -207,9 +207,12 @@ def grpo_trainer__get_per_token_logps(function_name, function): if function_name != "_get_per_token_logps": return function def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep): - return None # Unsloth efficient GRPO + if os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0': + return None # Unsloth efficient GRPO + # Otherwise, calculate normally: if not hasattr(self, '_autocast_dtype'): self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16 + if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1': self._autocast_dtype = torch.float32 with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype): # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded logits = model(input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1).logits @@ -229,12 +232,14 @@ def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep) pass RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps) -grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"] -UnslothEfficientGRPO = 
RL_REPLACEMENTS["UnslothEfficientGRPO"] -grpo_accumulated_loss = RL_REPLACEMENTS["grpo_accumulated_loss"] +grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"] +grpo_compute_loss_slow = RL_REPLACEMENTS["grpo_compute_loss_slow"] +UnslothEfficientGRPO = RL_REPLACEMENTS["UnslothEfficientGRPO"] +grpo_accumulated_loss = RL_REPLACEMENTS["grpo_accumulated_loss"] RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_compute_loss)) RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(UnslothEfficientGRPO)) RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_accumulated_loss)) +RL_PRE_ITEMS["grpo_trainer"].append(grpo_compute_loss_slow) # Edit _get_per_token_logps to handle mixed precision def grpo_trainer_compute_loss(function_name, function): @@ -266,8 +271,8 @@ def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch # per_token_loss = -(per_token_loss - self.beta * per_token_kl) # loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean() input_ids = input_ids[:, -logits_to_keep:] - if False:#per_token_logps is not None: - loss, completion_length, mean_kl = grpo_compute_loss( + if per_token_logps is not None: + loss, completion_length, mean_kl = grpo_compute_loss_slow( ref_per_token_logps, per_token_logps, input_ids, completion_mask, self.beta, advantages, ) else: diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 2ef9d2ee99..31733c2976 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -66,8 +66,17 @@ ] global FORCE_FLOAT32 -FORCE_FLOAT32 = ["gemma3"] +FORCE_FLOAT32 = [ + "gemma3", +] + +global FORCE_EAGER_ATTENTION +FORCE_EAGER_ATTENTION = [ + "pixtral", # Pixtral SDPA not implemented +] +global NUM_LOGITS_TO_KEEP +NUM_LOGITS_TO_KEEP = dict() def unsloth_base_fast_generate( self, @@ -78,21 +87,45 @@ def unsloth_base_fast_generate( dtype = _get_dtype(self.config.torch_dtype) # Check if VLM - is_vlm = ( + is_vlm = any( x.endswith(("ForConditionalGeneration", 
"ForVisionText2Text")) for x in self.config.architectures ) is_vlm = is_vlm or hasattr(self.config, "vision_config") + arch = self.config.architectures[0] # Remove token_type_ids kwargs.pop("token_type_ids", None) # VLMs do not allow logits_to_keep if not is_vlm: - kwargs["logits_to_keep"] = 1 + global NUM_LOGITS_TO_KEEP + if arch not in NUM_LOGITS_TO_KEEP: + m = self + # Find which is needed ie + # num_logits_to_keep or logits_to_keep + while hasattr(m, "model"): + if hasattr(m, "forward"): + keys = inspect.signature(m.forward).parameters.keys() + if "num_logits_to_keep" in keys: + NUM_LOGITS_TO_KEEP[arch] = "num_logits_to_keep" + break + elif "logits_to_keep" in keys: + NUM_LOGITS_TO_KEEP[arch] = "logits_to_keep" + break + m = m.model + pass + if arch not in NUM_LOGITS_TO_KEEP: + NUM_LOGITS_TO_KEEP[arch] = None + pass + pass + key = NUM_LOGITS_TO_KEEP[arch] + if key is not None and key not in kwargs: + kwargs[key] = 1 else: - kwargs.pop("logits_to_keep", None) - kwargs.pop("num_logits_to_keep", None) + pass + # kwargs.pop("logits_to_keep", None) + # kwargs.pop("num_logits_to_keep", None) # Check pad_token model_eos_token_id = getattr(self.config, "eos_token_id", None) @@ -186,13 +219,27 @@ def from_pretrained( os.environ["UNSLOTH_FORCE_FLOAT32"] = "0" bnb_compute_dtype = dtype for disable_name in FORCE_FLOAT32: - if disable_name.lower() == model_type_arch.lower() and dtype == torch.float16: + if (disable_name.lower() == model_type_arch.lower() or \ + disable_name.lower() in model_name.lower()) and \ + dtype == torch.float16: + print(f"Unsloth: Using float16 precision for {model_type_arch} won't work! 
Using float32.") os.environ["UNSLOTH_FORCE_FLOAT32"] = "1" bnb_compute_dtype = torch.float32 break pass + global FORCE_EAGER_ATTENTION + attn_implementation = "sdpa" + for disable_name in FORCE_EAGER_ATTENTION: + if (disable_name.lower() == model_type_arch.lower() or \ + disable_name.lower() in model_name.lower()): + + print(f"Unsloth: {model_type_arch} does not support SDPA - switching to eager!") + attn_implementation = "eager" + break + pass + bnb_config = None if full_finetuning and (load_in_4bit or load_in_8bit): print("Unsloth: You selected full finetuning support, but 4bit / 8bit is enabled - disabling LoRA / QLoRA.") @@ -249,7 +296,7 @@ def from_pretrained( # quantization_config = bnb_config, token = token, trust_remote_code = trust_remote_code, - attn_implementation = "sdpa", #[TODO] Pixtral for eg fails + attn_implementation = attn_implementation, **kwargs, ) # Return old flag @@ -263,10 +310,20 @@ def from_pretrained( padding_side = "right", token = token, ) - # Add padding side as well if hasattr(tokenizer, "tokenizer"): - tokenizer.tokenizer.padding_side = "right" - + __tokenizer = tokenizer.tokenizer + # Add padding side as well + __tokenizer.padding_side = "right" + # Check bos, eos, pad, unk tokens + tokens = ["bos_token", "eos_token", "pad_token", "unk_token"] + for token in tokens: + if hasattr(__tokenizer, token) and not hasattr(tokenizer, token): + _args = {"__tokenizer" : __tokenizer, "tokenizer" : tokenizer} + exec(f"tokenizer.{token} = __tokenizer.{token}", _args) + exec(f"tokenizer.{token}_id = __tokenizer.{token}_id", _args) + pass + pass + pass model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) # Fix other stuff like BnB compute data types diff --git a/unsloth/save.py b/unsloth/save.py index d03f47e874..4b2c012985 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2219,6 +2219,10 @@ def unsloth_convert_lora_to_ggml_and_save_locally( from .models.loader_utils import get_model_name from 
unsloth_zoo.saving_utils import merge_and_overwrite_lora +from unsloth_zoo.llama_cpp import ( + install_llama_cpp, + convert_to_gguf, +) @torch.inference_mode def unsloth_generic_save( From fa6dedf3be1b8f78609477cca8d45661a93700c2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 07:58:57 -0700 Subject: [PATCH 1077/1088] GGUF saving (#2017) * Update rl_replacements.py * Update rl.py * Update rl.py * Update rl.py * Update rl.py * fix an import error (#1767) * fix an import error * Delete .gitignore * Update loader.py * Update save.py --------- Co-authored-by: Daniel Han * SamplingParams * Convert mask to float (#1762) * [Windows Support] Add latest `xformers` wheels to pyproject.toml (#1753) * Add latest xformers * Add a couple of lines to docs * vLLMSamplingParams * Update __init__.py * default num_chunks == -1 * Versioning * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update rl_replacements.py * Update rl_replacements.py * Update pyproject.toml * Update pyproject.toml * Export Model to ollama.com (#1648) * Ollama Export Model to ollama.com Signed-off-by: Jyotin Goel * Check for model_name Signed-off-by: Jyotin Goel * subprocess use instead of requests | added check for ollama server Signed-off-by: Jyotin Goel * create_ollama_model Signed-off-by: Jyotin Goel * create_ollama_model | fix Signed-off-by: Jyotin Goel * Push to Ollama Signed-off-by: Jyotin Goel --------- Signed-off-by: Jyotin Goel * Update cross_entropy_loss.py * torch_cuda_device * Update utils.py * Update utils.py * Update utils.py * device * device * Update loader.py * Update llama.py * Update README.md * Update llama.py * Update llama.py * Update _utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update utils.py * Update utils.py * Update utils.py * Update utils.py * __version__ * 
Update rl.py * Bug fixes * Bug fixes * Update llama.py * Update _utils.py * _wrap_fast_inference * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * SFT dataset prepare * Update pyproject.toml * Update rl_replacements.py * Update rl_replacements.py * Update rl_replacements.py * Update rl.py * Update llama.py * Update llama.py * Update utils.py * bug fix * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update __init__.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update rl.py * Update rl.py * Update rl.py * Update _utils.py * Update __init__.py * Update _utils.py * Version * versioning * Update _utils.py * Update llama.py * Update llama.py * Bug fixes * FastModel * __doc__ * Update vision.py * Update loader.py * Update loader.py * Update loader.py * version * move use_modelscope to _utils (#1938) * move use_modelscope to _utils * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Don't use revision when loading model_config and is_peft=True (#1949) * More syntax warnings (#1944) * move use_modelscope to _utils * fix * Update _utils.py * Update loader.py --------- Co-authored-by: Daniel Han * Update loader.py * Full finetuning and other fixes * UNSLOTH_ENABLE_FULL_FINETUNING * Update loader.py * Update loader.py * Update loader.py * Update vision.py * Update vision.py * full finetuning * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * max_seq_length * Update rl.py * Update rl.py * Update rl.py * Update pyproject.toml * AutoModelForImageTextToText * Update mapper.py * Update pyproject.toml * Update _utils.py * Update _utils.py * Update _utils.py * Batch samples * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Update _utils.py * 
Update loader.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Update vision.py * Update mapper.py * Update vision.py * Temporary patches * Update loader.py * model names * Gemma 3 chat template * Bug fixes * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update llama.py * Update llama.py * Update rl.py * Update chat_templates.py * Update chat_templates.py * Update vision.py * Update vision.py * Update vision.py * Update loader.py * Update vision.py * Update vision.py * Revert * Update _utils.py * forced precision * Autocast * Update vision.py * Update vision.py * Update rl.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update rl.py * vLLM fixes * constexpr * Update vision.py * Update vision.py * Update vision.py * Update rl.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update _utils.py * Update _utils.py * Update _utils.py * Update _utils.py * Update save.py * New models * Triton windows update (#1976) * Update pyproject.toml * Update README.md * Update RMS LayerNorm implementation, and list compr. 
change in chat templates (#1974) * Update RMS LayerNorm implementation with optimizations and testing suite * perf: optimize list comprehension in get_ollama_eos_tokens * Update Zoo * Update llama.py * Update llama.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update vision.py * Update rl_replacements.py * Update vision.py * grpo fix * Update rl_replacements.py * Update vision.py * Update rl_replacements.py * Update vision.py * Update mapper.py * Update vision.py * Update vision.py * Update loader.py * Update vision.py * Update save.py * Update save.py * Update save.py --------- Signed-off-by: Jyotin Goel Co-authored-by: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com> Co-authored-by: Edd <68678137+Erland366@users.noreply.github.com> Co-authored-by: Ben <6579034+versipellis@users.noreply.github.com> Co-authored-by: Jyotin Goel <120490013+gjyotin305@users.noreply.github.com> Co-authored-by: Kareem <81531392+KareemMusleh@users.noreply.github.com> Co-authored-by: Wilson Wu <140025193+wiwu2390@users.noreply.github.com> Co-authored-by: Akshay Behl <126911424+Captain-T2004@users.noreply.github.com> --- unsloth/models/vision.py | 2 +- unsloth/save.py | 56 +++++++++++++++++++++++++++++++++++++--- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 31733c2976..1404be8b0f 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -485,7 +485,7 @@ def post_patch_model( full_finetuning = os.environ.get("UNSLOTH_ENABLE_FULL_FINETUNING", "0") == "1" float32_mixed_precision = True - if _get_dtype(model.config.torch_dtype) == torch.bfloat16: + if _get_dtype(model.config.torch_dtype) == torch.bfloat16 and full_finetuning: # Use bfloat16 precision for full finetuning float32_mixed_precision = False diff --git 
a/unsloth/save.py b/unsloth/save.py index 4b2c012985..3e720ceb9b 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2218,12 +2218,60 @@ def unsloth_convert_lora_to_ggml_and_save_locally( from .models.loader_utils import get_model_name -from unsloth_zoo.saving_utils import merge_and_overwrite_lora +from unsloth_zoo.saving_utils import ( + merge_and_overwrite_lora, + prepare_saving, +) from unsloth_zoo.llama_cpp import ( install_llama_cpp, - convert_to_gguf, + convert_to_gguf as _convert_to_gguf, ) +@torch.inference_mode +def save_to_gguf_generic( + model, + save_directory, + quantization_type = "Q8_0", + repo_id = None, + token = None, +): + if token is None and repo_id is not None: token = get_token() + if repo_id is not None and token is None: + raise RuntimeError("Unsloth: Please specify a token for uploading!") + + if not os.path.exists(os.path.join("llama.cpp", "unsloth_convert_hf_to_gguf.py")): + install_llama_cpp(just_clone_repo = True) + pass + + metadata = _convert_to_gguf( + save_directory, + print_output = True, + quantization_type = quantization_type, + ) + if repo_id is not None: + prepare_saving( + model, + repo_id, + push_to_hub = True, + max_shard_size = "50GB", + private = True, + token = token, + ) + + from huggingface_hub import HfApi + api = HfApi(token = token) + api.upload_folder( + folder_path = save_directory, + repo_id = repo_id, + repo_type = "model", + allow_patterns = ["*.gguf"], + private = True, + ) + pass + return metadata +pass + + @torch.inference_mode def unsloth_generic_save( model, @@ -2467,8 +2515,8 @@ def patch_saving_functions(model, vision = False): # Vision only 1 option model.push_to_hub_merged = types.MethodType(unsloth_generic_push_to_hub_merged, model) model.save_pretrained_merged = types.MethodType(unsloth_generic_save_pretrained_merged, model) - model.push_to_hub_gguf = types.MethodType(not_implemented_save, model) - model.save_pretrained_gguf = types.MethodType(not_implemented_save, model) + model.push_to_hub_gguf 
= types.MethodType(save_to_gguf_generic, model) + model.save_pretrained_gguf = types.MethodType(save_to_gguf_generic, model) pass return model pass From 1101752ee1e4ecae1b33a1bf72d06d79ab9f52b4 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 08:08:43 -0700 Subject: [PATCH 1078/1088] Update save.py --- unsloth/save.py | 1 - 1 file changed, 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index 3e720ceb9b..eeadf0154b 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2265,7 +2265,6 @@ def save_to_gguf_generic( repo_id = repo_id, repo_type = "model", allow_patterns = ["*.gguf"], - private = True, ) pass return metadata From fc6ca8dd1afc618cede19f27df62fc6252a9f0e1 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 08:17:36 -0700 Subject: [PATCH 1079/1088] Update vision.py --- unsloth/models/vision.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 1404be8b0f..219aba8cb6 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -314,15 +314,16 @@ def from_pretrained( __tokenizer = tokenizer.tokenizer # Add padding side as well __tokenizer.padding_side = "right" - # Check bos, eos, pad, unk tokens - tokens = ["bos_token", "eos_token", "pad_token", "unk_token"] - for token in tokens: - if hasattr(__tokenizer, token) and not hasattr(tokenizer, token): - _args = {"__tokenizer" : __tokenizer, "tokenizer" : tokenizer} - exec(f"tokenizer.{token} = __tokenizer.{token}", _args) - exec(f"tokenizer.{token}_id = __tokenizer.{token}_id", _args) - pass - pass + # Check bos, eos, pad tokens + if hasattr(tokenizer, "bos_token"): + tokenizer.bos_token = tokenizer.tokenizer.bos_token + tokenizer.bos_token_id = tokenizer.tokenizer.bos_token_id + if hasattr(tokenizer, "eos_token"): + tokenizer.eos_token = tokenizer.tokenizer.eos_token + tokenizer.eos_token_id = tokenizer.tokenizer.eos_token_id + if hasattr(tokenizer, "pad_token"): + 
tokenizer.pad_token = tokenizer.tokenizer.pad_token + tokenizer.pad_token_id = tokenizer.tokenizer.pad_token_id pass model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) From 23683e1d3491639f2dfb30422c1ea0da45e22740 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 08:17:49 -0700 Subject: [PATCH 1080/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 06a76b19d8..4ebde13b8c 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.11" +__version__ = "2025.3.12" __all__ = [ "SUPPORTS_BFLOAT16", From 99a591a8623ff2c80eaa7a2ab6915b4c67f44cd5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 08:19:02 -0700 Subject: [PATCH 1081/1088] Update vision.py --- unsloth/models/vision.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/unsloth/models/vision.py b/unsloth/models/vision.py index 219aba8cb6..24015f82fe 100644 --- a/unsloth/models/vision.py +++ b/unsloth/models/vision.py @@ -315,15 +315,15 @@ def from_pretrained( # Add padding side as well __tokenizer.padding_side = "right" # Check bos, eos, pad tokens - if hasattr(tokenizer, "bos_token"): - tokenizer.bos_token = tokenizer.tokenizer.bos_token - tokenizer.bos_token_id = tokenizer.tokenizer.bos_token_id - if hasattr(tokenizer, "eos_token"): - tokenizer.eos_token = tokenizer.tokenizer.eos_token - tokenizer.eos_token_id = tokenizer.tokenizer.eos_token_id - if hasattr(tokenizer, "pad_token"): - tokenizer.pad_token = tokenizer.tokenizer.pad_token - tokenizer.pad_token_id = tokenizer.tokenizer.pad_token_id + if hasattr(__tokenizer, "bos_token"): + tokenizer.bos_token = __tokenizer.bos_token + tokenizer.bos_token_id = __tokenizer.bos_token_id + if 
hasattr(__tokenizer, "eos_token"): + tokenizer.eos_token = __tokenizer.eos_token + tokenizer.eos_token_id = __tokenizer.eos_token_id + if hasattr(__tokenizer, "pad_token"): + tokenizer.pad_token = __tokenizer.pad_token + tokenizer.pad_token_id = __tokenizer.pad_token_id pass model, tokenizer = patch_tokenizer(model, tokenizer) model = post_patch_loss_function(model) From ae43909f1f010c7a67441399d94ee7e92c9d3ce2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 08:33:33 -0700 Subject: [PATCH 1082/1088] Precision issues --- unsloth/models/_utils.py | 2 +- unsloth/models/rl.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 4ebde13b8c..10ba3530da 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.12" +__version__ = "2025.3.13" __all__ = [ "SUPPORTS_BFLOAT16", diff --git a/unsloth/models/rl.py b/unsloth/models/rl.py index e412c3a5a0..c450ef6df5 100644 --- a/unsloth/models/rl.py +++ b/unsloth/models/rl.py @@ -238,9 +238,8 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"): "use_fp16 = getattr(args, 'fp16', False)\n"\ "force_float32 = False\n"\ "if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':\n"\ - " if use_bf16 or use_fp16:\n"\ - " print('Unsloth: Switching to float32 training since model cannot work with float16')\n"\ - " force_float32 = True\n"\ + " print('Unsloth: Switching to float32 training since model cannot work with float16')\n"\ + " force_float32 = True\n"\ "mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')\n"\ "dtype = getattr(model.config, 'torch_dtype', None)\n"\ "if dtype is None: dtype = model.get_input_embeddings().dtype\n"\ From c62a05bc6b157e7d9def5ecd43883a1b2fb8c470 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 09:54:48 -0700 
Subject: [PATCH 1083/1088] Update save.py --- unsloth/save.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/save.py b/unsloth/save.py index eeadf0154b..b8da9c08d0 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -2310,7 +2310,7 @@ def unsloth_generic_save( private = private, token = token, output_dtype = None, - low_disk_space_usage = False, + low_disk_space_usage = True, use_temp_file = False, ) return From aff2cf21250af91ed1a34af2c40b46819989a947 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 10:04:10 -0700 Subject: [PATCH 1084/1088] Update _utils.py --- unsloth/models/_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 10ba3530da..69cc1e6884 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2025.3.13" +__version__ = "2025.3.14" __all__ = [ "SUPPORTS_BFLOAT16", From 5174a675a2d15bcb36beb64a030c7bb27310d6ec Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 11:12:02 -0700 Subject: [PATCH 1085/1088] Gemma 3 readme (#2019) * Update README.md * Update README.md * Update README.md --------- Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> --- README.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index e6098cbebb..5365e0b262 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ -### Finetune Llama 3.3, Mistral, Phi-4, Qwen 2.5 & Gemma 2x faster with 80% less memory! +### Finetune Llama 3.3, Gemma 3, Phi-4, Qwen 2.5 & Mistral 2x faster with 80% less VRAM! ![](https://i.ibb.co/sJ7RhGG/image-41.png) @@ -18,26 +18,24 @@ ## ✨ Finetune for Free -All notebooks are **beginner friendly**! 
Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, Ollama, vLLM or uploaded to Hugging Face. +Notebooks are beginner friendly. Read our [guide](https://docs.unsloth.ai/get-started/fine-tuning-guide). Add your dataset, click "Run All", and export your finetuned model to GGUF, Ollama, vLLM or Hugging Face. | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| -| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | | **GRPO (R1 reasoning)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb) | 2x faster | 80% less | +| **Gemma 3 (4B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma_3_(4B).ipynb) | 1.6x faster | 60% less | +| **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | | **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 70% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 50% less | | **Llama 3.1 (8B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-Alpaca.ipynb) | 2x faster | 70% less | -| **Gemma 2 (9B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma2_(9B)-Alpaca.ipynb) | 2x faster | 70% less | | **Qwen 2.5 (7B)** | [▶️ Start for 
free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_(7B)-Alpaca.ipynb) | 2x faster | 70% less | | **Mistral v0.3 (7B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-Conversational.ipynb) | 2.2x faster | 75% less | | **Ollama** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3_(8B)-Ollama.ipynb) | 1.9x faster | 60% less | | **DPO Zephyr** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Zephyr_(7B)-DPO.ipynb) | 1.9x faster | 50% less | - See [all our notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks) and [all our models](https://docs.unsloth.ai/get-started/all-our-models) -- **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Gemma 2 (9B)](https://www.kaggle.com/code/danielhanchen/kaggle-gemma-7b-unsloth-notebook/), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) -- Run notebooks for [Llama 3.2 conversational](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb), [Llama 3.1 conversational](https://colab.research.google.com/drive/15OyFkGoCImV9dSsewU1wa2JuKB4-mDE_?usp=sharing) and [Mistral v0.3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing) -- This [continued pretraining notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Mistral_v0.3_(7B)-CPT.ipynb) is for learning another language -- Click [here](https://docs.unsloth.ai/) for detailed documentation for Unsloth. 
+- **Kaggle Notebooks** for [Llama 3.2 Kaggle notebook](https://www.kaggle.com/danielhanchen/kaggle-llama-3-2-1b-3b-unsloth-notebook), [Llama 3.1 (8B)](https://www.kaggle.com/danielhanchen/kaggle-llama-3-1-8b-unsloth-notebook), [Phi-4 (14B)](https://www.kaggle.com/code/danielhanchen/phi-4-finetuning-unsloth-notebook), [Mistral (7B)](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) +- See detailed documentation for Unsloth [here](https://docs.unsloth.ai/). ## ⚡ Quickstart @@ -45,19 +43,21 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and ``` pip install unsloth ``` -For Windows install instructions, see [here](https://github.com/unslothai/unsloth/edit/main/README.md#windows-installation). +For Windows install instructions, see [here](https://docs.unsloth.ai/get-started/installing-+-updating/windows-installation). ## 🦥 Unsloth.ai News -- 📣 NEW! Introducing Long-context [Reasoning (GRPO)](https://unsloth.ai/blog/grpo) in Unsloth. You can now reproduce DeepSeek-R1's "aha" moment with just 5GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! -- 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now! More details: [unsloth.ai/blog/deepseek-r1](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). -- 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft is now supported. We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) +- 📣 NEW! 
[**EVERYTHING** is now supported](https://unsloth.ai/blog/gemma3#everything) incuding: full finetuning, pretraining, ALL models (Mixtral, MOE, Cohere, Mamba) and all training algorithms (KTO, DoRA) etc. MultiGPU support coming very soon. +- 📣 NEW! **Gemma 3** by Google: [Read Blog](https://unsloth.ai/blog/gemma3). We [uploaded GGUFs, 4-bit models](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Gemma 3 (4B) Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma_3.ipynb) +- 📣 NEW! Introducing Long-context [Reasoning (GRPO)](https://unsloth.ai/blog/grpo) in Unsloth. Train your own reasoning model with just 5GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! +- 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now [with our guide](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). +- 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft: We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. -- 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. 
- 📣 Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb), [Qwen 2.5 VL (7B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb) and [Pixtral (12B) 2409](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Pixtral_(12B)-Vision.ipynb)
    Click for more news - + +- 📣 NEW! We worked with Apple to add [Cut Cross Entropy](https://arxiv.org/abs/2411.09009). Unsloth now supports 89K context for Meta's Llama 3.3 (70B) on a 80GB GPU - 13x longer than HF+FA2. For Llama 3.1 (8B), Unsloth enables 342K context, surpassing its native 128K support. - 📣 We found and helped fix a [gradient accumulation bug](https://unsloth.ai/blog/gradient)! Please update Unsloth and transformers. - 📣 Try out [Chat interface](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Unsloth_Studio.ipynb)! - 📣 NEW! Qwen-2.5 including [Coder](https://unsloth.ai/blog/qwen-coder) models are now supported with bugfixes. 14b fits in a Colab GPU! [Qwen 2.5 conversational notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2.5_Coder_(14B)-Conversational.ipynb) @@ -103,7 +103,7 @@ See [here](https://github.com/unslothai/unsloth/edit/main/README.md#advanced-pip You should install the latest version of your GPUs driver. Download drivers here: [NVIDIA GPU Drive](https://www.nvidia.com/Download/index.aspx). 3. **Install Visual Studio C++:** - You will need Visual Studio, with C++ installed. By default, C++ is not installed with [Visual Studio](https://visualstudio.microsoft.com/vs/community/), so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. For more detailed instructions, see [here](https://docs.unsloth.ai/get-started/installing-+-updating). + You will need Visual Studio, with C++ installed. By default, C++ is not installed with [Visual Studio](https://visualstudio.microsoft.com/vs/community/), so make sure you select all of the C++ options. Also select options for Windows 10/11 SDK. For detailed instructions with options, see [here](https://docs.unsloth.ai/get-started/installing-+-updating). 5. **Install CUDA Toolkit:** Follow the instructions to install [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit-archive). 
From 5df2a0ce2a63a8b206c2e857bb44f4f9247610f5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 14 Mar 2025 22:06:53 -0700 Subject: [PATCH 1086/1088] Update README.md (#2028) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5365e0b262..695e7b6ee9 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ Notebooks are beginner friendly. Read our [guide](https://docs.unsloth.ai/get-st | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------|---------|--------|----------| | **GRPO (R1 reasoning)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.1_(8B)-GRPO.ipynb) | 2x faster | 80% less | -| **Gemma 3 (4B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma_3_(4B).ipynb) | 1.6x faster | 60% less | +| **Gemma 3 (4B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma3_(4B).ipynb) | 1.6x faster | 60% less | | **Llama 3.2 (3B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(1B_and_3B)-Conversational.ipynb) | 2x faster | 70% less | | **Phi-4 (14B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) | 2x faster | 70% less | | **Llama 3.2 Vision (11B)** | [▶️ Start for free](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb) | 2x faster | 50% less | From 6f7c8c6d0a63caaa129cc0bc6b845d5d8b9c81e8 Mon Sep 17 00:00:00 2001 From: Michael Han <107991372+shimmyshimmer@users.noreply.github.com> Date: Sat, 15 Mar 2025 17:47:25 -0700 Subject: [PATCH 1087/1088] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 695e7b6ee9..b4be0c7334 100644 --- a/README.md +++ b/README.md @@ -47,10 +47,10 
@@ For Windows install instructions, see [here](https://docs.unsloth.ai/get-started ## 🦥 Unsloth.ai News - 📣 NEW! [**EVERYTHING** is now supported](https://unsloth.ai/blog/gemma3#everything) incuding: full finetuning, pretraining, ALL models (Mixtral, MOE, Cohere, Mamba) and all training algorithms (KTO, DoRA) etc. MultiGPU support coming very soon. -- 📣 NEW! **Gemma 3** by Google: [Read Blog](https://unsloth.ai/blog/gemma3). We [uploaded GGUFs, 4-bit models](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Gemma 3 (4B) Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma_3.ipynb) +- 📣 NEW! **Gemma 3** by Google: [Read Blog](https://unsloth.ai/blog/gemma3). We [uploaded GGUFs, 4-bit models](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). - 📣 NEW! Introducing Long-context [Reasoning (GRPO)](https://unsloth.ai/blog/grpo) in Unsloth. Train your own reasoning model with just 5GB VRAM. Transform Llama, Phi, Mistral etc. into reasoning LLMs! - 📣 NEW! [DeepSeek-R1](https://unsloth.ai/blog/deepseek-r1) - the most powerful open reasoning models with Llama & Qwen distillations. Run or fine-tune them now [with our guide](https://unsloth.ai/blog/deepseek-r1). All model uploads: [here](https://huggingface.co/collections/unsloth/deepseek-r1-all-versions-678e1c48f5d2fce87892ace5). -- 📣 NEW! [Phi-4](https://unsloth.ai/blog/phi4) by Microsoft: We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). Try the [Phi-4 Colab notebook](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Phi_4-Conversational.ipynb) +- 📣 NEW! 
[Phi-4](https://unsloth.ai/blog/phi4) by Microsoft: We also [fixed bugs](https://unsloth.ai/blog/phi4) in Phi-4 and [uploaded GGUFs, 4-bit](https://huggingface.co/collections/unsloth/phi-4-all-versions-677eecf93784e61afe762afa). - 📣 NEW! [Llama 3.3 (70B)](https://huggingface.co/collections/unsloth/llama-33-all-versions-67535d7d994794b9d7cf5e9f), Meta's latest model is supported. - 📣 Introducing Unsloth [Dynamic 4-bit Quantization](https://unsloth.ai/blog/dynamic-4bit)! We dynamically opt not to quantize certain parameters and this greatly increases accuracy while only using <10% more VRAM than BnB 4-bit. See our collection on [Hugging Face here.](https://huggingface.co/collections/unsloth/unsloth-4-bit-dynamic-quants-67503bb873f89e15276c44e7) - 📣 [Vision models](https://unsloth.ai/blog/vision) now supported! [Llama 3.2 Vision (11B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Llama3.2_(11B)-Vision.ipynb), [Qwen 2.5 VL (7B)](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Qwen2_VL_(7B)-Vision.ipynb) and [Pixtral (12B) 2409](https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Pixtral_(12B)-Vision.ipynb) From fde9509c37068ab2f4d3362f28e98bda8bf54bdc Mon Sep 17 00:00:00 2001 From: Nino Risteski Date: Tue, 18 Mar 2025 10:15:05 +0100 Subject: [PATCH 1088/1088] Optimize get_executable func with list compr --- unsloth/save.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/unsloth/save.py b/unsloth/save.py index b8da9c08d0..c4ec69c7cf 100644 --- a/unsloth/save.py +++ b/unsloth/save.py @@ -933,14 +933,11 @@ def get_executable(executables): # Get system locations (System Path).split(system separator) system_directories = os.environ.get("PATH").split(os.pathsep) - for directory in system_directories: - for executable in executables: - path = os.path.join(directory, executable) - # Check if the executable exists and is executable - if os.path.exists(path) and 
os.access(path, os.X_OK): return path - pass - pass - return None + found_executables = [os.path.join(path, executable) + for path in system_directories + for executable in executables + if os.path.exists(os.path.join(path, executable)) and os.access(os.path.join(path, executable), os.X_OK)] + return found_executables[0] if found_executables else None pass
  • 9bf z-}))Ea$|6+nA8wctdKcs3x@s=t4`Wk{!oy8!lh-9NC2?bIk93zF!{jB;udb?60UmM z=Z~XQ_6((3Bq9pQ@=L)skeT`YN8}AEC@%XBnIJ*(@WC@=1K_M3?t9rFT3M{4K6;$@ zL=xHY{uV=_rz<386#Hd3hUneD&q&bl*?7onb8jvNysi_p!e(@sDrX5UN~74>kGXas zsXw2lk3A&ceCQ_)Z;LNeABN{DBEoJiMWOE5iPy1lY0dGo5PN_mhhoM-4y)M-Bo>?K z4RB71#&M=`v^b?4D87l09J1+H_IIdWHD$wd3(8sVitPr}|s zhNZqe_uUV5q7s=Hx#g%8)0`^H696;Cq2{FSbQeDr2lU@5U&T1Hih-U7FhdX9G|%og zUR);Z1z^1wj|NuK$C{0rcH8EntSex2TmgB013=;bFpf_oE))9`WJANm=e>A z6vL66jj!dB72kmfnu+x}2ptuYD>L==>_-v3=c7k3y|>=-LgosDNG8=7=3U3Fu3Onp zQYQpT$P1J>%cwNT%qYX@2}(WnS7F)tO(iWsM(6qP_%;zR?_#=b7#uKNy{%-ScX#xF zDm?cg9B3%`&JUstY^J&8`2Jt2e#AIKMkPy$hby&WaCQ}G|Cm3<^finX2uAWjk-(TY zX{}OhcILo7WF->m)H_VY^wb27)}oG-C~#?5VWi_CZb zbQnv&wVn;blH_b>rK#g%g@ZI~hIP+o^#BA<&~HgT25BA#@Fh}KLc36LPlkM`$KSpS zwmC#c2*p3{YN_W2hc+oyu-@~&1Ol(RiWgAE#-WEPsF=G+Ejo%On}EiOSfq-BP~p_P zZHHq*9C9Bsdk~2%={8?{67q~zx|qQuB+UL z@Zt~Bh}+RWDFq_sQDucr9mI?*ofSj);(DBX1vO&x&U^@5p$vin7p+HLYGBFTFKQ&% z>_W$pPu#P8?f>MMtTi59M#o=ROpmXm9_XP4$+3qL`#FQ~&e#eN`tzCq;DY2ZfzNUx zRzP>_g}U4Evr(?EZ9Q89-M!%lq<~&TPlBfx`+z77%Rlv!2sQljiYB7TgFc5an0N)-d@6Rc2!`WVID6S@1K*cP>91 zyc|c!DBD9ixoK6su$- zePvCiY|M1iMj#p^{+0X-ig28?c*;f=+ycG)RZ}?UGomrkP_b2B4GLQi2AgLR+6noj z{H80XUd{fm?4w?A$B?7#u^nTAj)dgcMf=~PTskH=J}i!i`vJ+{Xix-6B`4%B8j2Kz zV$`zxEgR<$vof*}7x4FCS4|E%BkN2*t87^YlGT}mMt!LibTi=+lRBzzRJ#_eQqG_u;)_O6p9ftbMd6$2rUvU787_`ZP!sG03L-s1wRkthIEjr1h4(+HshL5n+lTKMV5tHYU#W4fl4DHmd6RLsiyV zzUhomQ^|@XpT#!$<>1&}bc+AFzrBFl>INP3AHuHp*1a_7C&mkf1}Ll=Ejav{e@H&8 zi+r7%-=pefO+OW72MO^@uK<+|is1G%YHNO=TjfrliQjVNp$~#TSG@86Et)S*I8f zOTc#JPF3E*h>whgTdwTH56($x7(pNJS5=(D$i0B%tKSe!!$CEpEKaIXjLuea1MJJ& zkf1}j$;~gOL?2fv+}RXfNHjW(3{RiPg^#1p+3!*p!fzN2L~?57LZpt3x};+PC%(5? 
zg{-Wa$dQ7w#MEat3`)w#?)~u&@8sN~f+WUnSFWt3d^WAUt`0U5$SI(iNiLnk%RT$I zEul<0DH%x^;5p`+ye2|cgf2RDJS*P>C1B1A_8A8Xt`VS&7A_m27B2j-De#=-wfq?A znwqrd<+lkF(KXk0vSWBwv^$pojlfuZ;}hDB225aS)GVg_xn#&6?IANoPvc_`r#klz z?qh)wKJ6$AuR|dqCd1Fhg0Xx1ZJO9!H=5eX-=5JDOz5GJ1Ppz4W=Vf(Rar^9xiu=HxNix(!R0tAPi zQ+y}t{@c)>0pG{siWYPxlynH(60$Mqr>23xFB>#Xpipy*_usI?zLE2^4(R4!b?)-S z_(32(H=BgeNCm>n(SoPX@$e!q#4sHNNW>M<#?_CkBlJvQ_&0t-jP6*(Pdma|Nrx)y z=P*$~KAIBL!P(opFB^aR#%M9SH16Tx`Snc+)}fqSB`XkPDY|q^|6@2Hl&|1Bi63Hk z_t8t%{4z2pzj^G`-T(Cy zUTM}b8_{VO>x^%?|B`zfo1$cH{(xn#>>5L!@D-vZT3qB!VK+mPvhD1NiL`^4w2hbk zupJo+EbTCoibZ%D+Fh{{I5Nw19AG}M&~9o9_AtS#_m#C3#cs4t(cNp0Kh*&5#O`|< zXG6##{MXw5dWO3!$6(?GCQ0_*ezr=)cH!d8a5AsRHVfmUR}6}N-!&u08y1Y=B%puj zkDZ3%iHOjs9H?{xd}|Xx&f`9wiqFFH-6+Yw^kA((7=Qp&jSuoZAN*Hg=EWRXG*UZ9 zuPiOwAJ?($CBLrt=$a^IrmpKm&(YxRiTs{$13Fv3`_97fL$7L8L44S1?##?-DFNJu z9$jj#MNZ#P%`L8B$HjPoNad)SKF(Cbk+#b8pd7HiSi6x+qVX=5($+f7o>c?%NG}ta zfO$EUWmRlyNIg7??uld(L(oH3VC9WKLYAMoy?Z109mBuu1VTgnqSVF4E_!YFj}A_g zh2>VAGR#Ol74C(G3$@8&By|qeWKXGdg*JsGEzWp(?5oN(Z9@9Y$qBNX<4*ky)Q=%* zT@&jhg&-i+w3O~$H82D*j&!tdWUq(>_RYll$qs9cCg7qBj{S=KaeV9}x{qyiCAWE{ zQg^-sNpJKL*BJA~J&kTDj{q819$m(=^ixr-Lt%|N*-jG8oRf0-`@>4V*~A#?EbNgq z+%;MOpN7-;%|EcE0!lKHdlg59EslVYyaxiuoE3X~)X+T!?KN5(!w*gt6_n^F%+;?t zb&-@Jd$hf9A)XQI%k-A|xPM?xn{eG=GG1A8g*ho@CnXV$_PlL!iflO&o}%+NVg!*G z?La6Cs*K;n*nJCSv`%Z0Md-JheSnc9fFmijFUg19I#>=gTj{Oj+taKTEdA50R5~-M z=!2W$zyt-C)=m@~GFPglF*E=MB-89wL_!Jelr*re;YYHC7=`8qHA$fitN-|45ws9k zYH6uz5KR4dabO@1`JTJVfV=k=mBW~U@T}qt#UnNkz}SGGayD5oTBgr^qeVoO^rtT* zn{WEMhYT!0X`D%^G3(aKSYGy*Pc3ly&ZCYL*Z0znfv2*(6LvLeLV0JQJx<@ zmTl~JLl3)BIsxdn`<)f@h|twcKQRvfEiYL70RKQzlQMzi-x;r&JP5qnqxdFDKd??m ziQJAFtyeCU7Hjd0ra_Cyg}7N?`T~57Qtr6W_Ybv@J68B}6hh#CNNqb_`s6>%@ikdn zrwc-?xImmQwb`>*@!Kq-!&S$ zxSKu0dG8gLwBM=-SYBt2TcfnQJ9Lk;XQH>xhIKq7b_^UDM1Z1>Ss3GFX(peP6K^JV zVL`+Z>>VNn#~2k_u*5-!8ftK?izP1k@O!{_e`C%2qHKsfmd`)Y1oi6;u=~>ckIOCH zb0IU+v)6{}0KWg7E$xo}yR{p9?1Sw1^2S|FrBMl$&7-qAzoQuh9U<7q;LY)(Lki8j((4Ql^{(LgqX%ww$?UyB3HY84e7}b&aQ_ 
zvCBg1pin&?*TM<|z8zUlXQm*(ZJ@u>43XbplK@st6eT3}i}0-&tvaiv;r%n3h&qb> zh9djZE-;x7&wBxTDe1@Ae;}#84fACJjM&bT(8=TxB&wpjiNNus3s;D0)I#KbkDQfD zxO9?EAWJO1JZ2=!JwkgLgEZD^bzYm&%0^*u){J}<1}98c95Uu4gTIT8qM2iYT64=3 zNn9u2R($FVs|E=|t07SL!{QkkaUstERsbGj&jsER5xP|dZ{&b0xpC%n;}ZWJH%oOo z^B9!E*MF}ro7{pYaJxorqFiE7iPS7=8cQ6wEupZM+FPeYS(!PH*zW2;%~?1TGfuJ~ zDv}&@q4f8#C-Jj(dQ6HNEkT4sf7)0&P>I}PU(>`ITGa{vsE{rukQB>FUeNE&+0apA zCc~(I@gOtwFZ7in;Ph6G;P#fRDe~#p&UCz25K3839<*=w1}fXV<75Tw?#Q=J5rdAyQ<`IJsjH4#J(6mu?VE-6 z+NySlJF1o|KLPCvTzv+jC~72m6IS2{*uzGzj-wBk;e?Ho{g{ zxOn-EaDfH+*?g_v^_wLQ?Ya_zC7+QjMinKm{*bTS)U$c>GS@}QliRTntA@W92H7l-a08}Z?1EkcWk7e6!*thbSVt}R4K2WK@6z^I{*u* zEg9BzU4(xg3E51lS#W=U%duu`rn}2m7vHyA(Dhc~HjmtIfrj*Go_BLhKOwBwPvwq* zAZ0cB%NL2-FW^4QC=VmB8KgCNN4l4;$$cin#rzc&l2&(W3g*{pWVJ?lAoSv;x)4!P z&iw3)zthAEN;y~v7HNz#3s)_xZJp{WkCB%rZu^U7uvDp7TJ*iMZ4h0EN?X-l-b55d z0GbFIF1yjQu92G*etFLQRi_`38s<;2q_8N_p`#l3Cwn6=Vd&bGi(nbk2P2DSx1-`L z{ksU9mB8OSDedm{S=+b5xx;c}BgBfOdy>)8p1Sw1J4oOJjB^PZrE4SfUD5quvxAST zqpD6fNPPLK5QZ&s#tQ5lAxL&A@T+I{nYfgtmCQ7uRl4`}-^Qyr4ZZ74JE$xSFhllZ zy1(mE0BJ-XvbE~zB!q+mikemv=_FrNU_;RN_c&-$Xm5cFk2LZ(1$)C8Is~2BT9_vu z^1ubSZGpxyKxj~=D-j>G61dGs_9HHL8cR#;&-;J5V8=>{u{owlSFKv<6P~f8*ywWo zu#jrsx*{uVu#0%QahUu;?48LBKf?qU#d4d+9X{~CM!s$BUT#h*YZ{AxSA20zs{b|b zw$o?lN;YeoJZ?d+`iaxW!VekAjz800$d&PBWbP=Vgy+by8tx)06vW7e^cch z8X1CwP~^1GuBn%h_WT&%m8EnZs7@5yQDvo@Xap)1yy|^wVJGv*%C}XNR-GSan4`K( zWmcCV15$R$&(*>*-evf__ZmAj8uy-=FZcL@*fAx3-2IGsZDkQh!ewWMJyryPf&Vdt zK#LI`#wdVJPIkC6MiLH!Masr(9w0)!qF+`Li=dWx&L=0O^>HPHCP$_qP7-HQE}j~F zpIB<^DM(JG4vTY#5DS^U7-fnDOnY9m7~?F1vpjVZe_I9~WCup8E$x?TnTUc6dk-h0 z{CWn4Lm@HPX=vo8-tFrtE9D^9UdIU_w8Md)GZ(_uUZf>68XW@Uz*3a=k3^^KA(J9- zicJiY4ok-jKbd1Smw*F0!$=arUqH8A=z*s|rP0MMNtCCSEX08yEMMi>k&4%HUN;OSQ= z%UKbqPj&twV5H=7u&G@j!VF0P`yhzSyW+}Ic zoTRdc0ZJl31?|`-T?C4B3sz-u69eqzJI@j2Wajgqfm+HVS)m$0kScZ6SNaefB;$Ps zyxDLSL%KcZWRlWgvw2l>E$|F>tj@c@6AdQEee2q5cFlM2^`)^jNa<8$BM@VX@WXEp ze&Tq;SFBOc*s0abNu;UL$`_t$0!?!zOERD8pxmQkF;+6uKvCMoW-rUk1~FyI^e66e 
zT1PO1OeYpr!-G>_TzZ`3*s#)W1zKrlRRrah$LJbfPPrI0y@S|3Sg>g+sA%D+@c!tD zm8#&)$zO1=^mk-Ndk3I($dT`^2t+_<+E?wi&CeQ~mo^Q|Ebovua)nqqpNWqjpg6>i z_KOsL55nE_D2}I+-v;bvTVT-{!k@ih(tq&RGi>Od4W1%|jJX>Yp(#3QvinOheZpaG zm4=Ja?X#un0{LZhAd`~rr|i#07X_t6qOaeVyu+vz3ij^~MlFf`hvxM#wu(gCu5teq zLipTMp?*QIN%Un{JHU6XUi?T(;YmJZqP(_lWvGl<6$8*_Gcj(z*>xkUwvLbGvXqG6 zg*;mw=8ZX!#S_&Pzq7Jv=O+7&Kp2W~&n7-t4+!4{3yQhrJP#m6t%Z<(5UHqzx}mdy zz=LLRS)EV-ze7=TSEY&AaiCtRXO!uVCwfFV(}<}0;;{{jZ=iL*YYwJ(spQ}|sW{y6 zO1?u2fGL#W)Lf-tVf*pq-Mdr%Xz51aibip7y{8`Ubo9}e;ovZs;p%&MG}y=1xwLK<=?Xbm`O;c^4Scpth0z? zqNs3G!l_CuM0lKIenRYnU!S_saniyv^prbn%-pL45oOiAO@Z$<9v(Bd!-S@p0 zR(rB4U3T&#eFP4E2BJX8n=9XEVzo{P1%d*OJ9|g>N2HZ&vK+9Yvo)J{lu<;@RC05? zgGp%8GX7>z^9Ft86{fHq8sd!x%#2a?nXo*M2&HE! zy9peM_ola0d`T(SS}>Ea8nQH8vkOBAAl2r`VJ6mKS}7MesU3biL3l*U36nrkOpcFb zBp<25%F8%NpU(G;1U=T7>@@yU|9dW;j6*urLxLOYZ@5c}IVNfDpLnUXzVS7>ox?F6 zpQ~j&y45Il=)6K!vvw)YFx~sdVL3(rFUdlTtT}5IJHtY3mbvyap1Y0IvU|oRSwqL~ z%MAjeq^!1ctQgZr_MWEN99(!XP%eT#?j3H)OXqSYQ;`yfAp!F9?^H%|OU)|#IKd;9 ze_F$);E>xDkSso8RB(mlK|db>M2+f$}x7BV8H3nT^Tc%X| z3<#tvKa{=HgsTGFZ;L!_(7D9C0Hw9%u@ zg(2RN5bjsR#FzhkUEZ(V)f+9kZ|bB&Y|#C*E>#sG;GW?sUH;x`(WcSTVZ#`4wm|7b zivW(p17oo%;F!RTL&8ZMnjQ`J$!l4V$1f~~)Qp8^=@N$w1bEdf$39)LcC&3u=UI!V zNP#~Kr&gpBoHDwvTT36}v=x)`WEPCGEub(&Cx*1n8cQ$Ix2R5b7fOzFw=8gJB@AV` zh>&Wd_gzF+?p}jI{!nN%A>;p6@D=&cD$o$jPkdeERiL>$S$T1>NFx@}&waA|-ORyn zP_iC;@#@iTv16oY*PXBh$CVKtbiO4R$mRK$8<*c@0NYkCb<&c)zC6{gp5 z_s%_<*kN&Lzc6LgXuXIA6mfi2`b>92ED6Ni9D5 zAr2sn*kFM|qgK3XKAD+GEL0vRt+4!+52ybRm#f_*0D8J+(R0_eI}@ZL917ZAw(y_; z24&e^nGyNO9H6G4TmpJx#?|Up`T&QzQG|LQ3WVo*u2CcIw$MKPCp;wCH|)=3YCGYL zq-3mdWLrG+XVisft(IEs53Z753%ED|;MYUAX|g_J)3g|M>j zrU9uz_w&oMFR!aU+az=Bk8Pvaiq52jq3y3Fbsf9^eF*BY1S#zt3! 
zic$pZZ5m4j%{(1prmtk&?)VR(S(w6wnQ;g&_UVjrihzRkHR8e*RYNo=H9Y3N9%qR} ztc`kR?deW7I9)v?8$D6eX)5!g2!3h===xPyU~y8>93*t=zS3m&cCTGRbX(1O*L2cy z;h?zAtH^Tt_T!)KCi?bmdiCIbeGv&5TQepy)^in*0wNgWHSgmGOJAX~JBu-$Q3iy8 z$|n9((As`h}4Ardcj1+i9VvU$>^u#^Nyc9MHCGqMbG z(_Us=)Q{L)O4v0@OvnvRF{K6SKf5fXhETX>N`vHD;(z)8@8yJ`sprEAdjIP6dv*)x zlq_-Wzn#_4xXPmf9KrZU#2ati8Vd@V&k%FV|~RbNxKpN9wScYH3F0a2o5jH)8)(q_2 zp7YOqMY013ttJ$;4~=pDR?Z@mbV!k;B}6B!)d2_zT~C91Z|O4K;F_5IK`kV4f&SGj zLnX41CFF4-vC&a_2NM-n$XP+|&PQG?_b_CN-Hpoz`rl$bvGyLyPKT-CRa_?FMg zaXm^eJem#f`LBt~#~0pIt>Z_9R=l+VfAmPeJ_F=k{oEZMOWTUt7=MZuo~&-jZmT_q zkz11{>8UMu#R@*u)4D|r+UaKb*`3FDSYg|ei3sL6S(TaJ#ERq5#o}aqL1w^}kpVwL z5}ZxEyj%vK_Goq?v)SAlW992U&qnW5+oP@7EHC(ki;_pW9cW_#-3gmsj<;?{v#6o{ zkbXmqnApTGPz`Czcj&2UhU11?-cwVbGU ze+>>_IgIZ60+XVkT63+31+3jdCS`43B=8K(C$x9n9M}KtPx4)*jlslK>Vbcp02Jad zB3z4v3$iL8<^E<8L0$XY&h3f21pRm(>kzf~9wLF`R``+Z8a83TFvgLo_ly<4op&tD z*F=KOOZ&Ov?u+}KI>vBx?#zdoHo|k7?7!WyUtEsad@NIBFF_OYo$qmP^RwZX298yN zF~9amG9z0R14RLEIcg?U3&DQ(|NR5ev3p!lCEuD|CZunZ0E)WG#EU^rEiNl@|7?@9 zvYTas3*;yv{kgTa2>N^s>G(>Ry#a0x$;UO>X9LnC0}1gYABk{-#cn52pnbiqqv}Wc zB!@={|GQt$VF{O6Nr!^3Gl`QK5%X{2QMrKw27H%sKQ?0qeT*awE7NEORdUQ}NB{lXxk@Z4)6Cn2!f2@iFAXAn)fplfI@Bq>&CG!&XZmb+dWCe2NR=*9Bb zND8D{%5g7sweiUI*hqV0<>-*n6mtzn4~{>hu)I^Jq9KIP7SK@C;LyU7>LQ8}R695c zMD_@>w2B!Y$+5M6+CdNl6_)I}UR88BwfUFk#T}DEoSLlK)GC8+0!~^X|D~3WaHO zLxgCr|G-|G+36VCZj_o9qX^kcmvK9?Zo!0t2R>!_YD_GwHbdi`SiORa6oNfZcnp@} z8SuKUC=x^jsgn_}GqCzz*WZAU>&KE493D7PJ~%xX*vh|q?wqh?pAZK2If_b1W9G`A z8y?B_hY9wdv+es*&E_#;h47lGUPjgws<9bff~{W0uLnUG+Scnno(ExAlF5->=v+Z2@(u>BF#1jNhK4rKIEEO`yZyMI0 z%HX8hXy~&rQ^5`-7B$r)f*qO?h9>L?py>x=DLJU8E|X2)u@mLfY+ZAC$gvO$Rio2H z4)jV$vIhi|_C-VHKMMc$ep5+oeid&SdzWWFZS;DIW3vQIBV~45m z{>Kf8smqvuHrGBeTVQhO^>_Y_Xxb0OS~yHk25B0dic5LHizx5Eh!;hwz0*cl0bDAo zi+-plIknqU5@2~ZJ_c>m8Z`u0owa0+ioz$ammV9(CnmA#(RITxq1PsdP{9#}Pd*T< zoX?SGSdEY(3J>E0CnKi$QJ6^Asox1iHhFaS){=k|5_F5KuzVzLN?o%lXL;?Z+UsJk zUaKyBbZ-I9m(w(q31i+R7$xZy20jHzYje#BB=UVSuBP0JKL%Id0pzaO!XuN(e_|2S z-?O6rnBTQuGo21Ww>Ph-zvpN>_PXNE7HCmgx 
z#$vik$9Duy!@L%o+yHvOhOZ?zc9>%=W`_lYzLK$~x&&isI0}7nC&%wUrBA^_pY3MB zIneA@D*sOj;#VJ*`Z`j-mHUbsn6ko~lfwJ9pHcIJS4)Za>mKg)5#F^;66s%z)6 z9dqRlZ&0PM-`b>HdSI4mhgiC_m|>!x>NxPr?n!iP57%0`T3URX=JO1wV>b+s6`M=LWjjp`U`xt{FM4zWDWZQZQ zEzo;Ev9iQpDLH!-l20<(411+)ENaV~HY)+@X2Sz{Pt0JGQTJhK7^T&b#dT7JR{~?b zNvh1`HCEIGCdGaOVp8U4A!*oNs2q{?0~DAbM-13w-QKAPP}n%sB)iNyP{B@D2!{TpK3bEC=ecEStMFY7fElq)c3B%YQx&yb*s-H zKU&3L83kX}k;J?AX5*oW{ATEqOfQugH!5dLwSK3Qy6fk57xGu?*KUl}SrcFpL~c6c zZaq>&bjB}Cbx=dREbN?Yd?i7~&NxwtK?Jcq47{R2R1y*q{V}+l^C0ZaPZcxOJ9LCo zwHj#+E|*Z0%BUM$r(hhUgBvrswki#zZg-c;kq(7LeCU~1zm4{2xH+46c;Y`qU7yk0*>ijkq(NML^jX7-sbuc5 z@$#RoZUTmlDJ26FfE948*}u60yGTmzX`@)T@8t!1=0G$;+roSwFE2I&nAi-+!L{dE;}VGAsDJHp?&>-<)z)l?4Zt|9ift zpPfV3yKnE7;U8lkhx|hd7Bw3Qz+5@7H&;7PoD!2sMuXEWHOu6qJC{?svTZ?^VeEBV zCLG<29sPMAhZb9HhwAwqmpB6+==juL6B)*acJTMnVr5Su5&J@_ zGNV}^T0`T4YLeURfI*{bLT=%?qPR8Q*$V^vg=-MqE9s!3b>#oj2mhlKES9KX5iGD! zxe%SiAt3~Mq6V^f1}`C+LRsdg^>@g+dqkd>pxY0oNKTcZ#cSBNFU4z3G94_t59lH8 zR|?9L_+xym;aj?5l+RqtYJWE-_hnQ}9z4lv`)Cn5HtqfzerpWyU$Y-3(;tsnsFSJ4 zzwUtJZ9@RH+feDFXevO{+V}mqvzeL<H*hHu1R*BdT6eFCK|Ebc1N*8(# zOWN;fh=xz}iKO3zCnZcujkG2>otd8WQ!6qvGlO^NsLv>tMqXA7rSf!KBFKf&obm@6 zosl`LwuMm?6FwOPJi@Z#tO(wioP#Nzsn9LDNB-;#UEvvSOL_gks$EKE15YgM$7-?D zN@Pcnc@J9>Db_bRKul{E*OJ24(x0Pd=cLZ=?!u%3O%FbIHI|WY3>| zBN?<#aZ?6HcBJ_@?{+ow2b-S@fm-W#oZ6Qom?Lmb({goGw^mui3^DoNK6$JRXyf7gD!Fc{vCNRe;n>n%gp!lE*lT6Mzwni@6 zqvArxEAppbZMmgp|4Z<>x)jHU#Ih%kXKC|lYw%1-%}FW~0pvy}w?Qd_Y&-0rVrOpL zY3jiarg)?jA1j?hC|{jaY-MzKieK*1xy+dPEA^WCRh=Z~Y$g)=J!#Q~n^bd9fG~zT zf{1i`oeDgyg(0iQT{5D&=@JIGh1c$5-8NOze@@eDi)UOFq8qp%qopfS(GtPB}0YK-b-+-e$W?jO}B zJnvhd{J5=4j*Jl7Sz(%Tj+n0{4?X+BL^towG zyFi1UFHEmC|9PP){6BFdYtHZEXDV*#Kkhyp z`;IS|9a|*zR;yr?rZL-#UxZszmMg+J%h@XUZE(`&wPKB>WU&W4AL4Qg{s+#z?6Bd< zcmCjq-o~I{SAoq>t52|=aMQM{qJh>x*q|;+l0>4D-GCgzP9wyR9|-pGpJh71Zsp1H zJ67lKa)_PM0KyY%Ljq^S3Q+XLvb(43Mz+RsU`+-0_UrVx$o$VHJND55O0&!s{4hRd_3~$Bigaw^sMzPQvP+NE-DE8Io8h%fFLU(l;#O$0_CCAtj zA$9pvg{ya&a)KXmi+s|Cr8F4&l7W-@ew+{8C=;xuRfH0J0svR}VQ?QcXMs 
zH_UQGKk+HZ6wuEf&5e-zKZ!W0goKg_AW*&&5`G{vCk(BkL zL@dAvhL0ST-13C5mMkM1!Y_# zSBZ-&+GJJQ2rWaA<6b(_fmT|Vy4nOZZ0y^38gok_Ih1UKK;UOlBGQIl3|1@4pK-+g zO}puZ*isTfTPNWk8(Inv2nwg<{GCSr3uSqoR3(4;ivE1-`SO%ByC<%`4z}>j6Sh%HJpYSc|)CKJVin{L9U?d zK6Nvj4G~!unWzNX6kce#F?zCzHGV*-QKJuFBiE=N0+IEb>rLd z8E&uGsny0{t_aDn@~W*ErjV!@GFgA)Wqf6FS~XtWPwG=InBE%2Y@R^;wrdWsLF`Gq zS^2&;nZ&EtIK*{BsS4J>zU#kG6NAxbFx(z!VsFGQGwmE4?tpy!0B^J)H>XYKiagLg z60)J|LmiQhy^Z?`=UKJDV8Xm$4tP>cdh4at_rpgaQ!~Dfvq-Bb;kpfA)eAxNSqqFj z1DGhu?~-_lkT6hQ!}eFu!0^c-I!($0R+L77FHnE`^NtT^;m`Vf=K}x}IoQmX?kQ+> zL&cM+BTb&u&B%;H5(sat()7z8M6rWtWg(+OAkeVl-fLWTGYOKFxL^x~*he#R{u0J2 z9Z$=kVLoq>@vrSdE26Pn!6n?ITXN-}x4yR_$25sBS)qs9=e~^4(_k0pt{reuo}4Z^ zQkPxH(+cM(?Q$&>Y@#T%9M7u5JeCV`OefYpdWTy!_;WC#adaNs2witU2+Zvwo9?5 z@g7WW%cx*{pM}Qwp{P`%TwHani{=FPb$~wH12UQDg4)$-L8Qzp)DL$~9~G_}0WCo$ zXjXqj?h~pFhFsCjoH!G#mT3?e6k}I$NanLAtO`{ADeEJ&92oH3G3xpI`8V~%&FNb^ z#nkb8U!+tAZ>lNRk!3VA{y3dv0FUwB4Yop)i;QiP%YK-biReY8*wq#Gxk8_F}fT)_<85(;m4AhIBU-CMgTs7aHPE z9x!7&0pQGiA ztv8ecppR9XC)7M*b7qLT`N=^BebFJ2o+TElPZXzpil~xqLldl;1sH z&Myvz#Pad5LM9W?9~flrMdTOAdH@kGO6hk06(lwt5awq|fWR!ynejP{;!$jpWeNA~ zzAo+@B`bxh30gTw{+h}Nj+gcZYw_8K$tD5gh5`Q^+GuI*YU^{m;iE5gY1(*rPY^+k zNs?^~>9&(=*N9Oj01EOIzY`|7vsoxK;d^Sl*1Cbp$fVpzdD1Jfgdh@E`N9`pg*qP6 z<_sQ-q#6!ciLfqr-%{Qp6gVhXFSLQ<7^eBuy7eTX0C}Q$R=jMuoMNy z#07$N3<&Gsl711>zN8zhjvecPX~bwtxH2|w zfGin*6KrXVXUKgJhjaT=o1WWzH3y#{-b<)4#ZzzbvG8}n&-ULE2Ykds^{u7jJ8$N< z2CS6H--m$-9!2%l4!OyK`*#>5cxm@B24cCkB}WKflAT8m@-w^Ooby}NTDCFooJE1D zdobAeEZ*7|K*<6S2k2(0E1czrqFB1UZ3wAIvJ}9VAlA2{~=% z>q4$GW*C4I(wm<(zk5_XZF1N9C$|A1b|8U9ExX0m>sO96ZF^W@mtIl4cUT_=ZMhkT79H_OI6l5{MOGWWo!ibut?*fV6%-;8IV$Uyg0>4gXN2Syn}@ z*{3K=O)UR0{4};Qb@wy!47ZnN-IRZY9deR}ZLzJCntU>`600Ng#X@DO88Y8vPI&fhqLiRM+||k{_cff@ zBUfB#_iCs8F$j&RZeidil)>$Y`RF|(T2XME8WFHJO7gc5l@KIi`m z1Tfs`bFR>V1ceiNEP5Jf+R93+#}97L*VfHlTF*OlXQywUe!7-vvnQe8>>J8efepWL zhrlyb7Q|@ITSrhfbB2M{`7k|125H zAiYjIMNT|-*eY`q+d^5l!w+s@#l15y)e5}>btgF7nyH0`QYuUUwp22vFDM}pw4d_^ z*nequGS$|(CjK3_!oWvBnUx9c-TL+4q-e2{yYPX5fSM@tpLuLaOS#Nr>*^90SXK_J 
z8KKleKVKaokns`RiMw32_xJt8D&VYHDh%m!C3EpN%P#ZS0J6xib9OCA|4u|$%Yf14 z%oC8mU%#aUk^b>j%Bc#9$H~bRBSRDxZ|n<%Ds*i9MXQo$Z=n#lB~#GSqe(f&l0%&3 z>hgDWI0(lGx#BSGyfRE1dH>%7d&ygOrNCu>8E0YnwG=S+!CYnx>@^x5t`xV0Q6w2w-oq)lkh2XO0X8 zVqO?|?#DM-^Y`-EvxH0N>iRD}sQHM5fiq;M$t9T7Fslpd)tIIH8TBN}LI>4*8e^aP zZ=t<$L4vGN|K<1sZ%u4_B-#QUVFWVA%{%B?;|CsuJb&1l2)&Uik>|0Y@tx)!n*hL= zfPof{7D`Bncd^?-eXQ>$;ulM%O}&I{HZepACC3`f_2=IN9B>fipTRkWpaC~yy4wvF z$G^pY2aU?AZABQHQLjf!3{Vvqk!EmVH8e!%H3*HSfs*FK=upbU)7e^ zYBy^aR)&)}lv*B0Kkoa@zSKC3CxKJZUuFY2WV$IT1IfZv8{>wfmqa{Jx}v4MzFy-L z`(@r9rS%MEsnw*vvPTDLF3})fjvKIKR28nOi0l1q|+Hl0N$KZVJ#8H*g z&uohFeiZUrBycF|Jq+ZT=0O>OBdu`dt`d^om7*j?0c$lQ&=O#3_o@>$u~I|-eBqsRWv&UYRw~{0dKNF4Ld6SVoYV4iozxNz|u+{5L9>b$r9zNGq z-{nI0(=*KpI#KZ;!@_HY_jzK1nwL2QL$iecmPGUhg30rjqA_Iy?(kfkvSL@ct7rnr z*1ie6(IdmM&e}r>vu>RpFsIXNe;33eOn;x&N;aq(vtoi98t8TNzAbq&u3Xbs!-iyW z1?26{DNid;0>{at>GSY`f&xYYBLfx0ZP4K}JVZq)CVuiWR>foQu`|9)7YWr=;+QPM zG&niA<(z|bj9ZotJ-pK$-J^Q#if%3`voOIFwOz0v6ubnATgi!&BruN19-FktdE#(l zX^F&j(LX_WD5IVVmLa<^c~F(|^b?P~_hXttLZFlGL{E6EpWHRyWKfHqdr|l9&OtF8 z8v4$a3V~qfhXK%{P(%qdWu_F4uA#khCdmZDB`-!Ru0qgtSu)-*Ndp0`)BIODMp7R}6^gqB?hIt$3CqN;h_@;{&VvvPIuuU}n)LRvo8)p77P1X4tw zD;aOLrrg<-Ldy<8XWQ`Y9pb_k?6CoI&)%#fs-UUg16TmR8PFCWtdA{*rID2@FyKO< zLH_QB>(!@f_F#K|UZq^9Yu=XrF`0Ojb`N3+n>Q#dHI01#$mp}|hihDFC(}ORZpizISU}*Q zSYRC}Ak?b^+z%L|2?G0l_%pgua?Pq_SZ54i^P6EjtHbJ1|HUeVhTx5it{|5W(Oy1< zp(Imisj5J%_Z_g7n($N)yZ+v1tPdu@PQ+O-rs#=rKBr9>{?YN$=K zGxvy`93!ApVbCOg4;sd{YXJ%XJ4y^R=PwF2<|A*iAlY7(lTUSP*y;uQ+mm+Go|}qS z0Du*wUi1lxTrG=9K$pR}5Nv=wtE*eE@zU!32GnxHx4XyI(>KCakY>(>BNH?S?i z_R2Vu?Iobk9pgdBtX=E>(P@k8dNqF0j9}UVw8A_h1RQa6lW$&VBiHjieu1k}P~^vE z`=!Gx{{V!XPI`fIx2^yrw5$Xt*i>%FPwxoMji=-jbJC@40hSpgLHs7p9zqkQ+p(Oo%%$FJElaqbHV zwX8ZDU{QMW-Xlwz{7)+$ufA^r1v+&1*nDMOrIVw^Q=xprS5x`S`0C*>l1ts0%73%~ zM6!J&H=WeKKLYoLQ|=IM-zXws#%rehv2Vnytvj4vDMx@cNCN#72GLZ>3v_YoE@s+q z<3$}T{stjQ9k=r)G0{P8xTWVb9&qtrj&qL7_ShLnot^R=Rdt&bYC3Tj_&4j46ek?q zpVbLP-5n1>*6dET(S(#)ttG*dH?*ZDbD#CTca2hCx{(Y7OisD=3q=+;M!>%#SB9w> 
z<%W@#4&o;x%wo<=Yj!k+t^Y2rU!5|%TK?}Q|6TncmNUdBQ1O9@XoGexy(~4cELaL1 zEqkjCH+8y-=5BA!$;fS(X?x#cI%G4%pM0|I=zFs3V)+m|K-i3OBI zFD?HMROf&p{b_BcDf=lA4CkcUI0at3&V8I+0Z360Pxd4r8>&`Pf0lXXJvz-O_Ltxf zhW9E&aR=Rc!0grPvV9tFkzHIz6&F;qSZjPu4O;6nO6I4nPrUfRm7$`H9=HqN_=899 z!?ZW_gi>8iOD_$agoHdl{zfA^Jj*QW-r8~Ur>C~tvLJa4Z*5NP zq;^3<{La+I}~c-zoUP@iDj@zk$0PH+E3&o4?3 zG;U%G_ihU(QwQ9cW>zsDRAaDiuUmh&@sYHwQ201uK*M$f}R z(0s#4CG2tRpt!TI%&<^b&^L6jwqfwiaV9OPyg`e$XvreOWy5k#xuWJ4#3SJTGijq2 zl1|0i%{ft;Gj#>dS#3Hzq_>4SmcS{EMs-uW&@gguJXn*LieD zH;b`;dMnjLPX`xIM|lQC?J(Endh7~E=~T|D{JKj# z1tRqU&`^10Jk<529@n*HVm_u9SB;~jN2AO;f#oa!Lz ziN0vXk^e8&57yCGCCqhe9=3KRtMTA&G)ivWL~VHkpr>IxP8ns1QWfHH%&n?_Vg|UU z0Wi2ZWm*O8sa}rr^*)0o&V{sn-sU{Y2Nt=MZJzvW%8;^Wqa6coJSSx_qa8aT75JTX zuMpaNjER%>bn{#(9x7iN=WTv&d>PBO34q8ys1JmY0X;aLbb?$D&H^%fu8&oX8LZP; zAxbMK8WM=3EQ+zA3*Bc&)-Ib!6pD$o&~RI7#n_S_sdQg+kk(0b+~%BK+7J*~d*mEZ z7ng#&Jr~}fb@ohd0B6%4=O?@nq`dJ~Iy|9Y2mn<{pT8p%bnu29IGDOYl#~H1(l7rC zu~YpH{!su5<~(<%>|FM{ZRyP4?4$IOIUhM&L(tF54BrG5t1dA9HE-6<@ephtl`sM@MEDos4#xzI?eEoy>F z)kJ!PDAZ$^1n}HO%b!itZT-a+rphCh^YEdf7*Cg>nXAID+%vH_(Jy67QrVui zK~0OV#;Mlv?J~63zqKtyUv5E-}d}mhak(OjXo)3TjV@9RYm;M|6CaFvYm~7)PXkWdgqjzqQ;@+oqbh z+fnvPwBc2r@r`{7I5pkg#UsYH1t~BHOI3J_XwY!sH6hgIl$gn?`0H{Cm7y+2 z{}X<0hUSD-l{t^5{!a(h}%vW`*4n^7T}f~@lpHP6*{JGM_$s4uU-hZLx%WC#ZtKk|UPG!^vJ z@x#Qc>=sXegyB>>G&#mo)Nf6m_E!I|w|LcettIcx*4rUzDcA+djf(g5Sx+}PqIlp9 zje16v7%_AJ=OHj)jGgZ8{REx;#~sl%@AmHZG3p;YJNj)NeGITyhPoQsGSE^d%}YTb zO(<6wnKQ7Co}LkUm{l$CCUjJ8U|DV%>)p#Gjv5}vwsxVZr72=b{kMY?Ij>7qLn5o) z(o(9~ z&Cp?QlCjnCkj_Gs-EYuzNsQEodO$5i3h3Sse8m}d%3~$%3;>jf`O>5Fgon{-Pmb8F zlP_(O@XXoG{Jbw1Y2mi0_2mfvz_02ul#yjG%L}7m_!FSQb^}BFA=cq|>B;vZdi4Vf z1MIxM*^#FyZowR7DnRnbwDTf!b9 zJ1@|y+bi-|$324gGco7P0btVXEh4n9AB2GfKd}o4%v&~=*Mn6h- z^z_kyG|^ri>LoRbAx8!h_+qaL&9$f#Y@`Lvl{M;p!PW3SQ2XPUU|t%ln#n1$fr62d zWe?G&%pNn@7uuT)=k_W>j#S17M$XD^+B_U0P&83hkeCq5bN9$|L`K?`fWoz^-u_kA zkHu8`Dc=)Wth&t)YAIou)cHWOs-0>7A#faZ9r{27oxWi1h`ylHEokfRUD@#sv+5Zk zv6<|MP|>a~_yU(+0jGmwm}GU)!E(YrHCGAy>xR9(Mwr8^kqjM7zWr>Tlw*cAR&j_I 
z*}8mUpZ!d=Mm3IMCY~J}a5A5@1|>Vp_>wYC4IUGl7T>=+AEJ!};dV}R@b;N4_}gaS z%I=-R;LF~BqA>i6yW)r%68&hGT-@MD<*_Vxzl3DW90@-UCjXWo1-+woCwXIbv*vS` zMyCXm8>i_~ktoH7I@0V%Iy&sAw}6i|AQ z78N}}900I+nhXruCaWf2!Y%|OK4H ze=Z*HOqnx$@U*$pP%t>sT0U&Dv49@o-C!eu_ttroL{<6l!l|=Lf0m3~T*<5^q?TPv zS`s|B=Fk9od3&d+S?S5f5E3TP5Uslx9E}oRQ<&&qLrWexKGlBkS$6(!j9H#W%$BrI zJEi(D^-aHRd3@fh&JvpIi7;DqUq2VdtnDjl-3_Oe z+l?Ll=C%O`Ha!panqedp+G%k_wpc18EZ`C*Hn&{VW_s};K|HqhRn9%>^&lPlt2`iM z$zG;)Kr-{If?nb@b)hO&Y~_}}<&G^|Mms}d88uXBw`P<2@;?J1Z?DUv=FB`I{VkR9 zKNC%}BXV|vRQ?hjFeWnzJ_|5xIQ9tGp7q<1RKKD(mvJsqu_7UE26EwLxoFz^dmzak z0z0ERPt8Y?7|&ZaUIvI1{2Z@Kjj3h?n$$%jfwcEJyMoIA8=poB?bHEP zxr`;uR2hc%SV$-!N{C1N6u1DmL=%H$ap)TTw!ISh73%NFMjOOQBm#!`ezq%7D2mpb zw2TN`AX5r&_F{~(zh6vyh)6dN;@x`$y9Vg?9u6TN9|v-*e|+-jynK&ePW0$hHNZ$S z<^#E?yPS20ski>c_U~itTe#B3S?Lk_IstY$ z$45n$LrJ=zrdq;lB=&x1551O~G)WsaA+A9x0IaKtb^OO@nIY7jy^VAJ;bKx6J;U`;~Hu^V=OkX#;qVB z3%HhT+QH4qD+>qj^{r%lka1s2T4jkM7-Pm;T(QF7?OILae` zBQwXT*Zf=S{7xqcjs-HPLVBGgH^o5$Sr|E`pkIKIvp5du(Vz62p1xHwusaIi351KW z6W_JCvRC_m`!U2B@)F@NPr?mM4g?)#8is@wba7kPf|#;_YKDEs8-pKF^|g|~;5u0z z>BKzV+&bo`jw`bn6;vf2|jiG@Qjn^e7wE0oRS=@;0Xg;5;b0MyuWw!*-U$=v9 zEjQeDTN9{&;-+w&S$QnCwtsIZLVItT9UpPKX&a%5_rHe}CPjvSn!6TF`n&+|9}q zp1`T3B@6A+6YAPE+X+wyPHTbunZGwqy`PwX>2a8=A0k2Y@?X7ri!hv4hU~HKFV{r6*VVN*FO}8!@y#wc zz|3rHZs?=y6eKFTbb#iUVK ziitX+b(%r&uqGK55QXr`sX=`JSb?_c|Dq;?#2#qfc~Z~~q}Xa|AT6Cznijrxmkb`K-*Wg0W=vB~qJcQW%JI=Le{ zdM_swy>c%tOi0grcFqK1Jnbyp0es$We3>@KA^jqu_e|5wY)ojaBQ1|nNvDocGUmDUwg*F|u>fTjpgV@nvzGRwnpMON zO^#TE27rDrpxkQ~yw1M*H= z(~YGj;c$-hT2Z5dLSw`TtQ)X_BPgPZB>P3GJ~GcaZFz8Wt{aletyoT`H~Pr zSmkaz%aWuBx*9Y~Q49yBi=JGBs(s}QAM}v{ zPBa1#&N#imjuFzqi{4_Fx_bSS$%LU855U6zUkcN^SU-qPbH}jIzF2$@s>3iGOjYg4otVvt(@L8h z+vl{$(Xbyg6--^-AY&lYQBhgc^|LZ94G0oQH{CL5IMx}_odCcwj|iw(ad$Un!G?NS z&w8y-m`yFM*cDrQB9vcGvl@EOLa{(yInwJiay7|`w_JF*GG*#JMJxKSL(nvbggo~1q&Y`73OctosuL;kLGQ< zj_R<+D&G_xbMbqxQB=@u3VQ=L6tpHvf4(ab!bF>LNCDbeSQj~DTM`xGUu(Twvd{ID zijzzfLz+_6ltkE`(ve;}t83voOLN%AdTMwLp7Xx55EXDSH4L5`-6hdp2XEUSD^ 
z&HXIZdAhVmPFfryvT=r1bRvK{cMPu46a*0?GK4jxJe0ZBR*@BG!bWm*nbwky+$ChQ zpqAs`D-UdOb}n29!yd693*XQ$0Z>^!OvgqCtEJ&=6EAXmBkAYDd|4oShKwzTaU}1`iV6#GwsLKY9@NW2oCL(uKNMB{H?0e8S-Z3_K((V_0 zZ7uh8MBLw|l2D{>DZe5t5mjJ0wAAHifY+g+Cjrq`rW%uZ_q3t3Z|9A_Q8>2wA`p>I z(z4wpuyr?BiiO9q#zBby!Q5RHedy&2Z!tf?Fp#A_gX)aqTPSe-53TyYxtDV!m1}x! z&{6N6e~WQ+aAhj?Ne?iJswTtbg!*0-uz~;J&a=HzE0j)FE?Ro@<%9d}H2yK*a#SW* z8;O_I!3bMRr^gb-{t;8Pm3{%fH|S%Wy%24suzIcVq(lO^8Uh=#B6BGAhJZ^%&W~SN zi*Rcxqn8}%3>3mPzS0I%&w!Y-<>-j3X+rhafL52gwD1m8>haVRRhH-p{wy|Rsih@L zJL(I>cz9=uKg{coeEKkutwhGp_YK+9IqN4XG$7Pp2Q(UQjYGOV2#n}9v!{Yc-Cv&> zQ^hZoO}D^1XWjp|U<4B@M(rVJI1wRTdk={YV=M2%vI6 z`Km=Q74Ek8S)BYMAos>cbf#mi&W_c`?Rkb&ispQfI0ukN4kR?q-u>ozbhW`Sq>4(s zWDnT%=8l#b-4HdZp7=vwgE{=ojPoMA<_(f5Um%AnT%c%jKVlN$Nt*C>oz1G`L?W$L zJ#m&H-J#EKp$==I5P%EnFqV>eF$D(k0XVIR>BbsZC0Pp<~2&xV7U z{k}Jn$a!buKzrhu)4GFRU`NW~!L|K{=|&GS<)brw{GBBrPyJhU7#Zf!)82|C}1jXVFIAGsggiATV&jv5Bs9-(NrR?oVT4L#q0G`i~B z=Ow95_cu^ix^Cg??O$`{F5Ay17MMbpUqEcVL}^2&qy4H^sYXz~Um@kkuJtL$lr#C^ z+?&9iFb|~s3h^M)HB%E>H(=M&E#Ff!9}!)?S&FeCFV}QVDNSNv#-~tucoD0kgtENu zlBl<4r#ZrV@Nr_j9HFS>C|pqybon{H7ti_)Q=%R^J&9s}OHuMD4El{LEUg=^&IyOL zUyIe;4REGzX&knf^x9t;Kv5U77UhoXI3eFJDatcg^VW%NwhWaB+qOQ3?m1qcF@Ag7 zxzImNITLO5P$C#@mBV!oWtL-thqS}1Wtb^fp~@2f-b79L$iDy!9|?eB^m^jG2MDBd zg}zP<`abFFNl-Xr0LSO;`Ga^HX=rO`9lrJ>8xl`3gE6^U5dx5E|JjV!oI|DOnv|22^D>Etwu^H>->Af*D@X45HbU>nzQ z?q~Iv_BO3^Fvt(*iyH4(oe&=~{6eZ|3XG73$@JoI!Pyw2*`r{lxpnZTy#LeFxVf{r zw>LmDyy4ZomV<{2Kawzq1T5WE4sj~VfSF!m43LE732c+ID{4GzMRrna-qNOKgk7Tv zRN@NJ4J0{O!dtF0!XZ!LmQ`o07mmlQb;%7Hu0d{G-qAVK9+{hEG{{cp-bRgWC?N^+ zOuM|iW2`E5Nzp^!Z9fSqW*m(in~RE_P|JR`&g!pJWm}pR&p((^2BFBzj;oCr4hbh~ zG9*E|^Aprb%c>4x3;~^;*T}?O<`V20Ia0YXSEdsGDLc25Ght06jL#Bqg?viAEEvnz zVnSGGP#EaQa(brE(0Wx!RndZv-6<*X~EeDWiQ0>YrXFwFIu+OhXm48XG0i zhM$2)OeV>v_hV+pQHV_<_a7|)rDSzRWH^7eHDxrpt3@F|BJI2w&cU-+taqmXEJeC= zwwhC-p`WVO(V;eio|W!!c?d zHWhmEA-W^X%e%8@=-M!j>b{uZ9uc;B-Kx`riWtnqwA15Dlgof)a08idO&NhxO&~%P ziKW*OJQ3{AU!aG_iCNCMKOrGmILYvch3*c%Duu{_Ma{_dZ2~raPzePfAZb-uKFu29 
zB$cl2CJMTOHZbZ+8)Ze7@H1P}^9E8iNFVH=c?}@FPRi~JwN`m0$bhp_wbt3mF9OZ)iwEGj+;yofG$3wzLIHf zsPhJv28TRl<1pK2gq7x}FQ@~iu(xYi?LVuHF-QUH?oMlfkDmbx&_23`Sk%prtp>-- zc08E7#7==+(O}nkDA@dqoy;|7SYv$_2o6O7=13gFzrF$jQzaM>=;d$^0AP%U8BMOU z$qeJB4<3-$;35xYIGV%f8h}R0rg#-e;9_9w0gML(>y4uXPp{UgCc`Gn6SNDblthr< zpYp%dG$fy>5(ZBcGFytZCg1Rd(ejXzJCA04J6s(by(Bk&gwJ^-WO)tb(QuPYKDu>#ilA4O|egMhThrzH#e2= z6Y=>$Zrk1Hkn3LANM6MP8LU4w|3AP)fhwTSU*ODI$XIv;%J}{wFw_Szxym3;*fCu( zi1Uj>J)QGAt&Lk23Z_1jb8`&#=I$6S!GAgo@L&)3U;yyW-e1W-7T~PNtzNgPxRtAh zkS^Z?DFPr#A3t1;e$^_e*|lNi9ZJ`h+7&YHdEo6>b;j_!TQ*=1O5r%`M0rh$Gl$F@Thuv5)8b78r!?F3#1stw1e<|#Hy(A9&Jck`emVrH$9cWj@QnCys1SDv zd6df{o~cdBIrl&@gL#y^n;Z>5=P) zz1a(@67g0#72aQl7_2MrG_=<}3}on`M;73`Hz2him*G;8drmDSI-W$(l7W1;@VCAA zu2@z6uKvnCPC}gGqBPhT0F(SCP2$L zAbC+Lul*pee!k3x(tl}es5jR|bCgBVPtd!!{U42A;23m&Eg_XbI(7`C4Aw4LoJA&l zCQ$|bdi9WP&xs3(%W_9nTx)B0f39G+lPMJ?CRxwxz0~FoU+vc7(h&*cTPe8K9W_m* zZc9YDvZ@y{N=wlbRr=FXBxgmcNJnDZ!`8Zy5(`@Tc=`LgmHLl|Z2}$LXy%N;M{EY{ z#O`ewyijA!Sm@AOJ@_(!wa$#Ne;rd6E1B*+lL@r6w7fKP3pG=5x_vg;H$lj9<-;DXMsFy^&(F%e^Hp6*k!6xi4qM}p0nBsi07uHUutX}L8Fa-fDE zUyFLf3i90=1-&=tRgH0I228cr?{ho^|>{WmhImq~g#g>`BqyKAfTuOo>q8MW|yyY|8aS+W6i%vFPqHfQv z_zg`zV_1fi24(>%C~NPUnC8L!#F9BF1J;+(S7^d*evJU@0+eqwkgA-Vz#rG;`G(ly z){?#BP!B6b`Hfd%aiFKlaYRP~O~J0%={+bI@a{bMN3hxuj!}s@LT`c?==@>J=^+)d zV9s(-q>q9dV+#O;}Ku+D6J;FiUGRUoiE*+wT$T2 zJ0V<97t_2YJ6tR3S8FW`v}c__#=?W>=$6Ei!{=>!W=7#aLWHek;4pBn>@cmTUY7&7 zJ`0R?sV33`$2?KcztdLSC+l3$DIA-iu3P6EeCErJ~ z)OeVt{DeauOg_sXab3`>CIpla!lWO?!=MDXqj{Mq*ttu{#y5i+6at;4khQkTDF}}{ zs!Ln~DEI@(Qp$sn)SvoaMor5Xf-XiVhj1qeSFzozSpJB+E1wlwkGp zgmTv<5xpmWs+)xFZZ&Q)gET0d(sd@V6(Ta-tS)x9byqo5td!5zd<9 zEVquRC)I445^XQ!eQS=P8}!N_upOVWJ^X64AWH*nq)cIBB(@C#u0+fwe0J(g7VWti zOC>bWkq>vSJYf#PH^P3s*IWBX6KRaG?V%mBbb8tcLlbDfQgxo#0R&sR?b}A0z(@@9 z=#OD5ek(-AxeiMjtD^`9G>b2&X*E=N%M~LHOc7WUOXTe71i2{`RQg;M!vi%`6IkGB z!w zf!xbJmcsITnK58T^%*3;O3kff9YCXrJ00Hjs*l$>-g{0ps-^f9>3^2Sd7p*=#y;2D zPIB^Vl4CtX6TDhz?zH&j>SANuGWutUOOf%@S*&u6cr&3t|v`T!-7<52>VbCDAp7-F6u7jEB(WKwVf&)-6ZUr`OPZuhDRartpE 
z@rK)5LsijuH;39-c2D)^^XGk@Vs|e0{h-#&SXIicrbNbi8nGgnurryz$5L} z9=&&n0d5_(g?6nN={?~|FgkV8Xn#?~{Yeb{lIQ%a2J=BiGRr_TiL`oXh6-Y>G=(^M zuoGq<(kmed?f)R_oT4*}x-}i!wrx}@wr$(4*tTukwrwXBJE_=CzPQufYMpx>M zlSUoSO|&{?&SDd26U*KQ-8AGp=GqovQk*ZGsvDWx*`K|)$yaM?Gp*ok&9`iHn)Zoo zBsqPjH)I)j?|2I%380-5l};!4&+zpx*qHCpQBeq9-TegImpUTj(;|kJ7?;nv_5dI3NY?H=aNSr9U$}O9t^Jl*BoSv8|ro+cdQ^}$ucbqww;gUkyMG_c$fQH z3_q;Gd#{ns+ilhgQ3-7tSvngVa9C@6P9+2D#zb*dYZ~>8!gnT>*5$pb;NR}}U}+$= z{i(+kQN1SsE|nd$gPlpAC_M_{B2{!{&n^o9*c;Txf4t;)vMbku9vRa~V3mQt9E68C zka!CQFzdrf;_Rp^s-jq>nR=jt{U-m)^IzNPoI%yvEu_4lQL0pUuMJG_a(`Thwp?)k zN<&B0OGd7qBnk`+hqpk0Hl!|i(`h$iTc!d?-P}Qoawc_%EA|KL6otY0zN54<6iofQ zqF-oOAhLuYNNg6M5&UOb<^Jk?4Y2cq@Bt0;^&((Rd_lX6886FTD${8TG_TBzU z-EQC5@)KL9{)dwH!!qB|6{ekopIh7uAwSEV;8d}3>4|q1iMh@>Ya+(^GuxNJrQ4jb zn2C@l(%Xqa{9Z#WF5%@Vn3->nu-NXNE zHq~Z%S4~9jP&qk(EXNps^!D}j^pc^hNL9a)tt-D z#a($C=bD@FS}qVr!!*M$c~*pt)Arc`eJP%P069$j$;h5|c6!IUvg#bXUdRbzQ5hKU z{`6+qleNqU3*q6HM$E=7i6T-^DP$E>y##lX%LTW*9qyT?Y}Ci2&m3X9pc`?|7ImXfSeT}@{(yo z3Lr+~*I!jI4)im6b)krNqWmCvI6(uGdqs5;YXU1i2;4soa0)077#Qa$%jfU2RD57; zB*Z1WwReijawD|w_5wN%H(R~!$R*8c)L|@+Uu%q9%I9vxDAo+-`gJYVfkoCpRKWtB z@7GdRML0dh?cM#3!}ylA*3%stC87f-Q45MQ`E8LA-0%RdJCxvJxpDzFJ2jtg(~3vG z+9vkfq^B|_6EaP@86`%u)k(=HH{4NOWsDx;&Bp4BxHLW-!e~j;0+&ueb#d+Vb-m8TtUh`FZKCF|_ z1y_QjriuYmg1fI{{L)Qmhs^oj4w2nI|Nki8G0YG+iXTOQ{-spD`_lu+fE;f{Zs1MK zt>*$&Kocl3lzVstArc*(IfA#|jJ`N|SyZEQ?99w91LM$P&=HSmo3CDW$(zTBPYx?I zAqDFI^vDE{3I7@y@n!7=7}IVizw6ZD=9&{JeH^v(?%4irp7=4wSB{1~y!aAp40+7s zP|k#e#f(I7d}UHvy=$#){N<@vdvhKZh9n>cq4Zdx3mM$;=*SX$`J6$9R493=p*nz5 zRt9S1(EiuH;0~*Zc&LV}loxIe|1=1Ji7< zInx4-_1>I%%ijtll;8Ip`lj#}aa)b}*b8o^aP3A(g88JHp7URni3WNYI+v~=c2}gm zfz=&rttQ}4%Fx22Twk>DUmYGLx!yX!hgYW#uf^~7VL#ZE-}rr%}wo( z=c?WPqbO^N46T!e5DYIEZ=WX+{m*M!@0Wehuk%&EffaJlmdwSw-Cxx*EC?a1Iw|<{veo^21ejhitQGO5LUs>PJyL*`Wy`!6Lb?%#r z93UaQgrmdHf_N<^*7?#DLfuv+T@OT zs8s~Qu{dvphefu`*T@&>nIj^dv~k(c$J9?CYXioc zzpsPtoits6IiBMPRMVBuR_z}COBu-v%t_nuC#F~hSd|f+pH%3hd0oDZ4)SWkaEq4QjbZLUg2QB<<7$oKH;x_#?KrC?l&*GL#yc7V}kreCl|F8K$u+wjBof{ 
zljl;8xy3 zzrrV?Qj(ogwo|mkEn`KL*g|wT+$Ug^|HZ}@a9ZBB{$aLT-6IFJA?M*(%B^*K(u$GX z4OW(-zVwA1=NFY#@j4pcUE<+Z>#DK<*+=+Fkkn? z+`K>UAfK4*oL8P3RYgYE#iw|%&PAXO#o{Ad`&iRorH^aQf7+&!nBNDoOch%OTpKP9 z>`CVC8+C|u(0Ob_&_>dBv~m)6pmY#YD)n8U@@Ur8Q<6LRi0ms>H=+AKU)aOUu&wGi z{hvcWNUALSoZ7k}Bae6GXiBwyjOG8#jY6vX72Pxg-l9{Ezse9-1ulXKnkw7z zmkppEp^W4Ct>sHrvT$z&!K?UT1KZz4id-8U;HE;81MAR`?q!Jr19>H&M@YJEOJuAf zP$Xa1TWdWPU`!1(fLyQdueE`wwWEgyXai0q&8TW^37%ndR|?uAeYp3Kml;D#4Bn} zFSZKs3#}*tg2OpGbZe~_FH|4zw?%IIEo*q|RUZrIo@;n^=}ftn?#X0Gc*Ue$!#Ei;PQvxH zAasHvDbPP;zY2sdMWNDi)`T^qOv zzjkj29$s$EzYp^E*?kYFeGlArL_1~`YiP>p!bk#%$>&)GH>tL4v_J0nzfnKJzHj~Z zd%jP?EcB_Ld)0+UP=Hn~DQG&01Ab$p91QTL?AhBW5=Qio7c7E6&q#A60U3r*UQO|} z@Uc>TVB?+}i;b$}V-<-JoTlScfLj7+CfOI~$v3}*RDQnst>tqEuWb|o>o=)#hN z^7o#4K|>*|@@2~DGq>@>jNHN?#U0B*H1pedVGurD^34@NU|9DA9KWvA2R8gj9`y-d32Loz@Ls&IHYJTgkLt_u%_l$(S*621$XBzH(mTRD@tHuoNq zgknFDLH-lopr|_O#$GBK(sLMtENQ4+TwKb8pM+|1r&21AJHcFdWV*6fD5a62bxe)n zFvCTVSLV1F$ zXgQ#>-?T-b0QE9Xyt}mysQPOmoB#jDr2qM}YZn1xEJZ{0v_6nEr-9rJ{k4s#DVgK7 zC!TNK%t0iOh>jix>?*G}O23iCJG~9;>L<{|-S5nsg!(7Hme+Hh1o0*EiWUqxs1P|OPaj({F7CvDCr`p%7Y|#y9a$dz$HF} zZ9n?@CI+Qe{yt)SE5L6O#;zz!j(F1?~to2fjx%qx(tB{opu$vsHIR4-1|` zmE{=wob){p|8N(w<8h2D-I|r6_@ox04}7CM;U-^PnVe-*POp#md=%N6eWnhHZ86uK ztJP?KjgjW|c}K9!d0D`c z5GMXeWZ?{BVJ|2y>f|==bn6dTeuEe!%x9R9mrwYIdb2P6?Z^$u64+%RPpM86d|+`+>`%a_6y?wO4=PM za{Dbo6IQ$=5gFVS_4;W!i6ImvRG@Y2et*fl^EuiD@bf#VyX*Uy`-bT1B8~M4;gi?6 z8gioM+`h}iYOTlF^6%Ql*UmUvCszWQqdr}Ai;_O>h;+u$2 zc@B61^yRcGRou-`T1B1Bp-wfY+x^04l{5QA1oq>5a+O~gc&qH(3ZJ`&wI&pa$lqsf z5G8CE#M-Q|jSOGKXmz?YfXCGi8c(0fq|vXHIRs05=^e&_sEj|(gS(YO5`RG?Ril`~ zydi#PW8X+P=z-i=Kf29L?oVcbeSo(=#&d-{Tg|MedQ1kIuf@2~!_{Bxn7i03k<#`E z4bxySQ_|}z6|LU_a-C~+B}|EM>JoKn1$+p8qD8LPAezLX9lS997o=PISS@{uN7HZ_ zd&11PHK^}&9I#MmV$q4?S(eZ!WH2(;fUSl1>}Ju*4d)V9fVnQFcszce+!Te9DLjsl zbkDT`+}bu?30LdNG~sWP^l#w_ohBz=LfJxT&jo`f$%{=1K4bRI8rK0y@(f50{jV|7|PSZxA5eB|SbDFhUDtDU~%Sj+PrWmMMnr4Pf4Oqls zvIaq|M%=doV`-`!6%+pH-l5S)g$UC@p%OI~GC;+4{~puR}F*eKU#oYJDa;<#4VJ`oceQN 
zf^yIZ;=NQq$aUEcdmE0dEa*r@X~WNF&v0({?)SZA4zo1^sWBjc;si=uIgzWBkPRpk z);nOiT*RLEYDh8{6gRPA;VpQFuruSeso6QHvfMg;-khWzi3PcrB&OSNj>8X5r(S;_ z3vtGSL1I`cQot0%gu_^9iZ53LjrCMQDm0*)Hu<>U`aE@++1*(MHO7;Hn7 z?-N`NI*e+G&Y$7o<`XwyA zD+qK=WJSjXUGs7{1|8u51L#D$=I5+5IYTiAM)yvk!vmIsK$YgoBNhpcox{yHuCAU& zwKA#^+c_2id0?P`O}h^70}S{Lz7u}<0n+@QclYDIChQD-DjIZ>S|$mg$?9<7W`job{bExOhjS?p!P)6|~hVYScmFC-j zw6$5m4s21xW2-TrO|E11;_fi{9YG_|U#vdSqiMeW;$8})SHjs$Hde+>4-)oNEXcRP zy^w4`gn(WYAE4I$MMEbiQIJ0hck)+KT$m`Ew}enAcWC<1_LgeNU+4ri zTq(A;X4xv>AnFS6!=&)otW?|%i2$)t5%?Tp99w+~cCvT=YjmnVNN?4;RrCmz*$nNx zs&y@VnJ+3TD1)4UNwJ0>AxdBxJOP9zsR8gI%RbHIq^cW?e@Bf14m&`&jy*;T>ZVg@ zV>|yspz<`q*1(o>`hCQDDnmMfGQs{A$9D1G^)Z6(Vk7)^J)!+&Y!Mj z=m|CCCP|9NY%uenYv^|6P!@mBuN)k>Bn)b5f*j$Bh+&s|r_6d5vN;J}qkM#=Vr8cl zoU4lxX$Q?31(VDsE;Q4C$Hbe&1>XwC%Xb_}b9?QZyYpA+CDDkBDp)d1Ts^x*X<_(n zMoi7nVlprKkYz`3mxB2f_^K>wn;g1c#|=mm{*+DTvM`-0D)Z8k544y#Yr>HX z*8di!KDlU)#Mz_MtKy8vd|CXGDxttHK5*!vYoGz7ajf~5>XXg-46`Jp(70m=b zg;(cLF%7Rubbes{YAf;!b!PJf5f*LV4f>AkDuiD8i-%;{(@27TI~uO4bc}M%Ln)wu zMl9U?1hxIpji0R?P~m}CT94R`53U`-fzLa6ApAA_FY?Y0WZY-1RmvOF>-4Y7fh!c% z?<+0wEk47lXvm(#g+EmOecj*Xo6Ok8hHU>>YDUVjpoSDD-eVFk;ZJl_C=@8U2_5Kr zZ`+7gIu%m8?Mt~;9(uatMwNpn zextO;HBy~KB=Xga(CaO~Q4`yb+b z2i_0P?-N;>xFCGcw2MOkP**h*&2#Ani2~M~pqHoUc0OZ`u3VLj;dHEMp1EK(36MX| zops_D_!P3t5Z~4p5mON{n)Sg(QRIS?N1q$aUy+6|cVizv%Np;fns01Xk(DmZwOMhCAabZF}{d{W7L<-t>GC$EVAZ2~!L zVx67ywL^oncXD$1`uaY9BR~Grtl4_?_@LJ4fE~ogl(;C*z`nAsR*we<52cg{cWzPC z#Te}AIZAe_Mj!(b#%xw#0VNXy2@avdC@UMs4cQ+_cX4c8*>8T? 
zey7YjKOypq&FdZVKw6U_&v6w0d#KPeb2|_ZzgNF#HY{++Ae9hGvuEN#TWd{ zYoMVHBTW5)B!k+H<4o)S9B*7|nWZMXz}NjPD>=?W|C~Td>?+r0!q-ZV{jy0o|NO1t z^Bi9kw(sK3|GE>`=>DT{C?9V_L&pH!UMOb~(t5VA!h0?pR*TP}9>|*+V@lNGgx@F% zElsd{h&`QWx%Ps!6KMqX`L)$i$peMj-B{y%wK(K;eidUgr#ldY9Ogg)qBGXc$UjfP znM83&ZPR|yWo|Fp6oLZ2fKdI9NII~V`?i%L*MLajk3mX*GwL=iG0}xlcCN-*JCr+3 zo(D4bB`<~Kil&w?7AviHSk;dS&SaK|=FwF(7?DUblpA(Jk`GrKuhWfpI?uw{POoT24vLcGno|D1d2QUlWZ>Izblt{&gabNLE$P*P z5Q?aY+h}>#uF)u59bmz)ueq{^<-R=Y6;@jsY@eSdkioCFkE|cNw&UYU3tS2MJFq1w z?gqKDKP1z6%SLkJCp7nFpZVUnnVfQ*<7s`&L1jf6)EUVkI`$RCFo{109sn!{#RBXlq>%!7O- z;~#RX8r4n6SvS04ySiZs`Ha)!NF@D?t3vX?{x~}|su3sSsqaK!DN1JFYf<^x@W~g4 zou=5c(r<`(#5wr(fI38>vmSr>vdQ<^lDN$2;vf7m)L|Nl_flK_pi$2J$BiBD6J&vD}Oh>>C|Bsa{b{>_81Y7 z5YDrOKodDO2R|XYLWFewjIB${n!%Y;B|pbk;%-10yKSi_G=r`9uK_UgVyddS((%UO z`bzp#&=tvmbVDT=avLj``zjO;@u&!1*e30`Ob0Poo?vS%7XYNd0X!Y^B-nO%mK6aG z5fIZw`p54XMOM9k&lrg&6%)PkXRF32C~69Jk3@9)kz2p)FXEBF*X&zvwy#`b>DFi*+oEJdkdp&P1QssnO^m1nNSd`aet&7J zIwX6nSIm>u905%dCKN)U$|A#vpTR)q!dv)&p<$r=)&$KP!X7jfMc6sSTs?5h)qKSk|7dmrN_fG zJ9t-@n5r2NYG6_kF2EInKNq0_FAX@B%&PIT9MC@(I8#L0>o}hG%gxhI#tU8=9Ftwy zdrgfLU^Re>1<6ql-rP-YJuiFL}9e2n;ksbeDbOf7z^zCzx ze$FCy^bjx*<5Y1+Mr<6W$1JQyu<<^Nx+f0?9%o9AKgDwt%&8Z5z(i&2-e(2U1$Ui^ z_SCBp<5#TmyROs!QtfXr8dX!Vv<{ z`_gms?|i)r@G9>Y@brceni)`qD|MR|gs zWIRu4GGgKQNKq=3m#dJJ##5Xb&&lXf6l&FJ!QG+O38_cO?|L42JbDBc)uQOJpLS8F zh;ZOW$lgyD^0d7d)Yf54r7CIvvKLe{(mFi}AQ_SFqO7CR@DgCh;~>f~hiEinfnn2R zb0kBGAryAzI04^Eiw|ybE5c7|<5Nzu{yK~0+mD?6x?Dy#)bDU$aQ^ZuVvsZw0x6zQ zW4WuORpasi;hRBirI9}UGK${{RP1!I=-%Hz{j%pH zFt(KU6LzR^4S?;-Iqz1mP}1QgU%%=MJB(_V$nkr3}gzD!PwKAF<6|ae}JOs zBxf_2Y?q8=dC;#fbIw|jeeXg6wW`F$@JpY?u$I${c0%dYxmzy@g|z6BC9`jQMNV=G z8u%k3>9Shz#r~{6*h$VRm`1Qvq99%X9;da1!&~E^y{QWECq<~pobc&=Molm@`HW-{P2ld8Cu+@!0rUlU*=Iad$3xWA(`xGzlTwiR55r(r+&QWCl?Ddm z0688Arq5?e9KPuVRIJt$$erYV&dEv(YY0Usw34}^FlEesSafGN)XhL@7-Yfd`n(!Q zw`xcWo_?oMUT`MkzW_d7t}+_fIy-|4BHp(Q>G{h0fb81{{0XdJoCkzQzysOo+UTri z-Cub0NJ`q+bDrY=-DN{ak0~nKYFWvH!r>V<@KsGNh)*)BOO)B@n 
z*@zYt&x4xL(WhAY7RsCsf>YRcr$!uQ^6{SCkkPmL_NEmlw;Lxgon|gNyJjMs2$Z~= z0a)3OJ4yrUln=DJN+O`}WQt_X{2rggtGM#4)S6<9DDpR?6&wy(Zepv#%Hfe%TjKg) z4j%De8F)r{PUJhEL;WzofLMEXlqrA{L$H}OJ47(7J4PdAvwzRZ8|UO`ttm+VDF;Co z#LUCV+Ml*^j1>1eby(+5|2X~b{(bl1qAFU1fs;?$5MNm0C$Q!-d*Ayv4u01b#F~@` zbuT5cW9^`Vn~;xvPI2!uP5c8f4jzz#JC05cC=hDFRPvQ1vjYv)+$9A#IY^3O50-ER zOA;I1^o#7v_tJ`nnwwj%-Ose2BJ&)Q1Eg~W{RD~J6fq@3LTD`33(A0637567Yj&^3 z+-~1%_uJ>As}CUU{eGvsb&249VijV%ZEw*08zxLRtd#iXJ-aIn6+up3Ebte?rF;UZ zPXwD>Lz&6QrGGt~?qAW-^Jwys% z)Ezr(Gzo9ZLmo_Q*P*SQjUHlhpgt$s9LA9yV{Ax~gf!4L`4jCgajxHD&OMOyuh*HZ zxSgTNpv9s(0zeKw%H58z>#wB97BGyE`3J_X>$)+?^BQNFCij=dZ8 z$>M>6W~wOid!>fP=yBLt8;W-_BXuwG8T?`)0~BG9#f;?VYVE-$!%-8ZN&S1SE2oC^ zHwBZFHIiJ>n6tfcjg5`{FmL)&)o>*(=Om%8*#bR-RA*&lzl81sTpwx6EI`Bdw zl05I@u+1i85D$-r&rib4wMZ(>njD4VLGof0`rI_N&{DjXe;FUTtE`5d=pE5(%fVjN zilN;|*aUHPmAcHHb%fJ@F3sQfUy9eC0OgmRqBKtk8YMUO$05uB)zB~|Mg#)%nE(|F zIu7XGnvm$c3u_x7EALGqCy68QUT_52QFhQ%#3d{;1CuyN1PVvfu9ZW9 z7<4F!AfP&(iWLb+>)VZ;Sg(-a5V-!Nn1cHV3@r2Gt5}<>9D>QLivw{FnOVZ1_sO6m zCvY|(hT|dVnXQ z;)OGYxaty``FL+VE4kVME>0O49~#>F;1%o|rj-Bc#cOMOyMRVIeW2=B_DIwwZtH~m zJ%BlqPIT1p1Vu&up3u#+1iyiGF)vE<+rz zkxo$WZQt^dHDdaJZkrztjaV17%=HI=(Xi@V3%U1A=ZqLd-1vMW<~RC#g>$ zk=Mr@Y&(z^yB`!=9&yk49e@`NCc2kvB9oOre=7ou?7zZJ7JnZO1;-kW9>d0~zsr#| zF{kB>&vW_G&A=t6a-DX`>u&T%<@KZUyr5&rgN+inL)IrA$*V8qjZWm_LdBu4(t*3r zqC}+o{eiWG&6&->p(B*xmpS{q#zoCXj)3<_i^|gk7@fn2R~$p7iLj>S28rX zk#P2S;F?+F30tPTyNp&|nIRxI?ijw;yfCAQF5@x((Sw8}@|Ph{o=q&qD~PC;10TXs zTl|qqEi9Rd(<(G`p>1R@c6U#DK+${0kpH&BTnxjtTsZLjTyd=vbP|b2Ch;OCq$Z+T zQJL4lwSR8o5KB~P&BBtnMlb)R0}tFsE;_iDI{I9f?e2rOSdpFI*25=T4kdX7uEfX+ zHid1tjs_hqMN`b#0wQl`>&TxjV{mYA%M*{0Ra`Ct5v_QoM4@)--HLVmbZ3gJikSm( z4LVGX#F3>DaD9E(F8Z*XljJtV)YEqY7u}*8?nkWKS?xLZ?I_29#_m8R`qoYSzBqb# zR3b)Jx}TEn2u3y6NEMBv6~DDjdzPT#`NBi)|H!@mm&ihY#s~zIU>a&Wd^E^^#q!3| z`twGdE%ZrE!WASmn321o2nL9qqIb>s#Npoq{v(*>Q>y^#1$>VAIHMHWT$~NNfEt@v z1SwJlx@WV+21b}+Sm`E7S4F+YJzAPP!4JUw6(jZn_~!c%HNAH1+-M+oCZmvOJQ6sY zOu2l8d{TBGXUtaLxC?wkjoH9b7b7ziyS^Bbx6#Llh^oJKXO|DeiUz0`gv-qGnsA<8 
zdls&^-5#eDv;M|Wx1dwUpMX$SpVZ4|2)`LD^6$_m61TnTw3k8yu(sFysHlORN+ekR z#E!5FAH1&;aPsOv+m<%)YrNB+cVPvUa*I#VN^D|gv>*|wn4Z7XL0P`93CXwn+X2IS zKB*%2g0Gh##GHwM9Uwiizk_a9VV{KlJm@xAI#7>ak74J^i0}1{dLAfRrJug4g0C#= zrA)^YQ`{!3WN=(v7KYBx5q%rOdlb@87qwzldQ0;Z~bY|NT%x69x0v zR77c$J_gMWNckmBi}*(zCBr?E3|%y%Ejhr2Z*>e6tM?^SJ-CEZL}Gpy^{oAb*oe%6 z5GO1Pbj5VUM1A)eE0)c|b4eI>v&`muFf7})=x_Rc^*_Oc&&hXQVBd$XzL=RK^c}xf z=-gWAt>5%3pZ=;h@h!a1@0kNKB)Ox8G-F0Kl`v0fQT zl+0+(cPY||gcHs564Gvxwcy5DvYpdJ`D3vz6*+FEq*oh3r;%KE{p4Cuj7JJfu~A!E zTHQ)2aPxC5v~u8CC5wj4yai5JcdDPujztzUPKzE16dYBDR^GiG!OvY)EyIW^j=bi@ zft%Z!bl07}wfnrn)bW1>OU6}QSo|zz zQk8HIU*%c@=De;I%Vg3>f1AC;;^QW!u6|J_v@M%HY}sc6$&Q{CUu1ZN4nC`40=lbV zn1op_f`63H6Fx%dQN*hD<8|Y)PM#lk4lJ;pZ^PHSv0nQGmf!J(yy5$V2tD?f=kuNb z!g9G<5BS)0n%K$LiJmrWkZS1AD=g0?S?HVC!{!ZM2%-l5ZGvdev(nzxGt{u-6qe;& z_q@d>E`KHBKktge*6@`79Xdd(6|I4*kha6b6yJ3Ne@-X*byX(8ZlcQsw29x@Dd=dk z_v}?}48Q2Qg%J$r#R!;)$dQCXLdYH^8wwajqYg`MV}hPcfuxu71>dvj+@_RPeM#df zXpm!N7CUwRJA%uyfl6Gu1XU~D3d^15BnA^w$n7kF8dj|#j7d&wv4$Y?^h;t+J!YjvX> zQ)xhM`X*#)LdSX<%c4$s;oi2x_WM*fI%-TN*vSuV?`Tqq3`Xl9tPU_>2JHaU&v+=$NM}MJCV$EQy-HoH7Wz; zM?mxb+P|S+K|sbiHy#y6S3HZjnMIdQF%^3V&U1%F2Tn{@6s3uK&rVE;91(SEI>|(Bu|S^iI<`h652c z0E3+a7ZH!asmW&s>Bv)(#tXl7a zawA3>!|CiNCc?a_y|5*GqRd_|RD0&7sLwIBE-oUM%;{rm@D`hbwMk!rt8}P2NqL(O zv4f#FRFYlQOjYY%OCD9-ikt!-V%8R9B84Uz+DMb1B*I6TvK5a`KR5qbZ1gWMIah@1 zWtJpNy}Sgv(mP=JD0I#Z=CPE5kVL0bo0z|`Iv@Q>Awqla7s_Tg|`5?XUT|f3`me#9V zZyoX8A0*i7K@rJ$xA{tapC#9@00Jb{=X?Gu?}MFHMrCNU(+nU$71fT?i90IaKc7N`uex~OynsqyI$Sd;3I+>QS}=vQFN zJXBzkc(e0~G*m}sX%fsE9y0mGSd2lukc>v%slaLV@pK>)h4W)4j$|-au|33!e02^8 z6xJ6D@wwbf2-iS>(26+F05YiJ+&lxXq8Y$#9w67fI*HXtApV5S1D(fyYbpLucFdfN z_WF~a!&7$l$ib~+TpjGMfF7RB9v-3itgfP-XtISoomE}y1Xi&tmmv^eiT&SZfqznP zrGksD<&g%YQ_liS3DDTOoBfTF0qdB(4CuS zC)B2f`Q2SmKg- z!0H;-1kUTl2xWX?79cQjR-t=H1$^Z&|DfL5cD8H^m&VJGu1_o}^DHK)qi{*%`weGA7 zfo9J8KoBTrPHJByA~e7C-6fJKJIYD7UJv%2MK&mMesf(mP93n@;c23DQW;9E=<*!K zZd0F$)bS#D{2)5~((Ry}lNZPD1rC1cp-Fb?EOm!6FB%X+Z||W|KcdVMueD@PAiImR 
zHp|SJAUG?j#5eC}(jp=P9_Gl%s%V9yNL*U1Q}hlAt(@r9Wx$I02R(X=8+kyIsAr*A zt;GHsjD}uAQ4prZX1tz?hU%ccYm<2)I{}AuMwB@rp+zM^MO{dx9Ss~N&r49ikHJGO z8=mx~#3V-yn{WwFSpg#SXNEuol7Dphs}&^pV46h?8Kf#rp4^df%kXRa|UD@Y3{IbiBGlfIgo@~u)UY4 z8^zPweO{RL?EL&2FY47`L?N$HsvF5sR2rP-P@u#UeZ>)#L4Ds2`iAa}P&el$r2GZN z>-O>?XIAT4-S5tg;mS*felAWoEuIKS=)}v7fB+?9Z-JNK`~p8jf^<@ruq!TaJ}6Y4 zmC)@I2Vr0-E)XoI7;fc*w%WAv-?WZDZ5w_U*Di5PtXy8fCjP?BtA)R-Gx(uh_|pZz znFx_W&T(wcn^BDens#g@V{)cnuPWg7MkkK~>*c|UGFV5J0m;?%$r{w<)IyjGcfkQu z^Wglh{;+%C=wz_Q>ORxroWMrG&GV8vQwzJizLk&FGJ|Ld_bVX)E zv3OQ3#h6wy8(TCWk-Ue2?a!|l6L3RHpldANeG2U-pz>dWRr1}3%p`fhmk4_=%^+g< zar2L8W=Pxu?z5n6MzX_TS^S9bV-yyD3fr_qVM!F_q3`-Jl*MK_LHs5g|NZ&sw5->| zv13h?N|m$2&Q{7b!Kx|LQ8^@9%MaQ61ClYwSL)|9W<6+--XH{}5MJqA92?2RhQ0bX zTYd_eg%~d`zQQOadn^KR^5o7$ST0IE%L9Z23H!tAs4!E+p}&1C{1SmJvv%w!*4ydo zHhKok;)cq`MC8}+^AO@YK_o*K?0?JVBcr?Ylv19Et&K9z^ZU0T&$kjcNrWfTp|i|; zzzR&%VN?9d0Zb=>1doB!ic?XkJ2JrJ{Oj6%rT-ZFz9;s*-I=@ns7tSoWo z9QpDgTMHBX(n@9)gm?WPT%A*NWl^`SW810Nwry2x+qP{x6+5XUm82@RxntYjvCW%v zTRZ36|GuoY9@g_%bN12u*OA60Ay-CmSwHbd2r(~!3MK&xc7>$GHU6z1_i{*Y+v|c7 z=0ye91Ky71W=9JusO8@k6dJd5DDk;zni^4gWa#cBsbk*o96}Ld8c^D;I2>rW=?x6{ zmc>p>kdU6p42>6sn;@3HLW~H3*s(n0!CR37ggFmnluvuy{Oj9JPnaOiJtF=Nx+T-9 zEF+)y16^PWH||-is3S;`y-TDr0@^$g`vskr^`AFh`MPXf!O5`8o4)DBsv0k za5xnba>!JiwD_WEfinu|D4ieDxxXk}h>YE1G!FB-C6f9_9%ouPM2(_iJa^vy27u5x zj{b8nrtMBA7f_|pvR_)E*Q9O;0n*jFfZ^ZE-<^>%6iL(13Ey(nBm9wlxf?;NeR^{C z-!$o1FTS+lZ|@#Xl}c2e^VeFx@t${Qa4TLVOwm|{I1{qE|Eqb0iYQL5t#V^qksLgX zM}s~RkOl*%F`le7IokExID|?f8>m$cPp=_MSO6G6TK~YRX{lLBuc{b1a*G~v85$?E z6_|pcb*;XYZRPLVe1u~1;Cem*T(L#FdHaV?&P(}Cl2bs-BT*_UVPYS*(v(}Mm`5VD z)Cl#p3+!E#Cn{0GCWupSZxm9TEiS@M|Ep1Ez?QRn^dJ9;J~oUi2Y`^bQ8Babv%#6^ zt{=sd-1&dPivM+K1lBAFJt!n?%WOCayDY#IMoisXFBib^zM0ArWzfTOxjGoOcwkxz3F1nWG3Y@Ci^ z_1eKZh=m(?fb_g^e*@13w^tn?ya!*^>uQHI=nPp~QnCjSxU5-fL3H4Ql~BN$&RxAh z6%Kgo5;TXHh;!W#yzW&gbfuc8WAYCCZPA*m9t`@yASp?_I_!)k*0;X`3xZl)FQ$gP zFIHO(kd`(A;~f(PU#U<4p>eBQ61eBhmXef&7u0^Q2ti|$u@R(`lLK&!{&g|iRgdg1ug1h<|p!R*75#Cdx 
z7<#W~s!?st$vn2*wPE^os$B4895#&5PEyllu~(;$?Y3jsbJeyy)zf;PyM21$DE_>T z69Xn;9sY9Ct(UuK8F{{lk4a8S-9u)0D^LB4Vt#PXquZW57p;!ugAa-j6YgLq^r)2_ zj1uGDv#ibHY+{OHJyIjx`H}VzgDBYYPIY7XJbe)n4ElHA?P~vZ4=p1DdxQo%#0w(f zg1H)t#%1yjZjvm!!G%@iH%&t?0yjPqf(3&tteyGoP^zHu#tKLrG+vK%e!cX#<5ecn zz?l(5tvEz4db3+G0>UU{h8_qh>&R&g$lt0($nLxN?cVM8!^G?Rzf@ku%QJr@+8Adp zbft% z`~!>il<)^XPm*eV=h?9ny@+WPNnW@nF$2tAgx>HUqWu@1`<-9~xRUtle)Wsv9>&UW z2%TsNQRyiD;C1Y=*sNf8owbRPh6g>&iYVMM|1tp6%c52dwL*@nHo7ggWQEQ3oi$ys zT~QveWXLv+kA*#d9~O9}Oxcr7H{`$PDKg$EO8f>2Tkvtu>yXfrcq0lL2jq&CkL4Ol zn!Ib$|6T~~c$k-ta}iQLCO!WortMp^k_y znywpJ*=&3lB|xkFlARi-mWj`$^t!!g;P-vPZGPkH?Dmg0^*0JRT8t#BlfcaTatmzJ zsj1Vd(N^}oIiTUhw(U0TYHJgl=akD$JvB{bD$0OsaY!^4Dr{mvQ>j;j)Oh((spuud znWnmce`DqvY_7gDJH&xrX770YO6QzD@gEn}FBDpV(4XPeJ5;ZKGKNdRnYCtfwG}1m z!Ks+y_4|W+XF#>?^-E+BU^AZZYOIWelkC#k69T730-y3coj(p>yYITLb{DCbu6Gqb zk){LRJzGCsq@*t$YORsAnm8((f*I-K7>(>=)XZz``-utNVY2hnFu^_FRS6+itF@3f zX7^p4=7E>KbcJ-mag=V}1GN$$?)loUw~PnkV)wi&FfS;hA_hd~Fs>xOzYmIq=kfukreC+fKdw%vX)~=~uyyO6d(@a0;#%g! 
zB}4Nrv_7BT2`G*vM4?{{GbO#fa#!#MX)o;?$>3r5qNhw0vgf&cbxY0f$bq5Iab!&YVo626 zwsq@QwL*mCfyEA9Q-a=TquO500)(VViF|aDL&ACUy$ik~F6|pDCo!$qMt)jl5ct2Ai z8)J9B11K0#a8#_zh5{l4Vi;no51TZd;vG%iBDVnmp+F*{j{`vK+bDFaf0IO8 z)y)bAbrrl>rFbzX+U;@f(}xDaQu0`EhlY$rAtKJDGMKB!zrzF$Bscaw0Dd)Mh3>Bb zMg4J3ynD7M*oL^RBC3@BD1t~*WHWPet>osMZATo0NwoMr_mO|tUiW#PnWZGfL4+|U zzO{ua-5q&-rg=U_GIy_*-oeE>Mk{tRhWf?+Z!E8xEjSgS*AB=Xn|%a$6;sSklS*a3 z>>}zkAJXJ-q(a{R8N>#I4hP>hN1VRqX zwRdI~5Hg1nj);k(&PoLk&M;Yx;rV}hZx@@B=ZjzMNs=WLDqZhbpsBHc#saY1UsC^u zKam5ssk#L?Q{!MyGbi(ocIMMW0-_|N*ih-|5`5@r$zJz&TSKd!M%KW`7SSJvI5~G6 z6zV4n|0zNV{H22R*7MzFqI5%RZiJ1T@M@0lQ2kZ!qQEf&6HHmw}e_xtIz3La#p_xcc8Gw;U zK*Ed=u~lzQ5I!~MhkZU60$Gq0PnP=3?Dl!YCMdq`C+Hp&3s(v)w3pi%b=5!03l;MH z2GhA!QHcZI8M-zs7q!4l5%v3@>q7^U^+_qik!{ z6EU^t&~a#M)DM1icV1JYFl%F()xRSnSaGvOwv`WoMn>3^ywID42JVQ%yZgKebm?v{ zg-&f`&Shh9182PWF(4&Xu|RrKT>)bsI;fL|q*aB%t4%r!pBCF+g~Tzf3I0bhC-_QT zT11lEQ_*ySx|CAH!A1EyZ26ZM0_>2wFDBd|s%y_SXkJTOWeTTFk^<8Aj>CZMMHSKlc5+lS!rpTpoe!lGGKVI30g3vQ&_Nr;JZ zJy?%-c+h-rmLHY~qZm%Ts$$u1=z z37a}Y7GYWQc*{HHbEdFoy|`!Hc!e-X3|^CK#$M{U@L7Tnk4>qJlFA0`6@GSSAP4t- zVOC=z5^;pD=^?h^Mr%Nxo-rRJ+*RHlQm7$|$ zq(WM`JmB;cV*-xJR6=1wQ~?5IS@bfgg#*&0yS*(LKT6zp<`HoneK}xa2gddvRbF>8 zgo>ydc|f4e7p4mV_W|K;o0<9`;R^3crZ*(ip-k8Y&X1fwF)!F_Z?hXK>a-&SbhtVVb z6?vz_iRzNw(9m3W%*lhTZks(CyCB|Cf<-{GA2kf94!6-^8ku**`41Qxs;N<6q_HEj zgd+!ATqEljk+$R&%9tO;ewqL~A3anM(r(pK>)U^GLDgJdI<;&YE2nAIVBs&w3#+G$ z+{U-esiwZvpYa(uC>&6SzR0M-IWtBQg3izUAxhG|dASNplBdYjbBW_m#-N||Gk`)z z8ADIjxJAa-?VzTg2czV8UfOruP0dNXZuxoBK^<}X{mzVhhEOOlUXX+$v=SyD#SmErm1O0;BHr}r!P!G=j0Hj6i1c{e_H@5k3* z_*TB|m-*jbfcv}oSjb<#*Dg3wh1VRCtGTJTPxgN$Nolq0M<4o!_kX}t-MEI_lA@3( zjAF(K8zNn4>@_*6;h>vfLMs|2{YYxR(Wt>>g|v$oR8#B?ZWj;ot^Z z>lAHcj=VvhBHL6*L`<55*H0*=r7#t}5{hjb9lNd=!!BB7`UKSxRG&TTh?qtX>x^j& zVNu|T{?4^5(d!R$CMFvofI+2_Q)@spA+40~DZo_85bd>=ZU+2d_#4`?PPTYj0v|ko}^+vWWW-%U;A+m}kw!D9N#L-O7#|#eb^b~*aT&g`^}ix+(=0b}b@3D!WL`_PbOHe;s`KO*8kZB$Saaivd-wOsa%;Wzd-KeAd(RSZ=zKLB-SVOhC}<1~oFgfzi?_*I9ziL+Sua 
zzX!dI=v?M-*3;_beZ|hGO`9x~@h8U6-Wl8Te@?X;zu#o1m$rL7h5TSUe!eDFe*P6; zL1={Mi?GCTz9`Bt_X(SieT0-@g*GAwA@Rx}RU-ruQE`Lh)OYwuvW@bwoRf9Wn;iN6 zZ|hzUfWWYVj|*R~kNKyYJ7mNCIYjCRxNup2!Z3(X-QT7;Ri-L~uN!c&Teyu5Mt}Sj z;Si862#x-TF}T}_%sM+nm@}(VYz7RILig{mjD^bIbaaB_BzHc>9pVXV_*P||>J zMs>0d>7@6Euk8O7ia)u+TIe8NUFOLXxW zRoEgNXm$$p&Mkk!xLX-@AZ3SO!#38F?5sK~m9|EoDAO4Kg8^R8i69 zGgHbTOkgq$$|!L_I8ok8b~jQ9OQt!2?Y|}LCkNz!Y2KzpLZ6mvPv(YYL#YgV`~Glu z02UWhM1=9EMpj}4djGClzffjKM`*PSH$}e42bTmqz+)*If0!qpJSQ^Wx)+n3klDb! zWDO|)!l9e>f(c%J=e^YVRh=zd4WU%9eFc$Q#$xZ6#K6;r$|7e8P$Il2`V-3 z;}jFc7a`L5ptJ{GFw|T|r*fajb^A@A-@QbFSFKfv0wJqUE_PUfX;^154UI;pe5qbe z|8`yazxs>}7pca#fAiY7>8psWwi#mJjlq9uMOtwO7&r0BdC@{ zIU}Z%qhkt~v7!2>Vx@C~2-us9G{k`mYbL_%N$&tNbRKdt}^m!0IQ11ATzy8bxFv^6H z@6|bm#~H86^n0lGn`%J~MJ*IxnrBN9G1TB*za2b4KaYRi)hK;5Y!9WD^8hyxD*KYI ziYhVDRE(8c8jpj`E_=BP>V~k)pSZ*IEVVS~Fto$}v005)T7b|oseql$$&Kja{lord z4jF2ppp<7Q&j!zD9k{Lj%tJv|B~(YqrVNYvA@Yz~dk{TFN*`7($x&I(?+<~#UDPR% z$@_B6j2yu<+U<0m#uGPhGR;w>Zie&(5w5AnY-}8^6-JfmRn1c;Qle;oR#M6xi5y_W z_>d5S4E}WLXZdM^InYP-gdd!VAN|KQ1a{+4v2IAAjr*!pXPd08n>zYNg7an^t8p+L z!l0I*A$9LrBl%fni*fn{0vz>5CSth-OHn^!F_o@yop9YBxCPB_c&nCRekO>aBH(6^ ztUExc-6zhm^Nuk+_E*n8+f^a6mqs0vhVI^dfZ|G}bs)D<&$_dfSCfIUYuheM`#O=< zAL%R-aW8iHfy(&taXNnQH+ziI9e1d=53FxsQ{rsG(Xw%3_NA%)bQmRzlE1_3J%hug z!_E65-FXhUuFh;zHzC3$_dGT5%s1sg>mOG|C8k^3&WHRd)*4fr#&-rV-8)!T{!3w{ zJ@%Gk%;tq6@$^44nMAHHC|7I+@W?p%pGe7PUg!|j0@M+s>mZ(@`eF14P@DcB#q?@H zTClI2bJiEZK9)lT2IsK!F?!Vn$08~1ebggK{N8_jt7++~xor9UACUxKcd(yVeXn*y z3*I4yHbLohkOX#qoZaE&1~HgX2qSO&0cmUGJ=aF?hT&=pA=!vq+hoP;Idh4JPdQ^+ zuE4@16bgv5(JrFJuHb}+s=cE}hxz6IC9q>b5Sl#I;-ngBu#LR2Io3C5-QWE#hro^b zggv0-05O|}oM_RhtGe8oc`*`jC#pWV9PdB#iQ znj6bF^Fh!MK|NmMxoX&gpA^?Wfml*;2u9B#8vb(Y&rX-thXCvM88M`C7SDB);8V_K zK9((XhLL>4I<%7?8UqzT!-MGL^=G2?!!Jw)#p_7jHT2CNLhY?!S@42D91zA5)@SYV zrxKz8&5T0d@ibb805TXj)E*DU<^=hI8KYkYc%Yi82IN$zz@_65={?Zlbd% zzfqf(6XBpllemmGw#?WY@<0$P-E%8)L7a+@%U&Y<=hO+lo{4x#oo zVlf+=LD2wt#^V0)cSS|IAL{FSQuLs#vI&{$Zwltdl>^(f#GcTCo-e|!xADZ??kB8& 
z`F|-y$nBHMTF%33>Nz^a@N>t!2%Ru0WWx%SSH2Zm_>A4rQ@F&XWvw(i1V;09T$e`D z;_P9f^(2=23@2>QpQ}G8MXrE_2O>CHUAw>&j-PoSC-eYP1%~`V6)HzUV8&>yey)P# zp@x?%&UZ<>NGzJrbYRv@7U4AJvtB06=x$h{lY#ecuHtgCvm!)C;(3B;K@GeZvgr?Q zUQIiAu^0r$RcvX}soUMY1-8PCM=3Y`&^h&ejl2t$6Cp1#t5qIa zzRsRwA6c7PGXybYpd-dc-X{E?(fYF7Ap!V5nNM!*N99#+Uxvzm!QCBg&E>p9-q|c2 zlKi#C`j-FTCAA?%qyIWCRJ~^mpT$MTazmUc6Z@H_3Ad0e;GgE)5-d z*$V!MH7Ps5sj8k;qEqH)5MnIbStiU4eirLk5dgL-+#MG1cx`u+UVek5SU}HnQ>N|o2S`kkx*?WHf34mCK zN4b#;#!ju)SF716Fo}qH>yZL$^}RI?Tet4vA(J%FF`Snx_|6|5i7k`hI`t&{gwBdJ zmMH+{6BDbKG+hY@Bwbs~l@&PbX)fwFS!m^qB&K;Ue)KtpiRVE-RVRAN=w_LOzY#33Gt%1& zC%OfPnfOPr@hAtvbV_;d8)rYg)3@S|6SczK>?9vrvDwOKs-`AR z1DL z+{;83X1;Hwv3$6^@@`I*&;UTu@jCFU!0`s?pP(t!vxD-4@PrUz{@{vpiZrFJ4j!94 z0zcMq%*X!yNvfsh^Xhob_3b;)San1L1=N@s4nFjlCO?zx3=iJyGnfZPds>YwzlRue zfKOibQ7&~xX7G$E)oX5&mPZ}a7?lk}0sX64?fq@nyo;)lKOB%V*$wU>2j2Bpjx-9e z+?DIVgq!Kx4;xC77>y=Y#x1CS($3}KgfVjdLqkIu*pAA?p-k0&!YljtQ9SBNKlG>; zz{bCPw&}O-7js`wN#${vsP0^S2RW>u)|OQ!S3gGfMtGnxNTqOIl9|N6n7s?c3P2|b z=+CEpX^|btTjeaMpZ#h0L@u?fc(sgP8WBfj!#t8{S9sUBKpbb+u(P?&1H?uYnXM8UC4sw2Qpkf>* zD6(eT(!{N&NBqrO6ANX*Q%c8lbX9U_dV{8f>HI}cF%>`y->gvj(u}90@x)ugS#c30 zkW7kUp|Ea{&Rn{4%&TU}w_-o4p;&NvbyH{kvgV1;tA=gaO6c3!_0hX7$Eo9X>7wMd zyfgn`NvVD+A8k6nXAv0N_hTUkaU0ojis?G;j$In_&r_?}lUhb~MWMegHr~RDk*b{+ zUlzXwf&5e@9|tHpd~G@g4Z((2(goSC1L}BGH-4AS=Q@R}{~ed#Z)e+Hhl!lqJNMY$ zfB%p%dxJ0c=Sqoqpr4nZ%?M2PWg0jfg+*o2M=gYsz1xD7$hFgh234fSV<7x~!uOFj ztf;7RcpvXE+ez(Cn!~1pazht-hgE8>G#WwUi#fPke5Fl7of^fywt~Qj!b6gNyy+5U z_kv>l3*{!oOm3%EX|Q{^{_Oe6w`nLykuVo%nc8mD_L;0T`*8Hc?VbA=?+1&CIU%hL zZ4aQ0&$p{^Kt6Vip0NsAs(Zlrs1-p-#WqnNgb>G9$~NAS+LS7iU8VLWH|t|hz^7wf zkd5o1M(<@Oh0yhVbmCgjwJQZzvr9#R1%v1yH!VI)0T!6y!);8A6DNse5m~=*iMq}> zm-a|DO~IL_iabmVU+Zav;=m4KeIxspqpO+gjotn7HXez{H~C zlpe&1>A*-hT16IFNOLjhf(0(mq03tvy*7lb~*Ru6e}^;(O?pH3lP6_?L38Mzx$I6c-9go#nu@XdUji z-gP7)I)33#GcK+CLp=m~CEO4Y30?u{5111E7 zU^f)Z$ZH9*OF{pzE8X|^obQdn5Zifk4$R6EKeD%=Wu+dyc^G#s4W?jn=S}OB(Zj(s zfKbvnBG>uGN2vh8I-iEFtf%gIeUh+XI8_mYP;jonM*#Me-G2+0NQfzgRuIA`@*GV7 
zAYh`^u`KLJDUz$>iJDYBMhAvVa~BezdpU&+q1Bp@lQ~KfwlWeEm;U}zFw%A{)GfOp zR6;2rrd2~MS3I8y(^-dCevb28zWgS07@DY{LvrtHA(AdfAXE`>eAZnaR$Ok&YKui3 z4^#WQLIXF3^4s|LX337ro2>{ywH$~#n=WupK0HQ1Ef)y5yF?T;6x!+ZOSs!)TfpW^ zQy+dbE#uqi|5dS`n?pkQ@5-reb@z1D^?dAy{(3!Y0{v};Y{6BY4~YuT59;w3!B7LK zNosNa)J?Vg@yx^A$m%3#JE?AuB(!2Y0z>Sa=3O$WM;G?1)Q|iP$N1t6;)>0Xixg_R zYz(-@RUi<1CGR3L2vJ;OySaX2Klur>3K-Q%(VqxoxWIj$`275Wjt&I`0$V#<2M=E$ z$1Wh@RR6#+iE+hMR{nVTA+f;rH&FYqb2-{>#FlOqJ%W{;pD+Ro@PT^q`Qz>1uDeKb zk$_t+y4$%<^OOk%;uRa*8Wx`s1ANF9F zBJ@<=z2k90R5ZMlcGYEkIv#Qq&LmXy%orXTl%OQDRmk;zucmOg_>TxaFrZlo{q%{# z4%6BcbFFw~xK!_|Cw{0~o5jrc8ccM(fiFXq@Y*?IT5DSP692A0~+g!Yt74vyw z>z7;T=PT&P1$e|8ncY0u6~TmX@EI6Lj`A$Q<0{urK^({vxpzGz>quqgK?o~0=0V}e zXmYLVbnFzdehVUwe#uKQ`s^q*qon2$+#M%4!dU%Ovq)x?f4xo7yL-i*IJ@XcKZB)+ z(1y)l&R6BvWp*4X(=AEQaaPHE)dIb%3ZZmmQFobta*9khn~;T0l?YBvdzX(95`85A zm~l;&(M&`^?9pKGciOJ!tSVVaiYbp}J5|S-0FNx<2w`5ngy#@LGago#;C}&1x^~~B zsf@rkU`5X>7%n8seEQ{2HzDuvqYvl>$L#?mFnl;Irc<;>8X|piq#@&RniZ>7(_bK3 zJ`C$)&RbGbM)Sd7a^8VJQYg}Eig9iKLu6=ehlC+5PZn$wPTTOHD>oz=AUW@+>4;~t zbFzZGh(Nz9n90;V{oq#RYcFeSHM!(S>&!G1UbczRsw_@`c=w-$soh&2GscWL#!b!dyHcii~(9XyR zarw$ZE9WJbgJ21cF5Q4%;E)UN%x$CXV+||UcaB0#8Ulp{N(21%&=sF zAzwe=*i(b;@}c%`(t67 z#+yYyLuJM9hwt5!+(AU5_We(82u#Vw#Ggwt;iASJc?tXM-~%F}SE0Z1WC88)_3II&X9V4n?k@{Bo&~rl1vq_X zvo7&c_6ro)wioeT_Fjo{Kmw-6MlO{Ba<%c<^ccZ$>S<&|QtkbZW#Zq_V&B05#{lR# z3WSAZ#XA|U+Tw5KUC`50OYJhmF!Bgp$*?aD|r};+30nL$cpfjm) zW?~smtX=|6?`}Y)*o=tJ7`!MU|2;jmdba|caju<`l3pe~=t|klZM9-M8&~X_<+5k| zt+FyUDjGKJw40<5`Ah$`c5*M<+JoXFq)s(jBs5Hkua0S3hK2 zIu-W2*J0B%EKMw+Eud$6Z7m;}YYZ|Y=vw3cgD&T#)sfwjqtiWtT!MzhxK<4z94rtO zFAr9(%4&+l2r-VMy#(Z{*g|2?Cvmtfsgi7MLuj2)x?_f4;&zF@? 
z3sAC@E%W$avKSBb-h^~UkHZOGKrmKNaWKSoub^i(^>mq5m0lj@hxT=R>&jQg*@Vp( zL&XtlRM|WzYRJN6Z4eO|0yo+})u`D93vh>n3LcdeMDi@aKUyVTIFM1ApG#D*Ew&Y4CAa zuzLy=5^EF4Fmj~Z-Mt*beBAa6f4xNVUXOY}0c8;<=NVl$dZXAU?+~%C-t)Gke1JI| zRMMme<+PvQ*}W#tB2L3LW_>gU?u+Mj!_6ISNy76dKp6%lluu!mnC-_6UXx((J(jy} zNigTbI=AlX!7(>4;wLx}O6X@bMW0Ia?J;9tw_SJ=?MfT z=yw_S+#6kq6D(_l0>Do!IdIk@E(%R6fO^XM(cjy7 z4(+AOValhais3Q}0x~p(hV<`XYXVQIJ9wc35@%*Q@{Z|mE8 zxS{sxA9vQetEb_-aid@P^p@P&rX z!TaiaSbNf3^^~p#awa(?1362NjRcf2`w+;?WmoNbuLBmm>G(B#70R(^n!o- zGGxK?ce!dBZu}gbd z6uA6om5mg+8#Evm`eB^damJ@%fvPkeX3xM`9IRK^Nlc0*d>Zcqzx=TB3U61>G^<+s zu(JCxKyVrGRG_sgtj2)aYc}c`3>WzMPN!$*>E|eFQ=L&`P}ihFzL7^g`|LN$>UZkk z#1|lWrhA{a|KRV%UC85>izz~jXp>%ObUCowTk!I@i?#iTs59x;;HJO6Xm=+5MA$J- zmy1fHeC4F!s08GIOJ&;?H5*F5RJ58p@=XuHtaZCM$_q*Z+{*nwC!XJ`Z3EeGweW4M$WT#Jn89b`)2HjX&28>ocay38t-y zE%p9|ID-Q7KtYpHP{>7L5Kqv`r!GLYR3)LnM2ZHblvHa4(gX$VTio>D2JdoRc}x@l zX8z>lL9^4AUEydH?VEi_gF=*)SUG)(=Btf8gsJGpZRj^B8TZoWpn?l<;>jrm zdIdpwa(h7EaETaCe@ z`I1F-Gjo(UR4`st_l-f2KtKE6_lwwEaHqRiw)20Cb-oW^OaYJRx%&FM@24?c1BWYe zG z8Y|y{O-e!$HgJ-zvs~w&zCwknELOVmb~gR&@ux5f`6ai#}d!)JDbWRFdjVuGJK;fx%IW38YzAbj>9j z$l2c5Z9ch~M51Q2vGcG@(G-P4!@dF|KwOb}3ov~khIn0W2V(VV#R z7rVWEpCbj=$*-D9b|exaTQ;sZU<(DToiBpL7G(>sA8iqVeYfZ`c@;|BR6}EWBGT@X zKy9dnOTv$b8s}dGJMF$mUeoZzRW1&~Bic>bWS;Df1>@4=2+asy$j8V6TigD*Cy(wa28#f z^K7xBR4c;B6kM)Ahp2l%be(6#cy@HmU4tuz_mHdUeB*o0Fs1&!%&M2S$FMhc^T-aH z@h1(}{>R~W=k>M`J2?8@LYQzVz@4R-DQaVE=i5aqkswC76!V0P+}hD$%29cs8t=sn z?dS2d;t9R6!N2qPN=RTHzKTCR#pvwqJ8A}x3X`U3T!pauviw|CW%-T#^J6Hfh8_Du z{9vTOgRcDzJ@>xcFLY~{y6|bUob#P6!>zr-BVqP!W1Rbd(JnfY19A65bWcBWC(+m9 zv5u$KdNEKqf~Y#+KWjc-gKfh;`m!?YXORiO_jN<@{n`FSx(KQL;odD}3QZZwZE1G= z+K=qnj6OR}0#Dx4;yKq=Q zg<4{Hn+$fH@p#qH?;z_DGOLIdC`S~=&G4{q7nZr|YM9ViiEjju3oUH;n)JSYkD2H! zHBQQ^?7h*KrY|Ypz~t#ERPJOBMe(m)bt1wVv#r;lrp@aiLJmnX70qnL9?p6+>@&@! 
zZ3+{Fn@KS@6110@cMy%ua&PfpXE-gp;XER~Mv}dX!EeXcVj!&fQgTh~7)g@wh5))i z>Q9#;K@0OdAj=?7ckBJmEI7d z0A~d9D$L9fqLfiQ2VA$pP8H(fn21DAOjWzllt*8%ADv;sEvul(&Mf;yN^hhGVudCb4v=H4+(cmp_)B*V&Ve@KADk^89umyr`)V04oAZFfXxS@Mu zFw{$_%q#x!`NCpg6Vi~b33I^KPq)|`qU~b-?cMit3QkrsGFQk;LB0Lg3j#f{zA?xM z&Lqctk-FgaA=uuM3uatkQ5bPR`5V*Po_M_xtQgZ>`kbvUcLs3Ypr6Dx^7XcDVa`d# zk{+P;>WE!$g|*)cC_!bV_bU!yx$`!RW+qa{`Y}Twg9tl-?4iQ;J1S&R&Im!E059O& zh(~y-YLd*2&M!+*@NM?IN5K(gSUWi-Ez7!itzX>7s=g0Ch6ob?V zxAL>MC_8Qu3%Qq+3x1|et@w2@v)FZwb`g3?CGy-opKL# zG;)3yPfAc%j0N2OzUUcZ6H_bSd->4anc}YXlD0O0Q=m~(gf$IuZf8#z0yPv>5db00 z@*RD)yHy}R@h`HeXY*HQ$nu&9(qt&{p4cLLZcdSD*w+OGeE+dleJ}!Sx}nW7##EMY zl$S$tGhBD1;P&;O;KEE{Nw9EO{w$EWnJgj$Zd4m3TinyQb2d<>qJs#{ApNOoWnrzV znsR1`TLh}!aIsU@M-;015Yg5@V$CZHD|)%6zRnS^et=$Of9Hg|Sok#c_z*ijYWiRN zO@E>Kb#R3k*Ayv;X+OxOX9I4~0G`vzTiec2txcjv7)kuNDhRe7O)bV^^%2pi>Yr`MMp&?_sh{**7`>|}o z06AioZ&6uhINGdP3sDXVIv>p$vzn{8i7-wF^d&QScRyh6MU)5{dR=GA8uo0!LbdJS zARn3#!v^N}U;*w1E(9TV! zo%G*g6ObOT0O?QnF=d;!PK+Io=u;XUA&S_<24`}@G&>ITEP~ymE5v77+>< z4&oM#`iYe*MuqRerlB^ToEI(OE-o@)Wy`s$KPRq8FEaoXgz#Sr@gnUL)?dC99#L^+ zQ}6X7tz-pJ9ecjg!GC0o4OrN}-ZX)O?UOCj(8c~Y*=BTve}+pB1RP#)@@mR<3hm*} zn~#&~^3=kEgPxP0txp%Y_c|X(J_J4!9ltc+4IRI@^z4G`8&_VybUQ?hZPY9d?$Xng zaFMObRU0hLMV0DZTtUoHXxeZv(%2)D^3WDH>xM1{x)NmDu%uhvf=!tkFUvOOzIg6^ z=w0!x6FP)OFc)+vHGT8Ot)_N_tQiYUR~KyS;_kX|cXxLuxVu|m;V!}5 z-5r93;O_1goIr4QhlO7DY3=NL-}_~@`2)rrv#R%APswLb%7<9l)RX9OMrhS2Th3I) ztXyptp2Rz&VtTJ*u$jVaASa6?IfqL!uP6m-W`WjlaJ6UGf@32iv(NLQF*=K-4INal zo2N3$#FI4r@`}zWJC>boTAGW|i1?94VL3gK8E|Bsr7ghz5&XH6%$-x6)>TXzRzk*= z^&-J{&$Qof4;%z#S&*=Lh)ZY2%T`e^Fh$-U*4-v?VnH%c(n@xb=*{qckC4uPA|ig# zZwV_tqh*SKvz)n^7i8=iab^x*7(FJIE?7Cl!A&*%udbyhB7P8a)qg|ae%|#Nw5$S7 zDzen?hd}s7Jhkh~@SeNY&}-iBDo}k5Xc`PaAX0vcNFeG^4#`VuDrAX8q>}YoxP7?U zf=Oth2sVhK($rIOMImX9jh>#HxR|{x+1Dg=SKlMX!1x zN@`U0ZxKK8V$po_*qPNdEch;t48mMPfY4Az1RV#6vPZLxg`nr5La^9LboviQN^^HO26v1n`JJo(UB(KHK0Q4>d;Ne^$370v7Z8oi2NPZU$)a6w&RgkpQ;w71O z(-I_p6#jcoN>(FY+pFq?NoGw!EFnS{9K2+h%9AMz<~~T^R9%W< 
zPxG~BBJ7kKSvY4l_sqrFBmAGkXcbaRHaQw=Z4>5-6I=&}xU4fuFeXI0R>~3{6409! z$%d*TU7!v`I6f5ac#pYVbINE<9zvQGCtk^H3k@{E9*FIWMI_NFuK|+ zGLW5T6-$V|b9A$3aQ#;3w%32y8-C+wuSoIbV}(NeuNRDiwtnh$(-iwPwLeamifvK% zHhgcoSg4C2WnCVZ-%kp{2z3R{=`g=Zh)BsTy;m~BkZG`3;rG+sg@u=1_hNA(AxG@^roT}y z4sL4(Sx_63Ol(C&<}_x9m?8;FBEfvC@I>FOX0V8_kc=FfA|Xy}2@5hT2A60{JydLZ zV(^68R#@!xr|tc%Pkl+T^9?N2WcB}P+Wz+;qzbBx2x*=##dm5{vClotR^V63Fie;3 zjH6&Jz#v>$#|e0MYBIMHdPvnXzwxRXgOiO=79F?PL7%)AmP?d%;e!yy`)!M{OBz^2 zm-!kwCnvhF=rg*GNvT+zX?pw*#)9$03AxV}EICdUJCLt=kmQ2+BpYd2S{XJZ2hW*I zDv%p;Ln}z3fN6|?2a@;47?E5&MF_t6Y+&do0j$g;w9IL-;4Lb+jVFol?&aMVc+t{A z%$?B0N1ni=uO8diVXVUfyl}U0U)5Ekw1n@J6>A7FHz-l~e)9y;`&oGAC?psT6X9gn zxg5N8Vpjc0g!3Dz8^7;CMXqyIA6nud9e}6-!3OUJE0q*B7RqT}9m`B{UovYj z4NoDhK2D{cBH1kF0)&$DFP({(yOU^!IiKyT+EFx0RUl1_HUmgQkI=ZQ0dS>E_t=sj zc*gI*K{AwGeo!EHBx}5v3~gQ`L$cm(u`DzrC7joedwev69#^i*JZ+#p<)^;IXK>#YOh4)n=N@Te2}0Jg|}b_v_Z|GS(p7i6!@2Q zCxu6*U~^Bj>Fi8SMh zCtd$}TLN+EtUb|XVou36ncrn>a{ z&h;_49xz}dn4oy;N<;E28viHLDQZB}TD*Q;kFYUS#p~3zQE-=I_2RY9PSfot2`16K3`Zlr;+S?cTsu z(owgKJ{1yE0c>dpV2s%M*xP~pHUan^so@YW(|#z3jejGfrdqKh<%U`I&q?K%V%*MS z8};u8YC7jW_DPc z|8Cbx$k_eRHv&NEJ3NCq5|*2hR>JX5y^~xqJu>~ynDnlCxXsOewJ-RDro*C@o*KV^ ziT(%m*~>R%w)M%cLEU3!_z`brcGlU$KU|POs{V)&lW2B^#IH4ImdTi<(dYt?Yf|PgldxqfQIjeG8vhlUPD&RS4}A*8Qo&i ziqbO#>$~Lsi>l+Kg3I^)kkjXn>;~T8G>RhEeG(;^>y1k`9>NGd)gQ$M#?*4?Y3oq_|Z|hf2NOrtbMmj^L7J z((%c~h10)}OyyjNdH^uW;yToDJq^Xzzo zkVCFe6yO!4iY@iG6sfs0iybOs!UeRQRJ3GKSHm*oi;|R#4;K;oaCB~l;HgGjDME)+ zWR%cFPdcacXw=K0m{%6nS}*WXjzbinAER<4(`-5hUUYF}A$~x!h$m8M_y#&ehum*5 zk&8j}h*zO_R9`LIX9?IX6es(Ho7H;5CE1;PpSC!W3 zh&@h_Fy4DBVL2EULgA`&c}H-yQlMeE>2>veof$Y*@Mz_kpT>5QeHO4kkUeP5Ta$b% z^(}i2lyxv)nV*JTbpa#Q-5)-Nprxik2qk|SRZSnNvg@GsGVK%>+g+y^zlaZE;6mGN z6%pF!Mxsfl&iGKOkqw;J;Y)O5%aQuV;h9oUu=9M1-S8DXzSK*7F{m@8(@8QNrjJS! 
z@WJY$$D}>ZL2?-Y(TU+9`Tu!IMjpR>Bdn?8?I1>d?e_5zfN9KBSV;Upo48836JRR1 zM&7}KA1czE1a!^aFB?Dt?$YGuRVkd&nbW&Fj0{?;gKl|0`M%It(dK5X%M z2K1Gon7$yLeo)P;*thX2hek=#Q0k_;7J6S1I$U0Xto7}SX?#_eXeWa&bgHd}Bt^9^ zx{lkE!Q}q3Nb!?Y)@TwT@Ed5p7L~}e{;p$Mt zirsRrBg{1>EoYlerf!y3Egpp~p|FNaXL|ksX_YWK=Q#8=qC(15=R0J+ z(_!9HRz#xK!L&6W2DX|jk8J1@X^T@}Atf)oq_;>23-@&hod0^_eI41x{d{22 zu{@H2{KQNUai#c$MT=|p<=uPMZf(CT$b9e_iEO`I!U=2^9{Vijjm{jcCfx={23BvA zkbjFsHwzgh7r;$+=`Xb7{~oA9nahz>jph@UoKC&370Vv}H0pbvfT}PwAC4;(UsdDe z+14MCgV5PXG^-AExX|+#Xlo$UJ1`7F%;&sokW_|>Nb{r0lLYxJG((OQC2b(Ga4T&9 zchCr!yaZOmzo!yI*kJ^;(Hp5tG)8MeNo)sA?ZNCa&ulqW z-bBB2@5Hcvn`LFf|B>*}G5WClJvAx$qEaKb!^Er@M~(()PgMW^_f%R3hoK_DH(MFL z=zSnCBO1dq8veJVD?aRdAz#&oz$=IEet!jCp>C3^#MOL%j(*Eae0%P23z<6Cerc&t zV@pXxG23<|Ri26kL|KRzPegvUe4u-SL-UnrYN+@~rZS$)D9vFfgyl)!-#^Eb-kb>7 zM}{c1aFJ0ZKS9?aBX%M+0x-C%_2m(yl&l;6nVym2ka?kC#eo&oLhn--?u2RAiTRHm zS{%d9Y}k;X9LLMJ*+&F1R%GME-{M3r9#h&7mb83(q*xb1>+2ICbcl(nc2cR+(?HL4 zHIs<28zF`E9MDhLl$W7^Rl{^HG`x{yRJnH7!a*WFvYZL9mg+A>*4MxiSKF5q-G`=c zs2L0G8w0r~i-^fXXuYGg0jXjJpdiFYLsgS06RAw6A}qz$M#?qr?MoS)_lS@lhnIT)Yk6 z8+WQxxp>`}=D7QANcW+VEG_#@Pb?PHEQ@Ezs9=(k7lR7c<>9fAOW@bDX>B0_WopN} zq;}+q3kID<7n*FIgt9vZ%XmX(k%4soR-61ooO#blo;PN0voC7lrlwX~Vj%^f@{vQg z%np4eO)^vU5F|XMZ)Q+z**mQ7G+&-M3zT-AF%JnNyP}q4nLq=BO7OUc%NPR`s|sO) zNYANgs<+fI?h$^m?Ut#34_rxEG4D@hW{Vyr%~-Q|t`q)%(l~sRU4xENrs&toh&8LR z*mDzZHJFD;eEH&A(_>4knAavEu!Qayj&2PCUW6??E^^~CQoHg{3lEN-)mtTm$A$Uo z-wuw-WhC6f%qg{emlbuVB~N!=^-~Xo-qw))Z)3jsfpBZR@&N)=3mO+%i(D<%M^2ZB zZ{r2;&>^Q0@8C~7trm%7q zrTEZr_*}9_L-5tF$U-n;Q?->wne7pScKl}dC^6-^z4r-WTPDsfz5H>0Bxc)4W& zIeRRhhmJQ7%02xbJ7ZN)YKn923t^1hDCE{rSRo%S_AAxo6sMy3SGiUD3gK0Z7XD=q zB$=8rmo7a$*KiBD+5ItGn3igZOba$XE#G%Z?Uk36n2x?qc$K3wK#y<~1^GZN7IIzS zk8pW!R3Yhlc{(S+qWeUe>Hu~IU@kuYYk;$gq}xgaerJHP>97Tl;G9LO3KL)Pc%zW7 z+Knqkm5+f=_P1_BoUy|<>LOWm3N6F-E)+cOcn#lTnF{qq;OqrGZqU zLdqjbVpCv$Svs#n?B@yJlyUA-nFgy%K12B_RTWALhzg9E9YDu&1mRJI(yWzbCxs7`6SCt`tf1M942EDk@UfjG`-67hH0pvnG~Ba4(pGzhjMs7y(!M3MEM6 zM1c!IX09nYQ&h+2*Y$JTS8<%vn0gjuQhx6HcbLJRzC 
zJ2C|0gP*Fjx{8vAZvn~&TX!F(^--{}|LDyjvu%8k@Av40lZdA@1TdzAW#wYx?0G~S zo;j|fwsepwh)UgnUU|~sYRKq#(!dfl@rv#WnKpC}fLF?|21(5{ zAfMKqa5~!ghV1IO#lv2S)D+#pd^LuH``2Hv8Jf3bU|Q)f9j3Lr*S?k^EnMoY1y>rzG6E&+ zul}F*8ZW+;*N6pZ)bSejI3v1EaoHnASaMQk!Ii(q9q1Erph7X7ppH>?_RA}#>ytgP&w$bh5 zZFZzlGtv$=z^s$a*%!-+ zM;N=r^L<%<#jWR3KM0P(!wy&bMJIEJiWd6O$+foiRLuV6Cwkgj1~ zyb0jzEGO}8@gesu_W;&i5zisX%)HoihBwj@A3HV#l6Ow2rjp$I&s#Wowd9xYz^Q9V zS?kf#Oe4(a0v{O-QH8dq79M`9i2n=s&VsX`h(k!GDP1ErsyRxIN*zZP$tW4Y`z+hQ zAa`~lcd;Q(S56N{{yE`wS?H^h6BB1h7-0u9^51)8Pm{PKkVgCqr|q=6Qe)J5n+zWb zf2E^GD5b*pBf!fsFifc^RBk5J!m$)beiBzXI$pg+Lv}kX#mj;ePYI+Z@)IL!U$O&5 zIJi}X)C^)drO++|;+i`6g)F7tQ?*-p)DKCEo>@A6s7GNgrSyunFFetjf%q* zp!Ap-wMR~reKAV#?sRFWEiu&cyJ*V4gG_>G!F0wJmH~!!1x{xY$Lyy^c=Sg2MVXXK znKbJth1MUDXKRD!e2*RJLbI85MH2H+xBi6-vG=FV#TGI(w!bc^>r!scy(8qDoQIMs zWq=LIA`=bPqvdVQae5)~Qd8BNRmGGElXE8#0gxqnoc(U1_~H$5O#$v4r$;8nG+S&- zjVyzDB;i6#>mo!Tk~H^=zKNx#u9p6Jka$gn;}2#aeJvjxzIu>qKT$dGOr?S;#rkWt z>wtb-GMy1no&BnQTs{|~Z#~sG`VS5;-`PSO-3#C;@@a#(u7e6Lwtkojn!tfsp{t*Q zygr=Rz#2jL);Bx8)gX~UJTgxmO(}W078X0URUBR8YiB|ZJ%ho$wuw^2%s|eHFbz1= za*~`?*`F)Fhv|6ojAW0RS+^9gls#H5_1}$;U-G97>G0T?Mt&A#!F;E&!#%vG8}lt* zI}j3ojP)B@Wd1ga3H_8p@oCak#Y{Q2uA5@uDxNM+N(G~%?4gPrD=?7UPvjZy+X76H z8yZ$1ZDCR`PLVaRhM`6o_OK0g)PJstSOZ$rG$oG@Qc@C?n{?tv;&`lipL<(Hf76|Wbh*>eisEYua1qy?rb`sq)gLi_0dXB9H`Rg?Yyb_35qe~` z-K6;m_xyerH6Of;JL?604DauE^5fW9F|hweOH~R5F(>khZ{#CfXkFc~VDhh=UT^vk z6l^Kcmc0^lnH_8sf5=f9R))%+=}qHsR10FJ(9}YON$|)Xd#yZ~*+qdz9BLgY!Ou|# z2a6A`C#7ObY9tD2&UT=8%y8ceu&~x0lr5Z<8*rE(rpKfI+KazUIHtFfL3O$Q)uqFW zoiDo?yo8>+*DsHN3|8JAJYz^f&ofSxL=C1@tuhFWiJeu!@!ZX4>&h%oMN!H#tHIIP zW#U@CBAWm0z?B;Oh(un$=05_9oluRS9CiAZm6~R0A+Q1~mAk&t`2C&Dy|B!1H`5tB z;m2@!m~yCu2B(cogdthc{6L)y!-wv6YM^VMl<`6%uCcc^iN3Hro;+GgY;?DrLrVCV7? 
zY~udaojMYotEJc2PS&Gi!R!Fa&+soI-u$MZO}6TpmhNMbmDk`|zx@tB^jgk!+Me43 zYk(e3fw(ArlhtH)cAR^K8{=bZ!W)y1Ka!M<`l+yinwBhPflm!4L8a`R9-dt=k8d-v8bny%0WXiuNJCy^A|o#Q6d(LQ z?sk}QgyFzHPsbhkzZEJlGe>O?0jUI+()+K$jqD1VX6nmh-NRDdgE9Z0XBv>X%l3a5 z;i)cKkbQm*Qu1oukE*k!KBOGz#6qkHoUYpT%$P^U{AN| z6cTDUyuW=jTfXBUN(rtn30x?HitgAp04KICwv=Zf#^fua$%lVT=n9J0q(^duy_HCF zooVNFP+0sH%H^CLS9nLi-pxn#OOY_dl)21#?$t?=IxN^)4Z=!PHL;;sipN8NSOq1x zktp^m5u4~XPpbZ|f*U2n72%x-@x{^- z)``W(G1PNe*RAn2>=Jr6V>YY^;GiU7TRJk3mfvMnGkjBxn)jcgv<|IBRA)@*oM?(4 zCu-<5L*&1=)#ajhE-!PhT29S}&dMm)-%ab&`Hwc^-!(zA-3?}UY9l7&Y40@=#s z10ya!`}M)GfS->NQ3UxVoW>#4jN$-ut3WASO~yAJj8x?PfC1X;0r>>K z;1V)EhaI#gW3ebRWwr@RIF3@^grLP^Bvd8g*_}`{1VIZ+D%=c2Fy64gmRe~}V+6xR z);N6BVOC|KeC38dOBux=WcjX@?@3(VX)!V3WhTO6KR*A&B+OyBseXW)9eKZ$l9nN& z7@h~yHA1|?+SB|eX=^zT&SL0ZGb^XTr$~7Y*z@oaCUYLGo2Ql zG50%NnSFNLsKvUHVEFKB;z;p=|AxJj*8|4CD}Ts@%@$t6~zu3du{7eYq9A$J#i z`B(XS!)vYv_RKeCuH!_DP=QHQG1ComO0k9Jw{U#l5RmjC+?WW7?Cj=LBVUi#{z?ii zUzDoIgC9cKe+7DCy$Ri~9kr!iO>ea2vr7Rvw zxi?5Pnm%}fe*S7WH5iC0Svw=4#Q)HZC=3b#Ye5oMnuc0XC~o1G&cRLO_S@M)ED_3euv=|&nWy%CqS z{B>H62O6?1LKg~|Q@;~eF|PZU*pR3dg~aCpEv+wUS!D18nN?&RmC+s_zuqUg&=-vW zQ5&UDJnQBWI61yq0XqR+=0y7oDaSDAMNq=XiuQ}Kk^hCtT%xK{9H(S<|Mwaw6v38E z4^U2B00{)1RX50u)?X5oN5hP7-&CslMrS|nmFJ#5QDoF?{%3Ib5T-!5 zXLR~Cq0Rr5m_)d1aFuxbh2s1Mrg^%K>OkA5n4OEc0TCFJeFUxc!WE$yNbj)$^$@p)QDrjPpE3Vmlv;?7yRKGAR zbxW%0RexGkWt&Vgj-&4&F&rf}qkLQl)x1GBoKhJ$!!I6*YTbT?-v zQ|&fG2`j#TB;}yMZV`EJB!w;W-uKN#L;st*5eyc!XiLPVe$-=@U5bt9$Aq-~|6oY| z#~DJcpy6Q6ubO+mz}%PU$%QTgZ6x?Tb}TBAu7CDPn+Kk3_09W zp}9kN4L2S0g}C5TWihB6V)HXiW5CO%AvN~0B|(E%DFwGG+8{hO1W2@-kR?HdMc5OJ zmM(ln@0N(^U&;}6%omAi{SK2Y`Y6J}KD1touh%dGpYB(dMJpJC{};NuWj&Sq)|y*m=H5spuvgf6p}9c(xXn)$XXCf>fM^JlBy zf$|&!h9FMp=Hsf8#>n;bP4@SCI*xO>FpH+J&?z{-Aa#;xdk@rxkS@&SuGsIX7xyB#FVm0j;l-J|JXcP$k z*3O%WPnK|-6t`B^A;qv8cZzxQ;iBvffRMq9f5@HyO!n?|Eu?H{myuQ08Y0MjsHP-X z;kcf%lQ|+3uVbIC-p|-pPafVazoB1~8PA}Yg6)_Ks+lT`^a0aKhOMZr4n)#go+OXG zQQEc)Mi-ZmO?<-6Byg2m+wvOIdaCSr#g5tXy~m*k3NrM0$fJx=Kj+9qH%bZ&@w-71 z`@{Qf8fCXcZ2M*4kg 
zlfoqXe1Mk$&}##jrQQ`j{;D8;uZZLK&`^i0dX(IPNAVIC{iykig5y~AJ zx-teF5HM5k9XKv{Yd~GT>1m?~86jkX;)g=vS;1l___|AYG;3!d*eh=2m6}5+l~q#x zEmbPMYb;FTXD*Z)9iZOOTuQDOC2Qmj^Ea+(Y^RAac8yNTG+EM=N?LV=c$f)8bZn!? z7<&|O?`z#ns-U~(-Ytls-pNHMJ3{F4%JIf2;7e)Q+8ONh^m;!t-h0`D^*iN%-Xr!t zaxxX(J-Ecc&Ee1(DWqngY*w6j4`ezxK-2vi$qozpjm+;EVsvzij+fnI4Lw{W(ok%+ zMZ%Y0y>;QtdwDZQawA#_Q>i><{-Bivpu^LCv#_M1 zl+rX;M^se>a#yqv@Oi9bSQ<74H*B9X)_YEPZ=y`db$8m!bb*XND55)&4i54DzF|ms z!(=Q0f6_(AApq!g)BtRF}{ZDP}q#;k-<(v#3x7zR!1J z&7`MSqVKM>OBVc-Qb#ZtAro3v00yf&nf&j?3A!tWn%dAT9P|%Hw*gpIduc+!%Q5B7 zREcXRO2Pp$|567pq(qSx7ykGB17Gz9dZ?J3=jZR+hkxo7Neb3}<$Y9d8~0E$soB;4 zl+xEBLHVUrT}+Ka-^DC?`MA%YuK`e0{$Ncu|43ikfj`>0738(7q^8=7wJ#bwq@7gl z_6m9}_`u?+4TyRCr|&Q4?L5wnokfoh2WElla%GlTsmj36!sg8M^2dY{%q!7Br(tS) z%Fw6)y|ZJ)P6dw;bKxK zzYngBOQ58|)|fB>_k4>s3g3j+7im2RHlBnWP?3e*JfE*Gm^KXqhKzr_yS{-Y9j~i5 zivWZZ!V=tiFzchdfAoc67B!WKKanbu#3?y!Uc?|RT`x6eaKN9OJFXYrc-KXgmw>Pg zYfigDmF(ttt3(OTf>^G$7Tnt5aM6HxWfl~Pxo(A90JKn&s&@S4gV>PP;8~377H*KMF4~Ff9;To5bD?otiI;;Z6993J@Nvb5^A#qL|bbgtzOzv zTk(Ahf~eG`OOuhxH#n1-=J?~+*l*^<&MYpi)^*M-+wDe@*D+gP=`*XmVw$pliRPsA zg(vcL2SoF5`)!GO;~R<;{xrELlalKD@s7Y^+&wh=v9G)@2#^un5ZRtS<9Sx@eZw~T zd|YX3e~)|`)sHRM-0p(xA$`zp9@gxZI3&{RmD=H01iG)dczLX&ll?9Eu20wj{xmA& zkbv4Iu(7w=YC~IKc0MOS#ss4llGdJx?n5H|hBPaaWhL1Lv@IobNsByIc>GfH!`Xvx zA*6H%vG^JqL=`EA*KpSFI2PBl;bSixB8uq~GoTrE*>e(h>YC~yY}MxeDXs0(Y(X33O_<9(@JY#b5^L8CsOEFKQE|B*yKA5WjOOt#?zIhzgT zyKg1R+xiK0@;xQV>$UiyLo;QJYe~*Xa7f!~l;tgQ^*qj(MQ)%!D)nI4X0=@F{mVO+ ztzQ{E^a-e_sSm`SZ(l0*gzAYg&cf@?;tusmt;|BV{v4W0TPSS)Gtv(kF0&?QMN5Yy z`Tc7EM=5}1EFOAQ2s}1Q+1Kj>D)?Cz`u4be@p52_v0hD!GTC%I(W< zQ_ccXtc8>|5e>O<3IS@sg18R2Ti;5vgf9zM9YtEti+*)iXv1UWYTd#-FTG6(H@O?Uq|qs(I`60iX5+)G>)Em#QK+?HDzR&XWq!P zrZwW6ao<-|!~R6eTV*b|igrM4Es-AG5j+_zE)%F0)bt}{eURwmgXc4Hy;J{fEkk6u z!2brb#h%9l;+!v_4-p^>)!LX9@B8fy`)`>!s;`VpZ@dNenbu=#V>}W4A zC1+lEB}IZ+I5Y81?IFL%nQTI51vpEq>xgdo2iK(H8Yh{I{2w4XV=#`6^ntBh4D{6) zQK|u>%t_-@~!hxP3Sdmx>Uttt0CO@_vwLvgM5ZC4=olkboh?p zF0wHEy}nd6FtH=W^F{E^hD!B9|uas?cLvm7oIKc4{Wi=76I>kQtr)CCB 
zAa?{6~;jPSB=-kG+BrBXM7*0o&ZeEyuVWUohShY;Dlp zS5h$H*v+^D*)kc&fN;-BIsD1NlV_tR&@uZ*52*ccnbYZs2KW=Vd~LH1)Sc|>>&h9u zEO0@A0GVR5^!FW16-Dq+w3qL|>*g6O%0_fvI4G#kb-wp?slu!tQGF7@>byg{;|yCA z7eaITo6LPXsBOwIAwQS5GNs2#gIiv+m5=XBqE#^ca)H2Qj*e$!5T{f&Q|r|9|Q|O`HNyZ zhGj)~HqZI!v!OjTJO%~;Gu0YwG^1eFNo= z#6V8vS$9icVF}MZiGV(1kx#xBU1$j$+Gx}xqh$MOo6I`$vy9k4 z9dfn5=v1Iy^++c7;k$Oz;Zp~`gz&we(WUBX26RdaLz9+N0kf%qO0y|tS7#MD$zW5x z99fsF;Y9g(4#@C7dJLL|Qvtc*9ub7Ge_ktQ06*Z|3KrdjE7H4GJ9Ma7eE`&K8>P%} z1<9)qVH`?eN~+qdiFg_+n{R1fUi+*FSA=+%phnJ&oIrSAn z!u>+p?*mmO{|nk;D<@V&0w2bC*Z7poF?jU6&27M>>VBU@Oz9g1o+QN%dB*MHm^{=VfonKwR(5)~7qgjV_)+T+yRa)OZ zsqNqK_V@?YOOCOgNNfTni2kU)LKOcGJQ-@#s;ZGjYnJPy01%JD*ZSxG3CIqWqmo>I zyz3hTd<5p45p;F-jO>`AE(k?V6U$L>hXXJH>Noy;t@Hb0Ln!Eo=^`SS*pk$D$VoW4 zy@LqR3Sa@`_9tu&Rpb~H3o8eFDYwOf<4GCfYEV)zXzahI+-fC0ld#prOYhEf=Srhn z6@Y~e&z@!U(yC8iP#ayt;yYLkfU15%^g!o#gEnH&vCc&!iHJF+Zx5q{GIwAqs(7fp zYXAJB^$v}8G2dXM+VK>xCx{ny+yiR?X_!ET^vMMRTfR8D;N(9ZZu4@2dVmVd$Qw9_ z3-S)%NvnGOKm+yCQ!ijo*dGa?AZ8iyy zltogVT+|J^;2ss8bPf>QegEKa@OO`RpC%Uh0phC3?-rH9F_x5!+kU!hm+=R=RaYQp zo_E7Ubo?!@3j+&JwZjmPpa9E6`+oh+_Na8X-tBg54a+@bAvcX}R-0WIMVD zs~R$(RK9=ffREEq4Tn;(jfE6#UDlIt?YRl{iPdZSMmQE((5J0FsGfp<#iR)f`d?l4 z01k|2lXu4I}wJ`qg4`02N z?Q6bnrI2MV0jpDVIe2HGo|i16aUTMv=|reULHv)eX+IS+Vxd++%lR^7%o)^fn&R_R z^eqnsUM4k_=MJy+)2Tf;1B+dLj z-r%2}Jb5n@jyuv<$m%4`iYzm2#u^I46HRVK@l{sak@v-J1q}B1kn}SKOGA2ADjtlQ zIyroL(>7^rQXo?z+c#L?{+J$c=w=m7E?AD=iG`W&)o(6hbJsFE-F^0m2@y&afkz++ z$=it@+JxXGY3IWuscN|cUWI5qyGjCFJ!Ja1wWY}VcUZ9~{o!-8=Tl2EF!5u_Ll@bs z(9`_y6OcL+Q3?QrEh2wG|D8F0U#}4v6Q^H<8jyIE-s2dz!*R?8pY?dW%|uT0H&x=` zurZyH3AAaqu`9U#-~oS=F=$|eDJsCf`Sp^V%cscBXv|*8W&l;uJIQ}KBqLZq6Y~Vk zN`WkqpbS&+u-`0P!CU5+yfM?ifQFL)SKhe?Ogy4g87#~IOm_mGbj#;i)(eUtC5eA; z8t@dM44#&WUi`W>oF?01CUzg{FnqNdQ7&7YN#L?;zx7iuvkm}`oH06DMG~N*E4qZ4 zo`#(e6+JpM{MCDWyldbIpq?su%seFn6SnT$U`9OYIINVjMvo>ar2bIUv9DU`Qj=HF z2TDP|25D#DK3@;xdVMN?2EHm6+#Ft_Ja`Q)$F26ERflb;C%RFCDO>HfSuBaFpr~Bi z2=q0D2RRboK@FktYHF-rq4K&R8~Hoqbr!TY=be|2-Q|3!qk7Y>rL)aJ!q|oUi+)%b 
zCum*!>6+s)FB>NNKwxb;jd@FKlniz|zUdyB*>ZU{jvuKBVEr;g$I<^D zHC;Ffe;wz%lq^mpYO`E$N$kE}nn3N@-w?%Ra6^4IND~HqJh!I3_K&HEUbr1ASwns?XkX;~pf?(wP)P0w*AZYXZgwI>h+tXX@O z?zmYqzVX`c$k@JJCtkwHE59Q``8sua&fTfjeM;I!2l5owT5L8}gyRvfnYKM^y6l#V zlc&9`g~e}@S6avMs`4Tv9AJMN5V0<`x5$+dhO$KQ?2C#KkmZ*@0v;YQj#e-Yx<4>* zSFAIqIT)ZR(Z6w!v=4D|;BxVMiwThyvnMY7gPmS|3>j`H7Y|5@?3;)dA`pZ>G@fHWxakRQ^~z8VN!=8Od3 zy$%timz;~(>DKUM|0y2XUwJH?691EKyE{A6&>S3NwhIjh34H*P(4*>381H-b?zG4o z!skoCrzc~Ka^^5)kxrv2PflcteY?h(2EeHc-rKmb_sfV|mr zSMLq22ZNWt9b+!Luj;wOw?%C-krLeR=~sQzIuj}+Jzw9dTUu)iS6}oaQsbVpr4$%x$m>u}itsTRM4)rlqQ)+u%D*%k*MTl<>l^#6KU++p{1#u&FGjUIju}?(KOE!&5mo^Kqid{57`8rqHx5 zB7%^Q;Zg=-x>C7tIpO}b11jc#J1&QwfQTFt|DoF*V$WYxi-QQxy5igdlJ1jHu2max zVWQR8R;s~6r@^lC&0L4|Rm{5n6do|U-m&sltH2B$f4_t~P%PJkxsf=9ZMAokBU4AI zaFA=UK+m#2jz`3=NKeR2@E+PnAOd)~#1>V{Q*!5Ds?OJSJQQc4XHkLb8BfnqAba_k zLHYd;3F(uhq%|v;PsQ+ zJ2!Jf(RIOwZx7ki&|%9%;IASH8uKhSbq1M`h6Ex)8lBhU+4F%v34G$-wOcy1aJd=} z>1q$6Re1<#45Orcj~}spT&Ogoc$f7sNAG{QaUC`J{0F+cWTP~u#E4s4$Eoc};`Qa| zUt6*>Fe=yqvJJYi?R|V}7&FQlsSR2MqjZ)QhVerMBI%G`Wd3|5^KEUDY|VL)qG7NW zUg;_BW~d2_xcrxitOp!@1Lxz)1ZnAmEd*tKw2%KY4~`Jc%2QetA;Wq(iOb75K203Gy9~=3C84T;)?Q0ABvq3bPd)eoB$(6&QWUh zGvrKHl%FCu3RkGqrA`Uda}CgbT!UVA9eMRxy_*xt7--1IRY-^CI2V$GGKpvcgU@qK zE>4bL>Q_K%{8CfvrxL)13U5Z|GbUsMKHf#%A$wl$Jlj4UuVQJwtrxCxk2Q~QMtQ|b zUtks2D!Bq3sKLo|aBN}A5+cK)+CN-d zS$5KSU8%q377dfd^h4wzJ(|kYRv`dW%X4MPf58D|e^u8kxw0#6rjTf~e}-Aw+N=eM zl=<;q3ppdfT+BwHqzo6JQ?HrBLcK2!W)i|#B6Ff20*AmV>iWXWwVV!0{~xa2fw{7- zjTVi$V_Q46)3I&awr$%TTOGS&I~{gxb!;0qw{D$V=ly=ds#5$5_Sqx6)V@!7b2aH8Iw8Dm%PNV?2P&xd$~ioXVWhZTe&&%U5R` zut`u*mBrzVS0i9=gtpLAgpE7cRQr^C*TH#b1+nflE(B_W&)^+P&IxVZff)Aqvs+9F zgsW9pXbs%%nvjwJW_=3pkZXOUak9h+wq7)r;Ay%ysD%>+1)bSog?KrbSlUF7?&hMi z*DyEtA8Fn{XZN7t_h7UKIY4z5YN=&xR}A}&`8dHr2H=_wax@;fCq$S-6r`;1ON$bP z^PS%l0R-v0!bwmh22m2>NaCA=n?071!MX0ww9EdyyH!ln#RkI!7+TulV43GGHPUy$ zSx>|_fk18O%DS*KhfD579U44VF)fExjA-zI$gbRp^G#QSJytW1d!lsutMQ-6kMBHq z$DKTc^|IT1yof%}pcF!AVk4Te%|#A4%8>?{Cht{k2gH3n1O&vme&46x%^fd_=$dQQ 
zTiWeEpycB=;ybTEM^t$rz%-*NyJ|z;Dv-NTq6|TZu;U042{DZ$w?Gm5LP&0yK-hQn zgTbI`1UwRcOikY@sFA2}@E-26#vW7QAOh71Rb1}$p760qqXue;H&|tW*f1V%^O&~R zdtMOE>!Z54HnbFhWyKYxV) z6kDaAOqOirUn>uda03VDuQn6*NRUb2kp>7Ao%>RVWf=$|*b(o?|6cO;gTWps<*D?Z z#&G*wDD(ZGC&?K~Jtj{>0%eEND1h@waaOB2NA=LR{zGw;>h-{a^cWEUZAqm9`>lxq zYv8aI?hgV^OANkc-GfwpD)G5@c?;6WI+R~3<=Y~IwxNKD zaS^wpgSp+pK>TYXVywXX+Bhzm+i7eoixUx~!)~fY#Xy8`gf%MVk)r>r_fMIkYqP0I zCWoG3w(AAetX-si-Ka_5sCCn@N!zGX+qiYBfUc$d57g6!pySw9M2R2y1wdZuPBva6< zoZ}w6LnhdQ065}BA;r8IB&wXR<#ur_qtdd7zXp-u4J|J|c*?iSa&lw}N?4YE#wR8* zdz2`(lB@qV7Ze?aT?0ZULsXRslCwidg)zg6XlIb0W4Ye_p4}?n*0b^P+aD<-Y~wTPQY@)XKUX6)dzuEye^4h8VBOsc_Q0cIk7g*7KOFqcq-M5* z)|JbhEyC5oU`NrTo3lR+*o8YmXgr?{F;w1xPFotR@NHGq9IzFCw^}d1Gy(Wj@?4%h6s;9RT_K{s|T&=mQ8y+@A-tw^TV!g z3CL$DL@fLZlQ8jmxPpYl7x6SG!LzJ>ki?#1G^Fej@A>@?F0l z8%<&`z6GNj&4x%pe}_hh1ImFTSD*i`Zl1nwT32kB+CDsEvWW!t9wQQ-@6(am-X?r} zH7^s{>2I}3t>f}ltCfd{Da3^@Z;)vYAF-)rvpl2-eP^S=H9%>*$wuI5Xs-gYH2k$2h)S zLdyMRcX%^vs+-ylW|tynA50M7GrOd8ZH(%)F+N1AG9Odn zp%3PX_I-ydjqfb2wCAiA!D{B`K1A$Nu2&6sg={iYLllu?m?iH7Q-1Eh+gtXmGGpT4 z5m7bu6%0slY&Er_3ffe&@vp4Py#Sy~aTYH9hg{u#!hu1gUqg7Ub!10>ixo*$)>QG4 zss9>>jmTWy7aAP&?3$`6}!u0#8R)zc^n4y3<#K&Y|+F zD#Xy+rnF0Q!`3cll-8YOy``5@OGX^XncBht1Vc0X=Ja@FhL=->cDsjm_iA`DJjj_2 zrmxUWMk@_n!*0J7P6}gpEn854v|})NjkZ2zJVgH2h4<7>fhS50xOqxQEaB z%?x{EfNZy@C#NjIgaj8^S)O;9iT%n4!ebc7`(d4!N5CZzq^80p@}1y#Tk+4OVdSO% zUNqus%oDN1HiHk#d-q@Fie?9&eLvskJ6b2f(nNy3swpR~OiKSi`1jIxx807>##rqd z+ufKJkZL0=oK2;9OrtA?p*6^G6hK%Qgu}ff?!1A)#wo9x6|#rzu0k zyMkzk7{T>sK+E5^)!=un!{=Ad?ScyH)K1g`_lV9XkLTx2pRTumt!-XU;O^`6LzRN+ zaHibiMQ9}u=FKQr<)V_rDb(9B?MOPbSmh@Fq=wF8VvpdgWkiVCysVB#woE)M;R^Pz z0s>5T{_i#Tdk*u`KZ3-js|U`8JPlA#{`~Z1G0xvbexLOa@b1Xa5nt;w-9e4v~31DX(H`i6SKMw4N1TqY#tf{__U}vxfbo zWdKJsG8g@XoR3z6)4UWi1l-T=5Yp4&<&ezoF)LOdUR+33Bx7C95=2mQS!8Ppzy%ZF zm~Hk|GtsbBN@o|XQsm_|{Jkoe#$Hq;s;d^M7FTYUn2fJFPl}4$fLfhsBpN^IiGoST znuily_cIb3nqys(+Nz@S=5$UOkp_dKLatF3jsefUb4GgR{u{@Fz%tngNpb3;Q$A_6 z#LYWQ7`v^v*bxD%1BeVint#= 
zMvRI2KBhqZ>X+Y{wZD_Z_hxw43=Lv=rHS&76Ok?*RzU|rThLWaJnAKmllpgpYH%H( zx1qr}+{>l6lM#KP1YB3X<1=g18c&tcphL z-C|&(`IwHo=1XN7Rd^p)QH9-zHXl=#~(IHV09 ztSjoZG&1sM+U9h%>$5J=_B}}GClEjN9VQEKAR3+$6$i$)WyHn;kCU(FTGg(`&2LpJ zOB(bG2XXYefpxE0s$R5;fNV#A7uc-RHy{j=}i$4j~T`O#%`YE zOHB*m$gRV7H;noj*EXyzMN`{3SLjowm=O%5dk)NN1~B*JM}71%rl6UE^cuEb!WC=a z)QGl@0M>-ef-hZSv-5uZ8Q$hR>_HRPi&#^QArRLY9TpH4Z`9ODXO@5Ic?o+!BdGAD z%X6Fn{Al2WXP!Y$?!TD2-T{I4`LCg~zjuos0WXAKuV3#d0q+i9u@Tuk<#yl=)F@c= zs5MEPs9Nko3@rOf(W$(wrqQBh=7jbYZ6SewL-Qk8LcoN9urvKz*%I-n%Kshj6T^go ze?Kt&waa!h?m=vo*L#uDG^_)NK0iuS{ z{!wA0px9-VQr-V3c7)~B<`?nM&jV8S)wD_Zmk)Wibdl=`RTbr_L=KU?F9bo)*vo*@ zw+{+tB5rVD1Y{rP0Kt=e^BIR%J)Rv!Y(Iv{pH1cxU5 zS)E1}Ico5$`Y1RZ+{qU11XaxmnpAMj3FYwvITAVOT(l1Bqi1K8-j8WmEi9Vp+v=z> zIB~qdjzQ?)8ULs(>|?@E9{2(41e1a1Uj|K8%A*ffAY9KZXR@t|0g{;G+wx(cW|uh0 zELBA}m1xm!CZEYXpF(Ye-z4#HFuB57(4r4U4^D#7z_x~q(Zuae#fgVr#r|gz&a6RP zEp1i}QZEf;TuF@_mB3(k`v<;2oV3C_E19)v%RYlp)sO%@3$s`N;vaydPe6l2qPWlF+S*g)VTBu9M zl7JMnN!~A0i)KQw+{f2{S7?;jCNtrO5H0lmhcl_YCC$0tv=*ZbpB9M755d7J5s%Iy zW(OfG7yCYBv8p&~2XJUZn1z~~?t-sM$zqI}pUHE%zo0->D%~t2bJr1=WC7%BBBCRk z*3gBmfosB*s8S`>5>RpTm=xXRZ3-`%?Z(`Vi&s$<&4(K$=IuVW;CKod&cgM3k%52pL)Veec6RCZHIeVBsZIW3SDj&-(JNL# zY4Pq>x@gNYA1`=jyW$Nh3IY_vlqNt?^?mx0Siara)BYbVKt;gxHRcWg zyJOu7X%HK~4(B#Rp$j5yv$hip#*_6+q>x_HF!?1uZjo=?Ej|0*IkpFcw2QU-AD5Vl?hkRv#QuzYTwj>l#hSHi# zWvKpqM2uwYs;Uo_XKOR%VpgA*l6B{=9P-w5yn+!DG>fse@@Yp`ycnVobfw<5+F`}( z0+cp>|MsthY=3@UT^oGOe8nB$2z2-McRFnEZszw6UL;&8Z8<1yEK0`Eqy9Ei7grt~ z%SNvlL!S8d*%pKo2X4X9vV@38KPIpce9Y&w{tvCDi=UL}eL{0-QZUHszZ3cEi|&+_ z_O-XXJ124WbF|UVMlk9c`LU6#))}#zsA=~Gf%YbXSX^^BLf?3YNfWJ)dJ~%8!m-Q3 zJ6W|BX;64k#0ORW(4tM$W8-6H66v8lJr(Cukuyp!1;TAHpI{6F3=LcLh;|W_kkC=- zdslR=6zI_}Es=TY=yAr~Ne#9Ad6lZDZo4AVeMh#?1F?|gvUCQT9*T{KlkdR=G+`3J|ffx3q;!=b#7e2i5Xc?pTt-PMN#w)W~ir&4J)N-yIGPeSe8LBfdeIUyJ zPC)ceHD45^=B5KOZPfiO>O7aw(Bd5eIO1gEj%dCS^mW?;lwyIjE>xijt=WJrSeTxQ ziHx#7aQvsF>^#0!Hq)3PP0CbnQMHrLS0Fa){tJ06gT7H1b|s4L$r(0b{Anp2xDf2Y z2l-CPSSS&WzVV;@00H#hb(c%w48}dhq5(|$VP>)!8&+pVq6P(a{a`1aG{SG9O})Hs 
zAfa3Z8@Un&#`fB0AL)h%@S7g4@_5>`?1A9~t>KU+8SIzkt5}0U_)bG0kf5#*^u<`~pMgtw6?FSu!TWiB z^1J2~=_o)yp`yH(eyk8MQJ=@zZu8LSO22`{$~&|4fr>q!6Q5(lkuxL&9mCX$KgFGI z*1aW%f@YIxwl0&WxnXQU>lybx0S@zjsV-0T%XO3Sgae}b_60yz2*^n9UDp`*6a6xo zzG4pq;Wu+#bOPsi6uLzg^amzEg)TD-pSsFIVeik6s{pKQB0+ z9~=UvzI%-ypoYu(B8d16eR>Brr|8o-kyd&Pc6ZGoRU!Adg_T9e*3i9D$(e4v$O~b zHmk;HouBADx;)R$4`zLzy+&@E;Efu|uj&-L+0cqRD}(Gh4%g88tpNJwFPs*uYOhcc z=e$MFL+}QVlc^9RnV5B-JU#WUQfNa16*W)@X!HVC1*cb3puaZS1+Vp5Yo=$+3LgLM z)9~W|b)!`q_t87A4|4}MQm$ODuR{NB!DIBO;h(|#ce2jYEe~yozvl7vo)Lfm3bFfu z4a~_Y8AV-7+4a|3oaARVm__9)xfghWKC=X5rWiW znMwQZMl5A|YgERThq{R{-i4xApi_Oxk;oIqSj*@r?yRg1fK$JebLI&QFL85)qBk~2 zV_VYgK%u8uG}chlv|oNt7q(+L4%cSGHygcjWmYatnRA)6YX0!i#w zqyW}P-0h*|Pw=N_axdmm;`?{N)VYx!)0kGeQdP9-X{ z$fgQd<-vC$g%`ShH4$bYgIrzy5Q_ke3$f_5Ww#1im=bJ7zTXsQD{WxAC#!d-;7pDd z^=6Xn)PJpAfdqXPXPAiegChESbaZ}(=-)7g_zYd+Bs`IAJ{(?F?IFu(Kq61x1WLPES~%>=Sm^un(Eni>Cf zeEN#|dWvfBaVn#q(f4kwWB5=%@-an_9l9ORHh5vczx((>FRDaJPoAty{L4#!lQ+_-O;pT9K67TU!Vy{Ih6v%b% zIRVNPQtJ` zc|rVCd3YSs%^Qg6TMa`I|vM*A4&MX$D?B7TwgNPSs2J;R(|0${V`hBA)V-2B;S zuT%x2O>S8#9))SPZoIC(k_x#rjG4WOsWwj>g_>5IJdD_&E-4DJx)50mXSGqFgn+wh z*h$^6wG?>=IoCvMnyTg??Lc9F2yY{3mqTp$%qiX?x(ZR_h$^X{EU$(DZoIdv0MKuc2bgR5Z)?Eb7>usf*{FFQbJ-|%0LEC^g20iG%q#4Dx z6-J_f?KlF?0i`;}28Dob_AXN$sw&LJ3N+x zy&Qvn6oa0=e8knMO+4kY-@ts$FpeM4JxKH^5k=I?SnvSG5+4+8Wka=OBRlX?$79p) zN*|9~`c*^P^)Tj0v425Zyzy(Ci!V6WB>Fpph`;0mcqyINvW z={Vlc!k0XPa_$Q~E@ABqZBOw8xTA8+__*Ky((iuS%DpqOcwDHdI8t=q@hVHnefPzu z0Xu22m@&Bm_b7-s>6)qC#@WH==E|$L>~CUzHKVExA}l%96TqBVOe-VznGr)2y}%~* zvzoc=Ihsj>bR0jU_){9t!yLP8zpRu@%BSGys9+V&gD|9Y8u z{!I9eO`YOf@Qx{;%~*iN9J|iHNtijG{J%64|1UQ5@4^QEry6|s{Fs-7?nmN2MqNR1 zQJKvLgQ8CKXK-9CI|mLFvgVB+$4>#qHHtakQ9@(n7d?!jdOd6;StFW2q4cM!D@I($ z6L_b=CUmxH(sfLyXiVxcD&4e(Fx1(~L+Rx>nX8_6NynlFQMA5?YckgEHc8t~V;o9v zWnlMT?eTCXSo;i;c#y(&;U!P142aMqU}GM&w$}`FGD)Gy1v8{=ZzkLhLy)1)h90~R zP9HzWoWU`*h4bu>uMy*8m$?7}?_vBN!hso>NXJ`a*V>F9=&&VH#?v;|T`Z{&V+A)# ztNU?ArVDu$Mj+pcjfrWx9J%zU)k5&Q*QG{S>^7#6F@zU~L^d@w)~1y2xW 
z7Q!E$f(_74g0&9QdQnb{nCG(=yt;u*C>u>9l73WC+vuxR81b!0*WDCxqh#^KhLO%i z)b*C(vk8}+nKH?ySWv*qkQO-x|3*Nlyo-F-)vRFz=9zayQLPd?a2|uzCUAy9?9N8?1U*XAFbx|m;Gg8>d3_v2veZh;V?HRIE~19F z7nR3JM)6g0v{7!`BX(gUsxIQAqPn-tYDrzo_>z3qpCBA@V%L!zg_`+L*<*-rqVJH$QClqWvhb%ZbCoYH_}^fql$k58QNE{srR_oZ-f}N9bZj z7C~v%GS86Z%wT04Vo91!h(9c7(A`OF%E}^;Gw07Z`nd(YEaI=y4X?6|0w?Mj+&$ac zN0GKi%PluhOh`z*u!s3^J1<5u+?w_^DO-Q%`}0HeeZKFOp3uz;Yf)FQW$s=6^Eu~$ zJ1M1Agt@xu?{0%WCWQ7&7%pU2@Z+F+xY9)aXnD9C4;g?O-Pn1ml-WRvp~D~J_vME} z+uOV~zM}iOU)kQ`UqRy!F$^dPgW4rQsZ=QDp1!{qOt=l`yz#3{GpzSU3LwVeAThi8 zboGpdnBj%)RM#%{ikWuU2U+KO=S{tc{g;s$U}v-3F#6|Cb)8g3WnXKA z^?2_b;Lzh6^L5+%PR81Ic|~-6^ZhV$^Y-+7{{g+x+siZ8|3Ao2At4|H5PJj^>KJm+ zOiECZx;6a$nb3DRx}!)Dl_K0S=b8~Y%YD7#cn;F~rRe^rT`lwtxiBX5veGvK@^vO& zg!YUKHkiR`<74)+k_o9nx1$#+H-!Pk8M_MRei+;#hUsNHiAfhs2N6~FQkU#l^ZP9XP3*bR#d?|^6AgEj6wHtAetGPX}r^}X~l+@+liLl@6m|St9?H2 zMyx%>@O{P5oiz)rPuniG@)zef0e!#q@m4J_29!T_5EQozTE)Tbf_PB~x0R7ap)=%8 z9FiQBwKS@3!y{X!pX;L2bQ|@YD-c>VX?I4*gP2uK9!D(#xXx~7WF@F?J$T3m57>k&s4 zTfwlY&LYrf$MW>^GU5Z3MeRvN@%Pje7SmxQJ#(aYt0__ddLf*DH|p9&Y4GJ#cijt6%Om z^W_>5qsG89;&G3iZ@0)t0IT}Vyo5Ookw@2EJiz3HJzEjfVxA@@Jh9B<3=pc+DFKrt z3xYtXHAld#=o^0dPMto?P+&D*kd0EGUD9MEQcN_EQDL=IzreMH7hFK8eBfJ}LcO3H zc)y@EbOu%96Ql+cg^8@f5jFN=l@GNcv@oBfaGbYU*+v9u47={lLC)<(l$&wNGWqD~ z_ukw)pOZwgG7x3D2cFjcGkFg=!f?%r3{+d`#G6R;GU_f7HH7dhAW)+72|&7iwV&v} zQ`V?wf&lF>%SV=nqwNEGN7F`<`{5{pU(e|iM%1H*JB>c{qJF%~B63w}bbMLu!!Z^l zj0G`6;Xfo1C~^zexwikbv8a6)@$+O88ZYhUao$Cs+p8WT6*-+2RIH5Z42@#+)n z4fVL9z8Fupdw(DSxmO7!yCh={jnXPwF^v*)WW9(A=V4CO60UWGKfzLtz77s?Ha;*h zP!P2>pwc`JA5F4V@;oC$H!t&|xy88Uw?0gzm{<=UL{TE8gjQjVWLl-!7eZQfrMoSc zd_L{tIk6NSq?EGI5N@(~vN(ng-7@Wh2U0m+Sj;f~yf0V{(8o|zE8b4eNTN>)SOBw= z2upE-Zb_;;t^d#Y7A{*sr-XZySAS>fh4cBt;R}50J9D3uuIO*MJ9+d)Jgr~)1bq?J z@H^cgmPVbTLt6O+#wpFef8dlaHnrap7xRC1auf^a^CW&Fx-u7NteL`y=wB^R7AW=}1h(GD9Yj$PwJ@=Ew~5zo@t)vUp?& zODxF4{#eZyE#mTLZi)+BWNDWxMLVjHC+Jzd zPf5eLbckhuheyk^8Yrp_gYMnJN~tSTQZ&(BrqZ(iU|F5V&_tGXWbd#K7)EUKSg9l= 
z&?Hc`vBOb=EFr>0Re#oSSN}e9pe%m6Jsv(fEDhOS+wU=1%>3g9Zn(f+@4K>FS+S_D$|m!0Yh}+ThiOpD$5=qV^@d zk800?*GVYM2ng1R?@;H_gpt5g>pCCMofw6V7HG<_b}_$!BQ@AXHd1b8!~|Y^vSy(f zqtvT7y18B5yt`)`j_^OUpgzT($>LMQo|e>jJ~bh@bQ?^{IkXF`cZMTM_tkxasKa&H z{4eoDQy2jksVAo0*{|O;@j&i{sG%vBWU^n=Cx1Jhiv4CEHf7Lv0^$kgFPfG&x%3Sx zkbt7xk&@82fJ3tE&@y1%aCuIzMDoy*%Ag=29nyYZ)KXXDWA~|RUEDdk@9*Njxrj^P zwq`Al$%00={+cy3@%Mj8s7sIZJ1Gwo=e?M^BwJ3+pE?V6G2v@Kv=Zas)pyjd z1A%um|BpkDwh#ZP2f3gN_mB(cuqS~}NE8~~DXfgrAB|M~uBZ*!Vv7qan6$Rh^aeH- ztZ|DUxTn5QQ_5Nh#yG`hQnhd-v{LIK*T{z;)sF2;OfM^|J~GXZL^;0blztc9@gE1N zsyR>65EHf5^cgsk9w^V~GT`4&?z+Ot<-ZMfJ&6-c$uE;8{E91jXi@-=JAc&2kVMB2e+8(VliC&d+BV zUOn0_`8|BeLfV&zUi}+4uUwcM;Qt2j;jc}%{*eaZMq<9x8mo46Deb%Vzj$4R zN_f6@2^HD6{g(TsW-9kz?jfM}#+VR$Mkbz6=g12qd-MXRFkD!9SzmH9mart!d8$VG zIF;2o<@-7Mbku`3!z9Xg557E_M3m*%G|u$0Ym)(;9|jvPa)$qQj^k9R{WZq%K#V~4 zh{~PYm3YNU;OPf9I2rge-Hp%we(@KPnbIsh3p21~f%rjzVkJZc{hZLeK> zNoRO$RgyNve5AH<0mP0aKyN#mVf9jroFk>q6*|7557x_drNJ^@Ztq;97pxMuGn)r} zyJzqsLT3!DO?&b>Kj01HvWfuiRRdF#A$`SGS~8b(4wk|$bX(Uq%;C0o@V2P0R(H4X z6I0D4Lg4-G={eml% z68aSj{x-Ti)9d>kQ7K{oqQIbcqYa$ZH-F^|jcUN2N+`d6CRB*zBd5<7>6DU|Pc&aF zjl_b(YE1$pkVXfJ9_B_>EylWaMAr6(+pa-Vwjo27NeU)*j5!KTKiq!EV*ZECK^>7g zRSdl|RgT=Rv0pf%s3g;9Lsk&vaS~AdX!}Vd=BU^pzOf3d=TpIcXx z?W!C3*J=1)WywG48;4o&f9ZHWlkuZKTKV@wKg8_4;{+92awc6jBH?3PyK8I|-!1xL zq|YIK2xt6P|L0Guzg6!(!T|Dy@KFLVtm&qGMb8f_gF<>+)Qt|Wu|D|W5HyOQH5!f0 zO(}S4cw4T55G~vmT!WMHl(O3iYchg`D!S!vlVh8xY)7ereA-b*xIEsLktV?e3n1j+ zkSPqq>CTnn=^zNSk(nbj9Lurkmwa+6?dd#FEKjA8--5W9F6w84h+2|EI5P;RJNg>s zX}KJP@+|gE3l~SqXh8&1G9hAHVm!6s2l@bqkD!Q_6B(_`koWNb#Q_;;JLh!Vd!{67 zPJX`0%`leYl|z#67wnXGTzOKYO;DILQyj!J1uA`mqHAcxI@pB6w$a@%0)x2&vlQf> zXSixW)apLo5PiK)jS^k@%9gF;+pfacjKJRFx<bVd5fbTa08`Lw@n-miR+9TJ1Da9nH3!Axl$zN!XU_@sxl4(R#%skiS=D;1|V} zUqEu_NuDVBEpas^WSj!MkI5P-B2?tBq@M%ECohYQWO5V`AU3}+o?YW}~v+E~b6mY(4 z0i915y7F~i=lE^B&7GkR^}6??)uihRfS>r>pNRlOIr{K<M@D-gCp;|DhI&0B3EWHDZq1CitX*mJzeV-9U^h_dj94`o%sht&l+ z$Gu{RcVhfaCQE0U`#6E;4NAJ9+tCO(&gA4VDgyA$1O!pjP(-IJoFfos7F>i%n*&>5 
z0hG}vmsiIBeCSc0{12iZmU=xuM`S;`v{R_z#%@V&dpMenF}E#c5aLK(aIV~id+dm3 zo&sG4v(TpM2uRM?&%=E7ZF4k-8t)^;zRVjiCww@oW$^mEHLWa?y2SGGYl(EDMVaQ=C6?LD>mJH-~ai znLtH5ND`K8xZe_Xe$KyL!`p*qhV{2CuLAla>JU1Gb*_c$x5JqMlrJ=Cp~g7%YrYOC z-3`c+WKq2GOjBt4QsKODiOE1!;ELz_Kf9>TH}c!ck#r0A969Aj;?-)x`qf$#i4iIf z{#-+jT%qKl6y(X!3_9nH?zh0oI%V1aqXp#wbcq(M9FKOZXu>c2<{-s1%v_2&J+Y1BWxr2WNk=XxwJ0ofHAleXg*j_^4U?hDq=i1y{2=8o~!Ts>N z@alef%Rc41xtCVVL0X^nDfVy_Fen`Q&x(C>MdsfCi0%D7IaL8SQGB{QSjqlZA)07o zkQKG>{okA#iih!5nfIJp8qW(YW5~l;=umyoC`d7Q4UhiQ>zH4ZP%9BPT=FwX(uM*W+E zTKc_u{~J?<^B=CqZDoAULO=20YK0{$SJw!6v~BmC^&kT0T5UH)sk4%pX4a%Ms6I!3 z@FYP}6QC8`PwHBCN~Yopf()b2MZ!rYWtDiV-6NEVLs&`Tp7<8NXg0X%Ie7Tc_AUWt z=oLVhH4d+k4Gy#^j2@?C72I8inDkB*W@H@$bEckPD$d#kqIGMyGCAd=87av>Y!>MP zAP(_W{f57Av5Dwk2qmBR18?Zxn~9vJl_*6=V6elX^6Ywt zLiFhva%&r;;jp!Hwrn7RHXp93v#*h)ye+2dwK*G>XU>&9>E!Mn7m>i}`E^6~`LLM~ zDe%n^or1;O!9xw1H~nB$7RAGXNuYo*+`ZY7&j(e zf&g`jb_Fl+^xZB)JnnofUPPyjq@4%18<$FiWc*%zlPS-8OyE?)w7buY4=())H~O*f z)2GDy?ztZSX2JKF@pwrRp-o(p-ZkIf=u}ODC+`@J z-H@+!16mv5Q9zmDzk6-3u_nm3)Zw$;ShMKIqMeJiqqw2kI^lPxyW_J_4wZp zvd8mSE)4^adpB)exgmyFCLl6#X^Axzk@n)Io4wve<@|MAbF*@BJNO6Mp ze_l+m4F`j!=8OTox9K&?y>;X}3v?(ua=fg=g};A(v2_Qjj`Sxl5?D9(G5X;XC4m_tR9yk}r z{sLu%UW1xNFjWH?I-M0v8_Fn!ZKUHcRI(8wE1g|HXpswYYiDNYRSFXw=90nUnn9}# zvmcZ6X=f&eVZ(C>aCyN@KLKp_eg!5F@$X)`hZuZ)I9$I@;hC<6uIpb)FW8NTADcoc zq+6p(jFCmfoGnXIxNOi9IreJr?i#_DnxIR?O*xPLivs+eAIk6OYe%~`nh5+0~}A~V?zgZ zO$H%jok6V^F&T}09}BJ;2x&4hZsM$gPQSSBcmm0t4T?3oY7~IQ2UvtM6rLZ(e|kXp zi?N93$Sa{E7>FPSo?HIp&R7uLG5mP;q(gSB&L_khonVw73bKgJBU(t}h?$ujWW@L- zEf*igX(gbH=XBg~c-)fbgiiy30#)Nq4i#~b7vDPIY3M2#DmZ%ngNo!Ka`cElhAi>+ zlsOWy^`gnd^nYLwB2x_$H(SGqd7 z2}qfCUxW}CN+4wS=_Poeu_g&_k!{X%`iGOvD6z)D}x6vc4( zTw!ZeePo{LRrFYh`dl!st8ghBF|?P3>@78;K`TX)+!mN=vn(o~oSKLVSVwIEjj?>H zq8p^%qN1CLFRLrGcH(^sg}P7$u}xexsQCgEdEp-@yTZbMkkX|}#XBY6c31V`LaS@zM+E)pv#}|k;pjXi>;Q@B6#QZv@6IBWY}S^OyZicJGsHdMu2w*wCjMAK z!Wa(QNF{< zox2X$_WgKj>-a>ya@hK3@O4kmzp=6R@V)pj9%|BI@=_dk(Q^1YG^}H;sOvyEM|X(x z=l+{=ohBY}&i)QTV6~O}-_bo(dT# 
z@nfd}1-)tTu^HGkFqunSxLOrE)b8EIb9FrZ)dUtee#h(+bMgj{1ZNN^0vQgT(Wxo8 z?RWENNs1B~b4r&E3fxY}{#bNNzh><()qyYODSpqd0BQWHW`Px2%C&nc`kG7FvaijX z3!4B29;U|?|6x$W($@2+L+Tp3=jxBD^~3AwuXf(vUcJz#`^3^Cv)0KzHA*1YiAl1F z4-H*WGk5*erM}Ct)={~v)y|XAdrs(En}zw7-9!UOCqE!!)Ma1szcdvyA>IumEH;x< zdDdC_^Wd>!HsOtlYnwI<_qo|-!pAkB^ao+mIF`}t!yUc9J;7#EB|qqPPkM9;AvSa} zx!xI4w*Z0P(v(U3!)(Oq*vVyxj&KKlSW;d=tPWj?g`s2PCdI{1GaDuukzw6~D_=J4 z+FSQ+tNdfidT=4N-f=mrd_{CUa9!NGd`lO#0)NdIePJT#8M$6`?Ki%NgE5X1YJQKc z_=lIzhte=ORIc5geGpZrE3d{Cu&c`N&US)hJx0S8{eO5s|yl3j~sq}E?+Rc@hTa)*SL6nRux3bY}m z5m~T8*sjRG3t;OPh>I=zL%xd(nwUCt6JX@zrIfIym}C4i<0IU}AZ4fH@B9ZFFL?79bdu&@jt_YsO-%M37o$5>_>ak%To!yR}>@t%7FtZqJvS z83cAUask1$MhT1lvI?WjDU?o*q%0Z-bnz0jW2-QY+Lar0E~hT4JT6o1F9&-QF>11u zG^%J)aE=v}j`|q^VVVpSHN!M!+n705zC3fbyceFEmJoXxcUJNj#2D2=PQpwhB&LN0 z6&@^(nWbb-)S5Ig350cF5<}S-CR&d=EE`FE0Vc-N;=XUZLB8B|rFTr$8qAd8Xe~_= z>I69l#U%vcBx6~kaZpxZ)FLivd{WB2I$dUsOQ)aUckk{UKqIB~+@?YId%^h!O{alW z{s+p6qyJ%1r>p%FEanB`?ggNgR{!^Tv5K?J_W{#m!iVjrXbI~irHW+f_3o%go7WfF z(jCt9H*VIO{LZVXbL!RK{rVXE+PNW|h?K;o6BB1qw$me%PT`B9Ooh9)_3~A1Ro$?UXI-jB0F-y@ZxKJD%nXu&sxT;odiaC zNQeIk_|p7$<(O--J|EGFWf+yeeOM1d7O9W)SdMz<87TEaqeQuI%=f zph3?60li*9#%nio)9iO*00MB04!lXW<7uJK$Kp|Xc`5nms&^eQk(GFJCLqv6mP%1{ zq1a72Po@k1iQ;oYZES350X98}(1b5F$;Bk5UzyQIB2$T=4QgtUPXuaI+ zxdP{S8?~V=4||kxF+Pa;#RPP`$1n@E=zcZ>M{%Ou1nLrS&||j`B{TxEm+(qiGD=*b zNufsCvSJIPs0F#uyDz1i*Xh5DF50!gI|V>+u3h8N%NAOuO_Qbix@?=pC z(3)@Xff=f;^mt**VsQ`ZcV2;!nQ{d}6qBY|d}1-!QYvKgD3~XqDR1Pk^nZ(lh1Ed%ab%-#3HB06MzD26z3AjWEhkeW>FmTF(>wHkSJzq zO8m$yGUK3*nwxt4J^OZ1JrFnC-ScxWizY~C`b6a?_8}%}k}SAyNAKA~^aq^wW>LkG zpqfxs4RZ$;$}L^LYZghqY+@NovMQ;36b>oL@4)vxR8CG#O-2n(I%nNUMr>PBcHr%e zWD$(@vZ2Q13KsDH;p(e`>I#5l@!;;x<)Xpe-QC??gIjRFTwH=X1a}YaF2NzVy9E#S z*nPWIyZd)O&d;e+Q!~@u)16`U>aUsBVy0KNK2y5ep*|%H_~ELTSu2TA=dE^a?IL{_d49xOX zA_-6w$;9Vj6o>+-ZVtYLKny%MR#KY{JTe+ea73SmyT*(d05+M6=5$VfsT~mt<-94G zv1(*zxY{bBifUe86Qs>^R>y;R`bBsnz*cl=dCF%aR}6{C6vfS@TwDjU6`w2AXBv`UAt?BLW;f z9|E-Gm2-LfXU}9_4g&9iIsnIncOomtn6RczG|6csPK9Q$m97ZG^6xcF5B1#w`j|1V 
zMKQZE1~5Y=#e^|MG8UJ9oCNX)K6&W)8vz4_O4vb(XIXm2EI>)EX0TMdp{Qi049d6! zacz;NX@8lV%IXCBmLsCj91H_ZVZk7%KZzlU;p@=J*E9Sh+bmVZ%zs?=m0?Q70qNm1 ze{VV|1xt7TsNTR)m0@{>1Ip>hnxRpGT&$7tm;G3?cm;w5;Cg2VH}LxhqZOX~>%T0g z>$O{Z1Eo?p0orAoOvV>6i(j&bNCc+8HVYYu;Mp+nSaD~G=f`3)P**u3V^HR$8pAZA zTD~Qc_gi)c(dwOFwtA~TDbD}(kRXNE5H(EWY!wDmxihdBqT{ofXRtth%=v*!;5_%W z=Qtc7l+HtSXdSXJZ*AhlmfN;A_e_08*1(p-{;5NBm5g+3Q*mZisX!O-iF*W(^6yc^ zj+WjQqrII-Wt}c#vBk_j9M>;2QiIQxNRzng2Flh9yv>F+*Ec()jjPzYd?QXq{-N5* z+D{YDk54yy(i&Nq_5yX2%hXZzs|@w_Eu-pC3~CBTymE%yBWRzlGVamKY0FO=S0AVy z8>qqoPoT3i`KlIv8%^)i)gkihPrmA)rIT?yi92GuOCZ}8ty@ZV*qHR21IUU z;u_%ZFEp0{!o+8#A3e{goo#s$f?JRJiCCdE? zrqk!do)AyiZ2mA`+@J! zo8gZ4Ws6K%XBWMULG5Dt4p0 zRZN{E>W0*BkHN943LD*aI!qW9Ac%&w5e{q??bs=vtRZE_5SL&<_rmh~o2O>@MjUbx zk;DZ|wct@5x#|Y3(w&=Yt)okzZ*&OewJo^441K1yzKSqq4O~T)U`60-S+xvncZnAC zw+lEc0U;t5K+o1k4NjE8a9=B2ubDl!<3d#~;blsqE77*tH#E`LYWukL73fq2P2-N1 z@}|_FjFQ7P2(+3cNoX#ON`#GB43qa7Op{wo=A&l(IMZ~Vl(>&MP%UAs=x04J@Wf9V9wo$W;BU7*}LEBQc)j}XX$;r$jP*oEoxz+ zTXF9tzpn6&*Ho{Wk;vvM6LaF~J;dGzD2f_dKP8sHM=ox5lC49T4rV4;j?7swXv!rC*u#>{jxBJrt_i`V$%LT)%A zi5p?>1~54@I?v$bIQ*DE31Y6M=Obbf7clQI+D9rL!v`Lrt5q`~9+8so6{5m%>%mG> ze+h>_LWYG{y_%HjjMCDX*?2H`8wQc9Z9`5`{V+RTk_dQ7Y|@%h88VMg+@NvTNdS4fx3A(r@Py zE0X6TR-|4eedV($x+z4j5l3jFh=(>);9#U_fz_N&59@X`X8`6#7m`PA_t=Y}933t;4gx(X|ayn^i!-h(# zjTm4k$zEu2p_h@5y%Z>j;%=uO{BDe~jIO8vsM2y3rZy2%Ww|2c9A!JN8O==oDsfvT z{4FNIYC&AE^cmOXbn~gGRY;(SrT&dZM4Py4#uu4;bY5Mb&K$$~^aHXfJ$80U<`x^v z37_X-(Ae1Z{GPDYH z`%J8xNt8NRVhTZr$NL)|JL?PWhFK}SgJw7%wJZblc6LsVd8X1YwBvOdDwe!yXJaOU zNs8L2bYO~=dbTtH017uVf+~m5Qh-IKy`oI+_4-nufs8ACX6AZod8-bATb6f`;UlGh znzycG$brk0w`yXHirOr0!zx-QDRLB9an{V^wioWA+9pB49D5Ni@6v5BgX6rIaYa2e z&Zl`pM1g&B8rNpFq0dW+O>=#QH{fvycDyjC45q>{y23UlO-jhR@xxVnH}b{HM%yI* z$FsxuGTWGcvrF{LZrnksQQ#Y{@cT8XFq1GVONw&I$}u7={jmX5xybA}iz1;60m1xT zJLD*Is71~S2k&mQhR;#DP?X5yZO+TKB(slSC)Dfl58-R*>nJ<^HAl?i~U~WdA;URO0W|`lM>*Jh3>K(RJ5f6?A;6o|lFFQgjqWT#uvF3E zuxw#dKs#fuinlF2=_ThsU3w~o7x+Xlt_c!}S{v4=lPpq;e|RNfBA3SWJq*bo{;?=n 
zcp4T$Z^gd_oVL_6`}bKOYMWN*D|IgI4_()V(Ax)1CP!m}h4h6?w3l>JF>Qc_g~9Z6 zwrgTGV zvBY2QQ`Jz+66#wm2ufp&slxIlTqY@79m~IGPs>jVA#^khmaG@jw{-sCP}FG$A-9oy zrIcg)4BGpFPqJ^DYfZNc+!5MH<)$@`?XWR5mE32P@RM(2-=6}tmsxTYwZ;_!Tt z79-ejDC%ML97;QlK#T0EUY}EDbWcn(eG*fF#x0u$$`PP)3ikt$Dj%gOYDAcs0X(o8 zAs?vuvpwMTNw9le)urc|^5vMZ3Ys@wa>!Bu+>|H_Tmn49Z=>Z6dIG`=8R`tL>b@?i zR1!KC)$7%ypVXyKyL`UwQpc|Uni?!G2E+ZR@^FcDo2=6x$7D2p2=dP z#!7TZ`cH~&)WgMpY))tGB0`tA!!fwyg~*{2g|OfgWOK%uTD{BBA=@7YB;zM4c&|I9 z+OKbH0>&^6db-52scBnqJZW`WPMQD1oibnyLQ0%mIfW(tM=>w>dt`%}H!YV8jT^0O zl0H--RH&tU^&kg4C=o}x1+Wo<-$&J>eesQ18Aq$@%fvrj(eq@i1v<|Nd5Or0FY0|p z{7>>@q^y{{sgL|{42=R@e;wMP%_pnOeJpA=L;>7>i>QOYH`=>lgYzQfmm<#_F9`;x zJ-?*mNAAP==AFIyZWO%4m}9t~q5X>sqHDDvZ*E1Shs_^AASchhjT~_~zUq2Fg}Cl| z9KnzEJ=$?zijDScQz*&qK@UZ8@JPz6SbVYwsn9wUM`KVJ8JF1F;~eC-pM`)>*A zzBawwChj>34;mXty%4ubc4mt#%NH_v26%nfDQMOTahyWnMU7;lSvNzz;lZvTp@vsiFw8EfKo)YXx>uHY}lYP>evD z@Nz!EDc6T$@{D4c63{M;Y$}B!`@Yy7fGUI1Ovwh0Hq0)=U&K{Su^x(Ns;aDxge?`6 zj4&51|DeDk0v${N1pwq1nneUJq4-+TCf%w^Em^m&i5$aQYMM6d6;+LY#CJWKBvHyX z4(9qEYy2c{_x)~dP383PEp4C9HD3BX;$@?lz}Cx`Yk%^xNramNkxDJh-RsZeFJB1@ zEi1?}rhHcR_YBA%^eK_!epkL%E}oL9)-rS0whTTWt20_GXPoVXRk<<}F+=VVL+)nm z}u)|-T&4COz$G2kui}c!N;oFctqYGASqA=@7cz|vsNr({@@yI$i5g;(5!dF zWE7q@E0!J^{R@==A!n-6fTNBg`5>!1EV{i49C&pPj$X$5L@8vzyh$1+Mw5xux66a-Kx2vK??Cxo^)~k4Z-E09Sx3%k-HR_tO z3W}*(C^T_Y5?05V&=2Kvuh~jsT91@Uv=%tx=sT-Ee6jS=v9CfdZB$pV=KY`yht-u5 z^nJkG@_S*_%HoW}mG-{7#=wT1fUAqO>plrq{dSixyz=?Ba@;>#EIxH&zPFpWm`c^ZqBe(S-tWiq# zz%iIlh{$72bQqR)RuC)arwjqZjASh%Ny)vQhC#ixkIZumUn zOOEt|mYIq-z7uR1#jdmPi{VKKk&PPOUVY$9f(+M5KPoS-rY)n7L--$nIna+~n`uJWyL{49{ zcrK#lSZodBqm^!Cj|N-&R%?3tb%tL}y3fNer=1V>%~gt0b8AB zF$9qaKatLNOND#OYi(j+WL#7uS5=9VOa=qaGd4#52#qurkjhp~3y6$appgvLWrO84 zvPF!E2Ua5M0pMgP3b?(y{6d%SdE>?cY+0Kr_HDtdvlxd`g&EDr@3pndD@_tf3h0Tj z%NF3PZr@LudO6)VHJx}WaT;+_376IitED7UX_}`tw5pAIjJ3fh($y`B+sU~8PY6`+ zQ6r@cmZ${OK}r8-IQf4FSZQO)&4cf)U`aG-D^V()Xu0=pR@0t_w0nBTZlI}KX|u!) 
zKQ%;)LuFVW)tNt;+aPx!1{Fu=dSkQ#lK1Mi_G2L zlg0Rn5XFT&%=VoD|DfiFu#K~Yq_F)F#F@-0#A4yjN(S%macXMQ+S>KZe12I-q9k&t zVzR1A?c*L~T1ZChMaC!2lgo9iiHM8Wu|nF#Ujx)iW@@r^JsZEg=mr+MGpWk9d? zjYu=V{At)nPN${&UY60()+J(!TzG|vdCAvuH119#<;Sf-X5{Yz>b?GjnnRq zVNggv_E$kaq``PBm;s;Md_azV_AA!;%F{A~DuWM8)z1`X?Iq~oJtkwB2Li%?`e97m z{)*R5sGos~E6X5a6S3i^W%nN9k8XnBw*i7SpBK(JzVCCd5%zbcY(X_x*I}Q0La+a_ zHQhn%@~hB>wD&nv79+BuI+!?|>l`UxNMrAY(3;K1b*OgcLFxAKr)byCucvp;m^f=w zH|-#dD2kRNP647^c~;bMQhXGOFPuYMc&07bA}`!8)Ql@@#a%^kzRhniM*p)+#Wu7r zs8}vvB1iqFuey(#m*I|~&yhSgpSL}p2BzkDG=v>}`{TE>sP_G1#vop^h-gPlhlkF6 z_WvX}m04Eh{DAd@?_g8iw^D=&KZm6Gy!q;<K$jsXW$kl@bvS4QObKMdy$LXJvAm-!tVH4);eRaE*u(Rq3UZ(5dbIHxnM|Q8@{1+oW_A!QcD&VF-WYNzky87`Iph|-ytgKXlzeIV#`ip zt{kyU3-9YEr0JP_0IYZ^4sMS(sPdl5uBc8lGTEV@fDT)ttDIYB(H?HW{wD)SDC3Wk zZ3A!|%kOdEkvwN&KpO4(dPjT`Ob{=dYgL7tST8ChUv)6|eTe+ybq6^R<0JDT-l%8Q zWJxNW18U+7cgP}kzLC9H7kSMux0hwqK4B0c*~Tfi=dcR?Pl@sC>5Man@UU4+uS0il zQKUqw#)<}2Xpth4P%jn%`|9n$qjEvm;2v|g7+(fuA_6cGp{2QF?I0C=;^BnV4fMaN`}9UhikUJ^;}0I4f&= z>+0FO=knW>wUG}OE%yWa(ttf_uht*riL>hBh?7yi>b&+Zvj;YtRuj8pSib`Z2L7Qf zztv|B)TVw}8;PX|2zYVGOn3MtiLv%odz~X!1wSydaJdsa^K{muZsM>iPa?oG{4amt zOKD!X3rzTm{|w^{>X~PmN8TgQ>j#FJ4yQB=N_GVP`S`GBbde%qGG&oy^I3!S#z?%h z8QOWlf>a|>BG0gO$N5iE@&9g{|It5Bjn3lbf?e>gUsOg+A?Ab)?|Y_%|EBjCEAPyx zuR-RM)hb*?b%~qAv`asF(ve|DKHb7Hg{^x9$F++i+(u{LV;j1EhxmHhreQHtnv4h@aoUWUKC9oJj*5W^Dh-JsfkI{INx_lqwqk5OA%= z&C~(j^DBH;wW_0CwzfSsLb&eC&Gr+%u@Ik_(kP=Yvm|2j5VCGIc^0-!13YKfW(z1* zOpKy$aIp!oz3~@${`OVN4wE000?`=su(_NM`n;?e{}G)wpaBAqNZ{PJmvZx4&Ju^A zLVSZ$MK8aoGujqh9Gzi6I}s7uW!tT)5W&N!PnRK+|I;0F5ul7TYKy4a$~4iRKgy0j z&Rwo=kjuSZ-pxf-GK5?jjb-nKBdY*Y>UsT2EAz70NOpa=%V?ymbnWZ{P9B1C5$FSe zowJ$YqFH6)l*s$#N%KS`FuHx8VP?{n+sM}n{X&M^!Hq+#hw~x)?{t9$v_KZwl_Ui( zDBJJD1dPna-M#mVoRw76$x!XNV)UT`c`T-4Od=lwzY> z&_pR@2vIO6O)iYzMj? 
zxV83vf1t`0@QFC!{wk3SQ)-Qt&QZ3^sRDhl#_8_JbMC8K%fpd1oKuzAUNZ1q*Niob zoyUM#8#JF@-A6rwcD-o)&S~w0EgLK_!^>~sAGU(2+~1Gb2dD5!@h-%)C`48JQtK11;6 zB63Mcay6FFKO*0)eG|kZst@I4RFw}fXP1aU;j-fUacf;n2@ey`O2Ur)q${P35=Z9q`Y_y!UTWr4<3!xk9#h4bE;nN#z}YqvYO-6I~>97E)@6q0J54`QlR#l z#1M$KZVlbBtGhW)5TQYE`(=Ts+t%Vk@Nmfj%xGd5qv5mW?N7GZo45ah(D~Oga&Sdo zNu5?IKk&mux*jQTW=%`>Zg1XN9$y+g_VV^Tt=Vz!;A1m&WVrYZmBQWkqU>vn*vN?c zYVIfRBYQd)ZDfeU6Z1djRWbj;f5|NgglQi;Pvue39n4n)B^^#bNBraFa|YV4R3h1F zFWG8eNan@QoShv!89Ag3bD?GG*=q&kr5e|sCv$(SG@l;}&bGd5y*CTg(cZG-&|i$XZ7 z@)uVBSmr2@lE7#g8GdPWHX$j0v8+wLYvAA@DfDOH{o*oD7FnYt#(`uzs+INYl z;{X=vik~}LF8i#zM>+4Yz!VnHHd4v3pxj>JQz}xzn@s8`4Ks*X0xYB-2Ihz;AZp+( zR|U1Tbw*$kHlI5CxyCT}{KK>^wFd1%X)_xY*bzxcMvOW1;i@)m6hc%w+mIO=^z{96 zja{_}NluPrVUKF9t+|=Hb-MX}8*wWl0eJS7cke$j;PLYkW z3QV?gn1JW;x{;xMpu zMxeTU6)&~l&ZM#y!ZaJaGxO}KwzVn#w7(kT*}72Xro&&QaDT^65M6l{MVh$twxh}c zwChOzob$h!bI-G?vf{37;7eP39W47AyqxEIhAw*%Nqu`UqLA`J+`G+s^=g3z-isi{ zYTuA--AYwG3T1s&j<|c&U-qk)lmg}0wvM{zC9(zt;;#!W+j(W#{QBPXBE6+O@;H}u z*3oQwr5P#KhqhL8+4}xpi{gK6j4u^v4GHZhFE0Tecy%x6+W}{2>!I6sJ3pxF+#W}6 zJF1Wa0+7munLI{y{I5r>%kMGl*A4kk7lxL+Ny$Y#OS?340dT?(7#uPa0FmkQH76*2 zie~@P>$0!j$ldT?WOSG5c-6GhtNk>nS%?#N2KqC8Uq2I0L+6lsNjM)TN_+Kuv*-cN zau8Y>nu+D?SqK*L(;}X5kp%LEh{{!bvDHb0EM@_SVH35)H1zsMZyCOWtWf;kaDFoo zq0W~GKqC4usfF4Kg^$L2Q)1HrPgAQsY1et<)KSMu91EvyEcLX++#N4O3gA5!gX&D7 z+7Wav`TBseMw1B#>=IF8uIrE$O)_U^T16dLg`e4lPb^Z^RN9ks`dN3a8vw@4UjI3LhEp7*8Jqthb>n#e=I)&Ne8$`- zD!VECMNmu32f1OZ21|DL0>=`MpwwFuFO3Xb7W=7v6 zK|uLF9R{zv%=>roi8z*3xyyd9kwzw??`7q{^$v_MTS+pq-HHzz^q8tCWNcnMx`H<@ zsAayMn&o1Xx$(SVM7ne3rK=2dRNM;vC_z)ca&qerjO_CF*$aykZLa^>*jCT%*?OkM z8Sa!$KvssIiTO(ZtrGakbGgpn8uH7j;XYTzN84)bWl3m`&d(m{|AROGS(6JRHQr?S zA3_A^{a3SBRUd+BG<`A*Y6W>Rrsd!=BpZvP+vvvXl%eV%m{T%xYSqw^|VpRZDFc9yZ^sT}zQ-y(%SGe|L{ zrZcfk`^b#q34?ju$mtNXy4UE_Q~eEt;y7_SksFc$DmsI380EN0pj9iV)7aw67i2&| zRa}JluTNO=xTFaYT>@kY2FfK4^6m4V$>6nassesMq=oY9yc#Oy`CIqV%O zIOF1Rc`}usyj2o|a0wc0BPhqM)2Dqtn3jnQ9OYgQN62nYr+EJg4EV80D)l<0n7RDJ zUp_i|niBSp7Wfyu2%U9+=~#)OcnG>BPA=@d(Kg90CrMlsF)S>(HiD 
zx@oB8M+I@6=4XP%N0+BEqj+PX(kw$;s&DpBTAy}CXl&LH1K+p&>z zB(JP%_vmb^1r+=&2Mp05Qk4kt_6(bO9NOuimM%bl|5}n`a6zGy04QePvK|%>_6L6> zMS~#aPuRD4zbY)l*@lX+GJd|m(~?8;PXT;=_!35VJ$KTh@=uQB?^%mfhiIe_d*2*J zM0v0(a=xzOI=bx9m-_y>Q4t?{XWRP~_1K{f?t)b$ z3r%~7qC@27Spuj!08`v+MVZYv@WdU|R{?5iQfH}!cB!ZeqUxfE~6)C6vzw zJI9yon7b7fQ1v}37C*eYM2^1$`qW*V+!B|7Q(dhp*z=GQw!xx^u zTxjW7l&M^~Oq#%8hKV21Xlzmc>u$vN_=I<`ucl(_tUFqt@D&@$N_p#wbAHD4r3Vk! z)vR4U$zq-eeXB10UCHGz47avxV;?V0&anETqRqFcN870g!79%aqeIu{gAn1cJBF4A z)%=4>hlJB9i1!|m7HjQEeag;AoBI@9<-_vRY1A0M{RWb!n zAl^V~l!jeqoQ7OPU5~{LjDQ`yEL9K-6fQw^zT{ehvs^6Hw|8$+ED?`J1f+sWdH-@q z1K70`5*V>}e8*)jdrK1=MVBz6E&AFq0h$`xMdfxa&~lAXE=hW~41h8OAdN|0@2kfU zwme@J1#d3JjH^N^J2n4O77BEuMWd$cau14`FjHItW&f#J)Wot=MlbCgg3_V%dZN;J z%;FIE2g#XhX!Myq=*oE4ThAoGHB3PgG>84{Ag99CPCH_udM9#uGnR(@K7ZR@&3k0V zr7r?xt&w%aBpLX(t=~2q(j-67&(4?HIe3$-!W-uaD9{BIK4WAaT^B-rE z4pzpQJcQ3=u49Erwbqq%R1f{XPns2CYns_V)i{Y-Ht8o26?IuNZftHKjHjo`HTz#W zI8pfU8ER{s_bKM;?Z%-?_`9IGC)SBOaibo49vlosTlnv{y!Z%(3BCB@z<%HZzQa94 zn;pihuo)@mkzC?2N}bpS$#i6qo>M(7r{Bhii;te__3E}N*FtXx=$@r9iB${u45fpk z-{tOLq!!k2F6bGnP!gagV$q_wLX(y%_tPhT5AZz_UDt3Wb#Rb&nhSq!ua`n@bFjSK z%y&H91m4hlzmR>E6E1TBBU9LvY~nEvwGUj6oqNyT0dL=X2H?&^$CE?NX8azQtXFa= zBqi4W>HROj(?9NB6lQpZzE>abDehd^@(qh2;!-P5oc1|&W2i>@j;0cNqcgA1#Ut;T z*h~Mt`f8J}ng9dED%!p6pz+E8d{WCiaEsx67@{&lTk1|yG5koIoTUt00Y?S-X5~O$ zrch&w``y5&opIJk4gf{6EflyC{ss@U5Y_z&Y2W3v)Jv$w@lC`UH7E>izCOvW*8EAC z9t%5>MB7)DZX%RXK#Mrd48n@4D97+()PHa&yX`j8KchD9xISr|KY^*%(lIiwYgXjS zbA@ML8^=e2!-B}PWQ7S&oH4ht{XP34VV8nZF$L$b`0&v_4d>6^TMg@Kk=-b|x4?n3 z#-L~oi^v)-i%E<4y#)Psnq{^RjDvNvGJAGWGVsTtZhWcAK&M7KT@la0a-k_@dDbQU z8K{_nZo_1kq8Sd1N>qFDEaWi{37HMIVWp#{O+qNl7#z#P$tFQhWCLQ-dPI6Q21ikq z+|oTexp?#HhZ79!W7tw_ZDbrCf%T7Y>7N?BPrGJp(@E*A~c_2wnm<`EKF3CDt^B7BNGS&WU ztjMQkTLB^dEPukcB>zM4WC1;-i*u)J^X@m%0oGt3h_sn!HA$&!fKk(It8o)rKM7EP zA&E^yKb9dM^t6!xEf6m$$&pyvH7O`9*vNFvw%E9bM%VPIC65=1ZDj1b zV(E~F`}NZViH{EDYuvE`S?Al3(&4N~_e*C`J$vFtc3=NkZEbV6y>}ocfh3HC`iO!} zvE%TAIx&pW&RJq}X5q+EA-(G_f(A5Xhqj(TL!Pe{tQkAMaJ7uDIeTl$2sfex 
zA%UJD%nw^x0sXNj&`x%T!xt7?f~GHm6vK%6Y4Y_yE-1cn?4D2CFV{@;Sa~wEeM9>D zhu^amE_KVvA9d^PzqYUc-$zbMm%dnzdIi#zh(DKdG8-p4bYdo2z?hGPL1o!&vWhe`I>gIWZwv_(+BB> zDwAYZ=a1!TVQU>xx4fA&q-%p8MH}UE_e8<8M3sves=f7w>IOA?i-4;e445)elmCtl z=vNr3n#+IoAa+&=?g_bwqU{LH!-7!sfq=Ot3OyU&w(Byz&Vx{eI#7;}bNTMqzwss= zO{p*@RIdIRY`|eSLJtF_Zk*TC|NLz4_k4io?d&&x_OIWQGFhynk%1$s(*yQpY4T|5#7p5>4Hny*~Kt0MaE$(wRYl$G*wh z4@#(EtQap62L*H)u+8?CwKl*&IevbJ<4*&4Xvjf`p}F$^)&e9UAOmDHtU{Y90epb5 z4*}bPs%P|+IvZg@rzio+7r;(+eicfxWRAXj2{j{1e9z%z3U2jj9S5gelY-0-#W9Kl zoJ78Mu|>zh@0%BN&doe@XKO8x?5{17bK@?^i_EgmBB)|pDVdRc ztIgz`@r#^8=ng(cg&@Y)H$i3nkVuA0lNmC#?}@tr*Fnj=WYG^E<4_r;hNdL06r$Ah zgI@@~owL?q6MN{X1;@*!#xxC>jbE46s8K{Wzv;hv@gs*_PbF>?WT8R4`i{IPy*v-A z9vI!9-%sD>&<#tdIn6I_8A>4*2wDsp5)q@uiK;M0{!hSS;mcwUWAQ&St5$cJfEdIL zi%)5b_f|OBUou>?oB307I>CqV)PVEn6)96e6DOGb6`w zhdggv@7meYz1+&(;osjG=h_Ff(a2oh#R_5U5_fs{FFS^RG?oJVNI>1b7Ks!R5l>O(&Cb4wwAu_49)7{0lU0JRoMpEkPy5&aIrzCji|*c%brv4u z3{>e)_hMQ}N?0DNPOaZr5sg9>Ws-?UJc8c`Q>E626v=ZuAxbEXbOc3H^_aNB`1*Ls z3P)M(>J$gKF&+4itJd{7>W( z9%Ve7{G}RjbZfOrUj~q3)i+@k+kH9%wir%izNZ7Hy>b8!dA2IiMt7BotoM_;ik2#nx2vp6AGfRcz*0EoFz!o#Hwz zIjNSz<=AGVpX#{Gg&fLosevyN_%=QB2$ChRY?AAS5^(_dGLDcB5)G)qi?|j}!&=gE z`4m)&??H>i5@3v6r3}XVk}!cx9*;2*otVk!^RuqS?7)Kr!-Xo1ldSCo0Wu?Jd|1#E8zYnAfEb=B89{TQ7b(r^uEn)Mjfq%FmTddQ>m~`qJ}5r;S+=DMpUdi} z-yI^b0w=I8CGc^i>dX&SxJsyCd4|M}c>ZaV>M~6q-%I4*AC!RiU#GV`0m8<wTy8P)Y&!bp2wH70~=rFs|t$Ur=>!Ky4pvpi!s)XC+kLPGD-XgqU>v(=sl!AgELHhd7KMr2s zO4Y3mi8i$cNx-wWEQS(pcaKAy#%wvW?Z4$=|#zCD> z3_q|7EwQVwS}IczkC19JFgD-NqFsZl-RvH&pf8}zKd~duVOW_11qC$R%hB*!oqRgq zn@^Mgl`@Lc6|K{iN@Q+f5K-c}=~(_2_d2kN6W+=tmSSRmG1Rt6C8BPyc~AKe;gTz9 z43M6Kg}@pza}$Ay)Oc~g`%{{7)0gvaP%r(aPkOsWG&|@Au9O`$YOzMPw&*RnhvbMv zjG3e)4Ts%EK{5G5jnpC+t-YGONHHC)oMKlHKaGGf02SR zgjQ%Y*O;=0S6%hsHn*pG>5?YeSmV?c>m@}>@!G4-0u`G`!X{&;gtM11FwjxtXf9k6 z=6Q)d#0x_9A62Kk4ZjE=$JDgWbzLDC-l2D$TpWc>-2L_oi)P#iLC^iYT3gmp9=dYN zd&49Y@bR9w2k`{0j?7Ees?SBhjoUGf&d<<}CbFd$u}W?H;BW2X)VUmct|fVzgFX6B zQ0RU>4eLKOPXApRdHPY-wW-_zrW+5T){oK`FDt{u$X1<4wnCb36n}L5<&erlUz9fI 
z2ww_p9t|UJd(TlX=hq(A{4tSQbCDif)7MTfqgU%gR{sW%pq^}R3I7)C3ZEes(D~Q1 znYMY}0y|%Dh=2~AMzSM#Jhn8#0}H3iz3Ob{=+{;!s=cUVXt4eKxV1ACFTDG2NZ#-9 z;;P^jvwlv7KRkT0P6mnC(oJCx$|sv5)%GgKiP36Th$UO(0#sEQh5{Kc5yl|*0;Jrk?B^(>@j=RBAGEDuT@9jJ zStylKAa|YWB^z32V$2DT@b(&2O-?>ZEpZZk`~#neXNTc&I6|-S=ih4iJiO*k0dSMI zba7VFJh{v&KwhA)w}*_kzIht7$o6yQ<5xNkhB3+lo*;vJKRn2j^}cCxr!V5XFiI|F zp_@N0GLMctudySo&{Q1zp-ZX{FoRbtJ|w=>Ks&-7A>waNx7b$O6&ZCVwji>#__ zhMlJSt#EMCBYIuRQM(bUb(Rsb6xZ0U5P}&?X!l^WazqEeyv-N?AwL1RFCK#q9$N>1LDfh8i6cN*aM|+un^?C4f;dw+MZ_sXr z_IeI`ebp>KxwMHxnVu)u?thVik#4XW^;6pUIT-SlDk1n7CuZctuPc{`5R;BxHU3WU z6SE{7hyvGeLc#CcKeQ@jcvmdQy)>!wQ}~=k}zoe+`5>-V@$T9uJT-`AZ5(B`zXBFNf;5eZV` z1t%WdZY#}(TLSia_6}l6_pZId#9<=PSR>WD38cwcc`LtQMkzyEqfvz)v$F_nnP^&b zsSjJn;7i-w=~H_SW3F*?jzIJM9NWOg_j(h0ij*>Wow)mWUriqCn;MK zL*4Sq6u#qg=)Y0-hd=gbI{udZ+fj;%`>+w($WeF` z-bI*lLKN$HG$$Ae62M~{+{XOd!qfUe_gv(kcv+avqO}TG7NzUytw(Kzh(V&FI z(Cr=bj2C~6_i_6X_L)Dv)#VqLCj1(7q@24wHDBu8+&O%Ek280soest>P3A2bB8H`d z0%c*}t;rQoUBbiU{s6&?N6LO1R+@K}ooTiRMg z!Y&cH6phqG3tL7e5kOU<8zCXV!gTG+nkQO9;I1W7Uoiw{QJMsCAI2^++6@9hZY&XB zDq4U5k)jR z;(wYuhSjGdT9fTksEG95*GvqnXOq}M9*nz88wwrmHKsWEHu5>a&@-!W(T9)#S3WIz z8&gdJg@*d}f!0-D6XQFxc>I>Ntt3}14~Z9g)>L3}O|pO|7_ou0&=$l}bVE@X0>TG; zjcbH~JwKY`*?nx}v1(SCw2+?sI9{P5?wQ9}YPxKfl*%BDBQDx;to&1sp52A%l(rKC z=P0pt(c?iFJ?qoqmbzs}>2V!Gg^GnA37Su&4|}3ll!%|4{T3vU1>s^}-IV>7Tzpet zK-8X_5T1%I$U7_S2?J?84PRP(%@G_lQG8I4y zMV^$N*Jc}Km^$RQZdnf}qP#d3wlGb<5f9N4`*sGrX9`*v#Djv-G>F)A<&GSbjba`$ zD5+#LrQ-!vv2R(w8LIunKW_pV*b({{t(w6`3~|Q+%vGyaI(KNn883Jq$c* z?`rO}4qm=|zz1mpa7Zj}!Q+-t%TmK5vVlHhGGFMPhp&J47&-8{^&{e_r@MQw5|8=% z?-#TuOO%qRv>5;KP^{$PK-W--sIZ zCtPUZ!j@7Zf^9B^l6qN)3N8Zt!$tGVi4)k#V=#0Fet022 z5EkBQXahrCMN%5Z(N6i|P_)`5)PTqWSSCsole$(|(PL)iw)E6cY5-Yq4CK-gb6x>P zaf(cx&SXL{YiI$?-3Va#WEDDFFd(L=2+P7JqA4A4X(rQ<3s(R1;YEc}E^b|8BZgBd zhVc@$53MNwy`bG0XcLpEWuj^CjzU6$5jrvqowUA@JD}1UXz_ACRw0WxmGOC}mHk(l z&v8r!05^&WZi4(n9(Ca|Pz)RHNHo-%K&MLJ>BRT#OGA3wGK9t|$+yK9U zKpn<`Ww?K9d6BF$3#jfNXmbJja;Vv!r4YFRW~e!v|tO*duy{jUYfPSl$zeGR;9V 
zoPWuVQoZ=2ev#^F2R5a8)p|Dhd%;@RQ=D7GNJ+%eK0GTtPE@h+lrW?#B~ktnC;y9C zwjZr|twjb^0@OA%{68DQ=c4f@K$Y8NL~#CU)192Oz1j7-NFbGqd#sgR7+xvMnE0_F z(XH=px#ZnPF%(~#xp^M_%Z|U`|909oanDsZk=A-apnav>(;5byJy{n0-8Z9a1N;Tz zmwgve`{1uJ1|xPb;+U~+qTuQopfw>Y}>Z& z6L)M|9osfejE-%;%)NK!ee)akV(nU00<%si^$#ev5~-AmMP?CI`rWMFMqC+bm3?i9 zY)I7*v@Cyla~ilJh)1c?<^GK}PHI68F(*+;Nmn3Ss~=)OXOmC-1bz^V%&(&>m3abL zUJUut8A#b&gDV^QF@}CQcl5X|bD(6 zm*L{sAo7;1Yx*QMql-k?#FOP~HVcEtA!w8!JK*wAu$%u$A7%Oy$>TfpcTntTIg>w# z-SxHNidZ|wjIwDPO^uYq+g$a5fkn5Mo=#tN!CQR)E4<*AC8NX*8Z1Sw#QSjxRI4lg+h%X;Qs#b{HI+EemSPC}Yhf4#vq}HLG!8m6`8-=RWKgoQ zC;_G?HBn0#P(W%XQ!PfGsFLQ1s!hxMd1e#M72a2&h>cp(qZAaIRe;$>L5FWi(zH2J z(X6JKbz)0S6bZ>(Gl5-N>K{fbuQb7(uR|KXNo6EK zuuES!oPnWzCE`CDJIX|Ds|w-0Eoh=EN*;?|7U58^uZfoh;&FG|v&Z;J=5Q}#I)cbS zEB-7C$Bc}@=|p~BsOM==mkUjZhTsNL@TFsK*kmq}p>F^H5ZzDQEkvS>F(HeF&B*NY zJwcvPtN|xt(`*?-gE1DmaZ7+(Zfy(bjiy6Q6wcqu5nZX89Ylk_@eBvx1b9TvEaw71Ch5bd6*Y6YuG(hNxcm|*;~c4ar}X<|G}nZ zyTXI_Euxwv)S0>75$_5K6tOwpZC~Y6K6t)V9=t|j|L{CLYLRDh14PRxKva}+sYs|i-O(pFq(vq8B~HEf{w2#Ijd z-{&6N|116rr?T?DsP^dJ6?bnaUeM^_7GjIf7i7(j$(Ewz3|!&`T~muVx>Ry6D-zmN zXR*f8FuAS*1adnoCF$R=PgApaF*<PJ1Hp9C205g0TSxl-d~GzF^HQQ1+j?>nn! 
z6A6-Q)-EF>psVG!*{@h}c_d^#mXJbSol(Q;C$1Wqvbqp&_{%Bx`Yt7QL2Jn*bOw{j z48fJ54T#z2ib6qr)V7t)u4q7pq?&4uT~H&3*%0i`TaYsF5}ql_|X^i8zQP6 z+&*7gP@%d7(QkR4%~u9(Ofvpu${L1>;R`9*UatRc3%WjSGpYW zWY!e5KS;zDx^IPUIz$>f-VMFqHSqPo+SO)n;fJ|@!JxId(BFLV7-Cp3YX=PZszq+A z;S61EreAYwNW8q_FOzAMh5ZLMdTPA91s}6hig&7_&h<~=>Xqha1yaUi%gm)Z^QKOY z_k8${*GD2Y4a4iKy`9L)7HeprJtdqDb`^Fgcd2FwdCDkTs7R?SrQ!+$kRC-tk3Gwh ziC3WCgn4Ae|Bdsx!z$RR*+_NxFjL**;m!Vfzjb?0syV1rKw`s%YYbE4fK|<0b|1gK zj)OhyJT6@f1^yLS^AaH>Gp=KRoIMcA4w(MO<*djBvYt24CV;& zEf``XFkr-Uh-}%QS)M9W0!O_N)DqjC0kc^6;!@o|Jj=nO;8mUe<93;WAly`@lHnu< zY=W#RMHK^EO=Z;+w=g>vK?1Uo9X*kw1dJ9(YdjbkbD}r~%CssJrSBhmsNo8`*4%B6 zmU`-@pKzSN@0m*WAOqs7#?y(Qn7IC z>cmb6zDr4u7$|FY>wV*plRMgES9K%O5a>ABRD94{-Li)37tBGj6be{o>U@h_iNY(K zH+1ZMc-ZsKs~S?GvOy-!zgA>+1v|c!AuTveE;Z4kRwl=Ho=!n$X%uli$&x*b5z4`I z(Jy=^uJNRxvzn_9a7}N1w6un@{X9V~-UNd=^ytB_F?2P9)Pe*4{iYlm7(>Xj_O{g~ zUm1Et4TW~tmGhnJduGtBx-xT&9%9%+sPkz$e4g-(0<1S|w!Y|4j()Y=DiK zLu%x8O$nW*)i%WcG&p1UF_?>ba@$S3bnUs|P8lyXkJR2BgTgNuuj@NZsHlAIH(fe< zD*_c2yByfdc+uNH-Qp&@e#q=0=mM!MXFXsDBl)=YWB4bcQ?>!iSRT*RF98VSq>tn= zH1q|SIHIgw^;>S zz|tKS^$epTL)tl#!lPu6B%G-fESc2QFMsEpRs^}$URz+9-!{{+q!lL8j?CDjO!u8m zJ?Ma$+$zZ;N3Mu=9&nwd*`lDi*-ZJ5DkD2k-jc0Cq#Ad6tV}Lgi__-qcPrka$;Y4- z?CXk?28XHI>LE^(&BG!yHRIan&pih_wF3tzfi*5y8%FUkt@%n7=-;lAJbfR+tr7W& zis$VpRaL~^RLT;v1u>1ks+>rLWV3hfvDg184@Ro?4x&plBq|O|;*(*_<<%wNXCt7< z<%`XSYu+*c1lJm0BpwO}vy7xIxFFlFraf3C4mG+RMU{*Koq};Fo%Kp|;%YaVCTr;1 zUcYv0*b^3&HSo+^2Vg%wn+r#d2nh``|8yA@Gg9G-IY65A6gogvq9*0=d3W$=cQdGE z^4<*Q%l8QGWdgH^!y|6!6R29dl`wONa3(U2Gi$Q_Y@AP22x1XyYI4(5PRN7)KES;z z1;zRT6n0!DFrZ`V4@#C!OH1b=%@gWZ4s-JztK!;yNURX$UhUio6(MVTVs$n2uSt7O zP}PoJuar7UJ{Y_l1b*Ha05^zoMRp_Masx}A5G-^1FLK6j%!@iOH|TgFjV89nlEz%_ zy#NwiVWry~9@Dci3=6=bArFKC^78Jg~sk@t~T2XesQp{|2qRFqr44f=tX0Cy9NKWvuEug;=SPPFF(DK z-68@#wU>wchI}fBw)_i7mDydv}f!YPKFoBDw9FB zP_K~HhlOn1CO>y7qvYow4Rs+Ng zAf}=g3GSby{t*j|-Ge=^?q7soZSRa8cjpv-&mUJe2dzDQl}9tzGqpJ+HPZMBu=vf+ zbS6=S9#Nzjq;bxoO5*t#@RXb-rsy2SwioC}qzM-o)kVuu9dVW)$9GL}*=G)zQc-7e 
zp)X4?%Xi!{r;1DTsAxiNCV~`eJP{G`!)qpi$po-A?xJEQ!El+!BZ&ibQW&6-YV4hS zmm8&g5QUxsv6kr?#KM7#D5~1G`mmFfpan{Jzc<-W?P4n`6qnewg&Yms)C-m9X%%tT z1rUMDRRt$5L%r-*M20ZvaFgQjKhSovv^{d9aFq2Vlj;h3J$T89G-TUI`w5{$DMk5u zpqa~IW{Ei=J*g6CgEM5W4V*$Ovq+;+5ou=%;9;~EizK<&c>lRZw5jm9ZQf-4n2L|Z zsmTta&o!!Y35-HV6YZuBPW_Fk74$8-S4B(#fr1#d%$_<+8Wd?{!F@ntft5QY4Q&on z9E((KM}IAI1#KRVfe^Cu?9-@bT_chmQ8H?<*3312WK&D7I(V8%o|ctg;4&~mkci`{ zTsImScm`(RzHRFtoPCdrwd-U)yh5V@dCvpeF=gZ4Ty1e3-<)aoW0S!1*B}BKPU?md zD0S?&^j)6ju!Vot?(GQVPfLdfNm76KNg~&DL0OW$uy{dbex*y$z+GtrnTvoe1}o?_ zLhXG|^Qbm$^K z4%+ZO#X|)uD>WaaaG%)_v_y+XgLYht$o-weqb)CREvgH%`ud zp1-Jt$9!b6MZd0K0oUmTxVMwJOYY77+sPlCN7~}piFIxrV@#P|b8+r`quQXGMm8wA zRXd<`yTE@AKtC5pD&lhj9j!MXt&at`lLbk3UXRouz-h~7YPY0t-*0Shu8)0(2)%pr zZX!IHF3d6rH4feBDW~$d__=v<>gf@ZuwyBhxp)gNR;A0SDW6Z({o^F`zd-$;h(5ti zmtX~b?q)n5-f6ZvJpx;6^>&BO(j@OGN?3#_zi%yIN|KCa8~P!=$RG{N0Q)IpHRX+l z2Ss#|6u45loAg=|2G{HJUcCmEGd=A8%>`h|-Mv_XaQ{>{9ZMB+G@%dnc?>l70WW7+ z9g$*H>g}7YHv|=G`{g{IZNb|bTO>M1pSlT|Qoc4_Zuc`;cj#x7TvuO%R=y1fexBU2 zyr@A_g&Ckw#Ci#7v7Da1&p@M1s141*a1j!PmK?r8qg~zuY91v|ytk?D$6xd~E(5Wk zkdi!?49!Y<<)tE+J+^;73DmLN(7dF@q5y|94v-@B4SaW0Csi`-)KMTJ9GRo=vVL4$ zUfgvAiyEnfS^}uJm@LY4S%_d8NKn7`-oSX-X!O z6$&Hk8n1l{SqF^w$>!ynASRLG2|VmY?cYQ*w*f5S zRfR=+Xc{nSXrwGMztupMjPDFits|#Yf|pPn9zy$X7|BdZL7gZy`j$NB72zvj_USJS zFz_5sVVEHYyi)@GeZl_`LAMCle9(vkQa4vxM;x}x-ZndhTz=%p?JG=3)^ zB!^EpUMW$I9JU`?UU*$#XOY1Yi*3Vb7p#@L`BskM9kn*3TzOecLaXb4LGHa&02LzHtsZS2ocu zSEkXN>9;g2?^Ii~9u1ZkJA&_2xBO^^Z!@ba0eJm7jkNEu})| z@}s3dW*r0|-I?5B@nCzGqv$i$GGQmCa5Lx4lz!`m!abxmQcDDw zJvvX!`o{KyFZ98aggC1E+)A{G$TqK_P9k8vBK?TrUJ;4E;3eG-XQ788Los)0W1NdRP~ z$R*NvQQ1i?;$kp9za3s$A}b@Wc_bx!D7Hb9m@j9&TLCN=V4BhboMHMIe7Q_2*|7me z%HfquPRMLZHWLeGpIu>6DbtO`0PiM0e8lB3>kj_+>1b#fB_0LFC`)SyG9cJyga;vG z9Ui&*0^JYK9rwBIz9##34>|wp2u`D%q|Lyg9RnbR*vw>#?V2PE12bn5yArI)#?T0X zzS=KBFFqqyR?R5S1dd)sZ2VqlGttt{?&?r;(fcHoy0+mJ`j8(n6LxtqrYqOZRPk&RE) z>MG~d8;3Ohc;jleLakk(3#M|3Ajj*S(b1KK={ENl5{=g9uJ?n(LFUclW}yA5M2P1h zsmU|q{{l*8Dj&bmeLwinZMJ-YBwY8yFs~jjtaPW`(~MxgUg#{I>wi!TCoP{qphI@> 
zMQ3Y}9@O91YNid4#RbF!mt12wlD@z(P29Vq@56p8h=z=U5!EGODrU-Pi10wc&%@lp zYU|`o3#S!hWF6w6ADKIFJ5rbfxu_R>g){Q5#2 z7{B;>{qaaidZ|QYLlly6mZERqr27{~1~IJ@`4IRExx^pyQFAmC&FLsuQp$+BofocX znwIP!`~XwSfyCCjcv8lf6S)-ijW1fSV5Q$JQ*&h=ndQB+kc`+VUt!UdH)5ITfcN+rcbK@Nb3k@tWgQ+}|3yG3PQ z)qjC2ECej{k`KDGh*3QxPs ze-KcMdt2lRKl{Mte?`8#ZvJ0bb*`arI;ioA?+(?n?XTS~SBq^&?x&A|J4N5=pHLfy z_ySMNe`P)gh>T)EYH|xNUz8nX5QKo7PbZ~o*SbuOKmJq)^_tPbFB1dk{v`Xxg1VM3 z2)c`cyeuSzN-b#VKbD+uCI5*EcwM`p@q6L?0z3UG>_0qP@%a2e>wJFH_PKl8F6j6h z_WDUft@d(n5h!7x62r8CUsECTA`AEC-H=US4CW5?0X!WOXx%Q%B+3#pm zOR30)pY?jR^;VqEH@6fuXBcW{7?m_W)EuLL$P^Itdt2K~a2nDg;mH~L&dDfPL$ANw z0JTKn1BkW9^a$ZFl?b|UStq|LDLEky1%85U5gWLpVPQ zhNwo}gNluybqYF)w$O`6Fj1ys-Te=E{Zy^B!+R04{csgUOF~(epX9~7apHkk zXptQsOkT-HNKWoLHzazhddjW^6?tyfZozqziM={ARuiQ*-*p@18&NPyH@Pv>6Zd8q z)-syo-e8BMIiyhRj&MSU{$}~-TU1|CCMLqia453~&QU&$L7G>trIN&AY*!5$p+FJd%6&VR#5n^>?J&yCytx1>#HWMP;u{wdi7i=qEx1KO{VFh6^tk zlQOV@nbu@x_ZJjHC?t`Kq~gflqPqg~ELqgd5_k$~Xe0_mQn8+I)%_#&-w(HceO(cZ zFQL}vyay0K!QIA&A{_5!#(#b5k4&h*;gewJ0VUJJB6*cmq5LP;agF&5Y1>~F; zq=OmQ`)^a*kBmabEcAdosuBM|mn?-KM?pyf9de2DVd*nbCT4_f@gZFTN5M9ENS1SY z(;2o|5F=)at-elGkOvt+X0OD^8tHHu44LyyXMu8qi|pq2b74cy8_^^(4ShI+s~Y7L z9K)WZ1W?GpiaL(}=pYvkN#-lxj*!oqjRPeYHl85+tYI^4Zu z`n|{!+0NJ>mpuLB4Te$pzO*N3b6&3VDyYWOSvXa$fym0m%3X;Pvdc^ZNuMSJD;cZz zaSAmnPbA_-tyR~DCt1vCO5pv~jP0$BfM;SmPwdWN0u(L_f0}x1YF*0D3PrjlYu~Rn zmEk6}n;xxH68K}K_-2eR(j84kH@rshQ2~>wy1(MBahU+DcjvX&g=u_3YvtJRFtT&e z?>WPMFK2TO`A_6GzR=GM)c7m@khh%ivWx$!&Ue`NH+0A6$wR(i?^p8h23JRNE?NxN zaiAYu-Cg;z`^E^rUS>y$^LdvJ@@%n$j6+D6;q|q44bX?x5P}*qy;roNAnBHw;P%M=VtBA(bww=wK{F5=L;xf&S!+Wg8$&{5N)U5 z4KCv{V}eUBojlVF+#nXrK&Pop{u=DJ1gS7i*;T{TobsYX-s4?H6_Z=R5``o z=B8Ac`3l*HDJG@KaWDV{bc;^I3kpi*Pb0h_8UIL!oykai98DfMxPt{6$_1KRsX)3J zrc5HTF&3$;PIrxbf`+D~RJL{W;79FE4 zmW3F~OMsdmfS@(rc}~&eJ#=9J3;8}is?gV7$QVAUE3vCRSy8O+mE>Sm%Z{y68)4Bn z^43=5D2l%WHy}n8zlaIh=o!CF-m-v$)Mec5rSVTVXrJl`TQIpbxr84!75gmyMm4v@ zK%H%RbKM>t>i%ySc?9Cub65$hznzJxEJ7iMh*8^VErB|%^hWEHWckT^GtmM}I`;%u 
zgyfGn*haL2z_s7pKxV@*+ptm%d_yvbcy#+G#CN{QXhA{c(pl`lq;?Wl8&x*pzlu)3 z+yK?RfHDJKCUU}Sj?P>j>Or4#;0ER#G@Cl)QpL~#dHo8w^PP1ZMdf6I_ih}#f<+wT zuK{z&gbjTeyJl4$m&!Xk$v0C>-lU74LWnCr%A&JvnRwN;Ys;`bgmcXg!Dif3ScnRX z;FPypHML>W1jr$Q6*1M0+&(kNe#Z8ThNy@{&jkN;pC$)1$-`CuxRY@9yRZNM{MCG6 z-$`8Pn5PGywvlb>j-A~Rt!Z1=GNA3TK-Y-r#ZAR0z8~^d>BT}R*yL2-rgNGjFW=Pe zq0h4E^-faa%f#AL$fQ1b* ztad?+f@3+*8x1c`vnXn?h@dU6M9h_l174Sy?I5>G^=q=CV|3~{7$ z&y8LfcSJ~y@rh037RU#__y8W7yYd6xo0mw?9;u}N5S^NT^@Hi82!o|OG0zaSe_q0b zQ8fQNpn|N7I8YoRc(-7IL-3fh5KzNL2qN-cswvdJ^sD^Fb#9_&mH9j zB^|L{`|xk6?7XUL-MrqnJ0nN6u}&&Mku@CXh2dkU#Lbk8Y*$U4Bu;SCGT@=VK3vh? zUHQ63(x6ZK>){=D)_U~(*>R<))H1ZhzMM%B(h!h(&RYbb&04>SN=uD;kW7+TRFz}E zV#Fh)e2`B6FMax7!~!}ugr)iPDNR&DDNgX8{xuF14ZBEKZctA-dq;Bo-VNhY?ZKHk zq$a2q!zBb8?dIlaNYr(AAGm2!>`~-t*6TI${t6?=`9ZgPG*3~GGsknfFh$`gco-BO zQCRA}5K$p=&?HENb@6-()WE8|J#?@39}O@KY~fND`lOc63C9tjQd~y^+7O8dfSA?y zu{BbumJOD3g)3s`E#fCnLs!1~yBjLbqKFjiae{^QpDY?Fb1WxIN_Fj**auW^Ec-wF z?MXuQ#)`e!HpDeGAV6spw4}bsQFn*gZ+~=&OH0SphzU&8Thb*cob@&)sByDQbItBM zPojBTt}=~RB&1b7*v4i9ad{dn5o^>3B`SFU8Ma zoejX#@#biiL}whHe>~>s)rBZ1+_tR@=G9$SP*pDWjrvV#8P}tVYgsqZ=L}Ajw9cUJ ztFm)O2Hd#bIoBv8`+TT~07sfLOB))Cc&oMl7sBIP6(yK5PqKIdD*&58qTqA2V5z<2Kw7`O`HcPlUZlvfXxwtKmI6Gb&yPZV2Xy5rR3IrOh65$ zE8#F5N>#bfYQIyM$EPw!^3?u(==pdmt3*945f#J1$^_UJzON#WN|LM=C46JhiQfa8 zXw(m{c$<9G2G=N3b0E2n;^Qb0WXs%!6O7rt{o>)pz#s3R0aPhSGrZOduoC*!@joEk z0p;)(yaDTAwZ_wFAM-X$3DXl))^`zE$UEF7P;% zeC;S6{P^{Qcq!{U6&1v;AslMd0DNYM8nletuhu79e!#wkTX-@$N`|JYZ$^%8t-FDN zA-o%cM?De;d=E~##23hyxpyx7K+fhvOle1{*DH%}WkuCwM1p9rdLk@k4T2M z@=w7x;@aIoCH{;mPlNr(x zzeXi-?Dn|S{7tvx7J7{y@t(>XGw=8hw|Ki+m_KhF^u&5$n(&YK8J1^#{KA7{(bvmN zNJeGbTkS5|vj60FK#f?F-Z_xaD;=IlITUNd^6FYS`}i{`2)9Op7Ir5STQ75XICO~; zyJRLj1(p=w=A-2b4oweKoM(P{UQr=KgM$YCB1VP|QOS1`BX=b7!~S z)`iTa(3UlWct&RXM`(C30Ufo&_?sPx1!U9eX0AmDF{;fCA_JYlz$)|jWR;aGl9ab$ z^1i8BA;W265=yC(qw76df9C>0!r47Keib)U8Vw6W)=Y@Pg(IXFc~CWL6wy2XFsn!* zbhUek{|!YL6?FEv9$wrhOPsB1Y0x{BOLY5KuHE{aQ^O z?P@7f+W@oJxr_hw)N8k5=-oQ-c!3B6(m%S>uj>YXwDdhr5z?|*6rp&i=G%I^Pk}cs 
z1F(X}!gGj7Z;Q|JXcSrKElu@*)QIH_|I}=F*-!p)FLt?iI)}4T6Ed;a9YP6sDUaC6>HG zK?D>1AO+_-*XS3gSG+AyRQ@d@mfxxBQ#O+retKFRjq_?1iSdR?8$ zv~$1~m7*|8B79fdm|Q6CQbMpuQdvkbTXlm*p;xKDDI0!x8585jd{T5~u2vgS*0eIL zCWF1P@z-jBtVdWzhj|8|$vQHOWsJL@2?tqTJ}U#-4RNJVPk1H$P)Sk-1nO|W(H&Ey%EKhbO*YRl{6SV2hT zjAkJZejHv^rimeQ_b9^#+5oVw7pf6mRNSbYV^_5>KoJ0#F?qWdbn$5MNU*Ah4JZg* z-3n&6?({bQ;ABfAek)PCHJpr%k4-=GcyRc5_(T}`YY5~nQ2#o+Ni@4{zKuxS2bbhc zRc#!pI8o$G0ca-q(KX+wi;`sDk_vgJGX9Ie`^uWkyVpkI9Gp`ftb9X?h!kO{S7 z7-)lRE${diYLDWmealf+Vys>g@~T2bAE+^3y<&f&a-30FDKqb>skBnY8{9XJy?4a= z>*C{2hj~jmz@y@zZ#i%INhlIxFJEtdFk<2pmFUmP4HW_Srp&S9Zl%68SDr|F2G=DZxbq6ZI6CR_;PkUcBA6jM!aJ7<2-Be@;dn{{H&& z!?XJfskQCbT$LkyD@q($f}|&S$iN_}QqL;K9hR|BM;dzoXN05^m+yP7@4@ z2SLv=h=L(}iZ1eERe>9@I414}w+OuV%s%0p5_M?tRQx= zRk_ndx1EGoU-??i(M_s|OOIcagexp!M|DS*jGF)(v_{;uIWm(2jalh^qe_e9iVSVm zLMTAz<8Iv(E{hCt36Wt78}Op-Df(;YP=fJV|MSfL`k&yJgwMl_{?6Uki-O08zkmGa z6u+Gt#%YVHkXKt!k6H$bmR%&P$X=Zm#BDz4OEX8TIF#iXeKCN?vW5PwZhQZ+)O*dd z%p^7J-qoUJmThHc-oV$j7=FoG%#CTuGm39_JzhlOxh)zKj*M~WT#@Dq+Lp9Bk`QQ5 z5CSM-34u0`n>@bwi^mw<0<4tH)iNu00p&T#7iR=|f1u7!c&XH4fGj%Mx>S#>m>rF_ zYl&32L@6c$)U8^u8-%^e$|hkl*p-~Cese&m@7HUN-PP79i-uA%n(Rpzlb4!cxl|xZ ziJrUhohwZq)DjXE|Bz5U}a+$!JY+WlhH0I2jLJy-_W(?g0e68f}_4;WT#Ot+Ts zt8UuYVdc73+-9oLakfgeE8%GkO!ol_93D8hhL36PwM?Qv8X=E{$H2CHS2nR^zjzuQ zu2Qkm9f2|fWW4>{f;uYuPxanDC*36LA!| zQt@A19$AjeDT7SB;PX0@-u~Ay(v+G-XL;KQ|1bW)L3fi4^!@$4uPcr5XW@eS6`)H= zO{5z@l`9v*=)wnxX^kJTzwQPC&XS4hN6xt6U!#&K-7!xLF6aC*alHvgyu^V)F#&Cd zqVY6|H5!N%^I|W?dqC%XXUmmOV`Nzh{ye!73CY2q7cKd<9~NY-5Me|Vc-+a%1vWwP z8BwO0bg*wfeUbTQVCe~p$U<~0i#fcKt=O4n0)`7sIY$1mE+b-Lxolh9=VhF4+h~Tqt(XZP1DRQg zSG5cnwNRv~1t561BK)?tP1aiz2qGmx5A17WADtaUTp8Y+TR{) zNB35Vjx%%^sz`zqc$1h7Q@r#E1H-_%X2%7^FLJ3xDYP4Tf68(MPek6i!fva&t}e07 z9toAM5#TB9`+)x&#^w$|3(u zHl6mf!@`W~*SgWA>!+`uEUs6Y+T|zR-WtJ(qlbJPUF9X`e{%swrTDjx_JVqjQhlCu z);Y;SHSj)9PKmH8GyiKE`#-_vv--W2cCoG$EvXKTeg&WxuABhJq5r%*WFb(n*g;hP_DO%abro}je#VqFWnVr=CduV=>iTN+>bLq#-@ZDp%#$n6O+NcAUPuuNO|l8i$} zdA*h)h1>`&y>D^aD=XxRJsz2vNB6ALwu4>Bzdid!zE8r!HD=vP#K4e 
zylA&AxE0OeqNG$K;=?OpoDXB2?+Wd{jb&a}&o+&doMhu&4aiy1_s3={3n3}XTQSr>xu_kz3k?D;q$ECA)`x7~YKnIKG*{aRwU^MPJ{79aZm8NdH;bzuORgO9<_ zflYL@6q<7J-m3&2{b3B>J5W}z}Jk8UT<(qEW*r-V^JRvKjkz~4yg2$uGix}b+iLO~U#|3*}M?v~JcgsJ8 zxOJ+>5@{w2MCT=#sp`wpA+cm%cxXX!9eK9=sodtNfI*Pc1monE^3ENB#qh@WY zSk20&_ETE~H{{IiwEm@(#S{N_MxZ2U;P)I<^L` zRu_jxaOBxjcotElG-;?6o7pf@RFUhF=(cHA78SbG^{^a(uxetv(a{U#NA{86^=psY zn^bv?UH>(qcE(@Hpn3B{ntob!7u@fDVHJ3LQh!G3dEphC*KnGQuca-RmaJKuB+7&^ zl{?<7LhFW_WXsLJ*2C@zFoGi*XI+P7Wx_PU#_ahL?1PUPVW17&w)-3zJNxf!MusMW z%pnNe#p}}pluE>DP}5p-wOZnS9-Rii2u9`p47_pSXPIUdPi}VoCy=h0;=bZg>-Vmf z=@I^v)`G^6w~mK|ushKbTLft=%@Ay&lf^TYgmsfzc#p{WpnM&(_@*~ufe^McVD+YF zjgCNjhR{c{ZNI&}yV>FX{4yi>ou!{4_I^(kem)-rErQzxZ5SFC+1ywrTZlHi9b`@+k11}W>M_`BIE8g^3VqRN^EYAD7&;xcb5!{N!oJJ6S^ zZWCt19)c&M=vwVp=Y@lR>wsUY#0twtt$9>H0u5rp(a-B`+WEl3OD>I(tkS4yo7twJ z?#)6#gU-)jA=mEPJ7iV+23gs!(JZi%iG}?|JT&|G-s2`8tX6=mEOsm%HH#>P1XI3` z;NzzJOUV5c|7APTZ0(i91I@sMKKpa4|JTZMrT<6TaP<27kvA=lC5vS@gE$VhjSIg9 zcMFLOISW16%;#=fGBHyaO*Wk|4`FVv2EM2P9V98!e-EMH{mK!j*lBgBZ~0n1rSFbv z4X6AnT7Ar>IM_#h`)J>@#D_C{e=d!&;jW`^RLCgcse3M3>_XNOrcDH53trBjU7u`bBjHm(nu#e0G<|5SsWnKFk*<}5`= z9Z5bs>qhV@Kf$b7p7)^#dAGfP#@N%J*7F9c|NiLiarfX}=C@_sWvWrwhk;wA8H{1A zIM&-W8KhVwS{U>mRg{neFINm5_96c$cId-K&Q07FsP~cjxn3hqS=iH|)%13=E%&qs}<-|o)qDPTIYbIc(!}Q4^NpbJmnXodD7DhpR zUJfo%Aqlu@zN$1xuxF2X{ETdtd6N? 
z=XC?#=amG5R?(lQ^rvaw@24xdyY_w`-^T2X3bTvNxM$-g*&cTETYP&wjfF&fT#Me9 zQW-d;bEr|v@m4m;x`o>Kbpy&pkz|S&eN=W3PFyr1^Bt!=qTl`2Na1LMzA$C)tv7WQox{fp_eK2Ft0R+qQotMRj+ z@sZzG-Z>1XTzJW}z#nLM`nUK$4(mp;L*+29Phdwgfog z(o?xsdfl)2Bi7dKIA4wqH&UjEUSiqfiJjWW55aRH=Ys30E_${=W)JJt!r)haVN>X3 z*j8U+IhNSFuD@b``6<;Xn5t}$qHs`l_qj)6YyiE#6shgOrfp28sYC_E(^=99ALl;G zf|kh&25b7$0`GyXSJy{Z-6Jdd-NW(Qe$haCzu}8k!TrxtPKzzL4HtI1XGpxQfC<@{ zoWfo8vNdr}b4eArVO%CXk>11$(EeIbGyR9y84+ysSs|-0&xYBr`gHylI^sHj%QEBa{cnXQ3+iFnUKu>4kfm zd`l1V-j&YeC?m)gCL6UL&8Z?uhDoZ0c@A|+@;>@_ZFi=;U2IAvqjFr4J5@f3=(h?@ z-OHBor^1XM8v0*LK)0DXpPylK-)|uVUhg67ir!P%B-7@#N`|};dr`1XeI4R4^f*vA z_&u0zS~AcIO%Z*+3_@dH2@xMGX!znZ9^OM7mi8o2nL> zMGgLE=?^LH?S9$)sM5DKE$N#`B%}~*C)CPCV9Fk5^YC+MKOgz>xi#4GMC{8zPleB7 z#9I_}tBIGXq3eqBmqIkaKgcd9^{ct@v&c@2l2?BOt6wBIVNxgcISV}$;iAc%EeEOunQ@XOfH?`sqxn)zo=4kVZyYddP zylSyl{;gG`1p;VFI0$9g)vK(6my?ymDP^xXrQqsO1IRlOi5c}KAjq1_K#LhLq5U$q zC?aVgXUhAabXg=E=QOG_IF5}E*_zE4kB4>0_YX^$?5&Hn45|uEwnL?q5MD6gFz+AN ze26-jB^R6VzF*?!aSQjQZKQs*$}Ho(SA|l#>f-=CQ)jOc(ApiUq^+t|)2OAbQp4b2 z%449}pU;d56VXfISQ%&uix zmzb|)-_>w`u6t-4_<-+~W^Rx4)SSPwt+H~4Dr|qn&y{YK#llwqrw=SaK_}Q&Yx$`p<6obop zc9F4f4dd^CuCp#9%SNS=p$#fIi7qS|PA}=Z`(@|QpPqXj6 z^4KOzFA?$d-LsaWe6dB%6m!&__XX>S#a}pz&$3S@1t9 z;%;)foI$dI)ck&O`73k(-n}jL$)_ny4C-n?0g+;}wc-rQv~LCYu&F`YFFq9PBXWy+ ziyg+z%LdN`fKEdWvOL6iBFP*%Qcob|S-;F5WJf`G^_z1M`7?HIG^R zl2_=%gESg?k58!n`_jns^8%jF`)LXb^>)?rs?Bn6E96233Lqe%!A#kxf)$5LYiq(x ztByF~f=@?$-@H*N$deOSBDP+3t;zbiT#|u+j0sgDJCEWJ`t=WV%+M(b49)-G;C1NV zkzS1`3$ZJ|5>_f@@C`d3z@T8KA(AqUL&un$VNO{|srjpAKWOM0e)d17ADD|<5D3-)hQ6-l+6GF}q&>*SNxd{)21qzly$xO4`o%V+=jtd)WUn zcai=RL#tUi?%@pY{L8e;WqBjh8q77n>t_%%hwns|yR^m0w0MIBwr=p`JJvKV$eBCJ zbmC1`X3ZFW0gA|7p_y(-^SA*TBW46rE}WuIq{1C`M#yCpd2qeHooD ze6e#V0}WXj2dT5LSRZ{~i9yLU=ZCxv=p-5VZWr}(dar{|cdf20UB#daB6MQ7oIc&K354*gc zp0a-4p)&eZBM+WrKpxh>p9DV%thu`Rca(em`H0zSzqPxGBv{x5ECjl)Zl9@3#-9;@ zT@?wr7dKFQs6r7?rn3v0PnQNu*~3dIJA90*W4ucLKoD7S_)SYAHdNoHRcQC~u(;1U zOd0@(QP-_!?A!=q&-JH$*s4*j)>Wm@ZFm`enrllkN@_kV(*|AUx$H^j3h3GuRZWW& 
zy~scF?oWK-(`d_h7Kv~-L`2q;oHs}v=*Y!rbl1yxlYX@<#%CLkjP1Dh>IX?2&O{iN^vA9%kX@AvEVe7&E~*W>ki z{hgd0o;O)SiyV)5=Gt;u>BIU;6ki`3Z+BhXD-vkOe0>fj3w@^;2^i-5rEKN87?!cp zSiwlVQ^wBr-8p42ef21mC#Hbo5Y(>>96qL-@soetn-UoMygDEt2ymY&zeAQ8@+s($ z)LwpX<8ziTBOmL{pW$4xzp)gW4jW&GFG%u2+8?y`bP59}A;--? zdJJo>c}lBsMtC7K&7vxyePqA;Sy&~LU@gt>IFEU?1MtugDYTJI|OrJG$4B-CHrdG-!L*UyWq!q-^PFE?T$ieHn@7 zpV9*pE}`jhNq+!9{I>nHL)D&gC7YVNsgL$K94F`xOdVs#y^P_2H7g3TFHVABvLUpK zn_pt;vkj0O$ZRSV?9})8app)E;rKs2;`cFGhBPRz*a*YaM|c35rmvTL0(bRM8_Yvb zN&9JkaZ{ISKh)~%>Z$dlK@A(2ao9At{m>h3|KuiB4pFBQFMH|Xki6}Xu>Ul7nT4qz zr@@rpRe;fv?rcWin68dMsNdxoDS?Zb)K1$7c%}Ot81OyYvwK((5(dk>^#vOm>Ur_u z+Y1M4yqeJ`s~sEtwu;d_UVF{$A@2W4?ly#6H&(UwNJmM*1w2!h|(H~KJv z0=Xx(=i(wiriGUXns|PFyP|(sM9#pV>?D$bAmh2v%(^CW^A?C@Y1I<2S@gQW*md~} z(djodWGiCWedV6~{wanX%=psPc6vQ>Z59gVP;ipny> z*Ky0ZksFddEPI`8bg|4+0~6X@9_dB_6$QU04i$@i4yrUfjePn54OSY(_j09?9LGNG z$B$B=J`-3!6Oj|0ra!+}B4=>!4IP=nYLH+P6`4*BN6Fi*@mX^m*Wv3{mfPxa=L#`P zk%oY+&I72M-HT=JlJG5wQE!WN-A(#VR!I(!f5={Qcy=a#u%6^YuHnDA^g!4rf9b?h zLw9E07u;Q~ed$d(vGLAU#|HBe`P6er6|{O{pwZrL`b8f3^a-$PRc9X_sNe-{bYMN&HcXb&^IUY%Nh2rhV|l(>^n%ZXUwi1ws|HP@KR*1x=9D9{DcnXEZdaNKX$ z3_IxP3g*L%q(xPDIERbxouf8QXQ0-VKdgH#--b`y2g^<|(5=i`pw%`+1&V1V6PgAM zS_Pt(aNWS?ff{oDwffYAX2q0)Bf)pN{)ZJ;hMa&4_}A}83LbE6Oj|g8HLUXQw0^?t z?vh#XH3puo3>Qo07@uiGGZztC@d1J|ZXhMOJJB&5fdU+ZKRmIQP&$ywiEv+s4ged-n8iUr*5~X zj$Wmx9v|+sW1NvH8yuQ1qxa2)xa8!_&tTUpBOmaUKl5@F{~2Etp~2q8Spm*ZA!7}# zXfUC*Y*u$Vy-eKr)0xU^I#Q;uQiMF=Yu`H5QDD)w@`E8ibz$BB^LuHt8m&B&h?cHS zj}O%x;C5_0%|Z!Mb#=wxcA6A4 zeK39&4=ibr%+6;&8j#GaMzakzuRSx@mhp~iEhMoRX8y>_GFrmM~a+ zcJ17Na$*fdWQIS~*X>qB;%`6-z+augx*kG&`VjMJR1|Jd7MgGrH5=$=m764RHKQyM zPKp%o9)}H`;+Ss<(3GE!v|tOjBkYK#tR6lW3;}7tRL8EmeOi<@JH_K0zdMuTid2_S zOLnfx)r}AM44xBJ2d52+JjD03A_ z@x&~#=n-9SN&LjQ!$j56n3*|u8(s8x+(waqBd1p1l&RGOEHDR`u_X3zerS$Khh~b( zMF~nD2Hn=(f?42>`5@emJE9p`e70##TR0bpl*&~gz!>pMlWx0bB;Dguj_o)~Uo_EfH%2zO#X@a}AHXU; zm!Us`br6I5Qy+pGBCoSx^lL)DjkxCRo*#JOv9d}OE9E7EG2BbBYCy~|8@B$G@EQpZ zc(vRan+Qj(YfPjKps?zugtBM=Da!Gvs4zo}`OY%wNz;upR83U#snu;d`s$CDF3McP 
z;zSMJV0vb+o8_$(XYB_&OU!o&1bli_aOJ&7_GE{S_{*Bj57G&eT|6PxS-%ut47wu` z)yK(1LB74?Tz;qzletd+f3&F3MBS2`*ZT-JgzQk)KRKD1IUUAxU4S9thy~LW*m(cg*XzFNoANCns`3G!7`>~?mAr>dds|anRK#Wg}r?B4XYhg_> z&b#cXf(?T_LVxY-r)^HSY* z1E@R|j=o{nVdB?4eCF8_w9HwXv?qZJj=lsM`Boa={6-Rl7{=(8O*Up(o*0@=p1R|9G} zhX0?=69C1v^B7xoXD}k&gqRa!;R;!2?@f3+tuEwhAwqI*6v@*Mu<%y?$bH&5XH6q9 z=ZJjxC!w6 z)2aFGgwunbv7<$iIcpgsvcwV3t7;0~5?PkY7aiQ@9eEO3a~kG|589W$9gh4ANn50r>ABdR@=uLCHCs+*EGz#;c*Ok2)UBjd+CZM&palhBH)ioL73Iy#@Epig=I8i&_CoL>1Dy~m82mY+up*Qb&f|r-CL+ z-r-#=S;~EMj1o}N-h~VF1gM_2jeiGW1OYvcb4)FDfxO zyMR(Wd4R$Y6&jC?(gQd_Lrtlu3LVU7yuV)O?BIH3Weu41w>+%?_ETBHr6uxxi^yJc zJ(U@fw$9S!jO#~=YOkK0l@1;oTV1s7@uB|ANDYU-$Pr*HqQq?#NG_N-d2#86=KSR% zOQ=+qgta}?mdPWD3lV$s)Vn?~bcPE>5j~IeG&;b&f)B0M?x9GyoE$WH6`PZ*B6au9 zag%#ae~#qh*x!ze*q7IXvWhJ6^WB|f5ixdlqY0TuqIBq7>u!_l%iTFSH@?#{KHoI|9}Z*=hu8E*_lpOgd| zVL!b#9{ElmTYy^&b`4IPw5$XI`D6TO!{(4~JHxl)jftDq0AT9$T`qnoeVCNsQ-eSE zhCXx!4Xe-j{pq?e6U7Y>9<9A!Sn5AdUFrJnh0qJQB&YjtW`GD00Ft;^`hIPco~9)G zZ+Crqute9@q>2ciLTb2tAi*HezNVFBCG;4zq<)AWyZ}#*Fo)(StmlH|Wcs;YC%n-d z2?&64*l2S9ih!Q8C$q#@a2X(GK+7am*oQ5Tp_6du&@;W(<*YazwHD&*qyR5nv`j2L zy+#M5R%7Bqc*tC9@c{w8KELCP8!ilvxcu?s?7&pIcdXBdv&9BUrB0mSX0q{i{;`zM z--4(yl^y%m>0v#i&_M4nUpyCLhbRR951E`=O3aO=<^GT0?Hgc~bRO;gl53c75m!`( zLtOa9!&ECA3dZqd;-q zmV2_si?Z+DM5VBHo~_bn3ALGSOz|1yLO^vB1%&-HO@hWWjVo(EEaRqjfe`tR7Yv&SHI zJu>bF=l&2wdS)-cqt&zE+&2eXxfe!U-em(C!|j6;nsuDEsH(rqRxm53CK2{|g{r$V z#-3f}Wv3)~Ye*W z;jHPW_pa&{Fxsm5K{i;HahPN>wFh2)WngM7`W#CVKArU+9g?3?r9A?ozeh$Q0393y z0)sh9Z!cM!EeN^gnYX6y1IoV$fhfu;9*LrN675WD%;A{YZ>ynR9=QP4bH~{Nyq30I zA+jNrj9o{u?sxXwK@MACsE|EcA8X1ET5k&P$|ngBG)Dq2V}uS->y1ozhXugWX3vfu zI}Jb3h?irl0Pduk@TOdC`)#gi?&d+a7LJUa2AFeHQX={4Bq1{8ZwyYp`d48J>8o{& zdy4N@%9|nn7XGW2H-rhc8HKKzoDv_ho4%#Mx|avNssNRbcNGF*49o#`*FMO^ElIpL z*W{)H(Rd8v*gG~2FVGSM;#5xUevFe)-^H7WM0izm(4^dgJ7YZ*2a^j(!;#`j2B$$~ zkN!co3v&@9iY!D|+<)gl`evUnGOM`iu~5=rwY{%GkQve?$dnZIJ1cN#0QD?VTR}hE zk;PjgA5Y3>5cf*sCt}SxVh^n-q<(RG66Jzpuwsj*2Qfm?t8MTICf6JwRex3o=E6)b 
zeU4dbd>UP`@8}u!+#KDIj9EhU{OSYs*L)vCf<`OaK;)`^CAsm`l2S7}=6{mcp#Etl z!!z*I8Hc*e?OQxt6_As`8T4fwj5nT~1Fo%*n$#P@@XT>Dxfz_#JLy)O>xz^-lw9+;V0- zJ>`VVgf=1RQxQc!@RWvAT;^zCa4jXe6RtUN?Z=<5-cTR>t+N=ps|L>eSU*MoG5bcu|v(KWikQ6n}QrD240 z<2&_vzQ6an_-lW#vvbaU-=8`W8n0gw;nUz_VPO#|DavbMVcjvs!orroyNCINu|V}s=8V)RF4%UM-RptvboXI1HM@C= zN9!%2;Nesf!qk_Vd_R;)hE@p$Kui0Sx(gL1A<%8;mxr#uns?pwT1&ij7X&7^eD<4g z$jVLHD5Ir=Z%L*P1FF`**G(6OPtC9MegS)u*GRothO#5T@#n&_mp0K8{V!+Zz**pc zaGrz>vg{hMU+2wEfEaD!uLObaeMjeVcMN3$tc9??VI|t7w;2%JOa?L%6$!i7(!9L$ zwO8;h3-?&y36?O{>~pp5{lHs&nkXB1;ItNzeq`#Y00e#JrETN9YaR8_q3m)Z}gle-iajiM)($-`q%Bb7RIDct=S zC2UYxyfP1@%DiiW4}rVH_(cNdgx^w!nqmIB0((=`Rks+)5tiR**s$M`s%rHpQ@y1` zB4v9u^ z+Aumh-`fZ`TK3(Un*(Oc^^B;Aiibmczzwge%Ywccxee~8oPerdHS4?aVK$SnJD4Df zhanZ`e&wq~M#>zEIC-%Gl1pVi@?^hKjwqZrRaSVS-qQCUJaZ@Sh{Z8y4vck)%;D}F z!$rq<*ml7|PZd`(fho>h=*=FNn^#Tu9M~DnHi3RP zvr2pzPPtz^3HF+@?ltA)2#p0BKMWdr)?2BH7fntgfi_nOC&ek!(jw$06_?I?vg<+y z3d|@{lI!>(l0xfJa} zrj~q!$h-^Tf1R*iYFrvTz_|kh@HDRfSwyOA7?bJ4#t6XEcv)(ym-V%>`!wZG!Jq<- zs8{XvT>BhP`yTw$dbbCPQ}jvunPvRcB!u9htegquuL`F-5zJd&w-B|d^bUHO9(p$! 
zWkVG73EvHLD?Kztq{2uVjBJcJ8uEIW0qGBZwK55Y^M^|T8NX{miN9L?o z68*hjsjA(}DxHq{|4gD`FHEl#RpzV9G!#($`;N8GaUIR{mi+DiczgG6z{saE6r z!vSf9Uxx%l%2!-LZfTY0=D&BeV9cg|nX?uKJuYAIkq=x6A?e2pf9Q_()FlZ*!M*{&( zlqFO<*8;QEAHxBK&h7^rfOO;2(q{#&6n6G4R^1IzUkp4$O&AJ@w@xn0wGMNxq$XXJ zjT!1=tqyws^u?TsvL3zX$b&5u*kGKjff9#J{^_my%UZX}Se_#C?jHD0%rENyK*JV6 z4{y7p-+xtvHl(x*mn1i8F#GoF;fr4?7jDh%{x#$uR46YvjnU^{|8J8hmpRK|P{Fbl zk4LhZ+)qzq$H+>-z9W$UcG=$gq54t6eCiVSz2pn#m^b-PXWw-e#MVrK5kV;y8BZLq z{rB5d6Vw~fb|N;PMmpWwo*}_A^)k1e%m3C^;n!Zh7Y01t$O8bRR1qsoKKMK;aC(HX z)*cKxEz`AC}!U<9Hu}iq8>hHU&0-=sz>)pJURI(IQ;v7dO$qXkR}t zuOhZU3xBzpdqy>yc;b|iQlULk;1v#t_5Cw_wKrFj&JU|&)!U))3Gud8lI4Az^Y-v* zmtf5GR5x{Ng2F$6TWYMeA>OYGp89XtH0>d)-e7NhxFJWdw3}WgS50>lMFli}n28YK ziZ7hIT(iqaihlcFLr4%==JW!kkY8vqYTD@cQvsijkl>`?g;U0BY{RbMfZ;V={vT`P zd`b;axN)IEzdfWM_%#@WT4R~Dzfwbq(~rapHE&j%O6z(SujH9=VyYQx_-o(hm+{q4 zyNs2InM0o$uF1PmJb$ncepY71g;sA8=IR@}CWXH>uxqTJ^aaMCG;<5!)r1Eda|$8n zPZ@h+NS_(XTgDemg0~n)bD?nCB2pbso8%{+Lf#O3L8IFH=_=xxIt_UMfrqRpaPK?B ziY_W2`I4m={W&C|915RbM_Pg=QwF8Tkw;&GL6q3H zT*2&QB$4MT@9DA^V1NPhj^uULL7d^Xa>;MZ<)fd$EtXAk$Af&mW5R%UXwQsIm&QKj zJoqWV=HVPOQuu4m0W!uZzbIg*ZrOZcV*zec`y>eQvrh#8ud?%E`)S|NS}GH z%7Uog#<>+6;ZlGesdhFDyus_CI{3H-}fgm!%# z7W%Z4>0Jgh4s0kZ@htv#*4j2<2iD1Q@`Ksn7IUI~xVVX_f=$SoLa?Xgb4JPIwa*u! 
znSF{luj^w|5(rj~h@Q4gKDj;@Fhw40OO@fs#L!rN6PI#)P{C3BF&!?dBpPBy#}GBD ztHxJ5y2LMH6Yya^%u6k+Y^5AEJ#XTt4$>l|>&cqn3El)ehnv`(TrcexH!Z7k4-3lM zlRnRk9QZwx2otPU>3(lj*!pXwTUtN7C>9&*V4O&FQi|ZQ}UTBtAr2 z_r~QGJpVRll&k9xv8)fzSUC_1v#{(JU#!=uakOfcJylE*M^dGqeyo3g^FW2Tc}IlYG#275-U)9hf}XX7rksCYg0mgVA^#E$=oY&ib4&U-Ow`IR1gRyu5e< zLc!PfbCKTsf(HM@r-?@iW-+M)nkhY@r$yoGPgw;s4nETA{zE3F#RRftzOS4&fLzrl z6@QH=NbTlZfV|}k3pY}&85yMMt^N|mJ&r3TYZ>!;2I5k!I9X;fjK+tbRN=QYD(VFP z2HFG_c?bZ?l(H(R2v~x*&UsR#|brm&v|JPy!y;eED;81snFB+m)xHP!~z@X$^R?LtUg%I#qp*>xf zWDR)=UnI@lCcO5@^wX&ghJHc|Q1PKmFA8+nw8 zwBtcgP?d(h)t>}UArtoO}p<-c?=r332~&KdaW!k#TxrB6D_T}(l8d%@&B$2?ag#{(>YPtoV9&w2u_3z7RsY3ISV%+P;pwfUXAZ*jq6|81C#h+I=W=k3G} zoML}8Ns%vFT75@FKIGi)?(#)ANu4byWvg=Y4JyBY`m*ym>>V~%m{0_ZBS+|=aI$lH zH~-4p*Im*@G)hFod7at2>|xL%L5BfArGYFvvboFo?{4_rz>R*rOcMLTkjH!MF@*{GC>!o zjC>-csg}oR6R{qx5KJpFunOznYQ&Z;zEG}%a>}DUy#mVb_5_5GNW9O55>!S3uG}AL za?c5m2Yf_=4YbzFAa~NiWfT7MeKzCrWsK@IL+UlgcdkzBSHkS+cuW_9iqZJW7CRq@ zO^2jC*DgG6-oJB6pOvsecl?i1vh5C}aD34DVX4k1AC5eT_nghCZZ{!)_Ff@)8o|*; zQ6b}s`6t~POhRbS=@g|a*Jz~Y6!@Ytme?4@516vFC(ZyxoxM+ZRLq^9OHqze`JGGIicsehMpd4AR2Sew=d{`C$;j z-HW%<8sFbEVs$)p-y`NWFC@}z&bXzZ_a9M7}nf4z+(H|Vpj z9F<9EXizAjH2dO&``A40Sogzt>~s4AS^jxO0K=FJ!kx+OtpmoYJ&Myy6I zJ5P%^(J_C>%V?zm0Uci7e5X9aGePC!Q(5N4!2pUuxJp2Eb2SnKru(Z2;*vTXGr&&y zn1GeABJx}#k5?&&hR5ZOA-7Ww8m3yL(xXH%8Q>o}x;o2fKdlS@U4B)A;|I4#M=APE zUb!xLlTncOSnn@LBySUgJ{0f&X2m&~X>lI4A#Ng9k9?Qe7}<}* z*D8773HQvmX{g3ChBh6nlyfCMazTHmj~5Er zzZX`}Jl|t(`9M|UveFzA$nF6y=I8?vA^DP%B`qj^LX9jlPy$HSP-6}Bro;%d0 zeYoEyyZvh+M~Ra;_N!)Egs{+j+qF9CmZjcJ_v21*ssnc4f4r$5vCC_wZ?=zO$uQUV zwn`fb`9#BFl+N(Ve)v-qGpg&HdS+(Ky2zfMAoOSC%X!LwQpqC{L6J4Wog7Fh9%xaV*B~ySK8gkv^nt;^ylD|_3tn7jsPYTAN2bjK1?BfzF(U# z)jV$rHz)0+FK=E6`SFTnoLcDWYTVdy2B?JK0H1yFP|0g6P;{4lMQYtN*At*LL1893@l? 
zZn157wy#d%ekt%zGj5NwJ>5R=4(MRIWiDu^IYUu&phRwwE!|wwFXsTUtkKU`3^^qi59A7TE=t$1Gv&@PS@7 znNeGT4BxV~3!D7^sA%%lYJG(gxTSjJ{RUJF zU()qz+?v#oLhq}kjo5Z1ywb-{WM{VIiEtl{Tjvy{h`cwxXtXqyj^@b|#Ms!^0IA5R zsFirbcKb-7@EQV_>>cA4r%VR77^pYcl|rYBEv;_6oyKXv z2SR1g+_mcgS|%#}HeWGe`{}@M-;^gOCpQJs1*{&n-JBY6nY2&?fxzinn}pk%+FBhd z^Wz%zY^hCD1;aKPo$v)0l#X5kOtL&=To+5Mg-U{gTNn_!nmT1hFzwbiztV+ME2rM-P$qrl z$4J!Ag(RPdIQD3(ONL|z+&(_q7)6C3E81@(SyYmr(!@X@5ZO4lo6|Wb*mSk=9kq!N z>IZcut=?wGn+VaXof=ILXxE#bS<%CB=5aS#d}}!M@ykTYY~x?qoXI?rKOvm?aMBfp zp+C9_?+>#W-@*0A&ro2od=~LZaPbX{HQld8gBJtfiqJR;wL8K7y5)%aS~591iLZWX z&F2HGcE~arF3O33wqLhbo`-{MU`-*9J>xR{s;tnxbO)L>tP>Pt7U2PN;hkFhNqjX%z_U|S=d;7=~19rnSP!!pB0_kvjS_>Ax1%rLJ z!JrQ$ntNc-I;>KQP$F437p%k*$wV4$vp~>Xza11wtIom9=hjc>vB?pVOm80f>5#KN zQ$FP~GqwSOWT4kqQg4 zu~~H{aq5{*mFTi_a3FFG-US}OdD_1(`t0|uNN@LNC)9O>2Yo-`NOOPWb{S2Xx+JBv^SWwVXOE6J)@;exz9? z3}EAjhdfx3IPU*Sx$E{Ze4KEmLwnnlnGBI#0cxhKu^R0gN)tdpfCQ6MQ#tkZln!(C zy=O}SaTRTs%2awfQNTXR)z^wb#>Xc9N5dlejURFVU3kiOg2M)s*?Pwu=$y}ggjq~i zntj=5dAoq$YV~*}kVFV5DgFX<(~#>9@|)^*F{Ax&q2yK88EsD26Lb1Z7*V3hxu(@r z4>wRGq~qs26z!tU=k~}}rhKK?%UM(Y?OQbo+Swe2OH}P}xY08)yw8KU!14jm&k!n; zj^nKf@DF)&+s+`|1FylLaBsVKeWqLjcJslC0cF<8pgii5Xc{5>1U7YK`9`a!#|&f3 zIsApHDGX}~YCtot%gsw;!BEk-nOal`^wj(v_yzkop) zEvys)pr)rMz9Y&%glTD_%xZj-lg?_!sHx&Ig$5bvdU!p1oc#k;1sq{g_m7N*48A|H zO?-BRKrc1((3q|4f8x}VBY~Ul>P)66gZt)_U{|HQUL3vThuez{i#W;Cmko{!zMw}X zXX&lWZ1XkN!cxXKdW$zQd)TR)d|cQK-G0WkzVoS4W?^}JGS4r zx61GVn_BKBG4Lg+2Im{1)=rE;Y}&fIA$q2M6mt#@StdTo_?T|dlSLra*9`aXnC(-` zX}up3SnqzrqC(tstT0}hM)vl93Uy+7g(Ai}u;&Oh`r=TGo|VHwoi~lBWt*p7z@Utz zoda(`W2F!lg}Ut$qrO4{V1uGtJZWFes8m#(^sPP>NRctGiv;BFFCL?r&1PgJOO)($ zwpX6vL&zZYqMWlq#gFQS&u2|N_4opQZachh(@qr?BNCBL2c)Uc<)2+A?&H-dmZr}e$TdLQ2zike$thJD53DU|3?)$eZx_6H%sh=9Qt zE7fhyX6TYPMLfknVRw~sHjxV+OEDgcUiYPMjuT8OfIfJYEHBg!?Y$4DTFGpxsKygO)Gmn(U%F3XFa+r7RDk9&XW}K#|C9{< z9@Oc?{-GsZwmFE3EMMhP>l=|z*h4fpMj+mtTHejFOT0~A{xEoWY{>JobdKx3})gu*UqC;5z{|j~bYN$~H^S?{L z*9%y&9?flOB%p}3w5r%MZIn=@y$r|he_4P6ZkS$u&*O#ATxK%0;!*OMhS5|$x&0g= 
zhgrdeMLGcE?&Y9dp>ICUsh<)k{JBorN8m;^-)em9vFQQJ41`pbbNe` z5(Z(s>FhNafMphFSM+6byZp>e++pd(*}N;vt8cPc8+;~vB)XON=w5`QWiFHxba}cP z<-F2owliG?!?VNm;}&8A4gdK{8q|=_{fb|wlKykG!c0fQ_s!vNva5IE^rUDDunfR3r zv;OnUnd?+l0ciWJ_fUq2x(BCV+#`iV&S)3_>{DUsXptV2<+1p9aQ#!fikaA6=RAcJ2$(qE|H(T(|k!l{L(LhfSIsMZk$#%O)O z5!b-ct!a{qF)kZ<9x?&AIK5P+mgO|7%Uh2*>@!{RwfY%1!$Q0EOoX4P!X#K0{W-FufX3AK0GCz$OY}x|z@li__xN(-60~wX;7MX~vUwPtdj~F+ zaUh>XU!LGca7>2tAZp_0_hqr+YO$Jn_iiwzE3JOx-ERNC6_9s+*_R}F4@&v=8mr(= zxOfDBy^E6$LGjD!kx_rfifyDym1(n-XcK%aF_Un~|H|Qd*D0X;`r>FJ(w=2*#O{f- z^cvsmmDavh{9`evrMA{Zrk%h$566bZ4D0P5d%E-7@)M`{3J?*X$_I*%>~LtFP~BAT zA0{~Z_LG=6-~SJ<^SK7fdP_AH58}GXTl7;WVmm#<|^DP|6aF3Lm zKqb?bG6^POs#>B9dL!@eKsL^;uz&7S|6eUFJjovbM!U&HdIR5i$Z^bKHlI!IPM;&G z$pag?Glh~sEk{(jCWmX<`xtkd@c_~#D|4e^(bNJK6T5Gou5PL(6*l$OsAMtt6_O+& zR;&zqht>TFy`m>K2&SXX{o-gy;9x{zUq6^ia$sw0%d0wULSK3g-hW#2+JrJ-!&vpj zArVK+HMO)Wi7#I|5VX1Y|3XTEV*pRmX#V|T)uAF^w-sN+1jDb9x7 z_s6*CW?xpJj^{D(1_o!EZ|+Me!&utW=aE0icC{L+kw(wxcdNMtv~bE9ztQgoC2WqD z!OahE6K{HY!&?|E=LbPy%gc+oV->Kv{kVzf#{wUgBiNRw^h|p%PtXoQ1OPvSx2O)Q7Zq7Cla&e)!X8ZN6A6$PqPuI1T(vxy}25)Nk{{*B^ zFT)u-L@)n1{)8*k7u5ySIR9u7T3!g!c5q;vM_q)a&}8K+s7_}x*KfhRn_W&Dm_jQw z!Nn1}F3LaAShnv-y<~c(3I4Y<&mwEglj5ui|Eat z=mvFP2hD|YD^w-MfWn4mZ?d-Mw!0jyrFD-EnDRc#O>7D*R`^`G3og^B$vxU6>CxqQ z0@9>8p+e}_*;2%cF5^i~X;7LBV!VZ59r5BY3}_I^(cOfotYg|+pAgD8cMc(qxteW2 zeUS5G=uWc)t2@IYE#upG$cTdzV0)@6aRP((e&4V%cAGGcDX>ySv=zX=;HhOK|YfWUCfQZX)Kfl5BZ z9({{+Nt80dZ3(c1bENq4=-F=O_`h3`&RJjE{Z15v@ZZy*tY4~ma)gdR;21lVvzLSt z!4mOMyTc&%ff4!6KQWJ!QWVM}Q8Ym%sYYLr zIvsdGA5VevyMEJq0-hEPb){oDHBFB9G2##83~NVg1c<^(rt+H!VTrkavkkltK9}O% zVME|f4+)0ab?_l~C&}fU5J?2c13nuuB3n)4P*?B0U;&@#mpaVtPR0$6bo4$ue64-x zwG`8btNM7vo#cIY6zNY~Xu2WrEF-5@U3EEQ46~{@sOz=#bUY^&Ek=*+s5VAm$aolP zQQSF|?85wSL95pErkraf;F!g^BWPf0Fh0_kDgyw!$Y(onT40J#WX}6J1yXGrBzTJ{ zx{3TTG&Ib8a|10Pdjq5Q{Eltxf3b$)Y9G})?($9lTru3NpWiBksV+R^f@y^^zP(TT zA<(g4Ic!{!^mg9wt^(3yuz%ttihUcor>v>@h8fs#w~*nF>oZ||KyE6fMYtw9orC3m zT!KEtL_IRiQ@ue{v}G$d^6;XKnV+y&YLhtxs^4PmTTM?nXKj-WDETkglWDw=LrvvVUa#&S-h{@r~+ 
z3EE!0mk3~@hTK|aDLUwR&m;_R#!meh<3AVmRv8f?fvF7i{^w?@Y5ce%GN)}m^;O(AaoQy>3BqDEN8HypS?sMlliKRN#M0N_ z*+X4A#TO4poFmN=4tl9fzhjE9FU8b9?f4S~!u&c1WNO?#ycr}GKp^W@B);46AoARN zL+($@OQ%TA_;N?ebI4_HBpv|2#D95}jnFrkRiM<1xY3tSq0Na$*&#bKPmK+1r>{Ehq68n{$|ksgEmxp$)8jT zZSXjBkb^zyX|&^wU37al;iG%Q+pcLNt)ZoJIVnFsYZ0mog`}|Tj(rAfexbnwXBmM~ zK8vTuf_;ZnZFI&T+00MMAvk{k8xjDvP-eKh<{WGn7XpcpSx~7 znu}5YOfJ_1Ej66@0N{ea#d>+$vQ$7RdXYH^;Fjl^pOIjj-@+_k;GMZMAjsKkoby$z z`$negLk!B?d0u^u1Yf8z1%LeHVpIUoL-DDNvSs^1M9gnbP^Jm=8MGv$yG;Tmk;?dc zS&_9xFIw%}>1=85QlI_$dvCv2HPL;mAe%CHBS5qwb<0d-o%QME9^-RCwCC^_b86S3 zU&?P#ETg>d@p_}Y_k*y796q-#t-kjhS7Q5t`GzyP0~~%+)LK&LKo_UT`F_B!QjsN- zcK_B5Uqv^~V|0)S69EqntGr)tywB^mh`)J3*Pi^*6vz*Yy!b@pBy)qaQbWGD(?2fo$fGpWAlrkF%Qd`db!HV%3uA zFpZZ6kb`Y?Q4_1)3MF`mi8th09+1lkEf<8>C$p?IenkO_xSpT-&ZY2}#$pBxg}U@j zt^ZM`qYl9zRAhF2qs-WqI{)tW`a*O))#7L06Q{3LRS4Hu%&hK^a`2(b1 zlKUEY#MSesBXa1_+P3XLREp6wxi*_tk2EoHF&PT;Q|lx>@}WAY%GQ}K1$WoN(j4HU|Di2M+s@T<-y@_Bbl;u5pdMzn=#8Xk{MkiNWj|A$XW=ye zp6gPVt>V=XXC$Q6&gli%gA(*d&kmL( zt^N?iKjLr|{4*h0#U{Gaq1|$;W7d7T%CR)h>G!V04JNSl=bHQ^&Bw>YLCY>*%)lzA zZA;5FjEth%{I1N0Qu(Nb?WuoDHFb7+aMB`E-D+``K53m_b!cd_`T9^=QALHv0#r~d z@0Bfs=^-vQUuwoL+{All4rfV-lzZ{IuMcMw5|}8< z9h<*ZGooFx!~@)V8!m!?4ZO)k?2>u*JEwZ~sh0@l``Gs=&Hmgkax&yVO3fxe(&j#P zQ5aWHi+l=M2tFh1YnjT^=6itz>M+cYvzet!Pu zSk6cKPTPrs7{^pT_Z|Jj#KcWW_`u8HD+s-!x1vuK=MU$m`%;6ghOz?wzLPJHkG zI;jZwQ9tK+M7MUx1pCaboh{M&#%2Ll_vLw`S@%7+?MbCyGll95gpc{}avRo;z0#ij zK9(bgsJ2wO5dyZe<0s=gwVou?-+W`ebN?X*+r0QEG7>*AC1vHHpJ8;{x66EMJU>;^ z$6f0Hc^n@xQ;~YL za6}5no;?yMIk3t0fTv+ox1=k|n4x%)(`f91)@wDsaSw)e zoAU{xr7qUwJ2B)9=Wt&!8Uxx_BC;4K&V|N`Dxh;4wgXY`_TRmHtK%n<2wbph zEkZ^>Bud|uM~=eb>kFeUCtl(Z6}*2U9hv)LgvdLc{hqd8HyEe)%uVSj1zPyZ{hm_a}1aC+7yxEK>OUzsc05yHc} zuG<*MV|}vv;#Hl=YfYZI9RAfK)z9V_c=7^RSHIwHq?#=S5_#XaQw_K8D&^3xe2eXW zbAsx4r7SegDi`1K=(T94GhIGN3Zs4h76dikyg>%*F}u!tqszVVi>b&j#Jx*p#Q2t0 z+kPTR>kn{9k}Sg%=8zPeZ;tFGM*k%IWy~C^?W3f&%-@rLt*Z$yZG5T)Oqq98p?nqS z=hOTl661N!_hh;GHu!gCFtjSS4!V(sprVgRX;eXqk@h2Hq*WY!M@5J-i<&}ib&tyR~_zc`GFnI}iJylaE 
zUeYML6Z#xS%F(SDF>wxjzF8a|Zot zA`8JISrQvV{n6Vc*Rr8cb+Un^oYa100|U$yMHd27FE7?GO&XO!xgkIv!qPCT0%+(aWRGq=I|1;%&yg2dTlLY+`?pbuqhYp?=q<|)7}JC*L<%$36mf+&Pd_{Y80b` z8mzU@u&;Y4mR=1>|EOjDovC`g@w3SJ!-2N>ir@1!weidCHcXY>VGr88*TwMb{FRQL z-o~U=wEek0ine@ecvU!mbq&r&Q z0HSLf`Be^-!Z>TLv<=IYr=rE|uTluD#sp<^?auOrl;$M*x6_L`>}7*yiae-_Ul!La z(8eY4(6bj4(=cjq6uW3*JXm{gsy>NAp*cb^CW*-Df`=&_hDjcX2)f}s5 z0q>sne)_sTP|Q79f(=kcSs{@;`XRkCP_h0dQaRvT7ZyA)N9~L*B^bNWEuL zh*tC(As6ylPqT8F1_KwAEGO~XIQXame%SJE1J^%T``)CRK&b>`d-be++_2BTQ zapB><{@2VkY91eqsX%WqPfr!75K6%w)Zmejaj%>Y1bH z_LWSe@L=*@gq}OX(g2XpCL5_U7!N(C zOtzR|GG9Lp-f}l+0ieO3Bp9lIqY+o^2>5@;^nDd^tLk&xH)OuZ@W5Xrx&sw0dF1w zC5KwrqbM!pL4_SwpE1v&tT7q9E)_PiEq$L}=bQh<147M(7^nn%I8IC#kq7i(5E91g z+EQ(r0t009wuD7Nm}#Bm3vVxni=O=U*PPnoNfLPMvwR+cobUA`Pz9j z)`vYd%=p(|^1Hb5R;Z$xB(@$#z*-fX5|UPEEr1&Za+9HS2Otk@Ylj(OgwH0c%4j4S zc6Pix>a7usEf?zP&;T7 zhI!?{fc6xdeLq6=`Mi|tc^?O;0kyEy^0lw-x6!60;dKHofkgezaNS-l{esO7Y7@~+ zDGZJgi9Gm-k&Z|v3e+ZWS0wgZK4KJ9k{?yXJ;M?@M?^$)%B59Sc1~v42 zHM{?22#&91fOohuLz^F9$h+=- zKluAU_g$~?%>K&)Yzt{OX~E@BOqdC`*dmm824G)Q`Kqx`4Clm;x|C!Ss6{+sk#WuiQwn1gt_ zR{^VxG4J@~6S$@)ojWhVy{MU`Mp1VcuIPZ(tgciWlXU(KX~%9w4&{hwgC=kepmOH ze>lN*1Y;Gx0CjwR>;JR!{sBf7uylCMk1}>8!c+u=Gl5C1TW0by1GS~5jNyURDlluQAhW=?S4h; zDGjpBz(R7DFaq$X1Z89Uek3#EBe4|qZyA1ZE>erM<^ga`QO7HSC-OzS{!mCb-Easb z{Fs{m9p3FhTmW-&t7qk4nm zPuz?}ZEc(pyBYypTA@LO?ZuQ}+4Dn(#aAwP`oNHWbF3wtugBprVjrYJ?zOVr#EgER zS`oVG)^ltTnF~{DNrg&^X4?vJwh^4kKf#uRw>x z$o5UN5?vH+j9AksK}}dw2AUYuJZ8z66=#j%we(eKQhwsXj&8Rew#~cs-5Bos#MWYs zS9GVcyAq>eY=iCg+kg4L*^u4JCs)cBd@TagoBR=%b#FPoH(QCx1*7ph+sh~FFbH}7 z?2BvjM|HY=%r&)P4Bk}o7kIX_t&PQ5?aeYG9;|^V>=!n8ml~ROZJ|QCAgUMIm^Yg^rx%!HbH~?hcO|Et+l$eFZIN3HWBsPf zP8ToFCd4g;E~{}9h_L5#UX~xqmS*@(NgTN#F0(d;Ld9@1lG$3Dk0OhncUrwq#fV$BH`EDb6UCMd&5}-$}&;h zLFmv8W4}1IVT^P=sC;t{J};t-B%bEL;Ya3#8Ha`w)#HI$jKqxbKa+&t?IKVjB_lQw zMpB0RFblprFTA)H5bYzj^fZf>N8=*ZpB%xcZiHTe31fZTJuT&HD1ZvyYYpoY0*&K! 
zpo3hgO4f{P2QF>917h?^m6(_kP@xLSFgZDzMXMO6Iu~ z6Y?U)u!`fqG+m=c5dr%} z6@mWWneM_>Jq3$`%AqfG4+vbSm=)`CFA-8)APlkn3)=uZzRs_w6t{-i$Ha{D9DItke^Tg8mxogX zFZc@9t8h!s4Tqu`&s=!xMuf*GQ~f$dVrU|U3ysMp4_P0aCQ}a_pjpIMDvX2*v)Xi1 zE*%kU8kc@A&#HwpM3`44-ps*^ z%Ss-1+&RK%@KLf?Wg?F@+hQoGN{SV3y5@p^U}q@ts8(@A)o}ozqnVrb$)^G>MZSMf z)3$?+5PURkRn!m^{*I@}e<7CHJE8xj*7o{Cv$xFJwf#tLAW#^gsYw)3fXR#nWu%6m zO|L*T+2-EaFgsYr``E)_g2rVY7BPCsnMoAgjG}}o5w6vAtgnrS6`04%LPk?4y@j*2 zdlEUaZ=+Ly1fIeN3k71K@{x_w9ED5&BGfe+};nSy6HEWR5cckW3{3ph9W=>LhFuo>u%^rn*=vf6w@x zdWN5D{n_UZQCA>rmVzuUfwZD*Mrh_gQWnVDpRU0tsFT=EB!QP!%@LG&4LYw-8aq5q zk9!*z*wqO1*!{ul&2?=vz#7ORC;HO-NFMlZ$S#Q0M1bp2mC}HK`(U9Ve?0Ufia;sezS17wl8KjMqUcx&I1y!%MN2b zfF4=?8TK)N;v-<|Fbe|YJ#+)bf**zR9K?fhaA=Bk=AuK`G1(K4vzmV09wKrVihs5o zZ%NhkAZ{4|4$fO*D-HH2N{AeLK_`oKf4X4|_l?BiH|4dOnC7ie5L5qDJ2E9`7}^#B z^{R7ZlpC}XwV~NvVWvNQ$FJyZ?Z$B?60qv$BEqIP_znlwLlU78cOKX#L>&>^7+ZKSqzR)PwZ6^in z>lvGVo0bcS7T{)-9@_`Vk=dg_sqTSYyf7ar{BSZM2wulEsedq8vscw`q4N%yn{*=u z$I?Pkh5%EWI!c|xbaC+ou1S%)RFf1U$F0XICFa;6vGCMc!meYAsT!^_**2n&<3b&C z_y?|%yQWKlnaAI2X!=bqs>=2EZGTOO_?o#NhS%(BN1{z}pN}Dse>6?G{gr`=WfnAE zS2<&`5XQ1>Q4e&y{ERW!SDB-p^AW}}B^fjyZ?0N(2D2F)}wA8s|@g@01OsAfYk7$FJ>`*C6Mei7vfa zg4LsQ%#McEL{Q360B+7`UY5jNi6Z^tXlAp9h#&Wwr_fR)oe z#S81G@X%2zQGMe1Q}9j^fjp+JZoDTEt~3!2r>}Gi82kadrE*i~hYcm(jsIb5+?WEGf6?+jGrGlk zn@B|LkNeT(Dw*z?RaI&x~ zuRl!mRa~HG<0vi2Xkt3U5q)#gsrbd@18`}c!Sq_)=V3K}HtLBTmh@2*U;;~Dnth)L*#08E@K-FBgrcYgoQIl9s;lxEBelA6S;`2g zJ@5q4#Vnae7h}*?HEjzOLt~P!i^JecCe}xJ*5n+FSDm#X)56nV*0K3e0-NBzfs=6@ zodg_VheofGm7eia`=t4AkTNl#?x%n2)8R~6&EVB=5jxgN<}Uq`bu%NM<9^iT@qqxP zg}Zslu|(*5#tLBdYiX4j_@LJJkBndQx+8hdq+N(S0-Ue%k+)7qv#6p$XfxtW!TCV? 
zDa)`gqU`j?Kqg_cupeAwvjv`;y7D;*(GObOH74(` zQtM=s)6wN4-@w;3dbaF_t%%R(0MvMp0b}3nSp4(^JolZZqTinPgqo!C+H^jjHc&dR zdAR^_F46kif?wY-{t5Lf;xQDKY{F}j?OO-PfjyykN1YSl05O|wFr@ZJI+DmV0t&9+ zQ`6>=Y{*Wa_iyihmIs&>ljpAck31x!BOL^NVGxyR$$e~)5Ok){YPdvufjMPBVbBG> z5q~E~+ft>_} zJd|eSW4Yx73(qg6!cN)hA;y`|at=*@h&014ZHg%7FV9{}l@ljQ1?~qTF3S!e1ya1` zKQ%9(%B4RLLwyfe^(-!BcWVK~#dL~&EuNb}WZvUXnJW%s0!^9M%0m(pIsXI=bbD`4 zz-GDLp=$dmpW+W#fvjK2IIwJuT7)M!jlMc2kDL77JrzHSL9z^SBQk}ZQPQVodMu-+ z4{?m)vSVA;8&U`y(CM$VQv=!L@pPq$ICE>;h%xuavzoTYBT+(2?y>h4ipolctt596 zSE0_Z^TTvB7q(OsaZu=9Yp73!mBVa}NQ?aPF-@uaLaI7_bfYDPlHzT+6>osA;y6&g zq3pO&XU9usUUqCm@!<+^ySIS#edK7Y8$egD`6%k+*h0>~>N1BcN1bQdU4Mv0Witd= zP1Q-muRaxz?l$D9M^XJ0TfyvO{K|M~`d>tj!`MeV09Grw{q?3iiD$y+WzTIdEoctn zBcO*vTMI(VBL>%u$>*lZC$y$8?wQvNxob}d$Gw-V2~U;BHKRv11gO1;uSUi>7oRtE z-u>2iO$of`qw7xgf?Y=k?dCPS@KJ(qBQ_MIxG4>xi1m3?;2yXftkNY~`{7!eBklAr z{!HvAtyZ6*R#s2m%hj`xa%2d`;Tww)r~HHj4sBh?yE5Xxji0IcUF`gFNdxbGVgr{e zMn`_lg|g!83jZ_*u&1V~;9xZAM-Qb^MRo4Yg-syX;v~S9wq#b276@#4lx<x+YHL?TxaJzyeFf{ZKHBEQ!uEZz2~%`JADj3 zGq87wkk)ZJ(>0eIpDuMhCW5PEuU%e$sgiu;M}z=UhYk5jWajT%nlN|`7BcHxf}5+# zlXevSXrQ{m8zRrLHWvP>`Ic9g3ssjB*YU&X-jYL7VT5T>$f)=n$5$JIN?b$GiM)nq$|`Q>&BVUyavp7zp7c<+s2$|rY^IE+ng`Ke8x zP-#!A_)wr0E9$(M>9WjNvqJ9&V0@c=s=E0%z5tQQ`WaFIV6c3@5l?4yxba?N$f}U@gwh0?!G+2uYcHDTF^tK_H*z=s?}NvlWa1T zc*iKWkynM%aRNGJ5gU9U4L6v>ueE)67ks_M#=lk+7~Ho+Ypu9|nL!vSi@M>R72T9V zd&<&X=T`m3O7N|~l|jbjrjGTtk8-3V-tLFp3E3?<2d5#5z;}GJzL`L?VO|Lwc4ZX; z-9b6lJF*FmQj*`6J3zyl#rubCYoHAR8Yp<_Pg733eE&&dQajmSwn{U}(>GhortKl$ zI3G}LZB2g~cK^csAx~`|01S6%0P5!!I}&KV{|VZ;6Om?p(W1QPH?IS68pXi2|8Mja z1anRs;e`aSTx_Fi9aB;FZS9F)Krr;pk%->Wy^#kNnk>Z(t`%`0P@iB{#sFu__7ZD4 zuUoK4Y2Ip=BLRohN4S<`5@HCQzw`H(fj`vB^plh%0&G*B+iJtc+8p(wi)diE)5pxz z=~05kI~NHm18u@iS#fp>@MjpF>HD;ScRDG_E8cYvi&9r7`qxRaoSl*s8`d(=?AbE| z7Ea*3cWy(!;A(<3#MH9uWD_LJWwI0Ze^NAUd+R7nlx6B_xG>p7Uq@K1wz$Dg@FyL* zF*WT|ymWiEcpRZr@wx~>? 
zvCHM1QO~1nB2HoGGOZ~IaloxgR(wzUEuUcgYnPU5UK`C3mXgM72rORLJ|~JAA+4uN zU=os;Pt7TJwk|uHWAaiy#*cmM$;u2=Ga+HO35+N;LL~zH8yRjBGr^)sK|km%>n(>n zO}M?cILtJ63ehK_xdsWj-AtR5F*ZT^NZEG911xrmrfZLsqTP-ze~f%P{9xu%5{7Kq z>R={$hxa-uq5G3669NAFlMHY2;iU1WO0d}WuMWqxaX~FWYS#U&u+D1fFuH=w=b7~+ zHgk6{MJ*YTB;c4KbR%5SHF`loCkmx9wtm zj4H@A$lriXwj_4n&oVplJ3bkhO(*131nA;_7#YCM7f4P?MWGP$jI+Nm;TA@m{e>r+ zPNjy|-|mlgU#kJoX2dn0x7Jto_`>8^gluo~f^>Jfe8r3i05->1vaAk=$j&u6S-hwh zZF0*f32rWJZ$2J)lr%rNr;8TpCWecDJ1GQG7ppXi>8qB;zC1o zTl0WzkwzWLs-(3(jTJ@<77g!L6I3$EZ3rBI+q!zzeqYulABf=nA%H`j`BUw&ea>wp zVkA7uk5!yNvLlr0ghZ%g(#y6xwxhNX4%4O>py1JSK6@zFs;J(ON?fe-7aA@d&Itv1 z6|hnlcm~F~OTyt$HS|xW%YBprMd@W0YChebu)1HHt}qy#umAnwZtQlk8OnV4{&^sC zD;jwd*urzh3YwZU@d8xHYHc?}JONR?0kErY5X=~)(HZV>L;xw}9mH)1x0N)K!okVO z>9O{<+Gj`a$Mm}!w;X2TPi%+1^hm*(;by4gxgx!Yk zz1%6fna}CMq*k5eT?k>vMkI2pXy3fnjHVI1aqMLSbVg`Yu2hc-%7z$wFo4d9gK4Km zB`S(ShRe3-f0akLK_D4a6H{bWM z^ze(VK92$jT*;JXqj@}&Oc5`8a9id(X+@4H%z>L2!7IpgqKck_T$wB~cMl5!1bAvm z;zdg#C%!^Zl>49Oeu;4AowI3};M4`_d?67V4sbfA6j@Ys*v^d_d#GVRW{#x^cN}El z{qd%Q6z);|p+)YKJ1_aRVLgt>ejq32mp`?$PQsWvwqeeVvrf{0)fd7PTk4r)Mb(UH zHTqR71iTG!32SlX&;-6w1OB=Sf)7T-4#TWJlwnmrb&5uFF@Ne5NBgA`8bF0fZ_f16 zPZ_z7zNkQLnwG$uK1uLTfgk?OJLNJ>{|hFYFCk(NS0i`r?>}ox$)@`^kPdG?M`G~~ zy|>paeKkrF*Ow^fO zAn5_sE5sh&+r2Ps>ZVl}gdS{F*<(%$UetF6vI!!nh8~YBOZ|7dgULS+qyEJWrDMT1 zFJC&tZ!xEbw~GqWCv^y?HUP)#I?P>FRdoZS%!#>s2(Q=Dma%kbkA)~Sc^JPZK?h92q9H4J=E4w&<=&kn#|uOC z2AcJ~)=a3RTqwHtYja*15I z|Eu4;js5m>A^G_>#vNX6M5j-Rxcy+bBX?E&t@EjY9Jt-A!Xe+zFwLR_rIFk&X|9CV zKAC*l;fFZrm1L8@sk0)5S+<~zK9)9+j#XatA75@al*l}GE_aC@!gtbiR7N|Ndf3o^ zOhF zm+Ih)ctDW^OvWise;H=lq5MiG@^VQK2*8{6{BVlv{y>lGoPBFSI+hPJUX`{C$T_ z+fJQfXh;si@l{1{FsvxCOvtEr!q4!?6$Cz zC)J+&Bf30CN`4wTz;qUv^jhL`q4Li3QC^m-bjxo%YuGw0Q{;AI!GD;#a1NUn%K3O6 zGmXz_(k9}7^TwaT5S*bV0|$Wgj2%3I~Obl(1AD8<)l+d8KDH!)Rye5mJ*=43M0i*Er15 z0=m=nzW-8VmAdNdOZgP@Ma1&=MMKZ8TX}ZtLFqW-yVR9@8k8(~{y53KXIrUF3sZzA zJ^LVZ5iH%ua1Pq!K~D^wk3AIGsqkVt)+z&0f9a9Naaw@zR8xPZBMX6$nGa55cAU67 
z3eoPfOQaYACuhC`HKxRkIu#W_)<;!V=cK-iE<2@k@khdghp^TAlb6ciNcwYnPN&TgShl!PDYh`+I- zOgZMBCK=KE@;KNEq;|cWPP9ERxPDC5JHGi$`FKa@%Z*AIBNAZEubxf!e-=Rf(cjTp zad(0%;zxIz zgu~g%o)(d*CgL!SVwz;5ZILUiw4W=}QC>~Yc5FUL&_v?R!*xDgUT_i+hmn?R6#i)e zz;JzWw8|_%e(}XYBzs2MHE+uxsR+0nl~m>eMB*?2%SjX05H^tF zVI1&rBk)F>codYP@+id&9^?{0{d4_BD6d{XB=5fRW;Y1rsRQ-xuhTa06PX#<+?VoX zr&$>Ekzib=e)uAgn^P{-MW92Aw*vhnH!VuJQeW$O@Mg>f9=2mOEv-A#-F9|MEiY;TQ8Q*=!U^4p4C-`ZmC-2_6)Fnf*DjD4zjLEgVE4fP; zFl!T*O%I&68Zt{@ok;*KyD8Ku2DFl1h<5eKy)qoQ?YMRRVM> z*Otk#ln`K0Aa&Rs9(d&M+y&BOvpG`2ANqQf6?kEN9)@%$Q$~Pj<%TA_iHKQ%%ZgKC z`gb0WTT5-Xo%n|B7+LMUy=*7XaRlOEuksY;N3Sxv)fj;o<=G2qFv^T+;&@HPWEy^; zG)a9Mt~``isIyN*yUR7|1{q3yH!~D5-ncfES|PF->DATwayz=2(1LuILFY#fJEdIV zCB64;yY4Z>$#9mlEjiv)Z9jR|jFF0I-T%>!f1}Ej#OI?vJsJtvpg1v2S zX*)$wQx6Dr0qjh=FBX|hJO_}`wv*ZR;n|Kd_ZI+X&WgxtG{c`enE&uWd_t0K(>A+k z5}DnM-A2SZ9P+ivx_PoVtO-BglmvYV9CR(B92LyC#t+=eI#pI|c35sKQ1k3dK-G(p zIx8&53&yo_BRBDd2*<~N=8B<&Vs!JCu<*6#x8i2B5 zTQJ$pjs10H4xpnWM9~E}N;QjE8m}j1{|)q;S7SnRs5DJ3XJtfRf$?MNPU+L2{e(DP zmdn|yE5OrU0NK{VZ}&zJVTIV~sAQhYeoDLR2}whs#sDpE4u59+=4@@PXUz39QdV6TUa*s8 zUROV%I7CdL=0s@IloXCs@w-JD%?8zwSuc+8fOB^y>Idk#Y&!DaOwRb%mkZ07yd@`3 zo9<-T{lBv$VVUo*T%%=u#E8KVkwW0C8Ta^r2o=Mer3M3M*pa`u{*wsKa0_Cmn|Sg^ zkbUi}&CO2!i^=9f+~JI?6DnSK&E7q?f-nhGVpC1YMbjh-~>IMk6 z;<)3gr2-!EQdD3!>75t57@0w+pn60#g49_q3ItNrzguyK%2lDr~P> z7$ZoWv^bkH_<06Z8EqoqLgsTf_5miPLvg72eLjw?Ax*oTcr=OET*5} zsR6=Q~g*ZHXdbKNrA!I zlNYp`u051>5^oWleLut~aEr}A70jUJpKI)@<9`~EBRPaBP;PtVg#(aXGiu_?e;&>t z6?;Zc|Mfqg`AQo!%?L&npj@T?w(tJXNbVXFvSUkOnGGyUe_xB;u6E;%JRqI1>crsX zKZ#Fgae(Dfd-S^V2WU7V`j@=E0}p)2-n!DLlcEi56m$XlZ-8vZGk*Q3^ko23|7D>; zj2e@5;wG{VQiDh$x%{78YSf-thzyFr zN*R|}U$aP}dO_)t?4qnZ%{@i5ly|3aW<7KV8pu{Ffa>H1#`g|}e*;}c_=hMp8sV$- zmm}J}bV2av;_UT5bT~g(Raaj^=bwIJZb3SyTLFl(y59jM>{FiY?lj;&j81}ZuDPY~cKSt+njR)k???Eyzi;6#pOHE9WbdCR0vlwI_7Co*$KvPt@Y2u%ng z&tzvF;&i{^XM`DV;~GRp)Y1bLTk60Nj}U#Pao9=2Td)}N_VR3xHj66uTa?m(zFYA3 zg{VRKq>4KX(D6G^0jPt>_%sl8XH% zi}C!KHz&x|$3xq_6wx$Ph%v`-P!%4&COs1(3WirkaP)Ej?239@PAbl~)dT=EOhQF< 
zMw+@XQbREjj@Q(xv;2|FZc=xIVd}3y91-@SYuyBhKKdJ-0qJ9XeSlzf`lt@fVjF-{ zHsYomGeG4~hQ_lFYPf5AS_l4yp8yXP{Fmf*a3=?d49EO9VfMZV{oScTSKIwnb6)#B zHh-&s`PQJiK<>g3foHJ(t^;HJ&}))baMq@dfoT^Lnpe^F`=FydSv>G7lDXct^YvmZ z-4{eO10QJL7pP2bP#DPf38d#qaJ{!~)7YLkF?4VE<;d=>uD-=d{fkF^Hgsf~3N*Q= zRbd<+;hyJ+#p-_ZFs3B&U(jYn7n-3%KCshup0)#^mIe!yIp7KpH=3af0FSA20IaX4 z_eCY)i|aY@KqQ|*&90!r+`#mcU-oBjy#>Vl-*;O;Akj#`6v0sma{9~HeTcEXLjBn@ zV5v7o%JbCc^@{A9!GTJp42mqA4w?yytkuh|CwY8H$Rm+N&*2et)Czq)mgt=mEtd0w z4DnWxe1xb2DIKj4tZE=<9K;0-i!_uZv{Bs|v4!X&3F=t!M6j@-+#1@>(wQ8pjweI=lQ|}lrPV+=uZ8cAL3jnfCi>G2@o^(c`t-|XV!J;wQ-g2MC?yNf zoAx`tAQ)TlUHR={YiGk9z^tLKx^AI<%b$aYAcz&1UG2(kVe6&>$n2fv2X<(N(euNV zW^T*n2F!o<#h8G^edmq?W*a>4Y|l8l#Y|@*pDh19DmymeP+d-3P|0*ADg^ zjpHc!PL*>mLLPKf$&n5d^{5kYqoow6-L5}FPB)IWmZ+JF;rP>+3mfw=l5YpWJe z;*FLw7-O1M9)SlK3V_de5A25X)&l>HF2LOE>!e%u9|!^f4b7ivAviL_@cY#8?fYfU<7Ckm%3LWFx}}0zwDBzFQz=-*B_6{O^h>u zL6!4}@oA>q8x}}OF^{#CF{}qfl6yWdPW_hY?DbdW1wXbi+0+lwvBo7j#gWKZgpKG{ z&R%_y4*1kR=kTr-CKru?eAvGczb_6@Z;BFFDF7pQY+%Ush7ojS7L^K9A#fmYA#fJ_wnwt8UvVyZ#y;}^Tugl+>fSci z2mTA_tAu}Rf^8c&hA6Xblty8Rrv9O}S}WBJdnrFEVzlvQdh3}XLBo$iPN@G$%k38#w%H>>BfXLAX2--OorO zi%2+&o|la7c|H2xQx$06IZ6U*AnIUCj{qwkRW-G~-yi&AZn18WH=?!yfafoX%kp0^ z26C`vVy*j)c>@pw+Q{@nGpKyU|7ky`bYZ-Kx<{BgR-nsyA$O9?lC>iNX=AXc zpfJa(A-88H6OA`HZi@gTaxSkar2^aqK+P5!TeZpUJB=B zYO9{4`qd%$xt>id%X+1$(iew#mfv>0!`TD(+|_n=9x(O0w7-P?l|-PQ+Zefj{jmIa zkos6Vet793#yUWcS5H3`eHj|zlE5`Mo~Qojnvu1Jl$)ukct97UQG6KNf2VvAB&+?k zKGD3d8o}x+@!~0-ZGjBjhsP!H#@CUFqSrekhW#?ztf^;NXtddX#@Yq7^o06i_OuIaNd(iH53TQZNGLG#%VuzaBbPAZFB zIzK_ad@3CFM>)Oda~@v;!{v!Q4`7>8H(Lcl#2PWV$`3pO_-{VIw>mA#TW8DGck=(+ zFdR#?>LX4J%gkjRJu;JyjEN&-<20q)m#>1Il(QayN^~{#wePpK(4Q`k;>8IW!QWb(nMU7Dod?5 zC-J<6)-1g9W5-IS!+iTRS?ntdDrV4gLTJUi3t)LNeo=6pTwCpo;XjrXtsg?BQs6tF za{jxZt1j4RI+5LS>khcfwm_3i^%j%K28Q8r_-CNsQUCe=;v6VU{0qz-&?&p0JQ(f8 zS{)uFi?Ma2DkMR=ZT;e(WH}+p(+?x|8CPubcFNDM6I%3xM+%il(-d-hK zh=Wp2ao71w;Lr1Bi+z{J)<7LO#?N=gf8sYh58TE1bVZUpd2+l_m-*h<@mWt)lcrRT zn)yc~J4tiwv2SHoY`ah^^hF=2xb~nCfZPYj56iKy!V}{Ms_dpMhy~7sCkV6!vSeTd 
zuyed2(iZ=7{M%&*#eZ!|8p(yqz;JP?Noqss`4I1IEmb38NVl;x2dkit1Wb#q-Q&is z5m=&2g!fmq`QMZXDy+Z<{xn0_Zku|%bcL^l_Q+p6 zxYlnr@TekTTSh$t+S$|Ld>x=6HW=kIm!R1ni?kj4o?u_L-^}v5Dzw#YE&e8kBL`H& zHmmOtM|=|SM&Hb>(W++1c^EaS{815Jr!rpO0awLd<1|Ovl|s<bQKJ1SjOV=GyfC`3(UpWd?0=-Ti$Kdmgn=xlXDMZ^4B-e`k?PzXx_*2 zcNOVp_xz0?t`QVRm+{9RrJ#QOO>DBhiaAGvQ6`~I;V#ZkB6@$x!`kcLZjZELizLbn z&~W{mi_Fc(Z1VtJGiU+t<&P1F$E<(tnTI4QTGsYxeh(-@i`s1Svpbb*^jgEuliw`f z$ydsvTt!5B*x(VlS#K+{froWW{ zDF)9Nf*y&;JpL;j=(jeMXjwfWYx|$qdc_aH+liV=y_(n-*8*=(a!2gqN~oRnh=cuUn_%LE)9Pt!>!jY z5KC-_#UD(*Vo_pv4}!=W!FD-_eP)Oo7|Z|$ZLs!z?_*#Xuy71l|m1uU}R@|fV0ld}%x^2^l>s`3L zV1#%Qxbi*FdWQIF?=8PS40GtcTrTW#&*HELb+N1ThTf~yz4x>K-?v1*B1+`6sMIT- zww!xoTp{w_vD+q!Ho=$sVEeC1$AyY8+G%(?(ItSs$ZR|+M6{gl4*ztu)g#eqJunea+`1H6?6}_F`l*^5=uz*7bFb?8#j~cd zrpmzThyW$^#K#ReK0EL7y$#k99h7ByGZwOj1W>x53@ocX=IMR*C+s(3|cbatQGf z5vSOpbc`>v*`g1m*_S*=zQq~YP#83NWnZ%mU0qgvAOR7p`@3A_A>Fq+lQF|G?X=XI zqd|Kl*rqOAt72k(Fcwf1yN{VkZ0_+sRN93ZrI)!)QPjym3jtr8$4{$BnZKg3NTeHX zd-9RgC~JG_pD9^cF}46QISWwP*032uzs}wMAm2kG$t!&gMdA9NBec;nt#q}?b?fmE zPrTVaAF5gZ#c=V{pPN9Y$fuDED@NAq;0PCu+2<(@iex#Ty-jCKM_ft8a|q~>D@cW@ zDF=bNpzA?gZfI?IDt$s#_B{QYwJues?QXD|@@&w&cL6NY0APyupQS#hm$oT|e z==FR;$)BBXJBek;m$F$~s4BGG<=yS(qNgPW&hxHnk{#$@#H|!e$c%lBwHiOp#s9$| zE&%3Dz}ALg&}oVrfnD-)Dc&AXV`OAxal177$LtVH*VWazmt{>;2aYZQrxNe=@qrFO zpe~NJa*rmB0mi}RrTrGp_=UzmJ%1`_jLbngSZ%#EZl&ZNEmgn`<^B3OF zU>UqV+49S(CK-&*zGM)~kHl9idGnntrdn~qU5+!ar$RZ%(&{_izn_e>Lh}{Nmqjc6 zMH4JKSB07O?ezvAlEK!JuC9P1oAeYg;K_l<;KIqxT|Fs}!v0sv4WN3yJGRU@Y%kqc zI}|9Khw45v4XVnEKTe*z1aD5 z<8fj3vvDWf_i-)*nfVccmBebv4m~sn@cTNc6SblLMUWi2=vY;9RRFjj+w);35eVy` z|I-T(NhmXz#b8l%Ho43r1@8QT-k%;9olq^Vu9`jVZm_eduXO>2cB-!_>y$!ZqnzS|uk4pLA;sN>4ku z{%MKrAx&HA9CbjP>%O@8_%9D=xa1lSNpW4XI*ybg$>?)*sY*C8Zw%_j3h)`l|D`WY zF)#^eB=BlFPPp__-LFhzV@l~z1h9-5uua`3Ogc>+fRTK1|={T$62;|Q^wM~$?DYajK7}Zh_QTeCF%J(g3p%t<$kCn z^A&bGG%ae8U8N7n%vfka`6BimtvJtfeRO;+CaF8Z;njtry^>RyqxrU9nN*Avun}JS z%8nF-LI4fy?w-p&+|QIk^K#y3x2lNUyp`&Af_Ww`y56G)A~*< ziTpla+JwS7^|frsOMJ6hok|SG^R@b3*;rN9dJ!u-wRwg3pp^ 
z^fb-1Npn6A6PRez{_-BM2H*YDFrKdqgaQC!67TWuO!w<2u{?yQ2H-6@9bb-syqigV z<=Bw?2}%Bj3lY4<8LMOAS(9Da+@)!2LF_hZKGdlKUPC`~jtr`z99#J4*o zeOO2JzerWM`$9XpLdrc{V-{v`YsR$hrNKL+SmcP|+top;p8y1_mRg;j3i^k>NWC+4g8s8JVjC{E+jD?|mx|hyu-G?m*_dfJF z=p%Z;&;vuk)Xs8bm{q@VYE!!U)7r!oaVU5}-z=c~tk!e=J8xWz5%w#E5H9F_+?{pI zB}T91TbvK3ttdRbc=zQhHq?3_+kD~&PF&C}ysryrJqk%Eff=+;VTsM`=LPlp`C+c5 zrCPw7CiV(9ifs_>h^bKsu1PQtj4o9uh#Mr%_^SIc>u8F12+=B!b`GmSodFIg%t89? zeptj}I{qQy^&Y@$jK~dn%L9laC`)g1Mb!xZ{Fu3 znkn{NZ{`OP7*bjK_&38hspK;nK7^pFYff+<)+=d4fT(&;y|}GDcr~jSw#gtzY;P7l6Vl zKDlL2EniUVD_|Y&lFlU*bF=W+F+RoQ0xyw9C%Nc5~V z#I!7+5x_*KPry@)AFczd6%1XdH}UeZ%8tV|Lh`WNW&sz9SP+I`KdatqW)5Ua3Myfq znv28B1+n0c|oqOyxLmOZNVg2CI zzr&=~TP{FfxCZ-V>?T+2)q7Gz`vxH6vH21JO(CC|9Ca{yfzrQGI3B~V(zLQTm>;+O z+l|-|$-J&HLqqyrOKH+wJN6Mfe_BF%5m zN|VzNh{=vf=!pGjwsX-H+sFEQu)f`6({GF2$b)C^@A?O~eYxRm-135u15L-jAs3r0 zMQeX7{mBukLF?2Yo(2?Wx`Fl=uP7fPxkWzz7cs;8sHM~)BOZIEPGzO%pW4dr zQl@5w7fBBRFiy=bXQMLbm#XsesQ++K+i_v|%2-3k;P1mpQ2*j4oGVAKNx`Kz`AUo4@PC(ahW%_60l(6$1t~ulYfnbC-Z!{}q zIw-PPCa#r_N~HZ|W^lv@F!ok^>0sE}v6W0%w*qyiRmtr{->ylPGbl zlWng%?EFH6EbDB+kwxP2#j(f%l#;d7sM?bSrqDtU$KNNJ`|v-?hK(K-g>t$yV7oj! 
z$sI39#J$#P>QpI;u8X7Z>*!mXqyK|jfK&?{0+-KbV)|d3{0~;^tZ(~Lb#ViNJ2_6z zp}%79EsisWXSkFW^uJuDi%D0o*_pmK+%Q6bO^x#BqWBneQy(F^6rCw!Qr8~GZ~)>PbkvUK1K`1dyIq%ZOk*# zesP|zqe6u=K2~_Er<3|1v#I>b8}jklnVG*Kxb*7f2`I!3B+GbE)(^L4wKqvuUJ-b5 zP(lfm5j@bqYt}R()0Vf41^XIcyaDc}YHo1oM#SnD_urC7r@(<+{4Y626stN+nA=xP zVrz*l?Y)|%SfVRE6ESnSTJ#lxtf941)``Af#OeIri9?GYBztPqQr`v)yKP4OcZ;5N zwL`6|lJ?tqA;yjf?kLQu;hytF%j9T05BUmRs5FJT{RG|z)&7bf`=;w{&|aK&m#015 zAuB<)7wn~0Zx}=EE&Gb%a*e%n2i_@FWWi5P+}T_UpbV)%wQqxtA@lV5dT?ILw2CrA zer8L>u+vH2eb#cN0BO}#XA7kL!RCsX6?-+SWn+vg^+^gA<-%%dcR3@n9G4I5idja^ zv@|&h4K(!D2&~a>i?l`f27cYy)6POpIAe_`atwYN=k1OW^;clH3GyCWXItD5%rY;QT@-?ba!_t4NIr=8c2zBEgjMbOG(Er zrGO|Qf`ot)Qc5kgbhngrEGz<&((zn>`u#tze24emozHyc%sF#r^4%z|QD5$S571+9 z_@WO7m8!pn)a}-;q|IK8Bh7eP5GOu!?e9SJ4VEa*tF$wlQ13V}8iW(Bzkdk}KlPwa zV!dq@-|OJ0si5#%Tu%3xCq#~{9dMVgz5@;`Hu8P&F($wMS>qais9fyrFO_F!k6&8} zYK;UZg+1KxN1i@d``WvRQuN#051nboHjz28xRD6|?e-f>sKo)G9aHv zM$T2&lcX`>H9QFvu_|Z*Rm}adP4L00niu`?BlCb|<%P`*AtDMbAkS@Sp5=5JxT~jY zG72}m;++?y7{#Tlj>S?djUL6|hrl^oLf1x;8+0I%lCwi8ltXyAh#F97(h!F5IX6{d zV(!)}!JCtx_UY*spnFh+ZNZ6QO3t&EKFv6t^}Z;R5A{}<2g}Yk0mckBgrmDJMo-F@Sfjwvm+uq3OWU-&0gQzJr`(}T{Ny1{1-QB?l7Y0)}qVyBSn zkaL528>hWciyd<{lYab`KNYOs;SRULc_-EVorHF8MD6EhO_C70VOEnLSWyrBF5`Fo z>9!Esv)OEExRkYM58xx%0sq262>x?*NlGme9 z1&JMNh<)D~W=MKp*DgsjcLe2!jLni4?YDqJIaYIra$F=&9=pvlo!_6^3ht{TZ0~;} zyKv(kiuo&YMfvdFfhM~WxH{(eCNIP?VTpSmy$i!k#jM$=)DojvOHV90c>$Trcepwf zw|bP(Xw$eqP#YZJZnZc)t}ov=e%ALaGfZcCpYQhr1cX@kb(!=GDc%-5aS117?$$}Xj&9ceW1(mPjDX|r29@iE7do4y z#1y(Q)Fc*$;kuPQ3)dH55W0uP;(r@?+d1+BfHb6lW0ymivsl^Pk4p)>7`DCLfZa*| z@&Iwu{>3-MWb(St@D6u0K%EesWTYr}B1jjYm$fgk*SAqeZT7B1 zJKI#;G2`1s@5|{s(<;t&z3T?SzX@kKFkGWFL|w33Lo5e@R!xICifocd$t=d;gT{e~ zo%eHy`ne_wIjK?saYni3aJSrG4k%sm(xx{^^M0)4SzK>?!t`l(3EcE}C;v{bvw%4pyYb8e2@&f>&jrWnD*7Z5@(ph$K6xu0 z5_`w(>6h2YYnwS7pVk?%pP4mbi0AV>y~Sn=hmWkTnRXoC%{|#&Ob1_P~*FgYzw z0dMaQXGbDu0i!q7+N~}ii!r6BB;Zq(i70u%p z&g0iQP9y|K^}Byw(lD}GI&sVSceX|tZ`G;h5r0M7xZ=)O{{xfNIu}Ea;wr#?$<~HV z_G#jKlx8=Lh 
zcI4>{IIrk;WbT_N0iKXHd}Ni2&mX0=oNp@MzDsSF&(pLJbvV4PA+~#J5OQK?CUA*yYjs_R;w_8{kT@MsM zH22qnXRpsq05LE04WEyy#*B z0YR-k#73qogHS^;MJ#f>1~90fq^A=jSqD7mc(+Bom6UVbQHF^bN11c=*?vM|Q1cuM zBeaFiv~WFgU5%x8SLjZsWpfx%2Z{R4%Q7g3Dp&y8U!lhnr1gW_B}5_>SBcj&9EDzI zPi5`JV#f{0?7Ru{9o1;D#ZOo;?N{3&Vwk?;4`j_wei!pV#n5Y&4w2d6Y75@u8JfI} zj?TUVhcT8Jplh8&0XZD+hJQa-N$?7>Cb{{|xd#*U8l@2$!)|rrYjmd8gGQzM+#6(5 ze6ilN>s~vH_wXe{TRWW=iWqnN5ZI^EYpeGUO6DGAA z*ZyLbmP=ZJB(2;2U-Z3JcleaG!HYgv_tDtQd@t~uc{2@4rL^>oWtibx})V4nx;HbAWiABN?i#P0gUFe5tlA&V&(Q@2sSC9 z`qNygZ2NN#=TXkQBF^%{Jnro9A}AKqo9BMbrrIhJtrZ|fn4_>W-b8}^qlL^tw|VAO z=XimU_vA2@VEp{bJ+A!>8o%by{?k3gRp@iI^wsCtf3nYAs$#CU)WPm@o8^Vq-AY%q zeEnl`LY8SSY-bIu&aghP1pq&02LOUM8c5Qv^P*lSRleK-TqL6W7yFt+bd(~N0Gm&f zq-ntV(9wU?n_~Da;%7gPRmasB< zT#|LQ52urwU%hZ#=%G?}w;p(iPXY%wv4Rncmy88VgAa!@+K0Q76Jm49e>H%!)^4{r z>pj)t`;;qq3G>AiRN8h6JSE1Adi~6z*o}@+Fm>G_>dZt-yS@9q;P>uK8}K(wX>PMT z5NnjYx|+TTF6(sR!8^-(N*odBs@I8$IwoYzgHz2)NY4Lv>my;G;};G-jad${IJSSI zrr*-^&nb|}2_{Y>QN(}k)zv=TwDHQTuk7jmk0&F)A-{kN6b4vmb$ovMQJjq#uqxah zX``wReye_$K?Q|lMaIxh(*Z0*r3A($W1Esp6>fp)I^P99F`Lf@bNtfKh6Uc-0e8kt zMCCg5zyrl0q2dyfBzDWs}Pm9{aNtoyx8^ur84|F~BQGCtUM@pz@`RhFG(iynkOr|(4e;g~oQzq6MkFG5IibV>ceYV2{-vDt2 z2k)jd-Y8~*4SOm1$wAXY>5FS(krIDza0kJ;SqFd&3i!3T@y3(z_vL;bFDDD4JY{J( zY&S%}qJFosY+!{D1t56qgVioTZH~9$XJZhM_XF5c{eDR@@NoU5R6zFn$2DIdEw0b6 zp+5bA{l^_eJhu@O<2tm9%bNipytmduH-K)r6OO-@>;k2~z1|Cx;1Fu0>cGm=G&1y+ zBAIiQo%gwo8f8iVYf5& zaHz7GPCPC|fIWUNH(^0KR}i>^$6FL0U;TK&NT>Kf zi`1@`o_N-pGloge$Yr2NTO{Ost3KWMi$w(W$;4I4{SJnX2j>q^x;&2Ia!#QUBo>HY z62_7LvJGo0c`$#-QcI9Xq50;Xpxn>%ZxM}c6CF=rpp}vo&mcg$=GD*tDZ{7ziet5| z^;-DPqRIj1GaraJo??j$M}EL*)a2AuwQndea$!16|8{Qjlc$w*>rcRyFZz#9ywDdB zs;WUF7NZcrW?WQs5ZFDklXO&TY?c1COw=_l&*NC?H?t_k$-Jv9$y|`tjBk=+p8h8$ z>@-;wSm8G3FM!B3%_B70X-Ou8JT%KEafmmCe~wo3%g`61p#GH>4ob#&*f6e zH_Degtpju-^PO${yI5Avge?Y-F7hM@DLZ{5MgGPUsw8WG?5X*1D==wUMIMG1?`>Py z@{xTK1YdWf1yqa%Xvn1BBYt)p-5x*%MY{RHpRFL}@rV-_^m;?4wU4lguEs6G$#^)L zxQ-Do=45u!rvYeF0+vF`xTdxNgJM0%k+2&dBR}4|QS>f=NG=jEgeCE4T_anXzVi%y 
zh!DoZZ^V7pJ3{H}p6A8j$Bf|JX*g{Wb=zAJwO+QS{&1f?jl}rj<2;lZ?13|fKT?aA zIQoqP`Ecb0BEXqzW`Gz~N%}ko6VmOA_tY7h6>yv8_08bbJqS~yN3<)(XJ_nT6AX0) zyf4DSBrRqig((ucIbLfqRUIN(MSi`AARxD?;LV^YazU6ancR-O1>EhHQXUXXKhM=G zSOjS!!h76A1SIbMulO>k>YqKwS_yp>XdSIb%V^$*&uT;?Yd5NU)*7Ni?v}0mkU5u! zB|=qdp&FEcom{m}c4v3&X>w4s*US;%>Hl^@npvMPz^m_mfb;KDukrBgm8Fg+QNv1! zfG)VXCxtw_Fh=#%HyhXzC_R5fxC=W=ZL%r!BYBj`&A;kt^5x{y&9Dy=Buq^kA#kbEu!8xh(8NTtSTsou) zdoQn|VA2$8s5p)kVngh2FW1$w3mM|Y!G_wJz>*aX)3>ZVqr367;%F)lV@Wz}sZ?oe z?GY`vZ`|@+&@F_~w8?GX^qO**Oe`N9OUthzN}3q_9(LAI63S8!m24t z#9+Ygj`HqJdIEi9KFOEw{^gVk<#vy!;@ zErdLoxm^ZZpM~P9Y=R(K7*t-gMCPOR^%g8|+c`_uN1+Yvj`y3;M?{aS4vge&dW9tH zr%~(e$+mMJCZ`>Clo8tS#==Zp2bGNOek?;n0f}{fcxI95K#}2wU(GkBQCK`@knvP5G|0&riBmVm8cV%a?RE6nJGI0Bqj}HqK_g-h}m% zdRG=4{6V%Dasm6xO;Z<*OV?xoOJ-j#Xhn+!Sr$tl@YA}!=-`ZD#l>g!lLe;K=5W{; z5rb^P8!o0E1}5G$_A&rc#Dl`hwsn%JM<}l?I42gWbG4+_Rst*NVnCGa74eoJYDA}k z$k5C7o_!J=VQ7O{<@ZXdeiqT!j(jY_l;wE`<6z37yq^fmB@>yzpV0>*LYB)!5XASM z)HDmxPXnl(t^ZY!araA}p$z>9nvN?k-P55Za>0A3T)rrQYuuS~ee$!l;TYsd-7K*@ z%*SaAZomQ{d*B1v20WkiuLLa)S`L@`U-8f1JD$nk75NaCXePtXW-8BU)U$(%!arT4 z`!IK^g*IO3@@EAs{KDaIcvIajt~lEJ&jv9sBqqN7u`h~CA&_128AXDy=fKT+rWv2| z#?E$0Nl9e$$wH3m^l&4&6o;VwTU-IhoHW~15^r?-2Vvdy%9oX?_Rk*n#C$nc>*5x- zj09mi+Zv^#52|~5V|K1BGFPDQrUUOc^^X%%B9U6ck`tud_#_LZ0YYSBCf~gp%}DEr zCrBoW-=h5DR7jmozB$fM)k#iizdx8X77@u(_J6f*-pWU>bfKkcyHOx^irv|&c=8QDmu6K+bny}Z0Q*Vdwl*LIQ}R6X8S z+0%DHeoTZu_N_v^jzEPTy8CmRG`fd${BeGl= zQ*%IPb=J&kQ9@7hn;;s5E(!?yF2ZM1jf7;c>L7=@7?NEXv|n zKnpKGlHRa1Y+jw_o_QF%{%PdeKzqInx{Eze06T6SFVY&^ugJ`4KIkh9IN z$u3EOOsjEDmcqh(!oTHQsW<{$g3}5ouULB z*PpSJA+3(yOTf7i3CNdKjk&zmpYyh}1(C}@%7b$S-Q*4NpoE*6V$`I&oJ{dFcRbrN zaGj0B0Xs`?Q$+~Yx0g7oukM+9j25b9-m^I@10Cq&xH34v#bV>cJ3cdHW{DztB=0mN zUrRDI24r%ogy?tdc=9|6cDR|}GnV68F^qYW>ixW4FLC+Rw}d$qo~_5zBgxHw)%Mv)+krbWw6Ccw7t0Rk}JmQBvGT{jfke{ zdF5m@99>`1?%v3Xx$qUoYd!z5uX1~<(O~%J%)>-L&mdu_v!|`FLFHWHycx%Qn|(f zuSN=8sSUTd&jH$*v$(oT^P-+p>wOi&?cW=S&`CBh;N-+O`qYw!*BekFs_A}W{Z9*!F5LHdn@R_r^84ziXC4m? 
zywqPF1JA(!H);F+#OC|74nR`|h6rpl`wzd0FX?ZveRR~x{sAs zVMaV)^d-t(pbLXQ)!=nM=B!0IOWpvq<}R8jW~UIw83<2jNO3!fY>?_`iZ;T%XZ5+8`GfoJy^V;N)RJE6 z)EhddM`Y~iL({ge%EQsGx5L64{qHdiFFBeFW7K~0C2nf)WAm8<$`Yv+k-4u2IDx)p zvs$)LeV{{QBCrAs8_p8eSJ|EdErlB(oW#-nH9e(Dv{9eZM#ANWYW9~$*KF!Menowt zloa)28^9V1j0n~w;m#BE?`CvDt|ciCt6*Kur9Vd zd=h-WhN3IMR670JM&wGZ`GI5gdHvh@?HAwzWmbJ!TijWPFd5}0)}*btv|+h&MUk8x zJt2ch;FC&mlHA!d53~p_&9v2-z1q;;|HQ83s6&tUV!evL=xvc2M4TujBvOVVx8?RG z2#>GfTQ3L44&cn>1xPJbfTdyHKa!-Xc#H(je`d;+oqkb*VK~Q#1kU~G;W6VV0=5Dp z@2o;YAngb3i((`+s{XFCNv_rhhm*-#pBRttbJ;o5 zdj@7FP$xFr1b60|;XuyQ4+_>=pa)V-r7_&qd5T zGljfv6U8wrpnad7*RGpYIg@!_s-w7iE*EPZI*|#r3-`8tfH_c4lcs>GJ#q9~n(^t&PV-EV(hiUAplbBo@1UHR)TV9cp`r3Fx&;OWSMF9hNFEQ0Q#|~r* zm~txMCDvnyhRNzY?hy>qGbu55VmjO?sxd@G&_sfI{COsi_#{lg3f&)^PJA$1-Ep0l zFARr>MnwS#MZ}pDD-*6El`SH{W;yM~AA?m{CTH2sDlsn-UB)L5sWQT|V&G2_g%idO z(o+NFEt&RMnxRkYyMu0jiBBTX2+*b*=+hbs+L)!Bjf(dujA@X!T@DxNpX2!ZwQ60y z9>Fw%m9}9zcgPT!G!CCM0!lSk`~vyQi)ln|Yei zTQMHIWdsC|*aGje{GsVft*+yLzd#8<8(V%{eQbc5O#>gt&Uu-r5Yd+Gl}A5T-|=J#lw**3Fl$1P*gL9 zUPtb=glxZe*56nsfH)I2Nfz&j8lw;6Q#>^p^BJnMjy6;OwPM~9b)Kt9EuQV%YrYBH z^`#(U)l1y7R^o=Msu_pQOiI@c6vw1#MGE@Qke39*74O(<=P0p$0DKP500XW(V7a|$ z@R?T{JOH?(HgGLx)_#xBPS_k@z{;0lC;i6eM(u@jr2e=f%N)MIx{az3Z$-x~^Ar`!z?_j-}$1NH-dz6fox^gSz% zMk~*ykk8{^jtXj5HIiN4%o*m5az`Ka@v!-E1IZ&uXP~b7dm|2Xd+HT8-o{6{S2g>M z4eaenW8H&mRMxl{_n^OUsmU(6qu3kfb@3|Ck2b%4YeyjDd!7q!yr76BQ#>@0-8%j{ z+X4qR;N*W#9vO|VqyZBn%2HH2NE2e;g4qgQlOoy``0lXO*7AmH-tSqvCU{C<>b6|P zC-R-NSNx)pLJG5$exitnj93!+K(}GbSVue!Kl382VS2)eq#r zyy5H$mNht^zRm-l2f1u$r?(U}q>PgBXxx>4=H@oYC&tpWaN;#QA`;_Xx77C1PbODg zEU58*?n7BB>4ndt<}_lc=~sjrNtvF@xOc5=bljZtFYDs1Z@A`M zC|&&_zCh}hfd0I9(Y*^Isp-tCVFcDdahHsTm7cK%TZVz(9MrtxS@YEl%eXu_=BI8Q zlN6>$d$$|+SCvuMUo8)8c{^Z(>cqi+fBJ%WLymoJIOyYdLuOC%Jz2CCMz3&^g0&$L z6jj5Y(*kOh`UEAlw7QqhEwm_eoY6FuT>3{M*tkuPe0fnbp4PxZhNCp~xqBt*cY^h4 z_GR^D)-pHV)d!zpIX8?$#nXB0?db|9-6-zlooz?$m{y#9 zlhxq#j8+GX3`1`|a60WUs75E{v&^A)MxW3M3hPXzyW3sDv!B-BHf_-Jqf&Z{&6<7Qn$f#&C3UhPX+^f 
zeLyel`|LAJPVWRSnY?fCQv~*tgtL>WUy29SMc&?zk8vVoeuXn+uS{NPTV3rh?gzlGY z70+l=;_lu}AyhATqdh<`#31E9o~~~=|IA~QyCcm<#an3G9-FmC%H@PEaaB_hlzs>>?BlLKng%3Q_zAqJOd~LkS)qL|26$ouofAszS zyL^`h%d$bych+KuVnPxxAHJJQjqTyc)RL^Q__o|!OwFsM^9ehu+#(23-C&W4T_Sm~ z)hA*3f$tyGd%&-|NqqOTbmYDNwb1vk+vx$#CL#pCeNmgge}DOfbz3*G<)yy2dpJu# zjb*GaKYY=Ui(L}`ofmhHD}L<|TGhO-MSkB4 z4G$3K9~!2TXe?tm7Zp*Jd_x-eBTslMImY!qPIR2!{SV>K%5&0r`?p|=qp5z!UZ$fC zC;rdT;sYd%$y<)jd9g9+cI#pp*im8zTQ$isd~2Gq6fw;fB#2e9*hh90NnN>ei$4(F z2l=N(Pp-VZsdg*o^9C3T@-_$|hyiy%1x-IgM1$x(|J#8wC>Nj$jyvoK0|4z@Zc;P& z#21&qGPHEdcmdaMZ;C!~uT_F&!k>4h#yqFY+-#y2fg|)5$*(VNA)Dd(Zp`1uJcj`7c$wU^p$$P#?w*I8Tu$I2JIqvX_!kR3WW(pS ziw@Rw)ePQh547VAJJ{N+dL{{wDp(>i%ZEf0qnf0^3nu;l7v$T=^}`@O1(NS32Nmeq ziSeW8aO**Ki^9Xx3 zCXt&sRvk7y8Dxki;R(-6;E9yXG|!~gq#M2s7w6l%vzawpNun?gdyf$=kkFMO&=+lP~3(3egkY^9FDro zHti6*)E?$9&`$>TWP$46Q>_md2WW|vhq?~LwDt|oJD1j0Jf zjvVSC4~z4%Y#IF0gyO~TOA8KqBPj}~`0Zr9^G0FK=Bu^C`os^8o4jfaD7i-_NPHzs zqGFQvdqjss8I!=8uC6Y9`R6%W43}-Z!b93$9z>Q~=#w<(4s3J1^>pD z0ot4FQQ8FYghJuyz*Y)a(%^$%@4Uq1Pn2F@^+zOjSok-U0 z>{*qal5<&w>o6u_ez{aJmzYJF@E>e(|1DrPo0n`6I&*rB|I4W4PFSds(+d|rqHXC<@i#|&npXR>h)FVdq6zYK?Dw!A}lqGFXmY&Zh6Nz&DAcvPMu%g1pn?P*jN!-yw zY;4)(m0}!nZi}@@At78h0%J`)sRm`Ol}daTL{@HL(RrEhQ0yC-J8OYk*1tv_x;9+L zuf#F!ruOXzaKU`9y6k=)7ACY@L61L^%ec>vqQgN8pT3RC@y%|<*L0!!aho{E(cX>P ze=n5pD~8ZbVwSC#=3D;>(2D!TFwTQ5JX~wc2R)l#-7vV}z;kUL(gpCl-OvOLl1z*? 
z92C9>nedV>mu*B;vO-^TE$)gPw4-djx~z%t?+hf(O%R$^;#s9*I@x@EtNWH=RopQH zP9@H%p+e8>Ei|+%t}+lVP}(Xu#3>kt28w(n6e+pj)nX5HG5LMB#=G7q1@7&VyG(!i zavA}WYaS&=*c-c*MuEKl^(RwHHL$Ffg_MI&7YD%Et~Z;vBm!b?b3d^7#Av9V9~}BT zfHrZ#Tw@Q_K0NDHlasYLetO5tXd46lahjno3bag1#H^D}V#e8`1s{M7JQQ~dw#hM( zb^9I13FY)!Ig(QPX7X!|YSe*5hc1r^U^S6xER0Fzskk=pM!D4_4YcbVeH&B&@?`ua zL7;nAJn9W4VyfJ%p$j*@DR*GkNq01rvubyallWiI) zlS3ruR|DPXXUik%_u=A#3Se0qk`gYXCmFal@w-Y;lpflv*w24JLt?$MtDl&3roBaw z5kZlw=R8S`n7IP+8+4EXoYr>NWwWHAdGke&b57Ig3EEIF&3zaP;+%s6W<(rrZU6Kd zeekc=_$Bb}2*&*5ZYsN9^Kmo?0#fq8;_JVeqB=oc@AZ0a8`x&#%sC*;hc|z$QWbmv z0hDJH{o3KpLx77WKU#(v7AOag-fInC=x5m0>lI8oA-7sVHuHKk6$@-Myz38sO5 z@UuIu&jyiMBj)!Cw<@1B39{s%MY@{J9inZDJ4e;U*E^C6--3-kd^Y*o#AORlzhm7I z{Vg`tmaqSoWHd#S(6D1xqp-{rLZHjPb3LxTnUKmIFRo@-L;8RY1v^)p{_)$j@e&&!K!-sl(p>`~3u zz&G9dfwi;k{jg`?RKObdSvYIcI-5vS+6Y82teE@z67MWy57-$ns_ToVh zMB7spmmckp_L9ubIVVP{i@KXYCZE5$eK)g_yCU}|YAZDL|phpENytX^$k``Jv{@F|=x z?Wy5cp{IjO8lMdBK@T{4%eP=8)sp7Pc;&7g-K0J(SI-9T9c>2J8EFXb=#KjS#7+Ns zp6L3w`FH<^8EyShsvw=4{JG=TD^Z0?|AIZAx+b#MqqFX_;=g@qN2j@!K@sVJgDg!X z1ZznyOs(Zd`hi2xro{N)aBRJ1x3Pm-n_h;Lr?geSpT&8RVpo#TzZ9%zHYO zpAe3DF(>W-MPu$m5YiBemPcJ_`W`S+h8_J^;omLxpC2+^ z#wAY2jVXy*>^d~)7i~Mh+)e7h4^8t-Q=WD9Fk>&_uit-mSmzCis><TdB zC71mUv*F0nvP*;;v0$yK<;I$V*kUjVD`jK%bW$8!&ms2oRp%-D5 zd_!^vD5X+gFjT?3VwFS!{fyPU8)uOAAM9f(U>rQZd`^7dHCv|KafCaokGuv;l+7`b zk5%Ss%8V1+_<$DP`gYS)8g}+L6BQ#v+Lfc$g2Vfm zF7dC!dt<20E2E=cp;SnPgujzM8m!6DoEYo`<3Y+~v}DbV>fwZD-cQosxqNO<>%K^x zXgPCR{~ouNULT%PFN|%ZaZ%hLYw17$q2I1uRA%3&N}>q<#|oVbikuPk?|Q$%Mz&us zHEh}fs=3Xw2_*r5mO)^!d~G_*;hLpj1wF``Vs?gau!u~0MUo}_js#DH46dtDUVF4t zr%`Ee$Jlxp8x|0$3C)VyHs-W3L%BftaIG#D_d?f}x4eZ8dfY;fPHd^N z|6kY}%KZ+erA9>K1z&w`c^*xPj2M}LE?Qab-7bb)t7YF)%-o(+4}rWJ2s-hVLuw*3 zhV@CRnntoVyV?>U(=Q!DnfAzHiY|Xx2nb5vaYLp`bS0nq_2n@+#&K=ae&u+H$=cl` zvH$q}C^w5R?=#b83%r+AJ%irhCUnzCf|W5q7hg6~2&md6xsErLBZ|6fmb9|=r;Kx6 zy-&wQG`&DHo;R}mFL4wMw3XWapypi4d_NsP?Fg8ooBX*B^Bax@6 z|LR5N^2SQe?tJU4@2+t!X!V^*Zr|K%$IgGB5 zjxdgJvYo%y!wJrl{@{D5h~2I5UB5W-s=V_0ZJIE1U1?_sWVi=fN$-7_5r}D{vu}Iq 
z{7`QCq*zCkYix;*-BZP=m#9ayO=Cr+J4P_3U5_eXG5C%fnGKowwONUQ0H5u8p=@Au zk%3vgjq@*$gPyy()Z1bjq`r&Og|l3!FEatOOy=g&q4ykC&DVND_M73g?^%IOwk8fy zpt_Y_AXXGT8r#yCgH>BbrB<^Neh;>i11@)J!+){lT|FZQr18B5$W<{B$<>)yvtOrn zJT$-9tz|Wj`LcOGovBdX+kx}_Y{?}eD``9N@;t6aaaLC*s8w`jluzn=UYI9|+p=gMt3%6lx>z6=!f~J0*&s^KlJBfzS)&8HEGHcu+l1NPVXWwwyI% zji!qC+V>{WkX}Q9^^gx^Q}4z%K=1?tzdR|D5A#L8l1CTLch)GG*{A$!WwdH*nP#SJ!e zc;C*%yBFHt-b79^Z72Iwpg@lcM!~6xZ7363WQb`@_fqT;G5f?gNqrgL`M22~2ZR$- zjQEfVZz5j@%6#n$sgI)$yVS`-T@m0rD(+SYdC|~MgwSa}RGV|^yCLlR>4zEdUwG;E zV_CJG`kFo&X~E(R=iU_3^tkAX3PaznTG?u7PfC?<7gB-!(NtpxI{V@SlvUp;%Iv`s zD-_G?0bw(Ah|2G3F!FoSEP`FQodw=%8^N$b6;c?cG40-wTA?1lAV-Ih%jZmwv~aOI z(2ftfy&9B;rJkAv>@JxsuUIrp_n|cXa8>dQ=8o51r zKgQm4)LkGB19?5KKZKXRJZ<>w0AFqR+lo{#aPvkQ2dV(JO*$ChNg(?yU@tXSJlGoF z`jCq(of$>d-Pa9))H)2cfO-%>nm(v^RD8x2K?c^#Ugn$p;Kr9Fj3Z?9kYR;_E=7~* zyqZ#JiFksdK@;v_YDOZRmIZps_DtT2rQJlC9zCw>V41?F@+zGsz6CT?I4H`LR0qnq zUO8$^Fe-`DDIG>8}!~)dt2QYX)LPw)Tiz^0L%7rXMn!fPCC5reQKjWn_+N#4Hizp6F z0<~*$g}!7a8k^Nnkvv$t>iRYz`~Cc;WXhK3vdFc)6`qz*2qd2LCl9V4+CmHS@L0dB|eE8SMr*aWL8Is9w# z>?L;!=>zBdetiO5+1V5VRG?+;xy8c*g*4`$3VKRI9di4jjEHsd4ij-oD_Q>&e7lD& zN_jPfa+VlSIaX0#HwU10CmHjCJEIFDKm;?p;vNGY>0Dou{@$!{>mqx=mgp!D!*QS8 z1^kVh1Tw=#q&~?0mfpjZnQ{J9Tlei^Ls`Rh-av~%`KW_pxqXvWe6To(ilh*i=7{7& zjHlOKT}jtmakZJ}aW|E(#!bEJ%c{Y|)g-)HRdP0*6P$L-v}9ku6zVzO`N5i}QD-6I z!IUkS*XJ?+GK6GEt#P zTDYsC$!sphmt@i=8Y836d?#`cp>tir=fM9ka7lz_29wk zXsQqBdYDe@vy!kFgoqAcl57%RG+c!oCodLMrK!U6-} zw)N)q?2d)Qj{I04L4k zYZ!Xv{SswrWlg67yv$ z`%j8-sH~hAg*=RF;|hdn!3pS4j+)yY_u-h9w&CJFooexnx5IEo*J}MPYLW+93jTGH z+8VNN@$L}w=>u_JZ^4zVA#D0>`AE#O>Ed8RyJiSz6fg(Z;6o$UoQ6jog2IS_lThEO zyD}h1_5IDitP^Y~8ca)!n8)aR5~UnyVHGz1iUZpOy7O)7>}^9jdBDw-aty+^g&LzB zw(G>4J8<);`_M0=UqXJdqEk0B%k3?5BxN<{yomW=6GK5+M_!+0r7%?5$q&$lR^r&$ zR#tP#SvorB=T}mJ=}1TBK||8ZCUBwDkxkoc3#Pqu@B2fEYphXkymYM!X;8q{kQU)bl+^a_ff#DxmB5>s=hh$aGFs#7Si)fIQ!0y zKd@@K?Ji~iV%3Of!-OE7IOqB3R!`8nCJdw!2dI{h;$j~A;!?5h8QOm4n>r*?QNPIL z_q+Hj47<-2$R~2bUu~(hy-gNMXaPa$mR#X3`@RP&eO%Vy&8ovkyq9Vsh455T 
zv-0$Xg=!6AaIoCRyQFo3cz3qv3vJ0V>WA@pBpB9C3nzxZgBvOa(B~S(CAGn`Y1XY{LOhhCBFo*DaZwl$VCqnVBmwXLx^?Puee^Adig_;N``$`im>FMo1TVG* z|6fzr9oEG1wL!%zpkg5cDj-EEfdr5yMUbLE1f>}SLX;xXivcO2TrN!#=}BmUgoJJ= zVt`Oox-=z-G=VEUARvT*wB(0){hn|C+}W9V_dRE3-r3pboGNlzDo>tbFNFXFjkv$uRCF^q{D5DLm< zzPIs0+mQpl<;#4+Y#7R=<^ppXNhdjBiR`!8b@~1~wi2hZs-J?ieR<~uSwa3QCI#v= z!u*~#kGs2VrI@;^)M0sY*iVij0G}!yRX08 zemXG-FA%-ixZq_A#pap4OZ%?K_vAr z)LU~yayc;!4~+kO&+27S7}IXq4AnJX+E|g>flVyUVvR?{#RRG0c@^N~Z5T@<)aOLH z^*rng$}y@f#E^Xod+2;)B;$#64M(HH;KtlwncbV^Hxmu}hiNaF3IyO{4avxmPXG@P zgYj;8l_n7(7o(BvZ2MQLs3`n-vb`0T@jD9%>-Rk4

    *kdw!3gU%!f{_eeByur%6A zmL@G!?N@)5z`qb|ylS==0&497N4|Ib4B>74fw#+9x}YmqI=teoo?9F>eW$$J@s{25 zMc=gftP+Gy8M;0;=-K*>Fa@W}4=M|9$`=DbeDGt^=P5|%xp>@{GP~N1%`*34oDRj^ zC3^2>WJx>`C_OlU1)YiFE|G?HZ|MH@3n<%Z9az3k!dS+_+0F%147Ij&PkkV?I#G_{ zO>p1ps(`R)Rryj=%U9Y=3{puy%%?*;lSp@+RcCEwJ}U>EYzzFEF><66pOKp=ey3?S z1XREoB8zyq;@BBHGkxC#i{UdZLtdA-a%W?9t+VS|gX8d)eVho+6ny417+ie$vP_!Q z{hQ>^@}|~yv1uwfgQQ*QOI&GB$xj8&o)bsLG%%-aM)~gFKaQ|Lfb0e~i$HjoIPljB6zNnC3 zQH4N$!ge>9RiWKnCyDVWgl?Yz18*4*Q7rH}K2u*|W06%2loVNc3vVxbWTm0P&1LUR z(yzA_vB%%8zpZHj;Bk`1mzbSu_<$J;9ckEnC7V!szhPsBFLV!k4^w%Ly8r*!Vht9w zr!iv$l`lAbK(Tm|+ScPex18h zob_b)NpOUIr*>7xIUEFvNu6`e!kuGJxjeV|eYAm*^v3N=eYuN+vk^_9-9Md5v0yhrf93uKz=*R&)%TI!l6I(%$wD7()5moo5C zb7#Gm-2f->ZTkU)51EIHr{*@01t|W4K6wX7-msNFYB1Qxis7}w30F>tuQ@LK3kIaVAY~=j=9Ic%$iRyL( z+yyaPOfty4@e)kredj%c=A<&iWK!@su-wG@|4|8Wn=UzxhutT3xyx^@a_q{=pF}UR zi!+A875%WuOWOKeZA#X7%=Sk!vi|QEh2-Ar_ArE z6OJ0l-wys9Ed1h$uBlu*KM(c-BZ1HOvHDbsO68YXy5CBr5%kA!%dNUf0)Mnxt!vZ; zm2aKTQl9;_D4u7R?H|y^T_@2e?s~$Et3f)tTgs%c6c4vIk>hGZZ%IWnDOSa?e4AbE zT~nZf!(kfbQy<_O^{&l%i*SetjN;A^Xt#yjcrH^EC2U3G(x^R3C7PM`8{n)#uv8{X zc;HCV0ou>N^qIwf#9=`ty|sf2FemeOyE84q@Etzmeu?pxeWTyPLoL)=<>)n_uAK+< zH7Ohp4F*72aJ!2k&7K620F2Ck9bdIip3a(r_W|?2`DHxuH355#f<;#LgZ0VF+^kOx zNJ68F)|;v)G>5jj^Abow)46_c`=t#RPM7!+yNpkjoreq#Ml3mg!)F-chPowT(Q+p& zC=ol@MV8-=ESZNetKIm^Pw$pFN-M#oR*T69jO#<7pw`T@C?xKboXy;r@R2ujol%)t zxw^gCh)o9kL&EY;=xn*m!ja*2KLLe&t*dl~wM|>|oF(u>sx zxnf@g=KGPOIs)-CM4pAdLrUoiwHo}aRzvh+&|F7dFl)2u1aR`abt*xv2(6&ooNX=}S;2qp9CLaNXgR&+8g?ia@V;JG59)mxi2>t~$% ztyGGCaRxj?S-4#y4--c{{Yi`2Rs}CpgKzPyge?~pFoTyma~i)F@9ORhfy^o03<{RC zpFS+)Nh0Z!Ce!g7kB9KRo*N?~zX)6a)FA~1c8$(X1l=V9cX?gBQb2UZX!Dub$6blB zC)DZv`&l+rS5}?~Hk0hpd(Zqw&{ROYf0SKW&l2&9#XYGQ+ndFDfq~21L*3&KnUS62 zCAyO{&sUO7Ur+Lv;R8+su{f<=W?ywq*hZL;Rp3Men4hYj!B2D9&yefaYKQ~DeVQyS zY1=^A2DzUnkBY*A5J?L#N=u4EU3Dsz$gEkN;?OD&d93u;8pl4~@Sw}0@P7bHozgI( z^-!DNF#N)cOIZEvXX6@+=QR(tq45|0j`hn59x3mGYi+KqHIITM+S9E(;~t71YHN0A zsJYm$?|a5k0H16$X-3NFcWTeStM0Z2|He1!-Q*JEDVKzc^-TI+6Okv|x;0G@GA(E1LR_Kn6 
zG{tv+<%Hj-DO4xpVahboSLWMoStSfmjd4?i!BAp?iAVbS3NGZD)}v{a7f8=X;;!~w z4}Ow$cGe4_KcoR1DeomezDSeVS^h9s5ER|-ymND>=Slprp=&0qaoZYpI_eSCDAiqe9G4o*4;QR;Ty zKo;@MvQm+TRLmpaX+;dTF)a?FL=dN`%>nz?nMhVzuJ^{EKdWU9cH<3TG`ug?O zyHhjg!VvT5vvV^eHsA-{0;cu%R=JY2A>P6ozQV;yD)Axsr~D_rT@$eu@9e2FtK5cN z`vUR5TenT&X`lqsL%OgsBcBEsjTh8HtU|k+`16sL5K70HZ^h*Z{k-ISM7qAB&P1ON z2zFrVOM+6Oa16@d2}3fImCm;37lQ?LdE2}f?yZk=0n4L&^@NrRvc4*;Qc#Vb6^p0N z6x7nHyrG)Nfk|baD^?pzCMe`qA5dV+SGBLcIi;|N?s}yy=vgJ*9?j<v5UWzO@m@lhYQc)i~Jp8}d5iY38_ODA>e^*VE%-N&NF^VF+7Y$_0a5it|N zn=c|nV6HClE0gk5)$0y^GO#wUR@X227o&*4^<*PMT&S!^5yV`Y#zj(6r4qw`Z#2Xm zlFEW`B=3?cs-;G@r!6j&^wP#73A9%~{`PIo-jXUOBrdvQmNRdyzfpxn?>T#uW!(5? z=A!RV`M=_}$9OqTUYQ0;lHa3Rl|k1Pl5Vc$Plmc&3GvKz8_e|j8vJpRh5WI!x3i7j zv5$V21~}1^+FY}w*aU#y{)fkw0wQlvUg;~zn36hFxM^|5;Ho`&`_)&8za@qE@hysM z$drtP$<~5QAE?gC^b=>i(A6Wre-JcL92v3ur7$wxR4x;}wWDT|Gpx3nIoIPUbajop z7K^QId0Z0~t*sSMvHei4uM#4$PX5F~+g3e#I@c?tdxR1E znkbObSEdNyb`yc+F1=;i)FGnep4+^_7|9YwjBt%wrj)eZ7lVHziZHF!$cgQ~!SjC< zuLXz7(Q`}1rHFJJ!#+?6BK^hVu3C4&?ozs6$ZpYPS9=h%v($^`+e~m*9Z?W>h*9>u z9`tx>mZ7(fA`3{6Z`aPcIV&azUz1QlbjaAd2YpJKZ8f_vo}I4Q)Mtw`|GRE%XLIR3 z+5Q9mFaP@0plbbH3!f9_0f-w5xB?ZR;E0}#3p@+asws&rsxtKRS{u%UwGo~dH!eZRlwij|& zEiP5>{L9?id}U2WY<-b^ORFUCDNHTZWi+NrGwh@*EjekTUCR4WUd{+Tk=%ef0YF?3 zY%R=~R;mB4_zG;>A3)^(t`iV@?LpGquKN5&LHZD~ElZ2;Xi;F~_6r_^dc(JJe{>bmxV}SFo6M*=mL~P^ek*YKdUr4ZZ0jw> zBOJS8YvY8nk3@Z^0?x^SmF|-6=~B`tF>M&oz;wLujo*?aIUG5_oW7|{rv%+ynA_|TlQcH^=&~9b6XMfZnLAr+BH3B4l+eX3IJ=H3mL){mvObNg& zJa2nug{^qt63I>dwM4VA#oJthGoDH5<{QxZBwoAXrR>P_P_vf)bBs7tIOYsE0&(!E zf$h>h1eN^QdHO?nT|`<^#^VxMJa|f6rUZF|a?lW)0sC}`&!-u^gn*)@tSc7VR`O8- zCs28*NGA+PVOj7`e~?S?=O~b7d=}eqDR$nP$}@FDrilOOe{L4eerU_}c52OhtExM3 z!T6hz`Z(%%y6^LlM9}YE6!yzyDmzjd!N#4r#%YsyIN8Bx&VZuwyEy*BT4uaQdo}DY zWWHYz)p&@|g$_+$oLyX3_8F8LgaAH<|MxXkHP?Co`pi{{fk-F{5Lfzue@BU$opDnSht%;XZ!xeqMMY71_Jm=xx6$or= z0|BXV0VniYl1Kiua`3dcOD|~jXPKGDWVrWl!`v0wC)C)i1!7vntM|LQcI~M?)?q7N V?zYKCmJ7e`8S0zpk#rtJ{0}|tFD3v0 literal 0 HcmV?d00001 From 07125849c1a78072f15371a8de8f33c19b72bb3d Mon Sep 17 
00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 01:38:47 +1100 Subject: [PATCH 0035/1088] Update README.md --- README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2819449ce3..95d480d026 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ -# Unsloth -### 2x faster 50% less memory LLM finetuning on a single GPU. + + +## 2x faster 50% less memory LLM finetuning * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. @@ -87,3 +88,5 @@ Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.o !ldconfig /usr/lib64-nvidia ``` 2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. + + From cfa9ba41c152ef4077e20ba78a59697b8a66b0b9 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 02:34:41 +1100 Subject: [PATCH 0036/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 95d480d026..77cd9204c0 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ 1. Try our Colab examples for [the Alpaca 52K dataset](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) or [the Slim Orca 518K dataset](https://colab.research.google.com/drive/1VNqLARpE8N8eYwNrUSDoHVjtbR9W0_c7?usp=sharing). 2. Try our Kaggle example for [the LAION OIG Chip2 dataset](https://www.kaggle.com/danielhanchen/unsloth-laion-chip2-kaggle) +3. Join our [Discord](https://discord.gg/nsS4V5Z6ge)! # Installation Instructions Unsloth currently only supports Linux distros and Pytorch >= 2.1. 
From 24a7e24c4bb5a02c7e0290ecc6807af3ae89e586 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 03:28:49 +1100 Subject: [PATCH 0037/1088] Add files via upload --- images/unsloth new logo.png | Bin 0 -> 59415 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/unsloth new logo.png diff --git a/images/unsloth new logo.png b/images/unsloth new logo.png new file mode 100644 index 0000000000000000000000000000000000000000..fa05a8ef7ce0343916b185a46ab685295b9fc324 GIT binary patch literal 59415 zcmYIPbwE_l_gz3hL|Q;er6olXq!o~sQ0Wqol3b7_7Z5?Zr5i=MYl)>B>F(~1g=Lrc zJ;l%W_ou$czL~i*_nv#snE(|f8G<`hcR(Nzft;+=TM!8A3=+_I`L6TfHdLcAfCNjUzuqG_buQ@NCVzwnQ1y}yO`f3qCdHN>nQpTCs7 zqjDz=--{iTd{H!wm3>Pk-K88WeA}}RS0zJ`Pb;-8>doy(KE#o{{u4Q)G&>Za5D-76 z`KkE$Cpv1-2uPFQ1FcsDNF5`4nOU-quub&vjtK~bq=!E5|1!So{Oix(O2|eMgB$KO zKJ7iW1yO)BZ+{R~zAMLn?~Ezp<9Qp37$k|Yv*Edin}*eY>(A@Ag} zh3j=t{egIl-Ti=fYZnN#-d0BznKraXz2SGJM;kphhwT$X3SXL+Ina-q)~D} z&lKr_wfghHw@Kt4>DQ!Qwweaw$7!&BPNr{Hd=1(A@ta>?-ApT?^f%8@>aBxk%J!x& z!Kqi}xONO)!wN|W4xev`RW%D&dn~r(R9}8$~$mDP0 zePa&j>Ta6t|0cqe&WU43?Ue=Mv)_Q@x51sdj#)cvsa~ck(%-#VPuaI55!V`?m}1&T zqPN@NXW?rdVH<20NqhL+R;De?-0f7Y~tMMbJ2^Y=?~K zC3v4e8VlBAFctb=tbF@Hx;P6mDJ3h-QyX2GlwPc$ihyFwp7j#-jR{YC zFG`ZctLO5;LC{k$=-(1q$iT%n=aR3+nZBsZu%<9#TwuLyP!zWNH*o1<;>K$X=a4L6 zFz`6^3bcpykG9h+FKYZ- zNwoKtd+0!Pn8n6I^h`G!z(;C<610wq48$U*H?IyO$^`9U9|-!2{(Jf%+kk#{eO#f{ z#Jt)m^%6q_GtIV!_Wr-8uto;ZItc0ib(5}qrdFn89{XDIj@!+sZ}Vmr83>5(jH|X{ z1Apts3rfL$!bVH@?cb7-wibbuFrS2A(NRBO-YY9D^)3>wcXInZts`Jj>MW*iecfatjzYisJ61U@Fo%~M{1W;WNJB9 zv$0YNYE6c7g01n%_{bg(6=y zObFp>y-YEJZmW)|+~n@%NYzT*9pPwdNDk>GXi}f*KgaB8uH2L~RHDnhBd?;SQgbQT za8K`!{6HCuwub!sBi2L`zQ5;)$(m$)!T^Kj_QHEmk8^IaWDoRSej?wwrl-5C$y-97 z`wZ%*Z90}EQgjIFlArYyJzE0lITdYVUauLqDvW5gF>D`heZ59cRW9ReaiH<7x%#mu z1vk{*f2luWD||U0DSyIa69#daF1Vc9PWT&u@Ox%hUdN!iR`ZJmUr&PELm&atCQoy-E)%zdmnSyK!SY{CP<4TTgRnv&SJmB`598e__YDU1+V*kNCRqQ*?y*FHIN>!{$`ebrJ+1?@ 
zmIjPQN2=u~0c5@P>KEnU=fB1Z8&9U9H7g3Z+17k_@{#_o8$R$s1E2ndsPw{`sS8jy+e0m!dP)alrqc?wrZSUQY%{cS3QEDS zQ+X?t_=vB(l5P7! zThF(rDyuG|zb{SyD5X5z9igmMKUJATVf|Y-;;gIm6mVB5wTxhNS7B#&3U z+@^T~y_$kB#{mqF9D`R1`;7mV|$#e zXbe=k+2_ZHgDBr@Jk!d4iAw$7&kDuZDcudGn-`e8)JOa{1>)~*BpG;1=He_kseK*H zgE1flBqMGTu85SV*E5W|#;75jHh8LI6iZLHCB{rE()BEy>eyh6v(3$N&A7dN^wY;G zosHKce%jlMVR?Q5`1OgIyQJhAL?LP4e6wwID0Ot<%C+J@o`8bnFfMPk!637TwS`}K zOWt{srz)ra`<73CD*di{)HFKy>yk%pG0&}oY;ljTu*OJKh_hL*h&$|5U__1vN>Qn^ z@x1VKbH#i9bnFs_QlWImqV$C7!Uxw0Sg{o=^LiTRxr;!4PE`KJwFGU1nY0pDp=>8N z$0yQ-qHz*bX@UyqbO!G$C!Mi)%|=aBlrMIKFHx)_7g@$MWp*gNj_QMj-WR4V$YABn-9o|Ib25fQJ8I2cWQ$7kI$vgeWBw&A?&_6 zwieCPmTQaA(GJCh;2_eKN$thLlI~i)sXntK?g$_hbQ-oR-nQQZ=1q0s5m_%ka5l5B zD^?B`q}|IpqX}8A-Sr=uaS!~LycN|kUP#ddK0D<6_!D+^?oI}(V_vO~n#;K-^=Y`F z{u5z}!QnH0egR~`k=6rx4O`~!S$1nmYhCC@eK5-d3B3b^F*!d5iO9FzeW4Y~#hn!4 z#qtzJ)s>?7|3V$vXbO@6A^C=J#Fty){p*Rh7Sup(bQ^EwAI0y_YaVl?=!lU~Csdvr zZ|=5^mX#+*SBOl(P>n;KKnzT9qR3POPzTG=8npI7b{?t8;J0UsY`-%*Z?cHotH^D+ zfMbED7RYDvGgvRW4e)vAv7mAf65I=Nzt6twF-@xh=NUc_UeDY$B0~(|x&6qSFgxhd zRG5y}I-3p76ZmAt*8Qdq-zXrjYD;JOkV?@WS-$@z*V{;U!b~J|U+5n8z4M!Fa>$4E z0>ANH{L|BTrafiM112vTq7!E3aE|3NM9{%Tn_~K-vX9^-I=|=YD9*SN0tFMtElOvzJawVPZ|-p;ZGd0YT+{=iA~({~ znb-K4Gg#zdbBBU({g3WkyX8hWZ)K^DKP>Ctih-r~WO;B8`ajceC=#E=Vj}&d%}DWE zcdsIthYy>k&YO_uC*0PQSuj_|lAX5}wb@Hs1))tZN2)^HoOPku6?57nl4p~~HH1xS zm!~hsGoJpplVl4q?c9XIoNI#6mY|ufFCNZxIcnEi-Ga_WlgblQLqCmHjl=S)c(#FB z$bg4-ZXK26@Gp?62mnEZ3+Fz)eP~t78&f$wii&j#iZO-j?qPq)!% z(DPG#vG(gadtzA+aj#fsmzuCVeSDW=+ea8jxwR9b=Z%?qc!SHC`hFH*i|e()VN3I? 
zzyzprG_H|D^oQNuljgoWfkTika>Ntj(kYWK*TB}PGUhsaR5JuQWyL<}_?4LSgaqML zf?uzEoT^_g3~s6-MM$r|?(FSur*{o)C%=S9s@l+%mX%qw{R{G1>F3}kWf`z1hL_uY zW82=7DK*$~RZ&SzdLJ1W;iZ)XnI_H3pySs)=*>q%B4;su;&Tiep&pz#|Z8vbjt`Y`4?#qKG2K%jjTCh?VAtx{IHOu zFNKLt+8j6w=KXa%FKO^AZDfJXKd{{+R4DfTNrF!FbFNw7OOm>9RbkeQnP`~nSL<@x zA~$a2;XQ=ww-`t(mN1>qmrUCKh1ZDDOBCZGshcrrYpEXB>+zLY#`0wazFmLD{=mSH z)t2`@L|0U|cq|e4?cJsO8uYr93Kq5Zm$tAjd4b(4%)+4x?A&(aEp=UZeSyxs^_9gj z3^F_SDnt2&;lP_Ry3}Y7szs%~i+oRUmJD(y{NRs^T6+Amb#@j%YoVp0!9+?|G~b6a zm}|Ak2?w!;HFjioX#IqDVG`K20DRiVaLW+edFC(P;rPIN8fWioWSuuLhZy$1-4lQf zf?ySMljHKKwL!(a`Y)bwNVObUC)ynsr5Z2BB>ar246@aQ!YU?<%ae!yBn+DIA;=W; zf#)1VT(VvC`O?-8Ls<(Mk-FukiS%-;*?)Ce1WD0&3lx&WUz5YOS9Y;_N9hO%Y?~8x z%reh$L+jYuuUj3Xe)U1^+Ns4bS2B#9WSA>}BO-{EJ7BI^-&a?b=hAVFjIypi5C1z# z3tFHort`{iUf>6dFzoVhQHu{&?us>Bm|d7{D%(qIP4%Tg6WwZqzzFHouy+>1W?;k| zkTHoq+HW1rErV6(4?72MyVq>q@FTG>?$OU?xdCVZTV)OYe4uP>iEm?Y@Vq`-|Mw6i z;p`E`hEGhT!be>w^6;C^_B)QkxyODGDz92UMJe9pgTLs*zZQ8hx74524f_=dqbFG4 zkW1P9Qt#{?@`eCrxW|gjw1K`|+QL|Vu{+TJKr`Grz%@Dic}Xc21vfIqKW}_@esu^k z7>Ei7kERL|RWD2%Dr!+)n6)l$sHi9k{KJAQI+x5xzp|RMe+9x~e@dlK|amD zm>@`RJX$FPB-9{a1+9b&>2SD_U|`Ax+2gjjs4MCufh=z1<7d0926uf|on2c=nxx$B z-5icq<`~TTiFY2Hyn8jXj)5d@oTJ+aVvVbC;j&3UKj5$8ds?W^`iTI$csg@eH!v^u zc#yPocNd+@&iTqiy%06B_6vvaTzjDECuw40;`<)&eBBBwp!2f=`p@ZN&q6PeKdw8mImwwq;>wUWg<4FDz7iG=1n+}p>oD*8s7>Y6p23L6Cl#sqDh;>=9E2J1<$%?ggg-EXU!+6}6DWYQUQsFI=N z&i55#)Dfx4Up?>65Xm^wk{UIc(cB!3fCx_vSZmMM4M(u2h!>^E0Fm+mD!1C%xr@kf z%oV0T#eI@sN2ih6I0WJKcl`k|&6w%>>2<+sJij%4wOKlYskk#j1P@|=0?TLo7VRkQ zcTqV)GOe93uydTwCouovGh>;Z0s);4icw$ZGN`+SSV(_yovoN5Sqe^3J!QMmz}mMZ zNF^vJ_+P}hn3~W*`m}F&g3wK%U-DX(05aJWDRXB@(Gwev!LQd?+WMlhh<`umq%rWx zD{JO0Lz=I9rWUp4OIx+%G3?vmSns#NIUAvH;Zx~X5IUh1^7N+-10PmqjS-bX7jBcE zMPM6l&$Wm>Vwzt=H}X!FBIlOvT;}z@!zd$^zuY7vqmR$Lz@Rl(@IyHJUR(aTs8>4O zhQiy3vg6K3`h)gdgJJeUu?ZG#VAJ^Lg}r#3mP`x^w6_O;9B-Ovc;~sJJ=yZPZuw1A z5qWB%j*s}|MWy}ncxvnCim^nnYZVF1qVd)WiVLaDaT9|_ss}*^3$}+-3odFi;us5j zL??_no$)Ro2H)4!ntDh8wO;^$-rBZ$Q`}-;#1v#P3O_vxQ#y706gGN&zFb4p!W}JI 
zY>?&A+0)HWvoFPfcS>QE)6_IC`c;SoX|dyN>lal^*kX-!mi`Zy**tPZ+${?EMcH;K zx8%F`#e<1FV7%UG&~>i^|iMPHyKLc1w|u=K9yWi0Qw5_3ybaSr9tGkqzE?(kD%)Uoz5W$ zPT?U&l{1?AWE#6J_&coNjH~b^fDty{>E=Yv%Eg@5#Nb%jd}>u9*W-o#WVA0U8@&}& zz8`MjQ68hR4$h)CQD9rjIy~WLC z#e1xdle`-l|J!dX??7l=21wMKLchK>f_Uy7-;Nl?7ToPMj;aIa8@ga56g~uz&X}9Q zp8s95uWnJBly9~2(Zp~+6k6DYP zR0-$(j^o8TUh8c+TJ4EQOblvomqzHSsHFMSix5%q-InwteCeyONQ@3i#8te_J$&Z|AxY2_6M1!kuGV~C9S5#@Xn+g?DGq_CwLA29w&Qb{ zxm1=XN5N1)V=_sCXsdR~T}f)~A>!B%9(D_rkh2k!w?DE9eY<2~Zr;dIROdaKr(ryp z@mjvKNUxTHLp@(5W@Tx~$Rx+0$|jlg_>u9#@#AzJG7K!7yN#^@WZ?7F*dO`7YqUSU zF&0%qA3k@lHLN!4U>?++u{6^iGpnO=fO7pe&WFso5UeL$lV8`c$xI)6Wj)oKXs9ry z)7KSL&`OHn&)i)i(IDCxs>cOTkA&JQyms}^lH(GV;6%=?t*wEnsoa!?%Tt}Yi=!U- z^8OT2>WYerEAWZ0$oR;Jd7sXPPdp@4F(ld@YOMW2MSK8}G<9_`D%Rhr+|Rdq(1-uxDZ6>AzT# zVt)nA;4Or8(bRnmLSKTC5?92SqN^RZ!TZfQ9(dHLKta{aZ9$tJm z`A7kvRYS0Z2pZ>T-F{3gbDuF((z|XvTD33g-i^QQSZg#VJF;!ib44hAVHbR#x4GQ5 zx1fOiN}m*u^csl)vv11U_^#n2DttnC$Mt_BSw|@>LzD8G6n?=doi1eFr6p+20HhhA zXZ)GTKyCt%ZFd4u3qpEBq5ME*zO;s#pF z%uTZi&n*v-%VEpt{f3Tmi{bB`egq`8PI^DjNQ=ZhuiO>DJQe4U)mkDxVR|N^G4!cC zm>^afJ^61m-#H!mVSi8jE4VQup1b}N9#3H?%mb{D1ntkw`7rx-Ww-Di|FVqTj8vCtr zPP;a$R?C;W*_J)j=WK%{Ve93&YdT2uaUkno@!^l4K7)hYqc<=VzQHJ0Psu=*&m(_z zn$N@S?$qb&FMZO5ItxpEZaUkXW`0pvFJmZpYCex|c0w1Dv9qhhG|3&nR?_KtHVYK1 z>#M9=HJXOwMFwGE;#Zm`2|(*3>m^Knpfs~HONzKepLM8d==olrP7^`VQx0Ac&gjjG zyo3GBVP40UJNZ@l&?Ooqp zs8oat&1s5KDsv}B9Dxz)+(?N0Gyz9mGxH^+X`wXq6sH+yNsQ-kj~;qLLg#+)ijSY4 zodX{Ip^9;gG2p^{d8mwksK zQEp)!^lqWAxc~Jp0V$TC3+9OBSaa|^eIqBxH((}5Bd*$lz$X5zn#=u+9M#+?_JOO+9o|ldn2Q5VMBaGfEPLd5nJyhJiMhS-ind&~aODQeP-MD%$t@uHY><$Blw^T_5<^>(m?ZhT012 z2~DH76oGn1+a(%AsJKE1Z$`Bjz>H4)*dtQN0o*BMX#2T+Np-3Zs8)Q1`d6_l&H@B; zxRUb18Im4gMYGrW$6;b9wtCbSN4|E2d|Qu#qB|e=DP%m+nsLbfa@$-A>J=GDDsndC za9 z*~u<=0gm;ieI1&`neC+-hjvX)3|q^pQ`|fpIt6gc38rB7eNsn7I1`*ql}F;La@wY4 zXR>Ux)ST=1FgP!)ysyuV5p!>mR_JqMX9Q!(v`wvH5&X0{#ceN#^LNQZw<4RFK%z13 zM$eclp9hKR9p&)PADrb_)fkFiG(0qK`ftk<3f^hl4!B<~6@ZrTeEWrzKEUihFF-IS z^lV{i|8TIz_t*CFqpj(fj;!+JWEhHMRc%9Kh>r4}E-fdG=wPrTtG|r>< 
zzZ%yP8hVTVRR~DA#<5_`b3bBlgXgil;&hDh%avY@@r^7VP12{;XA1s27K?I<@PUc| zOv83k%|c>;>$#KEcoPgZ?|kp0umgFl>k&f&)8jc>rI%Kzak4qAs-v?#UsWv|)^225-e*N^y4^db1s^L?U}BUG zZhTFt=Tp`v#IXhE2B@589`3Y&R23D0>G`vDKJ3LEUzc;m;v0!QV{y}Yd&xNn%?mV4 z+{Mm&tw8|ZjS18uB+kqBQSp-9M=OQMm3hbun7HgadmVO>{@1&4@Zt3RCWvg zqNe;jj~9!0(2u+kd`SrkK{5- zDkUqdTrW+}MIVE3`|0g<1Rb>+%LPxv1?jl10|5n>_Pr<*v7z;RG2?Zu>Go{68gXQVj&mX>G>5wTdV{ z>WiDm5f9yYdfSC?_JH04X&8Nem1tYv1mNbc=l~5}xEi{0vDCIenOW6V%ETnIbU|TB z^l=tYg2ZJo)qF0m$GBPz);$B>^b!Q@xW3&~)%;c^LlYF}ok9FXdHY6JFX_x$ zedB>g4V|Zx;g;5ur6%3ab(gjTZ0Bfn>6{^wGdf(uMbC6>EBm%8JoIvlhn0kLZhd>p zBhJIq4=JM7(K&GJeX9F(TQpir{N0P(WzdYET?6r%`#%t76crDWM$ew;F|W5%+Xi>Y zg-cw=Tv{?QmQnHhk{Te!rVv4PN2JkvcOgnSwtStM*wf0?A3XYD?)r<5)6nNnG z=Amb_mDba1^z>&6(dmUa7NmKb6T)1uTun`m#ej;E+E2&snR{gMa(oZdcc`V$LX}${Lsc#fqPx?wEenE^tAaY~iBEeEM zkm^CnTCoFdw96$wVtTmBHvlOxl~T_&q%wj3HniCD`ikAE84p*3pmDZ^<&U@;zW?M} zg1xi`jK8NVM9#&0Rrt_an06;EtjYp8e25?99*o zHzh{t0nWlJ5SL9z()7~RJ&>oJ!&m*Rw)GZ+HRua1eUe`%?H+(j!zfXa^Xa9cnXcxnf+urP&vMnN-8pEP1qSQ0VXY;%jPz6qAL> z4WJ0}z@VV$+IrdugYQv6P~lTGeTH|I8rcnDpI?Vsna*m{Ok0F^i>FZ{@15On0p52& zJ)jp3@fD44z&u(oeVm0lJII~mu>^y|NmN@C7C`-lJum$0dstY5v zWYA}LWYe`Y25o;}wg7;i)`k;4JC;La~EVFZoE4$UN=wD+i^J5C-R85U-GcPx6 z0rPi?==4*_TrUhI+k@docDu!~nMeWX_H z+4ec#dV(~#c3i*e@ACL)4s`5a?%lD`+B1nO7~c#-@vX)hcr*d6(fjJUp`jsvT&?irpkde0=JDqW zCAsO`)*YFHcN-{TqO1t2>>6G8#@sTHVm%fIJ-L|99Hx0n3ltnx)QBFkS-b3a*)9AHz@qz*UGhz_Resg z%=zyAygNU_*)~&ypgO0e(IQJ7(N3xf=UzRP#MPYQHbO1uD3@`vJhynTt!Q}@AAnD!{>u(PuT zXdk<4amg9Q#|qkNw<&$v=H9K`cyDe8A8&8P*-6eAM9P`vPL2m=#q{3pRo}lh-~pm@ z-7Dpqojr~OZEzwAa56+jD)f0ypytmfvLeXE@wGPebiX`v4J};_W{P+=b{K7!D97SmSPS z?P!iY@O)6p^KG&?x0_$z{qVKO6>^z4|v|xGO6mJ}?$)LeF9(KD6RRQM14r zs#RvXxbY)Qef5YzH7UxEvF2_&t+0|}Pl@I%44WrEo{V9yVqD~=q_QL}j!&wR1Z^q? 
z?#ggOl34&l(*n?@+~qI%pT^SJ9S1#NNnAu#!wQgYJ|a+qw1+z5qBN9oCJ_ ztkm`vB5|52Tzt!#2$R`9p**u)p&t-+Ygu=aL)3DZcrGU@@{MP!KMBd%8G5hf@c37S zAv5zZM#&ugv<0#JrODw+I-Ej_DvVfyVR&L9ySc$AGc64Sh!Qm5r8{-pa!DjmXlcV9 z+}NmOCN);h>=ZC3sSLy3z(WB|eou(#Qm72(nrsQ>%-q$+;=UF-gKAYOcZf|?K#6oZ zxVe#$BK^N-=QgppTSsL8?kk$_u+zCeRh&L6)uZNUuee1J-jSxK|7t3v4fV@aq->26 z;Y8{WkHrPn2i*ymk&~1A9ainIZa)%Wc~HXy&~<}*4BFrZTrb0Td(&;5goCZz-M#ko zr=s>2!sfRenSdw`L0k>#cnjw!0V2PiXd%1st7^Q)y~7D+MijY*?w8e`*u+5RS}+1K zyJPrpq&H?Wc6&Ucm675CIN`(q+75x)cBP5C_=&0{B&I^lwhl+djUC=e#x<_DlKl-^ z$%i-tTVWP$aH^_0`fQy0rS&_kDW=M5S*L>XWS$x9hS7&tVnB;n=~r+S;J=Nt=exL? z%t%-Du|*tc=?@2G=qD>D7=hxWZiBk_W^N%wV6L%OU_Kaya_zbD&!*hI9K||S6vp>> zbIe~?3?Y1dgPg(g@5j*Qtxz|Rj9K?)M9pbYL5lIBcbE!C?OfYjH$aY8=o`bGw5OEl zaRzb|?;(v-OJ0p_5@3OF>n%QpaQ4z zuI{f;TvVtzSOL1V7$ArO)TlQ4E8w6A!>9lCQ?jed!s zLQcAW`HKlN)UoOv7%)V{DGK6?j}&WxmBx1MGxE7jRH+3^`gm0*0-g9S-NX#2jn znwEA0$?zu)X~09lTd2S8P5%i&M0stL0Gg2tN1z%i=GZ0!*uv;_15QKs6@aInK7b?#$mwjB;cK3h3$w z&a^G-7ljNCq8>HV)IdyAZ4)&IILBe2-B9P}B+DpHfT6s0rihN9|^EmlF#l6$X$;mCMe)+OH7&qAZdCj>a zk`=;|i{kw!`ff&yUp<7R1 z7?Fy;`V`5dQ^{H0!0qo=HV(K|7!ssJEV{b7n)YhxAzI}=a2CV2rvo= zT=c*EbO8uA!oNJi?biBJi&);bzL^gMh_x5BXG$=Sy{yLqfu?UScN6CAwE9cf`n&MR zkXsJ_+2oSQ0Gx;&|9<)^4{*Cql)r(W0*aOpM-7=O(QHlSZoJltq`2+s=q{%#_?L!98& zC_B-Ts%h62NUnO;!Nrs_k33vfi){I9*cHj_yqOtmHv{g_q8p?~?*#ub-DYi5X0=Ah zM6M-jCmbjtBxnqtGi^lj^fPQd-nls_co{9%Owqp>3BvWj4dPybiVv*8UE_hS9I7@e zRAC?P)~!8@;XbLs!-98sEnY!!(069TzEkw{^p%K=q4#Qu&gMNEtU2Jcs`M7|R~3V9 zqRB#zZwpWU=!ju-q9ey?ufG1_6w>u-A-NT>(J)$JirwidjeV?`L389zS^sy0SUe#3 zU;;cP1JHG;zJKpSsqg$K=Mo&Wwe?9cRZKJkMyXHbM)S=R1yUCnaXEEn=sBvSC@M@1 zGZrSeQ#Lc~3TW_5QQ>Z!c;i9jc*n+$jFA}rgsE(yX%U2@g|Pnnd6(VRiM+WpK%@GvUwtOQ9IzMQ8>3DU% zek{Qd-?%?RdPZ6db^fLM^ox}1p+8{M1g_aAEpN53lDogi?|G=WF+k}nxK?sXYRNse z2`E_DXcM(>eSH84t|JP zf=>yR-vZCl7RG{Ht$SSxkl&itTYO#e>hdIS;6x8IoHqv2VTbM1YMiG4)gN$miN-bFBWqBADt`PZwcFV4187F z)nB$Ox=i&K!O!{{fo$!^Q3#XZQh@9nc1Ls2R>TGbIL*1pm`pV6l=ghLg!1`6*Y9q1 z+?^Ud8h-kK;Sgz-;$qre>rho*zA&V~6Eab#0mf!=oO@(c>r5PkUq6}O=tfQ9i|58z 
zMESSPlDLo>S&jJKWLI6(=QC(^cz*D@msFC&HBWtH8r(GQk5aGDW%c>m^P3gB+^B)v zRr$++oNwb>xwKfaSN9blZ@X&59Jk)N8;h??NJcJW1(!D4dYnFold}^Nq=}tiV@QJzt5${NZPz8_CNStwBOUA_^tB~FL5lS3K@{_K z6tGG34xm)3)WtAA&1nSp2@V)hBll(OHed|Qpu@?iSEFQqQdf7!tzzHWCD^FK6F_(+ zroB`>6E3V^1nMl}Rv;`wLAH^rYR0=Bik5b%TnYlp>XGZg_;HcIv$kKCtDdo%vzcYxbXV7Y5Om5-{}4C+fn$R+6ycTB1pVYE$xOX;326GrEJeR-x)p#hT&emh(9JMAw5I;>v#DWrs&s_ zQYTW=##Rouk5*}oA(lkz%9A%C1bPP~ytmRAiijC|@;Jl5=SQo^@rEm3fF(uD$-;2$ zmb`5>!u_N5z0$ZfD1Qj#{Le$oZj{nJuwAaJ8?hR>NE#{6aZfd+j*FV`C+_4F<55 zO5Habn~*nQu5WaXMAn$bvy9O`VqTw`^9@3UuTq^s%{h0q6`%)XkI;9jHgLs=Hm0zr z{Z7|63MJp;iEy;iimRXq`K91MhXO!=XHw?!?(pwH3fN2@dQZ>jKM%Z$g)d;s~%3EX;r=3l4`3P3TJoy#xzDo6wx@ZUdkbJqcr!IhvC4DBo$3lx%4UuPwg_i7$ z=>K}8UqZ8rzOo6IBdWJ}i|u-F@@=yJ3{88Lv30aV9WYZK119$|?D;G$4r^A(P8gtq z#9%!v>;qgxF}x?h1qjOsPUKq_CC;vR@7oq7Dbug(ZV`$h7qTsvb$CB^5(9XU7TNE4 z^`nk>sw&L$*STiIw!$Z*j%r+#pB}s0i@A4vV1!Nef?p46H~-U2po@;BqU490Tc(49 zfY3Yub5A{lyjj|6kzdeq#y97>YS`yh44{1mv}NC6 zlDBwmnYSu-cNw|{xf?Ey;v!o!8CX-CGrHYvDZA?EZHsV9mKp%?=)9inq@ty@d9Iqw zYy8c`NP?tGIL5I({Ha?sHYs2PaPKiKtq&>UFSxC}7ytZ5-YMh&aIxpr>q&>8YrZW0=LMkhJvut_p^f;?_#Fnx zNqVS{r7zV@>;YuE7||Z!{>5WanS>b6;Kzn3JzzwH>!I;1m6fHn>C8V>F(_SMpgU&% z^--Ehb8+7UjA~lD3SS0_gW63$o|-soEIr^vioLRD5Ntf%w5%E~ zNV|)6D(w^dL)Zf-lGhxg$}%SmJK&KxpbqoeuXHY`Q}F2L`?vybv%nTg3{I`pOsg57 zsD1aMY~l&)O%%w}zR+e;uLGo_jj@76c`;URP0|55mzq#o;dW+yQEeugMSsT7MnFn^ z0LG6jw*~w=%FgtwG;->$a`&AVeJPDlC!2Wtl1y_RXzS4qK(ik#U+p~NzOOxnHyWZd zNZQFw5AF%*^4|YR83Np_EVg)ln!jUV-elH(MA=!Zu=Jj(zM?f_tIR{J=I?27&cQYX9 zE(awrKYqMbp_r%hRg~S;M{9ray7uQktCSE^^`zz0Yw0C1ui^l}r_kCtRUo%Trjt*q{Zx>PFeDt>`Xp0yc}Y@{2q4= zSTx_h3Gejz4Wvt7wLEpfX_X}8^!(K~NkYNa&r8yUQ-&tVjJger850F}k$oTGh(f6u zYOUS>b)@&sFsO8Np12(gDNtVU_b!ZJ&rkT$<4hk|th(J&rJoo>cGCejYPptpul7B! 
zCH3_5I!m1cP#{vCWb;1^!;dgADj7PLpSio-g{Us6x;s+kIstAaNj)EZC`AZ=@+@IL z3QLAe;Q?^gtTpaT>LD|YM!u$@kTiqK^eVab2@^oe-Gvzzj6bcd2M$DnYGf2y`JCJ1 z86RREM$czQdc1%c@cCx#$1YNUc_9Tno|?6gf}_*Od0)VPgxHva3%j1}(YnXT2wlVF z?$2br4tnoD1DFd(Bw4^F?yW6WK+DI*&i>ZWAu}_R{c~P!uDoxvy zxOJ!3a&SS}h`j5K*hHz3M2xUsCq?31?hH1@o|X*v6s1_%A|;-9YNa~*wKqj{qtO8K3=;WL_U1YJ5t zYGOm&N?|(I@6OZM*ylo~%a0qtm)T8s{H+5Gp}Z&!Ce(HBnW!v}8Qt_+Q*O z=k27hKVQw-fE9z5G;-B*l&m5Q_Du0i>)_@;8pQ#Rg#y+C_Po0E1#(>s7D9~?1~4+q zvV8eoSf@P&)rGx+b>=S(d1sFSALxMV_d>o!=)6R?rCEs!f)>xJY1D*WRetg;-lY_(qsa1Tna6}j!Pya z^EMT>rTaz&DL(hFUJ}0EiLQ+ZjFXkoQ+4EtINa^`)b?6z920- zIaQ@`J{3jdx8UKr`QcI{tk#*kWsc6Hb35v7H}(u$n@;=2L1j7-d?p$Elc(68Hx<|i zrDKnSs4%`t8%7l^l2-eDUHj>a9yq)MkR7G3nzomh;2hO#78^k>>Em(6Z^m{06aMw* zSvMSIexdF9NO*Ku^2c?AQJ?FR-ad!01(#KU_2QzhCS2x+52*ZvRC?lK6r{gl0n~x_ zaC;i*deTgpO-fu{R+#=PWki(ZWLD>aK0ehd*$K6Ah4Qj?)z9g!&nvlK z*L-;~)Fo49&e|GOGXw&2i9S+V--hfa4U4xZkDsK3d|H7y+R(Fc%{J7w7yD3xhWLx? zwg{CKOHLS3pX<ZF0eZbp zF-ha3mbu^d)qLFrL(-Lqf*Z`h=Ha)dbhjMYA4qTAG8aM!TabutuCq^lo!>om25(1L z8`Wd~9Z%h3NA$%LN*z{<=@GY3swvO%{&t`wR6`lM$WIa@h1;Ltd9!SchH-O6MH*}P876rQyv&bt(nfT_7i%H{?PU8KW63NStp>cz4BKq9!$)M< zDl}&RM=!n84&Fs}!Ym-Yc&6Jm6b-}4rn}Y=ISH%)3V0Y$gx{dKm5f*=p%|u6c?mAv zw*PD>_3)YHu`Mrz&>clgS=&|CZedX~!|(|HU@Jq)TAMDm>;`Hc00RrSIcP>&?UQ_S zqly5NYK%!KF@)5T{1e3T+h-;{X)xph0i4qYzD7 z>QFx$TRXO>f-(%lP0AgrntU} z_>9Yfx^ceYP=p_95V-;kUf$|$ij!TCYT(XHurULCBigMSn#Jp_=j)zCJu*6;?FSj2 zdqdTj+ySWS?bDi3h778BsUE7!LnnWgHCINMpJ_>r`70pdypZ!25IFTYC9BY(J%Bo< zxkUKVQRCVN0Wfs?Ae-2fzHOcxAg7dAl%ZRb={nUMT`%gYR=dIB+eejK4o8;vS+aZ) zy9zqv`R#|XtsttZen?su9pm)F2*#hMKi(vKJ7Ag?qz8AMkyqEr_7h~8(e5J>kmmI3 zZ<)~<$a2K!>Vx^L9wxv22@8rBY>W6DNCq2g#-<~q@DKX^GU}wCA>~;(jQ9+N9;E_p zIvJdeGw=1BA_jrxDP}PV5&0$AT!2$Vw)Y34yr#^~d=4%h9=hpt=Oec|;Q2Lg;elBr=Hg<515g9Bf*mLsD<}Z+}TX5c5Dh7rWmMWmI@k zQ$RkKz?3Bqu9b!93pLs93$EL8jm(Nz40+6^o;pu5y{XrqPC!PB*%ii6CDbVKlrbRrwsTxHpz1N^>l&Ok;K6XBiu@{Mn zd_L5Dm1rjA0;KkzU$L}0PTS1sG+&=(qJ)AYJm9znVr6-XPZaw?NwrZYL*8Rh2$jhR zt_gD~WIAmJ$9+<6KG|)JjK}8k&))SAqxgIuQ(kX!QQy$KB^p#3h>ZQ@)!#V}#%&!f 
z?yDREco{MPLw-&{Uh^teEG)3NQM`{%B#gh^aFvapw(&8{O(YYIw!+MCzgZ5WAN_+x zQbjIU8}}m^7I@bDPqFXRUsAP7?I`PP0i$9yxK#p-x9-hs5u6&*2f2OHPdF>2gO(X zU9hO@<8dQB?`*qVpS$o~LyXeO#Bf5rdE$Q-1^pR@n8h?lP;|N-TFvts{WrFBJ0{YLCOQcGhz!4KGafo5V{@x6SESlQxd!%H=Qe*tJ!EJ z@2NA995m_1E!=9*{iFd6US9l_BzO&Vh7L1y(NBXky)_?eeKcYu$J{Q3`pX zI20VFck8W=qE=Q`O6l>R;YdXz{;QnW>U-W0alMC#vlV0xHd2XT1WSB7!fNr|V}Z)! z*ZyJDYZddv2l`w4eoC@>R1|3O8yaz41OB!%9hd$5I<7|rD4mkud7pGW?(<7}A2H(d693JLCDFWE zPBK8Fnk=X){?|nm;V1EiAC#JfV;rHoMDb{7jKRAM9e4Vy#lK-muKM#27BsEh_%%v|2F|P^Wviz_SPR zK0AV4Y*j0`dzR}BBr&&z&wTEWBM1E1z zs_dUno&M4Zy-qUMn5bsn=%-T?mBhfZvqZ*OuJ!>kXv$6lm(NO0VcaZh5A0$Cys!6@ zO%|FLAGI9jl}oN>SQoVn4n?H4&jya$YTG#`W&WJ?@?_+a%tHMrBCaUo6J~g)K$s`I@ z;MiTe4hfG<${D>yyzV{--5aN8hik~@_F!I8eWYF|(NIad?CSl%Nte?U*+297 zt}HzY;n_+-e9j#0u-y;mpD=g%4OuKM%a5=N4fN3heDgXo`S`%KyV2V?_!(-&0xI*e zr;0udw6^VxQ|R~X;9sBAKZ$<|*Z_l|&Bs~vzrVmj`X|fxg93>f*CE;81^Dw64E`92 zAejF)u*A+|IDtOGWH=O6#KYJL^EPv#B!^+6$W&Jh){3-0Jr9!!@JWRPdhcum!@keQ z^!`LkLNUqv`qYG9i=D)z=fi9ZPBL8;d3bMN<|H`iqBDRYql|M`#T{I z#pGq78$c^5RZ4w>JN(peC)`jGm+m|Bs{_^93Uprt@tbl|-ZX9qAA`WN!Lo>- zO`h705?@Et)rs}?_Me2Tcp(9q*a2sl>c2yYb~#a>VfCY-q8-3iU9YVV*_Nf2=`-)m#qDyBKYtX9)eJWlJMenHHP5M(J=^U z_1S~+=X98(8TFghXN~h^p-B$h5n)Yy+>k&RD2qezc{U1V%Q=hWYH}1WKooDqUc)UsLEHZG%<`wHUKzaCs!Q!h2B< zTHwDwG#&8|3>49z*q}7)#(!4SUs?u|z4ql&?6DX_1Q?kI%Ip5rPV$sWV8`YWxpZjt zxhofhy{-iCVI6oC-TDVVu+g?{q*&7(xVy+lvdNz!-t%%iBnoBZV|>La#Dc6ONx;3I z2I{avg4@?^(p?I{GS=cB^@)e)AbQsWHl~cZG=vKSCXfSH?%^(ueg*t=KpA$a^pwO7 z_Vx4&lDhhX5m#SsR|tcDKGkIC=fn8jAq_(HInIwa(FsalXKiJf@oWy`&@@iV5kg+zz}X;jqcIZ1 z{8u7$Y(EiqUTIqP^=CJbW&SPnGF9*pFx-gA)+9aAiV>32m&xrGGm*q4 z3$~0M8}^>@-A15#uMMVw`XT9#_{0#4?2i0peO^#$rUI@(yp-_BXvj=MNc2V8FX!lz zpREB0tAD=xn++T(5xMj&A9eQpV=6CJN%h%Q49$lH1UlC~c*oLw*jAx$CP5E}7g}~C z`qQDgW;;qhk{SV8eqS!IckfBw3;YRRlNC>v8dl$8L*JB*5!qXls#&wP*!ZCai^34s zz#>zN&8ZBtfQF$zEiH7jsU%!>{g$p4_^Q#zs*OH2p2n^izlJgD2QP(|YimG-8@(&4 zxyC6P{%JQQUfc?ae1QhNhBw&+Rh4!>vX3o9GI1?WjA9*#G9Mh*Yywl#Ov2@$2-spb zgN+3B8x3;=xe#e+Y3J2empiPCBcN;IG@{?rpC2(9SO}}S$+zRI+LLU8pwChRrZb`g 
zefYuhklAFO3t2jvd9Ko7P-6cG{QXHx6q8MEBpZ7Y8Z$p}C+;td)n@V&qaIHXh^w%m z#mWgsk)a@TJM%8RRkFn&W$O ze%d&oX(x2h>*IDj(YZA+T}cNb@I!$SB)&d3OxHg46f*h*gRPZih9T(|Z)D=r`|UnF zm91Dj(r$vCvINhDGU3I`NVkMS$TsmPN;j-P1rSw^<<~M*=EaMROKUSGYMU8R_9C`r zK``9WJv?4gF%mv3$wl@Ul-D=wDL4t6AJFkZY_F=6lyZ+G8T^I5-<=N!r4?3+10kJ~ zCq(Sw==4WWV!MdcoBKchl}N=ExO}X{EC@{-j!3qxOYO4-3_(YB(PGn9EA;`?Rb8~y z>FY8liWB?D!E@&Fx~8q&tScoMSsWQOJnDWse0C5dO_moG5eX!uf(H>1BqWWtl2G0& z1e$M_mpwd$sVUzlvOM`2tIpMz%)NAH@&l!k{Tqchvf`IP&d?I=-Z|t>1q& zKL3}mGl3Gzn-`Vc@ZVw(A1a*tUfWW}CNk3tvd8?oj5ykQMIGkT+Mc!ez_=-^?dj7f z6F1CqMRD2&)sT}wv2qf0vxlE@YJ?O$(m&>IuVm{a%O2Ebq0%l12I?$zfruBC>w?iyaCR*4zu<4^>85IuhBH>EubCRUv;)iGB^NQUlRIPnHou%izZerJSsm2mOLA zPVxNN|FSAn7w&s(<4(o51}rk&Z-yU(v|AxoCTm?#a7{BLKTW|r7~(c#Gh#xheIWocbpR;|Yk7!^smBH`%K za6yb*qO$gDeBa+eikO&;!$!WF`%Pqo>aa57KyV3DZaMfJ;=ng*!G4H1P)<66W%+Bwxi^x~;oE0UZ?C>c zKV36YD;FZ>FMQq}N1$2?mDJsQp$u)G3w5dJqu)~he}0@KG)1xkS1zC&t4Hp`@ktN# z>QFsnHDOcs=-@LBnkVmhcz2Xmw->^iCvnK{n8=qi%1pPKM?MLNXqOSeqCFPthB~(@ z;);;hpNGJ|IHog0>OCdO%|l>#B_PTUS<^!# zQ(7(RPMq~e631OpJ+dfqHl4aPo1m8|-{|S?Jlm2nC>x(kJ}TKC;#2_$Bb7mc zJ7A;EB8~Va5ah09O*{RWfliLiTdI|*YfqKlufDPgrH@;j{96Z$zwbpB6MRbesGkV0 zpYkQ6Oa=4E@(KfCR082SnxL5ZDG>kEZ_yw@?2~|YcfxrRZlkv(WCm{ z;Q4QVmN;8(X(jS!>1%?^xYBz3WQS{^2~0P70}eS~j{v=pcW#C~63Qss?LHUGdE<$A zb>H3`{(e^MWeYG|v-ZUI2l5eW>kT)NfKl%I6Nf~}-=JsWhv~N-r zRSyz2EB@QDR(|Z`RrDO!(d*_9PqMs=bz2lK`k0h~rI+@_K!m@*E$9JrngMQGd^9x-YywLBG=Pp$uT8`b?eW?=GGm~Daz&p9`>=pyqsbS0u$EXexDlq z9+o)G&*4Mzg|GLxz=>ya%`9h{4x_Sw-B{5)y`Xt()>W~27P+PFJ%fMrU)#cVA`)-; z(~Jdc!s*{`xs*kSfh;z}?UE=Q|A;mT7lAMm`<5wRHK)?!EB{Lx$+g|SjbQ%vV>^Ah z%MYZOq1ISk*4K5Eaou9|{8oOPHvKSW#40-pB{OcEbm4VTWa|}E0T%sQ5?%+bOEBkJ z0jee;=qX2X9Q>)`e5WD~CJIy%qAH$VeYT;^rd`*wHF0vO)O|6v4WYQXktI0(2E@WT z{#_6r2WH(!PBMH15;J0bxKzFu6->?+KcB=du5lVa09!oxMc+bKt`DeUsN&6F!4~?& zV^vYn@nF^hc4h9p%D#nP{4DT44fa*-B0VR^9b6EHCCn`M(s%{uICHM0(FjcuB_BuwJq!F>ilX3nI6j7dRF#qA9=t0RtQT-liK%c zVzkzAd|!>i%;DbzK9`m0`@3O)2w$Lrl7Z$-#yH{dr&nI~>G zT&-O|R;6@A3Nhn9Jvp%koo4vb4vsPiba3}%g%oQ%@OjULwHpbNX`JWiK-z3#UOQTd 
za=4-9X6XC0xL%D4dyYvI&o^&}(EbBbYL4^e-{8>O1WhYyq`yAjyh$Y702MQxCm#C` zOaQr{`Yc4novE-_%t2H%FbU<@`Ym;{0u%^=X5u~~A(8R?(4PYyU3WwAG=P$@_x#yC zyfC~qda`z)8M?kQ!Cv_gafxXX@hZ}EkfApsldY|Y0OQ*SA||m-1w;Zm$Zf#!=utZv zB8|*5uV@Zv02+J9rnxgdW1KAt?5<$LLQQi|v}-^y4hidgV@{ODABEa48{+HKuSIU; z&2l|!C?q(4>6UX!^WPocPa%AGB#`noAc!jD-!Te_lQ`~u`b+FbETtynN!*-SX_%GqOKk->~>~WCemz!=T=`! zXE{f^z)@Nc(T6R>e9#QK0Cq}(gI@m-zM`TWJ%U1zSV!|Eg{^rKHY&aMy9zUNbC@Fw zYHKV~yK;)Gx9tY=m%XhWt-ksxuYuu+&b3vssRPIGnw@=dRZxm0end@l$sy`8&0W?SCIAqNr3u`(z6wS`480>HjlYGaX z;IFR7#@63%&D>L2N?GWJ66aG%`McEW{$Wo}Rg-rdsrSyS+r%({UZXAu)cc*4a3}*y zsoG?`#caL=VF}OC2kGWfo;o>hm#EBch)L@Amaf5dbLNgXYko`5b*puHV={`!DVp%j zr~W{`Hz8UG*LP@_P2<(TDrkF# zK|B?3N{#;2zo@A(Q2g5xqN19)yfVSC(0+KZ>$w~HaDGt9N)_NTA7|6YBz4Zmwum9i zMLe=aA(bS@YWxYk38^5*|zo$xnPkTt+S`e$cqyuDG}&bo`|~U>(7{ zwteAAOPFK|L&Pb2{rk03957)SI8ubTM75=9Za}EEpOY62j%kn}rL!Saaq*;nPn({B zyo6JrZ&F;$l>#m{T=mZio2j32Hxl5*25&?EM&Nyo0897R-$lA3Lxg5)vK;$lo8Ey6 z?=HD2*?ujFHpaO#g7H7aHR$M#B)h8gd@kJ?^tF39w)R}TQBRhcx$r-1;_4v<{zZ(z* zn-qL94imL(#nl$`3G7y&@!0Myo+~Ccwh&CMl^x2gb}LMre*sZsC~47k_0r=3X@|yxYhg#QT$%smO%MqQ*yxO6VU6lH8KAhU?Thg6-Tau z_jch@xoU-Tzu*=G&cNE};=3yge3x_IZP2%w`CMLj689?lrbW*reKBCx2`sJnG|woC zzXURo!XT`kz*I$xjWc&QZtX#OaJv3hJ|T>sh3#|-Lj3mb*tv6n@7A3Vh0Vnm+AtnV z;#bvi_}G3mBiUm~=nHp71K$Jk&zfI;UbLUOGZv;cm`Qi5{$Sg?ZhhbD{FG+^Mc#Lh z#0$e~r%tP3?Kw#gqYHLM%G9#OMqVO~Om!dmtWE)XU+-$BqJcWif}5^ee1TWs%44ek zIQ!=v^!SEV7^m3;oH0;S;`|%9Zd8DmOyU7Hi1Mwr>&>#Upt>x19p4WektnaG$-0D- zY^3taj5^}0X5Fc6Pf`#LaScJ-0bmZpLz$#v5+XxWSDO>5beYz40eBuVSIzW=U7Ie4 z<)+}N-nbF^5?PVpQ$OOQbxB9#nYe#-IG9VjHMlui?tyv-1$!*Q{tlLpq3ujyx`B~u ziA@5w>WJ{=3{Ku>D^HF3jvV7UVu(k9^4W}(cwe>d@ipyZX+?%uct>mgR%@lCqG7uR zB$M1ZoJ|HJkR9HSG9e!G;1;a#MLKJdZmrt_pdnPrBI%gK7uiy~S3(j6vZ56TcKpI= z;y9>*@*`drZ&s^+jcoqATNLZIbCGUMfvVg!C=sOIBnyFWx$P z%G{xjr>y<<7JkOQf+TEa1ow?vbO0PbhvmwDU(6U`-ubyV9|%oHe5mL^;knBaGhoEV zG33j80wqynOHgU4fO;If0w$>rQdCytvlj%EklzWQ5nJ~N1lJ5+w7QIc&MT`nTM=Tb z=3U(9r@DIoC-v=>)F<)+s)@1_3~1YoLhucN%b9CIN85J_4D^8s(`Qla){Ka{U=&bx~NXM9VVLNki?gsw& 
zVaT8^@@jGFc-$MgfP+d>MIo>Za;-KFPs{pk^s%W_pki!0^J!91hBA@Ifi-Czb3efa zr)%WA!uWGlz(Md@FV0c&Wa3nZ@8B#Sp#e8@j>4)%t@Oa*vPM9B2zX^1%N#ua!2zJ< zRXu6C8Dj4VI8T~fQ@r1>qtJe8c+Y$trEl%Ni0E0U^7hVxmpHmk0sYxr#du zv+Zs>v}bFo{8>(X>_T8|0!!0^IAR`gZl&@U+HLdveCy<4S?Q$PB=Huv5O*^&dS+HT%rp-#u_X65a1gjIj6N_pLjr|3LbuT(x=3;* zKmh36yjJiwKoMMfJ&*LnggViShx~|WsI&oM3fX1rlWWbGMHym9z5iHm@yvI*!p!L0 z57ddds{abQxLuP-z?2QEP#a6M8O|0+=g73BeYWXC%TaYxgF*@lc|CFL#6bHXG?-3w zj7A;x=0G$pxkB)F01MI2p_w`{>z$5{~32n{kPk7y?$D{vD z>_;D5BRHmGL3f^k)(sx;=06P%L+|ReY&Q0JBtRR;JrhN$TL3ctr@iL z^XLw&nOX1l8+zST`=M7RfEsp)?s3AggA_FVgTAS$Mz4+Acs{jl0xj3$`j4^f`>;Md zO_YeCZj;(-%gFOfK2drhLjx)3w@vs2%W5kbr~Vf=1M~km{wZmL@yM^U(&+SP4>ANU z|GLpCoR*l!!)#_q>LuvyQ)>E$tr6O4yDKrR>IAeIG)p)=+dEHyJ=OtMV$z66Nxr}f z-HQmkmR{Z&9?uWcany=_RfEziAgV*mj2Ol4b!$PrE>cCdgeVC4KLvXb_b zF@DN|`fdZ(aUjAUAkh2;=KEmfRi$ ziz;k{-X$RUp$yy{s6x1+h2`$?2)(L364z7sh#J!rW4$j{WU3TId9w9!QE!@i1urdH z@nI3R-9o!;+Bf+y5m}Hl5DK#~_!CT`@I~V^p@5Np;2jdGHNbRh8X~&Svle;MV&yfw zRz!oI6dD^w9oFyMn4Go;SMgNjQ%5_|#+7cCkuK|ydbv(9pQPkvKgd_AmtIMCH*PEv z_Qq8ZSka%U?ZCnLs@^B5xOjUq#=j%E0XWu&#WZGeC~{q}3K*KCUS8x1Yan5kGuI_7R~MB)vfXD`%^BN z+C}%zChS55y|y7Apqge$ZNpqDZCRWz45N7hAHLZQAATqDvv)Sc?P7P@pUJ-h zF@dI*2iRbxT;(yNL~V)#Y}#_lBj6DVJ>xS(Ee&c>=hbUhTtXSD-_@@K7b@0eT_Z}+ zykPp*HC&dLs%osVFCMC>o+kQO3tyHyiq5+xhE-KfqNFu~W-Q*iYvM~dv`gC8ga@Bz+GH747bA#iu7f#4cC`h40yu^~vo6 zPJvKaU=zxzuJcDG@R`5cn8k(4EXB=7IQ*BtP*+0-Fmusk1m*nN4r<&Py^z!oCLp8? 
z-vLd1sA*XrZ{lyaq!$?M-g_0nd16v9o*O`b1}oI8=Yy>@m!mv}{B@1*4J)D5$U-LI zQggz{N+)Q8`oMi9cZ>B^xTy3tl`zvkQ}DjP369?CS>i-v`0!v|@vOd#`tHVI|1zSe zv9PU0_%wOT&s6LE?awDohP`l3SVDQozALR6n52EIRIqF@#L;)TH7MY4yVw_5x=vHW z)gQ;7v2TApOzeCzD%yoS`%Lp6DZ=6n6JN6u`Kzm55_-)C$Kyk@{jf;uk`pYEpA(s_ zzgb#Le%IO$r9J-h9e*JN_6=?IE0x*&B+yDiBs52bxyF;e_`ZP4H{>7B%wcp#WZXFj zc86frf>6)u6M(y0#kvCznP8DiAU;E-MStmuWJYMbR0?Zxsk}e`{PmqjKAL&!iiu*| zXygCSmSZh0x0r5;*DjzwB!;)7!ck{0;(V}N+l08Q73#TL64~lI@zI+GaYnh}>y0BA z7z+xWi%3vx-vq+4LIpQ(G5{S)6oDQWEj9YR3z$yayYiO5Ddqy&);OK~Ls>{rb>_mtAk4EyAY z_27^3YgF7TV=L&!|6~6c?2K7-$Ryfh2Fgg^*0EO%Q&MF>GBW|JUVZ#EZ6gib&`R&t zuOtyqFuS*$^>=-DF7_O>c@|k9atr8z-h=Pp_TN@HNn(V@$=<=Z)k9M3bt5RPA9_w` zhm%XkLSt>=_P#OH61pTMz()Gkr&_a&E_KZFbS8+F{SDM02!_JOK?b>TwRK5*)!)Y9o(fo)B-kDc!aKaP_A54;M`eiKDkFt<(v zd`lahMFv`Wi&zOsFVfD}4ii>yGc)Jr1-tW3SQr9zj%! zY`}M?o6!7@{YkNOLCHq5Eg+Jin<}F|T7)SOfF4W=h4AT79Ls0hY@8e&A9&G*)UtT| zR1@da`RQ3ur?t5v(~;z9&Y5`qS5Ea*tI{ouHSOX#=bG*UoT*d#HXz#*(vX7sLvKxu z#1+=zne(_Vw#ZT#Gh+Wf>k86$-Kq@)_j(3llC5z3=&`Bz1)arkFa?FM5h^(UlLNOV?GUm;CZa7LHQnsR}f6xb^dx5$cy=R97)ljtnask zO=nF)guBG%NXhvK_R!|;F`j#3fLH~^m$yLM3_l#c5D?#AX}(*VA>P4qf&8#Q+7QR1 z1tzBvVe#BD3FqZkPnpsdjUzS}k)5@*3cNdWB+r{_4GMa!ru%E_-a%ctCJ5#wPcG&?kl-V1gaMt4W&I`{WslHQc z*Y_yQVv!ezX8ZBYObglo!$g{AW$!y*6yC?>NdY_ygi$$qiz(tg&CLfD`33sJQ~Ab- zvd>zsh%itLAor9?WhsOuU+opmbTZvTRz~;kkDkjrfyvRFI51+( zKB%Zfo(teCOszVae#>-pa8Q2GTk_&$Qdnf_!T-UH^=EvvvP}Sm=gjLLWE5gQ*0pUi z@bV0$S5q6E@#`sDXv-=xxP;vQHYgpo2P5BHK*@}v5Y7#dlITc9anR2e65r@?`8VVh zF`g_|W)>;;BU6qP{4|JkahVI%?*t8)>%J3g7|=qtjIBpCX^CEloS@FU8ZO;t0JkUU z7hpJqIroAPavdLmw2ZmJ0TQx%Qwtv{hFpa3IFTAKM;qZ^Pom>}saUig!1MeWJzUj~ zrD=F<>$izwxY7Wzbl4rc6kAJ5zMEp`XLX9D{|g*qU#J|C_r=KajBG!*1! 
zJcB~iyG=iJ6V0PiDgEQddbZ_@5`rhu^k|%`1eE4LleK?$-W6y?t&Ua_qj%Q7V%rMIlx=w~t?Zy7 zK-C7tAGf9pW$GmEo}h|8JN**yB7!&^6X~2^>l|`?g&Q_+ZGAT13oQV#EHCE-hM5s4 zW4ys|UV}(aADODck64qa^a+q)N#@sHo_B)(4k`Y$ zhQi@8yy77BvVre~^*`H~o0sBH@#8|BaNC4AO)x13(c9`OZf+AJx0@Oo5NfQeC)02H zpw9m{jWsE`<{OUptawX{D+Hh6`QrQqbF75ze=NX52nSsy!m_K_Y01WQ$7ik)Y1u}zNJNu3)Q8Sc(2HcbFJlzHukuyc$nsS5Y@1&FD?0h zS^THx=Ez`?Lfv~OnE3gWKFb{VdzmowQ4wt6ZQcb0sR#&>B7P9~0*@MrDlBFrGOV(c z=KMGL<7;ccJE(`kO|b5gul@ybGgBaMkgw`65Vd9{(er+xQl_)1^kFWv$d_T_Dr<;Q z3HX94ADZ-#m#|d58AL#Q(yAzaAakGkRnHIIlD}k{8#8ylNV7lai%98jiSSUQSl){q z7`<1JD`z|0k}cN!J-pY1b}cy^nD$z9OuG^{H7e64+&*~tcT+&|tX*@Jz0qhDMf835 zLObjap+ogKY`D4gKaZ2wN5J}tDXNlWCh&Z+IgcX3vlV1m>O8Yos!`D6@?ptqm8MX6HLsX!=`TSvBqcYd`8ouy56Nmt9%T|p8uZ_%Bioj?9)d;5e}dTLOcb$T$#!etg$Rxz zNjhHpfq8H=sb~n2M5nn8&UaK8TxB{SWf1vQ#rOXFhb`aUyy{$~*omgxIB8W!`DYF_ z+_@u?ie6#GRl=M-Nb#FEsY~R4{3oHAoIyibbr3)NizqMAZ~84P_v*tGyKc-ZFb9l1 zqJ6^88C*OsSJE_BO&j>V^y?c&*RYBovyDyy4Q+Fc{B|i0)RzJgKWFOc_k$mqdQSywpcnuX*Sid?K!1w%Ag`sCnwBlHePcxgs5E5qBjHANLUPOyG# z->iOxFNsuojl8`k+-)Pe zfL5Y?)y{`_vGPlcR7@ql0J?)+yQ=Yq)fw>VDEGx{o5RHajC+lLO_!T?Oh17Atv1*t z@t*uTMI`o9`4IQw3N~SyYLi|h-q(tQNFzXHRCu+2zkr}0LnyDWWCLn(MA!9e{5!7i zTM$Z!kc5MGK(c+`tu4U-l+BPkX%V7ey^^qgTSsM^&Y}dK{s6JCU;iMcUDs|Jjl{_HXL3lF z16fr~#wDnVgQbp>tb24<^$;7+45j`pVsQjoySjpb@>5VBzGWRm&PRd{h-m;<%K+9S zwTXxrU7TxhD0lGgE9O^?bs3;t;g75 zbUTVd9>Or)#u;HY=^rwh=^HVh#^^Z`1bJTzx;yG`fNO(dn3SwcdZ*n9P@KC}`Tnfk zWiX!!fa1{8^T7~>B#?P8Iq+O0Iy+C+`&m`!{V4odlijZ!`qrlAk6<$&E@`h4F3@jF zwmtai-9wt1j@Z6S!5oR}kpV|~xq8?U|6lY92BQF5SSrliRxZjZ*a3+&N{Nndz#jVz*M+g<q}jRR&09mfG^X0(gx{P5|}eE+_FD|DeNS=Ksc4n z68$)0DiS1XZu5a377Yr8peCdD6zUpAS>Fz!;s`J`VzAyE?6!t~{pu!C%E4wF^ne6$ zN>X!KT)LFZ)7r)8UDnJet0UK` zVUIKc#mrLpl_8I^(ZLFmnT7`ciL~AjszbOc;NWeiU&)ttnsaGPT#b+5dP7P+%G`h|DokGl^O^5tx(?-;Dkev$<=-9kZW zpj4*F;%(zU)J(LvT%A)sd%v1*M=$8g9#%%*cf6!rcYI;on5+h$^%B0RE!-;gl_xRV zG>=lZuH*?>R!ka(-g|tyoGj!8kjY2)U#Jkm_Qrp*31dAr=gBOc$H z9V?x1H!+J)BkJ*w?$wo9`ksDjSpJbp5SySAB<8R91S)Fjk^a7YM~4HzDElDskNI`< 
ztz5{&)5n?SSc@2;6Nq&-;B$H9;9QlNd)8CJn{Vfz%P2?lRUYKqy4<|U7OmMJai{t$7?mgbg%Z@ZO-WE6R z6}g*sHxF3%0=74NIp$={{p0lX zmsO;6uMfu_mtYQ`20okE+h@4m61M1r{$e<3w}sNFA|QH$^;OLm7MW)GH-A~l^vcPJ zRWwDQ%CDkAQR%9bj)=+bMAXT|Tat}vm;29q=oHRP-~OnjT}qrIGHuPQt)+cwBFX&I z`m*q#TvuXOTV2MH#g`0(gK@D=O3jCM4$mKX8_oYm)OE+R`F8JaU5eV;QnXc7d({dm zsF@nAqM~Tc8m*lORjtOZQL_?_6%@4QxXF6^>ghrTolw&W$DBHt895p*5q0mI5HeL{Co6Z`$jtSke z;fkD%j2<~|&a?jTr!NaSNI@WZN5Zc4BFnkf1LEcC4@ds@jsV*7`gxiYX?XS%edkAv zBT)M;PIbAk#Vz^~-^Y18#kR~QB-Wxh>pBeQ!s(t>~e>AN!u~&X49G z>E2XUImH|s8p~)*2=vy8oLtY-sY4d?^I5BY{214;U&SL7G@497!)*&Duwb!~{H;@0d`jK#kR zP?r+K>ia`azPGNimZaILuiYBtGkQ3I8jTE$c0o1=9Q~^f&oyNIy}8 z;nK^t>@X^@xiBTv#GeWJZ418;JL^_XOSwbPjjUd9t5V1Okobt2sV&z;H#U6ps;7Wu#NcP}q5x40m` zHGSa$onhW$&Saihm-*qs1rFYBllh^^#(fO%aOjd?;O&tHl;S@obia!hcJYoSBLnQ73+E$7hnNg+eKtd&G= zzto?J%hb}d6JKl1XlB+0kA%=ZkR*9xLgnLUDSz85ZsTDJzcS3p;R>!F>=d>`xN)IF zXF?vZZ0qm*ba=J+n#0E;8SP3) za`}y@BPFE5q_d&iSHFca^bPL)Ei9M1_wm(=R>FAYD6HK2+9hTeiTU9? zC#6EFelTazY>n3ENB*&`GQN6+$f5f}ogYr{()453!GAC85(i zf0?<71oXt%rc5{;dHmk`v~46E_f%bm-RU8;VqyDWih*6e{>#I+im!I3TUdH+9GKt9 z1;vs`IMgJdlffAD0`Aso4u`Ws2{Ms~pQ&3C9Y4FGM~{UCxV~t+RYT8AT8v^8kzOYC zZ`HtFIKd0+3T08Hbvoc-aoV6a$ww9th-y)+M;2=lT0BOcSOC&5==vnkI>W0=JXZa8{JSY8j3prYmvXN>Gd4oWuA4U!`0XEJ<0Mi z3(h}Y$J~+rmMrbL_O|-t&rkse5jG_YasX4~X-+e+S8vp@%s+0OnX|_)&0!60Xg5RC z13l)H>8pR`u}Uw-U#3<3wU+_8#_tDWP-DjHuA3c8zi+<8Sid5bb>(SFsX`Ro!I}H? 
z=p1PVh^(gaDHYy(e6KCow*rE{zVyiaU}}ggkaZbEVnNOPr2)^E-rvOs-)>~Q5ugma zpL*Bi`&11RA7_A&S)s+vBH}g*bNAxX-nK|>;S}wRch|PzB|zUApuJ(G$#IU3rjaMn zs!IaF>Noa$rhB~SR=V7w z^TSik3u2DjHd>Tp7#8`iU}4M~v4LkAEn%=ya6tgmr0m7*sT1dIPO`-F8#6NaCog`H z!wMav>!_~)Hjyfyv=Rh?RSRNe~t$K z{&`9=Bg5S3;IS1>zul}m?dv2Y+o>w&5BJ>Fz+15Hk${}iH`nhI8I>Qhc1V0W78t?b zyP;;9l&3H8E8-`cT=(5e+rG#6PAPNce2D!rIvz@&z_vgTv|u~OXNcnSL-Bh%rL`&r z%c{mz*UMiY$HTJbpR=<0@yTC$&~OfXOvr-*(q86>tP?;mo{>dvof&<))(ArC`SVB5 z2PcYpHo*Q~-oJz|;x4MhUJJec-ch^xfo-_C-MC}YJ1nyRXEG;~LR5oaUCDto-J^dY z>q`~ljsXyo^ZDai1XD0xQtzSu)3L2;>UV_a+jdj^vVJq96kGPR$QXM>?X-Vp+NVVA zJP(4G93<594e^y2NYV7J+9|$R1&%b){o*A`6rn!>8uHuo&(-aZLHoZ?bBEe+h1w*i z-V%BhGyS!t!Yk23(26sx?~jJiqaPEqJy({^`rv7MF3S%s6AI31H-m^<5TxlBd9R{V zEF({7M_k#9#A8=UMpwK6Wl0~PVEpX8V|$Jo7e;@x5b+f3*Sml2@0bE3K6)z{dl+wf zZTK3tKwhxFwN7F$IHK6(cjt$I)#z-`#m=I~R%#c5UC+IKeg1Ww&^r)!O!-ZEOGvW@ z->GkB&i-C{-p9N~*Es8ckvFUTGlcKKgA@U4WT52PXV&POi&UD-49+Sw)cA&Lx}KE4 zg?y8GWwX2M|9v|yhTdhD&*Culr8?6H2lv5mBo5j$D>ISo0JnqKkxLGvPu0%vg|myi zPUWod1N6eboG)HBB?z>zBRpekR-ZCmwCfFm0i*_z087ot49?P*(2!&NTVH9cJJ5dY zxiNq<+w7|zP62#jkUzO7#Btc)=R8#{92j^L5Ew*sH9upZpoeXHF^?h^GY!xtQhDM2 zdR`{F;ckL`sB!`7^WSI=ChqaJzd9_#N#v#bORfY)4#U-HtjsBbRzJk}&}5%`((SChQOUmd;{Mdgh%L}!0t#ECqE{AH-1 zg?V-O`Xj$SJ#}4Q`)#!!&}T@wA6Xk4^FY&?28_0xRpJUp{RJrGWmvM07~gZt;6AD+ z)H8Tg*zSD&D4kFaUkJ#5@P2m%FmH~FwPz}RXX%dnbU>x_H*y~WVsefg1Gl#mpABy< zO+Y5tgR&nIFTN_|7JwotrhUx2&f$R_Y^(RzBVJ@pB{2!+i!(4Le2DxtN=%Z`#BrHQ zG}kG3TxYn}_xRSEUs4Y-2Z##AkxH94PPTKO;vjcP|9n2*r&LAlHAWAOYZ2hZ>e$go z%?GocjC$5Rch*=oj>w3fBPT4r&Yq8ac1dBDxz@$yAwKVS3@8&j+I`0O2f2FY629?N z2+O1Nx7I&)I(&m(=N0(ojHiWqg<{S6SNT`)k6T4}#QC0dn-GL9vq5e+?#vfEK5Ekv z33ogH8kYkl40IAN`vi++QWHMfj&q%o5Lmb9OI1yOFv4&$QZaC+-rh~sHGI2Ve-c9K z&%w%foRATmE*_koDS?y!)QqEJVo^z7P@^*cd@9fCaCkAV@~;-?eK0Vb74oUgSKA=l zGN~uq8RzY0;MMPGeK%7u6teZ1VfuP@Xg39W>EaQ1d!Rs&PUbkeZ|?H)au>L?xk$o_VCSqcV53GKowj50l0A)-3smz zeGxuhE?P>ar7Ge|3PMm%OWssQFlNjo`S@)SnYy6}pmD=rzJk~1XsFeAJ2Xwo}4 zPOHnCkuA*8w73Ne;$iI{Ut-J7_$C`Qa(TS*P+Z-KqKal6F|MvyX~RVLQi8 
zkacu6;MERwSz;_thR%J*>sdIb<(eK{{pPCGuKwZ<5hrc%Ok7XJLQ zqf<$`SH~}(USungni*SiYt+A;qtG{JsjS!GKWmidgrX|?slGbA%93uR=U^})6XZ)F zGE+6I*=FERf}U%%b@YoXYzLW|(afSn1jGpIfkrpxoo1ehgR<>&#fw zwPhwM-8IvBVdk zyn21zCpHl`muN}vnZ2y84X;1afE;n`e~c!mUHo&e*JK}sZ=oVB0WXoMl)``aY?7|R>krApk`~&irwxH3gyLhZv&BoKS+ej;{ zo)KRxo{rq$L76n4jn>xRD1unNxE6h=YT=z;w~+vK7)|R(FwM=1uEV`mH`fm6bHZNe@LuJ12U6lGYWtOL?Aef<>x; zOm#`vmQ1tCmWHeU0#zZ_*rjJ0eRRmUa^{0UK@-b29L*zBphB49ff4TJC|Y*JfnLzl z9YcVge=w#;nIQHA*C> z`(3wFS2d$2=a(%Y#Lx`?y~g|PQf6{{%EET7nenm}TjM_z%*%H!j4VXK^9P?WT?M_o zJTA8?;T%5rDPZ&hr&tz1?_ah@2dJ|AJ?e$K0SsUkTu&=~?Gnk7s3zG}%Nqn#YHZUX zz!0!O>U;J=zI%=z7bgP&c6}cpL=haD;Gej$xzLjfq#@=Pl}}muf_7&D)&Ln(TVkyG_0i39!Du)PeYRing9&;QC{QI0VUaPZ`#I}`R-B!*-2`>1bBX{EZb==l$@t?O9QL(YnjYHotCpzx8MMwVoIW zll3FI`%z6+)1n55|ji7HF8j)J^fmia`GX{-rV?7MwVH}gj=pS zQCB5zMOS%m9>!~Ojcg00m!RxqSO<>XJEnu+FZkA7Yg$KZdbQOL$SGoLAX^mpfIg%? zCjy4BKvKZr>#9bo;(~wM6Ik*fVNT=9F}G8CTN)&`msc_>2OsmVjgDC6W#U3=YHF#K zzR$rcQCJlnPqkN|_RBk2_#sI_r;e;J0-+#@c?A<_-St?4x4Q?~|3B=?grX=(;sHOh zM)H17*dm+Xr-8oVPx8GdmtlB27miPep!0v?qoc}8?WDUprcr$FAfxi(9JXC0u*vLR z)FmepI^gd)*Rg6zjn79+)&Eq4kS_pbQSIXVoaf>}&pf^rEObrEOJG2AZ1wu}^lS4YE~;vIsx1M$@_h4f^q3)ut}- za?ze}QwkqT01^f!S;|#CbkU1=(B|&qlI!O?cJ&lxkguU}A`G1saReSbzP zDuZ65*#Gk3dHDX5%*wO%X3eOpMzAX4f00IV>a+K;=4749Un+k2gHJba?q1@BSjsYH zwX80S*nRkW3^QlF%+Vjz@YIlCuC@L@F2F7$uk||p@)Fy=+wfJq_1?4&`~~|qP)n0r z{nm6zK=S}#FtOf06T1dfWIpCV(5QYnmbQR^E&E?>DQ5Q>Q29b+WvG9EbxE#}6 z&v~@4i?km_Ju)jMt4`0tyg*LiMQ?R+fz5fiaN33XgRx}!SebW-vHnnLt9b#PSx_|`bBQM|joCX+uDi|JtX>-+%z`&e z!wtF05Rc^p-wgV6Z~3Ud>y!popPbb7Wf1&Sd6Jo&wIjOhc=&UsD{$WJD&WW>1K)aC zcGD769p;#|2arJ;I8L$&YDJzcAC&IC7XtO4WB0UDZ`UToYg@xcB^i0|OZ!fy`3xhN zNJRHH!q`Vf?t{N(6wGn+T{Zp{^AmFw7mfHza?($pe$8zT5$~cR@8Z!iqeSkCzoFQq z)z!Gt#0fTPDH_r7a)7~8`K&*IP$j_LP?%n$C@LRdUVpu7(^$*7-YHS2Lyg}590|Ml z?j9JY93*92>~OF7rNAV2-hbhn4HxGDYBlS*G~wj06lla%#A`|sK3{n&5;ba2R}QqGqJu7c2wA*A(dT~LY*M8EwZ)K8u8YBLkqM%MfK zcp1aBYk3RZ`!lH?O_1i9$K_C-d7Up1m@{Zm^_#e>ti6mZ&COW%^j-d#hI?&5=f?SS 
zF3)27oa^`rwm*=LXU7#vkN5&x#zr;r9D5DTX_p}?fy9K&T-+^>bxMQ z{oF@Echgv{fMdp9jXxJ zQW7nU_+X6F-iOzF-I$?tn4$YB^q|>ut0~i0PhM<`910DUqil_xfF)>1>;tmSE`RYo z0ql{}L*9Yot6VL|kYgLwHce+zm3-ZxyYZ{vJGMsM#m`*sP3?X1WRF`e;Kr)&FB{4# zIbBQIe(lz0^80l#A^=<*A-_Cs7y2-2v0}q0v#4I|mP>2qJiO#dbN4edZX zX6!V#*nkE)-1gOfPqT@LGTY->-m8Y-!>=vHm%4lA*9=GIDpbSdUaEuzdNX=0_7*x; z1>0dEU)76WXr;bFuokY2fn5|Wm&8RTvH>2YzPh*(I$a^qf!@At<@5~U)3*EcO>)g< zeAQBDcJYPS;y{goRg|(1LydjfK7v^7w_!U1Sbs=IQpeU=b%CzS7ocC00_67}>)zv@ z17jJz0N=WPV7S4_)fo5l^?1ysAD<3^1Sh+-RSO?Se$NTb0A}O7?X!s78acc^Vo#Du z%P7|*y&%=`Q%ba0KFQCQU0nJ43umu5U7)fY<*fiMBdrLAmjyPnWLKRklxb{g&`9w? z>kqpuQV*!Y{c4caN8*P$Q6L(OCy>GWm(1Rl9EqgH6PN_WiMIskx_df&|HDdM8cn+u z&8ve;gAHF?pDpK!_0?L;PB!bJ0Gj)Z6O&Z zXNKO@h#JK>#+-J(|m%yyG8haQzFXg7HQJwa}eC;N`Is zn}An#<`EaKh|Iu3&G$Wyp&Y3r|0ARIRcsa)Ac~k3XP)L0We1(h_XB%b$rndFx#gcD zj6#JLN4N{#oMfJbjpNuzkFO|sdi)S3$-1Q3X8cID7Hm-y&rj*u&=Hg>&gmCov{M4m zW-eS~Q3OX~8AwuDeqU`yTEpk^rm(u&lj%MiJZID>w{b6DBpJg7S!HEu7*Xx$43HKb zcc&x!jjz^nIt$!k9B0`)A$kqnqVgZ)t>0CKf6(5;Q~0~WkxK*?i8L&%kQQ-AL9W%F z9N$?QrM#$H0F%>8j~TsIu9w%w+jD>ui=5bt1oha=L6#vts}ugL3NKN*BARkz__i$e5mSXt&_L5NYvVZ_Qk~N z<3>A=8^8|PMVd#DIr6Hs*ZHSsHnQA4I-W|0#lJf zFTE>vyCS5D;hH^1gb`}=zMu%18+~|QqW~3WvP3Uyj{mu`Y^xFESZ4xE@=5Qw!8M64 z)<&p&&oYUWRaE3=^~+=gNwMtSrW!<*XY-TNS-j=Tl;vNMBfAR_Yq)nR!kCF2t7rWG zTzO^6AD4p{QWKdTI@ZM@wFXDbKgHsr!SY=J)SmLn+QJiCckZ=Zrn>1n^MV)Lc4QS_ zZx81D($=e&%S^YjloAuzzhGIP;W@7=C_o+0dp>2bs`yN1-=EGRb34N20O+&bIprZX z>5D9Gj~D6(besABg{aF(72x0NSBatZXo#)gb>8int+M9&JnX2}DQ<7tktW@t2i{Nv z<1SGJ42_on!l3bD&HAMT z`noOP1}2#8#Xp8lR^=kNg-3r-3Bpt-k+(?P3u3M@uWj z!;gv4HQS=8bo{T@dYBjCTN~VO;KT-}*zOJzW?WM<{%!mxW!1=}OiT%wPDuwUpoj(a ztRP@+g%`jM>;vf5vHgK{4Njs~4HQ`zwE!ZVax2`4g+4 zZeHQ{*mQE0N6@`W0;G(!`9v+%t;WaYnv~L#lk#X|5-c<6;v8>vF<686t)_DM${x?l3#cYPYdiJ9^MyGlwbH}x+DCe9OQ`~ zW-B*1_i1=}zS!r>L$3=;P=GxPB}BRh6MfGJ?vQKHtL@s)l7^LE zUbw(UrPr~D2>uW?F5b$4EV(W|WDws5)w*fpYA#Bv-VV48zmlZ7A@SGh!maem} zPrT(K=^PjrOhctyhMv=Bat|}BUM=? 
zN=qPyn^otC6ftKvH;(8{_3dZw^A`Lv3c6~P|B|189 zkupPBpfb*`kn^zNkxRuISsKl`@0)NeuOQ0;(Bfj>^5S>|X`s?K^qIW1p0F%UD?*8% z#K;OtkU<_$wbE)0Gb?$mPWEXj)jiROtmKkQpwnC{D^7Z`Qk`9KsW5URVZk|W;YIR- z-0DJV&)a{>xH*ebkt;+7g~dg7v!^VS8(+{va_d3`b7W5N?2(^91~sCT9Fb^J4&6jG z{9xOA%Gpzo?x?d^YlIegdtxFk@hI{yk3BE{s4>fXLiD!7Z@6ytp2O87yNrwW!Eh#C z?U3cvr!6YH0AuemZX00rVF<$OT9Mp(Lg_vntL4jXiHkDL{&#s;-&)p*y>mz)J%7%9 zf~}E|tjg1W*qT7JHPWX06D&T6xp@ckN$0doBF8GJC`W}P{j{sSY+NuUbpS|~~YD&VWGg<5w4tNhTm zEF-UHB}QIT&N|D%29z|F>KTyT1wnH%QYxm}0l7-`u(dJR2*ayiOmU$-)BhZ*w#~CF z<9w9sn>w8gzL)X7&KpYE6Hly_ZJ1#6h?|7CMX{dxQ=c=RdQ*et=1k1URr7v|noM6) zg@BgZ2y9Z#<=3Ydu2&W}WFs7bQcg!Av2qp_G&cm9IkaIUB;!~E4cUoVB`O(zQO z8ENa)V!HywNmue>?l{dcPmG6&Nyni+M$??{fG(0RzxwkQtt*UJF?p;TXv{Soi|?60 zPUy7-2wI3QlUwM<=SXWSm+>$oyUB@L7#j9FXF7u|kE^8ZJx1$A7p3nW-%Zxb z3U{`ROcq03nWT1?Km7ELu+B~*2I-RBEt0-_=YDHTYt`Gm$fI2aQHdl~U~{XJ8Y{B1 z-vb&jQM%uf5Mjvo*noUT;IgheEF5_|fWSrz|850F6!m20OtoPQzqnBIVp?sBFuWnh zYjjXI4c~u+-1N$YNsoid>Ul+%zzuZpeJ9`Ca9HL|l#yp~1pS>b314b>?@#V8w(SQ^ zWYT?}zzgnTJl?#ZBmp?wc48O}LSlW-!*Y{*ZthQ{ghTGG z{I}oi9J{k)-@b6=yO5pCpFxX*nm_ittjFu1d=)Ps=$abHmZ~g zD33rC`ZPfe#DKCZ>!mG5b$s_tb+yQaSe43i$74+QqN9EINi2nA(n^NuJI6h&jm*pS zAD;!O!@$DIQim}Vl8k?Ob!Q7B`u+cMsgGs^)cMFUT8Ps zbx{i7d(+kJ%P=61w;Juiq@2`Z7)Bm@8|aCNpUz`bsj+9-_gT6E%=TH87DiWi zUrqc*OxW^eU_z-Sxm9p7=8@-w*NBJ>cHG8V0BL3IF|4f=E$ozciW=g)slo3SwNC$ z_xrVX?t2FDTsPkE1v69AuMvhP)YGwrkQPn7TAPqFJSVxohrx1+DWtawFStz7VA~2T;C?y@ zj!$F2AJ@1An2)sfKkhnN=C;(7DDJdX6SI_ADZco_>Aky{uS$J1~m0o9i`GNBt zQ7Br$LuS018cZlI!rvnC!&IM?zXLtp$`jy{m3}R5-1G_NzZAzLtzzUZ4(yIRl)beU zO9B1?@Oe`VNqD~xSfa@qqfkKqK8U{=*1^0ZZ1Z^m*_YOq&OxQp75nZ_Eemf0bx7Uv zZy^f2&P%bOfMvSTvLwejVFO^#l6 z*3Qub{m52(k}IX<12)Ke|2+T7)^;`Yn>>7|ldxCrCjdV9${)W-Dx}Gt zUdu^*4BJolFL&~EvAaFH1N;m zS++e7Fpjv#T)uA`Wy%?LKK9&~7$S)5oIMC0gYPr1yJ1jXEbdy%@4pePs4~O-yyWaY zFMPAgVp>zxmlpKbEMLyR8_i#Z39jB0*euBYd|-K5nK3o{TaxG!MzWLNT6$z!^SPd~ zk(v9~Ca#Oj7--mF0cN5w2O3U)F5pyER2p6O|9CKxtSNOSi*eI!VL~abwu*$PCHOt&OLxt z?fo-G(S-gYwSMuFyXN{M>AC<-73YPy&GJ7mam$%@ zY=CiPjr7=hKY(0q#2o7w@}xdHy!Rx 
zyJ&eq#Y;bYGs}X?w(Ie@?Fi7(<<3j-S`Np|bKS!O zSbh>Sw;n8fc*TpM;z)ur)ev`QY>yaeycwYYR#CuE{@qP)u4;f|IAMLjJNz=Ra^yNj zxa$l9!7>lm`oB*Ifp)cLR8@%s4k-0p4Q?ehKjl8Tr>lras z!Lphb?vPrnKVaprtHuS;ypLPdS(KK+PHlQ+-PaH9HatCS4$%ads9h4S<5(j zeO$QvEs~y%JBUJ*aKEAB^%b@nH$vE!v8>;lAz%g*{dTOV@p4-NQiPt*&3V%Pnmg43U3% zFit!}wXvk-6)peN93URMVW2#V3YqlxVBn74VpAj3s{F4v^q%isc4iYBP125F)A{Z3 zlPK)cPNyZYKiHF;TWR|As?NhUsu>NUH!^IN{4L4V0@j+JXTQ!29l<4j4(PRO| z{1;qNIkz;+M@EO*Z{wKB_rGe?<&#IpSKni01&2dF4m`I)G=FZB>FgKCd60Azc82ML z%lDz2tB(97qfxqiVt_<)nE6LEUqlqAdFw$Nx?u-|f!4!@@GoK1gchpcx5R{HUNkhJnC6I}xbt z4}*6-QsOlcHZCHjgwch*r!Fg>-X{>?20A~9*8L{k+P=j8*wv;yU$9oanKuRYp`4poxS6KPDVTCB zg^da{Yiv^;IH^wAgA>f#OKbz0eS`*nXT1g>6iVGk8xR=s3LK37X|kGLNAno%u6^?n zeff$hS_s3?1a+$TlC$93T)VXB%ln1N0wf=e@x!11yx#LCaxe|=zfdaH2-OpDU1zh2 zS%_DHD&k zdCv@mFskpq!&{hX9_z3eWKJk<+F65rTVzD8k2nc*eA{YYYX9`6stt9DMjV;AWKU%bl7wKK5! z>uSGW|4drisQGASvKkM24&tqX4QOPvv0@A|fjUn)BE=qUIx;G(LJA3|JKoSd$+P?t zGa=l&55|2K%LV?~a_g%FTt~85<(^_B2%!>O9+$J%w^rF2^gk{DXXjG0 zKjgeki}1EOk;B#sC#s09jDSS}Rl?E>@QPMmR*9Y_ir6)TW6#>poRW0Xv*HL}+3BJS z)MU1`2c0pkUO(M)7F+~W8oHVHSiT!-*D4|PI@h~j*Fd;z$7MS8Zj*&ifP{BPg>ae( zsqEzW7eK4&hGvBfqhsXU4br$XAie)zRpf_n*kbTErZT{q%(uav6(8O5#9x*_lh*f) zTnU-LxFq4+R89Pi?;1@CT8@7I?LIK%GAJq1I3Y%2v=MI!)D2o0#K+jTGH^AHBRfP4 zX_A5#?v-V%EWYDsnS^(km+m)r^6X-be8pQ-=ZRmbI{aw~XZ%XVaRMtZZcIcHguXg$ zp!n%84tKx;PMp+M-vI#5W51Y1{f!eaM&|@aN#@HdpZ*t%Mv81A9bsIE)pRz%d7+pU zZ_;AnFAHp84X<>`Lu*R0coLh(si$UW*Gt427!{#8l-Bn0EZl1L3OUEP)yv{=A;~a> zWf!M(rZ>_r$kIir0&xxO|5ePsAU0{yZfhVKdy=-=MGsGD0Fze>KR8PIiisBnzE%c#w%clC{jV%hYa}0DzvxxcQpp z|6}8wnFj)}Qm)#k`mdOvH!!$}@4?RMQ4>`@zI_|y!}JLkbHIWV9(XPMCG8y4)SyLv(!^p^IOG`pua*@kQPz0V zTONUP<2zxZwagmIEN?l)J*{lRGk;UMYeNcLUyCJX?&u@6c4BX1`+O!Y~i3OvEZLJJul8N8lCx5Qe!85ZKu1bJf}c3x4+Sw>R(>zY4upX zv#hX_d2^}UUXx;?m8fsARSpSjKo0Q{Kp%5%?xs;UE3t?h`Czhl3Dhi?Yl(Yx)wV=2 z+%cEki|%Xx`+OcdOzGGrB9C46d7FOupA44hb=bSx@Ylt z3{HxV?I|EOaoTw|mt5nx?jCPYZmh>FlU$`;Dxq8RyIMN99bN&7lgeqRw7^zr>%p7o zU>Ozsq|EmSUG0D}Eoe-0n&Gx&C(GpPu9#u{t>@{uky;Q2yv*Oq8p;X+!+@r_wddeK zTE&(wcv7a*B++%AM 
zdC4D)&%aU)MS^Z%a%|nPJP()28-8;IkR|x$q0WYBUkUAKGe>_mUY|?QlCvppKfEKl z)~qZ%_g$h}FJ*s)=EV0vI&Po+l{1o#9B2S>48VyRdqiPittIu=iQI;ND=7#C=8UCF z^qTcb%wOh_a|04gc{D&HLIH}Tj6ol=B+<|`aly?*f0Xd-Kq{=CeeyDuvE)H0b4+TR z-8nx%Uy?ud?;asYtb)sBSqksY3*UZU&5*=`|CBxSP&VUyz}bQ};#~|HLAfi;;jX}^ zxtcS@z)YQ8dEvfirZ^*bzgSp!JE4+3Bt(6%wRUZP$t5f}XA?p2DvwDn+LF9^(|W70 zSObXck;V05&GpQiK0*NjTh1zVZXc5*PJjQLXcSP^J;6+-zj^Z~3#~h9LT3EpJ~()# zHNXwSdw7NLP&>*y5lQ?F+IyS@Y--=M{`>5dV+S$FqB~nQ3A$rIZ|S}O zk9By|qmuwgJvD%ST&H0s3Kv~aS*W4^-mwCF`_IFLxB5db)Za3oL^>iAUsTjKQ4G_& z%H0F7q8)B2)0*BJYhFLKNeF!doNw}~`W}yf^+m1>T)GCK6Io~S3kZERL!9K4rER~g zC`GuPqEi?Qs78fwBwQxvCInL3z4{(B&^W13St@{w_B;PqYslkIWW`s43Fa#r(VrGS zPpfs`#55#1!XE{*iA@nCm3mwKgvLvyY8y`EPSuuIn&fJqw;kdXasxSsIBGDr~ zGLhX^Zs6;#H>4f;pf)y+$tY&atL9sbId#(y$|<|sujH~mFz8?7aWnivY*EC9w2iJC#oouf_dCk(b2g&WLd5hJe6Ba}m~mcPDlTq`jnNTlEG}qV91h!&H!~uC zEd-EIen!u&`SE6zlK&-*`5z7wj1Lnn#vnH^{#vP^ndyue3P}$X7|i>$wwM0##Aj@3 zQGstg+NWl2<^tKH6_WjX1TLDb+U6j6gC%oDN!uv?vCuoX77Wq4naol_Y=M6jyCp!k zKZIwxLVxRf3$f^M8f_)8dlOB_{V-KHp*#2TV@8#FLFZAWY*EzJtGYn;{Jbx6F}KgD zQ@Mw2TZd%H_ho{L{|v?@U8qmOni1!2F8&*)Rk*;*xtH;A#@B4~`u^V3yC@iJ{mHME zJ>rhqJBjA@lydPF6JglOCX?r+D`#+;ckmDLtYXk{%)|jYa%a_6r61P&wZM#_P)2H* zq{Fhs^Lf2rD+r!BckS|4nh6VCtL?0Fcr%-gdqr8<$fUWwAGTP#BT&DxDT1qFyAnmK z$uPL$v>{LdawFh7d8xwVTo8aneA@_tYXvfrpMNIu8rP+TCdiEHAv25DREAG(vF88^O@6r&IG;r6~;DP2m>%WUu-V1eRO=25nO z;XLYWGRH5S+n&EpV&VH`#Aft^q5k0?4^yP_Y=-h9@XWc`8&mh?v}_*?y?hqW=)dAt z>SoU;J#O^~W$EosT}{nLp>!a=-bC8T^g#U{eQ)aQ=RJB5`EYVI5*=Nab}pMwX@@I4 zHwqVAm1|2*9Q4&5lf{MlyICVD4+C4F@@bWEy!qokRx1Wb2(2z{*>JHDF25qgzGKMq zTd+0>wEA(feGBg}aQG`Uur_9*?(6K%EA8Cmht?9g^>+!oHWQ}Z(4O zi*!dVh7>C zr=u&oPqqKuwRulhpC>T&?SS}5or%*C z51Eb~d(&T82Q=5Bm4wTzU~ zdH*G-Pt}4-`Ok^dUh--6<4cq?K}v0)yhUvrIS%oH^$!zYVY|U{@lPW8P^p?uZDLP4ep=K5CKDG03gIW8P zG5Yg*+_?@Pbp5$vdnbgk5$z7uZ71OI_bPtXVTia=4~UYuQp6(tIpll$~2i-Vcuim!uAzSf9?iSVSUU7KQwq1`TOUvZnySo+F`+m=d< zR)Zj+a|<~q&}U4m$|si3pf7Bh4@`{7{~1swhCbQ!99{M4J0;#xGc=k)jkOBr8%0o| znv;m+8O8Yz=ATqM6sLS^|M)8XscL$d=8)aB+?h1FP_X$925iq~j<#v;ZI@SM=R0tl 
zZaS*cYav)3TK%lZHd?XT%zcPvUV-^Gg>)$|2GOMV*S5_ya6?S{C5)S;=^OosEoq~# zB8D%C-+#8EfU_fxkRKXV*kW`)L{z>EYV~P_ER21qjKE!(S4m&l_uo?4TmS?%f2aK(b_#(5LZzw*?x5O46Jo7W*cxcInX1GI)g zi!8txez0Y$ZfDISf!|DL(r=sg*Sb?}aF5dF`4@MMXk`y_MP14}>|baoprX(Nud)1$ zhQ!N4{m(-4k<5RKu5ioUE$hf98%r9pv_nghoOwE5WcTjqvpeRdhdN^O(f^D*3T>rS z*PUlDENC=OURrgCzpIFgk=P=t@j=&L9?9?`S|}A5Ju~>EC%_Cy4i6|W2=qANgyawC zt3jgL+VMwINoTaWcm_i*41x*tOsNpmsVKIzLJ!%13oH`xD*>cLt%;BAZd7a18>26p z!w?m%5f?9D&gCniv5q=D-E`Tu(a5FmIP3gj^aA*;ML9>(Iq!F z)}~YgmM-W|emJ31JC9Q^jeDbk|i;? zuqS*zFs9(m3QOu78|hr$0E?j`PKf-F(M<61DdrK^LWOTu^SX`Jdu6oj%nihQ(+bDx+`n2??B4Gs2%Io?5fzej6JJN zb~6v)HeQ^(c=6DP79pK~TX~1wwWeF3j?qcll=vQ&_YJ}$x~Vm2o>fCv3j3ycsv&Ta z{By3NNy3RQhNDb5;zmq>3>rrjAtLe)0DAHcw zkeqd8axk2LlSW4GmpC4({YRY&Gyq;Kj8V zsq*JsrFJeaK)Qn=3I*h!7J$izuNY+WAM0sHZhl9Ks4G5rFnmYmq#}wU8Yb;aW5vXI z7111b_(cA6^+|yyU1tf>`i6lz6u?FqiL9bL>*7+fFAs%d3s&MQ!@Lg5%iRHem#{SKL7 z4V=C1Zb8-@(HSQv$V=9R4UvSUAoiMu9Z>F9f|)9xIfr!5;~Bga z3hQa@@h^iB0WqFGGn7Qu1s-%{xH}6TezQ)#(_C|0~Q^J+40Bh5a|8f_@sXFchy+_(yWWZ{~Bg z<3=@wlu}alX^h@zMP;~uKdE;Js=|Q31oouaB>hD0X7x|lXEPkeJQCrk8C88y2u75d`!*Plsh(T)ksrguaP zYf}OlB-O{HHJpW4 zpS#CGIASTM7*7D8@bdZAxZ6>$d-~xzgGCgwywkp!NpLOA)@*er5$FeLfe!oxvZ+8Y zNc2Sze+KS?Qlm7xdtHh`gSHAfKIHf!^ymROF7P^nRi=|f_b&37T(%v? 
zZ~?rmrO+116~8PJ&J1vy9fgNpjx$ZRx~?7|Lc%a3pt znhRrmnh6%+6P%>l`}so?`D-Qgm+^al#b$pemXW9_=qKxB9~5r#%!q^+MLg41% zSmjwNa7+YXh3`?cBXu5JSB3)K;^RI%2If4jEn3}iA>QVpz-yKHu*s_ijy%aC<|aOc zF2YDy^YCq~>!z=!?&|Di8u;phi*W^0iY)UZH$t7gxmQK4)A))&t zZZkZS@cLa&!htvw6<8@^-S+*x)588~YZBvd+Hxjc_29D-F$vYJ`_CLqS=inQM*HSe zj#n?;g4dptpDuYpIazH7GP_AM*{karLRw4Vk;Nn34l>4@0eWnE8qp}ppV`8Wp)wt{ z-kpMy7f*I6s46@RQ#Wtm(GeAaNpDQ;_V$wdt5!o;sTo#Pyiyx3lb%kVgl!PEC&1(fg4`7z zT`S~=)CU?UYNpLNsf`pnfkc{it~nHs=^v=J`}=CQr^*d+M_B3Q6&$Q zS&}B`5N7`b*6s~JyQLpFjS}B|`uJV#d#rc!MR8Gca1}vLFETh09;Gv~L!U~IN9b{)jf7Fd`haCMl z=3_XRt1N5Ud@}3)h29$w&|KF8LAM$I1W6+EDXB#?TcEYn6B^sQZ{e!eQ)QimSr4+EHuI!bT}mj z_?HV_Q_%~l1w$^NvTazwQ({5F3jk^z(H4V3)YkLW4Rp0%He)`&0q?}4vk5PYJ@<*& z4%UrE`FogfaAo?G6dn1u{V$a=X$0aIPl^9}6bTv7 zhPoX~iRrsPUw7guNAkF*NnkA^V$&K=wei9H)HP# zPs<#@@uj$`Jbpv)BJ|^Pk`yvDbG}l$5=7LdQ!gEOi z?TV*X7e?IJqs|(Q!FZsDTEsQZJglVV)bBa5ZP)*XzF+L*`0fDyCBSPBFwqL?L)EKA zO^eq$7t-P`iwDO@;*NLlzPH6VN;7!TUT-mi=Of7ni&EZ9E(R$;F96?bm1RtaL>}na$ZSe8OOx|4E<0Bz{uC(wXZf&sB4yk% z`+Fd~VVsM8gTlb}nA1)$?n+`FNB7;=9{w}eIS*6#^-LL$iB$nc-Ll?{q8%6gsd-!1 zy7O^E6wSNy4)aTA(H^DnFh}Y0Jl=c(`NjeA`sJFUHJIc>1v$}%a{I#(EtXW8Uic1| zf*aBKP2L)FLcCd-lX|`6le1mU1?2H*5m*7{8EfJl~ywQ z#4)PT2xCq4TyFWD+I!JUq)oP)2CKP9kG$qC6`bY*3U){MraD zuGZ`5zCE-$*s=F->0G?pW+q>CrY>H3uA6m-ZKSzYf|_sXyDKhy8UMSr8}#E{qVVB& zq|)zdc=)WF!4IK|gmLMgfs!E0~SrG|Dn$69za(^@-yYPR9J5|n zgVd9y??t|I-7+UpSSrSa38uQC&qLC5z9x1tN+u%qiDY3VOFLc;n$0F>YG~&-0c4%&0=!BP3sW5lSSoCO71QuNrDk|}>b{|=x2C->J=E@1O- z#3%{dL%^hrTTqlJ9Zwk7OhyX_ul9b+o&tS+I4=wmF0C=qsi#gvC1zDU_K7tO&88I9 z`9RDtS>6eF%%R{j)A{wTB?$VHn2v79I|vX*1iBh^7Nhb1B7W5d{d(_Nh6mQgtptcu zFcYDB0zeFxS2ynZkd@%F`X)okyc{XBL^B81G>iVVMEhN_km~Qvi9!g)v`^A&^MS0T zFJ8}tTDR-*uN?vrG#s~~Tk5r(pFS9?vs}L(pzQxCL{{kcW_zDr3C(`1{mgwYgC0pE zgEw^C{421i%_oarxoXU%nsBm54Ye0}A=*9perl|jBECEs^!JTK3HueHVk9s`iX_>6DVld!Up;|z5Pc=4yxv8+KU5W(+?J&2e-*h50+06 zQ%F!#DAbzL!z>+T`WIN#JQNgGjM1sS1h1zCL~pmZ-Ponfb&{Hi49V-r3r|5#{8jRjU+JV5coF8UpH*fco$lP6t|lr>vkRn5-f)_%0cvdlSF&uo$@{ 
zuh6$LNeUcm>s@;;QRBbb_tx#v15Al7y0WUIq95pZwV5}yMA!Rh)}6AZ8j?tKO>`|2 z1sw^wb<1lD-@QgNG5!CjJvEQ>(Pb8x;OB<1$QN&^#|I>yFo>4O@-0bmD28G#5{2^= zC50?Bg~BT=LwhJBHu*w+laP%sJ;deUL%mYAI=AxVqb|)vYie#zGSor@7TBl?mw6W|o7$rOu5<6d;negT9(&$$fdW@V zXI`YQu#;0@?R5UT7|!1ewK4MP9UXx%@!!SvcBrRyZW}p3q@m$oObe2AedxLov}rg*16Uvk{KKf98%-e@q1C;*_ii)p8yM` zKf79|EF@|Q2rNl#7w5P)pRp%s{;Qg@{f5gVUYx01O0Eef;L_pzmL*Q11n9~tojN}p z#*M0IoGIPJRjdsOn^xCFU!x`oR@IaT5d3Uh{ewktuayi@16QE1Ur%bd{;+wmyhggUAs$Ak8`C*>suBLeJIWLU zVOrzzEylCYt8{YmAS(Pju^%7_H=6$Z53qV2a6Sf1N5}b3uk{l%V4F2~9n_@_c0K zYA!zCwi>iiPiS2a4Cg!t#Qd^}((=mi10(2L)H+q>-FQH)um8Ejk|5YX_$Ms(lQf;< zGl-C#!uu1MF7H7&%;uYc>A~osD4_aSRnas!RlHA~jU~8cN{-xoSsZ7daFyd@42V`m zj~Xxob-lZdJ+v-z2l^Lc)jlH5nizLMG?t~x|J26+e6?0mXETY3Hc01d;(VO5H8%i~ z=lwEoYWER(*zwYlv@aGg#y+OqjWtTkH$AnDqm9h3PFm@1nko+CN+k@@mL-RopZ{8d zE+0H?uVdaOy_p9AhPUc4&nC}lEg-&i|C-e#7EH3FnzuZhuxuo(>xt&2fz8Xg`&I+u z*3(Un)EV&TXPTi8yNPg5p2fcbgW#e=MxE9z-~EZ$H#Z(f$ALt z-3G{3=}jBty5|SatLP*U7c0_FdS+ez?suLX2tVl{l2;46)CoORB`=6#_e!u{5kwxP1-h5 zcd6v#vgX4V;WY)a3EXZ+&X1iVPKG@>bDCi3&aO=>^MX^FmG5Rc>7uhTPUfr^_KdBD z5iY&x;7>RFfGjCS|5NkPwh?dO$ssQdoN2D+?pyY&Qw20#l08)dHg7xdY~x)C zb79o*2jw(X+>BqDp{XrkbniW}7GUSgO=B2w-d^TpY=G?j?;$zW#eU4UfB~uNFha)P zJu=XlapQyhJk0cWw{9JXArz+#Iwu}PM4JD}ojH&meZLs=#!`MH2D%Gqy!9>-PK6FS z%N5!O`_#sCREbb|B=Bbs<2M0f-=k*Nh2L_W<;JVgq)pthkh2L{s-kR|3I5Hp=@&nl z$L=L5Eeybvbt%(G@SV9=e+WL*B0cThln>)VkrX(~=ObWbcF8!1{prgLX_FVh&8 z4`IgR48DI}9=(up1K7YBu^MdPtVvr?d*piIrOJ-8%@?V{!OjWe4B(2^GIj6Zg?vb+rWZ8r+y1ZBgZ2rxM45Knz z)PZZCI?MMl9bxdt(L~i>Q{oxidatcYz?NNCQvs*qOh(W{ql}Z|lz}V#B0oQRZn~4{ zgG6VK9z+;maZi+*_*^?|Qt!yE7zQb{|qt8$+k;r_g*2QPVa>axC$#30DCawStu7cS( Kp02X;P5K`^6%6VC literal 0 HcmV?d00001 From 81089a0d252e6f6b97548a351f102bb487b09481 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 03:29:31 +1100 Subject: [PATCH 0038/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 77cd9204c0..c1268b4798 100644 --- a/README.md +++ b/README.md @@ 
-1,4 +1,4 @@ - + ## 2x faster 50% less memory LLM finetuning * Manual autograd engine. From ff86769c3e75cce30cf12ccbb7ee47a3135a2c20 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:37:27 +1100 Subject: [PATCH 0039/1088] Update README.md --- README.md | 59 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index c1268b4798..24bc2a89f1 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,9 @@ * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. -* No change of hardware necessary. +* No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s +* Flash Attention +* Train Alpaca **fully locally in 13 hours from 23 hours.**

    @@ -15,34 +17,65 @@ 2. Try our Kaggle example for [the LAION OIG Chip2 dataset](https://www.kaggle.com/danielhanchen/unsloth-laion-chip2-kaggle) 3. Join our [Discord](https://discord.gg/nsS4V5Z6ge)! -# Installation Instructions +# Installation Instructions - Conda Unsloth currently only supports Linux distros and Pytorch >= 2.1. +``` +conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ + -c pytorch -c nvidia -c xformers -c conda-forge -y +pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" +``` -You must first update Pytorch to 2.1 before using pip. If you have Conda, you MUST first upgrade your Pytorch installation with the command we provided, since it also installs xformers and bitsandbytes. - +# Installation Instructions - Pip 1. Find your CUDA version via ``` import torch; torch.version.cuda ``` -2. For CUDA 11.8 or CUDA 12.1. If you are using Kaggle or Colab notebooks, we also provide a distro: DO NOT run this first if you have Conda - do step 3 then 2. +2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1 ``` pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[colab] @ git+https://github.com/unslothai/unsloth.git" -pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" ``` -3. To update Pytorch to 2.1: (You MUST run this if you have Conda FIRST) -``` -conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ - -c pytorch -c nvidia -c xformers -c conda-forge -y -``` -or +3. We only support Pytorch 2.1: You can update Pytorch via Pip: ``` pip install --upgrade --force-reinstall --no-cache-dir torch triton \ --index-url https://download.pytorch.org/whl/cu121 ``` Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. 
+# Alpaca Example +``` +from unsloth import FastLlamaModel +import torch +max_seq_length = 2048 +dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ +load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. + +# Load Llama model +model, tokenizer = FastLlamaModel.from_pretrained( + model_name = "unsloth/llama-2-7b", # Supports any llama model + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf +) + +# Do model patching and add fast LoRA weights +model = FastLlamaModel.get_peft_model( + model, + r = 16, + target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", + "gate_proj", "up_proj", "down_proj",], + lora_alpha = 16, + lora_dropout = 0, # Currently only supports dropout = 0 + bias = "none", # Currently only supports bias = "none" + use_gradient_checkpointing = True, + random_state = 3407, + max_seq_length = max_seq_length, +) + +trainer = .... Use Huggingface's Trainer and dataset loading +``` + # Future Milestones and limitations 1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. 2. Does not support non Llama models - we do so in the future. From 0d1a04511eb7de15e1b7c9a0a13d5b151b5b6669 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:40:39 +1100 Subject: [PATCH 0040/1088] Update README.md --- README.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 24bc2a89f1..53a52c31c2 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,15 @@ -## 2x faster 50% less memory LLM finetuning +## 2x faster 50% less memory LLM local finetuning * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. * No change of hardware necessary. 
Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s * Flash Attention * Train Alpaca **fully locally in 13 hours from 23 hours.** - +* Train Open Assistant **fully locally in 4 hours from 8 hours.** +* Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! +
    @@ -80,6 +82,9 @@ trainer = .... Use Huggingface's Trainer and dataset loading 1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. 2. Does not support non Llama models - we do so in the future. +# Unsloth Pro and Max +1. If you want + # Performance comparisons on 1 Tesla T4 GPU: **Time taken for 1 epoch** From ef43dc3f4ea88e7ab2a456ae3e7ac4fa8305b72e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:40:52 +1100 Subject: [PATCH 0041/1088] Update README.md --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 53a52c31c2..93f100b8e6 100644 --- a/README.md +++ b/README.md @@ -82,9 +82,6 @@ trainer = .... Use Huggingface's Trainer and dataset loading 1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. 2. Does not support non Llama models - we do so in the future. -# Unsloth Pro and Max -1. If you want - # Performance comparisons on 1 Tesla T4 GPU: **Time taken for 1 epoch** From ee641a075408e9399c9e1d023dc4102c07c92aca Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:43:26 +1100 Subject: [PATCH 0042/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 93f100b8e6..910e3a78f3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## 2x faster 50% less memory LLM local finetuning +## 50% faster 50% less memory LLM local finetuning * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. 
From f383e5dbd775ccec04746eb28a32cb788c2f8954 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:49:53 +1100 Subject: [PATCH 0043/1088] Update README.md --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 910e3a78f3..79c685cb85 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,12 @@ -## 50% faster 50% less memory LLM local finetuning +## 5X faster 50% less memory LLM local finetuning * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. * No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s * Flash Attention -* Train Alpaca **fully locally in 13 hours from 23 hours.** -* Train Open Assistant **fully locally in 4 hours from 8 hours.** +* Train Slim Orca **fully locally in 260 hours from 1301 hours (54 days).** * Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!
    From c5ec42291b375bd72e4ce3262cb9dc7905d9430f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:50:13 +1100 Subject: [PATCH 0044/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 79c685cb85..af65590289 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ * 0% loss in accuracy. * No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s * Flash Attention -* Train Slim Orca **fully locally in 260 hours from 1301 hours (54 days).** +* Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** * Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!
    From a31fb33bc973b7251e0bbb2246c6b18bb1b3053f Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 1 Dec 2023 13:51:05 +1100 Subject: [PATCH 0045/1088] Create Slim Orca 2GPUs.png --- images/Slim Orca 2GPUs.png | Bin 0 -> 43135 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Slim Orca 2GPUs.png diff --git a/images/Slim Orca 2GPUs.png b/images/Slim Orca 2GPUs.png new file mode 100644 index 0000000000000000000000000000000000000000..2a438697011369c2ae7d018146c9fe69c54b60bb GIT binary patch literal 43135 zcmeFa2T+#Twk?X$(%f@#KJPKjfG{d`kxE%m41#qZu}x@aZKGp+4PEq^~D={EbR@cgsCj`T@o zrC4^pO|>1&t*JSG?b@})OP8v6ZkLaDnx1Itsh2AcSK}8I5i#hkPZMf>`SK+`r_jNq z>fIIv`{LqajdX|P1xwjpRG&h6O4hY#;{6yhUQ;;zWQQ4G{pP*4~c8dCZB z>5kxyrjvCkmb(()-z3dzQ>&iRStfVrF-V~f@MQLKF*U-s!23yZfgr^ zaGK;hbM4D}QCVH8(eV*J?R!Iq`5)vHN-fm1u3rwY*B7ulZMM+snU^^^RroUHal~mV2!( z<;{X+8zj`6Cx$zN`x~?6828ukzQU@u;|tZv=D|`^eMj;GrFEpI`fk(Xw+UrT_K259 zX`RPXpJiHg59RO#e*O5fU$FJ%5!b5-kdFE^W}->Wh^FI{pwR|r9qZou^0wmFjmDk$ zjpZB?W3?uY4HrNCvuK0hRWB}H{Q|$RvysnVy?&iw+8~whFRr#^-R>g&K*zW1_%yh} zB<+XyXr%-_*eLvkDXA8v*`FwE_Uq{M)L7cLu$Cx0l&1F`m47?b4WRmbYNrH{942XS59Mdo_bFall$G@x;?pIn|^rTuqUK zgM-20oT!~@a|VG`jKDr%P_!8@p}m?%A-*CeSk`-|Y@k#cS382HI!@o?@R1`4*MA=7 zQ-61G*+!xG3U+a;i2LjK!sg6dV9-;W%-?u`WlVfM%S=9_|wNe z-Mx1&=KD;kqV)Rcw2|jqa%>tO2)j>?_VOqz>O92SJ`Mdo+(QieDXb<4Uld z95H!qSo-$M=Xm1D{f>gmHEOm+nC(&HC2iC9p$%tZOU`vGD&~?9-GD@R zKtfV-nauI_k~a@RLPA7_b!%)UW|=Ejt~`#@wp7N&$H&KPdcxAT8xQ7^rMqwWCXq++ z!Yh7wZ+kIs)(lB>AJ_Wz`!AmyNbjF-A@Duk&~tdjbJg&(S?7Celg(YCqocRvupz#7 zb^ZKi*;TbKQ#XdqG=2PST{r)(y;(Cc&rTl<6ft|XB_~8XEv2q|3G1E%T|j(OF}fA-H=Tg5jyOb9#Q_KXdIO94ANCM@2>9 z$By;=G)v58Eee$Sg3R^r(BXQGmoIc!!NTA)FZz4-{Wl+eIYvp4%wS2iyXo=?&%ra> zwrv}(x9{cMyZ1CcC-?C-S8P-8)2D1QB|6U2Zx9GOHg7(-XV0F+ix-!i;B@qAO8>lE zG3s30+pCpP?51fqZ?ZTd$QVJ0*P1-HhyLPhY-}NIZ3glI66fwLUdwQGJ>h5Tk?p~$ z)>`1dcFh_tYiny;TU#nE9kt1E6lu!Vc=3*kNZ-Zlc88RODu2t$I`(Y8p3>{%58_b_ zxH8maDElnU+?(&Gqo!7Qv|P?#%<`7y+f&b8Aq3?ZQnOR$!##E4^(ph0tW)`W?)-R@ z8XD{g_|22x2dVA&7VI%)U+WT 
zC_%$-LUBd`AVOX%#o`PSU-PeDuXdk{4N;5MPBCjdo|;{gW}mb{$T$S0I~gj7*BRXjftm2To}&Ua>;WXOGsJzy8{+ zudhGe% zrYFO(6k@gmK~zOiF~X1DTeTAvNg+TYw5N_~RQulSo>T$SNjwmcjM89B{_2Q^h;srm zCHcO>jk_IqBm>OVldfseT01#8g{g)=8D4Q+MkC(f@jz=~w1@PJ`k6Cl!f?iSo{iXn z#dI9V;UV}YZ;xtH798lbbK$~;-n-8!;gttQMk-LURbL%l#9T7SeYreLmBGQkcxbot zRKKfbRHJ=wS}+R5VZ?|0K>sSG7~Nx{Vq#bEP*fN56Z`tEcXGzyt!=J|b2^Q^#A%Zh z6&E)eA85Xc&$^ZUa9L$cD_0C6Ie6g;j*!E57FXDIM}Gp+8~U=*G(<5_O3dunKUCrX zVLGuo&-Y%ug_T*=J6B2gT5Zur)6|#8+}4F&|M@BG%xf0{BfwZ;HvPX4F&=S+1Oku} z2(D2fB@DJn^o9XqHcT@))@9koE2HXlOzX_skJyJ=Gdka&oCj4pvjdffE_S3&@m?8f>oGA0Ig^Cc}a~IO1f4TMsJGx zA?q)efD9FhC?|NT0f^Ng#5!4L-QYbmHXB;V+;QG`BUbNjTv2P>=g*(pklY!+0_1pj zc)WM1DIh#51edWqRoUP0ap9vpJfz;`yr;#FF01ce_Zl6wDb2Y96DC2qg2qA0I1Xd0dxW$@R(+vFcLyn;z+@GZGOMUBAvP z>#3Au%1U;2lep=;X98b87lbGmSC z!y@!o*bdnD4Sg#wFJ}|CGJf^YyJ}01%SAR$=cyQa#*_Q8=T{G0E&CdR!1&qIb8T#L za$}(5$hp*Rx&1iNmrm|Sfo}Aly;=w`1|))z@osZm+UR`54gmC7o1?bHUHem~+|M8= z0EXVmZ)|MI_pLBl!6|jXZnVe5`s<8mLm~1>>3gg0=aw~u2v%{+0z+BX+h$+M!0|7? zZeej?_c}Shm+w0(A_e^X{g=s@RV<^I-jahCiGNV$<9PX z>P94gl<2qOj^-_hPU zAFwUR~*mTlzu!&rM(f^GFO*5zoW zH?kG6t}8eA|pUnmM>redQ_t!5n zYc5qveg)K#x9w`Bde^1hv=LpzmK*~t$g9a!fjCItzCPM=8((QipI(yS?R9Ikw%vxX4w&h zdZ!U6F#T#V>z;^s!_qIjg?Lx4P{gKH>z?du87LHT?XHr?=Xz|79Supw$?x>58|kj$ zX}4kBqxrjOL{*2wN{;lzQTnq-|N6^6eGzHUBjO- zrGyggXz1wXzoa4FPx9P3Am1iCg9WK1$VZxIxl{_I7r%c2iTepKWgZj6nyGUDM+krJ1}nXXceHz-z*!>oh7^#Nf=W zTent~3(El8K0W?m!*HHxbd0&c>TOmWy@OKOX@F!K1g^M8X{UvOC^*;JlyMgjjP&V|^G0KN&jrIlgm9fWae}ZHfhjafT~hPL2MeVue|)@6jbeGkIihrwnsdEZ zON@B7ZrQr^Q_rz2_S!=dQXU8n89RweDXW-IV^zM20fI&qHl6zU&s8s8yf}dmiPVeP zav*(L+rC>+#={x?F!L=!T%$E(;r#jBqBfsLnXL8?S5J!bao1X-!9v?8TN$lW8R$H- zUjK2t&v&2Tx^&0$SkqDfDb$&%x7bNPooZdtF80Ew0&4hR@90l3j;Rncke2p;R>rdF z>&iC@{?$F7uE)4}TXt5+*VosFX(WVAjP?dzU?RCp41D8XyKddvr%#`leIQ^M7G!?CI@;7qB)8$|(FP+_W8&s0iFVf}D+4%GJl%JxELtZcDC6EA z|J=EAZkD!ET5Y52a`xDV6ZfJYP;Y|0l({NABG9iotvT053Pk=Q0@7!rwWpIJ2Bxh+ zSb1Y3BN6=uGgISOwvPgx&R8J^M?)wuzt7*4c?f>o^>u=n@wDC>d8~HELq(a@Ng=Dn4x 
zR=vX7cYCVN90lI@jYTkLLv@RTawGvtHm^=LTr0KP};L|2}mRz;UluNR1sN z-mveMx)RZTC5?tW_t+Cw>ukhITJf)wj;<<1mt2w-FEl6Bp(QzJ+2(UxEm0cAU+o4~ zOgZ1DKVQpFZT2-qSuFpl7UTd~gB?wCmdJcxzzftip~10QYTf{OP9w zSS`Upr%jL4Xn@R49ljJO9#|%Q{Ze5gNFUGm2DiIuMemHi$F`=FPGiG5P`Vx{&rJ4a z?ApCM@5*u}m(hiLz1Lb;bsV6?ef6#F zjT_9HzV^%hMjUIfuvqK*4F!RI+JigS>{dVhrSXn)ce<{$J}AJ2>s=nVF{{8jsDV=9 zk^u#%&ERw$-3pwv4XOIr(W7nPE!>i{yBc@xQVDrcKbI^iML2LtO0|x>otB7hoj&zP-_?~wF@XaNMX9r54cNc*2#ERknV7x; z@!r&qp0sy6Rl|t0U<4rob{#lyK!Jr8QYGENw(h;z5vrGuwj5zN`29U^VQz8p8G$Q# zMdq7W*c%Q>yOVaQ*VS};IHmpN0xz|12_ISHXCi zS)+^8c++a8n-(%LKKpwf-vji_!3zGD*<6sc_0#0|CACdI;JxrCV(TN_5)4bX=Lblh zXBhXiKt;LwFv1jyhZ5rNxCe*HAT!Z<)~WMOqR&2d zo-M>-fZBjc@WlA%RIiYE%PGCW04=o7r_rkaN;~3=?!;#z4Gcn><(3Z**~K;ZIlkBp zwLJzO^$P1KgKe%TWT+F3+ zX{|_I$nd#llyCc};sb>Pse$)cwE0}#gWy4*R|ZZ%(0Rt;jg04ULS<+h*REXoGShX% zdDrap$sT4!`Quqb^aa~N1j;|%dni&Pq2$G(+bZz}XVAsB;c!+kiTf&6({bcxW%c3W zd~HztY64P4d7{aA%l49Eps&>ckk_tWP5n$eV8sRrn|H$n$I#=^k46eo0NGLMYnmR4 z?w|wb%ZJHiBIlT-t$w=3vZJhcsJ#?~-4!fX-IW^X5oHNRYG4r0(-ULShTDp_u}eGE z+UYl=pGsPu;j97nGIDSi8bbc|e%<3l8Yd`#E;C*y3Z9KheKep-h*{$oTDBXl1(AU9 zq_5mZ9z%;U)E*qTeL_-W_d42gr0?gF_K8HrKnw~)xCjkH0o&ScHYR@`!SXF9N2bAX z)E{zAcl}4;k%xGwOVbk|HyA;nOx1%IqBW?gD`sXCpPlEZQDH1H7Web#-#2JH=@_1;zei+ShfRc-=~L zlI5YwJb0f|P*2Yw8(NHXox}N;1K6eF7ZZzLdpPjLnv>Q5!y;lO7pGlPZ zOeiEmS|1eQL*dgv5VB}LrfVh#tqyCj622JWxR1tP~sAnqgE*u&k8*4*7A$AP7 zUKr<&S^=aHTi)ydA(Q7+(!gr&0@koY6HUBoU7Ecb4s8X=s4XpTK}Ther*0Iz5mb+c z86PbN5c0&=*B5~Qf>v=FKz&8=ewme%O{8W))$td%Rscy?hH!3fY6|*zd!bR$i$jHe zbHU|ucKVHrTlchqxm&zsiL#1{*QQOI%3>~^koOZgQ&3PK)1%|SA>|MPT=2ELT=3(t zI15*tTK{fgmKIA0WtV^@kE1NQqtRH8K4-t5pWjlz$3{q?0vm*{8B;gjlU_KvX^1EA zj_=cH-7G0Na7@WE_MWr7P_mDM9CimdSq}tAqS4B~{<=V9jyjd1$t>+U_+`AbVBnW(v3d8?5m$>+|OUXbRni@wV@Q4fmHjW z@R$R3{R)v_X#30(>oS}h#`-c>JOCnHkIXMf`VA@yu}#Ob?yr4-nzM1Vx4|Q+yQ+6I zPJ1Lh3FO@u2<+~F(;E<*LYvsxm#tW_;`N1(x4yV?IvI?LX5F*BavjZFak9Do5ysD^ zrpvo^(rZ)rP$C~)_Ttx(`ssP)1R5lWWMH7Xu6az)NOXEX4fF{tK!<^QXHZcx z3kwU0N-B9PZ-jza)K>DwEe8C(?{9h^q8rI=k3rLC!sYSjpMUO6vF^R5$+Y6DZ=_>; 
ze0pN|0kXtKX!tIHfq_t~mR%}+D__{v3Q6|?5Z6X5hyeD;{eWQ!8{Sobm{jxBf1Fx9 z2bjb0h>(ro1>QMbL71`uT!HtEgQZ`AC zba9%!bt4GwcgzN`2ktoQ>p?UNfGN?Nc8>&W+m92z5u0$YC)2axc9j~4b#^G1>q= z08#K3o2b$1_52Lb9yozFq*vQ%@4Go0021{ovJM?4FlwGmah8C;McY{V_ywG%Y=~p8 zdUp{Xhs$O&Q?~if1Wx>}NZ0pB2E-O2S&(+h@3@7R#l{=e^fhH?;F5pLe{hoJW_CZ= z*fZ!K6Lhl{ch@n6lbM6LV6IM42}4Jz9pVoi|65&M-2@j4%NWmwy%%mGxCPNCHT5jM zdGx|vaTb=@(@V`3vixYc^KT>2qP?I0SDVknoVq!w8d&d?>pySd17J5{;6?BcA3nh;~|2)}qjlN~?y}{PPT}XF1b<6JFoC$1fZ|80A=YQ$r zqqcvPY*hzuB-;+YojZ4K#QHgC7H2qDi$~Ob`{wrhd2WVAZ~t`kUsk36j5Z^Y?bZt) zNpw^iaq*l8R&&zi)G_7L9BSSry@%fYDLa;bYr}R60iXZ;6 zrvG61&-hmV8Ejlr^5~(0!S`_+aX#6j1(`*u9l$-O+QX;a-@5(`Bx+CQMl9e5xf2In zd5s&=wV*5XbV9j*Xq|EQ?w@bueb2(bgw9m4PXsmtWnUo^%DlGr_TMrykIY}ZrU++b zsPE?y-xP7HuG6UFBuhd_C6^OUH&tw%vFY^1P$M?L3V=Z0jw~-zf~HpxI8`-D^W4nL z44I(-9fm-gfdQU))=hW;k5YYB_vq21AyBu;$OHTo3RFWq(%S;`yJXoiu6idj87Sqb z0sjGsk_o7eP8u<~cVLZq+U7*jG5ZG{z8^C9kpve_a%1%iIZUTfvpXRo-n%vn>@Wcy zKsC+Ia{ht^xgS3s0v1t<)=nD&-wn)ptRe-vwGWzX@Yf+I2ERMkcNhLf-}D-kSs1FG zjPvd2qNE0=@JrbbL-D*7v4Eu1e2{cI1g$UU+c!A~O)9{T!{D!s>Qim4YEm306DLNx z!$~Hd7(;d-;SNt8LQWr4#R{}NDp8uEq{)ijXnMp&@)r>=#7}^%Z_ayGT74WJNgxE2 z#`$k#&DnbFz5!(I=hDtjN{UOc0v9ya!CTSP^M|%SkQsT}f>#{N{e7M?aGo@nPL_TH z_!;2{70|rT^6N_UMF9=z!wA|y2a%~BHAGpUZqK6s*D-9bRJ|Q|C{;3S0fT`)`}xe| z2_!S!< zpMG8Z@4x?k4%U20((Qjp4)NfSlzpd-{~a2sF%8FO@H^yv_;66h!}YSX8@dQAu;+y| z9(E}Qbrf~#EB0T>3qh@@G4^1u&$yhk3V8P<6iqMu*0uUn>OiPak+lMzqUP4tU=(9R zAoW6tE5L|C##Jv+1*^T#DT*nG_yi%C&ND2UzCISIfhp%D>ahVNp&J&bre65sC+>nR zf#lhQ-G~Q#@Jn)D5f0M;Fz51<}r?e;#P^a*TQz+612)5zcNv5){hl7!*hxhNdNAz||ZWMK!ozX_ClLAcBGfE;`38WDLNXlcakhC-yqe#|5J z-!yIb?5l_7uobCBYN%2+!J>NYQrkl;50pF)Wor@$m{yV4)7R)qzrkk#msR<@iCyLaKV>AJuE zdI5sS0OudXs^`#HPYe8BUWm=hJ^r>L;^N_0aIzIB1uH}X02=-Je2-`xux@W4>!0ha zNBitJF?0qwW({mI;F-C`ZOFX{H6c=m6($HBhyFWt;-7&;$`I3$(_KKRFb`%PWp4#E zz|l?zD%vh{we;-+SiD}r>ZOUkAuegIvCu6q z)S+`RKtG;~kMJ9x#m(?7<9dZCHuW*D-FC2b|Z>6Qm zXb2Tl3>Gh6eij7g*(gmzzVpd9-t5;e*u>+2B5(-Y4#UI41O8NE!8~~sHIfQpv_ub7 zF(d22T8v7l##kpS@ESU^0eLQvc96HpWkU51URz?bjXTiSoF(kHkkg^BHhUKWN6553 
z0=;c0GW(4iH%id@b^SIQqB{zrbteu0VgD#(%NV2S=g*g9;+jZ^i;E*riz<*6TJ!4I z65s24WS|gGp)pBsy=T^4Ehxi?jyAnziDDB*LGs0Yy%6gTIzV(>@zZkg7L14dlIf3r z{ra^kxE6YS%Cw=jw)PDxt0!D2o#W5CCnqN8RLS(PVhb*ueY>KKL8d2v3EL7eX~6>r zRA5hL1R)~AV0PLd0`f^U`cl>q*3>qHd&c@|dbHmJF0(aUj#qE(@wd@f`X1|^ANlxU50do*_-6qwoR&a>&rmWG29!CwO; znPlAn0dS6Ujuo8IL=&O&uzQ*H5t%F)>l+3xZvq&c$9ngMe+6nCQV$eilu_ zj(m{*F1_^yHt8yM7a{0_c1Q&+BrzBai=JB1H>aMFxhR*x?MBrWfTYHx6?RdLG`j?( zwbyvha44J_@2|g{nds!F(g+FxUL-PyBP)7KIokRDr4Hyq8$#4BoK{2>W7YUR&cI#F zoB*n_1aadpX@3?w6Q3Nu;dg#Q@ct4-7$M^v#<$|Clt|K4;Dx_#^%v?im5^=DqLIak9(6{85a98u3yLr5gVy zM!V?bee}tlK%EmUzNP`^3X#DPBqdmj?)o`S8fZJap42QU7@2DXuES!WNyf21ixmQB zAiX{GE;Mwl&$5RH2R*r%taEkmxVQl3IT0xhf$?%2BnYK#CmyQc5RuEcGZ{4?5soB0 z1%y*?{gICi4LXQncV&+APhkD?9Uut|jP8bgy8j> z`RGBYx6$%N{iCBvF1Ha{ydX_-pYZu2XTAk%^hLaO5)|bP3yU)Nt}gNo4h|}xKK-b! zJUs9Ta93x>r6Uf9w;Z!ey{3 zXE>!3qE$)OgbY6WESm%U=GJZ7BBRj(@>J;vh+1}d0X{^s$^uyj!{~nm45u=1qPL*Y z*-p&?e03MRLxqB}n}v)|z>gIc^}D@1%kUVK0N{9o`fUiaotX1XDp-9}f#FX9BxErZ z0;No>Ct=BX4IViRMkiP;ulRM7u?)yi5S@0XZ2@H6vk~eq!JO^H4-qO&m)yKd(`GWpK}X3kBYJPEa> z9JH-W?8duXq1aM7OHz|v0;FsLQd2A;sa{Yv&3?#EdgdPO)DWD5ucf5|^^Ga--&grB zG!`Ob09p+B@Av@;Fi)c><)<)4vBiyf;!?oED85IAyKB^t3)N8fxh1s)e&8L*O)z}x zD+UFC=veg)EV|g+fxp$ifh~Q7-l)3F?e5(oWGeJUf9cmW`~+iE!Ted$DB6v~s@Qi} z4i@0%uC545(byZI8mq3VmvBLyfM!{YQr{iLAV5bfhFNZ~Vqg?yYSih(JtNyC(|2*e znU5)i{0V!d3@1lEopepoTB4Si4R@%5=RF0wBYr%2(*c7=^xAlm=aJ#-P9p!iCq#-U?$;o2R1G8T^(P1R{d=c=s~2kE-B9 zA)0?oaYjO-4lJ$z%SKr!rF61zT05}%6CL5vGfxKl;z5kU40i*$@Ex5Z?ABHMj0!i& zeltr;BBK-ZzO5s#i-r?YE!hFkH{(?*fsKtvzQGxR)5eby=WD*6tQ^(g=lo3OEm&HF z_aqIy^Q#9X%6#Dl6@VZMye5QhsE7GV@CW3lj-aQc@e4qBxBU4Gv|(3a$h3om7Z2hJ z;a8Yq0!W2rQ3DcMC3ffc$vPf8e+cb_A&L*pkyIy|>|$kQB`*ykr~(~^Z<`fz2gxU) zzg8Ztv~#8_FZkt0B~4CXEML(faZNEy&T6qyM2{s249blf4{!8W&oV zVrkU5ZD8xxtxZUz$YaWIImc=r9cNnh)@#w^2{0~A2B94}hNNExH7M{$Iy9oQX?DXF zpbk5$=iz)MKGa0NDCg%$UTp#I#$!PzA0I}FDaH3kqs7M9jXhbpF@6%65eQo2Dr6r* zBk(|dF?BcV_^)q}-$;}F8A;Mmf(5?k_vGyCLm1gH>x^D>&9FMT5;LiIL{ z%Nn8KB!-p(D85FQAfyLXWG~8%5gUw~Al8o~%P#|T$;{8s|B7JhFIUq!*9n%m0!q9q 
z;s&-y7h5DN6UKGlot*!be_tX2}(}>4^x?k_UeA?Xki`A2AE=Bh$ zhG{Vqu@g%=E77D0L9{ZVp$~{6!ilC0*G|R;coKh15NRbL9)0w_#{se2VLd|%mEeU9 z;L&A2sWpb0;zp?hYQtfItIHKr-vi&RM^AJBh1#oD%Qh5dKvPHtm*7>(sas2RHonUE zul4Jn)oOj51!Fw7@ZzV8Tx@-FcXwo|>(=Jx_TgNQhKM74ky;rO-khTuIF0G(sr-SQ zl3DF``#(z^ED*nc?_M!Bz>~QP3p{HO^L;Zgfwp$E)W*$GmpA@e{-d`th?baZ%0pe^FQ+-=#Qth><0%78sZ-NZSvP1 zH*<7I(;44K!>>OAZ-RbKaL7`FTrK-A`kG~i6JAAnssCD&vDZ)aEZ=NfW^!8Xp+d{_ z$mGP>)&5&q;Na<1Z1nPci~5o_aEhJf(B{agdthT}S|;$X$YP0J^I!6X|IZC2Kgyds zZyuU9Mua1qv{Ncf|8L;fy4;=pZ3rfjkc*iv|61nF#ecSqoiO~(N6Dok15JF>u!j6b zya%Y@VAPr&d-v|m$!4Og5R(d?As1sq;N4W9+*ywHXc_tA)3a(gd3b_-`V(i*!paHs z-3WV^w5Z5b@3x=FDJ8JIgkuvdKe3V;x|A zvY4~T^W-B1ryq_lpmKP_uFkV;{r;oy1pFTQ$!o-Psx}7difEl+3+M!NASyWiTAkRP zJ9h}5F(58*3mTRj#U$(*&{*E!Ym7F6EWwDU=Uc_VYkcG=0yubu@i;^H;R?;KVj*s- zbx5-p5ethx^eRXx{xSv3FjM^`TGq>HixVMkbSx&hPE-9SNVo@nP2lB}w2LQ{PgBKQ zTt}gb%9g~?*flzXox69Rf(o7RWVExg@U1og(XTG%g^zP(?w1DpbOYYWKHJ~CBw-&$ zH3_MK%M@ycY4Qyi+#MMB0lMI?pU#*XkRwMF0ICYedTz<(b?7FjxMQGri;hl%&8QAI zNI&Kv+QG8`8}XQ5@^#~cOjisfR}R77akS2w5kyQOl$Ph{)cuz1BSsIT^Z}!a-L^J1 zTpbP+4mnv~zo}?_IQ(5)aPnMP@vYB>4OLdrR6|A^J2yM%(dr&-(1vh(8H* z3Enq`xA4)HO!F1{JkVsI8!1nBwAX`4mJA=5N%SlW2CfYQptx7xO79V)o7L3O5eDa3 zR&wV?m^tAog<(t^+jJJ-+8ZDk1oyAeuJ6hSMGQlq!B=71=Ef#V4n9RpSKth8&{w&| zg}Jk;rM4c0DA(LFKumGGqaggCkzi6IFgYXb*&$-gy(w$FMt=P#b8RABejG<_5uwpZ;K4a!T0r z-M1et?1S<|)5d5HY@$S4`o}?afRuRypIA7UA)0cBi9`0u?oiOg66Ou?;+WK@o`ds8 zW~i>4x@SOi0?g3vxyC0Z!qAfwL)TaaBWT=mPN@jAgW=AzGmIecW*rdJ2`tjy4Y!w` zfNj=nZcizemsCyQYoGe+$>V{ksd#*mYm{oHP5DgmoHRUFm~!yL?FxQ7oSmI% zxLwX?4>5HK1;}__+fFQ}I}RWT2)UhN7@q?9>R+Gji+*E7lWG~`2gi1i;h?ZZCU{XK_z%2v3uL_1y&|&O4 zQtqjZLdGUUv2gBt$p(p&qqek}bgwKmZu~G#EE;-aOT}r`A)G zN3tuit`KU7%$B)kL}iQ&drWK7=C6-4ud!pAw6kdZpLM?m(Q2Y=^{gcd=!fcLHvg(t z^lhlyJ0SfLQ%V+abZ6*Iti&*!MdI(HM}OJc+3f@q*I#ca4!ajI*T`LAmvA zm~h6hzVX$Hh-cvB*?P^Nd2ABh3M&_pBNh8`)PZ=oMLg6P#2!vt8{hTlKH&fkVH;$9 z#EI5vM%0+9BOqt4!T;j*HC#)VEQ(WZ+qQE^kP_71eCqIcm!OIET`kpV_0Th zELy#N2aJ7~vzQj}=!B|@uCpDQkxWOgC-J+aw`JN!;IZ04IaGtccvUTG)Axe3AIRN? 
zU~&4jf2c!CqF}fz~E9;R78;^YL&PSaN=aX1b#>zl9CSR?B|7$gj)(;K;g{MC2GCDeh4E$G0X-W?xAX<=yq^L z4(*PzcQZ0Fa6%&JTqE2E1_$k1vom?;83-wwPyqxaeFgzYj7GKN3ZRY>KtyknjMJ&b{!uQVcW}kx#j%q% zA-Dwu>WP4i32azlFf48Wn0J~UR`VA%|1vSv#M4~hf1W-7gHO7As0q(WegcEQpC_#j3CCoe(~`xDl`!)^gk`D9)2YX$58#fNf&+kjJNfLH4q1E?59yG z$h{1MvjMm)jZ~W$P@XkMlfZ#(pr{7vODUaZz{ziaQ3zMg5Icjs9*iGDLMU*Z8nSC- zVeImyOUqExNn%^O<#1 z;g+gXgn!UiY=G^RD-=gG47WK1Wn1T<>PEtSr_Rde6$INs2+$ewn-|Q&FJPV^#*JJU zfCj%dnIc*l45`%VV??Z?@+qO~qR0ofHK4f~c>A`?KOf3-jKft!bl&hql3w!@Q7#Qo z&=?##e}Nf%I6MXP`6cDYFXGvN%E)qoauyCjHy(=%aicXGKD^+oic^lY%9J@D37k-FdsWll-%nz)pheqh_)K}RL9~qn z$fQX#1i@dQSaIi{f7$>Xapx47IQ7}nwQ*?x^>;u}R5}mPfVgO*voIP+sy-n5PNP&3 zSfhCZSPr5iR$PfeKS5^{Bc$$03UL0D1mPQ76@5P5;Nr5j-zkM$Be2w*hFJYK-a{1> z>H|(`se-?{5I&%~j>772PlK&bkBDKMCz>^g)J#?)T?Fzf5~SYf;ZLPhwei5jZ3+$6 z(3M00V0mw^b&YRCBniIo;Z;74(_2CS9N-rvSswiroaV2AzZ(AVaPIqYNXCOT#a;u; zMEwGeSva}2eD?ScZ~f>=Y9ew-III{{qlPDGx|qn_fPjESBSgWSytXd0ey7SbxOY3mR9QhFjA^ISD-xrdrLkVG_KrJc{yg z8nx-)o+BD8K^GurkpWm8jJ|>zUU-FQgc)R26#VwScIW_T(wQ7!NYo^I(FhCpl7J!4 z$4en6Z3pNhmarnWYRSS8Z8*+kRhTFUX8>oL;RFID7K%KH?mj7000ZzK2TdB%B@1SN zgdd~zL}S4vgIFRN4-*Ed9J3}-)T*a86cWk~%)-)b7I2=w1TvxF z1d_Jp9nr4CB$53#56XEXto zRFWXEb?aeJEJ`Sc<(T_0Vgd&%0hQ6fc*3>1iTU-S?hy6ptFY*`x3`hJt#h|NY zXzB+%LOGa3;|@&Hb-)(tc*PfAQayU~AB;-{K`@k|L8IRuLp(Duf_=*j7{e!A-BmG1 zLY3YyxGdX>4k2_@5siW`W&yo5&>N8d5S@nlC_ij`;0Rs-3c^sReNSSw%q2D#+~fcv zWTe&v@p&8&mHe1>-G~BZxTP4iJrLY5=NZc!8u78Rp^^lV||fj93kT0QefrVh3DZ4+SuVR`~#jZ9Py7k9Ur^8y5dTKAzYlnWmAhTg)SMxC3+gEmav(u)mL{u zOyoEYp%HAauuANNb4Vk_B2>N(WOOL#VmgHwyZvo_fc+s9ql+)G$zKeafTq9gO*q+H z!;7#g4NZ6E+Btj_#Z>8;Ozzn|{F7s7tklrw(8XV{Bk#i5>wsp84tb>HVZQ{mNfFr# z8M%9xYNb1z!xBD>>qe&t`HGR0l!PDf2Cv_SG$CY(flyQ7On{(zx43mimcplF$Ujnu)9BQdDc@qG7$*kjRTUS^Td0v=Z!sawI zZlc^)?$)3V|IoK_ZUgoL)fjq>(g5b-`AU@FZft!x2aRQLnV{^fA z_r9Qmf;U%;7qUlH$*4jWa4sf$F-E9K!k;&0j!9H>*Q-) zofc8DdebZc*>F+d5IGwnGYB=q>)%n66n*RVZJOEy$o$=HqaBCPyyrxAHm52ko+f^} za$0m&@PX^m7@Y;B3-0}KVF|dXY0OBI6Tvdw9+5_4L`WRP8ig2>#HBB%u_iE2@EO_S 
zWdO6Ij_R-s4Gtoo`NF}>B}4Q*t`9;BZ_-jppIiOz;%(^Ix_rK3mSF_+(Rb9Oy;vDY z970o$Cvi`HCO9K>|B2v>UL0TWWM?r{y9QSbzzAnklLoI`(CmY)HVky(Q92n>xDpcg z)@5k~VCwM!yrl3sW9q#i9fzQ91ii&}8MEO!S?>m^`r+zHDrBi_1Kn_MfX zu9fiD#zPL(kNQ#@t0#vEH8t|GU^}@o_o~45Dd{kF8fUEt6eF&weFJw)2e=Dd?%-kB zfYcL;iz-2S{U(SIVeRltS$UY_?o8@CvAhojB81>Xr%OrFaTC#cmy3LXxJL$lo)=hX zl0GO|LYOhGtABB59&RZ;1~s!1E=$rTlj`lhU8!#dp+w6XiQ)%aU@=leG#*=)fq}Gm zG&_C>oVC<*NkgzMjl7LI&&T2P!sj=$&rn zL^-CYXU9cnVww^-oQ1~bAYy4y+;6iEk~-0!_%cZ9eesBtnx&>bLD1k~QQviD@B??2 z{gz+tea5ERX%PI#LWwdI0_;73yat|P8!|)tGdCvcsVq3~f@3%J7h$~ex>;h?%kG#X zb%DQ!nh-e!b;A(vXi$c3i9{&A!JUp<>&;nf8iJO#f)f}^vela-3C3B-Ibs5{_rMPf zj_7({43z^l%7gnaMa3iI0ka$U`Y7gxY3Mvc4|kN{?$rC%dTl(-xJquKx9Ts>8jmrq z5nPhBmS0<!8MLP~?f>=Ec9*of9Zgd}x zQ1h=sOB7U>F!^wE4oQ3IU_ArdM(3vD=G+~0#}c}sN;vMkkZ6wf+9MTf;f`l8$@k*; zkl=S}r@kk}0Zs}c>@d%wjB7ChzqddI?{=xFY8Q&JF5=fo-z{U-vBBjf8Y82$3}3LI z^>&?n#0A1cUjZU$2w*$Q{>&M>srM*twCp8GnI5pHAQ5IhbRW1M6d!yKU=M<8=pdHAc!1 z7p%eTVePn43j{RXhy+|gcRx_tx_kF7YkPoJVMlf;?CrQUX_aZ8H1HYJmeo_4LKOhv z&Aq+RV2!F$iH%0Kl0M@I-6JS}t{JX}*_6e4N>fv_35Mjsx!QN|!my5=MEUf@$ynF= zOEm)P+PPtgB00#HW}XJh5480IQZOWE2z)8|tp!LombeYoH(}3>mb_3v;Tnm*x1hK{ zwDm?o9*(&jWBtnqbKvF?5Wza(NWh^#J-2rz>pSx``^H<{%LJU2ASHA!Xb-!4P` zL!{haeyzYC)tVw+LBtax)`VCm5^>ojZo&x1{m&J#cv|XTXgTn7LD{UTQWI_4fI+^B zg|*3`(0OL*-WzTG14udJ$DOlgpz+aV`~LEBy>o|Kr0jsgiB11^9$!#(m2b6C@BW9$ zSM}R}n0$K+fU0@M$J0l~AWI`TBG#VZ@$>|i4%0Eh3{22LNBkt zl@~Nc^x?w?)}96cY@PSZe)N_=D}(Hh*Psy8#7kfJz(U>w#AV`4I?VMt^r2+E zB<&hfKC!q&20-=D;D1O52dqMQ-_J~%ok+vAi4&0j33B2_(r}1v&XtC7#3xg-K?S4C zO;=iw<^(IPq3^DiVu#vFi3Xzt$_jG&hzdSIyTG_woq9Dj$ey!md!xiTlmj%RH zZ?BLlLQbR6HZ1fJ5*Pnjw0a1MmUR0HJKWf$1|AsQ(nWy{A*hY?wn(aPfjPeC6*qv) zhN*y8S~{auj6nSC0{%5c;*fkHtI}o1lqnF}G>=b*27i};$*US9$p<*5Zy=2W+EAyY z3~><++pn^G%qr3Z4!QzEsLbRN0D_`nk50}37(`M)(op!JFah${P53nhJ>Pf9g{TDx z1_sbvggnmSQmIRR-X6H2A{36)aQrCT=oLp*29;da&LNnI4ui}M#OXqVA=@>L{E@`N zEM#3U6AImIv&;l&hi)g1&kHPYO93jS0klg}(Qq{i`D5rW3;>RKd^>Y?!Gw*iZ4NX| zx;F?RM9)iT3MW2((XD=5-WN)9dAPNT20sXhPF%pm}S#s1;? 
zMk)s)K6R()56C2g>STZ%lZ!c*x(VWlAk@+999=$)2!koNLwkVE zL;r>~w>Dik42EFO9ajNPcK(P%X?7Z({}k`Hu|x%r#F85VQ~_5T-mvD;CY91CI_)ix zZ#b4Zrw;c+%qagBDY(9W;L>heF2?e~4}Sx}Du=^&*5iWTc7V2j(aOp;djEDQv-CGE zpO9PA;3to#VqsbJiT%I-?wJ4gzfb31*GoLln2f?DEzpq0TnGC{6ncJNE-|~|kD=KR zJvkqOk;zQ*Td|Be(`6p`=fBq9+Vf#wviQyatG(|I>$#8L{qlszdPrItl#;fhG*l=W z#M6*8C8eZNX(~merAR}l&|XrsCmPCV4{ci7LwkMC{eGU`b<9^-u{pzhvIA$p+IXOzcfC0UAl;)!C`|~(QPnb52$_*vg+y*H-yuE{b zz14$P#dbY!*Y~It`JdcM{{K><|6x`^y3Z>5S?@wV6EuO1VdAy(;IT7`?M8a~;r zpBvMj3$Szlv=rt*JW-GQpn7?7|4W~GhO+PMK+`5Q81`Fqy4cR}$1htSs{a}LEOJ=8 z(y05*_p9wMf-)}8W`~Y;-qen`dFJmQ>Z!fG8ant;%2tle)y0XLu>rPcH4Fyd-b=U{ zRf)X*S~bG4ZA(b9p(LA}jnKDMW1g>JWzlz=Qo4AR!7$NkV-TD9;awfn&e-kzk7QyZ z?fQ&Pj91{wam9cLwcD?Kg5dLbhF3!9RChwbP?X|mT`Ju zwSaYe_V!29jJYqOsoWe?|24F6RF8PFThUzGU^TehZ z*!9I`gVN?fojf}p@5fWz_J({u%X#ozIwH z9?e(1B8$ZL`iJpAl3|+Ts1;lX<5mB!cSEZy?2-BAhK|6*>#SIrwY zrtNl1a}YWXi0Fn14{{A(pX>Ly;qq2n_Sw`K%VEB$52m9lKQ^D^Gdxr|dh5WAELXRJ zcWx`E?XN9mo&0O?1@q(1dati?7WGYrDKqFBVr@Q3TeJln_^O-pO{X)rSpj71tF3np z#7FzT(I>b%XYfw@M)R!}JAAB9if%?9)}VVzch5j7!T zjKG$ly(j&h21h{(=yuoBdmbVi(PcNjd!G6+buXv2?&qx0mUxTiy^Xp0`p@$8Tbt`6 zr03qv`s7w7sykQKAMCT+EvH50*v-FR(X?r*xmWgn?V5l4$mH(mEt`xA02 zw)@p87I3|W3hK|Y4|b|Q6=-Z6@C>~6VI}-6W2*c}{I_M5v_b~!9<=#GQ#dj1qD?Tf3OLgo@ z<7jXN%|eo$0{AX;h37 z&{)#S6f-}4ICg5L?uuQp+T2|H{+X?sty6{-4dczr*PD z_UwNzyRXc;II1C9vDT*O?6+@c_~u;#BnC=}$HcoL-vVdd2$wT|r>)kXHr)zt4(N(0&rP9e}r( zuo6e_3*)QU3e&Oc6EqBaz$yiXFbqo8)%Kipq8Y$+xDe`m(m|@`11u&iID$Q3GE3|i zVOfx2B?rMZ9JTlVhZ!|`pC-WXk;AMw=*!I}UoPklX533K3m0#l`EC7F_GK8~x&5Xc zw@^j&0wZOj}QsQpBensmzwMw3hm^-hD+}GHZc*;CgY$L6fAZ?{*K#eUq31}a;3h4 z?bwf~&g!u_UhBwfL-o-

    8V?AJR^m%}(F4z5Zr*V8)8O`?lROzo(?OwivZ~L^2Vu z7Tui~bQi>EnJ974!Co6GKnp}1n90ByJ^C>G|Nhq2*1lu84MuE4Fagm)5~0N6G7ytW zF2ZvpZchYyM=T>FI5NDj?}E7pObU&IVlZL-1zZ@C=DQfqYQpWGv|@-d>Ht9qehKgP zw!Co+>JDMA23cK1#xj^i!~pSC4kG@HgwqSY9a+_e0T(a$t;D4UJ(W{!Jw}ZaQEzYc zai!`?LdAJQ193FZZZ7ic>`k>f$hplNv7m^t!!M6ox40wkV(P7>707RXEq}rGi;ghA zw{Ig>C3LfS%Ip2!g$St~MYN%h2gklIc;ACG%+9mpR|~ekmRb6XXGlB!wSHhQ#0drM zwXx}48yFdv2DppHHLl88E32=kHdKZ2=oXAt%+3vEdNYpjPF$OnvoyGkUi8z)k00X% z5;yC(0zW6Vb)=zIuKQQDLuIhefno~N-gxMy2UD}z<%Vs5@4$Tg2fqWFD&j=Lt&FFt z370za=A0Gz?qr+;Yy3HtamXxHlHJkjRnY*?W)cpRIm?jJT8y-DW~AUAjab$6@+<(t)8@MXERtOs%IN z5JNRAyrH`7L0mm}2ECI9HEGIZ^QoYub3(6M#Z>qW(;fRB)#>X?H-&NK=H`KYr|2;E zz&B6$aPz>kgDdZz9UeC;d3bjER$$zwPr+m1_8skdH@d!k4aXDGG`bNQ?f|+xH@73; zW2Z{kzcuw)%C;*u7{B^CONLJ?D>b7y6y(pH8jrm~&Sf~Q(gX?)##Z+QtjM+(TT=^K zmSiKSzcal27xfmVF4w>O7=P$i)!MB?&3uuova8mv^?!MUaep6s0i4?aCfIL?Ng+?| z5?H@O(6I4ojkwD{Boiz=8oI@@FnGY}Gt;QRNaWbDjn&STQB?QcLWo?!;2$I5b1K7_ z>4`5xrv8PgO5;)<;EG6@H9+HEW8hv1Up69e&$SDTdhdkEKk`K9phs_JOz7HbH^yaA;fxNk+=@Ni9&xsJ&|VeIp+8}+p9n79yEzkr*HWN1T-PR zSPscLP5j;bx=RzsPq?pzE?%(C^ZvHRgic*4yV+OYo);#_QC*g^2XN=@`$tX;4-Kvk zuG|=$nvvi*?C`|VYkK1xFG&7AW6ugpqL6a}Mh#zazx;SE$>~<=ylKA5xuFD(y-_Q3 z1%^o#jw#j(rG?ukI4|9wGc33v_$|vX?Dh8*S^H5L<)C+~!hW=; z;O;K$`B?JxFEC!oySQ3WtTb2 zEWcwA*Mmq2Lo@2D%skJ#d00g;^69pf&z&E|-B$BsPY=eOoZ-JZ#^ZTo>{l}> z5KpQ;iBGt)WvP0GT76&6t^dQR(Njf7zW4rm!uf4(@V)1KsfjipiGkkX#x9GQ*4<)1 zKRQ$ei=@T{jB{MM!ej4j9n>*?#I&E+37H6V`K9Ohx#i_>bi?ZfYXXs3kPQ295;g6 zmY06xYL`kkiYv7wO@l?JZ_VQNa*=PQ%e2e+chrIgF_e5wa21eAPa7i(|dGlt%rG-~IZ!SlJgw(Yz zRG85kz9^23-J_eDGSH(WpmEVmIC{o@iJG{DdAZ)K^o2v~n}sWaH-{cw3wmcwRl^i? 
z|Ms{~pa_946`m?#E?go*(3XluFyP_oA1VcZWRR>Dmo_&u51NgXal zGJ-^;hOjU2p!0&FMTkN$7d9|kRYGbznc>5>ddx5sU@QE=@cae}kz`^i6~-0{E*D zpNbG^@yEg3E;`^O#qa&khVpYxh1xfcSvQZq`-^_ODgAaqp$wdoW!Ck&=sp-Kj4jB0 z%NLvfj3mN-A#Hx^@!-V?ALmb2RzkZ(5+d>n4l8f%WJ}hD(W!PJM4z8<{4`~FDxonxDX%%r#e z{_-8}*m%FhGhZ{BrcN{0CtV3U`SgJOOT^WfSNH5J8lSQ)8Z{piT)t!$KGU-hAhT4$Jl(%fxjbWU{3$A0U9w*x{$pUYUdR;)`u ze6q3QR=yw_fBtQGV~V{Uqt&a7*iGh3Ug1p{YVuMErMhgISyve|tlcFcIXl}3PNf5?I(`YKxlPjbse1J zu>{Z_l0-;!WF5O;1DAv&kqqTBjJTc>8U|4hgYIK%E|p%3QwbiXQQ^|!L;VwUDb z2KQ&;(%ETP$fl_Jv`9O-c_qQ)w7Hmx-nN-v?KN?o54?33ua{mCH+dWFZrhUoFhaNBUY9>eKknz4FLho(}i3`DQrMY;uY|W8<$`8pbVkq1N$< zUELRk8r`2Bk!f#?b5?!8+*GS`rE4|%>B69jz18{iCpHhHf0x58S-jl3_G3mun1i6-7-6lJv?IBK1ct@~?QCAb4s9e^ zEl^)QS@bVSp5EE$Z9ze#n?0UFEW0f@7yV3hKE5f;&Wp?O{EmjV!_&PSJSJzx6Z zSYsi!E2Yswyf`MZP{Kmo+rQT49Vz%ZfTcE6u+DA+;$t#*<@o?~N?Iu1;V=?_y&6L~+LCD%t&{n|DDPzO4tz9%n3)GV4U?`S0pj+$=&=PK1we{zrg zUwsw-`NA0rG?a%yLF!e4SfItp`Km7F^Tr#JYeQuB{WAH2x#AiMX2*hkq5pj8N4sL; z;y#p@$Gv-Y?BX4Re(Z9mrCbM{ko~T8VsP+I%~TzFwB>inW$OQ%LnOdju6!cxwvFqO zh?o0Tf8)XJwB!ZK%3|L=J5Kf6=)!D#dD^yR4GRlP(L!P0I~_sedBzfnq7|l(#p}x_ zM=O|C#SR2;2l9+I-198`_i)nw=hJCW{^Z#+W6)9$96l^O8izL}@6^*4D6(vEk+tiV z{}01IdX%PvPD;ed{`&%Ii9O8!8_$feF|jFwW#jt~AJ!HX6=BD>lKEjD3W(N!fXpuo zjAt7JuJ9CMCN%ft-y`1-Zef$!>EPh-CM&BP%Z}lAPg5v`%Ka$710xfYb@+9EHfaWC zW@heO{^?>+9XCEW>51I{ye9rX+I~Rv^$S+=BO@ah&CNGjTU)b&&U-G4*sM ze*ma5@L4$mb|%(%NW$Ie1@J7QvFH?MQsn!6x5^)gfp>U#xB%P~z+~o!Gud`sU0r1* zrTwFEAC-Jkz+NCbd=@t|~^nx8a|{f4D2F;LTH*bSVKMASZH)vit4Jq4;i zecBGa_r^_|f^u>skX`&*+S)#2r;*yOe9btlWmZ6I5QDESiW?I9Ttu@NEvgcTgj^sC z!h1mg{Q3}ZZE{@~GSsgi-d{2{-iy{Y7-dQ&Afo-aFjLcm8nNHt)w+kUIauK-3h8kzwM<)GHD#RUaFpFDpqfIFB0CWRz`JyvkI!2#jI z@Y<#AB5Pub@f`y%C_0q9z>Yog?aP;)fC9sj&3UtYw{3H4d0UXOAu zO1lMYGn%PF{^rB?-9cNfHk!fXfWrjBtP;lQ)17C*AQK*k)fP)PlUitD3@HZ3ov`I6z!noa~74i%v}y1k#$&fRlIU(IZ+MB|Ted z4KK7aJ0W5&1vzUC1+$3b*RBbG;)ngD-{T~n(sIbXTHVQ@`wg!Dm!saJU0K3dnV`kjP9G4kBXzsmn z_qgdFtZ{I$wP3G*;J`O0(17X|A#pOdw3lpAS z?8lV?`Dz`-zehJN=O^JI=H%xOw0Kz&FN{v5rOG$x;f$8&C;!~I@wQg-x)O;UYfx-x 
zUcuFtu4GRIe$2^e57&Delqxii!<5U7A%H85wU73c-_)*{OT7`u25k^Aw-nNXc>J|Q&T&=Yc-|TZ(|q0 z1DFA%80c-lR9I+eTmv%%@;P~VRUr5F;>DmEq9eT1486lA&ZFb##kb3VC6LYa>)ku` zV}CWOwVz=6nEf8d4x6|Ypu-a)ChllG(lauGh;YarnU*&zD=Py621&QYcQ=L_D!Xr- z#aVm^uKcNM*HRi%E zEXwxr^V2vvCluhs&BNmduPWf(iin_l!K|X?=jT6Dea?T%w+GR!7rd}wG86zKccy5( zK#EhkN6CbZZd%k7c2+-!hWJcPP3u7V!s>*-QDF4;X>xh35~5p!^Wrp6AyEa;|MqBU zY2n&3lB?9|fGe{Pcq9F(Q>W6iv%`r0Z(V);DJN?so||{RF%_|qg;tYOQ#?WtD zgAu!i1*@y7g1{23+UK|$diQ8?O(FA6x2!80>OGm?D}V7P}A3 z#Rx;gvY?T~6RxgD;J|VRCn@9IGxh$M*jVyU>z5XBa}NLqJ3Lho7Z=AQ<8laRHUROJ z-uWgzw(x|=%cb|^a0uBwkd`R5)^?Oa=KJWK0QU};~t@E++8^ACFLhMqbZ z`Bp&Mw)an%b*{yv@X6Ds$D?ZtX2lUy&;`rA0JQQZJNtfOA}`5-z~}@F3Yg$?-<(Vx z8hD0ehk~#MWg@GaN_6c-aPQVfLF_S;wBpM3Dw~n+#w$@P+*cR(yo1@_KLZhbfC88u zRqy^j25J=f%7~Qii8L!PueqI{FAWa7TC1w_UJ?p{3A_T2Kei*7wXUx206etx=%$I{ zFnwUGIc;}PP|zDBp5v*-pz(WFlBMWK1!@ibiDwMb`I3-Tr$@fi!4qH|1?0kYsN^9% zm5n3@3m%Y~QuZ_uacgm|d;MgIEsv z57hp*Atks7Hl<{fMMs_(Rs#5-#JGqxY!YsZhp@)P2Q>~ug%vF=y|1$QVGnPW^C+5U z*mq0a_!Glz$*RGx*QOU_;0m@}Mra63u8j;Lc+ zAGZ~;FMw#NGP8+@3PrgwvKhq#LDUGSMUK-XCs>8vfJhk~e^WD1fj=Ng5%l->ptc52 zA!NS7@51NgAddKxQu=7rNXE#cq7YeYVJfkcL23laDzkmxWqRp{ECOy)Y;ksZ8 zDc)8}4Rp4wVLz=~-)+P!$)S`PFOem>d?FKxANKZM_YF7WGiERuDK&4w&}rCcTnI{9jo98^&rM z^!Tv|bl9W|?(gqM8Mhu4A<3c?;iU{u&p#*^@qDPb!$xcqZr=nX4{K_Q1+;jOQOSYU z%@z_65a{mhy$hqcBM@e#ZOyycAN}f84NQx)b#yL4)m$}&3Vg4)cvH7XP!y;IMnKm| z(FXRx9mq#%aA?bs0DZ+I-DjQ{7C4E36nfFhDgaF3-6%sFlW~0Xr-z%jLD6zMH&+q~ z=`J|ceK*TaUAn}HJ;>XDlwx?^P4pN*?xtHG>ZTYF1s-W|o0^)gU%y@gO7@h@D%K|X zqN95ODlY!wWI!KOrwV3f`*EDS5JGwT@>&=a4I4&{j~QN($F9Xo6QYwJ*xlES_S};R zjd=bX&26}~j6A6n;cKYu)vTNvrq3>ygm5TvB0 z)+U|fFO;R(xq7e{iw2;@Cr z{^MU|-hkST0*b7mo%sD*9+6Z8c7)^*BO=hjgUO%l7sr9FZ7YXxKn?4)Pe&eMM%^c` zpg=>)KqRjI1u1rk>lB<{!V;eo>q3=nIJ$=eM1WJn|kv zf|iYqC^h=cr@kd~Eo z3xdfP#7^upD}k1@8!-;2Zy?VwJw4rNeu4wPypfLXqv0bA&HR8L7(s|&1|pEUfu7r> zz2U&C{l6y9Ka@3SFwT7gafI)eFJCU@4h`cJlm8nGf3+=wOLE96CypMa;CVp$+n}ML z0aMRE$p-`_&URU1F)xY(&nlKxZiY0@5BwLMalTlL?`b)M2DZg3#E2eOnD#}Xqhe*f 
z73~+K;9`9?uC4`??Fg&zl+daw#*JF@4=fTBAJ5-M3PV%VeW2XY(LQQu&`i(F3|aHX z`d>2DXv)gU%l9cdI{=8Fqu{bVHol;vqZ5#UD2B>gJFogSLJ8H2s@;J*k0Sd*-> zPx}q2OK=9tRKMux=yo^@@ItB*jFYX5Sye~|fSglcP6b!sT!rAc`r`Uug9)O70@h)1 zP%cB(%{k>_vvKQIdWxvghr37SR2AoTl~usw?-L$18V?vkzB@WxOvHz4kKS5cxDrby z%<6C_GRFpcbvzG;coBN^3Y^reS77eojW{jT+Mkp`=<3w9+TGG$ zq}f^RI~#fJO)*3NJipm);i~lIqykybEelv}L*qfvyfk=o4%m)&%Jd2I@+wyrC?8yr_jV1kev|ZOII^IoGw4QIqK<47oh;?N>x;Px(H-@v~+YDzr+h&axVicL!F6D8Xl+ypUb(+Dz1=c0{&S?5o5sK zqS+fAW*rEQRtZi_4N7?vX}nO5J5?6>AFFG*P26t~^@-!GC~#8XB{{cNTp}&8bx4*- zv&M_ytGR&)CAGDgz*tHMZbm#KDH#WPI&um0g{n{WI45S!uh}p-4@5n?#>rVrAK*|| z*Y2Y~eg-vPs4Qq{ZC&f++;`l?WziyS{O8YCj_W)Trg4sti0;D!Vl4&V_C5+OsQ!*w zyF-8UFg)A`*MlKxG|#fFrG*swE^V`b3ecwnG*_X`2L!u5JJ!!lUA@jCI~s2c@aV4S zXb{R0@a7%>i}_GlIhdIp76qH&Uo(w(bxH|jN^1&^tRe=zLEXR_c5Hwkp4S~5PAVyR zs{?%Wg>tLRLLCOXZpH^76X$S-%qQpOa!dcPyaz z&g|qX_RsfX{p4p8$-e5fh&g@LH6_8+aq5bkgoFgPo6`bmuZr!%6U>)2Yhr482{k3k z-nX6D!^(H=15cFeotBmsR=30hQ5-;00TRKmD@NS`Yo4CVx;-_n+Foel9}bwX z(^x;l0A6pNeR;wozy#J(plPb>cF5GReAz~pQZKKJyI0_r+qy0|W5G;k4^YABu?`lJ z%y7DALgDSf%y|c0vk{)7^-=sHB5LEZT_b7t$2L10+d)rTnH?~8sx>)GaTuElS>XIr zb<`3ojy?e#!lSB&_Xb$Ri8F)1-FT{I7=RMjQW^^*9UUFG7~EYBEO=zSq0;&jEp!=m zxtjU{#T}{h$;cM{^g#YCP#SFYs`N39p$L!n*gH6sLjidmMQvYE-w2+y&5aJ-eHKJw zDIq1rVAckFJJ;$ICtpKRT~zTxXrYOF$g4NBwAiOeJ9sYW z&i+r6=WG7WS~qN*mgmzAan-!@QTCc;5(N$%*shbxKw4u`1Eb*sXmJs}l8dLv8sX~8 znn{`)!7_Res{vA9n}oj}Lm6u;^E-UyR@w5b^kL+gve`1-c%a5_-n=P+T#8`O4vkNXEeogGMA)rCE17%?6B6CuG9Pq(elHIzc+P5|oT z;^O*Mc;>`|Q$`phF`;@Z2OyV@qHlVraWgQv_g$;A*+-h|^?yuUHLOvsjbZWr^6}#z z@X!6`XgDRcIkZg_Ox=-|^gkfDzsG)0N3mJXUip%;Q>O6XVtF#QApI#UoOay03;z?U z^<2@$bGX+c5cKQO#bJ!~18?`Ks;X|OR?$({_jRH5Rmu@qZX={$f)&FPI3m1P7{v66 z;=%gI0{s<30lp6AX6z>KtFzlP$G?3_YSkb+ka1R9D1~(80Of&#SYO`r4lm8@nzn9r zT&AU_@3$28X}`(HsG3=6{q$*nMWg9Gn0MSm6^`8{JJ8v^#}~KNqd%!IRB^M$C!v96 z!v^`}#-E1N#jy7`8Gb)|dOGaxJ|3Rokl`ZDVwbERs1IW9lR_-j)=&UIRXeVr95P0HsOmw5vJA8e z_)d}SY%%hV1TY4$ux(SKo4=;rKp2KjU)X1w|KTyJ+RtT zfCpl~NuCJZCOboyV`v~rI)S{pa?-^bhZcc{ufeqj-JR3yNVq{b2BX9X?TTJorUgl^ 
zLl3kf9zD8^n&x9`t4?mPT$(ohy^s)ku#P>^XIunMBN||C;CehK78B`Rqg^REEaI;?0?3R%}(Oz@`3)2jicytqg&! z!7q)Yyu6&CM5rxe4j1YlKfV^Jd8@lSm9yhph(7vla&COeoD55CjRcFs@sR#}bVj@prwimHc&5-jf5f z-s9jH*TQFF4n(F;$N`eK??1+>a{g$tR z+mP-E1^!D|Pmw-3EQ}GwFogm_^wI0r51{F&MPY+cDd`t6$SDDKPheAYFh9}yYG+%q zk-fRDu+doOB1kS2=62H3(y=2D3S->64yE(+y|2qK72JYYd;;hQan%D6?+;uYZ$H0} zconR1l_?yY+eD0a%o>yE2}%K@^^($38u-B<#ihkMi|}C=2+&Ibqittk@WO-!tDLsN zN~Z#nVD!>2+pV9UPw~A9MLKy^+}!IJ1rzE%kU<4ZDgn~m!`OX5>BjA!qkbm{KNF*s zSIEpv(hdUnqUiu9`Xrio|IOpU#tZ1Is&;z8ZR`)q^6w}r&BWBm4IIHKJvmO!wQ#Xz zl694U#fm5Bdmr$Cp-v{q9Lac|a~_PPa2|LAdj%-Wx1cSbn4T_0PoEAv5Z%9`a8d5U zfE+c^H49@68q6a}cp(uSbZf>W{oIA#$7c^<;)03#8VuL8;(C_Q=7`u2X=1Xo-&1+V zBbAW(oJob4tM|f91(!5cFzQ=-6h(3T`N+25Gx*aER#q}lx@2U;fqb-sfnguM$>;1s zJP3uFX$JbFpNz}@4r?PqH~^Rl)2$QerQ=V_{~ijKNIUa3&JHx!L~H^jyQfJ@$H_@t z&Y?H+h14D=Km`C!EP$>I026CR4QB_weg!%d12Y8|6R)@nbhO{$Is{V-pV-(~vrHX$ zCq+M(b`+`**HBk41z3rwBMh`fWdR}<8V}@+ii^6Nc7)*(L|>^_v=sZ;?P)w}7947C z+$&#n3_Jv%fDkgknK*@3 z*95V9>Sx*J*iN84r4UD;q@RwPtoronA&><>__3HBddgby2}ckuup+JpaX0@76MP6c z9wRgln5A(bP7w?Mp|qC2d4^5C-iJOOWsC}>QVPk9zL*vP7x@f4s1L8{GGi;Q8idD+v}>9rg+Vf41oR?u@E{%b$1lM2 z!tcg%Uvq_@t-$bN?#tpkXJ|toJ>tMC5hE5A1Qt*x6-Gu!V^Ql&VGOMpDeIPWez)`K zsL&2%c3$@-c=q5Eqb27=dJ9G<`2@sd)4QTipPPALY6&E=tg1={LkF-#CIuIfy*?`Ok!` f|64eS^VF?6l4VCXDIW*>lcId$jKb^V7w`NR>0OKC literal 0 HcmV?d00001 From fc58d3f5bdd6e1dbd1c5d365804a25d582a8e44d Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:52:05 +1100 Subject: [PATCH 0046/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index af65590289..b4607517d7 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ * Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!

    - +
    From b2df01eb080dbcacf59a0f14b08cd0c7fefe8ce5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 13:57:28 +1100 Subject: [PATCH 0047/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b4607517d7..f57012c1c0 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## 5X faster 50% less memory LLM local finetuning +## 80% faster 50% less memory LLM local finetuning * Manual autograd engine. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. From 17172ab1803610406559149991b07c1f1f4860b0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 14:11:49 +1100 Subject: [PATCH 0048/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f57012c1c0..a59ca03079 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ## 80% faster 50% less memory LLM local finetuning -* Manual autograd engine. +* Manual autograd engine - hand derived backprop steps. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. * No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s From b69c8989f3e992e15bbb68d2e05deaa364909701 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 14:35:51 +1100 Subject: [PATCH 0049/1088] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a59ca03079..5f1e65e632 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ ## 80% faster 50% less memory LLM local finetuning * Manual autograd engine - hand derived backprop steps. +* QLoRA / LoRA 80% faster, 50% less memory. * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. * No change of hardware necessary. 
Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s From d9743a3089c981444159ca688357f451f1b0962f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 14:38:03 +1100 Subject: [PATCH 0050/1088] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5f1e65e632..7ddd744abf 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,8 @@ * All kernels written in OpenAI's Triton language. * 0% loss in accuracy. * No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s -* Flash Attention +* Flash Attention support +* Supports 4bit and 16bit LoRA finetuning. * Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** * Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! From 4e519995d0ba2519a9da6c1f4ff7566e105fb577 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 14:42:42 +1100 Subject: [PATCH 0051/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7ddd744abf..03b6bc8e36 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## 80% faster 50% less memory LLM local finetuning +## 80% faster 50% less memory local LoRA finetuning * Manual autograd engine - hand derived backprop steps. * QLoRA / LoRA 80% faster, 50% less memory. * All kernels written in OpenAI's Triton language. From afa8ee77acde78009ceccd5fd352c8c0d03f96be Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 14:42:52 +1100 Subject: [PATCH 0052/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 03b6bc8e36..6598592f73 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## 80% faster 50% less memory local LoRA finetuning +## 80% faster 50% less memory local QLoRA finetuning * Manual autograd engine - hand derived backprop steps. 
* QLoRA / LoRA 80% faster, 50% less memory. * All kernels written in OpenAI's Triton language. From 4fcb929a9f6282663dcf35f27ee7340d7cd027c9 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 1 Dec 2023 16:04:15 +1100 Subject: [PATCH 0053/1088] Create Discord.png --- images/Discord.png | Bin 0 -> 58500 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Discord.png diff --git a/images/Discord.png b/images/Discord.png new file mode 100644 index 0000000000000000000000000000000000000000..00b4d53f78f96e6530fc280095bbbabffe350d26 GIT binary patch literal 58500 zcmdqIWm}tT(>6@KRA}+y1&X^%kX76%F2NmwyTfXWTan;W+})iPD-eQf2<{Nv^-Zt4 zu=@OkH(%Ib%Q45uJ~R8wc@p$VPW<%?{1+%FD6b_YL=;g_oUmO;aw z(NMYu3!sD57fX?lBoW+!X3&jNc_QO7a3tfR#(&!RjqJHW8y-Zl(o=Bl+xv#b0&i`V zX2{hUAH_qYV*h{pwBcvZprPn_GhH19eXV(|^w57Jg@bQ8SEjLq&tJaDMA$7x&5b`i z-d~AY^N4wR`$xW-jbB zycA(}eS-)mpI$;QCmPZObu)_qGzp8Vm&{5tV`@4FYpAU*Ut1A4SwOMFL^UY9+xb<*__QUr0rzWh z9x|yaYGIL>M33S^H=RRGuQO-i{0Kg|{0N^`^J0-Lm5b)OmMIQ#H*f|gQ7=krqs9tXPf{Vlf^L+?W4>OfX& zJ4c*~QrrspT_cU5#Tjd!$cUXnzhpx$fi(bpq84}}H-K4tavZViB0?j)3dN@&anBmh zh`}fpJ-QSGZV-^9cYfN$shSYy^Qs91|Id8>E}zG`*X< zw#6&IUcmbDkYV%J%$vr;;zQ1@@o-N^-I*q|^Z0Ih=P6Od7T_1hK!u?9ZR1J>7r$o< zf{F7oKpvc=BTACKiWYD=M`OKGvzvWL(V`!8F6guBmF;78&2?L1q6BPXY)k<*WzVg0 z+|2f^<}A6iZ*4U1N@{|vAkBiC^#(Fk>DAntG4?U6gzbaj`oV^Y!+Y`JOB~AWdlK-2 zeE$KaW8aCQONQK?T1N&mHw)s@(W+uoLMzC00Sjm%tf0oJ+8UrD4zYmSnPlk?K&G8# z$F8dVYqprYSU6j%njg)JHSIs7oA1O8v4b6)b?mF>3_N!a75nu9Khj-Tw1ACPF+p*m zSD9RsF)4Bbk4Pb--Jj;8(#H3VA^q&C>{rGK8Q1>lUOH(91pclvBPU7z@%|V}c)e9u z80UN4H(@@rP|t|^^AoVngivj5jIm9NPjL~5wA+g2(zHB6BmB^`xD>>K)7X4|3 z8Q5xoFX1K`^Ot~FW^Z{cY5E3EXUj&9XEzRNfzIh`1B$V&(Z`IA=jXB7zb>aPtp#kX zo#qSv0TPyzDb0`kV^ATyxh3yBS))?LHzoO{n{3 zS<9=s3xx1g3>w?SZFRwL4~W)cTQ?i}PXw(68H}dR!@I-!7*`wj`e9m-ef9O5#9fvU z9v|!3T`tYd8_nK|S`UcOK$_pOX1EvsTtl4URD=gpJ-lJ}hzIU@>)8j5^UAhoSmEDZ zbsAWcn`@5+`M?=R?3WwHwyxz;M-&|%5X?EM#8u%GS93dLLOYx-%ZZAKV6pyIg$>*d z`XMYJ?uOOC0uD=lxLF=e_mE2QkutZL9EG7`c$W$V@z9Eymda;=Osxd_-fFx?Me}&2 
z%8@X0Z5zla(cJcgHx=*Z$>Gy+0)cd23fD!ypsyt!SH!JU+s82U*6u*+h~PviaZ?bj zYJzG|K21t|^5}kV4Cd*%6RVyix86qCiqeCKZq)c3*H^-q4`W3Hn@-NXH=GJvAN^nZ zP*1Wx7QCj{GM2(6630LGFBt=|0zr8j2^#t1n+=4o^A*`Mq{{PTL^4wi+Da+$Ym|y6 zQiA+dqYJF#ELms`CUTk{*c^wnLi5=51g^S>)W1{d#7jKRZ24{+KU9`&P#GRgjiX}k zs6`bZ?l?Xq$2qTN9x)(!)`W3*?l^fMIk+_zYu=(82=BQief_7S9nz^Cxn`19RyW9S z5ay4XDBBfSM9?T?7Q1l_&lrTRvRXitEt=TPDcy_WJ*QfSN7nPT z=e~h_hXyRf)mNl8lXCBRqw(>^Hwq`VR|~&1P!O_cK77#Q|B#pH1-@NtDLUE{6-AM` z#p8DA-}D3dY16dZ+ITOG&97Rxkw2BFk(|%F#8zohMpO8#aT+2$75XY1=Xg+E61#A0 zs>kN~un`47QPvQhl4?3f5cB}3Gm@t zH55o!Cn_Z7^I~R3v%Wj(&1T@YuldF`*UY=EMK8zQE~UKD>cgSqNW)xKWcIKuoyfB$ z`Izg2;e~RajI2`}orQGqj$`g(0|((K4c&YFR{3sMn*UQDrR|DVsX;v@a#&sAjh}GW zio`$A$3phd18&?7n>U;}SB6YpL0f71*BKOT{`Tt$27K?T3}WTix+${`CnX!1uoRm& z;;TfLcuDrQ)IZQCyouF&jQ2-0{dh-Ki%$OfP`M<(<;2QCjrYJg1OFyuFV7*J+wzU~ zQUnI>4D>nuQ`raO&FurRieyptGB%*h%_CW5NNT=CGhLFhpTs2e{2BxG3B~yjjO3F) zlFQghWERwEw}yJFQj-%E$v^frVMT1dO#kLJymp*iICTP+675Q8KnD@^KN!yzmIn_< zmSurzN>?^HOt-4h;c>DXJ0dc;KlLS_UGZ9K$W%Z7Be*c1HM={vuz_76Y6vueknVTe zt#5ShH5X2Hz%*W}82Qv0=gq{r=qxvHQiZC{70w5QUNGpI6qF>U300LA%&?DdbpMbQJAsJ+EHR z3(^0VBvYj=@#JjCzH3cMHb=Z#yRP7ZQK!5y%5E$*bV~>c!++X>`oTtjRQ9bd&>C7A z!_y`vnP{KzNA!8|w;Z1CL@I&J*Wkl-T6uIF`@^_~OV(>0tD^e;9BVA*sjF&zuffY(z{Ktg0P}GRLYhqq> zT_sG>zDY0-<1k;+ zK;Oo$|<&YKhZd@s@Z=WtXqV)_XpLt(uMYJb6UB;4Jn>XxlW4s$&Tb)sbyx z2~_;2y-By~QcO3Usd(_oir|IN;%C@-ncb&&{Z4*oM^^K*P|DVc36~3P)9+d(h+eMz z5=S9Ta{ZJf!0QC(yFl4i)<>FxX|(CV$CHD3#G&HLlaEf_<&{Av`S$sC8Z>izBv&Sm zkd^Q1Yi87C259_Bp_f;wv71#xZ0wYc+y0e>JXF_1dC;#9qcF0ubKn2bbbiT;*?8$! 
zyo0g5(Cc7QQy6erT`kwnz%j$lxkiiRW_owkU+YznQ)tb@zMa7#DWx{$^Mk5Q9w*J+ z@b06Mag#lk&cd{d_~V<|I=;O7FLUZ`~24o$k9cpB=eHE#d;-cOGnP3^Yt>RHlv55&#i!PKv+}D9saDO9$$U7`g>rTe2oE0T ze}q;(=9QmS$&gY3coZAlm~z)<%ceYSQ!2OGq34Q{x<~F|YP+*5ZiW`_C*mU`E&5`~ z`T|%c>Q(LXYWBpfLjnTk+s&TUcDu2RQv_kW=X(EOWK=Cy-OZtOpb|a;d^^ThZ+J|B ziTzxSypW09t>dMzM7#6Cv6EYe%!LqcmSX zM>zrr{`d>!5RYl|$AaCOGPhn+y=lOHvwgku!Kn|M69l294xomVy^rv;witj^m9Jfi zUSbxz@qd%ssQL71*m~_wpZO*8e5iz5#d2tD@1_>0a1ESOb=d2u(s~G<3r=wY5Y33| z`@`H%a{FM<#36_;!WZdUVsl{WEvT)8=?wJAUFcF8-1~XhcoveatXOn+5DKMl!lrkx zhH!s(geRw;;um6xZ?3kvzyN+wS`exCf0k*cO#awf=q^+$x>^@w8r`hIzagI7*Y zIAUhii+|mwiC5Jg+HwTwk+#ll-^{#j3qX>H>UQVR6OtyaRw}TnN0LJWSIQ{AlzHdI zolTS6Re^z6RLd9YLoJ+2DmMJKX=}$D1&30`HO}Yq-c%0#E57QY?C;;AM~a85!svZ6 zL6$QvyT=RDv*VvN?s8}%J!UViSjI`9RZ=-;qjfrM~fCsHM>F`YqX^(%ln%F)!0U zJ26Y!n4wRFS*ARLu)GsgXR~u*Vkqi$ya?{XL;L~^2%0wY@(y!z@-VEmT%5ZOA^l{6 zWs(FjOe@|_OgKjHd3c}bkruj`vJDfVZ9rz4lhN*EdH&Pqs3MO=WmKo^Eay^_Q0na5 zTiBT)aDCZ|?lXaKR2N|XkkmmXsD{{!AHI$dbVQ?EI^icOwooHmt+M|^^H5Os_N9#Z zd0qQ?1T|{r&W{Xl;-+%`_h$gNG=hlgSv@P?GvF8-@Vk9chzS@Ad^(uMv}YaqSCegQ=FxZv@q>PTiGv%cD=chVr~@JPNmi{hhpb zbkMl&a9l9Y|GM3vhx_(eFfhD61?{Eymx_;s?m6$?oPRlA z*uS^|k#FjmCbe*(k1#yw(^tOZb0~C{8Cd}(I^}axIS@7XhGUmLW1hp|-L8Sgl$S4s zqbnZpUDW4)9$>DyTCa0p#;Dl^*{A|C72teaA7X^>^0uvagq4E6}1UN~SIatyVm#0!qZAi70~E&o3IUwTw4if8wKb>x;^Q)!*4QS=1GB zF`Zp_l9W=sdI|%kw6>~RCSOT4c_na3ajxvo!3tFd6!zIYaHBF9rDwXi1)yfXUnzOgHfx31Y(RIL1j=t zfi;MSMs=qF$PWiSGLmI&crWkpViu9QYoXMlmJ(a}dugy^&pq?i-#Eo>`+Uku346-z z+~Rt_#JQO($@wXF=$(WjZOrJ4i(~Xl18L|XB3AXfd10#yq`G>OnULy_&{wHsJa zq{TlxLJRBp;=e#>-Me|?s?7F;{7wQWHFl$b@%8HshcS)g-hUhWTWRDVSL@lgQ9LWo ztdMkO;4@;+f@yR2u{K~gD8OTSti^L2uZ$jy^wFIR^<-(al<1XUfpU+w!?v`cV z&wdUp;UE2)Ribl<uu?@kJRn!RX5L^$A!+Ow2hp99QPpLpsX!JGX!17z>1##4wdW|41U9)aZ*B66Xy z00)h=SxYIGC-I-}c$Dw!tznh5d~h$vp2tK}q^V+dCnxS5)$E19xD#^Mng zzpfM&L#373q^GM{mA9@?FLZ3$PGtMzzZ1JGq;xat&9*!t)HGNa8x(coN#`knHEfut#uR?NKD zWfSknjZdt6%Q*?8`ZDfeCP58R9f}-)oX8KXdxT_P znDS~KwHOIG)u-I?ev~3(Df-qI0m5R-)89sh%>qZX2P+ZHw>6$uvQm{RKL>ME4))mC 
z;F+SA62+^0(^HJ6BIFHI2$?$-^t^cNroAZ$qe!-4#W$DhWnPPKe7KxC5i$f1$RVmf zsM1Qe35mhtmO#N-IL$LM{S=qKVC2I}lj&BJC!AgJ;*I6uI1F&BvVkDE>5hWcj{{y* zVxK@lz;M$6f;VSZCxYhIQHaPgYx}>>w_@0lb7>6skhHA0?!dRKj!tfMM{Jr-3Po!j zicMvD>OP0_3Br6qx9nbY-~_`vq$WWKc#WLc00VevX+eJFULE@)-g%+-6Q& z$F%RnN5$uw)H|cL&ROuo6R4Yg7kP z1uyB=BpiOY@8SAnkFGiA9DhYO>6;&9>vUz^+4WhnIyR#<7aWQigQtIwHaWdZUf;3I zm5VZm8)OAJ9`KsJAVC)QG33?pktQ$OI3Tg>dzINSE2mcH3xJCtR|wDO_~Q2Sryo9Q z+))q|)wn3zUlP>Wo>X$M{pLo#5jgNx77-SA%Rj+>`3@O3k@P$lSdYutf@Rma2?u(XfQOvnbY`YbvV94Mu=Ck)he>-m$X>rAN)})ahd_>6R$MxVYl^pAorm_>A`EATJmEJ6&tCk=;lYQ ztcTDyK32U5OV9&k+42<^eQX@dbttQR*zayExR3`_;+j8!>{m&k}U;t&oR-)DY{1U#5Mqfj0%$`E@O&Jf)X#=v|3IDZ^kj&!l)ecjoqtNy0Sp+P%G@4U5x!++Jatyh68BP)Aq zyG5YrbPO~rkA*SYk}2rJv*BDZwrmGL?w^aQ}Zj{srA{<@vntQ&&JAeJ>FT4&i zTHrw5^qhC=s^_M{tun8*Y|yVdSiR~?a>y*^zDG@y9fW5vcH&YI6~5zqHx8osj>X6o z#FjV@jNhK0R(k+}MIPS!apD)qj;u)y5n29aYH~S{JaTEk;-yPVf%qs*g{OUAS~zhj z^1srG{Is|vxv^WC^=x$M7QDt4hFj-qzD{ak23$=gv}_qdy}L_xZLG#0rTaqb{wpkP z3<)s`d*?=%1cJk-)vIH%YqwK+(+C+lD^0d`U7;W`nsFz^n|w%j()Cx($IE_kgS4?i`kwlXOgQY!>pCONo=tW4PbRo{zTreYMR z)kbFzaUa3H#MbsB_@t}TUsf6kxijiUdoetj!bbyWh{fJF1bS_DvV@%E+@*fZ)W*gA z$yBNcZx`)2lN{Is)n2Ob*qWvrf4=y>-ga4YIA+<0mA@)^*vBB_{7;$bMtg}vlfwwD zzG|Wr9*3(_I%}#%Oal9_Q|$vPFaniny4ALGx^l05 zGgo*xE6mn!YQa=G|C=svc#1WlWl!HYa>?WL7hc4$UKc8ZevH^{NX_3UvvTe&54GyV zWl#CjL7HNRJhv?kfxwXIl)x>V+QYk;qgWbEg$||myFG0=wQzMwry08Kz?_q+OEE9! 
zRMtOfTUwFv>UtNSs^Gy!Glq={aQn-hmX3auWBdvyhGG*lw4?j05A#h=)8#Sea1}q-R=*i43&C6UP`;pNJ@Tp`%}$z&}L(IE9Zi9f!{PU zIi#pe!S%FhhUgVXjcJsHHZ-W{xcP#5e3VV`R*Of=#7^J2<^=5>)7L#GrpBggEv%ziGH8nny z!W%pOLF~JJSvlMU4J*}?IfF;@I|0_;%J>5{3I@Q1gA?DGv0vwmj?(DNWcNq>7 zzFNo1{4XbFB4r5>s%>++$rdo&fv>0{N_<)?YXg6nBs5B2=kGJ8(Yqv9qXQ zG3<@k!71;IoFibXhWYTZ@?pKh@Wd1?Oqf)%KIPdxNJySPQE960gkARAbk`bJCZu`= zcsuk%aq1XfRz8xK<5J5BEtcYe4b#AhUu;(WMauT{r>chVUubw`Z!nzVDn>fj9zwd2 z5Ha{&`f%GV0Zv_RbQnu6SrSX5IVHzx%A1m=|+ij)z>I4eaiYxkIZwkjY(e$)xUqb#a zHjUgxZU5I#QD(xU%tR=dUWqw(7&ZfT8+X-!a5=|HCk~3G)PO?7gn07xe!N2-4IRHg z=~wuF6NWGB&<&g&lmDy~IOeIpY%$2i?17kep|f6dpKHL=cSi1Pq-e|uh(?;HsvP~l z{6CdXJziZoJH%Mmg%?mp7dx;FhdN|BDaE@9M-k&WI2*Ca*>Go7TCd&5tL>-gPuc{j zc)#72nWo0KCOIueE~(~_s0z1ooYdT9SPtF!P9gbMe(Kp7u&^1sUIBA+ zU>?ImXNh>mHH*eAOeFSA43wp;xrQf2Lw$ ztKRU^ZOrsM<=tOFx`-Wc1Kw`hL|^-@#zOf$ZX%GNDBAdGIgqL3H(gX(sSks1)LGYD zVf-}^qr_6;D+K*BX_1x)Z{Qev9h?T~DA;lNX z>JV4Lx%+N%e5%~*lBtfDV=0jVa)NAWDyGVn=Id7FdCLsaO7Zu;wtOy?fes$b8RO!e zodEx;_h6{x0LFJd6&Evh-tKYy4%((L4|xG$Tv)pYW1kx5nBB~IR{A4lHwt#f#AL_ zabS)cyG&$l7+6B@`>P+QdutLLN7@z6PI0;pBg2V@r6%i3r$%S44he|v`Ksgk^A>YDH0~?m4`6@gO#<^RWp`a8}VQ?RR&q*yv$@STwm99#ii=v4Yihd`VX!GBzn$?Xq55 zpvq0kH5nCXA3Uw9Ue_j$Q2%QpYTMkqq~K;VRAzkrv|#G2vv682j*?OiTGUlD0eQ}5 z^Gh;9e)`&vG8cBxSD*{IdxVFAVyze@mpB#FqwK>yG-RPMUT`c5%0TA9vg6@knZ=2# zqd2?8^50gC;l9W>qg=Boma*E9Fk+n=o8C#JhFlR-IV{R#{$8cW&LL;@|B7lM6PEJ6 z`lv)|>+p276Hs0p2*WJ2M7 z$rdkjYLn=%V)wjx4M5b#SV#PxrT+u=JS9bB8M@87x&uAO;7Z7uny0gn+RMu32-%g= zSNXH*%1)v`<+}R+%YSu(MV211jM1vI$|JvN_$@E7{1qm?~Y4cOTHzb4Dyqo8&tNDzj1~=>AFvb^BADQ@5bNE^mg|vXx@rofJ`QnC zq!{Dc52Qumz4zbtt)|0@ab-8*P&4)5{k*SSk-7BAlcHuxO5b@ddaQ`#l;; zUADwzpqfe1XQr}d@~GhAU&bw}&CDZ<5ztX(8}^dvI_LZ5hhj~I92R7HllYxlT1#nQ ziNGMMB6oe9eLy~_UCxG1Mq#tpy*@=uLwS*TV@l1keyOVrz*I*1(+jm~W{>%O zdy_1))g~P{Nd)iI*bL|WiSb;$e?)(x`vlC*!4@OvZ>vHJHRH|O33!}7nvS6MTwY^n z^jqC$xt`aHr^@VDtV*U#Ye|g&7*JD_7&ty`sq&I7Z9-Cw_E?Df!^WpiYa*i-*7p-G z%2v!A(+{Ox|HKRT3r+IK!3s2p=Gade8uG2f_|RV+JQ&S84!@llubot)T;jrULU%1;{Ksb6Lvoaw_R`nqc_qO99W6@?79rkXblZ< 
zbK(q}3psz%&z~8nmGH{u*IB>NgjVQ={#OhLfYe4JIXPdZ%$j(t5_<;ii(OP+R6ZkS zGNq)IgKlxWhzyi-K-_aq6#Cc|wT}w3G1%+$B4j_#{`Yk@-8KR|{HPN%b^BOdhwHz- ztjV2)k^UXMm2Y%5%Er8he!^WXn^~zh5KLFVkD;2c_fM{xVbZ5@P zi}Oo&^NlxBRI{1cUI7;Nc~d_2NfvdT+<&=@dN^E1=jU`X)5; z>(hBs9fnc+Zcet@wf65Dy%FEzkII3%o7SqN*j9Clnz1!4JFfi zzA1|K<+Fur5jif~+~|)Z%>G=mhhCnCF6h7lPnI-Y@@o;^_%Skv*F54>y#1VQgrJy1 znH1OA-a9RvjffoiTr4PXpfsy+469XF(j}J^cEN`cMoqH}e#a$iO@N9Q)%VVRCki1o z3pmD<2Ua*F(U6gC#6*>x)Ki@t3L>H-6&OsJxb3$^gvGk5Lve}f-QhURExaH9s6rrt zgd{ZeB`48TntU37=+*1sqUN95H^`T}Sgk0lYm$(!2m$jEC~Z=kJIpJ4j|4V$*(mUo z7WV_=S7(m=4pEVt=-tVAAe>X8rO;7bKDyzL6r(JVw0-CQ$pKe%T~oqNEow6 zG{maSRjq)zHd*-^T6*sk*h*Vb`(ZYk#Ha?V95UmriE%1c*T*?M`)tSoM%57O+O?r~ z+mS#~H;?=l70wwU@>6PO+FVL4iRPTr31)985W#2ETYEDdv(*l2m0*1pDgNgCLsJ`_ z?1iVp2$vYSRLv3ofs%Oh!788zi>fM_+pw>*FI=l(TJPvj(euT|M#~prH|Pow(@Z)n z-Uw~-T-(wt{|VV*%Ulv2BMh0wOPfF~9d$;Tm&-rB8PjB5nUX)xZfyT1qSdifUm3M+ zB6u6^eeTjKzyNCII=BSSRCb@8J)7;?K{zP!crSWK$f$|t6og-`%uODw&84C<2)X?! zBzmHc8OW6h<=1MDomXqDJm}rFCw0T;dfZium=EK{Jk4adY+xCgv2&6|$ouumg-9ue zzL7pVaa*3}*XjANCY8A|K4vWi3=wD4F{i(5h#RSl050y+&mF#bD)RY(u=hb8>3~lc zolVQqFObdH%Azq4s;zZNzVj#gMI_#>j|qu>8CO{0VOI%}RXD#uuEEyNCgVGV|8*Cb zpNIrcK1HGT$A}b~jq!wjOS=fwHBg|4b-DyaGN@2f+b@5U_dcZyGsZJ^HXL2SFXI?o zi-l4e(MI2IL=T{3X3&-nHJxvFWzWBN$n>}8)@iq=t(z=`|`8Wv6X}Gnq9qP3q~NN?HAlZUiNtAN?be^aJ9z%S#jT`Iz{X&^GC==oz5 z=r1<2%r^lqo}z?czo0g^;V|B1EZuJHBj1RD1+8uqG8?b`qo*<9R*2EhF5nIeTiU33 zLp>$1ck@Mc8zVLZSqS)C+hU#}Bax51AF<1^e1HvO8TWzJ`wz4%NXOGB{|mP$u7h{z##1mGfcI|E!s8Oe1oL zMV#^Vv|{l=GjamfWBL4%&C`oLI_Rq*D3iJJwY;t4PxD~W560&ykCE%0qN+w`H{A{?QK7y_;RaPC8O@`VSBHxjqVUyRDkh2UxAlkA27HPlG*AH2T1x z+v)H?_}l!ks{gA%F}-9Ub0~9`o7+qpae-DA-e`h?O-} z8fs~l&z5h!=^yH(Z{0?2miGbz#PMZ`ildMl@<(mOo;!H0bb_OXD@0zsOs(glKobj3 z(ypIwvJ~Zc7r!olB4(*%ls=%Zg&A@1*Uiz1BU_i_W;?E)vc*oCkxgIz;V~y!-`ktJke6vPB^(R&G_ zf3Ih+wz%B3XH@FFI+UgC`ef~(38o9vKCTtm=iWV`G>Q9}m{&Xbgf8&3*zXB%4cg3! 
zOezuP+eze559rXZb#XZRUl2_IJTPN6ef(nDXjL9-r|iSj5nd@@o@=A zk!7NkY1QK;X-Ch^m4jrCeE95p^_VV-K0?ZHx{I15p+HswuIbfe9>2LhE-mbS)K8>e z-wv^OS9Q*LQb6af+&;h2#m*EU1HYhusj9V(LwJNgR-j=Yt~~J;sqk0sXmuG17eoQ^ z@HA@{%Qh%v+Cf>9f=xr)21Xe}rk!LZmo5cc~v|Lx}_>_UD(aG8K-I(tNY6hyV~BWmx44?`B2Y6MEEm zRHS}F$w5o=#JkP}76kzuka#;MhxiE`r!Jn=Z@tS+yf9WpE#Pwn)-?wEp!#bstc;ni z&|xj_$g4de<%))e7AJl%<$=ctQv1NzSd-2*0pcYlfi%4?GV12Q!Wkm_g~kG z3oIH}*USAiu}jfZ)^hTiJ4&oAEDHPI_WTwvq?Fk??5F5 WxKI^+3;okd8w;8DOA zf#%uAgCpe8e#l#17;4zUu^D}?S3Y2N#kipBjDL}7u-Ubn|NcXQjr!}mj%7S*cv^s>=atMZ+4BH1;w0N{)&Q{-3DlXFyleJjf4lG9(uQju&{25cBK znKA2O#md02f)fvpshn-q?)ul9v^;?$U-$FW2h1<=nb(>y!1{Ot?gqnr5i+p7w(;qSljzHMygoT%KC=Qt)K=Qnr%s*jx=1uF z_!oX?lv&oU8;w+aC8Yda+k;=ntDSDWx~mQT#%O&z);Jq!>_th97&1wBncSMdBoF~a6S26LRQs;Id!O_&-E+#sZ zhf<~J6iX-&Q@~w2xqvq4TVT@@2TSkEy3W+=ieA+~DU8Q^lWZE$L~8{#f}MmGi?9Lz zFux}-q&YF;J6=z7Bc7l@8a+IxG}9Tr7}9BV>%j_)9u&tH)}7b~OyhSl?eyCgWmaiG zo}pdxodF^WnHd3yVH827?>w1k%FGHOirR9-}%YKCkPk^gcIx@Zv{J{Y!rdZ ztiX=+BU^89g#u^aO`DY)r8D4(F7l6!1-IU&p$go#@?Ktk`udjCYHG)!n_olV6cx8` z6aM9T9=&~FM=tu%IO7wtnslAMzCA02i=tZ4fSD5VVt$tEL#RU{84_11`|A`EyPo)7 zwe(>`uBx<=_``2Q>U(BeN~j-HzAUjS>^oWCj2G^$kS%(*OY`+D6(q%cSQZq8? z-cx@}HsAesP7SjRE0oxr#Wq&%U+J>bWa! 
zF5vDJ+WC@xW@I~ke8}8&oh9OuL^5vM6{&mFJ3Nf%r0z6eaOEV{@f(aG$ z5DaDqFoJkv{B^i+wK3k5k;HQacBH!Jznn^n$S`spf7I7JhJ4NYG|(JlyaW(fV{+#9 zDHk?uC%HKd0~UbmMDE@Nc(`+E75h4g-`(BFi2m!_FDW*9TXJc|059)TcJ@*89NC)s zO^g6^hR*ycSEyEq7GsxHZ5(Iexw?3p4}7S-H;xEdV3|RQ$IlWR;QV_(X(Pl_Ar-g2 zG4FAIqnTQSuLQ&6%>-AL1*pLNGBM*Zgb!xVYOb$+q9(pJjAjqbO+d#Fm5|bmJ|-mG<$;VL65Ckx#3TFHmrAcq<%FK{3D=X3}z*7 z&e;Bq2Ib(^ss2!ONKn9Ii3O4~=#XQ1VjAUuPBdFWI?4B%o(V<72P8+At0$?6Hy3FH`y z`9IfDPLMj!=V=3#bmJl&OcpIkY0wpXyviYp8gH)L zTceV)mG}5pwDj{K7p}Q)ZLv>MGUEK-C~4U#luq;imi(NhZN+{s zHT~_$^-s3`m|iA`Jt1D&|9qqzWh69KR}=2!L=_z*!a9^Ram1r%l8gL*7Fk+iHc4rM z+|pgDukU7JJ`c>$m%E^+E>XwQ$AHBDclat@rLgkqXovper^zi-nQC=z@rwi`v7E~i zDS1mq&y};NQA(?#t(~^rKZi@&F4N7+t32!^D+Dq!Z+)8~k24VGl_#AidP7$Pyn3Nh8`uf&um~YhJ zz!fzqpIREEI-jhOD(nEEh>Vo`N31e_s_FrBz+WL&`T?Tm6fyE_qo+^xNC$!&tILTk|uqTc{;q|3uP3hG+lO~ z!*m+HHl*=W&q|8J@akuT0OG-vSP%Xe z&bx>82vfY9t4ccq(zxqzf=oQXu)$L7!%be*_r-@3u2x{m$WL`>+-PRf>2qNOPaiDv z2{W2Xn7)r@adgDHO8!C8Hqoefewc}6mL=#c)Z+Ez@EO9grI-k^wqit8OHka-UcFmG zkRa0n=eGSdI4h6vFD50P$Wjb8dZ;D7vg=m7=e6e{9m8am!wbqQw>Jv0#t{LF#;=cW{jDdUV?O9ICFwpibUcWii}80PEa9GWVfY##$Nwh>{(powRYG|ZCq@bSHkh|aTH zFL*8$8#4)-X8TbOni8cZJD0gXJ2n0$DR{8MK+mhqY3I^EmEFcN<&>cXuba>i~n?^Soc( zs`C%3rn^t~-fOS5cAp2tO5rI|o$#d?a%1*-F=gHS;!2f8tVNHGoKm=re~3(J^|`HN zabOg3KD))*xgDpK&BG!rG8=YJFy!Pg{T*|9%X&T83Szb|`-tGmw*}GiZ#IQWH6()H zILoy;|0>UDVMii0fyJ{D)oe#wp9{&!&(ZimNO52ik25T-)^*c?vuS8X;-?2RYBVFp ztP6L)>Fe48?9>PC-*Ix^jypP2a7-@J`Ns-dbbLhe_$(CaoH9XhViDiAmw$e>DwMBJ zWA8H=MpBshF2&?=w7(Mxe^~M+&L3`Vt=iJDYx!2hcKwFWi%+zhNYco`WJ)MSF6H&o zrwk7s!;5#(?2C33ND^IyZFX}3l;8KR3@ZqVUTt|l9ZNSk35H? 
zTgxEzIOBQRv|K|DSl=b;*LDJrMGC(DgT4Hl(4g%cnA3+AE;a4Wiq@)p&r)MDB;ARM zrYJ3&czXD@3)Q0FmF9Gau~=m56AjQssCj12;$DS1rSsSQUZh?)iKWaX;qA9r>(}ef zF-%ee{d*+NmtI5);=jFBdhyqIqlfZxxw7Wwp0wBFOfKdA&3Vh4mrLILiF4ti{ zVwpD!T*+b_g{Nd78n`o{zrMOzXHhYs>*(uN^us?OIXkZ6y1TlNnlPG(Z#2DmC2#4-*_Z0uqbEZa`~8YZB0zpU(Q-I6erXPPaylOL}70B@3@xXA0@HM zjLu1eoN-)N@}$Y-uQm}ldG8}rw|i)Q|7D0eULG-7ea>(TpbEoj-_~L?|2;M>g_SR3 zm7c-n3>~c!TtkzSlW-@HPy3rDr0lvB$^*mhCUe~N@-dIg9|-m*b6O@PoM=qr$b}lm z0HsKZX80@9hzd|F-1=-go>*x;{Jsr>=UmYk%{X=1Qpu?K$gSRmP;{IE3n)Jsr}Y+|C#*O*4L_!1xD-ISgw}Z?FQ5as`k}PXxUnn zq8gmuZ9Y{Yr)j`gGC7P2@ket`T~5)$r#`wD8}$s8CZJ)eEebsZ)&m&MRLQrziS4O= zFwYz7n0FV=K}H6w!z8%i4HXc$ZXiLLoGo6OS6^k6u_XrRmZ)gGLiyR*xzapKM=jE$ zWBh&QdTlpw--`rG1`mo*GY15mFK=$|sSit)&L%x^N+ zR2Hsh{5;|DZ|s)0uSc&bGa7w=37{02XtNo|jCljdJ+6j${jY2{8%@p((Cu^>5)4G?U;z;Ii@-Pn`OqS%Nj7t{2OZ`jV#4dhJY%< z9f1cw(Rt$=R4IjbM9eEZ@|0C+d_4W?MEgJf#fxN_Pl#Aw0{DmZ>_7{4SsGHLX3aSo zQ&r)HU1`6C>%UcqBM!IO5_Lp6#Ddr7KJ~z(BwWQ0;@bZBoxQjKH!9hfkXP^2W!{bP z=4f?Y1yYtXbw9Ad8j8gRhsm&jm@R>rSh)yuqf-(7yMyBR;M3Z zoJwb8@$kBnibtUubKZ1%sEReICsS8Xw`Fy0elnqR)7XZQ%s|T-Ry1NLtS=+UOy$W9 zmk)I-a?P877(eEXZhsc=X<%l;(pr>-LYrFhwT9vpOyRmySv!$hGgY)VKL5?r;zz1^ zN$UK6tQY>?Dl!WsZ1`W$=SoS0^zd(8@{$XQkvt3fXB_sw`<;hj7;Y4ONhym=`c{`c zjDVKR63`L zuCN>(9js510vrsxHz*i0!|yDBZzOC?#l-QC4W-THl2UJ)fU*qj#pkGfE8VV@^ZQIc zjiyi>=CP=^JNE1AYH6^kF~O0o!ef(qEJlB6ZAW_K=soSxOw#>R1^CV;UP!k8;(+U_ zRrGOX1gHIP+9alPOIyr3OD*}dpPQ$6r$d5MO@;k&wM|>Y#4|-&e&e+0a<)yIdKo|r zDfb@Pd698BStpCVo+g8Y={y|{n966E-STT`U?@m1IkfQfXOa*OaTxAIBCWbrK(>(y zJDcc6$c|kJG^5l|+1Z*{&ZbN+o3e^sY;9%cs06`vr$1i6jwBXA;I>Q;rjf5 zQVZRHam`zc+->ni?)FyX7v-;aKJA+RY&$W9|$f+3^(w;3Iow zFMx*}`gh4w!F+X(Q^0OoYga{|XFmEhV`2l>wAV?$ww5Q9m`ClhS=WR~%l3N4@Gn2< zl+76^j3yUd%r|-xP0-OE-?imR??~J7v)k{t zO$tJWywRglJA>a1U72Phb2;& zSL<$Ev20T|k*NLgMiK2~Z?y}*T|sr1WHCR1xEg?5KJe+_Axhgun>?kW#s>HO+2xB0 zwQPObCEAH0A*nE0dh5(H^o*+BXCTNoe0m@ zjh{+NNQLyzpN7q%J0HLOE!AgyrF6ZF}qMO<|Dlpp`MRe3|W=4(}><10gMH8J@D#YbdKWB zulTQg6f!kNagbN2r88|@yhinjFq(z@Jm~rS_h`03_VaFHFh3_n$KBi~R|`o-Esx-x 
z&lZYvPje}_^4Uf*M~%m_WZRY72~?4sV5H~%YF)!Z+$g2h)XhTMd^{$>?izyq(T4j3 z>;c(N_{|8v-3l-X#PP*-haJw<96r4B3=RocPOfwuGYZ^&SWNB*Gk3NqozsL6y8(h) z0lxs=FB2mjHkshsya0M+#^izfCpuFM>xdUb$JwuAhW_l`qa{D;^(s}kc&z?+aHI9Y zDzq#nCP6ok#N+S1+USuv3m2cpG;!tAaJos_pfyZQyC2yd+IZqtCTKLJjBRYVh%%2k z&Y2GLv1rM7U|t4I+FAuhqs0fJDR6t^Zuf(?4t;tvt`^G}_MaUjd%*+?l@XVrN_{0| zu`iKdX*Z(-NKL!RiONF4%ea}+&#yvtO)w~MpRcD^|K1<7(g7^$%0~EA_A=?%L$vp zeo?1-9>tplG&JfwT0+%&uv-6nRkcbf%M~?sSu&bO8K%M}2dPZp-G0a*aAml4kLr9f z$&|WY{aVSvLQ}ChEJsco6D!T1rCP(J1@7IeHNPCBG-bEKtSjUPo#8o?9x?7m_v)+Q6q5{Oj*t+#%_C8((?mZAIJ~Kllvgxr7;Y()`k8ro>idKW9wcRX#FWO7zn1A~3;{i3N}VP!Mx<{@1+-%d@M1bXi ze3z&vb9tCydJo7n0>FDv<>e(e)fUDqT3Rl?daz17J;Z?kbm?J8AyH$;o@u zthFjfqXno{%}o8s`x=UH3~7NQtY`skNnY(})Q>qTRx9&h^SOTJvJiMqx~?l>ri$QU z?+@tIA52}46E$!Ffg~__i*Uu`o`aWWxF1XaoT#QN+}49`jtvx1Hl(1Qw$`Dh={FT! zwQ9!gDLuoa!+-mf#BTf!!A7qu@?3tIoK6 zxs7pGrV=&b&oR#HaGIO{6)#%(4eRP0T)3d4^rDx`oLMch|CIt4@XglSK04$ zYRq(M34EYh!W+sjIQ#sTRu{b(HLtWNMVVu0hOrV7%fpX&;=G?e%tMI zWv9DMczaLvDm=c>kz@g@+(sHp25$4b4`0iJsv;r=IbLj>7Ug|m{4xLFZ^J$jZBJj> zIbCqtwDq9=+}KpM>0zqbv377UY$y{~5HW!7?%wIhdt}>Ee3 zlOtl|dR608u9qmLNe0`QeCJSrVI83g8m*HTb?a zmc`>2j|}IV3GeB$CDMRpF>YCZ3mt(QBxV+(R2$=^I$1c31yLVGh7i^B*nvX$+K&ce zQlo;3aIdO`7PHY>d}=gwc~V>cKXI*{5uNrDLh@ z{8}x;F;#)M{PtzTUYMMDJNrY@(V{ctQ2|~_B*hdW$nRpAnltnd@ z^)g{fFTRlu3Crv`Y=o~xn1}USTH!*X`NPDEt*lSys+#%5-{_a)k~aT5em-MJWa=sJ1dz_1IJm`vN_b^eN6}nE?(x~8w#hzQVb3T5h z?)@deG}V}R$AnWV5M`Du3-to7pTs{c@xDSuGu7^vUCZtwRpsN(2TosD_@09UvwWYR z<8pujBe;%YJbem!jqAF#Llt)&8*mZpvhTd)$AIbz$_9x*s;LQyn4I~1!DAO6n7`nP zN0>~aRm9lpCc7`wsdlTtru`6dz+GY7Uq9?)XSe2Xzt`zGlf_fS zBvoq}@Uo$~F<4dot&f1JlYP8-|HgsaJOB-$>3oa4(~n6)!1iLDH>J@pW#CT#;q^6C z^F4{Co|{1odh%jDM)ZA(;1G98@554+ubEW(=aOZcv3=1bql2G4=a{BOx66Srs-bGX zaHVnVl-NzO3qOFH5?S$lP!ldq$newz@L7PzW`qLzS4o$UHY&<{qsn_` zKLu*J*ghb-Te`V^axoSF zm9<3ilu#Sm@SiY%Km&eT94l5?k+6LrjR`t!1;xB=++CtXyruk+Ih_%I%@wXU&rXz9Njbz`t35Z%pGnX49W^*B7t!EP#!$%jL`!Hnq z6X65bg?15a(Om70*{!1yJrk+Y7C`9>{no1#*bziC_`Q$3SL8A6xJVB9Hnm 
z*qlC0x^4aS?LZuv|FvMerQxW{6GmCN;)M{TMiJuazc}_$X=&9fcBEcb;~z(DOptn+ z-&qqKB*>urr%0}#z1QXx*F<}{wsY=CPn~^wn6zu&rrAu9m7XqvISxxNiZ-~{d~~qK zu+ukZD|eu_t%*T1vActPa?0Gx+dUbu0rdDccW{h~yCk*gryqva*Tu*p3-Km5pZ-w3(TPQX(`AEh1Qhs}# z+D=r7e%#I~9@)iUZQFJ|m(acoKj4fUcDc6-qzYLX`A$Es#Zn=vML3#8a@NJ6?e%ZS zZiq02Bw-@`VQoempz>Yk3RsAkz*7RELodCYl&w(0{VlKVW){2we35mbWecx=k>v4J zqf=}8nRlPExZ7@Yk{fw__3CGLuc9+2cRAoyWcyarLXk&VbdES2!7NP*d@lHu{vQTR zCkV5HylU|ldXIPkC2npXAiOp7tcdCZzrv&MX|;jpEq3LeH4z>{GA+G~lO}|*%_U+R zAjIotHFlA@7w2gsBnCdr0WmNvd?>LWqWu7Z|FcmT@BD+FyQ~PLp7<}ojC@X zQ4Q1X7LKrcV~)U7Q2o7-!=fV;=hx;kn}ynqFMod*wtx0+_rP>JR&x8so;{^uXG4ai zVZ{2sVZY9?%K*n?mL|jv52npH-|5xIA6&u&)$Vbp-|L_MhVUi*V&+*ev?gk!>g{KS zf4v!V0I(DD(C{L}{z2@Rw#7 zQP=YxbnGi^x1p}(t7=%3%2VJHkov@6U9@RiRN>y@s`8LcV^=NA6$GNdAZJMs#)MD|tW7bh=OZx79ab#RkO!hiNOrdfmUMdV+NQZ(1dtpb@1JvhR(PdZBr6*cf|G z1y(o^F6D7x0kMCf4u;dH7`nRE^}XN9(#E1Xg?*8={crIl#FO)coP}81_pT>4j)1o_ zosDj4w89dv>v(Jo8($aJYiZeCyZ$Y)ipHVqTNl4RWNhz(W! zT7!W)?kXJS+uq@GX;93SK1?QE`e!%-Q02eTEnu5})9Gn*PaUguML&j8*z*Ff%R1XT zw|>BjVh^H$Pb>#fZjnF8S9BEW#?L_g1eifP#wh?n>G%hq|62LRSmdp}y7n03> zG1FRSx1rtUBm3qaNXcGjeH;r$0-?TC!_EuOhk0kzS9i4WInNAahkmiRk)XGD)ZYLP zP4uBE+A)^!b?|_a|CB^!%$%vm+2#E>w$Ea1eszY&5e(rz3nN zZjDm3rLKB(jXmE@3i+cV@pAFQ;;n;N!Z-|zxa0XbJ1^<-D0U|czVW#{d?4o%%?3KI zYc(Sv6wGFd>Q^yv!w}!-Rkz`m2DqA zGQW2)K=PahbHo@J?q9c4-2v7}(7y3CPfpdwvV<-SCBWNsGuB-s;B{i{=^~EO;}o-# z@2_`p-xZCb7)`t(Xl`HkbxQwAN}o>9?bBv!BNX5SjB~t~La`KLpJ&EJbP7Fo@~+8k zN+GX*`nyg&&9#AA>0~$8k4mE>ad+i)X{=^vx*CmoScx%_JOitDwSlCu;teU&8c#VO6e8)~;P^o%9ONz3R5LtL}!c z3(=xk+B10+ULo2ZWVc=Z{fkHW!K-DL6 zAJnD}qsLZ2{*Pr$X)nSU)V=Tjs)>Y|r<8u$iCka|<@ir6x|zP8n!?hu+aS2;*DW}1 zZfHb#I*8+1!aY%%CF=zJsn<4wivHe*Tfl% zqV=r!sp!FVLMumZ-HIK$L(?$LJH`!)g3lYY8_k(pD~$z9p49=_mf8~+#~WtiN}vAz zRx|55c|zbI6l}#1y|;dSmWI>U52Jc%tG+5s=+~}Kc4n>ne6sH%PBS`0C1i5n*M9i> zgpNLM1gtK$G^32cMS{ri&$)q1##ebKjSq_{o#>^x1sb?@*E&tkYWijl)_&)cthw-S zh11mNTm;eyq8sbwS|zaxC&JI6Odw zJ~xE9PSiLJ6LShKJ2V}4$ZH2z=F4bt@q{=o<{0i?UL-slnk=S*3YR3}cTv&!^p%eQ 
z(>jCFW{$T`wUY1$yur47oDZ4C@2Dcypey8AYXztO9M;e3+nRyl+0sH3 zfTG`H?4~7hy`l6OHGO?~!F)LvJh<*Dy;zb{rBm1FvYY0MX8#Dmq>l-LQQa-pl)VyJ z{jCF#=-A{O!I^s`5>qPKsg&f8q$dPy_Tg z44Q4m8JZs+9lrV>Rkn+WKA6Y3(XJlrgjvhl!N0;KX*las+{V-9NLYaGz*pslfVhY+LkMt)W^tV)0Xa`syZ9yc+6T+j5DV{$es z62UqvDu%(}Z0w^zUmsBuL9>jtx(dMG(#>A?5qsS`$1iLQOKp>QUsO;02jL+~7J#9) zEoeKK-{T-uzsvxt^=#pm{R6OfU-XnZp^G+zp%W(U`}n)+ckN6h?Stn2n;0ha#~b*k zeI-tEhiDC_(mGB6xP*y1DmNL1;v|ivazY8w90XkWXU~gp|5?+7@NpIxnc2F@7y->n z=Dz9nYg?|HMi1Fp$2|AujEuAMgiZ_XQ^a1`!`t47z8f^k1`=f4m~W)drUeYDLgFQr$3A^u>^J7NfQH>zBfg&PXn&#INuR#(xCNQ=^+%(Y>~8-P zlR$Np3YRaxc(e$Zi3e5K81mrj+W+h3O6JYqEZ|6HS#4fPbf3JQ+({c$u*hcfNsN0s z@=)gA8?TM*^L>T(cj-e@ip=D+0E0){olnz_jkN{_iOQmDduIDUbpo-nx1`%GG58b} zn(=8H2isZUuBaxT2{+QIY6pt+ooC8Y4f;!ilhTkrdCTO(1z;Yaw zAN&k~ZnAJ6Fu9JoTuo|x9X)jwyAtPpu-#S z#2rp@lrT(8!=>%m+ugFis#wbYgm0|m zqSoB=YrLbv7)&nVdP?UV5xwAFCLAlwt<+Hd2+ygER1#vMX4~{`Wrgg=*E4sQo(@EK znPmvY##1xlkk_G1WmtE!WjA>v^zcH>JdBYo^d!LaDFS-sV#S^F#N##)fD+Q+_W z>9F5N$~3E9R+*DHhx+c&686%EmH`J6te(VdHA4PBf36R|+?tF+Ds)kQIiFoXfc~a`;uZtFU4# z&T;uubQ4*{al`k^)I5QIA`Pq^M>2OLG$A;=%PO{IoG1&M;y&fW=JC zvIPuxK#3403_}1rV0&3+Ok}siqr~4oYs9s8XsBx9vFLg~7aqI`jVh?r*a};vy`JNy zG~fz%I`@87$VpT6&EcM3PRFLa3mJvOE+ap+b8o;>mwKNN+HjmNeD_&0@evR{hfQzu zKACpI*SV4A<9EEh7(zJ!x;5(eMk5@$?Dn&RR*rov1q= zzOKEEgrZZWK2SoT7%dpY-+4$#_?>rT*fW-vF}&~5mQ?vBE2-?BRM^7sbi6EXVV<%Q z<|*WH;*!eO!N;3zzRPJ!cEkkmDsbTvGKf5m?By%}YJ8QtUVgyq>x(TRumYZkUU8(b z7GDhAM(ij0@+}Axc@ebJSBW&+m;Z4mU*8u2M0H{?ZqX8#2bP}kD8CB0mc=XoE;b3~ zfe7m6y=DM`mEW3T)4N5I#Db+CdJ)^rD=igp!w_=dX)29f`8JpH@PNa71yU|rPZfWb zLbm>CA@6_;NKA4~f!ex)Csl!CyV&G6heteWw=RY>1zwG#);FEHG*^fhJr*A|zkC7u zRh2bYV2HOaPJa;2r~-e4`%tl|qNSsKMx+b*TsaSI=huq}=U1gi0DCtX4pn!RMt!*K|A(seu9PG4uvIJ6Fc+p9EEmDkwp{rrFxEx-jM{dPC zH$R>}SotQgSu;#fbsP#M(1K&R!Hht1k_(1vM-U`L1H2S>Y4mPeEpy(X_ z$$aTQ_H9>=H_mQuG{+w3(lY_QlBqxS?I(<89!-8Pl0XLgz|tTRXNL1@re7_ie94v7 zR27Q-#N#Q;0>e2U`Yes1|44CbCk!H7Rwqh+ik;he81Yz4OQTbxnPc?Vuw`rJ36HK4 z(~{x*;~X`Ut-;y8=}FW-!ZzlWWC=S;>`#{3eKzr`6Q_n1NA%Dmv-7SR(1pXQ(b_X( 
zg7WRdlq=e)erga^+vt}#AsR?*;hho)S8JR<=NOcQ1WGaxoV~n7P`w&j2edt|Rp&b% zGKEAXnpKHBuhLR3*9kOH?&$<9s`uxt6CIiP9g3H^WoxzI$RQ-4ec0qoek?b?+Na{b z4y&csPOtrn4jw(5vtA&s?9d1;d;-w;I% zVdK6Yu^%lCVz&57h6 zH+NJ^`ZQQhIh`!d;rt7R#*VZ$kn#e|9ac{g6lD(f1TZH!pB{5DL|X+%F5|AK>UKWs zq!VZw*j+bN(LUVB;Kh(AUp$^dKObe<0P6QIb}Ho%ogm!*4R1d>N!;+CU>R!W6D*c6NK ziPV_8ul~x|-?(|DVCnc%L>{4FLor>t5N7p6`Q$ym-Yx>3&CIetumR}Xq#kltG0Z$# z*}Z1TL@Uhe{Y5PSy?ja}fD1?SVQW7IEa;%?R_l3tjK@=hn1Ri0bi0m6B4?4;AtcD- zBAsWsQ>Q))5zEI4l~DQtR5i(^3%qIJt(Ok)w@?u(<$ob<>b2BzK0xR67a}AiBoaek z8jRGXu&#L|cqERScjB3f4BH7M%n*XPlB>JCBKi6VqH0ZXYQoXC&EyrRM-pdBst(Gm zcno=C_)ZUMDoXSxeao%nQ8p+o;n3p5Uj}D%x*iCRZPDO^wMROg~)qo0i3narJ#` zex2LEhc|`QVj2UbnYcpKcjbh@$zy7ndUI63P~VwE7}4tS(v;WeN@Z%;+&K~b_pR|~DD^HZj`x1jE^D4{y% zK>T1ucqiE8U>zp`RK3bd%j8f$kc8$qRPj~vRCVg03x|);A7^uaL1p+t2G!X{A$BU8 zzOqzBuW5E`-1n*0ET|rvC<% z>rpGh=QTJbbPFi=o*~15kBno1*gBedo9f3+RS@nodjO7s87|52b=z82a$V=XKCF_z z+jO1sGcz--3APw!;w*e$KlNB1spZM@tZt;3I-Z)8d<%uaOb0O6Xx1ISmrSG3&m1}h zPCwRiTn?TOQdmlzKTXeH zrbVZLlC-3Z*-*)G+iPq#lF+e3|FNC0KOU^bMhCLaa3K?6S{BoLXAi&Wy_Jm4J+Il% zU4%b7A4X;bbx-mu!3>cd3n~=+_wuol%wNhopris53KgcN>*|?azNsjWz(M?};&kYe zLXB*e@*vXI>|mr29O#xzZ?NdpvF6Qi_?a&1^Nt009X@}14_^7C0`xm^qc$I3mT7pf(<;|1xqu)4^ zW@;w?{4MSB^F(HAS%>uz$I6xKJND3@%XqK<#fXFqP>_q9eNpG6S!ZBWmnSbWIs8Di zXINR~OCF7A{^`nqq(Z?`&iLhc+nc>cJNl7)UIP?`T1*CQ-%vvXB znQf0N(`DyL zjPhS>ts0`vL3OnoiKBTza_Z=vSU(tpW@6V3fa=evr|GIwtWfbvWA6=a1hkgy>w10t zTcsaKBKqAo^t%DCYGL>>q)X5ZB7}%r<1hCRy8%;KT8hbfj-&9J=0CV9_(o&u{!H4% z<^-*40fSO`U2~+vNjPb6I+}}pXNWxhFJ5RHuf}Kr*Vyz=MvyFqNv7le)aJU+Ms&L&CoydIbcVW>PfmRghu9HFb zg_(3e?l}evd5Zf5fp^M=O_gpo40bbLKPf11q7kwEV9?k9vC~vtU=~L54ilrUuuRL< z{N*?%hNNT*_Nw_#bK-?KDXDAY$sF~g%s<{|Y50o<6bY@`KUA|OLV%hm_W8RsVniZu zdSTc8wTRYEfmvE&*I`{{(ZpvoHhUNE&U%e7{L$7~-AIN7_<4xs6anrdRH_(A)VegS+uyj3DaY+cTy5q3HVW zahJlvO@*k-$=@Ao=D{y;sf*aOY`djpx?NWnZIfI^L%|$ATA<(&3)^f4*-xrpT2RH# zOoVu+!z4N8pc@PBMu&M+BR6H19^x;WB)4AF&1Djv?7S z<@CjBK{h1240h28k91jBK08IvVpoU!z1$VNP<^N7^lak@zb}cKJrYbKgpmyYh3${g 
z;^fP13=21y8&UQ7D`vpuEzVJxrKhOf`qpNzHI^?3ml|#W-s7y{qVttMlIqD6&C${A z=hdQxKAS^%RH!TKdLbITTUfij@1Z!wToPidzWk?8zhxbB(l}K|V7p6uCV)~X+pFTnW^DK@EWU=U1kY4d*zxHL`5@hOR8Eb(1*Rx}9shUm>F zHTA6{k%d~jRB?tAe%z_RcNhqUFTqrX0iThC!(R#z{&Xlp4E-1*WOs)5cTKO52t@A{ zQR@m1`I%|*O1m_vFt#4m5zFutpI3?nkdvs-HNnRnz-<23mvH3q7&E2RI)9pC9q9k2e(opA@q%Jq#Ea14(9v_q{KboP=;TmO44?2$Bju50c za;o?FIBISKO0CsQjyc+VnvhiGUH;U@qi;F;vOEKdO-@2Q!ZoQEYX0&wf!dtIJDK#; zzC5hTm0@ZO_m4lQM{^RM{i?k)qT?K}yv>uk@ zx4Y5Y_vmdMobQyI>zItON}^~K(%KGee)1!a;wvK)Qi*ugk)K+Ej~IYPp6{rluM6Mw0%GHij;>BKfq;6ErVF}1 z6jk4@%m_w(L;fJ|I>@*G zR>$G}b@7t6#}i}_U!Y7luI@Zkel%fjJuC;P}^!`>`hLpkkhgVSb4 zSV!a+IBkdbcKAK7VtzQqHztTU#)XaSr);=-Jm69t9k)@aeRx|B#x5s=4zjPzEW5$6 zSqDd{Gd`I^vk<=cr6~q~$O{_y+Ilz~r8Vr)CuP{>LR#T{BpN2&anJnGUdv(HnFi59 zlIzc!e}0^9)q-;&b)cEfp9=jC**I*Ne;h)hav}J$3-rY$8f@i-7{2*L&gc3ftDr1w z<_d%g3WVe&dKg!^wAdUDatl^Wg(f@LV9SUoslu3>flzO+-S=Yw)I9|5{XFC3EE8M3 z^z{v{&kCkLxxh&mL&CJ;4DBJ%+5|&C?uIN)EGKbX@9ehGfAcsdbgKAq^^dP>h0IO} zl+#GE!>eDmGx(NbKru(*00l&QKE}m z^WkE(Y~8YOUsb;%j4>rKz`EFOg_MCc|A=c9ng+*`lk*&C$AJ8F!;WX2OgX+(ql$$^ z?#zDvy91gQ+%2!It!Jj9%0yia{TbbN{n%mOsnkt$cDHk*jZ(9Ig<5*=j=b^WgNip$ z#^1>zoYeu&wpA|!MMdj;q$M` z+4^V^Vi(FQwf8g0p?^&oDbY)D-rb6pqRD6E8J*FPI%rkw)HT8`H}{{KoMYq6(YfVG ztL{dONPcQN+J7Zrvki-yV{}_>k{59+mhF<)E!WVXn zX4HhxJD|E#xwt?KDj$BkpOFV=tMSP0U)~tKq#m zP<55>Ao&1ISe(Ccj6wjg>wUB%QjOiOWOoY^IJ@P&&B3?hnByq)K|}@(n=O8w-N3oe z^Ch8!f42HQB|*Nx5>zl&e=Cp-}|fR=j^1$WaH@C5J0!4vD02dey|;^>bALZASgkHQuyOWCjulNZ^XoD}`h&?3}gRn+WF z2uPQ72?)|4ASo#&Dc#a>=#XxZE@=*Rq`SKtq`MoWyBogky`R3mcRxH2=Z`aItu<@b z#BXNJ#!;(Y4Ad;S6uNXC-)P3VM>7(5xiZi60Udv8inh)S1nRc{5!xJ1L2ETuH1&^- zw`GEmRCIQG(OXr0`mpWHUq_^1IA6+O%RW0#ULvCoF)xTk4qjV9VUzOC zYl?mqR;o-Ph&lCmb3g%Fy{QTLwK#+hwHJqWL4|?b(B+poHYVoS54M^wor_Wx$w+QF zpN#ol$4ekSJdq%j^nL**j_%3fy{mW;jUomd$J!-~wh*Ibgfd0i)lE+6*@N4->?aNn z%LxwTY0-o)PX+}@T5i<=Zr13+%t9z)a4gRXWTbcQ80|lD5qmAhTd=?1e!&|$ zUmH$-JW2ReLxhRyUgAxJghZdZ^WNccz1r2@^8isZF%HLUzT(?1l7A<)h{z=K3qKD? 
zx#Xtx4hCRkvjtWB%7vls(SC7wJosgD%vUDva%Umk6k5ZT8w%O74Z-lkT`!SzrE;1i zSXLZ=4-KGpyr&Tuu6}7T2VUZHK6l_bYM~1H(dz8SRDi^wx!yZN`qB`GVgDf7*{B$- z_$hsw!{;dIdIavFP4yT(!_m}MOu#|AeGkXe{jgoon1mt&CXrpXxAR(g6=4O@Xu7xG z)(r%Eb6E+Wv;Z$=HrK!D`EUtQ)xDb>cXBT*P`cs!>LN`P)N{A2b~Jzd<9+B;VgVHT z5>eY374(iM8k({S-OV1Xko2B`*soDDIGyH|NHS06Z-9<>WQ~N_lpVt!Q+2V8pa_&` zsxLx?BCSsE`_^5f9q(&G3)&}lrBlMPH6Hjnt<;W=r)i6vOz(TtJs)Av(T5V1o!!1# zbWwy_c~HQ>NfG=gBKk(FuHD-*o3T@p`mzIIP$7V&kY2Q-YBJU)&gW5tf+7R@;|z{z zc9NK+3l5Ue^lB12f1{NQ1#P*3wUwX#SX~?M@tpdQVKHgO>OMMrnR_|e69vkhsxpo9 zi9Ph9O?cW=15*v->HLDqr)hNEx%ie{>+QZ8w#pSs`D47em0ZAialh%{6H-#e@r+fR z!O^N_z-A!gn7j{mggfa^##P8)v;xeDvzvjVl!RPb-g+{6;AiF4bDWnnb*D*CqzyC2 zF5z7Y`_|W^x|uYBG(U9DUZCpSNMm@NFTI{Fnq+ZTJJLzfgK72b={kD6@>Y9m(q)6= zwDhWNFrUuzWa+gE1HG)yMof{$(RtPLSB(yqyC=~uY?w#^gN~ELY7{7C<+fTRs;7?id|MYR5;3A)8uGXpbcs~7kf@ET-G;tcr_t+o{o-9#1sCS} zZ|}5~H>lC-YLl2@15oHqc5o?zqpTB%6Jg!MqRdzK+#{H_sJ$;$ZlcTUBweAZ+tLthlZYvO0%^ok^-Zpj;+dBOlkmU%q>z_SqW;zoeQ`;#Qx#5DcCuK z<-MV?+>Grp3G`rE)Z?w|rVIkAOg7j-00W*mG+iJKs0;@#x^R#-%|NCEe^%2wNGJVR z+r>jA=95p{ue7QQaXUuQ-4z~E4Co3>HOHDwH4xi}+h4A&pPM%6B$9!Lyj;Ofy&3-*51-y7w*cYICA(vFu2{%lqA@kbkkNS@i6 zg}#DUPHpHPfNe_{oIG7HLH!zlZ2u)kcG6}9U`=Z3nGt&hK-< z0rzMPY10!oZ{n#=8&w&lQ}GAQ;v01kR2U}SnZyTd>y($>|L`+Z`1PQW2Wl%b!vVY+ z+W`kxS+;@5v{%lSmbeo&!FReFxX7wZr@&3O;~5|jV#}eqS%w6Z9q|$EbExkGhZGr4 zcquu=h14gQ-7Fpoh2;%-8rwIiu8-xSbh)0NpS(i}ehhpwT{L)fUYVPJ1Mwp5vM<#D zn)iv#$8uM5Hk9ZZ7Kg+Qc+}ZeM+kmiOI~}OzK_*uu0&&Yv{Ih|6_P|7wS7*T$n&ZD zrkt>6vM`ZFw*5ODTvF0A3hcz-iviEp;Xx_L4`L-%0Gpv+A&VP7zsF%{kZ#YiB?oYr zp8EyME?4_wbFHx7ZW)@cX>~30k+h%pMtR>??bU2fMuw!)%!-Z$6Q4nc+7kuTn4W-> zd0vj1vNQ(dkqcSs0#7XJ0XpG^Zk$jlD)sJq7%D4?rsZSCZ1Bib*DF51)xc`aL{ zy6Vqq;l4jG7dNQ%O1J+F&dRMZRC4Y8$-R^GH@6oIZ?6TmI;uS<2nCpQeh3X1P!Iv( z_D}kSxT~^|*s(cmx$Dhqd$hz`tqC;v2il2P#0B|q>F*K6c$k=RwO=K#Trh3vnm-E) z(PaH_4Fbx792maj8I0dZbJu9$QeikmMR^^4?%Tu<~sNze7Pjeaj)^{I(^u|cy;hqxo zPJmX?6B7;5KK;I~x zekF_dTk#6qH;}pFQqAH}P4Ii3R(ck-^ama`T3Rd|z9zmRl^#`w!@&Xi227Q0nUUql 
zjel0&y#4<4Gk_7s4b7D~GF9SJ(^-7DwHMX*DZXqPL78gb`{UD%BpiN=rCp#>ooFo& zdBMOpT1SDFo)rolY;l>Y+rRnkGdl>l8GEsb+}^Y00Lu?U!ru38#Poz{qxI8 z6q~qjb+7Josl`;5NZ)_^>}IJ+SZ@^>6chx{31H!?X&A3ZN)@sCV}*&dOMkdZJi!lT zz4+oXF>J0*K<@3tk(9{(rMrOsq6i_}qrtZi8%H{Ar+54K%-|Xt44aZq7iyfWTS6$FPVLF& zJ*+x<3M~?l##r6jcdyf{Mt>`Zd@z5!s#Pe+3a$@yRmAG(GgJZ*ewI&v{c7+bj4-N? zMw3fboF?&p@`z}Xwak1lTYZXKcxvo}b* zfy4UrIu6Tbq_&q|M#w1JxBv4NWTO{Uw`YeML(L-0@!im#F`Q67FGxl2?MBZ!R;z1I z$JG+7xkK<+=6eS1i``u*AT$Ls3F~}xRxPrT2U@g2~1!HD`m25TcnK zbvSIFL3{fLd^kwsM4+41x!wu_j2k-7&W>G_G)#V#2p!ehOGHtaB)RtQl5l|IdAMeK zNeJXXHpb^9hhtW}2d^J{c#y@Bl%nfX5TTpS0)oMczUpztO{>D+oj2`B=o9u$O|fQM zW`sUw3bN8hu>zZ z9rY#(F%j<73mEUTd#mT4$@`WTbLxrJs7;?=4bvTlK7A=L?o>hc5j2K(D*;O;!4D>- z>rbNyXmQ3UnMq!6@7t9kD5@oS(-848k>O>=PIE@hpE*6{&+G?*lm4fRG z0|Gu_+w-c2L=!+YAG7h-m7oVeXQ951!m!bTqFcQ-nkGkrW3wC*9C|}^U>P3!*h;bs z1P(sEY3#OV2W>>aJE7ug&jLT1zjD8F6tHSujw~r_i`sxe;J2}Ph~41ce+5PN5lN-^ zWTPRDKa6acv4$L!iGw3A5XKNimK={|ob zOO+OQpQ`d*8c|I)hYu0nyZ?KL?q>@8pN5fJ6ZZ=wP4y*JLdOdHoDHs8AI`FZkDovo z9}CXxt=@*Kz5_Lp2}9+?(ZZ%m!^R`dmWZ@L@}aO$VA6GSVnO9;M1E_xA|khxFBYrF z$DwuDjnri!(amSrEPm#94vc#9ELks9zmaDM=x?XPbI7-F!(cSA4@8%ql(PcJtJjDa zg?yrwlyzwvvzNi@NDFR9740p7yvlcipV#-^B^Y%J+JgD+ z-o;LPb;wC3jG$O=DC9xZ>pi1lMR|pAU&F6VRmMZCR?h~T>|t+d+JTwn@{{5~KNnp3<8r)+KL!u2svIHZ;2owD6+s03U3*@hi*iiWZp6KEm5WJxb#J=K(CkM{~oxR$` zym4-?*}$)EhAQEK`y4iXVGq?t4QNWcNCk90`i!6SliJvrHMx=k8R&jPLzOH!Upfc# z>xkpi-mxbzR+4_AB~J&{v3z2S2S#tZ?b(ixeW`dgDvbJPmY zCK_$bo(6g|o`tW~A=x>gw#&kYms{y6MJ2GA;&geX6HqB5cD1i&BfiJJ1VWj2iO1dDYd&cu1Re&5>6&%1S?-%(r#eazAF@lhS|{%tbDCc?2=U8a8) zbbVJ1A42myi3Abz?8SG7e$@eP#1icjf4CE|9mXE(-IXzBeZQi0+_o!jjV}ykLion> z0XdVVo%MB0+;zu3e9a-#$`0__(x#`H@XWV&s%{dnALpT#7xq@~y^=fPQ@6Z<3>sMI z9@|9gg8J{nk?2Yi2*2@aYWU+e;md0%;7eVN$9N@c2f;`WH&N`<)_Y`_ zY%_4nb33c$Z&B5>?$!*7mrU0Myv#e89?v?-25jG?xy^OlYck%PHCZuWj)@ZlNJN$( zi$|B67L-Ezq(1dnn)hBaw5B)Cuv}zWB3G)tPYKTR`gtwQmEn%zTR8!rJdGCo?932e zxgD}XBdQN0;K3e#RRRS7>>ow*c7Wms%T%mZx2cY4Z58!pvTUu-%>o3b*=81+>&?Zu z76^44Vq`tx!z-@zx3SXD<}1}FTdqXGN+71mBT~HQZGnlKo{vHdbqrqnt(zUTZ@#z; 
zw3}P2+xUz8c)72%6=Es#yj=%a9VW*EMaAo?e0sv|!mdevWLj+bJY3rKRmx2>bkSe=@AoKJaWNC#5gtciKVt;a)o@;cqBlK-1|>00JP6rPm|Zgn2*$K z)fmIobvNA`prVzm4Jin^?)uGHiUr1T93c19Xr$GlcE{@RUC6DR^z;D3v!H{ZfNsd zORMYYBM9YR^>*)NPPtAt#8HU((=>I0R|GIGl__W``KM$E@^d7{IVLYVs*-A|Z12e-!5xSF3eT@ogb)E>MVup9whyuBoF%6Q{ols< z4P3FYl56VK8ry|U2;y9&sHr^9-H}q4gchsBddnzGkhz#_&SKjtOB1;~8!m)I2xq0S zO)c@b?1XiDNr}RTtmaBr6d7ueiFr3yk3E0s_kAE!(OLtVS0jCGzDTw2g^mCKjyq9U z6T;91Phh6iJU82ry1J`9C>8DO%$Aa>?XHS9Xe4pYw=%G(;Y=R?S}ihoqutsniF(OT^dWhP48{BnheDGZ)P_ z6nC$VTaMR*wST@Vym+REjjvL2O-QdL!7W{F7av&Df& z#G!eU+*;ICMY}my$k=&J*q;fwo;pv{f}PxWR^Voxz)YBt7Q@xUaJr*L-k@w`_8n0s zMPqfWgLyD`{%8bwj?29S(U|w85&&tS8g(0Q`>A zwTDCKPW6(Sg9G_D_-ODpCHh4_KV53PvqP~DYO?a8hbEAaGZjUz0F1&G2@?PT3OjX? zj6O~Zv!Mi6A1BQ=4Rtg-y(l-%{!(=vVb2HzpFC_X)H+fQCmOFaS}I6D{=zDBgoj~` zRx7Fs!Bl-N;+upB7+&AaTL5tuwLrSBqnv`Ub(w1ZKT z87&s|fv%VWxFa*bt34tbFtd55?I@{6on==aNzi0dY;7CbZ)e*9=k;{4koxJk^(F4} zYXSdQK`BjwrttCJe#cdH$sdK{>wq5Bz9&B5HjTR^3@I685PSwK^V*axPl;B+#yFUV zTIMsW(Ew6gl{Z^B3>mS*$rA@c>e_h9YgI|Ej)jGilbd@yE@5Hi<_}(PQdk?T7^Kpk z7w(@pO*d7w3tmfWuE$lwb68cDJ=l@qY8B|+8KUCg_|HN1f2X;{*oQ=<&AA_iDdrdtr1VkX%%T+@9U-Rp@cgn|A=PJVPsR^hEDCL?Yv1TK=?IFGb5YD>RO&3Eu3F_ z??*!7YDbcbOY-vD>)b7qgYE>B;f|X5f_SaY;)bNXXnn5Gf?J>Ek=960gXhFTU`&y0 zwi$urldd<7a*6)z!BT%xQQ_9w=C;6az-Q`Zru$LeTgrIvrpHexjKMZI20aD~$PeKz zFMpmD?WT20_x6Q6w<^E9VSZ{9hSFZbdOK|BYnVIC6V!EQU85xWx&2o#p$fyfUb_*Y zS%H14)3=|49V_;Ns{q!PG(6J5Tn+T{Vo+c8J8HZsuPgCgy0>IEA=qg#1CwQ%#qnRd zYc5b-0k^H<$9|k15bzkw9WTsdM`Y?g-cw`sopC?uk2&aCGzSde?WonL_0HZspukW? 
zx9!5?nRCtp(3Fo+pXUNlS;yj86rrqoQ)Uqr0JJ%!uQr-%OQ(bZ5M(bdPCP{dg);Xj zviBdETRdoAv9mYMx|NL_*FFyof_Rv~`RUBI_7g8Sj>|9^V|*SIPC#u#GjQqq-K{^h zRedO-1~6DqUK>fiaiyZL4KA@UUTtb!b|cF!Y-^J8NMv7IXCuxUtU7GfF>yuXy7te2 zq6`2fa%*=-`|}SbW@G>e7xi(LLR)(#({!u##v}+~*Ympz;Y%8P0gdiBsYkU6!6D6C zi-CUWgZZk?_BDgY(vdv?kN)tb^K8IFja{ev=ltSGXslq7>iW-jK2AUia zf|iFVd8yVj!4fvP=5l0P%t)eraF?f~aXxU4ttzEpUjpp%b^!>dax6LKdG2u!lwPey z5c!U>&*ggF1y3+Vq}U*j;{pJQ{d$#$?(pQs@qznsV`Lgv`JtMQK*9{>pe9+^N{vIV z{eZ93rMWmt@1(ocZ+R1iE~)Q`Y_vxH;pm(nxz#EGrX8v?%Jrvw%f#Oe8kQ!b4p%LA zPK&scmmHgcqpj&N_5o}JWv@1S3bv`m(}n#jB-%Rn7W5gwVjfV$dF>V2%fQIBm;$Xe z4iJ{}xTwRX} zblpW$pbnz|UnBDeYb?Rfu@MM1Ewn0)4Kd;X%sOIlNtuXZrJ!4aph<$Ds@iMfT`%S{ zkvSkgudkFE66HWAHbo0m+V`I52^#%Io6c!JDzJ!ZN2G6BQK=a81`T{Ri8_O~k zCUpt;+f;Xahv!gDC>GhDk_p0A4kOIY$k7l|hFhJ$F>Uykk%F$DVy&ZZ8w`{~<=_Ti zG5Jh$0Q5&>lA74^&#J10KrNi_{2HBEU_l;PK2Y-4{r&vVuY`WoK~Rwmb`d-%4;N1r zB@KI;DDtsVP&`-g{uWX*{FIEP0F=D}b=YE)1;AT)y1P!n7EOC}aP8h_>HKEYY`c-e zqNO5Ig(CKaK>e9d$MQdr4z>A8Y(`a?RTAP_-6> z7SP*pLjoUu6g!4%fo61`e02I~-6Ave7nC+SIOr}{`+hvxs?P3wzV^#wkzyju%30^Uut2b zaZiVCzQ7_;Wj;5UTOVX#S01q6&=3(eu8`NzfJ@S(@_dvg49+^3(9kJJy1j+0lyp&5 zD*dqTBNiopQRdR<*9+nNPW(+BkDY~P`&nEWD$n~V3RJ3KEE!o3YYNXV&rgf=Bkegi z$K&ED3IoPV?b(!N`6FJ^t3%4|?T-4WpBLEh8};Ob2mFitKO=YjUUHtdu;9-l;(!%^ zats=}ACI+9v#S~~AW42|LrI4Y0)=0Y8tdc^^PLX~V3}krun2|&5u{PZWtmm`i zPe9|_&h4X<1kr$L|ECA-+8dzu0yWu@%M^se<@_s#EsE(Y8aj#XNxM8kt2?V=hJKF{ z2~b&;SE~5V!EX1tCba%DzaDTe!epeL%Y9Oaf&5;}jt*a`%sm|)xQbh+Z+nW1arFBL zGB<+^?ny(LO(skG&Yw3Vq1GSTU@>9bDpADEB!|PD zivcqf@j?WS!ZwN)^4;hKXp5ah3h$1^wkZUxI8`w~1MA&G?USohVRP}VvxpVI+WzOs z5PA)cE;p3CkR_a0baD3*2f-RAvB!KYgkAHq#gxsRdJ(W`zc0f68C=s9!<=<`$4l38 z9*#jRql9^qaF_QKS^xuNAJy17z0*inHJ>Xv3lD#ut7}=3fV~&`DxZN@-A3LfL{$@- z@u#;P_OX#6EqlCSQ3~Q3ODcr9>4$x)Pc_cf)HN|uLcyn zoQ_k>7p26ob$RZBnwU)tk=OP&<)PwrjbZIUVWT*sz{R^+@cN|Ru0?j#JQVo}dw`w} zsH&K!vuKEZNc7s>aU2`d9$mVci;6*el^vD=-u{GmFuSpyy`pJqaT_}i7WwnAzXtZy z*OPaCVggrcp)Yo3WZQw=b?Lm*pzj+_X`!ea15;-Xty{er3Z@Hr6s_Kkz!QJ)%rs9> 
z??55y?WT3O0-5lqY6l=xImZ&k+CaU$VEm3!*E#^@831=Aczv#GG*IIIoC-`#tWTWpG8*bDYdOc0 zhqlkt^x&aFeDg+_DM25v2m`PH6{Z;Uh%vT0wz#*xn}B-2owu>P01I?mtDBS2bmILk z&2;aly3GF0z90ZR5!yJs7}3n+d0pV2qZ0q+%^%Uo2oq5d)0d~rDR?`tkgufDrv-(rnCNQp!=;uTwHdk}t@CNy!~ix7B>CZ#7h91!;Im>X1pA0f&^)lMhY@YrWyIH#O+xJNLLp44UpFeZ zu0_y}cfx!111aV#GFb?WU#fi+qd9dh-C>aiD;G;pP%tmA=t1*(*G?CzeOL($<+>|&MXzmY znK@^GWh3W)qLL7d@qQW^Y2#yPl*HlZYAP_sn4#ohNzrGqSNb|4B7!;6lu%#AUo6k- z>OUG|LmDZ+0p+QalQcjX|H@WrT1A;rE_h~xI0y(FY-`2>L5fYx)Of*?>>S9%JnH}n zazK07U)@Ks$2$;^{!>ldq<6POM3}J@D9A|!H9B2L6jP=}V=h4NgE8gh=QvKfj>nxX zi$gwtZcpXBFE>q#D3H9MJuk}E)*p~Xjy_EemCSncbo&=&k^!b;Z_7q>sJFOe{mV^}zR#h@E+7iZ{DJ-xJS=|lwTlGo1T z!^HHcP%9NI!n?WM0MQ`X4A}|bGEcv=iUnL#2r>8lZYJo>L|FW#@&|GNfJMGu*$BLQ z7i_Em6F&4iBc$ob10;rxS)%_)RFK_x@^`-SFK;IY?C3`YQ z>WC^McuV$j>~bdl*Ih-lVi92l8O+lG1TgHpPFbKmh2m&Nn2f9 zSFgY806wsnQ7N)+O@v8k4n%QK#t-KcQ-1#Dt=_MoxY1)e<}4gwDQ>6t`p*o1|L+e5 zQ79{t99fxhPmM2mR58vR;(XAhW*o>Ppv#O(G?mFU8m6`VuQBQ_uNuY(ur+xMmJU;I zXTP+=8XvcEH_-p+3j&MTTMPG~#0atd*BG)*s*Z25RHT$oBsAZw_negv`6B%0?|>;w z(3XwJH%8=q{ezgliv~Wxrvd)O^h`|}&slqVj`M?!9Dj{^tT&iEw(Hj?)5ASmUIkE8 zf&|$gsrmgMl|ML(C?DyN$1~?v+Pvj|zHgKlyrwK!2#y}&!>l3-2*WjK{jasyh-6go zrE<(`B-f-Jp!PPNRWAWAvb`!IA`M-z8o9A;i9af{sAd2&^mzVNw7~RCSA%+%NV^X$ zB6^Z|8yh2QHq9GF5fh=?N;t)8X3$i5GHh!*fAJXqJJv6G^Ts*;oTIi~HKz|LBZs(* zy6{jR3k@Upe^hc(xyI1W!T!`gO8b`!clm&u?`CisQP9@XlZCZAv|STQ$bt+nnpHrU zT3#2%!!f9Tl>A>5Kwxksn>so+?z|bMb{|VrGg&%w#?~`2P~+ z8cK?7G+SNAp1xgZY~VR3WcW?vwmsD_9aWvNP&1YyL7_0{0aStiedM2GB?m+@IHwH3 zy$d97Ne8a4fT_WH+o*lp-;Y&XD>IeLj+%j+`*(7ttDoM8YN;lVI69=5T94Rsi~V>3 zU8$rXVjl35EmbeKo|D!!q}l6V(b*G-rX@hVo?Xk|aD*I~?(Q&j@rRJIER=gH1(L=N z;t-_XH?Z@dY2sJ463jNn$wBTs@ znFBL3%O#JRMv#tq`jujZH`sE(`SE3KW~EeBt^(_qf{lLxj8h-8JYMnwq>w&85-sgB&d!ISU*{QinF#<@5&}z1XJoo2UQ9 zxm{6%3N~;MCxq+n>Pd|Oi|OJ?^0ycnjvtUJza<)Z9~#q_MMQh~-&7!nYc7&nt%=Fm zapyY=)tRU(;p5U@(3PqCz5eC_Ki*?H$X$qtsIpQalT!VcSe(U5_vJ-S znuL~*L<5=#s$hc8t;VyaSqdbRZB#TF`0?L`Wk*Q|&vw0g8+kZef2~x0rH%{D)?1^m z*wdFIvS=CBBVv*t`CrTnVNUzLmX^0b`n^%sq4v7!m3gZXCdXVfHC7A| 
z*>@2+9ebL1IXN{oCf+k8d#eW!d|?r{v@pZ!-Dvhz|1SmRCnBD(#0omdpD>Z*fLK|3 zwH5+2h2C>_2|fOo-b<`m3d9F!p*4m`C`tdE4op;=s62~E|IFUvUq>--<({6T%dd4* zpxK~oqRnRN%J<5zWXED9`X&Ak7m?`GY$#P3KG?0!0%;Vf^5+WVL`!!ALpO(Fi*iO6 zVA&w1|05S`A_f)q64^@!iM$aPUqv(}ZBdNy0BQPe7J#RflWJk+C?}TT`(MT>1G=Dc z0hG*Ia``iV!-SBCaM_f(`(3P2o5kVzhFbs%rpZ@jzGdt<%f74wrptH#u1o&u@%eB` z{X6ls-pZ+$(+FQBTV?No$C}0>6fM>dn(tT=fRCqosPRd6oRGh zQkEK&HTbhenDrHol1Oky*T-)!u{eX>@Me+Sj){4%JshtE(6KBPD1wNZd#}&MysLrN zV2vC>weIcK-*#+Do*pXhnlV`^cUBMsU6zDPY067vjm#X|69@6|0crmKk4mz{V4~*% z+gf?l8XKN5v%0+l7-PVh<$j9Za;@b{{deg^N-0`4jgp^5Q!;;upz7YUsuL8>6lMxt zgUb!Mt96YADoB2hGC6gl-8d;VMrGfk5XD_+|ChqX{LO^ehaCbBrARX)xgIRNO&H2W zDoJcV7#(SdVPxF0prN7ez_f4p96PYi#t5|V|KG>P0pOY0nR0G!ZX~hHud`5N9loU64O7x!A78L;{WHJ9wE=K~Fam0xG9m~wl_PRC&>B#j>g9Pfr&f2uEC*s5IUI_?O%4YEw_UU6(5Ocrr#(DYLk zCjX&?nr;y34?0>@X#PAnqW$yNrD&mBiLod;umPvTa+-J?X#H!3Vgh*DtlLPUU<_f6 zRJcE+Wz=!~tmZp+N=~L*&J6Ksp4Y3ba_UQ8-_v{k=1T#HX|YkN`Kybh0Ax5x{-1*| zUGP2NN!m5YblX>UmNX{l+7Z$@7k#W)bHs`p{2KX&VT?-p*@0Jw-?i;nHjc;3A9 zQLK2d?LBkT@!iO!q&G2AD%V6b8J>*M^EPTP~s2%yQKfwcUE{a9kk&@aQTJ?cN>c~6xn#dAka;9Hre z2{gryn-m?@eh3feXmAfh2m!mBY`Jve0i9pY* zY(T7M%%}cWtP=|?jVE|6yAF!Dlll|w|B;v?mEc%{-F=Iu$~t`B>we@&S&b!oN|mMG zGShlzd#J?O1xi+<-S*Nb7wZvKF5HC^e? 
z9O&k<>Lv9zvf1A`=QAk&yU4Fmc`oV`gOq+Aeb~WTE{l6<$<+nFr^@2bc$MFLWnZ6y zuP%v?oLwP}OkO+;^JlJVQD<;qEiE&xd6w_DbL$G02iwda63ZKQjf?Nv4=!PSzG`#k zI$0<(X(h3Y7}?wP{j-sAW@fTv9yX=6tO@;O&!++h;F^YQ6}3x0V7kP2w)^^Rr4jYo zXR{Y6{87>a+$F4kwPPK>6aX34R@zP;4->ncA~=lzds0{w+%El+{K6Fk+^k=>sLx0N z>#$SPD}zng|3kjh>&#}yXm|DLetWlThVn$>2;0u|Z_(@so% zla?|v&7XnWieH5$v*oa(IaMh|3S3W)=1uX{b|w-1HXaHJ+tb&58}^E42l|tVHUF`> zy4;dELpIrlU)&!m?~-?8n{*vU4lAe;D*c#m6=ib`?(8FJ-kNL=DUKz+1N|d-c5Lq~ zQM}ff&Jz;0>G8fi1U>};qlFqfcq`{Hc{Em7?L#<;9g|rbb!+ODLOQpVHHwb@ib9!2 zJ;gaPru}+}Wj~Q{td7T1z7JwyvQ_ST7yr3?w5K|Q2HrO#jj3nh<@t9u9B(s)B?>G5 zEE9?WM=mLJNa%8CIO49H%)Qonb}wzgQs#ej<=wM+bDf@GljUTU0o(aqwt{+2vDJUZ zm1M9Q4(Qyv4TqhY!8+{+le6Hm>dNC|ZzS_fscp0;pIZUMLR`Tf1m2u`=9o5taG86C>e4f1Yn_p6S`-c>2 zk+!}`x(jP-u!V1ZM?cGbz3uw0LxppRR!i@SsQ1fWm^s=z{@L88y^cIv>Zs^{^~oaQ zX2@WmLC<6a<6dEiia-^UiZPkZLtKqb-jHJnTNdy#nyl7@;l$8?j_9IQ6wtAF zIVbhP^&z9E3;dX2}H>amO zHj7tLDu%|L3)~8sOz%YQB4`Ok4H6s0g? z8Ws;9bw`a=DS`RCMZlU%1|dM(;5nUtd(4;c~&|!ow%^aL+##B@U^O4uUvSQ zJ7xJ-FgPh=;>vGD&kqvF99m*VX3!p28!Zh~IMsvc2l;!}$O1E-s5jTO7pna>6u0uT zzj1de8~-8JrIIo-G<03YZ@gw&AaDM{TRz>(N{&BRvR93lY|<>Vb-Vm!^YA?3A|^OC zrRF~#$*NdvKWiiY`(A-^MYqQKY+LrU@^HO0aVI3VbYRVGN-H2frPv|6J(mJcG6=L01^@a6;fUzE`s8+kU{)beb6- zrL8$=KwA5OmDMUm#$>4MA2Xnet<7rU|9N?-TJ*t=r%1{kpc3FdY{Tit;=MR$& z&VghO)6HNI`MGE(Hi{{9?nh_;&qcC-RqV~%C@O9eIsCV+Cy5(F{%Vgm=+7k%W5D=9 z?FaCK5s%8%=8v2MOQ~j#uChFYsAIn_3%=xjJk?l&|HHr6-!RtlxYd~kuK6qFJhf94x~)U48#rXdB^RI$|Y3_2eGwYhsU0;E{J`U_g9C1W&-+jWVgE_4dIj*`&xygBE|#D&1#jg%0(q5FDDBqhc;$BK6r4 zI4%kQOxz)~YjAg$`~1QHeZebf_-E$xHf>MEe!Dp9vT*CIbI)K4v$3T~0FiRwv<=F| zLO>F%WF$t&l*^ytb#Z`|-?|HtK96q`xBlF`j2Yo4NU3N5Wv?MsF6io=Ekp(KU+Y|~ zAy$5-P5uH=y=>49S=nRyLtoY)k}wD^|Md7%wN=-PE!Cpw{9t0JQiZcv1jV@a)Ae0z z-dBhI$%hPzadk!HJJHcig`~Ri|EwPAT+hDfi>jy>Fz1c~P|3UNQppN1=J zRN>)|K1E9;cr48JfIJz~f)y_O^YY){ew*2s*ZfCc(hp1)*)FrkWuh;ZK2hLu4z~hw z+cYIp2(6m9B!vDbWOjehWaq?T?&-NtZRw;FS=0F+f2i!i$exu{opqb%<2-XMYM;|M z=<>jR#XLlFsagZjw?CtwxW@Rht4d*ihekef+P`yo$F59dV;rrhygHZ-z<+I97- 
z=zn(6{WB9Yc4o}oT(i^MvYGii&v@qim=OUS31BYsWut1p&b?v@2Jt2j6PVtU9`37N zm+a5f)M-nO{v!w<7`lh%ocoDeO=_=@BUMT78;@TAAxxEqtv^_lDe^(taa;Sje01rM zCauvFU-%`Ylj(pd;iTN)k7X~b0e0gnyKzgc7vyj|N00dSzSWA~#9_j$ht;55yd~b+ zVLtq@cLe|b!EuXDW&uy7viD-6gxs=fw{qv`Nn2{5_ z-yG}Nra+%{8hGYdI|v^_HveO(x@6_wy|lUC3}uaz><8y~-)xTZTuv#dYq?&1^6K9! zsqQd#fO?u67`g8icoqjItz;TSq?MRBXtTREPF`BoV(BjYi6C?xBuxz2<5|Q793rm@ z=zc}zJ~uvpo02g{r5|LyxS6RO%C1=S*efb8EtR{~_lm3`-%;iLM;vQaSS%~d^|!_ zm#@#TH9E$Mmrjt!FPgJ^In^PTM&SDk>^X-@#jO?#S^f(1!mIz+-F1I8nKo@*TwMjR z04maUrAZfQ0tQ%$xFEfn5SsK7I!MQrB288hBGpAYA#@1E5HK&j2>}8LA<_vQLJ>mX zd-fby_iy;l$vOFLp1G%7bIsgOCfzyW_KY!S;1`jKuy}XS%}(6`PX5%aXxpW+_!(#) zUDz-Rb!G4Ey|OtWI81NP8V{QRHs%hv6GB&L?hvRrzqQ}(v*$M`RW-;fVw@5-!dtiE zpptJvRJk@OQ@K}dZtdT{ute!s3B-seQm!4+x2Ja*2n+SM>iz>lp6&~``VE|g1q0lT ztYn@A9`hWJDZ~6&!V=ruq5@Z~ZUyK)GY$~MZPyS{&H>kU2>#|js&BbyN{TkXweXxH z%wfw)fxcl+6nC*3w6wDIFUl_-KwhlXWENO zY(>h$`?{n8*7V6+^6jy9YS{{#%9%G!1E#U&=BCA(7*nTxR8PN0%jo!uZ~8SD$&Ng9 z!%)tg^zQ^9@_w3~gOH85xO(60$>BA3OnK#jJ&a2XcgiMqE@{Ie-O z)Q(W1ECLAMk#d2LcYhS3h&o~5G%2{lO^{;t#;z~B$Kvq9_iHWq$F6?2%M;q%*uKx; zr5I8(n&y$OcWIZ#N`hm?5Q<=Mt`g9ced!R@}!YBV=kW2gzHdp2k@Mcl<#HwR%S~_y2 zeOkT5pVEkN=BR68#7DQer3N0YOzcklS$!~h(D9m!s|U?-vCduW_qxs>z+#&BPbcQg z9xIkUo22ZbxKPQlA;!m%cPYmu3DCwB(B=wgJ23=-bYSiqNDrQMo;AuGb8c2cf|CYC z!18lvUYfi+k(Ds0nb5mJtMyUiXdhCYx^r+$uD7TJlu6pPW1|gYddhnX6La?mx-T^= z{1456cO^nHMlG2Y&t%Az1EH?270sw2U*%EZ-ACpdh`jBJRXz&qOdfS_kQ5bkZFT(k zbi;xvbH7jgjh+X`uz!GGF@sXYu*;8V7tvAR%^Ql8A0_AIOw|9*sh7VyeL}Ze(LgkN z`rPRodoxHk_|*ItZY(jOI|lO8Oo|#uZ8lQOci)+SRXQH;sN4EH&LyEAIX-^)$cZ>g$}OkL#a=HuL)>#bVmS{Mzu3HR z+1I5tqoEzK&vLyemrn9RhnK}C70UQrzaA?OpeP4~)`CB_*fk8+$+G!tFy`Vl0Jkdt zWj_(~pUjifohGwx4u9UH&_=NB8A=x7#3HMg3czUKD(cqjiMxB0ibz@7*6sdG_XLQ zb>xydml8T)R@)zD4FCgG4CqSuM(&YG4Y@)0GjW39JI_wuk3t*JclrRDIWttz zP7t{&|1!|D5o2MnK}M6R1n=k*CdJ!5esDz~$J9n)*Fh+>(@|L|yYGCB|lmPN@A({bEKU+lWpj1ChKH-#5+pbPXT1!s*~^K%R*Ld-)xmglz*voxk@v=U?Nn7k4RqyloiIAf@Jy+B 
zTOpf)eu-tA*l|Z!j^5mW!*;qhB~qT+p-r1HMcD+}kXo+>e%r$K>&EaJ3pdI|=B*^)8ie+h_rURmfy5)r#Y2{&P)K!D;o8b2kQxXIwsMSV8dz3371&D92WF%rkS&iNfFl#aeOIHuap zkH_@lMX5>eV4l#x?7{g}j!b4)Twm?_Ds+QPATQow`ks0#JjH3Y_KZwUmZ=&P!e=^W zY-<(Jxf0=9>+bIuji{8WkX%m1y$-nsP4;re-k)KaAlivXk+8+{Osv&+P9temwL> zgST-=LWz?{ved{id1B^EF8Q!UMg1xrhQLib$@DF>xzz`2OOG}po3W_e zBH|cv(v9&TR0}bhcq%kvo);*a>1FTw99(c>{gkR(8#`}oo^%*dwvQ;-liyqbTVA^; zWo}CZh2wax1SvS=LDhWV&Gi>(3aH)P2AZlnI0m100R#hZg!feFw*e6|yB720Msi8_T*sa%5_s)CqjCAH12csiK3~w{Pmkwcrp8k# zu~+Hvm`gz&Pj27(V&wStWY-?>k^h4aHqY-)Zr^@wSnJQ`8k+gAv0ss(pb)^cr+5kc zC;0w1GgZ_5v&@EBwEUW)eHoP8&ngHxs=>-q(bI-PkDrUY|HQ&F$cJ3V60p|M_DIZ$ zUM8r2T|bvF$^j)bkk%)wL#-?QAdpP7)HgE-u;gg;N34$maB3#}p`^>u{$WZM7x$8U zu&(5jPNS&Fqmih^8Pk0m_ZJVaagq&dg9>cC<5Sn&Ne9D){q4(#Vsv_LKZ&b;T}8!p zS1wjo*ZI^v=1Ks|5lOR?<*pdKNH6iy$$11BU`{HcR#DQ2ZZDLrGfZ_hMX;3wEOCIi zgCUSY)@3a|yvOzW+(Z&ISBDFlGgW^Ji7t3fscyP)Ds$|Un{EeHgEzZ(#G$6KYd+^| zl)=YPJK>ujKZ+GlW82&DI8(WwFo|tUB9(Vm&aTZ62Ee$rJ-N@m`iXG*{XXA13EjX|M*;7N_?mWGQHz1OnRi{k zt!qcy1HutKt*sjY6QQpbv}8u!^)T<5ab#XXziRPAiXFGCi)#j?=rJ}}d|B}l2}3|m z{bpUzGjD$nE;n3Y2ujl`aNw-3qSUu`)#_MUCbY9u1y#Lcin&%;wtk_U?KQh8a&J!} zU_5j^qIZhZFpCbQtR7DJ2GOmJAi?3`6%|DV&s}!|jWJ{i)z7SF>CodHn0`OCU-jPT zo7+AGPlH{3m00g&_s&NzQYX?qB{Q|=BD>}mlI+T0Kf^AFoF9vvn(72UE84jjD(3Eo z&I6@zt@fD6`6_0Gl3->&W|jRoZkMXMgOCjJJS%sF=4CMH-=OYZu=}k4(5Ky&*F-q= zUaGC=FhQD)SvoN~vBTI-2pH;JiF)=Z#CvGj^qZ;m18l~~(3Vp(X>nW8?Ui*3s-UiT zKY$O%z}0B12gmX(udeR+)y8n<77EBLd5I`}G?S)!pIWW*E}Qm4BV@OCi?tm~cTn|v z2|+MqW+>-tNMHF|O~dY3a{-Q6cy+q50g{z*T2jg727;OK+^>;K>r>=~&E^npGi`l2 zA~?Q%YR{qo)9bLYG2({&tr?5E<>p_1kzW6ICo(8ox7{7ZD7deB3IdhMtiUr7@mf_* zb3TC}UV9L!c=4ucQU^mzN%0_vOAaBm<*SgUm0CLC%?r7&E|6>*z<>hE%jS(k8jsVM zdz%0Xx3VAj)9)36BjDnIdcNUoWYx=H#Dh#o2KY#sqw{-YIkUlPNNN-nLtG?n;nSx@eK~^QV5k`6+8{6AgGJc$eKmO1cG~iP)Uf z&u9y$xm_)BxxUb@%7q6X&f|+6dwG$TxHUtPWd)Ia2VeS1rEcn$s4?Mnw3Dl>dhx_{ zoqpAZ*XKWqEL|ICQ2I!i^Ft{8qlEjH>D|qtx2Mqr%eMW)AxtX4B#ysEzjP@uZ|bRb z-d{3y0xwdwYVqv4id@sRw$UjCTKAv>Y(581a!2%cv|u@2OxOPmZ@dH}3}jn*q;V%< 
z3%^hLBfD3;8tMF?JECw1xH@Vf%~y6DjFhQsMCUB1q)2^pt(Ivdm3FQeer7IO_I|`# zL=xc45OA|hx>3J2pe;m~+igoMcY;?A(iT+f zWj<)pIH^KdrlR5QD0P9%fiZ1L2zu(iwzKK&+et;pu*a?59o82op9Jq8<4xQ18rg6| ze9}o|)|S7eE!u*3uNO1wsdS00^5M^&V<7yY{=0Eu#3zN!eBYM^waq$4J{-}W%Y`$( zkBnom7-K@16ITCo!Wsg(9*1QG>j8DZsT&lF;L+=$XAcO*q#(8AXMP z7P}eCWwMc^uO`hxQHA$aO`<+&X&4yK<10Kgo@ZA0XxxZ9+Rc0y9>ko_>Wx(5ky8!( z<`{V|jBZhGvMsDE;n7L;DWL8;49xCZv8aZ856050sIx75Yhm<|$~FIkhR!_$OnBW3 zPr(e2vC+2^socHDun@CDDcC3;lk6&Aq`d59Nola6?88t9`ovKPLj;*WN`LGyczk{} zh-_uGc7p1mWpC^w=X~Dc`?gUxkB_Us(A`G3>WAU?n1kqFb%gq@uDvL<`A}GL*?HXE zFhC)4+Vg{7j9P`FTTnj?C&Ch<35#^0-t-4*GpYazOK;eRv3dq=czO~7dc{7rVn!PWKH3? zw>ve5(ZnQd5#LY#EOES9fjnOP<(z4}Na!FYXJcMu3zXoT;oTSzJ|fwWwvZX>7O=h8 zFxvSI#e*0HHOMHfbG8s9TZX77x><@myOu{mAtN;Z$)F>s(=J%NbMH0TWBrIG1O0`r zcCxEShCHhg*TKC{jJA>WB}wmOeqNFQ literal 0 HcmV?d00001 From 0edd9532586b8573cce3ce7594291b11d7afccf0 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 16:07:38 +1100 Subject: [PATCH 0054/1088] Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6598592f73..f879fecdfa 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,8 @@ - +
    + ## 80% faster 50% less memory local QLoRA finetuning * Manual autograd engine - hand derived backprop steps. From 28cc035fba45c7daa16f8b37d435c69a89346ac5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 16:07:53 +1100 Subject: [PATCH 0055/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f879fecdfa..677fbe06bf 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
    - +
    From c2197c221052855e9c00cef1ea7494733dd9535a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 17:50:40 +1100 Subject: [PATCH 0056/1088] Update README.md --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 677fbe06bf..f6e1817fa1 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ * Manual autograd engine - hand derived backprop steps. * QLoRA / LoRA 80% faster, 50% less memory. * All kernels written in OpenAI's Triton language. -* 0% loss in accuracy. -* No change of hardware necessary. Supports Tesla T4, RTX 20, 30, 40 series, A100, H100s -* Flash Attention support +* 0% loss in accuracy - no approximation methods - all exact. +* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. CUDA 7.5+. Tesla T4, RTX 20, 30, 40 series, A100, H100s +* Flash Attention support via Xformers. * Supports 4bit and 16bit LoRA finetuning. * Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** * Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! @@ -131,3 +131,5 @@ trainer = .... Use Huggingface's Trainer and dataset loading 2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. + +3. If it doesn't install - maybe try updating `pip`. From 071891078955e13189e13c290d46c39877211300 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 19:54:04 +1100 Subject: [PATCH 0057/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f6e1817fa1..5401d1872b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
    - +
    From 8dae2ee05049c3ca6a86e257b3230154382bf509 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 1 Dec 2023 21:54:31 +1100 Subject: [PATCH 0058/1088] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5401d1872b..5d8355074f 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ * Flash Attention support via Xformers. * Supports 4bit and 16bit LoRA finetuning. * Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** -* Check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! +* Open source version trains 5x faster or you can check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!
    From 3aa16bb452ab82d7a2b2987ec3bfb47c6812582c Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sat, 2 Dec 2023 03:08:16 +1100 Subject: [PATCH 0059/1088] Sticker --- README.md | 10 +++++++--- images/unsloth made with love.png | Bin 0 -> 63453 bytes 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 images/unsloth made with love.png diff --git a/README.md b/README.md index 5d8355074f..4580e83170 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.o ``` from unsloth import FastLlamaModel import torch -max_seq_length = 2048 +max_seq_length = 2048 # Can change to any number <= 4096 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. @@ -83,6 +83,10 @@ model = FastLlamaModel.get_peft_model( trainer = .... Use Huggingface's Trainer and dataset loading ``` +If you trained a model with Unsloth, we made a cool sticker!! + + + # Future Milestones and limitations 1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. 2. Does not support non Llama models - we do so in the future. @@ -130,6 +134,6 @@ trainer = .... Use Huggingface's Trainer and dataset loading ``` 2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. - - 3. If it doesn't install - maybe try updating `pip`. 
+ + diff --git a/images/unsloth made with love.png b/images/unsloth made with love.png new file mode 100644 index 0000000000000000000000000000000000000000..9bf7ec93680f889d7602e5f56a8d677d6a58ae6a GIT binary patch literal 63453 zcmYIvWkA%=_w@o&(k)U_0@9$Oq|%|JbeD(-%Sy-6NOwsq-5pCVDH4KoEi5buNG#p; zeAMsn|GeN0JG(P;=gzt3o_i-uT~&eT?)|$U5Qs=gQC1TK!a4tn!0jK1`wAjN(|su4Eh74hxe#OHg4k8^IecbK9^ zIY(!r7G~_!Ejux(DJk=1r-faTS74yA5vDte|A&tizlt;wkTB1#3 zi2H@CgBbMV=JfrfotU3J=qY9qW)*D*4yfMSDs=iQjKk#Xy%*ry(wI$&Jtwb4u0P); zjej7EsKlv~KW-=N&;$))d{~ZR4*P`4>PaAk>oF0U;rV^NIc)M`)j^p?c z|L-t-0?6-M!jAQu=|`%p5Q1I_NH|-a*qsim+fpW*=zpvG{2=T?@(3=uZ*$DiLmuvB z(d!6OxHPrIu!1!F*{w8Q7&kBIEtWfOhcCv}_um}y>IjM_`aU$52^jRh`Clsh8?;Yc zdc|_94JYs=*d@Z~mB5I_Hxu{M!pOMuB|hP=*`yUUtMEJQKqi<16fd~Jh8NP1>@3B$ z^XuyXld_|QG3dWhcuF6q&Vz3TLG4!1>!U!FWLxaL3*?0yG{!lb@ZS*4#3w#+w~zg@ z-4n*K!Y?NZvHVQ2p7oh}W&Pa}X}qN3>+3>LE69v)*J~4)rkNgZi3FsI5u1Wec=>m| zifQ9ZYF(#QQj*zu_b5+`|W=bu5Cdw4aacNsvZSf4v*Ra80A z2_$6ueHgI&9W(#85ydyDL`!@iaE*h{$Cr?7@zt}H--Vd>692bKyf7{)EEru!Ehz2T zuf6kFg3{LdWfva7d~ z*?uv}-Xy>P3ATb8LXZg7#{U@uZ=X`-+TfSmw^c8Ka4#^V4*AP^*Et*SpS42^&&fKD zZ+I?yu+$M=ff^w6aY_U0UoQ-bwOZB!>4MI(+<2<1o_bpR#$VzBAvI?Hx-zMBd~D#g z=bA0Nof$tM`LahHw505da~0s60s7A)1~hinnor$sN7$m(5q;S!7zix0O6L~G|2{UT zqVF&TwRX-kNO7W96ih)RpwIDqG_U?ySjlb?*HO^V`nM$6zMtS}H3v-gUnb)+?lAFKgfkm!S1g?_Q5UL0ao^A|G3Cl_c5>_v zllQy*dJA2ZTA|f|vJY0)R?=BjU1bt6(yDK%KML0-LfQ2_IQ#o$Ag+wzH6JxHGE1)z z%p6RahY{-WnSJSMe@!`-^bjP1c}C}_BZ<8nml+v&f~l=la01$U?5FfwlE$Q9a=rW8 zCxfzuHVSPbmzliQ_et3zB2!-%`G+U#c-@M94pQr9W!hNIPOEa8m}ius8T?hed1_1t zwI9{vrirOMoZ@vkZ}dYRdW}lzEp-BSEDffs>FKE$ zwOZZbLVMm_k5v5)ef4n3gGW4a*{;?bhBFvDPA7VolOs?5-CU(%SCB7gPY9eR#)&po ze`5XqE8Mxe3BL(Mp4YqH9d10o@3cextG=kQ%Ldu187MzZv`0K&qHk zL8l{(;a<4FjGmAEY%mfZS}oZQTr52KtkuTo@7}GIu;Mpq++1g4fsqLB2*~u+|Mo)>S+Z9CN9?Zh5PjeVW&A~@fT{ez+ zQ%NS%W7bA(o`lSNXKw?L>lMbQ0pxP4OnQqHmkqnhrTz!sbk_Z4Mj0qw{bI8yQt3;?&s~ zUMljj(~W0S`*QAL{xBgCMQ1ui5#qcVcs0t3$j+0a*?@k3*i^A7`cTN1;T@VN6nZJlUL2p|$FpnDU 
z9-Ci45TQ_T+`Wt6>B&pEyo1%epJyepQauGnU>8+Jdr*72zzEQLjmlYv8qS~38yTWY zHex8aalYD^+wR>G%Kfv}KIt*Y2X{%TmCIfoaao)ur0r@#vPAuDT7jfk@V2*M(j7cb zJ~Usgkvc0u?MZfQU?*_}ITnC)#=Ny{aqpd!VJ|XgE}1`(qt@Nk${yh)?6A;y0e>M_ z02RrYu_k@!wprSk2Ikt6R{ymk_wZ%sQ6)5EJ}mPDjZF!`W^@?-_E< zx+v+{1>tt}&r+iut$yHCERR+;>-o-b_--y>p1CVTG}x~$Cassi*$E+*VniKW#vxx! zHJa9L$%6koet{9JXG|SvXE>QHKl(cCa1qDR3^OEI$Qy2nWnuyc)izd+qq0T?JyPiwPFV|JbXe4H;#Qir-KMqJCOa>yl7+gnrDwz#t@pH>VxOL$s z1u5EJh=}lXMQr(WMX%w&wIx}1RyNCJJ?fO_gXwfDB~nIN0;jM|rF5AINIUIj4vWPb zLF=b8xbPi8OzfMqbIOaq0gDY|0`!cyBhoqjs*d8%40rveL6n|mib}y&?Y?&7{O;yE zF*A#G^GqtcY!zAgJ7s0RmcLEnHq7zgEo!9YQ4M0>l&RG9c{IY;A=h@=ox1ReRVtk& z`!6?PyQ5UW!9I*sF8r;=4Ix>&cCv)1UDFnQ=k8%|>qFJ?$;2xB zXDXDMu-_p_Jb}+CTNQQl3Z#bh{h&iH9@9fea-jw@raUd;xWz8O;dDQ|RDrVW_&&z2 z@C=Tf22H(!EqNEDCgfDd77Sz1XKhHHscE6+UBE*->whgDWS_CX50pYAo)Qu>p(aVfJOX2+_yupVo%is{pwf-?z z^+8P(JTVDu-kv*y|1V8?9@r@>~tm=Ot9;v4g>X zi~{ZXQfJ%)Ic9bu+Ypm&579fEJS=V3y>DJAS}0Hkqd+r0re&Se#leUidWD%HCtm%A5X4$}WCfUHlL| zaxg8JcVFOZMHAVrksuw@2b25Gr7^m;V%hzLR*&r0#$w z;Gc~*X-C&Dq@RDA}&3p>C6MT!tNV^MY@c-Q-HrfS*toEz$n+G3%>Vm5RP z_MYh0O#0zrw;UttYf}dpsAPzO@bl7@FENM8|rzQf#Kdv$#Zv?ZWilzXw+us zm}}>WK{dUq5!9^oI>*m5y&n}6G3NUB7#P7c-W-b9pO7we{#zHXUr1X1chKAKK#pvF zYVpu$t(OQA+&yf+6UKWH>w3m;tj5~|+GO-x)|WBYx%ENO3KQ;oKgtJ6l1F*$XeqL&y%0lG>Y63*C0wTt;x zUhT^EU8bj4%!Px0{v7+T8Njpy^OWs006S>w)JK>bK^;xm@^@H5OGzU8I4dMq)5>I_ z-;M5#*aDt>Y@aJGb*htR#DF7puPd2G{>T14_3r+R&2pc7`< z!B#ieKd!gW^&bc&>fK!efwcRdeeHjQcEa%9G}UK*G=*LaGxR+t9NcPPud6fuu(Byr z{^L{r4&vJ+UD&VB$qicBgJAq%psN^3)`K?|8u@24-i~wZ`x#eh)te9f#-hT$_q*WV+2!;Q^%saGyhdab zFJUJ=eC-9KOODfH`e)u-%D@JpQ+i)k4CeTE4(B3GI#a3&m1sI&s?I$nJlO z8tq*_rS=unUwe|;bIl17X7^|S+!x>V@sp*3Y^lz!IbQs*QryUuy81p^y;z1Y&&=2- z)iqeud}mpqrPG;G1Nl2$RL5x^lgQ^JO-o|t&JgySw$p0P5$?S7f9xjgVapZ!!P>*_ zvWF3{l%X>u#>Q{|YWDz_7B^J09Ft`?+5n&$5uOwn+s`T(arn*zAKBAX6iuIEU7r!5 z(+z2^^7KyV!I7kv`-h|G{y-@jxNq9k!UZb9_he}4A-NjhY3Bs-@d zKMry%s$s|Jmuo!7rwlp%C}r>%y8B#L&PQzZtrfSz}xj0rCh zNZchMlE`if?`CUuwEhWu8nisJR5$ziT>uhuB&#NUG+wm*aEIg=`Ur}yHOOe$aW}6u 
z%Gf$E*TnjFmC+OO@bwL^$pgek`f}U*h>J(2uvPjOY;Fz5+SFKu6+ja8>9HVdmw7=T z&1he2r!L)c;~WW!D+=+ca265LFF%%FefxGf3nK&-#25I4+WXt`zeVCTF?IYh$&#&% zxEu#v^#PIM4%p$0Hm2-?ug4&!D5YUsk^;T5xuQ46hp7Rc7=D!qaJ^VxE3_fhE1GY3 zxO-LA>#OcdE*vVio+{KZ=ZL)uLv>3&{tro}@ERwgx6@g_T;})AHh1^GeG67*#M$u< zUd-)?enWoze$^IUyn82O+LE%SrsjVKRu_9`m*2zXX&Vn37vnRpb-9Ev2Y!Zv9xF81GpRwnoHUsS~H0Sv4LXI0EFgLL=zC7Tj z>vN+G>J~ljy|G?+y)HKO_}oA+_dWK_(RfQZ9sK5DK}r2$q*F6G%J4!>wgDMX*?{_& zKak@PTw}tA?Go0tA-9x%NKZ3*d^z+GTxaOp(Q6eHFiit+Kc_sm#$2nzN_86vjE#Qs z-Mz<9$p|UEJ=1C->W0;zpVf_Vyh^BGp+nz9r(I%&L zSaet{XAMIK9>}g$sMv)3RCC0K+V5H%aO&zls$0WWxCg$<{O{A?#Pmb`=AY}<*149d z%%ZDQUwAl}LK06qI~gv&e680fJKCu|j7>=WW8QS@MdyK-qlOYKS9L5islZDGd8yV+4}E0My(9lke~ zCT5%0r%1KX4)_@=Vt*-+Bn_P{0%oL1P^=Ng>M-{YU64)K`P$#gwGiC4cUJQJ?92Nw z6g4Bk-_*lk4x%$RtT>V0vXriWnyWp>$IVKiPfH8jJ23ssR}P^#e|ggg7Ia^t%eYZC z8~hM+z7gDqUTC3mdk)DAyp2JidLyb`cMPm-Y;vosH9STJ2epFQTO+iszsE9j^7A*g z-AzrAI$ESbUROm=d4Wd2Mt<9ouMl&-7LMr#fBm@slU`qWP@CiL{1gi`qRDxbgYDuK zrT|q(rpaf+%FxSe45JmmwG(NkZi3vN@}3xOwdu|czczyArPAWkgR)#Qw*kH)2&&LFb*~F8Gior zb6C#l%#yy`Qs9&qka{b97u#*M6nl5xF6gv26x6g%5syM(F3EtL$f2nwq^EMAEHZ{a z$U{hW%wL{esh1Hal)qyE5TVGepA0hLqF-4m8BTb2!uihEP*39~rlxZ0>$N>@FSaVF zbQQ^V2&j2pxVY5&GD6c%?dKbc(dn+)KPM)F$_*PIRWkXUVQfuT_LVm-h7SxVk8|(D z8v2dq8*`zx5%3E`cAk}1G;1K;Uj~&Yq*J{c6dM}bV>E(wFLUF-9ax5~Dhl&oQ~J&Dfo> zjH!R6IF6Vr=|QTnyhLS7fpqyZ{Z*f1e^y`Q=G^#e6*h`^$l@bgV?T{t`BXH8Wl)mW ztzaL$Op1^6cGFMO`@1W$KeSnT+h8kF$#*7uEWUkO0BB(Z9M3>oE59yNOM?kfsM5?K zcm&@>(1f_5W3H83MAIVPv04g|yNS7Ot0U$uf2qybqMTFLxT1?e)pd z9Uq`F(Y2R6`xh$7Jk^w^n(5h^PbiBY?)7C$bqsdL^!+@c>b(9N&*FnwW5mfr=s)5~ zp084jlSS-zmaJxYH&z=wh{e7m9m-4V1LZayu%i$(4x>Dc*Qip%9LZL9eJZll&t$0dHj#9~A4NGWqkyHtmZ#gtUULof79(P;0-34URw>-}$$ zw3Z$}xZSk5pIj<&KH-ge=C z*mPeU3yz9%IqS|j-S664QBO>-G^#h4(E0;j`$Aqj^(hvaQtKH`i3ll+L{gEf>y!By zmsR{QsT?QkeEYYpBN=!Q{(+rQXO}*ju#?K9+Uh(9yEj0ZGT#YEx`kkxb=U>^oLxD! 
z!O1I1`sG@df}pT6i!#pv()*A|@@h{;?n?xf>tHhP8F0OlULE>Z-@}I-kG!Yxs&x}_ zI2k2BhF+fZKAAQOlp&FzgkH>ubxLbFB^f#J1F$ zwX_o@wGvTY+D&>gN^xvI-yKuh2*}Wya}IhCY(AtNJ-{6!=j_Q&J;+v=phskko*T@) zIea>vZ6J}$*>~{Cr`M9T*KaS&e5Q7_wBEc4vdM;0=PJbLSlnjDXU2sIS}!C{uN&$v z$0Z@nry8CV*@bQG(yJXVPW^Is|3QCM-S_T?fZXa|vjb(1kkG|z0>4|D5&Vco*e=1+ zr)SyY!dEaSiY&v+TvmN1Uz)3#zR3t!#?NY+3mVssp6Iruc-J_VDG##a_zm9gZzC^S@qzITy_}jt6U4$^YaaX?m9CQ)`gQ$`kW_>eKj7!cxQQM|TYG zyDc*fw%51>J|#c?iSL!mXrb{(^xJ(}VRDnS>fT707St)nppj*GZ zO0aKpzvKQy9WEcRGcA{QC5+Zqy_uzU9K~JP{i`d53d^2Kg3Od#*SND#H|`)?+BBC?{7{PC609O>1)!uzG`D@qtM`&zKIq?cLZP)B-$?< ztSuOHUI;`M*?9aG}?+S*FbDj(T_flKcfNGys+@$i*;*+rZ{L7A8dlar|A zOe^KB$u-v64&t7MZ~t&Z!2;~vXcoCgaFa*bA36z;_pW6}II)i~V=WCoguj&f^xJQv ztd(kQr>Z)K^xst$%$xHKeFb%y39yScl}A;&EyS?A5j`L?3Ql^dac5^e+qYsOHe1m0 zlco>xLl((o$lv4DyDmf8gjnsQqy#Zv?fTwo#bYNYCVnw&bWql8TkZ~7*~pF2sac_> zZa$2T^%@s!BrSBI`vjml7wRVIm_WdCP+9jnk6da}d-;=f_6it_coepdyB>*|&GuXq ztwpW$Beo|q{)r`|}CT)1aZFU-FvMBcgkMWg~(g^5jhiUfJ()pTcwoU_QH z&!s*4^F>@H?=Mzxk_--VH!qQ?bJ?Crg@G?G0|l%HDA~0tu9mlEsx7OC;f7PY6`h$X zZ&A4=zksSiM8A82Km0f|NUNDprwI{A?+(KvxL7C?2(UaWKda z=_V4n)7#2$!0x>{QQVN)A}1hzv5}i>Im){7?0hvL_YvW*-Jzr3rsV$xi=Aq$SpVrI>!>D$Ix{w9wfI6q)zBwwCTK30f z{CR{vIgRvQPq5(}ThAaR`N--wZy`}O^vuZ34K&@eES_blB~xxuKb=e6>k!nYwtK@! znLlU$a(U+v{-*DJ^!QP!=)=4={&(dhSBR45w^imtB?M_{|{mySJT z3Y@0izXr|BOcutT!0(NJV)CEIQIn1dh(9Vb#$QUN60Xj15Hm0^NKCo#U}j^8MRtlT zy0a3C@4e79h2@v?JRL~6?7%EfdjV&8ioo+)ZkTt_iuOWkd5kX1lDb+ZNxo$ASdFW? 
zq^^_}?wR9>U|c38UD0`!_LyrM;zg9oac;~E7w+;HW|!5-Vv-h*+*oF;W;7A+xn=;d z41P%tQq-)tO!kesNPhccsXj;Y!I?D0+V2nT&F*fu4Y5HUUs;T6fB4osm3BJg;e69D`at z;YofXb8p!H0NSx^P1~B#0v?MeWcBWenckIQ7*HNCil)pYZM8*94 ziJ&BmQkOCe$`i6kN<^}<6m-i4wSiQT8vk>%o!hD!C@%aPRCz8peKNPTzyyoW3F4T*tAi=9f&RPjp3RUz9z?6~ z$@H0hOq@A~+^e^10Cr}V!U+t}$0KCS#i4Il$@m{M>O5xc3GkRUcH{Eu+0j9KW~C~_ z$lGTCTN6w5#%ncn`q`)7;>xWR-&kCJ_5Po;#6SD)EB82ap{FxWmw|!saOb@ ze#-OalNH-0#%a!?4a+7O%h(U&Jt7*+NT5UwD4*|3+ebp;x5YDNTm+(v@A-wdx>f`u zX-vKSBlwyX^=G4dgh*(lr7~#D!1W^USE!abl5p6#el)FTm3lL~ucg{hg7HNt-~Qs% zCds1r5cfP+>B*-cDZgc`Eu9{f$j&25G4Zs06P&L1yEj4+;gI|K1*~})AmP#9HQ#-T zl(YE6+6i19)0sY$Bu?x)-y9}Vl`v!_ZP)7MiypW!i>O4+JF`>YkzqP!{n%ahQIiMc z{%SN(l!nmt#k03PLWVbS2Q3^6S!EZ=4-xPyyIx*6k);MnAnYPG&&}UnGSL2pNOB>r z^C>grcusI8slm>!(r#U9y()hAGoqM3x{`jwU}BI-qd$+^w_~cpm`*$(636uZv|vXIqvD8uMu=Bmczt+}2g8w9n`pEb z;kUQp*etJEDwTuo88wf&V%Yh<)FJkn^xy|deUAs62gMA~pq%JoU(Sf^#On59Uw8Z2xp|LS)siyh>vQNEC&Z$Mk zg%X9%n1^s#OJZ~7KZF-%+y$LW^Ro<>JWj@gIU*L0S)Nd1A^=a3dO81ARDaj1?(qDDFFdAU-OaE}?8i5lZ;;+Yg zx(2u|LCalCWCfb}+rz8>@Bq7*lHTU*6-bA7>w0&*mg*G3O5jP86jeChC5 z{tsrb{i-$m4$)FplfA*FF*2iTyt%pDolkz_9FRHfpZJTSQkp|CA6EpQInyK%AtJWV z2id&9=HXyp&qKEdZO}5$mq({y)3vCB^b^NTbJLN+7^}04qRyNbr@NKKD{!lRa`xq# zbPtgFm~PcmkGz$m5jguA*F&7N&x)@R23UT5tuhOry(hDnJCY zGG~eJC8~@e@-hFwv7N$exb`Tpr49;c7fIoSI+&&hcVuM{Doh7MsXuBzq@208tZPtP zAap!QH8xl8ZyO!_y0wH^XlK!u2Gf+<$Og5I%cVP38>}YS$e9R*Xlnm%-l}j~6=b`u zw2*L+?poF9?Yu?eFg zs72O$%=cOTtZb1aAao?#%bpFQq#wh8(9P==SJUbmYUUilgY&F-!)!fX$^ii@8VXE_ zMPj37AHbzD#ODt(ByP@D8mLH@P8A>Yfgx z=7b!RbOHXyxoti#A6$*OdTLY-7wnEiSL)vbeH5y79Q>m6tZmEDxu4(FTMSU>1MMoB za7uM6UD;-qr8l}|JLWy^e6AN1<8xo}? 
zenOvbHKpcqQ|m#thDO}eyj21#TcGft>>|8faFyE&JfS}#mH}~qy-p+72SHMN)J669Z0!X$N~oh{6-SdJ1OWV zPk-^A9r)gS`jwsgwTY|8Q%&*F0~6il2gxEzMsI^dI~~hf1A0>DnV(!7&xFa4srnmS z#XVq|E*C?Cv(suh>i!bw@36m| zyVLBNE=t#yO5pQiLIyrkX3{oX-#%&P|K*A7Rb#zfN>bn!E`3lgHxB2NmzeC#_wZV8 zkJvL$CFK>_&mpXf<%ol*23*PR7gbx0wDn2~oe$B07UfN=@hXY3I`XE;KVUVx826B0 zIW#3OuTw5|+~0SrIgfD2_U$cKM+_(MAF;TW$I88xa78)J)!AyOwEk+qX-esPsMw1s zoMZjkl{{;?!K9VeHG)isWq$a_{D8I*HFjB0LFRNvo>BtODF-VSE^*|$303OL9@|R zd_}lrH|me{+MbCT=GE7r;WK(?OXhZ2J1ThL1~treYq7t`j*={7)ug-q8p0^8txfeu zilw9xBlzL|VYT|+@$YUfQ{3B57bl2O-%ZgFk4}(8!WJ3%IgP6rP!`xOk!+yQc;<6; zHbXRn`R725dY4B+8A+_-W?#3t3r9a+yEk~vQZpj9E8Yzyk-Dn-4IoFaHe#UWc35op z-U;vF2daw)i#W@kEqOxzz3qCFgB)zcp~p8OpsdJm#X0QgUaX}O&f}!V;ed0Ueun#KU*SRs0;n)P!df{%zPaCO^ z9g$JdleZrlV?ci@AI+{iC5hefA4Odgt=7$ldk7$NZk1+e;&q){tK5csd|$(BA~ag0 zU%*Kc=(r417%dx$UlXuCq_sMnS8EKavd}U?n`627x@x+wBVCT+R z7)MqSJ*zaLEOWNYKU1YWv4s5_%y^r%Cs1+dht_D37ZY#e)yf#}ln-P_XK1EIy7o1b zZe_HaE?C&DbGoma4z`}&w3H5;x_kpUE>GL9t0(x7A`hrY5N$XXL>~!>4!W)|Zb=+! zBe&%5k3(3shJ>j=>r5Q0%*-Y}buH3Eh^R!a0TKH7d(*Upn8v;=e*&-F^v@W5VT}zu zqioZ{o>TbDVU`QcF3IcB*7y7wOc?2MIE&go;X$|~drtlkBac@PRy&nLBQ``1rmCF! 
zNjNeBNaG)ewxmNN!?1w!s8lVqQD`rbYP#?$Z)y7`L>yIbQv2g!h2+?ntHa9X32L;YZlY(MLeg$<4m~W7gb~g?L9E18d;82 z>$fW-UQyja2PXaQP0_I&>%)_#xwA}O{QQzyL8K_-?Vg&rnTAJN9E{ZdLJqD_-=k55 zv=LWEjQBgFug_(GM*uUwqf{Yg!Ls^63EN8U`D}L&(;Zi=`a!^W*M>uS2)jRS6$CD}kxOv&gG8k?28Tmm%IBR@Ug3>#aS# z(cjX%rhPgo-zPJ>8yE!OX6E4GR^xNB)@l1~bkU^kJ)t8sQ*?!v)ys^rbTx9SY> zKkBo^zJH8ylW4m@q{484*2~+o-RpY#?pQvG&!_u8??U~M#i+qw|rAPYX?2C*7ZeWr5xAn-f zD1EWqrx*5b?G;xQg?i_~aE3p+X(E@_hs&+$ia^bWyPLsG1*_YYwnd$#+VG_^zU}~} zVpfwegv`fRrizpLgApRNoK)%@|;yCj4ds z>%I!qF?$ebT!SJdnNHs8eu}t^d})f?WpV=xOP@pD-njL~FX?=r$9EUAYoQPXvu{R8 zCVY>piY%>czaNN0B)z-XW`8NxHZPF4PQ9cirK%uf=QOb3ukduJyFU{*M?hHkU7L-q zxHNf$v2D9wsLCDSj?za~HaCkt`t_UKKlClxny!gQ{_?DxGt&3=`V8Qvyb3mTFlOjr zSBO5cM(=^I*v{sfnEo@4pgh7zv98G=xk2S zdNvkjM%kld{bgcqatPR@M;JS`-Bz&S30jA*j5pfYx>$tEzK|>pXX-kZ@T7D51P9Py zvM5K2>Wu9kY_)&0G#hD9V(_xOC0<}y;DmIhta^-+?+R28Y&8PRH65Ql{Gx~DunkDR zGMt8=hr5MP_#o{b&#kZKBaA-Smh?HLQ$M;@!^AW-35xA8=Sb{<8e}p^ke8KJ%e#Ag zs3NQC(o&P4>I@~1nLO4{DhdpIb)r?MO4I_@7l@&8?Z3Mde?umnGr-in!(#;D*O&g3 zKPXWKw(YW`JAu*JSvNy%#ft9KXA-n)*ow6YOh@?c?0cQmzyD_AOuPiNXQ?M^nR)HR zlU)N`vum#2?sac-96J-W!?CJdTV@A|8QXypij?4nJ;ez1J?f3nh8iHa$;~!#x%Fis z&M4le-Iu;2Nt1=AMCi&l)&=CPUshAXg1mOyrXi_6L4R%vp5E`nKPs!4l^5wqWdwiw zGqgEg!o$~hw_xLHQVR_?-Y{r{h`+l_PXwQFnapDf21aScG{K?DMq`Q!m>PHKHPHT< z(_GxygN@XiNbj9AY+9lN9!*SEOg^|-uIQMv z-n$0PpEBK$A}i-qACY}Wx=1S`@CH4ZqG2g@n;iLr(4^)Q@CPHS-SFLisMW~VES&i@kdwO^FzGalEy!fTS~L5=={@1!}2O;Ibb(W!t4Ve-N}ru zO0XsVk@lS*^O2o#xX=a+Q4Vt6POM?iEFfgJB$ydH2joQk^f={=n#oGWfV3`#$WXj{ z#S$hAp0_*n^h4Zc2lNDoEH1h!2EM1G=+Y#UGEl~}nHlPZ#eL)97jif!hX z4yo0$9CaAW@dM_dXDBu}+Ewlu>V~ktFPZOjHG!AAs=j(rsKR77V3$B2?`POCXV0YF zTyW1MZuGP#&9>njm5mFNe=CoZIX^U%pgqyX<9JdRXsT=1Uo zq2NxzP3wGnChW2gvjuT?Z{0Vxm!-+=JwN&RtgW=MbSUe=Rr5U6@f`{PFcsSO?J879tY6JQ&z?^4ngbA zQ7LE>b~xFsF(;wX<8J zzC2Zf<)j_kvk7+IOa8dzLax%9l%os7zWSfco0y0nJh9WI-Gd(El+D$%vQ?OX*-6AI zOx6Q-g{vA2_4i+|Y`PqyBWgZ6zJ~8B8+(bhpP>lYO$>tCQhOy0_}w2@wCChtc88EC zSX*1a8cE=LqpQ0D^a%Ae*w25cJX`!4VyOtZDyOQp((fi7Dl?Hu@dnf=y)QuIgsLqP 
z0~w3Yw7tGx7574+{GIYpr|RKB6&|=lki?a_Nj2pOMn?F&$7&ey$L*RL9JAx(BB8W*^+OH$zD%|)_4BE;(Y1L9!zW-eiWtd@ zcLHmW#a54o*~KuYRf6t|<1PDnm5uM`5~oUGsXrQKVmbfU3$O=oM>G+uxY|$S{dpcj z9>EssL@tA-CR>YpD|&h`^M(8@j*oJNd8KWuqQ%0K_z57!@{|Q>`I=m~)^Iukp3{sL zOZ)ze$!oj1iW-b!cnF}jV>8_kv^;`|@QYpZ5Xvh(g;RRAqRu#U(B}ak02F_EJ2&P0 zb;jlVJDR*_TI$Gg6OXIf7AxU5_iq6)Si0=w#Ws*ahJ_IL#4t)wHhOV2vHEt%_6)YL zk-fPUOz_-^U(S%l4TDPo#nx(g7n5FE4vw%Lm*Qo-xC)IYn`% zJTNyn#1V0A8H{%Y^oa`LWRrrI^g_g_PCb6VnFpS7KA9PN}v^NrvZr3X_Y~(AemZebIbVES;*uCjTttLyf2fD%;fUw8( z&}EyUP(v;ncvMkUn~#6H&}_uIt$#m;6#O8phi$Mi^o@uG00w%~y#YHts zr-zjt``IzxG-f`2c`xFZhK0Lwiq{jD<&ZP;zEAfO8^#;7A}$)HaPoHuM|OpurJRWK zM@r)7&bd{$>YR5yNFCZK+rEIlz5H>a|EEnu7LdWz4Gh8;THJC0A$;Jn!yl`9MDp?r z^il#nZvjODcryA`2KQGj9tSITJ4Hp?i2*x~RNVIzz!yC6gM)*Wzx$0JDE>d5zA~Vy zrfZw-?rsF7K|va%TRZS;Y|LYp@&z}+g=z!_~5!CK+v_BhUU`l=xr<$W>y7RkRT1%|<4lGNL{8SW-SAaru z+MCJ6!!vglp?G^q@7?B-#8A1Y6%l%z#A1goNCCT;gxK5KBEF0W_s_^d&G(4D?&g}X zA&wu;C%~;^$`dO+uJr+yI&h6pm%o*9F4B`B5=JZcq?;oNmZNG}Yo3RPXA?A?AJraU zH>({}+^Th+oapj&#iM)rpXsbr0gr0a>@ZP&Q6l7cFA*E$a0bKPB>6@q;fI>-GMY1b zr)p*a@B6>~Ll1JUbg=&&yg&(z1=_bpd64j|d9^G$H7sQl)JC*2VC zf97|KP6uC|Rb!Vb#WrPGdu0#4kyPV?C;cf-DF=w9Rr6GXrvg7Aa5Dcq4^3P=r-miI zZuiBtPS};%b_}oOiOA-Wy)Yp#!^}*rk1U;|ER`%f+mK~E_KJ5uO8#VvjD7i6|Ibj; ze2d4=11<@%oN$KcR$RY9sS5{;*YNZ~j=sLW0!1|2pf6ECuKHmpqRi_%WfaWNXGN%9 zX@@;21nX|LxzB5NoP(s;_;`BiH9%u8pt8G_Zkq(09Y1IBqsnu9% zZ@mwJk6unqW?!IPwR;YqFtd28H-SOBk_BPn9|8^CZyG+moGcEiit$^#3O^8Gf7wBJ z{lhk>#XhfZXV|H6{!~n57Y*-3?V0S)m?>3n7Rv8Qlj)+7d8?|XMgW*lbQ0q<8FfSa zLgU|H_P|j+32~=hrL8VDdUkZN9&i!dx&2VWAZE&=-iIZv*1eU+Pg+{1JHk&3bvol+D)M{FY^+9; zxL{ac_}<~Qq)wFFo&SHv!x!`Wj{GK6d-7gI-G_D8-q+qsuf&*|fEVMH^4fRnIpp8# z+~*R|wexl%tEwW+#aQ!v4aKz8*1=WNb}3~CKh4Hn=~e^YGqo*n=X0yQlaOr_fK01i z_X|G;WC^yOr^;wItJ3|>?&**0C4b~Q{+%4K_bWdXC+Ij4G}TCml-38Cn-)EhTb+oM zJqkQ4#=P<#-<>!)m{!UaF{PnF4W6_-PnWzmZ}alnk6dKyTwYdgf0$ls+(Wb{QKuAH z9s){C<*y#wX^gC_(Yvb>8G(T?QmiL9LZ<_?F^qq%*6ZJ~=``U%AP|En>HQN-Qtpu% zj(BaQ>Rq|aC*#7B5@fr!o1zV6!w|ZN!%y?n+v>y$0H^z&L3%nm#y#h)b@We82pe5x 
zX~=Lq^+N8E;gec$)B4?qJZ^o5IgKR<>ji=XqzUvU8Q{w*W5R;RjfzjCs}c~zcTwFU zS+yt(BwMnYZp)l}mM!Gd)$*xIqNnwvN9);{V=w$uRX;`d zFDc6M1%_I^ZqcDUa?mBo3i{OxoOZxo1DR72^WEUkxbTzpr`#NxN~Dx4@=%z_S6Fd4 zR?3yzK}Sr1PkLXpFaGzfk$ySU%mizM->E;vreY9xH>q$kvXyg^IB9mE)55cZfI)5&3OE*CN?BYy6O^U8}&!`KD!BkB9ckXyQ*mTnx3>R_>J_a2iM_H zQyTU8BxrPg^fSvK{c)GsWiTMbY0tLSc3m(T>I%niOw(*InV=JwgX!n2SdC6ke&MYn zHurx41w_AgLet;Wsc8OXW6z>8wfU4E;SyT3rM>j+J;;Hma9j%=nyjdsMvR&G$~2uv z`Icg$7PW3JdZ&OjC<(`1EsWnc=YpIp%=3Z!VO%nM6f5`TUx*4TD{<_#%iyvfkhbZM zedUelpyV=@XSc}0|5RsW}r+OhU=&nljm zF=ih*3^ZiSdgV-<#N5MCu`Lk1&B!$_mR(QH_-8kACMnW;JU6IuuG^D#hp_+WE$eY2 zLs-Yp7uD1}o|0VlYqW`)MPF4W@UqcvZE`8KX!S}k_OX-`^ezHkZ(aDu2jD6DvSEE5 zY#IMUeq(tBq%DMdf9vw?@P~eu4+bOI$RxV`4k6qC5;|A9!{18Zcj^Q(8?NMN zTFp^-kx65}Liybc>rGVZ8OjcX1Z{D~!s9c602~MFKOH~DfA^_SP728r-gk5oHw6i= zHA>>TDFM{t`P)`?e)yJAf+h1aTOfi8CV9@LA0*#QHQeTWTm@uCHEy^N!$$n zR@f9#v;Xf&2Jh|~sa34XFDuEX5Z_zgkwn!l1J%8Zb}KjQn%Q!tjJA(7vH+?bbZzg` zm*Z}{Dx*Fy@Zvcjl0E~@Ge?;;gTE?T&W6I`d+2GO1<^?(SBh~XcU6~`SU`{oM4+IK z5Q338D|^P{d|UIy3%oding%WeLjF=_te8pHZNZGeTW-^1ZNi#mE~|h2kAr`%E)VV% z?xcyNnVsIZVMQ>VR1!QUN5VjhlD*o`)qVTJzB4q=t9>8Uum0Zf%gMx)3LhX{NwU4q z!pl+H((QFlXPoyY;Q`L%LF86|j?FtLX<<>q#MH7yjrErt(>V7}Kc(LX&I2!EnMcO7 zr}AALjfh`I<>&J5I@5^PL)jl<{^ubNhd?EN&-7erfjF2yx)gH`UT-v(TI_W8lU-VJa{?I@ zPDX_!Tqe@^!cQ|JKsgTu_Kc))Rhf>a$JXfE>VJ#z(*uSMsG!F zuEmgSc98GR+sTQm?dkC@2Y$JvPzZe8pz6 znary59myU0{_J~6iL&M2(m0L3zz*enwD?g*h#^n|XF|rK3GiKgbh?7tbY#C9Hlw=% z-(s^AM0dmwD#xafWTRAkk}eK{uHztmD!-fUa=(5*Sk|Rq9Cjyii&&%*KNw0YGg#X! 
zha`)*dMssaCcv+aGLra|U)%0T$}=7qn0K1>0X*&d#Qc1*H%~a;4gYpw`vYC$Zzq}f z#K#S0mX}|ryLoV~?!FC^XEhrINq#5|Y0x_Cudc}~0sdYG>WO;HG-6^RomqUHvq+Xo zCBu|n6pEusSGBO=E03Mb&}V%eC~Pe>TlBtK0!SAQ(==LJDLE@_9|lRODnn2SHvku` zQvK`dc;!=xlwTJz10%a+A&Cv6-1spaf7>ww$5pdjFMZ z0lJi-drx>|hPaTIe_=Rq>wyFC`r}R;Xp+7c;6W_6r)|5gCy%LTy{@11RDkbAB>=2m zGQZoYY??Nl8%2ZKYq>DeYwdVC$@q-cT#B~12Bi!%^R>u^Ib?^Z>yN#|8J`wsP+@Le zkN0N~uR@qHM1G1q$7taW-_7f;0fZ&UWH{Mk4F8ZNV%GDGAv{58YZ&(4+)$zJW#~_p zR~u56m&0;zFn$|-Cwk}o0GJgiK$P3dD7CPii1xx z%kn@V<+iq;Q~uI$KF04btt^$sx;&uDU)Jz*^qllnM1B(VT4q_jP*Ru|ZK+_$|25{+ z;>)DH%ils8?5u4%1JhrDQ*t)e^U8Ga@00oc2km($6*h}d4Bu=Xf@0zE>yi-l|8$Je> zIMQ7-EVT`s{sIa1O0sV&KGlyow@a^rW{yBmBe5=$v0_!nxHUnQrFbo3g=Mt!DPb^G zKK_3hGd)RID;0!ShJ$N77(Z6Hg}xm_$P~=kl%@QTtj(wi6NAqeL6r!3JcDm z`YgIh<81A8FTLJ!I&6>Dv=?-^Q#P2$%6kQ7o%g3@s@}>SCJ_TKPG7NdzJOVhAZTrq z+}+p`*%MDuo4ujD@RNU!9D5zq@eKr0zObzZK|#XilBG1KQStj6RTTeb8hyQ1hBn=0 zOj!ML64l_FHy?8gkDR;s#i#EM#alHk1TrRFKNFu{jW}hnb!KssJE3yqHB73#zCX`? z5*HH_>w)YiAY)5Z7dvdlNxhoOmS%0eP`Upb*_5{>{x8VKp40 z7mj~Lds=m7@9)d3$#giN=odV)UfLk?d>f2C4HFc8)Tm#mp`&>An}H|xOL=$hMD$|A z*SKk9{ysA!(MWU3Bb;~=+c+N%7f{{x2jn(J)E@A3#X*9Pejcn58HFIwMDkmV{CGmy=AKt)t9_2f}t!_W*Bw2)2CZ>a0&L}l1E z$qd#B0oEiqY7@+VeW$;>Ie`C!elRPsEy2-u&|l+p&GUfeD{w#nfYWnXig~;uSfk}~ z(W(KAT=GjUqih(n%~x6k-raRCqIOOe^RO>A!n@-6{xv!yIL(hCER_nG29uBU*#%y^b+Rmhwkbq713#QOD)t_LW27;0QU{ahc zAML(u>n`76Zo9B+5QFqbj*epTk=VX>bDIr$f$FU@=GwcDGA-s`2t`hF*QM)Iv37u#f1_w|6{(VRHcHewuG!&-LL)TF4NO)>Vq$3!!w zp-ggu?>u#N^+v7#Mf~)ji;`Dz2cgS^zO=rqHI!I!EiazaSQt(CBboxGWaOa^$*=1P zjqp4&BhuKfWw|=sovT38IV;b9(L?VC*`GOn(PI?$!|wqymxOlidU3qMpBe?);wM4S zvYL7ZYuo}A?dL2(uSpn^7o2we&heGDvI+&C~i>?EKCv>Kh4-^T$V z<$kr|#uBra%(l_?x~7r{(4-2fd*QSW75T?0mmF_KE9{?Q*6qk}q>EU?Yz?o}aTFU`s4(uyY8 zl6O=TFqX@!Qs0hDpskJ;YlpgmiQzSQ2Zo@&UNgBXOe1P6X^`Hh9sl1(htAt+#|XO< z0_Wkubg~z}GUoO0W{J=NvxQ``9}mD{Q(zYdhKD5mGH~1tqXY}2eK$ryi_wV0-=y$>l6EnVgRXx&>B@d`0gy#bkc#v|LHQo-=Q_Y?LDE|+A z5^NC=_*fG<_&p2w@e#|0M)%_2138TP7+0aga*Hvk#v8t@L_Ei?SliSJRhEi2Sx`Px 
zH&qrfZpgU%|Dt+B8T)Si8|vBcvS0@0HX$2lmYabmYSX2{-Qa)V?KAcqc05B*2rdC` zPgGw}S_|4lv;7_$ar$kx9a#u0ORU(E&9Lhx6?NmqmEy~fK+MS{nF^(zY#ALF%?5qd zvPp_g5woQ@EcR`Rwj;7_`-AKK+S3E&b34}Gs_O%pxjxO0(bnqh#{ISt&$fzU3R6Gx zL|Hl0)phjIsx@69%Edg%(fu)z(KAx;zy9kN?Dt?#!AS$xy4T0=#H=D;L1$I6(6(u% z-o4{mJ|KjyL59=}p5^FbO@Ufcxzq&f=B2)|O5RKRv-oVW ze3@*CeR+y17~c7e71J89p^>~sDl8n#TAAH zUo?3n$KBD+AslPMK?p@wuDe|-gqq_DHu4LhT}v$dnuVRpcc)KIK?a%6sUDzkqV0w| z<6e=v&efO<%psqcr_^!bL4ec|cr-RLANv}Q}hOvfdI_DR;`o?0g5 znAC{dj${@ENGA(($iI|yYb34X!nX}BnYDNZR;sOQ@WNE(ay#d4-41nH9z$ z((Lzse5#pJQWt*&5i@TJPsF`RBgPmT)(>Kmmr-teDFsy!#>b{4c;r)%&&xu5t_3aX zt*tC_a{l4xir9jBG#hO+Iz`q_VjVMYsP@aXPcwUGGMd(#!%FM(?D(BZ&HbOoS6w=q z6?E4Q{z~fVUanFH!&Uo3A;*4hr9RX#B`!?lM1e=SUzn)=KP`aMc!*PrZKVUIA<#RN`FR5F zUE>Ai8^|6F7SaT83hy?S3AV)m)CgI}z7w7o@&W3XZkshR*Roj5J<#oOA3p#mYAEmV zFc&{f=N#-9A|AZyuBMjaQKT%3HA6kly>aCsQus)#|9>Br3;$2PSMPYu@L8f{TLpXL zJRwd85BoFcXFL}v^5lbG&wfpjefGS0eJZuW^Wqy6ZtIjHKTCOS?gPc*vCHm6Ko&r? zwE$WT0Wy{nXc2dyeKC2Zqy&Ge?EWW7-5eOY>XIpheFe~oIfJ?FR^@@2#MDo9@b)0? 
zaCTNrE%FH<3sWrh%kO|v#Y`6g=&y~IWbxI0-kZ-LCZP;LJ6{p6W!n#Ir5+yUIAL7h zo|9JQau!tfV8iK&{NJYoY9%Nnv9S{4&0bw6Euz*EufjgBXJV?lJXQXIxGTS2fVE)Q zG0131IKzIIjuFI*<@xr!TO0!pHyH1`zizANuiv4Riom4FaRk_cI?vvW-=KLnVS!12 zUJ8nQ1phXC`Aw(Um3i0y_QK_0_7}j2#W4j=p;X;>i3uzXLd@I2KDhu|Fm_=$MpQnR z{hqPfBuIa!X>xl>NjQ8>&LMl(D!JFMX>zL)8L_j>vQi~Gg~7S^gti8Lkm}1k{H*_8 zhd%}ilat=cqIGOJ4+EqiVMe_@x4hI1MKn8m5n(i(ruyx3ia$@z!-U&E;~9}f>+9nhGMNUUKf3O_jWoej_?(I3`*pUR-zK|~4tko+c1 z(@nSKb4Mt4qfL15yj{Oe!Rq6!RW}P?B?{7iAEG=X(Ks(68+t37Z}bqw`nsP%x`l)M zco8E$Gyn)6u^a)4xSqs0XiC@%4<-DcrlWb_WG!qF7!cgQi~2FA>%}3)-@Hx%_b5VT zqhg0xS8sAkAuSl)Ij@|pF}u2vJn7Ekpx`R)Q(ay!Ug#gsr^;3hf%)(Af|1#A4G)># z#zuG|L@xAf3{ALiFVc&&Zj+X$pofm_rhyFqHe-?SH@P zz8W4H+Km;%knuRdc)Jd@zgnJAo0hq&o}CA$hk3MA&y)0r@CNp_y0ctJU%`I)bQoG;Pg$d69kr1JmU%Gag;?w$dXSY~#>AIuW~n zt(29yxjyUJb)*a?d##ub_X~zA!u}ZR3mJw?aZrtn1dmiX+&4u?+W?tV)T#%d0h4~q z`gUI(9rALm+M!b)cjQ9=zrH*nG`>+oa;xKnxPG~xh?ppohl#(}3gUKbw6;k()Y)_H z-7Z$=eK;UKK(6-=@P=<&W90c%We9Io86HiEl$a-nAe&?_2>L-LF0|fhAgpnv-0M|S6sdX%C132r=ug54aLppWuz)gNvlMpxF* zI{}=vkb^mE$n_MgttIR3>9%P=GMjZi>sI1IXdhXEy2w(qT`-7aU@&$Htz;6+g7W_61^>EMA|;s3?6-zg{0#VLo}9|6qqB61``;jz{`>Y8(zWVj;e~`t36Bze=O^Sm&3J%&4k-6Zz*Ghn_0BC_i7 zVPq<0=%iI3`yUr#_~OQi4o4;ON;N=rO#x0Egi>wo#?e40TFw>7!*){Kw>dnBW&AeQ zi68N={*WpymK%<2Is^kB>KN;F6=lmAtd`7?JbN^2-AYmzu%+yx9qh!rF7=r-`bf)P zB=Ad`Nx5jAC?|G~NC6JEtZ^B3VNWkP94QY;td0PJgH&G;SfR{bQReh$e|B@;1braEshx2vn3Sg9 z^M6vebF<)I@Pz!7{xEPB#qO{nKKsa_lXC$dAXEPvn5uH_j+@yI`cop$BM#32FfJA& za296t$g#{8;#JfkegVkABHHM;U2z133sA>yNErDpd!%+Pad9j(IcMPQSBD#c;*}2` zb5@*e?4a@7;yZ+RT);2Cp*6ZbGKK+aP{9o8%i+$eYp?)~M44JEcodC;*kr#Z(M1! 
zGs1cuzGswL!m@?HmFzH{0r!DMhd&Z^vr%&I2m+P`TzE)}JUUagw77EI%Z}HkBSb46 zB97GLJyn5Q`4kkeHGpk0J=_&}F%@;Xk(a(Ng0qh(&5&z90sG}P9^0Gw4lsBrE5&ij zP=oy(#4mBGEs|5lTEgC0wOeW7-N9~M-6&X{tk8AN37vEJ#kz|LLI-%mY+W`834&A@ zV&PYCnCzlQ!6FwW-?H{)dl3eMvF|)V+C<^uLOe#SN1u2#%m$m^4Jua=2<4FWp=Z{t z4hD11$0k5Nk<%Kh`Qr#!V`IH)e1P{w11P>kd=Zf1} zCOkB!%pva>-vKkpTYLSXVKEc`UC&qRZ`>sP)1i%5kf<&O+61nao#bi4PLKm>JbQ%5 zL4Y(kB{B&`6O1Vk?p%)LdJ)dIzSok!H1!bILT+RGT~&_o00WZ+9au9kAZn9@Zdi#h zZ2+J`A&_X5tV6hh_f|}E5g5F?=xV8^8>ud}^eIIU?Azg~MW}=`jAv~`5Hp0IZpL3* z&$9#R=ws59Q^1|D5b1q%MbrK()ffusE2#FDH5z?yxu<$`Q5nC-cJ?@g2WOX6U%@+W38-UumR4D>uL z%CI%u4*}#*AG{YHCfwkAC0_t&R&(U~-}$c%OHumtRPXH7Xt=^W_e~TSNEp1oS?Jeg zYii9lj~o&aOOX=iI293WCbyBGV=A;7kFdqm6c4IsUO8s`13=PSHw z_M6#bIZ;`kje*Xf6!*7&799XX*`RUeXM4@(-$!$XPRmFeoQJ(Q`GK_ER?bWRbvsx2 zQO&|_kBQIcEW~%|Z0jcL6cCVU@DtjEPtrqi)9>kyl4}LXNkkRvV$NY2nMV%wi~hiS zpO5mCxe~gxu=|dftvB5P@OA=lIT}T)qaAM#0WDxEgY15Y)PVS2f#x?1Br+q=4FEqp zsI!HZS&ZwpczBIhQa%ni-aPT1(JFqibvy_4iJMTl+zDo8sKwdA9MWyS6IB%r2u=9? zjf6Shf!(8{Wd@r@MoS5UF>o9w)6wezv=@hlK1(+B#A9|0K^4u`2z1x3_;5r$sWZ!) 
zSK7LCwXx1GKG~W1&UhgdtV#z~j?SjspcZM#QpG@<%+09ljeb#DL6R|s0?!TUh3o#B^sBI@e`y2nFEHuAv_=BbR&|jsGq`X z#QE_&}q4oJH~a~E!~bKsH)IAq3TOpgyTZrFH0?gU!zo1v#SJJ#C@ca`?5$kU89h}9cAJ<@g0-U)>w~(lyo6|T?^o* zJCl@;r-)LXN3=+B6VZ~PZ;gZ}qNNBa%x%ov{BpiX&kn9%5m>>F%GcLTjg|UJd7*fx z`#t3C>vnW^_#lfa7Du+g7+VNG-8)G}aKgvxqg^ZeWw411vlRDZk^ODT7wSUE^Wp!^djhMXvTJ@9TSm^F>Gf)sm8uu}f`ZCe9xzalS zdu_ND&9-f_@GnuE00{lYQ3a!!D%9E{VuP&Q$RE&+d$!G%Res z2}}-7_dhY^+QfH&87?Ehky_iY_48&le{@`)Jes|-5oX~>A#5)UB6`ovd4SaqkH}GF zL%H;VQGTGhpOFi107KvL8jxKc`_{BMAqSLw{kvUM>o#~FMtzoY+&uZ!YF?{Mm_7al zDX;00sQ|r~1&59^!lfc5=!lHt?0BGXyj#&g+3Yf8yo;3ki zhwmw1&n~ShTW1pxk;0$pPO0B>v4|Z=oL3S{nt>?=74{Fddd7z+(V|Gz1Cq8kR7kOe zd5AuB#%J?Iq{gWYn%%s*mB!GHFQp-w;zgVU?+Hba1N7lM*D|QfXI;5dT&im`S4^6n z_u`!!(W7R<4~8^!TG>9%IEx8|7bY=Ogp_OahV)9 z1>k1Ziw@HUpr9`XfwRXeopc?k@T*ui(ZUaVN*4eI{PWa*yb8D&}bTu>+KD#a$;{o-n+KC#pK!%=s)04Jf8Wk)#_A?$QLn^q0uH#2PG zV!x-y=_(_sm8({6EZ)6>gk6A;{9>cG&#TSM^x)bGv~yVidB_YlPYo3>C6ABdJ!Q`- zlngF!YaxGeyt7}HTq)C(?3~x*R(nIo(99iaWOo-5>+0bZ-R9lekQLEOuBtq<2{-I(P()SMRsm z22Q>yO4y*SNgZ+As**rg9JP8v3|SWRkx~?LcAlZq5wf>V@Wzc-^G28}hl0yS&&a6h z=M=Qq!+;O@l_x+cJYcy_Z8zMjkN(E#d8Xh>8aB1!=RZ(%_{Pi@nG>a%CgVgW4#`#9 zp}QsqAPm*rcM%bOzql43>On%R0eJf-&(luCsa4KueMQ(gk>&0jR)8ZD9N1xG`-Ajk z&K*x@moz??SFJbO85bZCGNZOkZq}rSH3#Fua+ZQV3Tc**rlB$zH-IVNzW40Uu%HX6Ci$3xj7WEl-~uVgy(^iXIeD+{bSzYfOk_`7F&Xd(_1w z`l^3w0@q?jrzcjPo0&e6oJ^4HA+@%^f6^2w^X7M5jA)9^X1o-IM=-qNnDCRgoN2gj z(t4?>+V-0S{|$i(o3a8d{_HWiQ{0p&lY`B*sf6`AH2P~ise{5JHxxXEWc`A@N-C;# zdMd6j$@1o0>2d-f8uO(cxzBlK7eM{w&UePm<{myszk%rj(eF&Ed7^Qj&t6MUarpN? 
zj9|Q{2&t>qg-gT(A~&1aOK-p%nz3O0dTB6OAaenh>jaR=EnI-iJuZoLFkdMGYdSqZ zLFugSfc;XhaG6({>b%1aZLS?KQ`?8uwAZxqM1tX15ly(|h39xHkw+^Xj>s2{dLNpj zE)OS$?b)!>5Jh{VNa8e%6XS^6K(?MHT(o1TA+J?VUXk@l=0lPEyNJQw9=>M4MFG#yyGW(LswgBcP3Jl_r#EBGu)Jx_|bp}|y z0?|c2E*jDvFoLo+n~x z$u+n9Y7S!@j_p&D_knJp*EV*pjPsFNARD4lfj;(<4%2_v1%2gcJjb<=Pth(4hy6B9 zyzUyTCpilpMgwS31wyI>AZ=|-2;Ji&0}2c4Lyz6u(CWVh#HXzt0;!+TN&5tFvGlte z2)0OHeUXPoeh2r6<&A%eMuPHvyG<(Gjq{2f;m&YL_Pe=_JRcj)X+xUGSrqDPyR_}( zS$;6%tT{EYHE2=~8^OH|G!!`p?zc%$AIEH6%>@xvX=v92&D1K#0~jwsX7_+}J;i!P z>|s|HT5)FZJ7{RU2Exrg(3-LRyp)q7cS$tpds*rN!H|C7Sm!9kl_-C(v9sydn=UQ=)>ZyBb>CD90H#tw#_$0-D7>X0T4C61U^w0q}t% z15I%*@YquER38m1;e}9*r{i)nxot@9R*YfZBX3#pAo9S7zK;F#QnhHp;#+3&*)?XG z^Suzb&;gaed&Kw#=K~!~7EEbl<*UT;GNqDwV`EBNU0GbePw%iZ-Yi&dRc~j)I?7Im z@8HDx1pfF`qFfJS+#AR7?ufs7Q_Fg>CcL&%tPZi-kJdl2l-@xR3X)(R6vhlc@)hvW zW0rRYIk1f>Ob7c3#+M$F-PT!)vjmH?WC#AS4^5t%);=%+F|?4jV2CTvULrNsx`aM^ zgf)Bh0umn|eM0XE(E?j6UJdgo-1b95<&Vt5&TMJEpLJD>pbo^5PQfK9g2w;R!53-AL4|$;_>P z@V~7FmYXmP-Yu*bV)BGkBaHn17L=AEzao$p7zYZlltZw?H7^lVJf}zNEfO?o5T_)j zp}GqF&b9`b=lvZqrmWxdrETB=2>vElGJtsv?bD}yb1SD-rcX;R;dE<#vP~%)gxUvo z@}tw!y<8CV9@IrTQ2h&V6^|IHWecZ5@jHw$w%vdmQ%Z6+1_z2?#HS3Goilh80n5MB ztPUXYl9vv)xp)^O_LZ*t=j$R^i&gUnO+#h-2siUd_z6a7=5n)`hLXy88?tEV}SDPtHME0#1xu_AD#!R!8 z9&v<6`#{wQQd6!pTVEh3>|wm-v7YZJxCfGN0~fLXZH7ZM zP7@A04)ZPG))#s_=f{BHM7sha;bDm2*VhLux8}nt-%)Glb=6j~1?NO;_|e2CV1-je zJ|nV1h{~vjkn25dR;JcFgVIJY$}M6!xLr^_#(kJJ*&n>%%^~t9{sm;yvVxu$0}GlR ze{<%9z9dlXyo;ij_N@anlMas%C7ja8sneeq-{)?k%n$Yl3o3N2Qq7n=@GJ8d@FIBIrjD&igr-z zZt4R7xcL@8LPOJ);jY3p!o(vHwcGpAGx(nn@7KeBh3!*6)gCHR6|RF1#0>eK;9no^ zZ`Lv3bL_ID*`sKZDbTow3H`dy}$aV%Iyx1 zH02ADA=^4Wm45`9XOVHSX@?-@w&||-1|7IH#K?;Bt4KxAZ{2?IrG%wopt2EV@f^{o ze~X3e(%Dz!S-1-z_xQ6a?0$`n^x}?}Hvhe6e2SPwYs+|!|05X1`)mniPaFMIe>=-a ztJky1d{xSMS;=mKznvwo&;G1TVE_Ay8ZBbD&|(WP9|d;qJP-O`Qw-z-eY!Wn%*<5n zF+I`!mqOE48t*7xE0w0x9kK6??F;kbr9d9&uYSMD{b!|{c*4-8XEH`(#$h5*X+P9+ zPewG6d(Tzts#mggUePD5_gBiL*Q-0UEUU12u~Ds?o~kJ*Vz3^;AgD14JL2qC+2Jd$ 
zxc%PaG#mSR__ao$I|84H%%;~0Me;u_04{>6?lSHQjP!+F^AUIdC4vqENJ`jF-#mY{ zt{T9q+}L84Zw1SFt!N08gjtW;C`g|ic2=~crCLqK2eMIo}uaWg1?(NKNG$(qJo=>buxEu{O*`8c!@SPc4vh#q(@6ONFC+ zqmD%44h?u>mDY_2CJo|{RgWQYoz`FX?ih}CFOXLzwUuVdM~nV9WGK$g9LSxz%vwqB z8>QU~N!Y0Jxxv8LLg8^!&Dg&Gk}F9z$N5cvyD(3piA8Y+Di;rKO=uzlG%M?vUEADL zM3`x9s!*MT(&R)=v{WeyFWa!OjO|rg>0pGjI+OKtO^y-kk2V5ZrGcA&=ChW$!;!0H zbn4Zr#Kl<2qVTtXVm?+Obv>Wj(!AfMPz&$Y+#A3(CdmCiY*+XetBep)KlpEL3Km0S zb@>0q*uQKI0~F@Zj1P1)X}pUx>8=Vl^4tAXyqYKLwdfziMWI*vg&x~Yg~&@<$G@d9 zJUV=xUj25SeygXqaaMqWQoM7bKFyljr;FtSUAYcAQhiM)9w=zhM8tKWO*pj*Z3_<; zqAmUqaQzO)%eRY_U2rpz(U3+cd8=I-I}PdM8EHW)>3{k$!Fd(5=$c29ojtR=4NKBh&YW?=Lv&5+x%)y@kcBEuj0My4 z2k;d(0&a%C(F#!kDn4cq30&{tvJ?FYN*-F74I%VHbHU`^jj~;K8T#tbEqN8BWUkz| zU~oDi;{DxgHWEnb+!*;1M4VRWh#Obh#~NJw4P|*OXZYr-OJ1qNN}3>SnESaCEYjT+ z9dg!}O3n^H5Qh0}eie$r!bt6uS7_B{craToZ?Xf&3IMLJB)jz8AOdbdsH=lTM?PC9 z)8nLpUS#J@$MB$nNO+>^)~bw=Y`~Ym!83y=E%paCkwZ7zgRk1GbMx~WHhk-L*4D<5 zE!#ejm!Y{b4}_#V2J>-7`ru-=!@vEMN?-`^P&?R<+9nqAeyb`@1Rc=y5t4*Z?ZH`A zdSzj`Q7(IGbRKWX^7lYNO}#Stv~v4z+<|{rpV}k(W=dw=NMC<{+$v&|t8^+2zNgGqZv2Uz4M#B9sQwVIr3yMpm{^ z-H8BsiY`*f=v%zi6n!Brh-e(UJUxL#N>XZ};8T`w=B?A}Va5MwxL=n3glB3m_tDyb z&PDKTQX|Z}+}J-JzfH5y*B7H^{hqNJCVc*Ne^`x-IpA=KcKP;Fd?b~lJ1!r9yFUe; zF2AZ2zjZl=TMiJsobQf%_%Vg@0sI8-tR-Foyhh5IRUN^;Dbbktx+1ymo0i7Lg`-ex z#J7%F&XY3#ljX?NiEMK~LMeAgc~|c>+BkZ?RK_2jb9_w@ zvG*KGd>LJaot>+`KHa96C6ESRwY+}K8Y}dkW(%(DJ1@K=N>LBnTTLPMY8_wWdbjD1N-d;MaqIEF(Vb!+X-_H}AHc(rNdmc53BWg_%ci3_C<6~-$971} z?OcHnixXK`0BOBJ*kPlm1=7&z@KH^zsFeav!oIBq=8y@EZVJ8+nw<`B0Q0TO5p71N zLcmul#pL#;SfL4V)U+x-_ zGY=0gp*Pfp4NDf#O;V?p3q%4VU%ULXs9x}Vgo|~a$u1}ZKX6_cD1Bs>GRzhB;;9R7 zTyC54S6AiZWAZiB+JnA}sVszpenv@Q;^lFbu8z-rDG730NRwwduHSw71=)GHtIZTI z^(QuIps?fGT@~bsXyvw9{Bh}G3wK=k-dZ7*J;21ruH~;}I+t1W4!TXIES!54^F)bS zV&L))<^;;?G#C{syN-AW*qe#R#6f?&sjmjBFQI&=bt{uZALan0l$P-bFs1SRGx$Rw z*N!IMHh{7RU3?ns~kfWtYyhnvx7L=6E$)4g4w3WaKugto74)>Rb zdpo2(%yWv#^7UoH8Wb~7t&=%wnhDu1zkOzJ|LiN_svJJ!Nr1*@dvUbwGN&tUV^dD+ zh3@pOva`g;){>L;3h$JH^H;KJM-teB310Ihp6bn+FqcEPj%!%9h4KClY82S+#x3WC 
zwPXW(AlFj_gsi-DawYWnTMi3Lfwroz7MAQ5Yqn3lQ4T6$V z4v%7kMbIP>>?u*aWoL}54yB8|ZqC>>d{*i=s z;`&J>b5|&iW2bg8FV|C3-gH@p!%7R1RPKIkFh%48?sntlvUgTMN6gr}qfakt^MQ;c zWcoFq>F;i+xO21@;0ylxuB+gQ-e&{?2?1)D-Wlmc&TV@4{2CjH69Rt5ahdtfrL3k( z6OU);*~OKElH*HHUZ(?WtZeM-@on;FaSt1-U0`W=JjN*wz4o%>ioCuBVz(Vu@zn+O z^;3z+4KbFKN1tM17>)nucI8L>;*z*(_%2E2nnCqedO?5$hN8Kc<4oKNzZ9E!)bHX4 z(AABdnH$A0rYze$Y%Q4VEByd!UMB4mwPY zS<(;)e<}%!FwIJTBmxoFaCfnXXe!Q~=u&m$xg0j;QW!0pNom_mECH==pbFozBa!W>~`|lBZ-4)The#kezD3N;AzEAg{qj zChWCeV#qyG)it>2V^Y!~I;s<*3glAEqupEU$)p?(cyw{4p9l9BGH}i0ih%r>f;e ztAJ0Q?erlF7T~HUoX#VyX-iu*k@#f6WzJlEF0T8cik^!|;19mw<%n#K8YuDeZSLPN z49MP$rS9#;ko&(47|zw7uhZirqI@F@1ckgT36fHr2><{(;h{!!OXn!n>NRAG%{b)3 zAJEtv2rizHY1r18nk3Ovbr307i1{R{Vyd8{hoyr#sUY0UIh$6uxO_9}BHdwW(UQ$cPJ zP^7spm`%Y(@m-DLMo$@pR>2kas>Ed>!*#uEB(AZd{>Rr^aI>q5pME@?f|k@HT&>IJ zxl5@<2Q&Vn7TL|)ygY@*T?Wa#giQjx4Ul_Rf7fn#(dV}ub}cIq=jqvE*Ir=TuscAZ zSqy|+M^ZnZY9t1fZQRXA=z2ij^gd--6MZIOQPW|J8$Kp;{ZbWEnr^Yah2Vl6q}MDo z1rzg}^Rua0WPhS&5w1TSPS*z<^iw~BE{wL=37&5NZ06l)=@cr&tQ-r{cg3I69PpTq znI%vJ=pbQ+{odq(jKY344N`|cLqcX3zfkGeR_WbjY8f)eCB+xx|GWA9IMpIvr7rQb z``lz7>Tl8MR$seP-ekVyucdrE6NG&!Uad}DvSIc0r&i)~!HIMqzgl>i^dOd#s2v{%8OOfJ)4U1uKn1oG4bD=QT*bUvf@hP58*3v0=Uj*C)sDikw zS4P02m(>tG>yPPKSeE`dH9VD>>2=TPzT`=sb0od*QtP_tT2@CK$j^d3Nk3|1xMJeX z(AsB9KeRLc{30Cx&>jf^ifPQ~qwWDS+0JfRUMb+4UBC{Yz|{Ms(K}T29(z*8*JW8`NpCkB=v2i+wO99Z~F zkd?#Vfc_tD{e}3z?yQJ}r}zWQ5KaDCYl0xoJhPzFV^yjRyV4ZJvqS?e&2 zrZ8Sczxu%*;7!1G7+J@GI(u}`N$9+qbDh>)7N@>aE58ai8tueXA%v{-+mrwo_<2)( z=)eIGdMgFuJ$$DhUZQ|l&<;>}Q3Q-U^PydupVe&hr0L$fCZG>SRChy+6QJcxRtygg zRRoZ~`oeNFh!!R}SDRdiDUFqc$!3!2Eh~$;__UdXl?3opQSf06Y)1^b>#ekVJImk* z&##TEdn4UHDwywHZRbr4by-*jBsFm3RcAa`b29njI6b?m(wsfIJzo?h*IiWkEta+VoP7iKcx_iG%I&L9n3wp{=}HwXu%{BCCv-=s4S?ByH?8yFhqQZ0`Kk?f7+Ip3_Ue!3@ zkdXki=a?_<-2;pz>4zv1@@Uo4pJ?wIxF{K{EXh1Q^9daVDBn>p2gL^~vUOq;9u<(YzswP2C1}e!+8tjN-8msWRo5P`hLeV?C zPj{IbmThB)#D)j?gKquOI6gG$@lyO0KvA2yql+6V0C!qdU7|akt1l_+DCG!Iy3K=a zI60)v5>(e=cvROon}H=+nVe_{d<;!Pr(aka$8ecWgcE}GGtsW}Lh)vjPCElycB!Zd 
zSt6?ZWyot|~~Cxu7_^`$Hoy@6o<(EM;- zz&sDNpGdx0&2p;v&CD0bth4>1wlxg!-+@e9H@8*G_91GP2K0ZuG;nB{s3vr95$Xmf@%q+u-T9~#^6!v;i zXLZ4M&P$?jwkyvnAzgjvBGzcbZjR10%fz+41W32o2e_`9PF-!0a79&ojjlSvnld5U2rxp+V`#bs_K9xNYZmwEG2wc2>~Zg9|lGn4*$*Z^FA*i=Q#;+3+0Q6AQ z;Dhf@aVc8{I5>tDL7+K7g9eC=Kz?l?VESurS^>5@6MDH#faw+XrQLe6HJVdC3uY@0 zvv2TklGAgu7{h$tO{on15e5B3d^v1(6t~p_pfWO(&^>N(^#7UXAv}LrX3(-OV(4S# zwW2czcXtRxG23gnxPk(-y>fAw@&MecofyprzYy+85w;Q1$(LIdfI*NJ#viv6Fv z27NjmtiQSn`i}AF^i{>|_ls!pGsTUy5Q&pJhdq+%a@fFvR?|yKOEo^A7EwXdGVlZ& zXsmAA{ECIefDYFLKuxxpURe4C!~uYt$H-XkDgGh=fzE(034ZAnb;o#E7MrEhwP4)f zoyl{41?KRr^JmNFAac@Uw4k)B4velBcVjX)?5Y^jV(m9pz(9zQs=N7DEi@rl;L2dL zMC_i3D=L5ypHRdDGXKqrd_I=_E;pwIRa|o`B#54TcK7d;+fvRfj6H8)L38sT%pI+J zjviLN@oK~Z_j!65p7r&qN49f;)=-bwZx-H(?I(q=HbFzo(I#SfVH<;M z#ZweEExQ(3Qy_u~2R z-fl7Ul)z$)vITDYj%qon-gTw5gRrGhkqZ0kjo0Z8k0Kqk4}5-sq~I&HH#Y!Bf%b|Y z6K4@f>IIA_x}_G&v=b@wjG&ES5GxC%y^_iH`Pw($X}n|Trm)l`c(#B#HW%N?4+pfH zfyZ_c_@ko{lsXzi%1t&FH-2{Cah#-aus6hBs*^iz&)+D?^jw#xd`cHM)8>$69-UQ8 zol_>}8Bh|>CWtQlQ~tVQ3kbP9JyhY7ve3{WAAsFH65S>bQHr?W-OEhK5f+#NR`X%W zj4!PewuFvh4Cv@(Kux{97$B|~YwZD62hYH}3RBo$Va?S=KkyfxvPUQ)2UU7gLTb>^kWaIgqn^7)I*8>3Q7?m^szn;1vAFFYe{ zlXOIMw)@z9zG+L60Jx;lc9d#xHl2s1-~pRp-XMu_g|CGA(m2x8ODb zisjj0b}z8QR(>Gl)shU8I>hhjb=^Ir%3xw66=Pk;vSg%Ru~j#i*m5X8cv-wk;5o(J zX+4xR`zl;vt?(Tu%g_a^B|p1}z^0)Ms7mx3YJG^+jNZ6LI#@5eqka5ChZcGLklvI3 z1qdlmofk*Yv?u5)H0y1iyzA{^gMk5fICegaR+wXBT3HAlES64W(Tky>n>pk(!6kQ#Q+2i4Q#xczcnX`PCCE5d!QDh}cGg?DVPucxFcYcf(4hgykKL%e|It{1FD{ z?s0$=-xfeyFENiKEpkCCw^wOht4TanLZCV?Op|Py{)V1v0q$6$0=c#(ofb*;tE#F zw9%pEoS(}MXc$NTK$u-5O9Fz|3%HMRw|tHD=Lwxa0gejK6bF1fBlN>OKCm~2YqGAm ze@<_G=vv3H8Hx4lCB>PXfN6QLZNhcc%RLcv-b)5NyjHaKz~!hVP3{5BcV^r7@U8*T zXcL~0g&VEcZ`*AXUcSI0r7gCxF^>bZtL()0pkf)FCsiFMKj_uN$;!JF^DN=M&gm@; z+}eh27KWOYo|T5ybx$kYyB{ETVPi}#CoH1(`)3ooKoqb9u=FQ+znx?|A6a+LurL-+ zzuS~s=1g|ZQ~OJ+93No|t4rHad^|mlCu#bTA_zQe3}t$)r7pq7+|&Ja#3;TP5#!K!9052y>pQEUx>F4U;64bX;s zY%I0-ISzv{Fq8{M=j696aB$$#J$d6hpe4$iWG*H%LD2Iy#UD37KA^tw12FxXt|YH? 
z1#n&{HmQo~pIHJ^VYRC`EerZ!9GWmC7MlcjpXPdCNxP&00?`Bu0ON8Zo!UTAprwpB z*h%0wkB`OR$Az<6QM2&RmQscch-|DZ5 z3fu$vq-xiH&5@ro*O+udKjY#qjcx~TGP!nbBb%)DL{q^k&mF0>vF!t zf>za$_iOVwU`5iKheC$EMXDhtsev`JhIjM)hPmEYHaSuNi>{W01O3^J2RYt1(HRdO z{k8Vcy&1q=v$q4uX(a|GBzPk93h=xbkpqd?-pB15D?MJc*|y1j4(+zqHaxes!_a!&0R+DR)QK1wcJH+fL!aX%%zExJEfyHBfAj^^hC#!JX8`ky90ZsiKb}cZ zxwSC!*v!(jb^^*GS4)O;*{TU3r(;F} z7WO+=ByI&Wxc+v07dW#k29rnY=&A|`3byDtJ4ZFO5qx0z9kvazW?t@*Xy)WsI05>HSEyGcu(MNPZ<1pt=N6XRsL0RAA}g_ACEh8SF! z^GQtW$jW%hUpcL2ZsZ($Djlu}EPTS&iDwgz3uT9Z`7>|otP@z%+U4THI1^Sw9^s!l zCJ8J)IQf==Lew$0f#;YouX=yaFlJp9KCaPGj!EP;fP3|naKNPyhqw2`=bXq{&Ea!% z4OVM_b8r8${rzY6+G*@00S$DVxMx4*c1v_!F0imp@si_+zsG_^e;M}6u5E0_Llq&{ zA`j~c$@`?v;4>hK?zR*Asr`i#>PdZb_|}%)9!N*f!@bptEkFnu^cDwldELAuGOK+x zy^ZI9b$7yWgjLArSP4=>82_V;7f!Ypaq8-gMX)R(*LjVq*?$T#hBZZ3qXxfVf?pi^ z(+(P&3O`lu(N)x!q>{a}NeYuke}n^=ps}9skZAFd7Ng}h%pEUj%J;EgW1>f6v=B4= z*9)+8bGt97hJH(Qx_Ppsh~9JFF3E!ZD1%x`iS_)CQWW;%$MI-XHhA^1J(4o;=a2Kd zx#b&8J?R&!x{2bdo|Ui(QBIxgecH{BLtpSDn96@t$m6Ze<&@}rRFotxd2->G_>6I~ z8PGDfHLjc< z05Q}2J#qd^cMPq`HZs7xCUF+{o=%I4RXK@BcDzJ2onLlAjWi&eKAVm6E8tcZX?J_p zb_+0=ce&VP(b3a;6Ux54DtFsd(7}$x2YuApW_3e#u1*@M$2hle<{UN6Srh>slplvH zL=*g>KOnhhY&u<#f^X)w?Zg&u^l4PwwnLm;iL}uo z7&jzniVF)_C_`g$;Tz_xhx!<4J>&tr!Ho>(lC&JLnzq_r{ zlmyLy7hFtJepJU#7L0!LnuHn9wKb<+ySSD3c)!`gofI^ZS-16=_<4RlUQcDB2>mR> z`EMx1?$JZ+$9WU)?bbj(te_xt23bV;94fnj zJM(4C0w2Q-0Kn$0Mt--J{PnotT%RD<71M`u%i#}j2c?L%X``YtceKTpDSL=EiJ zb2*Llim|X$_9Zxx;GV~?(RS1ELBhs>NCS&zPF_NuB0nnX!6M!{cbEHV+KtA zKt3N_23DU7mEk-Z4q`5^W97GO?lMOAP}Lo&jJPbn62^tk~&83gO-J*GR6 z`pkXoDQmxjA&fOmGs>Zd{cMAgZwOp=^bF$K&oDu^hi>`KRGI=pTmX|w+zkYTU0a-h zjBKg7TJv2%OS6L+uW+u$PQKOh1eQ;xpBM~*h|*0V`J?ODbsaurXd|<5sEvDNZMSDS zPj#?3B1Bu2;M3)US z!ar=Nd~+Xqg_07EM*jfps#l7$40$BMg!mFn;a&|U{ubg#>BXl({@~{cWC1U`4wKVS zgR>lS@SDK;_8S`MrzM9>@f|)H2XsEg1SxZBWP`Z>~h7y*? 
zDJn=Lhj?X;mPuc{5o-dJxQ6SuSY+B|BPyD7^z->l$n$c_??Hf0TWd{v36{Ee@gvesm?r7FAeGCz(4?*o-}LI5d2!xP!`^W-%r*s_;c~sT1ae( zVPo#Qk>q4f9{~jTQ+?^bbTA0g3D_DFLxJXom-9)Ek0~i%$%pHH{KP;LY>UAc@MeLKG_x319D_93(sA?;IuU!~}3mdvJ6^6awJL_?D-s@s!UJt0dEP9(20?v{*Pk}~D4sk8E>H1!D<%`QUcd7NlX z7AaXDMumQbL+O6S&xMB|$n%XQP6{7E|3T{kOFwRjFC-<`orlYuiI5BCFPtkh@f3cN zrt$H&k9NyMbn1fCS&`G$%1+ErZX*YEd_Zbs3g;a2Q1-E(XHnmtMBfU8_mnE672vkj zxs@0F_^9CI>X^8&l9*DFuGIN>--V&E5OzKP43i)0{zuHiV;mLR|EoKl#gXg$bu44kN}cN zY=Q55o(&J()p?k9-hVicTf^K@#oga@^dR|k{PU=4_v82E-OOc9k>(X|B7Hq_Je>)0 zPkhjr;W6x5mCbnoqBjo-kXIaJKz^P4MVaZ$2Qm zV*BvYBU6T93U!s*ij}++$qL)g9o;B-2Q^AtQq`UBFYE+X&gg&fWU&0t0Atgi%WZ!Eu@>-`&KtO8 zHQet4`k>BibM*V6V&&54=4wpkRJG6Z$(wVbq*|cH;kY&$*Jd!!#aw#zzWXEbZdOo)(BrttV1o&pyYFsv9%!k! zKTXTb{5MnI@nJ;YHah+?QlQUtg>N1KYFt`#v{vuHPt~2fpVQcZ2AfZ>>)&EBuKfJn zNe9Y7uc8!JmYsqlVoanYC%C3-zl*6tARa6w?{MWDlqWOUMwkcMipa$! z3p|;I3eqa%nvIM{QcjOAEdCMP?26LioQM*u>q*Z!Nvx@>N>>8GE)w8Eg2Kv4cjYQ} zfIbI1pURMsM99())i&<+qU%|#2x){>@VNA%^71ebvGW-sD`zc8A$wBGAdC-%x#qpx zeh*!Ayu%qH?^(ZyajZ`FJIqYQgf{@=wfc}*=SXdM>ODwS zIg2`EkB4DfTTHfV>a|ucnwHw7P~Gd#bYmF_|f*VQ+T+Z zSMst0Y|<}RFMD~}lL4c4C71Le;5Hk?9X4>_c!$c#WV7rJBWc#$6O3)APWQimHuqX@ znkeS!FID<2ZQb7%&SjOePfheK`}i;7Ywn|!H|W1TfjB1%dWwLIp77^u9^Ex(xS9)n zXnsBSy$iK`m@F;ACSXRfyxwt1t$u`BQ^K59UYOs?Je~ z=cEDLYTN||kD5NFZr4c{ET>b9+!zcLqwGL2#s&8o6K;@eNxS(frtgXYB@+=?G zM8HEa?Rq(FPsP2QG_4hginKcjONBgMC<_*B%jHr8AT4v-=Jwp`#?J}9xe-0{4-HKX zk*Y;+YoT!6wLm9)N7i^jm+yym;?t9p$Mvpm@CCh zp2a0p8o15kKEG`jRij!Z$4^!xn-8dSxlL`SY$=f>`klOhGq!o7?EfOdRz+uz2T1*Y=egLA> zcy3>{VvWVlRD*EbRX=*;#P9d;W^;eFr#z~EIg%xMSo`d~k6)5A2jpb_eUAzHJF&T` z4}%_hG2=Nb9o})tg8XT{L0zj8nB+;y7JB2kY&W$}SDf0q^kkXiW)=s3vNd&wvojAc z=gG>3r&L5f?jhq3{7z&kG%RZYR)G;(nh5)pnn!vpw7I#ZRp1&M+OMSEU(?Aj?`>Na zCj?b_qlUru=)1R4>HrdBm2c3A7jsh_025K>RpN$T*zV3Q1Q2P0kSPb(!7XStGwwe_ zUfFlI81{GV01JP+2=Onin`x#-kzr<*=>=mHq(hj2>}MXjMo&<^1AyccC3X%E%nL}dz?Bswd`APCypxe=_8L;lD0E5Q=rePboA6^>}n0-adD_l!|&2` zB#N>}7ErYZTt-Qq6LGAs&j3iSson^dVH!?%V))qmMB1;vpCN!?D>` 
zwR$=_--dWMw!hmRlpn4A8RL{z=L&ym%0qD|6$@7_^#dE2@+P|NzT(`{Sq5<;>V6s7iZ~wy{4w9lV3O$=Nh5;mzFW_?|iEt0{E>~+eTTI@B)blaGppi;F$4wd-=n8O@ zO^Lui=$QkL(FKIYDM{Vs=OTfgoM-koXr9XG=AMn>aPMsA{Nu%yDi{S=Xvpi8Ms-4!~l;H7793}G3cv}rJ=1K4B-0g&xqk@ec`)FRKRid?AlfC z`zw*YdREm~wex9x=iSi?$*E3`7ta{t$g~v{PUa?}pVfUxNBq_P$7I!X5oAEZY$JGv zZlKBwB2j5VNnZ(C@G{5FOq|gLI0jiXFk5Zp&Ye)7EZnd<2BX2hvw6S6`sIe|7rZs40Jm-fTFwMBI0+Mtj?7P>dhV|KBqu3$X6a)Sl{mWZ;`@Glj zdHT-sD%Cve<%>OEKL3A`RKfCw+_zy=v+o)j^M3#M_``&MksN2#y~*vD0%b;h2FHgD z(o_Gc0j4_?vx#Wh z2jggnXR??EB6XOxf|n;L_9QI3akMyh^k>YcnBNlLr5(9+GPkg8!zPr2U*Wyy%V+Hl zO5@lr^(3MobsC5`zPhRpt5Ws4H|m(jRk@ugxrK92vw z?kR+|%8f+C@baK=CYc(|^F_+f-e0mTk`MbTpCdnDSeQq${%eG@rG5713^S&*P_w3BvSI6L|85=Inwf42BvhkqwTw@eLSNj`u zU1F`%pU)iPA}VAoe&DfeKeRaNL8pI)UimD#2-{&<&q%mgTN5p}yrmQWyy-1o4ikKz zmon?*Q13Ty`_inm0ojq4&#bz(wI6mB9@UZB+y1wW@;H z_Jc%Y+++~Oc3H{Sh54TbM(Qf&hNJ7mVkQ_-`g>uwO!M#4;@r{%PHJ9l-%yFlPT+$_ z$Aw!4dP=i2S?{>VPVB!S`%;n=%87s=Ks*SeWX%8Tj%h+zJOSd(TY)4a3_Afe(bw_?;CHtmsD=b0H4~ z7G&n&Y9Lytnbf{EMp|8nC<`2tkQgwf%#1`joB=owy$)>KN4M>SsXEP}t|Ay1Oh4QV z%uYqCXP1(frmwhJuLBC8;+G-q>R&TznHqVQThG5_A0?Z!b0VMC758#Vxz`z=b1b)b z&=I}dlj>4kR8_F`$h|vIkNLg&HaMj1xD56@@MS}#&~*Bu;gm5{A&Vyf_}Kk=po0;{ zkAGmsXtoXA3T_P5iITKfn-Q&j^1xo+oM9vfaZI{VSmx zz}vwHH=9T3OfE)c03Aa+b{Q^s)Ts%ruC;Iy*iLueoe@O(jq#6N?yd8CwY7jIES%L( zH&Df6uj|S<)l0K}Uz2&Ep%1=N^+v*CphiIqpSh68CCa>+`BCWJ3=ZVY{Jh1kDX%!9 z=#Z%kLBAHUYNQ#r?}jO$FK3r)y!oE`Rw?C^N{oZmgay32(adlvm8Zyh5-`+c z>IMJ#_Q7~w7wZxmOCq+#hRGG*hac5D`r-bkNEHeXVTq`9D5tFk=pK0`s{5jon!zrz zlu*=-7{q$L4(7kg`r$Wb_HR>C>;a$QvSYK4%BE?*nzuHPdP5%ci5#TdLqzY{{sEs; z7txOH=J)8(e>Y+VbHo5)?fU&7#A)-#JG!?M|192rr&LjG{9T{fBD;QcAnx-n z>EVcRM=-;&89^Vf|3zugkb)QY6Z)r17RUS9T#=( zj>Luv9fW24%b;?>Vk}alHW@xCanzAKn=g|R<6?_y!cvvM*)4DEqnWz1>Moz@_P?B? 
zzrQ3x68khs_(WxiUgLDTqJ@2YRwQkwMSAlMU5^g1cJTnD6U~=j*C~4wd;rw67chlW zW7QjChDu{u=Np9Btk(3}zxF@1fdJVdGxhzMUa%%$sRL?r7pmXY0Rqd5MKaPMQj7Wq zla7rbbh100s5}PdkaV1woh|bpK%QYNkqE#0B+FkH#CahQWCzkXTmCF~KJFGnlPQGB zx&W~gWqJ1y3Q^B`4fNH2#LuHVjp!W!jh0PEnvyRXbbSSm!3HzSRm$LC!nZ@Fd&V5d z%bU3ub%mZhZDB!mj)kRZRKWJ2(%O(|)W+=9_XcG|OqjH}4l`$$VdjodPLU#H-(dMzwAF$aXQB5Gr3JQS6YRp}tPj%YHY9 z@^r?New7y>%SpxXYh^5;Nt@-%cRMP5(F%atHG;r zA)Pls^6<@5Pdv59T<+YGe6l zvwRVhpQbeA#mHhil=aX>>IKMp(*WXfWI?0go?u{A!vI6@qUGgX8X$n=(e;bPBqc@rU|fe9CBcBvg*+0boTKZ_R=oF6tu zOZ~aWUTW43lQhB9Bqk7Zb29?{3?hFG4d?iL13PDz;4kg>`Zq|f9m)XNFWwwSJN+{w zxy=P|MJR$vR-Yg{7fb>gl-2iZ;48^2=btgQPl3V5&NvWWZ1;F|P zJb?WlWy>vn>W@QV!#O7fx&zb%X`l+DM3twFef+X1Ayh}KJeY;;AK<5sjGePP@ZY)zJmPjZ;DvIS41z3_0L!5V zlemD5GkYK{dXJ44l}>VrSzXE$6Y36p`Fn78>2NYzjVd4AY|`CaWSRQ5S+GYFm+nG%N zC-fOBswiHi>2eB>?YcS`DvLQYH>aCy28GLn?3JSVVFj@K`03SHzx)FfSuxjEEKR}$Hk-Rn)>O(Nu)DgV zi||vwewLVMwSC|hWdJ$BdiDaBn_Zf2>WmR3j}BeCe)Z-XmQW4A#sNH>&IYdN>33>> zfeU6oSN&kaYI2JA3b|PT3|OP3i>@7U*q!+*Qp$4eQfOFDe*pJ?xrVV0mp*Fs8qTXd zIWM0lL8|Z#!rJ4bcNW=aP?Z?CscGnNmOy$%O9Qn$STbieKm;UmiziM{8j)<@qu#(J zT+z%UduEC-SW2^4-eX^Aaxpabm?#6HCy}X`EN9gm@4K!qijpTJ;;+y5nAuW;qK$BE zVB7HVm?C6llec zjczdVAdPbAcOKt~;r!PNP=A#nY7u*`TI#>#$18J5g2im?PqP_1*^kbQ#XP{oeVC#A zCNa+kF_qKqg&?&*w3kCPZRFZ-%-Dbr12Yo>1fyGI52KUjmI*wLB-V!c+*TwMsvn}s zIH`CH^XNEt4+J2B-v#zUghe`uE9>I1t5WSDBLklog)!a&OyGcyd70XAXR+Vy-Y>W< zIy0cc09K+uY6E~1Fc=GmX|j~#am6x$vH-?>ZmRXj+z~C9+D!7;SRDw1Lk;jG$}lcH zdfD*gs2{PNS16L_lZL*Zg4vE{yv6u9gjc@CAbV1X+ejtfhS5$|ut?UuXaLu_3cBm3 zc`n~gjoX+?Wt~usad33+;2U0%6!17l(|hKRJy~{&i)(qoiGXF{f|jbrMgNQ9mD$py zp`+xQtHS%D-CA>!<|dpfBXdh-3Sr)Yf7Q8}t+qdp@Q$RfgOq!wyq3prMjHl_Aqj@} z)cif-hppj2SHT!hHcMJ0gJ9?nW=N}z%HU9i9CzyqQZ#!ToCU7P*K-uPcWPlX~M8ZNz7qV98g-o)#SB@0;u?L0~SyN;Cl z_#YcUj@uNtnej3hM2TNdcu!yF@2^n6CT*<?%==WW%=KS7O7O;P+|pxgP7yTvgn-? 
znfstOU!ltXrp{NkzBjfWqW0hY+F}?_grJm;le=yX-C}mi55sq(Tz5MbzLEU{fXO@G zk!R(i{W@IrLonGsn#D|Zk1ywBK<|bK2an4jF|0tW>|Su zOPT%-P#!U{4-UOJuvzuPiRgX`mjf0nT`m6jAea+Qlfvt!tjY;7ABIjT6{e-Cu5hn$O)xm{0{(_svWjo&Z1Zzc_LN zaw+nkN>E)kLBZELy4Gw>awd*a@viV9ReRBdH*s+1xKp^?iW7Z@je>vnfb(!C%m3+B zC}%4K%cUg!0?1%Yevi$A9wC2e+rM=)I~DBebPQcz92#wh;6``NJGE1#Pfeckcp0_i z?5^>MPw{LH6FH{M-fI99o-$kH;_jY{v(eFQgbO=qK;CIP?+CL(i6NWz#dUYi&9rO> zlLq4_m31YM(oq=43&&lAH`livU4E1rU2SFhHqVuV{~+SPp9mCH`8Nviue{ip$)l@? z)Mgs};jGlf-j<$oOo_T7W!rmo*1`7yP+2!yi>kXVOcYbd0+qvLKSghv|P$VThZc_#QBhD|KEqy$732Y^GrnLFAh|A zw1e@dyVEm_?4`NJ*HfVamZd*`dVgj#cmh;i!0f83oG6|Z{yAv>_4_l_K`)XC-qHw4 z#f_;jbZnazBPqTg{Dh4C{m%^f<>We*37I8o^S18%>-O~Rvw&|(_YtnLJBQ|g#^>to z!l<5i_CkLS&N16JNT$K_W!vT8ZS7T%s3)_YQpW;F@|p7K%xO@M!uU~4=B{Jh={Y|V zQp~hv-D$BB1+P*+&)CnMn^D}P@)MDVG32#$V^+Am{o#+)$$_d;AoqXgLVOO!u6Oqm zAvGHobzM3C=VJ3fRtRS? zM>NZRP5E?-uD~ySezMTKB?eHY=<4#?Y;;*GIHLz%%tI9RISfI0w|7=&@`YaOYm$={ zAm1WNXW_}E@1r!C`cV)w#;%k$60QhIsZcAm%86)bg7`YFHSrbL4vW+N*!J&;>GpeC zNj7BVyDrQ1pt_oh?P4&)qrsW+vskFnQ`D3BWs6s%1RvIwz*SWwijTJg1vBH3btDZ! 
zzUu06)lh;cXu6LJU>dDRg0Q`^%Y>8P;PWRCj z#Re90#5O|Sa#Gwht++8akR7?(+^p)~F&u#N8o6EGY)Ww&wJ0H)dw4ivLhrohr|EA4>kmejqh@DrEn7-2BM@Mm!F) zo1!?ktL(foKJhxr>ArQ&=!d0i0O#DUV&)sB=UB{KXs#)d*VmtIIp7VRxC#qDmAaS` z4e2)>iO$ZWy3!`?!{E+&1UmoyN@iifyzA^y-L?^tJ`Bx^a!`8`%q0W1!xTk+N`=}fB>8}%_nyviptJeTM-#d)LW#&x$wIF>FS)43`X4=Tl%O5ctcaC*MoRlAIOmt7L`Z`LviNgxv$y#8roy?SSx+ZB8@`u#W0nA!w%Nt5peFs#<7>2T zl*8M0&Y#acNL&za=h5=53evWVgs+2VfAWJ*h9&9FuH?<3NmsuT);zDe7AW&1qdW)F zM~Q8AyV8C)KyH*$E*#%!uA6o!+4*+Xg4Zc)AQJJ%D0}a|WOdft!#kqGg36SsTd6J*^{!9#aX15tCpG*mnxE!irkRePg z6uyfU=O!Z`EY-QlFgU?~>w9cy_{y1|(j!W+%Z5P5oUD-U1WErO9%+*-*&46E6K=nj z7HZy~`F(&dF6ec`XR7}FsYM}B`Z|H~!Li|b*JihdGyo=z zK<57cr-E1RUau>$pzOh%QbZh{ZBjrDT!6ZYs{DTYE)8U&fe*WnW0@y;ptfP&J7Sj} z$iXdeOSZn7n;Ee17~uR)#vH85CdWB@k>9OX%3AYpb)`*8RcFrk-t?{&HGYHM?@Yr$ zq4qdTf{jcTjd@DxFIyJh2>M?;-YIQ z*be@lf7X@c%dw|pvAo@|e2@x%n)r?l7FttP$G*t^^E6C_b=LsFM)v=Ty7EA#|2IA) zN+n+;cc|P}j+C>M`zn->Eg>OyZbPS=91+4dhE~py9NUyTq1L@kp+cr|;>1w-uGQAZ>nQ|=6z@>B7{D<*Ko z4Z*tpR@Tl@+iFDj0z~o#D%fVZ^jqKfy8-gG52+Y4R`qGQXW{)v4kAG1wgcAp zt#FlU-7bcmM%e`;;leM@@ll~xh_@}{<6c#p<^AjTPl8P&l`DI#PwDqmWIAx(Kuj-2 zO&!Mew3;EuRe>uQXIRc7%)t@^m_2!=pe)=0CS+9!=ZoSi0rB0`cIby<@uu*UZ;hGs z&XLjIq&7Ja{`tyk{`6FXZy(%^Cl(mWu#7vbsT=cQv;#$IVZ-KQJW`88Z3YE)ww!3+ z=_j>F$aBU7j+x|7K^-8y-SsZjR#?EH)2bz_?`N0Zo?FEXyr~|`$fKY9Oga#q@r;yM z({%6Z*(I9I?qz;UUbr2jJ{y$Q$a7itj^EuuSx;KJ_LfS1bTvj>h9WJq?pdcJ+K_g& zTAClm`-;d+dO`0T`@7YIaLL%c>Hg`Jr|7;TAS%cCN|c|@{g03K=4K>+^ix4`c&DJ; zlH1PAJDAEA8(vmnQgcy(YtfbE6dX?@M(@~pcrAN%5ek=FVm?fG$D%s%3HaLJ=qr_N z1rc#*9l<-~8GQUAu!;Msc?%A;8K%79`xXL~Ay?kedIvffp=jqS;xh*-T}7!gdW8M( zU*CW$M(h>Oj#-YX*^qeX1~z^e21rdf%g zn#2hWtu1_^%F7yxL{(*^Jyd|(#)GuA=|%UfWKgj zlhMZNDP+^-Lh=?GG9&n2i*Q%uzYRV~32&feMf@C@sk;BxL0q%1Q1!;tqP|Fsed)v&uekoC_FuJgr}nDt;^?4fq#FA)zOT;5P-)6 z*DAO*YqJ}70ek3onv$o{n^E%4q!`>?bkyx}J~6hHWt304lRZg~6b}jfYD8$5%lDX_ zP~u?@T6dUs>^_xKCkuCrAb9%L?7|jzi^Xqh@6XK%_emM1cpWx_G)*rHJClcL_}^{|btO+r%G3IzNK)6_0raBOT8 z=u+=Y^bW3T{TeBpD7SWA{tuY*^#|iz{iSlN!3F% z3Y8cBk(boEM>-=esA+oZ7W?k)4#Y?>B{${qS0%<=xUEf*j;ze4L3tb3={tVj1h9UK 
zgvS%_%+sOQQGEKa^zn2Wls{T|HNGgMzuG;IFsT(88UpD|7&8~lA`i5+1G1^~$IsXrcR?6p6!E~V^)~%tL1PT2t_m@IOuQ6_Dg_AF@)E!v9R*XT zLnPjHg$Jk(?tadtoR@W6>4Bxw&9sn+8>4kAr90yV>mdz}3Om8Wrh1tOKj6N7&5ses zNa_keL+uBm3%w(eeBR#&-<4}J#ii>Cs`?w#x9h_}(i^^(kRd_+7xZ37_riGl%vn=T zj7ndmJx!kSa{|3vaxU9iARsa#{2*P5nt|QF7RjXj>(V9|C4WPcIQ?*MgXuI=AGSi1 z>(bsWoQsVM!Yw=pWxW4b*tzBqWZM>s-meqBQG<<4?i~U zPKPfgld9}l#*`lF063cJtJ~`PT59;S zgy7I15FrF}66ZOsu3t-sKN;@H!p71uuuhaF*fl$sdBUcU+I`AZ>7!bM6ZhJi|JJ$?zmj(Dz&E ziIC1KQV0{+UTbOrrhu)JgmT$LnvwX5V zc&phddWeGYe4isr34pHPDPjz}#X1}PbnS8$Fd$3_Nm^KZ@aI3ajMV&P=F!N_vw?n1 zoNc0v?T7ANq99OF80-O%bdHAgJIIb!d^jE_e*F4rAQ7;U~8qnV$V8D>0t z-l8XqZkFd;r2ajqc*AUCHAq!=dg)OS4A$tpuIMSdWB1ik0WKbdPj1Py!5s8a*H`}@ zPJR_6)rLs3>J%!g(i=!_HSeA>jK7i`oOxYIEj_}1c@TjN5ylDUTN7cOgI`<64HQ{bCT5j>xmpq0C3<5i>u7JmZ%AW@fs-phk(F&r6{}9=nrF4 zb<7up@7tVJ|7sgbNxd&7WH8OOF&q?9;VfrRQN^jiL^#=CdtcG5w6F{ta8@Gk9}OV^ z^}GeZ4MdXyki#3wp=>lBfaLLS_?&$? z->~P>BTiawLMPsFBLD-Q73c1dO3{iA;}o)I4x>IAREpvnt$zp_(y<l#WNc(SR*=8J#!NR*dh?IYW#8#M z(ILT+j5fc)PLuG!4tA&DP-UJ)kU-I0Jik-R=`DCR{pp9r8_tlsPUTg4WRR&AV)Dk= zB!_2c)UedvseaSex~6NTL$Y;?*K>9fS%yNHGr8b&poUr_sJ}6%`2O-{Y3rAf2*GQi z(v))$y#%yquRUe+44EJ!>y8C+pOL42VBvvjdk(6y%D$%J*vXPDt@H>a4?=NcbzwwE zqrc?019tZEKz+=q*vpU46D}DNU5?Q70Uuv1Z9D96Bj%X01u65RBGG&6E#G$oEr2DI z>B@IhNo6fg>g%=v z&3`buGtcHG5nvg*2B?d(bL za+J8f=Ns8uOF~8qE?ToIS`0^}8uz<0R@~VhA`6p_bUQ-77$ea(rU<$^Zs8H7i`P?i zCRD!k(3)%^6V%8{lpgKeRN&X1mT&6V%oS%?=P;yLmx=x$nfMHOcC5pOvM zqVCevmBB0Ily4qH*M~(uVkyJ38mas2RoSbd6C;D>53B9HF(J*Oh&;ixwkyLaUz60m zioTYlNdpbRYfV{M)j$E+u zvzJBAoJfbTg%-kDEcMj!n9-WGk4pDoLu#;ZYv_X)(FL1l}?rM7M$>7N4=h zsFHsa{n790dM!TRT5cU{%GqIyO?z=$-z-7@w6z&_ck*%=Hr2_0Zw5jR33ppr=3#Nx zYRl3#JGq2XX1+UBlh`McUsGXYEi$;x3Z`2cb$OdXOAqxi-n&{RF0KQHJ+Pu zU{I6$erD^5T#q8OU1k*9m9q-9ZKvu)1Bxr^LQjqx;VS{-tr3w8h`iKR+Y~bNc@np@ zk@T|TkR?}?cGtia{OrWi^P-Bhf0oi-UD$ei4J76w9bZ$-t_JdRfw8)uT{6*amHXp8 zC7)>I2-A&ieRdC0ZV?E)ARH^UhC>K*2D7;zzw!pw%Pb2h}P zIOP{tzehNG6nXxOFZ+@IEX?J;7ZH9fZ0k-eBSLt+I_YSiLhXP+59~28aUj%onS9G? 
z>>mI?=&I?JOLlk1eqT2CCmw(mS53K6dGURgwwpNH#KCgQ& z4ks2$vF5@mX7EsLOyB;9XuCa=;?071-D6Ue@U))+Hf~{66*m9Nxgkx-tnKb^n?E&? z#xgz-sHj!gDv01_{1-N_|$hsUcyj*a|G%V*m#NJ;T>+BztH)i4xTa>7ohsVDy2DSJb%2o zf%Vnl)A>uUOp&4U7*aI&B*oVbnZpX#%>D5`ENJYp)U4I3eF{`XK$K{k{HckYp&vX5 zr?|adW76jou8_CRB*aMjIQ7-M%GhWNNo1lv(xXyw>!D@0(2@@0Ca|wIw7Nsf?iKgP zV-W?>ekEkjg9s;-uL!>P4Lsq!`C04ir@qr)vu6}Pu~pHF9VUOR1r zwOVG4Fgit9?U4AR{t!C*aMvG8MCKSf%!4)#%Fqe&w=m{A`b{&s+;Wp$V|n09i*F%5 zrRAMhZv$TQh7ivkjTjKvjy3&IMsXuu?Z}{fzW?&-^|#Rim!2F}ddEHTj$Eo;&46gb zz!uJbe7TV+AfO2g|9`$T13e9q+dK%~FDP){t9-BOUcn9wR5>|sNC z`fYk$EtqPwW@z+~xJMKmJ`JSeRxFl{2pJBC) zcl??cWiupagM&o6dO3H^7R(J8}gNRU2cf*Ai_4!S$E@cJ+?FcPnJ~KNib$ z*MHhBP0gt57tCaVLe7w_U7R0@6z#$r?MIys^CCW<^8GiMl3G8QNuR6ksj%-Zw^?%? zzH0sQK@{pLuxm2N+yBx>DL(pJIIlu(@P`43GK54(aUp;1AV{wEGYN`&HxlIxmV9>j z6lD}3ZVCrGk<8(!GikrcV}gBoR(KBeY$l606K#$;j7#z{>rD70xYMf6%&ke#$JQMN zPc?G9&T_k4ZM!DK8asipXX@K}`)SQ4J&?KzBcGwZ9Tv|vX4{=co1Yc5J8Cy{I^qKn z<5MBqIaw$JqtGKBn%Jz&y9(FlOzxgDs3y5I|L9f-Mo!+pt?GN}!;I70{~`;&&2lM@ zOJ6>@0}$#;oaes`JDJhC!f-g9B;)UJEA_ zQ^hY$KBCZE1=Sy0qLn2SCf6%|N^@-uARd1uQf*a&y-}YsVJ|jguTZ2ut(i_o_durX zfC)c%9(JLX%Cbhfn{2=V3xh2NEP0~N7ye-!K5|R@LP5J6bx4&JPVzjk<$mwxn|wrT zjyR<{a^X@GafxwucA%4Vk`w6^Oxg0oRhSoVMU!yO$$oP0sNy!{`2a>NqTsrMU*K7wEPzWtLg;Xo&rSLL%iay6J4|xV|dzn zS-ktY*jWIKi3z1UMA2c9HO#e#yCb8K)%JbT&cd{Yt+I!O5)iIT7RtEK9K40ru`(vS z6Qm@(cPPxdp05g&e8(3`S9+;J^X$MR>c(2onYX-;WxlpTHeQR-{DH88g_8c5riYT9 z0Ry7X258}j>d@`+@MK)k)&EGck+-^?0raTHZlGOhXbfYWWB7WEXD~&)3^$0n;Rbf^ zLQlSdS2So&6Gtkg_o)<3J_-KRp(AWHOy&ssYT~u}qNlGht>T+DY&#&?=zLtSiSe>C zOeG@%S@wr3OqM47I--&TvTNtQf1mDj}c?O8t+vZ38~=k|#wu zn{rx@we4}9xyB-e){Y!P|J_1p6kv@T`S$<82J$bkZ>qEH zh{d9wG2e*@vexBnOL>2&c^^YHi*_waGmn(GJ9C2{th)xe)#AqmI)vYRt#Y^Cd13p+ z9%bNuWCK`%6I?#W!(Tf-OL|bgGo1g9g<+TAS~6Sj5g0c%sKyp-LdGPmQLv%r*4S{; z85znJ#`+yF&z@zwRH20q{AEOhl{n|8o9vNIe)%teMUUQ)CV-SJT`RFFo{K)LlcV2AE@tLlhol_~TjD|nq0fT4R zV6HWfl1)lVG1^>jA8weX>|e`Pzl#Q0BH+`EA8H?GnnaHw1K#4LPe6!UWpg}g2EY#uTc__x zvU1cgt?cWM=gt$(Zp#oiI^g3rX|Mj~c%D~nu)HXdPFt-nfxzwau`IJtQs%K@XY??< 
z)k!VYpYcieG2CXG>GUEy;gQc_`UB`MqRnGx{;#4sq6~b;fkp;Qij?CQtOv z=h=@G9Df*M^bBLZ9>eqM4g6a(@cFrcVad~W-{EM&NR!~lHv#GozUiI0Q*FC3FUTfO z>8@~mc-=PGBNY;YWzy+w`7Wf;kALg|)vT!dBXEDQIf=5eA2Bt(piudYou~W2hC}W? zyPIksS8)0Y!R+6vDI&UeVJ{MCuEPK1)P5Fu?6ughUyUIDCp|4!k zhIFpKjwZg&%-SjLF5re-tJAS3{sGWU0T?&(>=E6`E<@reNr1lwCz3(SAQ`Wd-LO^p zNte-T%tOS&Xc4FKqE9n#VAR1~B_A2Mn?qwCcH~tDP)x52idn(`IOzYGK6>73J`t*o zD0&9A1CxwN+Dl>kIzjx2yOO*C5EwxsR2hXYs0 zNHwMs@xtaIV*X^Dq#U6&mX*ObbK9^j(`~6+oNrvULC#WlcbYA zSY+>;-L1=vwJWoRVvrMEbh?0*&RyNNFA|%NKF@jNC!et*Jqb>p0~#&Q7%u&}oCo5$ zyNq*y#uOAx+`K6+`)eKB4u9$0TmBxxFA4tRr*wAGF^j>-L*8a?`6u8`CJY^=|0*i_dI;%mb-V*r{R=gxtG zorTKf`JKWZL?}|XFo{S-CUHeM{7%X%dI-3gJhaL9zY8lRPUeo)WDC20%X;!=pKkq4 z2~ON;0RFIBdDpArsVIGAq+AB+i>}tuIqC0U3*~58gFmq?s9zLMn-f4sbD%tmFfZY# zIH78 z0o9U?bChtQ?!w}`k!x3wi_cyy?iq-0UOO88cP+CmKp5j=NC%kv3xv%e-W7mm%|_NE zE!@nMoeK}9torUo(rBv0b0e2i=NPDKzXC$sOWL9AVY0Ch9SZq18W?-D_y}$opf`bc z7I#Wq#tS)u7Pob#g1`POQ7I0yjIVcT>ji*e*as2919ElRLW9)ek| zEioaYrb9DQ)7a&K8DW3Kh-22p7QA-``Dn}T3*xc6i??gz&ZPn#oL?97)@G8GG=b=< z-CT9=Nx?0W@2sA$CjAJ;Ft+s%5vu}}V~d~@p+{t><~TI0KZlKWDE2x;?@4Xjo~4Sw zC!phaYHPgC@!y`J+cygiT5Njcdu}>VR4m%txxk{EwQRguvkZ<0XfrkuUTm1d>g*5h$>}}IJqsIGL&)@#< z@KS}IUGph}qZtyzDtD0If_MxUP;eoc-x_gK%w=cEA`udLdk)d}O1okE@Pcu3(_ozD z(1Q)_epH#QY5CWm1xrVVG@&n<#(>6+tWaJfV$0XDF5hhq-Tnu|9s)>nC4q;H5B{kH zPG$cR+@YW+>ri_eLd)PyLn0T;*3;v{Wz}j2{;Vf4;5MCP!@&W>xeH5&lhM>{ZOg;G zejG2w=Q&ROW@fRwDZB-jYDvnhpVKngb}qzfruMP@+K53{UNr~JTFh@;@^}+TvIWq3 zpbeO}|DRv~dQ>%0$E6jYBEZ=F)Wf`eb3YErPP450jeSz>sTQC1NPyjV_g-dPp#DuO zc7MTG<&Bo(c>YVj;8EBSY6s%Ql>iFo#?K#@qt0JEH_dPb2&FO-PW)QpkYFPR?My6# z)^PaP7&_-M3Gk7dg%kHBshPjuA+L){LEgGj3*^dT1g@vuz0%&*Et!;0x_E z9{B?neLx^TAeytS8_?>8w%-4X@7@0GIYKL9UmFMH?I?WVnn&!I^decc)(Zjp!oMkm zknRrKlZy40qs%`G)YloYq$i2Jrq0ertNuEMl&*WZuqV~fjOl-|Nm9?P=t*O)@OHJx z-%TtjP>;j*L}Fi-hlKr$=GAefwQ?Me=g`| zN?`YogXQNmPV1z&)v^|BJkQp~Dp=5n4Ptx6d7E=kMv&Bl>_7bG)$+L|ISD1|oLN*;?7# zU;wMLjgoBs0>}eDX`%|7E*%uat0YQK=ORrXfLCm)+nAO#AKqMQ1t7cHin(T4e>~QQJ ZuSd-5iUz*Tc>wFr)X?fmmBH;t{{zYkEKC3Z literal 0 HcmV?d00001 
From 4b97a810b509c93f44be4c037c7aa18fb8922884 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 6 Dec 2023 02:59:22 +1100 Subject: [PATCH 0060/1088] Pre-release 2023 December version (Mistral, Prelim DPO, WSL, bug fixes) (#16) * Immediate bug fixes * Update README.md * Update README.md * Update llama.py * Update llama.py * Rope Scaling and max_seq_len will change * Update llama.py * new images * Update README.md * Images * Update README.md * Update pyproject.toml * GQA * Update llama.py * Update llama.py * Update llama.py * Update llama.py * Update llama.py --- README.md | 66 +- images/Discord.png | Bin 58500 -> 18382 bytes images/LAION 2GPU.png | Bin 0 -> 51428 bytes images/LAION 2GPU.svg | 1518 ------------------------- images/SlimOrca 1GPU.svg | 1424 ----------------------- images/try live demo green.png | Bin 0 -> 15262 bytes pyproject.toml | 4 +- unsloth/__init__.py | 4 +- unsloth/kernels/cross_entropy_loss.py | 2 - unsloth/kernels/rope_embedding.py | 8 +- unsloth/kernels/utils.py | 4 +- unsloth/models/llama.py | 139 ++- 12 files changed, 122 insertions(+), 3047 deletions(-) create mode 100644 images/LAION 2GPU.png delete mode 100644 images/LAION 2GPU.svg delete mode 100644 images/SlimOrca 1GPU.svg create mode 100644 images/try live demo green.png diff --git a/README.md b/README.md index 4580e83170..9c44e2628d 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,25 @@
    +
    - -## 80% faster 50% less memory local QLoRA finetuning +## 2-5x faster 50% less memory local LLM finetuning * Manual autograd engine - hand derived backprop steps. -* QLoRA / LoRA 80% faster, 50% less memory. -* All kernels written in OpenAI's Triton language. +* 2x to 5x faster than QLoRA. 50% less memory usage. +* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. * 0% loss in accuracy - no approximation methods - all exact. -* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. CUDA 7.5+. Tesla T4, RTX 20, 30, 40 series, A100, H100s -* Flash Attention support via Xformers. -* Supports 4bit and 16bit LoRA finetuning. +* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) +* [Flash Attention v2](https://github.com/Dao-AILab/flash-attention) support via [Xformers](https://github.com/facebookresearch/xformers). +* **NEW!** Works on **Linux** and **Windows** via WSL. +* **NEW!** Experimental support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290)! +* Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). * Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** * Open source version trains 5x faster or you can check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!
    - +
    1. Try our Colab examples for [the Alpaca 52K dataset](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) or [the Slim Orca 518K dataset](https://colab.research.google.com/drive/1VNqLARpE8N8eYwNrUSDoHVjtbR9W0_c7?usp=sharing). @@ -49,7 +51,13 @@ pip install --upgrade --force-reinstall --no-cache-dir torch triton \ ``` Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. -# Alpaca Example +4. If you get errors, try the below first, then go back to step 1: +``` +pip install --upgrade pip +``` + +# Documentation +We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! ``` from unsloth import FastLlamaModel import torch @@ -59,7 +67,7 @@ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False # Load Llama model model, tokenizer = FastLlamaModel.from_pretrained( - model_name = "unsloth/llama-2-7b", # Supports any llama model + model_name = "unsloth/llama-2-7b", # Supports any llama model eg meta-llama/Llama-2-7b-hf max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit, @@ -80,12 +88,16 @@ model = FastLlamaModel.get_peft_model( max_seq_length = max_seq_length, ) -trainer = .... Use Huggingface's Trainer and dataset loading +trainer = .... Use Huggingface's Trainer and dataset loading (TRL, transformers etc) ``` If you trained a model with Unsloth, we made a cool sticker!! +# DPO (Direct Preference Optimization) Experimental support +[152334H](https://github.com/152334H) hacked Unsloth to work with DPO via TRL! +1. Hack the model's `config.json` to be llama model. [Example gist](https://gist.github.com/152334H/d8a68b51b83bac008a02e69ecc81d5c1). +2. Use Unsloth for DPO for both base and reference models. [Example gist](https://gist.github.com/152334H/4847f3a8cca12894877e6b30698b0b64). # Future Milestones and limitations 1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. 
@@ -94,6 +106,9 @@ If you trained a model with Unsloth, we made a cool sticker!! # Performance comparisons on 1 Tesla T4 GPU: **Time taken for 1 epoch** +One Tesla T4 on Google Colab +`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` + | System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | | --- | --- | --- | --- | --- | --- | | Huggingface | 1 T4 | 23h 15m | 56h 28m | 8h 38m | 391h 41m | @@ -113,19 +128,28 @@ If you trained a model with Unsloth, we made a cool sticker!! # Performance comparisons on 2 Tesla T4 GPUs via DDP: **Time taken for 1 epoch** -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +Two Tesla T4s on Kaggle +`bsz = 2, ga = 4, max_grad_norm = 0.3, num_train_epochs = 1, seed = 3047, lr = 2e-4, wd = 0.01, optim = "adamw_8bit", schedule = "linear", schedule_steps = 10` + +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | | --- | --- | --- | --- | --- | --- | -| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m | -| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) | -| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) | +| Huggingface | 2 T4 | 84h 47m | 163h 48m | 30h 51m | 1301h 24m * | +| Unsloth Pro | 2 T4 | 3h 20m (25.4x) | 5h 43m (28.7x) | 1h 12m (25.7x) | 71h 40m (18.1x) * | +| Unsloth Max | 2 T4 | 3h 4m (27.6x) | 5h 14m (31.3x) | 1h 6m (28.1x) | 54h 20m (23.9x) * | **Peak Memory Usage on a Multi GPU System (2 GPUs)** -| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) | +| System | GPU | Alpaca (52K) | LAION OIG (210K) | Open Assistant (10K) | SlimOrca (518K) * | | --- | --- | --- | --- | --- | --- | -| Huggingface | 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB | -| 
Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB | -| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB | +| Huggingface | 2 T4 | 8.4GB \| 6GB | 7.2GB \| 5.3GB | 14.3GB \| 6.6GB | 10.9GB \| 5.9GB * | +| Unsloth Pro | 2 T4 | 7.7GB \| 4.9GB | 7.5GB \| 4.9GB | 8.5GB \| 4.9GB | 6.2GB \| 4.7GB * | +| Unsloth Max | 2 T4 | 10.5GB \| 5GB | 10.6GB \| 5GB | 10.6GB \| 5GB | 10.5GB \| 5GB * | + +* Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. + +### For replication of timings: +* [Huggingface LAION DDP reference implementation](https://www.kaggle.com/code/danielhanchen/huggingface-original-laion-oig) 60 steps on DDP Kaggle 2 Tesla T4 GPUs takes 40 minutes and 46 seconds +* [Unsloth LAION DDP fast implementation](https://www.kaggle.com/code/danielhanchen/unsloth-laion-chip2-kaggle) 60 steps on DDP Kaggle 2 Tesla T4 GPUs - **Unsloth only uses 1 GPU whilst Pro plans use more.** takes 4 minutes and 34 seconds **(8.64x speedup)** # Troubleshooting 1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: @@ -136,4 +160,8 @@ If you trained a model with Unsloth, we made a cool sticker!! 3. If it doesn't install - maybe try updating `pip`. +# Credits +1. [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support +2. [152334H](https://github.com/152334H) for experimental DPO support + diff --git a/images/Discord.png b/images/Discord.png index 00b4d53f78f96e6530fc280095bbbabffe350d26..e1a0e14a567462d744824f3957aafc15e4fe4bc4 100644 GIT binary patch literal 18382 zcmZs@byU>P7dMWgA}H`tK%|v!l$2T&knWV0mTr((P$Z-qq(Qoo&ZR`US(aXqC6-!x z*`=Oc{Ql1Qo%1~WxpUulW^T=$JNHGHnu;tT0VM$r4i2HboU{fG4lWP}=gz%{4=}%E zyfI3}!FhorFa1vIbH?5hej=%M7UD2H&W`NGh4JnMJ^MrUK+f=$@TTOYN!!L^KBu}? 
z>pg!y;Iin{aUqmbTmpsiO&bUB!{A#GVLRV~OV?rdD!)Ni_h_EHdJU{V&8CSH8ZXXM*%C< zIuQi-?+&PW)ayWNf_FZbJ=@+AuBgj%&~GIgQf(Eil%kop!-I{Te@X;Mn&9KhYUMb3 z{Va6!YO@+!mQE3eGPeL*CFWe9*U~9yHQt=W(ux!Fdzh()?v>rGRNIW6UsjMxmN+-s4RmQf>A*B-?`j`L4Z&Z7v4G7od#*BQ$yc{9eQiUdZd60NmnyOW(7tGb+^GOA{Y;B{$3lN1927a@ut-hUW@BfzvQf2$>$x8e z`R@Spp_*i%cuVWfgA)%Y+F8{F{?A{0^CF+R-i?SE*JZ33b=(V?{eDVC(j(+YMpc6} zkU7MDqZ4Q(V&fZ1bmS*&U)|vNTWw5#=HWBE+^R1Ph)CqMz0u|6*i0|?Y?Ck`HWm*D z=L^A8YO&6C_~mMr>KNVrb-6AvQc%U;^gpzHgNMP-n^J*^N4f#DblQ!fZ0VQ@zTE3f ztK4r<-)z>6p@m=Nn>aZ$kivi>l@efP!p%j;|0XO$lf9@_nfKB}{^^ING& z`V|&%`GOblOw%jzB>fJCmb!aQeazT1%n23rE2{waqFKnr*j09GXrd-*B1}<7ngv)N ztS*ir?3C$xceRwx9Cgf{`f8>%5jRs@(&U*%&xu#EOqHLP-pP>^lxhCS8HV&rAbx{g z8BL2>AhMl<#RqE2a6bG5&Fj4K@n=t9?}W{x6zM|VVC+g+xl_@?9`R1Abpy-yu}1ZV zX5%4{`D{(w z)9vEa8UoE_d~}vXmTt_DmHI|ZSsUSxfmI-i=EDL=Pvd{L!V7=Ww(fJIC}h0jTR*Ad z{XQ7vVe%(b0_nPSvEa$hB*VAst`a;IYlet{qm z^PgZH3oW>eRuDNE@fCHII;cNz_%~s-e6?rDO)y*+wUNAihkI^Z$XDfjxp#;m;h`Uv zhyAk*vE@dK8KoPNg3mdM+$WiDQteUwyW`*NAbq1ux9eezUHNXB#Y3Ax_BsE3{my4L znv-|}VCxy9Eo~}|&qrCKODCFfYAiRhLhGmt6FLlGoiAjP?LdP!7g5G_*hMU1e6!Xg z8;=yt5ysN7Yo*Ur_W8S-fCGrO5z8M4WOEoLp=tHM;)um{Zl*pcEv^3BVzD!AjL%sS zQdgYcHOM+@Dkf-63OcAg@hfJONW2q`H3J+X62Ff+D-*x++Bq`)%*2R3eap4v$Rn{N z2HYwS2d@<9?kV>^Z&1jz+^a-WHP&1T`gc8UpmsCJ`U%$HaNc~_Ggl4O63d!qjzMyH zn(@(4p#O&OF5$tW) zWRZ_gJNw`KzueLIz#1-GZ4spuH7BJ3=Na-g^j2JfO-r;_4a#P0!%Az7H{Hh$-7ikMWDqWX?o_{GAm~>o;^~4lr+WY_OR{{ybNvq< z@cg~@Pl=m7lAyi75Kzlf7^R>IRxoP07RP65$V#w>z*1d{Bu!wT3c~L)6@4MO3Ns4! 
zQ^vAzCK!;4k06-K2n+^60aPICA)&DAV4qu6I@1rlkB=b$*98WFdWZ57pjYNF*FulG zn9shHCiyW6m;4vneKy_*v~EKtjD=YJQ{PIW{(yXZ$}WJhAryr5+)SmXw|dQC`}aF* zLkMW4Y<0e>;WyqqiW0jDwNgeNdB>VsR6{VRp6V|HHhNm?vtWmwmnJPTq6R9ZEEKmC znUe%JgAbJcSyH|>2TI;GktTF7*+gWscbvw<_iunh9|D^ueQ{;D9djBw8gJYmIzMQH zMi^V3BCsO;;>s+naFhsknm8IqE)L})_$sgm1Wt|4!jB?OOPej<(~E5*aoQjCeA-K^M9gaa$raZQ48lo91x<|2B%R}JuQ{W!L9{*Rv} zzM>?w&(6#oU`#?a(tJDhr93D;!?G7q!+6q;3GB==C@hyw-uftE4kzzW5DFnvxvWl( z7`P?J*^wKKZ%ab(40D_WOR^u#8^lpW9${_m%epO-ll=bRCix~AdSjFr>Wn=}M!B%u zKByig!z%s!T}E>eD_#Rc$Iqk--HfqcbpqmB83WeQcZEYiLeGM#EdPmkwzA=Mz&c~m zc%xIm&!KLniINNhETigd7|(!dbDNqrE+1LGiMs`!qVSLc*ag zYk1vA!Al6FtBQ`J%+Snye!mrA=Rnp#Yr#sN~CTG zpVV?HW6;$RgyoSdXFiRx_>FYg)OAOld5`YlpvXJyN<$d?lsg86ki#87oh?y+`nh<* zxgp(_Sg%5*8)Fd6W3pXmO8srG+zSk`Gv$Cw{3a&Xp>aY(#|VISLce8R)z`LJP@Rlw zeV+jF$)b4UDDe{~IwYfHusbHwY4w!ay**IU1rN)|8~x4A-dO(<%V?J2)`fSQZI6XM zwm*mF;!**0s04dYZmwK47v1%(nDhML#u8*Wsdq8v^F@K#+i~}kB`sPIlW=zz<-4zo zmRiGxPd!pur>DmYS?^(N@{7Vu?50ZgdoTU1g+_@9OGINBx1WN7CU;l)(`dX^?4gn* zd6WTb8KtRjFKP6f2LP#r`tWMAs`!`OGpi+Smn_W-`eY zsxSK)odv70-|xc)rzF_0A$E>S4?mpjkjVta^&tiG_wTSe$RW*0-BAe!k+39id13An z&d(Ghl#DoKx)L?=$eT>lupyEC3ru9dtHjHP{lsA@d27fAG}&D@y6aqy8B=?SG5pMZ zt*r|y-!`7?FO%`}EJu>pyap&RzTBR6(ah-6T%J+Cple#`VJ4S|_*&m+t@ z<7)EVVe+E-G}cttD{VkiLe|8xS|7&%f|fjFyo0isL6=>22<*_f&4vMJuEU^7DJaQU&{o?W@8^QE71x)c+NDNRDVCwHXu2wl$=tn-51r?zLSFPa_|?}r$^X8MlCc-FbVW_$)*(g>K?1t8qZm2 zYsV&}gJeXnhQ2iG9Qb|gEAG|Qjs5E!%#A4m&|d2(WFU}8AcHFJdPbC#XJ@T^wH5tk z8**{d$+DQHVa|O}-0EoE!oR|wR;bm9(Ab-Ana;U(^Z>5|PLUE7I;wz0jjKx*ZQGOk z4^*o;8NVWG6x*!DITP&Um4%d-yba!h-NCyK_i$h@la5a~z*+4|dz@tD>$RifU(dXr ztND@eoThmW=9>$<2FXUzjBMU<7Qr{*DmAkGtrmlZUukbonAEHBRUT4A17K^HtF#_n z&9qmPrqY4X#4xe9_f`x`G9dq5Zw3_WFofvw+44Z5-H9WmJ<>it#OX~O^!C9x zTQ)pXrH?gRe73Rb66nPkcP9SFOo0ZV@RB?yQT}{qYO2ED&*|-wkJL5f3_--A#rchr zOSZ`q$GKW-bSgSquA!Wm2{(h&nu<5J)0({SbFxrLIeAoCM=!>YY^R>FAKY1gE3;== zx*3Bg8?>vyTuWR$7)PY?J{y))K%Mt+Rt)2h84=z@n5u@F0vNNIs3&X}{l9?gQF6_o z_Hg8Ad@-fqY+eX4pw*`+_AP5cp@c!7Kk$nY%=Zk1{-9%!*YDFjN+STSj4ITxcV%;W 
zRx4;Zc=nz_ytIWqht{97kxNf7fJT=C5ME+ec%BIHJ1VSIV z^(b6}r{$NYX08TlvrX*3m(01%u918>mR~-m2E^OvM4NvA9i{udO~hhkNkT%aD!C$Zo9y#Qe_9&{^Jnnv#EktW4o-peQI_X!5T!r zllK@20Si8mNV^ywLInyh(_MbNbF+}Knf*!oiG!NbVO=k(M;tAP-|oY2Ut{R;XE9Jt zx{AHvY*(>Zwc~q5_cc*ewwE7TsvZW1G3+|q!9Iddjj_-Nu>ZB#8F}g zTXrJZ9vV3ebP`}Nu#ls|BX%xR!T3u5M*3zWk^d{|CW~P@HeK#hJ zccSRQ30SMs$(U1W)VH<45nNPcAW@m?tl@AilKYG+YO4!M5O9+MwmrvG6AeO%Sik{S zmBtm&Pez+s07Srb4?DP0`50j&@+085Lj`s!I^z&oc5_S@&|*Ajz1cwJXS>*I+!H%h zTA=6w-_^h*ze9kNumXfE*t-W47Sw5b>|&^6T*!T=e->;&e;tzhf|sDjRL=t9qR&Y> z#{6$2nq`cIz(sf^N?5Jsw-wPXZtLQ8C>D=_zL$9=OWWgfANdz4oopjI?8uLIQet=f zAftqkYD`7rWTo0uN(o^;OF_>VtCGGug`nA6uq_In*y#7lFSL^qb!yLDedB*FuY|cA z{L0T%L0u2J#9dF`XAmp(;5hbm?rENy?ten-F?8x*?_h1%{%gEt=T*kLqbwFm?O|)T z9Le&{gd9u$nwdptJ%hXDQxQ!VVn8)l<|ecoSMw>`C!Uc9eA{J4J3R2EH{%0yob8(fw8l5_2CDrIf;aeRtUpdh3I{I1a5wU72&4tXp` zhc0K%JVZdL)$+!WTaXsRelI1f$4aTiC1EK(V06^$(`|z%ij^c!B^m1mzWs31U)NdJ z0z~XBG@n3Un%bXQzdpURn+fSq>edh`Sz|>~c)xA@+f(dg>bNQsuy=l47#*=F$~bJP z2uN_HyBPo)h7Lu+#4(k1#uMS#CA%R;drQi54UN^|(v>1r_^_$8^1S2b_iXA5T~Y?% zTzpZXCd;~Ko=x=Py;k?h>6rb8#fmb%V8plF_#bDVz%H>YJ5K|Bz*;4nCWJeAa0k?f9{yS(C&|kM zd5CW%h39?Z+ek)yu=}a#ULOYw>ckv%MD;`*XMP|WQI*uk;k~Fv6@bZ7W?JpB@5VR zvuj`|``T`)d^&Dzhefi!Q(lI(?*(q!{{| zT4(!UVvt~L2Nap=xcWkq8Sji^*jiBp<_fIlJrH1mWZ0x!Oy6AE)cY%sCX1D$kLd*K z#!6p+6YJoW3%J{r^Ov-<^K&XF;L{&gBf}k;R=eldgG!vD(Gh-?mDwAb0iHq}T?&~n zajpSet-fEa-xBO*72=9KP=l8Fze?QgaMYjIIA8N1swN?0Fp&x#%}JZ4W5EuPe@~bN zSE!P84Fit0ZYwsuTGn&E%QlKpTxy7|EZ*z){$7|w65%XYL_tXWuCiq@<@Dh@dbVIr z_MMQ9_4FnY&+c>a3qNj5nto*<)bF7o^U)RGd@dHDn4OhIxhT;J2rAbFRpO-2T(l_| zy?%|vQQz{sj-h+N8Nek9>S$bA(VZ%ys1hAXsYAs-YDbF`6(}Cjda2{p_==kVOcni_ z*={^=QP~oH+fYNUL=%E4d}3cF2uCs=GeT1dF0VtZ>BdzQnNYt((^JlV3sAn2L;b}zhi9&W#xpkD)(03oX z7XL{luK{QR5?GCB;pMOAb%o0xDypDqlrGqbynmj5n;*u_cK?V;;EJ6ZSWYySOYUF! 
zcW7K)G-;3Rry0NGHX;)Rzu6X8v+WRZeg*%5^Md9{OkdO9VUUJwG$Z)>8-BYc%Y5C8 z)+P}*xS^u3jl$EyLhEmf4M0)!qI&s03`9_!G>GSWekN9g*rA!xZoI|{s&@AtWi z))CzNEm=MEy^K7p9`iDn*qi*UnU`JvWuK3%ks+{7q;LjE0qL;jeaf*j6GC`Y{KW*m znKQY&l%fUnq3p0han8#N2Brk^T0sUd$^7(Uq$ zTRfscwewZVn+)hiw_=(MyLko(s zct(p;DBzz!C>3d_BRx$s{>^GqHSObqFJJ5_9cJ|6Kfc*+>kb77&yZ)>Ex(ID2DvC2 zir*+~EGYF~q_{QxWoW#XHYseRY7g+<>%tZDnaJ^`pk8ogA=XZ=jNI-^Cny>b_2(_Y zqtCMo3u4wQvsm|!QXVMInDE;8J&^Ao)$a+@2l2&7kZ44t+`vT{I@a1Htez>r?xZW> zD!HMlHZM3J=FNC%1M5EJC(eu2vb%}+#vW@P{@@MjcoX5+wj%HLa2Eg` zTT-OU_vQX8Hx{et4Pp|9K@C4L z?dfEx%-o(TdXpyX%$iHur6o>L_hfu54$0-mc^cP;LLz=ru4B!C%se z91nnOwm+#Noz!c3W2^6KB%-y((9Ds0eSC<{D0($CVsciH00&sjH?SF&c!NKf z8P4oIm*@9<<$v6IiTmB1bn3ndTli|CLSgR#!=I6BBw7p1`)*2A) zy>s{bW#y>}g%@u=*CcbRVxy?K?VwHr!$6V(fJ4P7Wg1jbw!$!@ah_+Oy9Rk2N;N~Y zL6xszAwG{Odx)6CO@{J7R5+^oTvdIq1&-AoZ(=uhS6-5iK!xBd?3;R1cY+6xhQq{n_h%N$Vc;aP z&C!@G(@D7nLH!A&a{~N(&4cbzqVGzmcy%cT*PaxO{oogw8KaqzCOccN^^hdL+{1G( zE4`gUPvK57u&WriaaO0?{@1A~R@;2n`Q~^PpDZ^D5~#y|gnl35;n@9NFE)Ha(UyNN z1ar&X*@>v&-zIe zzWIaG^TtZH`)D*U)`R_NU}T~9V8SKHW(GLRMX8wfV9L;bmQ<*{tHHtO$CJP=+{KW* zRa}rH}OR>o%qryB*4TgEFVE!MolaSSe)SnXf8_2ApyU^9rB;9ti84> zkop9xCXnQ_f+{^bD8pMeAueAv3i#qgl4n;yNnGzSZ}7nPvbVL}`Nx(z!13tLlp6H$ z57Q#O`ROuQOLDh&J`M)0N|4}S212MnYk%@%E4$o$hZ^Yo&DHxy9?;A?236)YETwbb zMCW|R@O`sq+&#*kY5xh}_8p!BtLXJK*pwYkyUx+){vb|=rCGa+CznppRhY68jbJRR z-ZnSd)h{DY?cExRMc?wushr55`I?(UhB8jP79NDj`ZyN?ehDA_HWa)kNY7NyxAdw_ zYj-aF6(O)h_4a|;bwknBNe<_iy(Ze2XMu`QZ;{DwrLqY@Cd+fn(xTl^w6z!B^lqp@scZmP^6Gq< z{iTdM}YaHpTIjf_O6|vz)4k9a+WPh)a%N=HH}OW=jDmE_m!c(eBjgypV>e%usqfG*R(QG0CA`0BB~a@*Xmw#? 
zU+u-ONc%CZ;+S)${SQ9GJdu0AHwchH>0n3MrorGFOsSnvl2roFD)A(3A$&EnL9KlM z@dF=AQs$|mS=1Xp{#;4Z)S1s`JaN&9b29b)QTN#Kbw;wh=7>viF0uz$Yl47Ie0#As zKgmjX88V#u-^Kt=7vz9I4WQ$kSrBzG#NnN4~lai-e^*BrZ+DY(x}N#DoR6W%}Krf)&CD=R$n*_R=!>L$>BD zhpWN^yBrJ6{f_fFm(#@|Q|(64+`ry}@48L0vm8%5#qS6>)Of$QuT`&kMx-yU&c4UV zU%T^Kb1653F=aMn-`B!s}7FU1%YK!>UsY~CKz)MS9Qa+z5ceJ{@QtWpnN0ZLCtovk# z_a>l?-9}!g*K9dhRk;6plbfmX!_249U2|#a0Yv_-YZgo#Xyb?N@^cWK+a-H@Y@zOJ zh%gii(1dG!c{;;OGgy2)Myly6ohN@aD=2$Lz7tzW4I&%0DPW5+Jq%nd;zwN(f)CXE zD^vd}wx*;-*3308$zdv^j9Lfa>T8BSktbVs)YzH5wzo9yB&_%pZri4$@pcsnI)?9b zAeC0D553-7*=2}MoBPHkDv`Eky6zTyPK`5LvYRxDSs?%TEg+G3z#F|O4W|)=Tsw#g zC%o14+!xig%}<*SXn;MOm~(l~f`*zEaJf5&N@utQlGyx|TYZ`Qw;AgQ)-w*p;Ek zpFHw{0SlUs$jUUzfG}Ng=yUpXk{+j0vwDh7IhZ*i1?glm zoW*5q;xkwXArdc*L~UAyW8Qivz}aQiv-o~_WxH(Z7aC(uS14tFWu^eQEcB3Y4<=u5 z=C8O_n-+AAAfn1fs?hLzz@J}ioGFAt>=|I@eQrF^{lgY5*_qBnveX&xQ_^hA+red< zBQ#Zw1Hc)jTyB}&0lnnTufoj`XURX?6IyQ65UBi>OprUWSJmuWby&D|^wy#Ot#!FM zkEbZ!8Q(HBNNvH7zi1W{5Gu7s?H&S+C_}Dr5_#BKOwSVE7@Z2%j!dkVw78G&(Q9T6 z3$6k%DJ;V1{!XexG&h4KgX}EiPQv7g)kXOLsW_iiUPsN1UKK5=9Xt6`*ZmHToDf3Z zs#&&l-+!w4AR(~v#RbtlQhIyi} z0YfAmj!mWSy8No_V>K$tC_?_UZE4+Xh>BzPk4k5DT>yuJ>+=9)_` z($xRCC3z8t=|?9mrb>Gnm7dN(lea1*hIta4@#@UiWXT;XK4v0*0ecrjk&33^qZo%z zDA(2>(%HP*w+wfkyf9l zHQhrBHeO#fdg2fH+a)y2#@pJBO30nB{F!DA$Aj|5Kn5T~@Zv_Eu!&PlS#=j{aVEq( zNndviRLsPdbWACgt<%>)_!bq_;7>noY!7(KYa7Dx+ zLd=J=1aS3}ztA@6r+}TgZHN3XQur77)yR#)gmrcSGu~AJEN5$q}Ha#*y{A@Vfq<=L|KOgTfEq zhlA#y>Z*Sm=Ez+QbX!W6XiM-7G-PmW`%`qi7tp8{;G+6s5BT%>UYl810{9OX6 zI?7DSg>HDL4u*^O$XEPX&x6)e`gg&_(}Y9!soadEwvI2fitVfmKZ|sQD7zbH{>&H+ zRzkhv(0`ftGL`iA=bOAA#@mbE%R5?j&fdzXy79Rz5c+!z1pmnt_IABlFKhxV;h)9E z1rzJa5{iB-bx}x+tos0-Xxj1Yfb)yP{p81?wPTm>f+L^F-JxeVy5JET%kw%ric8bq zN)%8}l9V~~$Mj)rx0!A*_?5coZ&E#CK=+hMCT#!F(S0LW`uE4EsNzpsUA`o7P1tIt z{R@eGGdmyT(bD-lhqpYh@+ga53`+F}pf7Asy@+)v_CwRxEJ4&i#3L9tOSHB~+HLFhGD45e zj;KsMFnuB_SBdn^v)J|z)poqXoiYQLJT&(CF_e(LNsx(<0&LNMxXKq_7!i%zJrr7I4X$qq-@u^ILJ# z4jz88X;c`tSIkEoGC7bA7)d$iADfobY$*w@j&`@b+5gIZx>C*#E=X|kfRK8BJRF@Y 
zUcL74Y(3WLK(CLLf)mB)xcMy`lBDR_7Iyh>s-+}*w#Up1iCyyl8$~*vY|>l~&*%;M z2OVWz%*B+QhWd`wyN!^)o`TOEEnTc4?|L_Cf?8{>(n2Zm=Uf-l76|gjb^J)dkY7}B zJF#sn9D&(>kZUuYr0qHJE9B$RX?|#C%h2gQe0d{C-0owx?(!R;z9ReiK!QeIk%LMc zfv>gC`bagUNb!8K&fr6hl7vS$dqOBeMX@5eCn;kP>ob3--cxn)R=meWzXqaA4{#dM z=^e!yJnc`^IRL|zc}zma&3A^3l^hdP>crrQC81@2^%hGZ3iSjJHOw+x4(0`2^8H^?)EZMo5E$Z?)Vgi{s9meC{I|-Z7&Si$O_=f0B>qH zJZM^FH+gqfQtm$KP2&|OtK2kQ<^ygz1rG4OgoI(5z)Q(Dvf6{+y;Pz_fU81}oZrbP zJ|I3R-ZUmEk%F!p%gF%7zCCyjtt~hzm~#itks6nB5pO$PiZY&G9r*f#I~og(e^3py z97KtIN?gG-D~oYbi>GPrJQf@Xa5I%Aj!dnqby8Kny6)W_JFE~o$38^YEj0F@ZnR5? ztli0r)ENS*7B*69!$y%loEE=zY1?=$+w0^(itS%DQ%0~I_`2w?E_d;*j*c%$Dma~# zx4p822O<1Qm>wL}CBLOP)ur%v+!{}}(^@~s%eBMxJGe~2tRIt~QdqWnma2Aa6}6@o zgi~4gXr`nqcPy)4S$&*2jE_D5N<1tc;yx^GK2Dd2QI?-=^)FS8qFK|fP&$)Sh&v6g z;$giy6)rIA`ulwwQk2Wsq|Typ-MU+bY$Vny5yi{C=vp%X0NIw8sgi zrCTpl0tH$G^i<8KReA{od}o26n~xc`m+9^)H8zezN>q+Y9*v$BBa>ypKMUYxS%czw zg+@eFF!|PNMcNh4n^&bXPWGURxjMJ`b%00L!h&weQH4h;%@;JA|8?i(bzOMe0V2-+1dLX!w}(Xcm;2#NNr)dufa{ZJ|P>|xEHxY#XaPmVxs}{vGK7s7OCD_u%igj@!D4? 
z7Ur>ld`*d7J0dp2Jn@Y8^&a3lsA#TJ+3k(JM4x}_vuZ8PDRIH zkd~KGjo8mDqGv1zVHnUsL665wKni=+(F;l1oZUqlT{{1#Q$?fc_d7#Au7PE}hGmRl z7xkV}oDOdiJjRD~WTsSdQ+8U#I$N(~I?`OCRQKl6)mkx^cf`*+UE1-1(;OoM<-$1l zjYL9Dj)eW0rRB&p~ublbqE+#uPxM~#K)$!G6gD=1XIhQd{hz8Yw?MkBZOD;Jt0 z1T<7kH;c1vGiFLA?`87I-e>wA?$`ZkyMFS1Tj4H|Z-H0~`9oH5{(8mQmo9D=+X-`JWuE_yP&@to@e1rc zY`UzEy<<<0c1-JwQ#QWUP|a57M&CLwZ$0 z71{l-t~`lxCbf6@*)a9QKM`Swe2}mm;?$n7(407mG*`8zy)1u`DGWas-7B$U zn&|OVv!urS^4=l~e}nwQ#G9d9VqH~8A(exo`TR2C$M(`t<`nL8n5qcel`Fu;xQSI; zBt3q@j;)wJJ3^^4YVu{u$0;;*tp@PI)6{eR%qqa$N<#BBMt@F%%HPq~18j4-l-D{IeXquxl^7}ikB#1Gk-e8w> zYkh^jfU}JMzK-2t^t(a}o%-~Nr22}7*TKAo`15bjMy!Z9BUCv6WiLl(TAi1VoDugsO-ML17!1>?B@9;BzRBUIXMeyOg z;u}b6^M2hX(>J^(p_}sUbQ{}WamfTPgIX$zP->+4jYh1NbKp_Ny*lV8 zIo)0s#lzvfZ9v`>d8H&w^&NHhrT3Z%ap0q#ULuGVtyI{gMq=X z%YTfr_eNNZ)N5_+E%l&Nsh&WgjxAYg+z(-X< z(yXA@Y>9{0eXl)++&d_FUmRTl8tV4 zmtGfX_>}6Bq|FjH7J4bC{hN)1vOAm1S41Buo`Oif?8g-=S!UgFIVL1tmD2!%xGR_a z`dXq6(;JRUIsAN{)cP&3WwWJ@qKUNuf<+%e8tOS#<184rKBXx!=;_PMyQw-XzI*a; z_mb^28!`#-Y+OnlPAY)WE`ImAvaq{UDL7$|KA#a-xT-3XLgb$^E_(QAS<$4MwmITY z{&A{SUo;aKgezZvn%)Gs%b@A67^n3$k*=hP6D(TrR&P@!Rkuo6-iU!oZtn1-H0{bj z%G}mcb{piQdaLnYZ!l|G{L};vQ?uVt?%c>K= z#QS$2;#W;o#dF1ql>!%r5nZk{#z;%k%AV>JSTmnVXOfutyFdQ}=(LYDvntQo*BOUT zQP<)(Ch-~q&NIN<)YYaIV@zbM9F4us^ea?p4K31 zwiY33_jC}{)WT`gka&A&>bNL=%?IiNRgngbpB*N2jka!>W8CJdsMP6c21qnsi$#Us ze(go}m{}Eh+}-;%M&A-WoG|^u+S-IS;-?fzZ0;tG?0e&PHsk(Mk!S4ae8c;ELN9f6 zkPG+f#fQk*_bd(DZW$6hWIM1$_jYM~LsANto1k)qUD$vt@>5;B4 zkss%78UmoTRlTlC(bS5?MwpkjS`QcC&VPxacvknhX+-B+YvAd^949mqarVMHMSa$_ zl~r|?Pp>Ut!vl7xv;=vwDGfgpOw;q1`+>D|w!&qW5fToNoV4_1wg60ijx3By_m0;a zOpA6F5E9uq`O75}8Qzk3L8^lJ$vlNjlzu*E^AjD#xQ@6=#jHe=MD z4$i@XaW2DPO&dggdQ+mkXPJk~5*O0KcD*MN)nc755izeo8<$2eijAR{|T&Ir`$ijD5Y+Kk~ ze;kz;xA+WY3_c@FeyvUM`Q9y7k+EgI~er# zZLpQ$ns1a%7Iu!0%{efx0VCAq-!{qYCNu%`GD~2e=6?*MVVna7sO#=Fsq7}WK9u8s zO0f*Y0*;b{K(86Z>0&70djAaL%l{9b6lPF#VPbKM9KbnxWs*Zydj9VSVgC2>iRhaHZO8_&-%@#wRfH%mLNxiGf&; zTym5BC^#jCRsKq14vcX?(Ub1!0%nO~%dO{F_~?^MEWfw+ndszIFtBn~xh)cjmYy4n z8^Cisz457Xs2mF~;WN 
zSf}=!TvbEj)com({}|y|@eFHx%wvJeOhUHKlh=o(edxZnv zM$n>v=UnecNwjxtza(hIjvETQ92ccSElq@vdvKZIJph;~okWs5Ke2!b& z{POfkhQO4B!BON?+D_9r3l>Y7e8Y5(-5Tp1pB2UR=S40STbfa~*wG)`_?X{2yH40L zRj5}LUFRg zGj@Ae7I@#3C=@->8frHR~ZH_Xuz+qbYum}b`bNL!xqi6RmCC; z$?VyB7WHEc$QM_1`)|O`|Dd+7#O)Su&@1Jgc}uOf9tB6EitHz0SXdAp2IcF#hn{UN zKfaaJi>@anWntvF_5iG zx2GP9eI&W{&*u+SvNzIoee)zfRN@tvOJ#p(eF-odeYBd2g^lk0le+VFY|P2_P7fIU zp1WVa?x!+{>7TaHuyHefymWZxJ5l!hc1CvNgVLdPr?EDHgYuz*r|bT&*y&FS$}e~U zm(3|sP-p|+MEU`Cms%MgF%hu=lVg8&JpT{bqTqc@MuyhFTwz_;6stm=Uf$Xx8N=tH z)9aMm09ufFgaPWCL9(PrB96No{o9xY({5QMR;>Jp%hGOCL3Yfx+rgdcvLeC>foybD zVNu@eBmK%4;}ORXz|8q}?%`Rv3*t81WBvyKpb|Evf0)Z-dEFXUhUw?xT{{nhiI0AI zf+784SZBZLt* zqQ2ywTaCKEYDdl;rowcIb*O0a43UA>ga5N#ng7ghZ!eX7P%^jgLV6PDP>$_luIu8q zm{_%f{b1o&aLVQVBH)V3XXWo!ynXt`r2oS7M&PV~te8%pU-S+YBU4a*+IJA-4av{I z#szR`gcxu!dF+LSdyk!nlWN(gGY7OK{@kS(Q{U@#$JK#$g1Y?&c?Inxh|IvZR_7L; z=UJ%l4jgh(*NYUK`vr7_z#(~{;)K}`fQ6ASa58nekHqWPQ;Fx~7V7(h_KNBOcZL2q z&ad=%<}Xl*5aGP60QJa{QRF=^ZN^hJLREctWnY|lSXAXswn$vwDd0^C#f z3An;O{6}rh?gUn+`Mj^n)U)hfL!~G`_C_?(<)_ z26!q-;R?_p8#k}zcK%y^%|$=vZ)J@LqX1Ctg_n-`S1R9KUF=?E|MGhD?L&zh`Oyyr zN#N1|s?_SSEB%ae9Ecr5tK$ppNuWbpuIF|1M(j5KLxEp1;5h7^{kYjY*O%uGaKqw(bFn_Gz^0*tC+9cd z@R9U|>w7B`>~k+aV0_mLYWi5V9(8|FtZ|d04)ahIJ2z%vI6dGvxNh#&y`X7^z16$6 ze|`D%->&%lx7lVsx{byuF=z)xbS!zsu@30nbjk);@`mNfGEr z9g#I$JDiN7uCH92U4Py0*1L7FTlSjXy;B;LIQgxJ7x1_gpA%9CL5T{qoyxjK<)&L* zpPcuc;x{c;?*HCAIq4Fr@r0}8&`Af2Njxr#PYHLc+kV{f=>sT3XvFJH4+5?*t-P~t z#`V=nb@R1FKJ0dCbTA5uU3>Mk)$TCeD;I$aKHP6_DU+QxzbrSV9W>#-fo(mH#?pWG Y3(Vp|a^6fd1RBKP>FVdQ&MBb@09cfSG5`Po literal 58500 zcmdqIWm}tT(>6@KRA}+y1&X^%kX76%F2NmwyTfXWTan;W+})iPD-eQf2<{Nv^-Zt4 zu=@OkH(%Ib%Q45uJ~R8wc@p$VPW<%?{1+%FD6b_YL=;g_oUmO;aw z(NMYu3!sD57fX?lBoW+!X3&jNc_QO7a3tfR#(&!RjqJHW8y-Zl(o=Bl+xv#b0&i`V zX2{hUAH_qYV*h{pwBcvZprPn_GhH19eXV(|^w57Jg@bQ8SEjLq&tJaDMA$7x&5b`i z-d~AY^N4wR`$xW-jbB zycA(}eS-)mpI$;QCmPZObu)_qGzp8Vm&{5tV`@4FYpAU*Ut1A4SwOMFL^UY9+xb<*__QUr0rzWh z9x|yaYGIL>M33S^H=RRGuQO-i{0Kg|{0N^`^J0-Lm5b)OmMIQ#H*f|gQ7=krqs9tXPf{Vlf^L+?W4>OfX& 
zJ4c*~QrrspT_cU5#Tjd!$cUXnzhpx$fi(bpq84}}H-K4tavZViB0?j)3dN@&anBmh zh`}fpJ-QSGZV-^9cYfN$shSYy^Qs91|Id8>E}zG`*X< zw#6&IUcmbDkYV%J%$vr;;zQ1@@o-N^-I*q|^Z0Ih=P6Od7T_1hK!u?9ZR1J>7r$o< zf{F7oKpvc=BTACKiWYD=M`OKGvzvWL(V`!8F6guBmF;78&2?L1q6BPXY)k<*WzVg0 z+|2f^<}A6iZ*4U1N@{|vAkBiC^#(Fk>DAntG4?U6gzbaj`oV^Y!+Y`JOB~AWdlK-2 zeE$KaW8aCQONQK?T1N&mHw)s@(W+uoLMzC00Sjm%tf0oJ+8UrD4zYmSnPlk?K&G8# z$F8dVYqprYSU6j%njg)JHSIs7oA1O8v4b6)b?mF>3_N!a75nu9Khj-Tw1ACPF+p*m zSD9RsF)4Bbk4Pb--Jj;8(#H3VA^q&C>{rGK8Q1>lUOH(91pclvBPU7z@%|V}c)e9u z80UN4H(@@rP|t|^^AoVngivj5jIm9NPjL~5wA+g2(zHB6BmB^`xD>>K)7X4|3 z8Q5xoFX1K`^Ot~FW^Z{cY5E3EXUj&9XEzRNfzIh`1B$V&(Z`IA=jXB7zb>aPtp#kX zo#qSv0TPyzDb0`kV^ATyxh3yBS))?LHzoO{n{3 zS<9=s3xx1g3>w?SZFRwL4~W)cTQ?i}PXw(68H}dR!@I-!7*`wj`e9m-ef9O5#9fvU z9v|!3T`tYd8_nK|S`UcOK$_pOX1EvsTtl4URD=gpJ-lJ}hzIU@>)8j5^UAhoSmEDZ zbsAWcn`@5+`M?=R?3WwHwyxz;M-&|%5X?EM#8u%GS93dLLOYx-%ZZAKV6pyIg$>*d z`XMYJ?uOOC0uD=lxLF=e_mE2QkutZL9EG7`c$W$V@z9Eymda;=Osxd_-fFx?Me}&2 z%8@X0Z5zla(cJcgHx=*Z$>Gy+0)cd23fD!ypsyt!SH!JU+s82U*6u*+h~PviaZ?bj zYJzG|K21t|^5}kV4Cd*%6RVyix86qCiqeCKZq)c3*H^-q4`W3Hn@-NXH=GJvAN^nZ zP*1Wx7QCj{GM2(6630LGFBt=|0zr8j2^#t1n+=4o^A*`Mq{{PTL^4wi+Da+$Ym|y6 zQiA+dqYJF#ELms`CUTk{*c^wnLi5=51g^S>)W1{d#7jKRZ24{+KU9`&P#GRgjiX}k zs6`bZ?l?Xq$2qTN9x)(!)`W3*?l^fMIk+_zYu=(82=BQief_7S9nz^Cxn`19RyW9S z5ay4XDBBfSM9?T?7Q1l_&lrTRvRXitEt=TPDcy_WJ*QfSN7nPT z=e~h_hXyRf)mNl8lXCBRqw(>^Hwq`VR|~&1P!O_cK77#Q|B#pH1-@NtDLUE{6-AM` z#p8DA-}D3dY16dZ+ITOG&97Rxkw2BFk(|%F#8zohMpO8#aT+2$75XY1=Xg+E61#A0 zs>kN~un`47QPvQhl4?3f5cB}3Gm@t zH55o!Cn_Z7^I~R3v%Wj(&1T@YuldF`*UY=EMK8zQE~UKD>cgSqNW)xKWcIKuoyfB$ z`Izg2;e~RajI2`}orQGqj$`g(0|((K4c&YFR{3sMn*UQDrR|DVsX;v@a#&sAjh}GW zio`$A$3phd18&?7n>U;}SB6YpL0f71*BKOT{`Tt$27K?T3}WTix+${`CnX!1uoRm& z;;TfLcuDrQ)IZQCyouF&jQ2-0{dh-Ki%$OfP`M<(<;2QCjrYJg1OFyuFV7*J+wzU~ zQUnI>4D>nuQ`raO&FurRieyptGB%*h%_CW5NNT=CGhLFhpTs2e{2BxG3B~yjjO3F) zlFQghWERwEw}yJFQj-%E$v^frVMT1dO#kLJymp*iICTP+675Q8KnD@^KN!yzmIn_< zmSurzN>?^HOt-4h;c>DXJ0dc;KlLS_UGZ9K$W%Z7Be*c1HM={vuz_76Y6vueknVTe 
zt#5ShH5X2Hz%*W}82Qv0=gq{r=qxvHQiZC{70w5QUNGpI6qF>U300LA%&?DdbpMbQJAsJ+EHR z3(^0VBvYj=@#JjCzH3cMHb=Z#yRP7ZQK!5y%5E$*bV~>c!++X>`oTtjRQ9bd&>C7A z!_y`vnP{KzNA!8|w;Z1CL@I&J*Wkl-T6uIF`@^_~OV(>0tD^e;9BVA*sjF&zuffY(z{Ktg0P}GRLYhqq> zT_sG>zDY0-<1k;+ zK;Oo$|<&YKhZd@s@Z=WtXqV)_XpLt(uMYJb6UB;4Jn>XxlW4s$&Tb)sbyx z2~_;2y-By~QcO3Usd(_oir|IN;%C@-ncb&&{Z4*oM^^K*P|DVc36~3P)9+d(h+eMz z5=S9Ta{ZJf!0QC(yFl4i)<>FxX|(CV$CHD3#G&HLlaEf_<&{Av`S$sC8Z>izBv&Sm zkd^Q1Yi87C259_Bp_f;wv71#xZ0wYc+y0e>JXF_1dC;#9qcF0ubKn2bbbiT;*?8$! zyo0g5(Cc7QQy6erT`kwnz%j$lxkiiRW_owkU+YznQ)tb@zMa7#DWx{$^Mk5Q9w*J+ z@b06Mag#lk&cd{d_~V<|I=;O7FLUZ`~24o$k9cpB=eHE#d;-cOGnP3^Yt>RHlv55&#i!PKv+}D9saDO9$$U7`g>rTe2oE0T ze}q;(=9QmS$&gY3coZAlm~z)<%ceYSQ!2OGq34Q{x<~F|YP+*5ZiW`_C*mU`E&5`~ z`T|%c>Q(LXYWBpfLjnTk+s&TUcDu2RQv_kW=X(EOWK=Cy-OZtOpb|a;d^^ThZ+J|B ziTzxSypW09t>dMzM7#6Cv6EYe%!LqcmSX zM>zrr{`d>!5RYl|$AaCOGPhn+y=lOHvwgku!Kn|M69l294xomVy^rv;witj^m9Jfi zUSbxz@qd%ssQL71*m~_wpZO*8e5iz5#d2tD@1_>0a1ESOb=d2u(s~G<3r=wY5Y33| z`@`H%a{FM<#36_;!WZdUVsl{WEvT)8=?wJAUFcF8-1~XhcoveatXOn+5DKMl!lrkx zhH!s(geRw;;um6xZ?3kvzyN+wS`exCf0k*cO#awf=q^+$x>^@w8r`hIzagI7*Y zIAUhii+|mwiC5Jg+HwTwk+#ll-^{#j3qX>H>UQVR6OtyaRw}TnN0LJWSIQ{AlzHdI zolTS6Re^z6RLd9YLoJ+2DmMJKX=}$D1&30`HO}Yq-c%0#E57QY?C;;AM~a85!svZ6 zL6$QvyT=RDv*VvN?s8}%J!UViSjI`9RZ=-;qjfrM~fCsHM>F`YqX^(%ln%F)!0U zJ26Y!n4wRFS*ARLu)GsgXR~u*Vkqi$ya?{XL;L~^2%0wY@(y!z@-VEmT%5ZOA^l{6 zWs(FjOe@|_OgKjHd3c}bkruj`vJDfVZ9rz4lhN*EdH&Pqs3MO=WmKo^Eay^_Q0na5 zTiBT)aDCZ|?lXaKR2N|XkkmmXsD{{!AHI$dbVQ?EI^icOwooHmt+M|^^H5Os_N9#Z zd0qQ?1T|{r&W{Xl;-+%`_h$gNG=hlgSv@P?GvF8-@Vk9chzS@Ad^(uMv}YaqSCegQ=FxZv@q>PTiGv%cD=chVr~@JPNmi{hhpb zbkMl&a9l9Y|GM3vhx_(eFfhD61?{Eymx_;s?m6$?oPRlA z*uS^|k#FjmCbe*(k1#yw(^tOZb0~C{8Cd}(I^}axIS@7XhGUmLW1hp|-L8Sgl$S4s zqbnZpUDW4)9$>DyTCa0p#;Dl^*{A|C72teaA7X^>^0uvagq4E6}1UN~SIatyVm#0!qZAi70~E&o3IUwTw4if8wKb>x;^Q)!*4QS=1GB zF`Zp_l9W=sdI|%kw6>~RCSOT4c_na3ajxvo!3tFd6!zIYaHBF9rDwXi1)yfXUnzOgHfx31Y(RIL1j=t zfi;MSMs=qF$PWiSGLmI&crWkpViu9QYoXMlmJ(a}dugy^&pq?i-#Eo>`+Uku346-z 
z+~Rt_#JQO($@wXF=$(WjZOrJ4i(~Xl18L|XB3AXfd10#yq`G>OnULy_&{wHsJa zq{TlxLJRBp;=e#>-Me|?s?7F;{7wQWHFl$b@%8HshcS)g-hUhWTWRDVSL@lgQ9LWo ztdMkO;4@;+f@yR2u{K~gD8OTSti^L2uZ$jy^wFIR^<-(al<1XUfpU+w!?v`cV z&wdUp;UE2)Ribl<uu?@kJRn!RX5L^$A!+Ow2hp99QPpLpsX!JGX!17z>1##4wdW|41U9)aZ*B66Xy z00)h=SxYIGC-I-}c$Dw!tznh5d~h$vp2tK}q^V+dCnxS5)$E19xD#^Mng zzpfM&L#373q^GM{mA9@?FLZ3$PGtMzzZ1JGq;xat&9*!t)HGNa8x(coN#`knHEfut#uR?NKD zWfSknjZdt6%Q*?8`ZDfeCP58R9f}-)oX8KXdxT_P znDS~KwHOIG)u-I?ev~3(Df-qI0m5R-)89sh%>qZX2P+ZHw>6$uvQm{RKL>ME4))mC z;F+SA62+^0(^HJ6BIFHI2$?$-^t^cNroAZ$qe!-4#W$DhWnPPKe7KxC5i$f1$RVmf zsM1Qe35mhtmO#N-IL$LM{S=qKVC2I}lj&BJC!AgJ;*I6uI1F&BvVkDE>5hWcj{{y* zVxK@lz;M$6f;VSZCxYhIQHaPgYx}>>w_@0lb7>6skhHA0?!dRKj!tfMM{Jr-3Po!j zicMvD>OP0_3Br6qx9nbY-~_`vq$WWKc#WLc00VevX+eJFULE@)-g%+-6Q& z$F%RnN5$uw)H|cL&ROuo6R4Yg7kP z1uyB=BpiOY@8SAnkFGiA9DhYO>6;&9>vUz^+4WhnIyR#<7aWQigQtIwHaWdZUf;3I zm5VZm8)OAJ9`KsJAVC)QG33?pktQ$OI3Tg>dzINSE2mcH3xJCtR|wDO_~Q2Sryo9Q z+))q|)wn3zUlP>Wo>X$M{pLo#5jgNx77-SA%Rj+>`3@O3k@P$lSdYutf@Rma2?u(XfQOvnbY`YbvV94Mu=Ck)he>-m$X>rAN)})ahd_>6R$MxVYl^pAorm_>A`EATJmEJ6&tCk=;lYQ ztcTDyK32U5OV9&k+42<^eQX@dbttQR*zayExR3`_;+j8!>{m&k}U;t&oR-)DY{1U#5Mqfj0%$`E@O&Jf)X#=v|3IDZ^kj&!l)ecjoqtNy0Sp+P%G@4U5x!++Jatyh68BP)Aq zyG5YrbPO~rkA*SYk}2rJv*BDZwrmGL?w^aQ}Zj{srA{<@vntQ&&JAeJ>FT4&i zTHrw5^qhC=s^_M{tun8*Y|yVdSiR~?a>y*^zDG@y9fW5vcH&YI6~5zqHx8osj>X6o z#FjV@jNhK0R(k+}MIPS!apD)qj;u)y5n29aYH~S{JaTEk;-yPVf%qs*g{OUAS~zhj z^1srG{Is|vxv^WC^=x$M7QDt4hFj-qzD{ak23$=gv}_qdy}L_xZLG#0rTaqb{wpkP z3<)s`d*?=%1cJk-)vIH%YqwK+(+C+lD^0d`U7;W`nsFz^n|w%j()Cx($IE_kgS4?i`kwlXOgQY!>pCONo=tW4PbRo{zTreYMR z)kbFzaUa3H#MbsB_@t}TUsf6kxijiUdoetj!bbyWh{fJF1bS_DvV@%E+@*fZ)W*gA z$yBNcZx`)2lN{Is)n2Ob*qWvrf4=y>-ga4YIA+<0mA@)^*vBB_{7;$bMtg}vlfwwD zzG|Wr9*3(_I%}#%Oal9_Q|$vPFaniny4ALGx^l05 zGgo*xE6mn!YQa=G|C=svc#1WlWl!HYa>?WL7hc4$UKc8ZevH^{NX_3UvvTe&54GyV zWl#CjL7HNRJhv?kfxwXIl)x>V+QYk;qgWbEg$||myFG0=wQzMwry08Kz?_q+OEE9! 
zRMtOfTUwFv>UtNSs^Gy!Glq={aQn-hmX3auWBdvyhGG*lw4?j05A#h=)8#Sea1}q-R=*i43&C6UP`;pNJ@Tp`%}$z&}L(IE9Zi9f!{PU zIi#pe!S%FhhUgVXjcJsHHZ-W{xcP#5e3VV`R*Of=#7^J2<^=5>)7L#GrpBggEv%ziGH8nny z!W%pOLF~JJSvlMU4J*}?IfF;@I|0_;%J>5{3I@Q1gA?DGv0vwmj?(DNWcNq>7 zzFNo1{4XbFB4r5>s%>++$rdo&fv>0{N_<)?YXg6nBs5B2=kGJ8(Yqv9qXQ zG3<@k!71;IoFibXhWYTZ@?pKh@Wd1?Oqf)%KIPdxNJySPQE960gkARAbk`bJCZu`= zcsuk%aq1XfRz8xK<5J5BEtcYe4b#AhUu;(WMauT{r>chVUubw`Z!nzVDn>fj9zwd2 z5Ha{&`f%GV0Zv_RbQnu6SrSX5IVHzx%A1m=|+ij)z>I4eaiYxkIZwkjY(e$)xUqb#a zHjUgxZU5I#QD(xU%tR=dUWqw(7&ZfT8+X-!a5=|HCk~3G)PO?7gn07xe!N2-4IRHg z=~wuF6NWGB&<&g&lmDy~IOeIpY%$2i?17kep|f6dpKHL=cSi1Pq-e|uh(?;HsvP~l z{6CdXJziZoJH%Mmg%?mp7dx;FhdN|BDaE@9M-k&WI2*Ca*>Go7TCd&5tL>-gPuc{j zc)#72nWo0KCOIueE~(~_s0z1ooYdT9SPtF!P9gbMe(Kp7u&^1sUIBA+ zU>?ImXNh>mHH*eAOeFSA43wp;xrQf2Lw$ ztKRU^ZOrsM<=tOFx`-Wc1Kw`hL|^-@#zOf$ZX%GNDBAdGIgqL3H(gX(sSks1)LGYD zVf-}^qr_6;D+K*BX_1x)Z{Qev9h?T~DA;lNX z>JV4Lx%+N%e5%~*lBtfDV=0jVa)NAWDyGVn=Id7FdCLsaO7Zu;wtOy?fes$b8RO!e zodEx;_h6{x0LFJd6&Evh-tKYy4%((L4|xG$Tv)pYW1kx5nBB~IR{A4lHwt#f#AL_ zabS)cyG&$l7+6B@`>P+QdutLLN7@z6PI0;pBg2V@r6%i3r$%S44he|v`Ksgk^A>YDH0~?m4`6@gO#<^RWp`a8}VQ?RR&q*yv$@STwm99#ii=v4Yihd`VX!GBzn$?Xq55 zpvq0kH5nCXA3Uw9Ue_j$Q2%QpYTMkqq~K;VRAzkrv|#G2vv682j*?OiTGUlD0eQ}5 z^Gh;9e)`&vG8cBxSD*{IdxVFAVyze@mpB#FqwK>yG-RPMUT`c5%0TA9vg6@knZ=2# zqd2?8^50gC;l9W>qg=Boma*E9Fk+n=o8C#JhFlR-IV{R#{$8cW&LL;@|B7lM6PEJ6 z`lv)|>+p276Hs0p2*WJ2M7 z$rdkjYLn=%V)wjx4M5b#SV#PxrT+u=JS9bB8M@87x&uAO;7Z7uny0gn+RMu32-%g= zSNXH*%1)v`<+}R+%YSu(MV211jM1vI$|JvN_$@E7{1qm?~Y4cOTHzb4Dyqo8&tNDzj1~=>AFvb^BADQ@5bNE^mg|vXx@rofJ`QnC zq!{Dc52Qumz4zbtt)|0@ab-8*P&4)5{k*SSk-7BAlcHuxO5b@ddaQ`#l;; zUADwzpqfe1XQr}d@~GhAU&bw}&CDZ<5ztX(8}^dvI_LZ5hhj~I92R7HllYxlT1#nQ ziNGMMB6oe9eLy~_UCxG1Mq#tpy*@=uLwS*TV@l1keyOVrz*I*1(+jm~W{>%O zdy_1))g~P{Nd)iI*bL|WiSb;$e?)(x`vlC*!4@OvZ>vHJHRH|O33!}7nvS6MTwY^n z^jqC$xt`aHr^@VDtV*U#Ye|g&7*JD_7&ty`sq&I7Z9-Cw_E?Df!^WpiYa*i-*7p-G z%2v!A(+{Ox|HKRT3r+IK!3s2p=Gade8uG2f_|RV+JQ&S84!@llubot)T;jrULU%1;{Ksb6Lvoaw_R`nqc_qO99W6@?79rkXblZ< 
zbK(q}3psz%&z~8nmGH{u*IB>NgjVQ={#OhLfYe4JIXPdZ%$j(t5_<;ii(OP+R6ZkS zGNq)IgKlxWhzyi-K-_aq6#Cc|wT}w3G1%+$B4j_#{`Yk@-8KR|{HPN%b^BOdhwHz- ztjV2)k^UXMm2Y%5%Er8he!^WXn^~zh5KLFVkD;2c_fM{xVbZ5@P zi}Oo&^NlxBRI{1cUI7;Nc~d_2NfvdT+<&=@dN^E1=jU`X)5; z>(hBs9fnc+Zcet@wf65Dy%FEzkII3%o7SqN*j9Clnz1!4JFfi zzA1|K<+Fur5jif~+~|)Z%>G=mhhCnCF6h7lPnI-Y@@o;^_%Skv*F54>y#1VQgrJy1 znH1OA-a9RvjffoiTr4PXpfsy+469XF(j}J^cEN`cMoqH}e#a$iO@N9Q)%VVRCki1o z3pmD<2Ua*F(U6gC#6*>x)Ki@t3L>H-6&OsJxb3$^gvGk5Lve}f-QhURExaH9s6rrt zgd{ZeB`48TntU37=+*1sqUN95H^`T}Sgk0lYm$(!2m$jEC~Z=kJIpJ4j|4V$*(mUo z7WV_=S7(m=4pEVt=-tVAAe>X8rO;7bKDyzL6r(JVw0-CQ$pKe%T~oqNEow6 zG{maSRjq)zHd*-^T6*sk*h*Vb`(ZYk#Ha?V95UmriE%1c*T*?M`)tSoM%57O+O?r~ z+mS#~H;?=l70wwU@>6PO+FVL4iRPTr31)985W#2ETYEDdv(*l2m0*1pDgNgCLsJ`_ z?1iVp2$vYSRLv3ofs%Oh!788zi>fM_+pw>*FI=l(TJPvj(euT|M#~prH|Pow(@Z)n z-Uw~-T-(wt{|VV*%Ulv2BMh0wOPfF~9d$;Tm&-rB8PjB5nUX)xZfyT1qSdifUm3M+ zB6u6^eeTjKzyNCII=BSSRCb@8J)7;?K{zP!crSWK$f$|t6og-`%uODw&84C<2)X?! zBzmHc8OW6h<=1MDomXqDJm}rFCw0T;dfZium=EK{Jk4adY+xCgv2&6|$ouumg-9ue zzL7pVaa*3}*XjANCY8A|K4vWi3=wD4F{i(5h#RSl050y+&mF#bD)RY(u=hb8>3~lc zolVQqFObdH%Azq4s;zZNzVj#gMI_#>j|qu>8CO{0VOI%}RXD#uuEEyNCgVGV|8*Cb zpNIrcK1HGT$A}b~jq!wjOS=fwHBg|4b-DyaGN@2f+b@5U_dcZyGsZJ^HXL2SFXI?o zi-l4e(MI2IL=T{3X3&-nHJxvFWzWBN$n>}8)@iq=t(z=`|`8Wv6X}Gnq9qP3q~NN?HAlZUiNtAN?be^aJ9z%S#jT`Iz{X&^GC==oz5 z=r1<2%r^lqo}z?czo0g^;V|B1EZuJHBj1RD1+8uqG8?b`qo*<9R*2EhF5nIeTiU33 zLp>$1ck@Mc8zVLZSqS)C+hU#}Bax51AF<1^e1HvO8TWzJ`wz4%NXOGB{|mP$u7h{z##1mGfcI|E!s8Oe1oL zMV#^Vv|{l=GjamfWBL4%&C`oLI_Rq*D3iJJwY;t4PxD~W560&ykCE%0qN+w`H{A{?QK7y_;RaPC8O@`VSBHxjqVUyRDkh2UxAlkA27HPlG*AH2T1x z+v)H?_}l!ks{gA%F}-9Ub0~9`o7+qpae-DA-e`h?O-} z8fs~l&z5h!=^yH(Z{0?2miGbz#PMZ`ildMl@<(mOo;!H0bb_OXD@0zsOs(glKobj3 z(ypIwvJ~Zc7r!olB4(*%ls=%Zg&A@1*Uiz1BU_i_W;?E)vc*oCkxgIz;V~y!-`ktJke6vPB^(R&G_ zf3Ih+wz%B3XH@FFI+UgC`ef~(38o9vKCTtm=iWV`G>Q9}m{&Xbgf8&3*zXB%4cg3! 
zOezuP+eze559rXZb#XZRUl2_IJTPN6ef(nDXjL9-r|iSj5nd@@o@=A zk!7NkY1QK;X-Ch^m4jrCeE95p^_VV-K0?ZHx{I15p+HswuIbfe9>2LhE-mbS)K8>e z-wv^OS9Q*LQb6af+&;h2#m*EU1HYhusj9V(LwJNgR-j=Yt~~J;sqk0sXmuG17eoQ^ z@HA@{%Qh%v+Cf>9f=xr)21Xe}rk!LZmo5cc~v|Lx}_>_UD(aG8K-I(tNY6hyV~BWmx44?`B2Y6MEEm zRHS}F$w5o=#JkP}76kzuka#;MhxiE`r!Jn=Z@tS+yf9WpE#Pwn)-?wEp!#bstc;ni z&|xj_$g4de<%))e7AJl%<$=ctQv1NzSd-2*0pcYlfi%4?GV12Q!Wkm_g~kG z3oIH}*USAiu}jfZ)^hTiJ4&oAEDHPI_WTwvq?Fk??5F5 WxKI^+3;okd8w;8DOA zf#%uAgCpe8e#l#17;4zUu^D}?S3Y2N#kipBjDL}7u-Ubn|NcXQjr!}mj%7S*cv^s>=atMZ+4BH1;w0N{)&Q{-3DlXFyleJjf4lG9(uQju&{25cBK znKA2O#md02f)fvpshn-q?)ul9v^;?$U-$FW2h1<=nb(>y!1{Ot?gqnr5i+p7w(;qSljzHMygoT%KC=Qt)K=Qnr%s*jx=1uF z_!oX?lv&oU8;w+aC8Yda+k;=ntDSDWx~mQT#%O&z);Jq!>_th97&1wBncSMdBoF~a6S26LRQs;Id!O_&-E+#sZ zhf<~J6iX-&Q@~w2xqvq4TVT@@2TSkEy3W+=ieA+~DU8Q^lWZE$L~8{#f}MmGi?9Lz zFux}-q&YF;J6=z7Bc7l@8a+IxG}9Tr7}9BV>%j_)9u&tH)}7b~OyhSl?eyCgWmaiG zo}pdxodF^WnHd3yVH827?>w1k%FGHOirR9-}%YKCkPk^gcIx@Zv{J{Y!rdZ ztiX=+BU^89g#u^aO`DY)r8D4(F7l6!1-IU&p$go#@?Ktk`udjCYHG)!n_olV6cx8` z6aM9T9=&~FM=tu%IO7wtnslAMzCA02i=tZ4fSD5VVt$tEL#RU{84_11`|A`EyPo)7 zwe(>`uBx<=_``2Q>U(BeN~j-HzAUjS>^oWCj2G^$kS%(*OY`+D6(q%cSQZq8? z-cx@}HsAesP7SjRE0oxr#Wq&%U+J>bWa! 
zF5vDJ+WC@xW@I~ke8}8&oh9OuL^5vM6{&mFJ3Nf%r0z6eaOEV{@f(aG$ z5DaDqFoJkv{B^i+wK3k5k;HQacBH!Jznn^n$S`spf7I7JhJ4NYG|(JlyaW(fV{+#9 zDHk?uC%HKd0~UbmMDE@Nc(`+E75h4g-`(BFi2m!_FDW*9TXJc|059)TcJ@*89NC)s zO^g6^hR*ycSEyEq7GsxHZ5(Iexw?3p4}7S-H;xEdV3|RQ$IlWR;QV_(X(Pl_Ar-g2 zG4FAIqnTQSuLQ&6%>-AL1*pLNGBM*Zgb!xVYOb$+q9(pJjAjqbO+d#Fm5|bmJ|-mG<$;VL65Ckx#3TFHmrAcq<%FK{3D=X3}z*7 z&e;Bq2Ib(^ss2!ONKn9Ii3O4~=#XQ1VjAUuPBdFWI?4B%o(V<72P8+At0$?6Hy3FH`y z`9IfDPLMj!=V=3#bmJl&OcpIkY0wpXyviYp8gH)L zTceV)mG}5pwDj{K7p}Q)ZLv>MGUEK-C~4U#luq;imi(NhZN+{s zHT~_$^-s3`m|iA`Jt1D&|9qqzWh69KR}=2!L=_z*!a9^Ram1r%l8gL*7Fk+iHc4rM z+|pgDukU7JJ`c>$m%E^+E>XwQ$AHBDclat@rLgkqXovper^zi-nQC=z@rwi`v7E~i zDS1mq&y};NQA(?#t(~^rKZi@&F4N7+t32!^D+Dq!Z+)8~k24VGl_#AidP7$Pyn3Nh8`uf&um~YhJ zz!fzqpIREEI-jhOD(nEEh>Vo`N31e_s_FrBz+WL&`T?Tm6fyE_qo+^xNC$!&tILTk|uqTc{;q|3uP3hG+lO~ z!*m+HHl*=W&q|8J@akuT0OG-vSP%Xe z&bx>82vfY9t4ccq(zxqzf=oQXu)$L7!%be*_r-@3u2x{m$WL`>+-PRf>2qNOPaiDv z2{W2Xn7)r@adgDHO8!C8Hqoefewc}6mL=#c)Z+Ez@EO9grI-k^wqit8OHka-UcFmG zkRa0n=eGSdI4h6vFD50P$Wjb8dZ;D7vg=m7=e6e{9m8am!wbqQw>Jv0#t{LF#;=cW{jDdUV?O9ICFwpibUcWii}80PEa9GWVfY##$Nwh>{(powRYG|ZCq@bSHkh|aTH zFL*8$8#4)-X8TbOni8cZJD0gXJ2n0$DR{8MK+mhqY3I^EmEFcN<&>cXuba>i~n?^Soc( zs`C%3rn^t~-fOS5cAp2tO5rI|o$#d?a%1*-F=gHS;!2f8tVNHGoKm=re~3(J^|`HN zabOg3KD))*xgDpK&BG!rG8=YJFy!Pg{T*|9%X&T83Szb|`-tGmw*}GiZ#IQWH6()H zILoy;|0>UDVMii0fyJ{D)oe#wp9{&!&(ZimNO52ik25T-)^*c?vuS8X;-?2RYBVFp ztP6L)>Fe48?9>PC-*Ix^jypP2a7-@J`Ns-dbbLhe_$(CaoH9XhViDiAmw$e>DwMBJ zWA8H=MpBshF2&?=w7(Mxe^~M+&L3`Vt=iJDYx!2hcKwFWi%+zhNYco`WJ)MSF6H&o zrwk7s!;5#(?2C33ND^IyZFX}3l;8KR3@ZqVUTt|l9ZNSk35H? 
zTgxEzIOBQRv|K|DSl=b;*LDJrMGC(DgT4Hl(4g%cnA3+AE;a4Wiq@)p&r)MDB;ARM zrYJ3&czXD@3)Q0FmF9Gau~=m56AjQssCj12;$DS1rSsSQUZh?)iKWaX;qA9r>(}ef zF-%ee{d*+NmtI5);=jFBdhyqIqlfZxxw7Wwp0wBFOfKdA&3Vh4mrLILiF4ti{ zVwpD!T*+b_g{Nd78n`o{zrMOzXHhYs>*(uN^us?OIXkZ6y1TlNnlPG(Z#2DmC2#4-*_Z0uqbEZa`~8YZB0zpU(Q-I6erXPPaylOL}70B@3@xXA0@HM zjLu1eoN-)N@}$Y-uQm}ldG8}rw|i)Q|7D0eULG-7ea>(TpbEoj-_~L?|2;M>g_SR3 zm7c-n3>~c!TtkzSlW-@HPy3rDr0lvB$^*mhCUe~N@-dIg9|-m*b6O@PoM=qr$b}lm z0HsKZX80@9hzd|F-1=-go>*x;{Jsr>=UmYk%{X=1Qpu?K$gSRmP;{IE3n)Jsr}Y+|C#*O*4L_!1xD-ISgw}Z?FQ5as`k}PXxUnn zq8gmuZ9Y{Yr)j`gGC7P2@ket`T~5)$r#`wD8}$s8CZJ)eEebsZ)&m&MRLQrziS4O= zFwYz7n0FV=K}H6w!z8%i4HXc$ZXiLLoGo6OS6^k6u_XrRmZ)gGLiyR*xzapKM=jE$ zWBh&QdTlpw--`rG1`mo*GY15mFK=$|sSit)&L%x^N+ zR2Hsh{5;|DZ|s)0uSc&bGa7w=37{02XtNo|jCljdJ+6j${jY2{8%@p((Cu^>5)4G?U;z;Ii@-Pn`OqS%Nj7t{2OZ`jV#4dhJY%< z9f1cw(Rt$=R4IjbM9eEZ@|0C+d_4W?MEgJf#fxN_Pl#Aw0{DmZ>_7{4SsGHLX3aSo zQ&r)HU1`6C>%UcqBM!IO5_Lp6#Ddr7KJ~z(BwWQ0;@bZBoxQjKH!9hfkXP^2W!{bP z=4f?Y1yYtXbw9Ad8j8gRhsm&jm@R>rSh)yuqf-(7yMyBR;M3Z zoJwb8@$kBnibtUubKZ1%sEReICsS8Xw`Fy0elnqR)7XZQ%s|T-Ry1NLtS=+UOy$W9 zmk)I-a?P877(eEXZhsc=X<%l;(pr>-LYrFhwT9vpOyRmySv!$hGgY)VKL5?r;zz1^ zN$UK6tQY>?Dl!WsZ1`W$=SoS0^zd(8@{$XQkvt3fXB_sw`<;hj7;Y4ONhym=`c{`c zjDVKR63`L zuCN>(9js510vrsxHz*i0!|yDBZzOC?#l-QC4W-THl2UJ)fU*qj#pkGfE8VV@^ZQIc zjiyi>=CP=^JNE1AYH6^kF~O0o!ef(qEJlB6ZAW_K=soSxOw#>R1^CV;UP!k8;(+U_ zRrGOX1gHIP+9alPOIyr3OD*}dpPQ$6r$d5MO@;k&wM|>Y#4|-&e&e+0a<)yIdKo|r zDfb@Pd698BStpCVo+g8Y={y|{n966E-STT`U?@m1IkfQfXOa*OaTxAIBCWbrK(>(y zJDcc6$c|kJG^5l|+1Z*{&ZbN+o3e^sY;9%cs06`vr$1i6jwBXA;I>Q;rjf5 zQVZRHam`zc+->ni?)FyX7v-;aKJA+RY&$W9|$f+3^(w;3Iow zFMx*}`gh4w!F+X(Q^0OoYga{|XFmEhV`2l>wAV?$ww5Q9m`ClhS=WR~%l3N4@Gn2< zl+76^j3yUd%r|-xP0-OE-?imR??~J7v)k{t zO$tJWywRglJA>a1U72Phb2;& zSL<$Ev20T|k*NLgMiK2~Z?y}*T|sr1WHCR1xEg?5KJe+_Axhgun>?kW#s>HO+2xB0 zwQPObCEAH0A*nE0dh5(H^o*+BXCTNoe0m@ zjh{+NNQLyzpN7q%J0HLOE!AgyrF6ZF}qMO<|Dlpp`MRe3|W=4(}><10gMH8J@D#YbdKWB zulTQg6f!kNagbN2r88|@yhinjFq(z@Jm~rS_h`03_VaFHFh3_n$KBi~R|`o-Esx-x 
z&lZYvPje}_^4Uf*M~%m_WZRY72~?4sV5H~%YF)!Z+$g2h)XhTMd^{$>?izyq(T4j3 z>;c(N_{|8v-3l-X#PP*-haJw<96r4B3=RocPOfwuGYZ^&SWNB*Gk3NqozsL6y8(h) z0lxs=FB2mjHkshsya0M+#^izfCpuFM>xdUb$JwuAhW_l`qa{D;^(s}kc&z?+aHI9Y zDzq#nCP6ok#N+S1+USuv3m2cpG;!tAaJos_pfyZQyC2yd+IZqtCTKLJjBRYVh%%2k z&Y2GLv1rM7U|t4I+FAuhqs0fJDR6t^Zuf(?4t;tvt`^G}_MaUjd%*+?l@XVrN_{0| zu`iKdX*Z(-NKL!RiONF4%ea}+&#yvtO)w~MpRcD^|K1<7(g7^$%0~EA_A=?%L$vp zeo?1-9>tplG&JfwT0+%&uv-6nRkcbf%M~?sSu&bO8K%M}2dPZp-G0a*aAml4kLr9f z$&|WY{aVSvLQ}ChEJsco6D!T1rCP(J1@7IeHNPCBG-bEKtSjUPo#8o?9x?7m_v)+Q6q5{Oj*t+#%_C8((?mZAIJ~Kllvgxr7;Y()`k8ro>idKW9wcRX#FWO7zn1A~3;{i3N}VP!Mx<{@1+-%d@M1bXi ze3z&vb9tCydJo7n0>FDv<>e(e)fUDqT3Rl?daz17J;Z?kbm?J8AyH$;o@u zthFjfqXno{%}o8s`x=UH3~7NQtY`skNnY(})Q>qTRx9&h^SOTJvJiMqx~?l>ri$QU z?+@tIA52}46E$!Ffg~__i*Uu`o`aWWxF1XaoT#QN+}49`jtvx1Hl(1Qw$`Dh={FT! zwQ9!gDLuoa!+-mf#BTf!!A7qu@?3tIoK6 zxs7pGrV=&b&oR#HaGIO{6)#%(4eRP0T)3d4^rDx`oLMch|CIt4@XglSK04$ zYRq(M34EYh!W+sjIQ#sTRu{b(HLtWNMVVu0hOrV7%fpX&;=G?e%tMI zWv9DMczaLvDm=c>kz@g@+(sHp25$4b4`0iJsv;r=IbLj>7Ug|m{4xLFZ^J$jZBJj> zIbCqtwDq9=+}KpM>0zqbv377UY$y{~5HW!7?%wIhdt}>Ee3 zlOtl|dR608u9qmLNe0`QeCJSrVI83g8m*HTb?a zmc`>2j|}IV3GeB$CDMRpF>YCZ3mt(QBxV+(R2$=^I$1c31yLVGh7i^B*nvX$+K&ce zQlo;3aIdO`7PHY>d}=gwc~V>cKXI*{5uNrDLh@ z{8}x;F;#)M{PtzTUYMMDJNrY@(V{ctQ2|~_B*hdW$nRpAnltnd@ z^)g{fFTRlu3Crv`Y=o~xn1}USTH!*X`NPDEt*lSys+#%5-{_a)k~aT5em-MJWa=sJ1dz_1IJm`vN_b^eN6}nE?(x~8w#hzQVb3T5h z?)@deG}V}R$AnWV5M`Du3-to7pTs{c@xDSuGu7^vUCZtwRpsN(2TosD_@09UvwWYR z<8pujBe;%YJbem!jqAF#Llt)&8*mZpvhTd)$AIbz$_9x*s;LQyn4I~1!DAO6n7`nP zN0>~aRm9lpCc7`wsdlTtru`6dz+GY7Uq9?)XSe2Xzt`zGlf_fS zBvoq}@Uo$~F<4dot&f1JlYP8-|HgsaJOB-$>3oa4(~n6)!1iLDH>J@pW#CT#;q^6C z^F4{Co|{1odh%jDM)ZA(;1G98@554+ubEW(=aOZcv3=1bql2G4=a{BOx66Srs-bGX zaHVnVl-NzO3qOFH5?S$lP!ldq$newz@L7PzW`qLzS4o$UHY&<{qsn_` zKLu*J*ghb-Te`V^axoSF zm9<3ilu#Sm@SiY%Km&eT94l5?k+6LrjR`t!1;xB=++CtXyruk+Ih_%I%@wXU&rXz9Njbz`t35Z%pGnX49W^*B7t!EP#!$%jL`!Hnq z6X65bg?15a(Om70*{!1yJrk+Y7C`9>{no1#*bziC_`Q$3SL8A6xJVB9Hnm 
z*qlC0x^4aS?LZuv|FvMerQxW{6GmCN;)M{TMiJuazc}_$X=&9fcBEcb;~z(DOptn+ z-&qqKB*>urr%0}#z1QXx*F<}{wsY=CPn~^wn6zu&rrAu9m7XqvISxxNiZ-~{d~~qK zu+ukZD|eu_t%*T1vActPa?0Gx+dUbu0rdDccW{h~yCk*gryqva*Tu*p3-Km5pZ-w3(TPQX(`AEh1Qhs}# z+D=r7e%#I~9@)iUZQFJ|m(acoKj4fUcDc6-qzYLX`A$Es#Zn=vML3#8a@NJ6?e%ZS zZiq02Bw-@`VQoempz>Yk3RsAkz*7RELodCYl&w(0{VlKVW){2we35mbWecx=k>v4J zqf=}8nRlPExZ7@Yk{fw__3CGLuc9+2cRAoyWcyarLXk&VbdES2!7NP*d@lHu{vQTR zCkV5HylU|ldXIPkC2npXAiOp7tcdCZzrv&MX|;jpEq3LeH4z>{GA+G~lO}|*%_U+R zAjIotHFlA@7w2gsBnCdr0WmNvd?>LWqWu7Z|FcmT@BD+FyQ~PLp7<}ojC@X zQ4Q1X7LKrcV~)U7Q2o7-!=fV;=hx;kn}ynqFMod*wtx0+_rP>JR&x8so;{^uXG4ai zVZ{2sVZY9?%K*n?mL|jv52npH-|5xIA6&u&)$Vbp-|L_MhVUi*V&+*ev?gk!>g{KS zf4v!V0I(DD(C{L}{z2@Rw#7 zQP=YxbnGi^x1p}(t7=%3%2VJHkov@6U9@RiRN>y@s`8LcV^=NA6$GNdAZJMs#)MD|tW7bh=OZx79ab#RkO!hiNOrdfmUMdV+NQZ(1dtpb@1JvhR(PdZBr6*cf|G z1y(o^F6D7x0kMCf4u;dH7`nRE^}XN9(#E1Xg?*8={crIl#FO)coP}81_pT>4j)1o_ zosDj4w89dv>v(Jo8($aJYiZeCyZ$Y)ipHVqTNl4RWNhz(W! zT7!W)?kXJS+uq@GX;93SK1?QE`e!%-Q02eTEnu5})9Gn*PaUguML&j8*z*Ff%R1XT zw|>BjVh^H$Pb>#fZjnF8S9BEW#?L_g1eifP#wh?n>G%hq|62LRSmdp}y7n03> zG1FRSx1rtUBm3qaNXcGjeH;r$0-?TC!_EuOhk0kzS9i4WInNAahkmiRk)XGD)ZYLP zP4uBE+A)^!b?|_a|CB^!%$%vm+2#E>w$Ea1eszY&5e(rz3nN zZjDm3rLKB(jXmE@3i+cV@pAFQ;;n;N!Z-|zxa0XbJ1^<-D0U|czVW#{d?4o%%?3KI zYc(Sv6wGFd>Q^yv!w}!-Rkz`m2DqA zGQW2)K=PahbHo@J?q9c4-2v7}(7y3CPfpdwvV<-SCBWNsGuB-s;B{i{=^~EO;}o-# z@2_`p-xZCb7)`t(Xl`HkbxQwAN}o>9?bBv!BNX5SjB~t~La`KLpJ&EJbP7Fo@~+8k zN+GX*`nyg&&9#AA>0~$8k4mE>ad+i)X{=^vx*CmoScx%_JOitDwSlCu;teU&8c#VO6e8)~;P^o%9ONz3R5LtL}!c z3(=xk+B10+ULo2ZWVc=Z{fkHW!K-DL6 zAJnD}qsLZ2{*Pr$X)nSU)V=Tjs)>Y|r<8u$iCka|<@ir6x|zP8n!?hu+aS2;*DW}1 zZfHb#I*8+1!aY%%CF=zJsn<4wivHe*Tfl% zqV=r!sp!FVLMumZ-HIK$L(?$LJH`!)g3lYY8_k(pD~$z9p49=_mf8~+#~WtiN}vAz zRx|55c|zbI6l}#1y|;dSmWI>U52Jc%tG+5s=+~}Kc4n>ne6sH%PBS`0C1i5n*M9i> zgpNLM1gtK$G^32cMS{ri&$)q1##ebKjSq_{o#>^x1sb?@*E&tkYWijl)_&)cthw-S zh11mNTm;eyq8sbwS|zaxC&JI6Odw zJ~xE9PSiLJ6LShKJ2V}4$ZH2z=F4bt@q{=o<{0i?UL-slnk=S*3YR3}cTv&!^p%eQ 
z(>jCFW{$T`wUY1$yur47oDZ4C@2Dcypey8AYXztO9M;e3+nRyl+0sH3 zfTG`H?4~7hy`l6OHGO?~!F)LvJh<*Dy;zb{rBm1FvYY0MX8#Dmq>l-LQQa-pl)VyJ z{jCF#=-A{O!I^s`5>qPKsg&f8q$dPy_Tg z44Q4m8JZs+9lrV>Rkn+WKA6Y3(XJlrgjvhl!N0;KX*las+{V-9NLYaGz*pslfVhY+LkMt)W^tV)0Xa`syZ9yc+6T+j5DV{$es z62UqvDu%(}Z0w^zUmsBuL9>jtx(dMG(#>A?5qsS`$1iLQOKp>QUsO;02jL+~7J#9) zEoeKK-{T-uzsvxt^=#pm{R6OfU-XnZp^G+zp%W(U`}n)+ckN6h?Stn2n;0ha#~b*k zeI-tEhiDC_(mGB6xP*y1DmNL1;v|ivazY8w90XkWXU~gp|5?+7@NpIxnc2F@7y->n z=Dz9nYg?|HMi1Fp$2|AujEuAMgiZ_XQ^a1`!`t47z8f^k1`=f4m~W)drUeYDLgFQr$3A^u>^J7NfQH>zBfg&PXn&#INuR#(xCNQ=^+%(Y>~8-P zlR$Np3YRaxc(e$Zi3e5K81mrj+W+h3O6JYqEZ|6HS#4fPbf3JQ+({c$u*hcfNsN0s z@=)gA8?TM*^L>T(cj-e@ip=D+0E0){olnz_jkN{_iOQmDduIDUbpo-nx1`%GG58b} zn(=8H2isZUuBaxT2{+QIY6pt+ooC8Y4f;!ilhTkrdCTO(1z;Yaw zAN&k~ZnAJ6Fu9JoTuo|x9X)jwyAtPpu-#S z#2rp@lrT(8!=>%m+ugFis#wbYgm0|m zqSoB=YrLbv7)&nVdP?UV5xwAFCLAlwt<+Hd2+ygER1#vMX4~{`Wrgg=*E4sQo(@EK znPmvY##1xlkk_G1WmtE!WjA>v^zcH>JdBYo^d!LaDFS-sV#S^F#N##)fD+Q+_W z>9F5N$~3E9R+*DHhx+c&686%EmH`J6te(VdHA4PBf36R|+?tF+Ds)kQIiFoXfc~a`;uZtFU4# z&T;uubQ4*{al`k^)I5QIA`Pq^M>2OLG$A;=%PO{IoG1&M;y&fW=JC zvIPuxK#3403_}1rV0&3+Ok}siqr~4oYs9s8XsBx9vFLg~7aqI`jVh?r*a};vy`JNy zG~fz%I`@87$VpT6&EcM3PRFLa3mJvOE+ap+b8o;>mwKNN+HjmNeD_&0@evR{hfQzu zKACpI*SV4A<9EEh7(zJ!x;5(eMk5@$?Dn&RR*rov1q= zzOKEEgrZZWK2SoT7%dpY-+4$#_?>rT*fW-vF}&~5mQ?vBE2-?BRM^7sbi6EXVV<%Q z<|*WH;*!eO!N;3zzRPJ!cEkkmDsbTvGKf5m?By%}YJ8QtUVgyq>x(TRumYZkUU8(b z7GDhAM(ij0@+}Axc@ebJSBW&+m;Z4mU*8u2M0H{?ZqX8#2bP}kD8CB0mc=XoE;b3~ zfe7m6y=DM`mEW3T)4N5I#Db+CdJ)^rD=igp!w_=dX)29f`8JpH@PNa71yU|rPZfWb zLbm>CA@6_;NKA4~f!ex)Csl!CyV&G6heteWw=RY>1zwG#);FEHG*^fhJr*A|zkC7u zRh2bYV2HOaPJa;2r~-e4`%tl|qNSsKMx+b*TsaSI=huq}=U1gi0DCtX4pn!RMt!*K|A(seu9PG4uvIJ6Fc+p9EEmDkwp{rrFxEx-jM{dPC zH$R>}SotQgSu;#fbsP#M(1K&R!Hht1k_(1vM-U`L1H2S>Y4mPeEpy(X_ z$$aTQ_H9>=H_mQuG{+w3(lY_QlBqxS?I(<89!-8Pl0XLgz|tTRXNL1@re7_ie94v7 zR27Q-#N#Q;0>e2U`Yes1|44CbCk!H7Rwqh+ik;he81Yz4OQTbxnPc?Vuw`rJ36HK4 z(~{x*;~X`Ut-;y8=}FW-!ZzlWWC=S;>`#{3eKzr`6Q_n1NA%Dmv-7SR(1pXQ(b_X( 
zg7WRdlq=e)erga^+vt}#AsR?*;hho)S8JR<=NOcQ1WGaxoV~n7P`w&j2edt|Rp&b% zGKEAXnpKHBuhLR3*9kOH?&$<9s`uxt6CIiP9g3H^WoxzI$RQ-4ec0qoek?b?+Na{b z4y&csPOtrn4jw(5vtA&s?9d1;d;-w;I% zVdK6Yu^%lCVz&57h6 zH+NJ^`ZQQhIh`!d;rt7R#*VZ$kn#e|9ac{g6lD(f1TZH!pB{5DL|X+%F5|AK>UKWs zq!VZw*j+bN(LUVB;Kh(AUp$^dKObe<0P6QIb}Ho%ogm!*4R1d>N!;+CU>R!W6D*c6NK ziPV_8ul~x|-?(|DVCnc%L>{4FLor>t5N7p6`Q$ym-Yx>3&CIetumR}Xq#kltG0Z$# z*}Z1TL@Uhe{Y5PSy?ja}fD1?SVQW7IEa;%?R_l3tjK@=hn1Ri0bi0m6B4?4;AtcD- zBAsWsQ>Q))5zEI4l~DQtR5i(^3%qIJt(Ok)w@?u(<$ob<>b2BzK0xR67a}AiBoaek z8jRGXu&#L|cqERScjB3f4BH7M%n*XPlB>JCBKi6VqH0ZXYQoXC&EyrRM-pdBst(Gm zcno=C_)ZUMDoXSxeao%nQ8p+o;n3p5Uj}D%x*iCRZPDO^wMROg~)qo0i3narJ#` zex2LEhc|`QVj2UbnYcpKcjbh@$zy7ndUI63P~VwE7}4tS(v;WeN@Z%;+&K~b_pR|~DD^HZj`x1jE^D4{y% zK>T1ucqiE8U>zp`RK3bd%j8f$kc8$qRPj~vRCVg03x|);A7^uaL1p+t2G!X{A$BU8 zzOqzBuW5E`-1n*0ET|rvC<% z>rpGh=QTJbbPFi=o*~15kBno1*gBedo9f3+RS@nodjO7s87|52b=z82a$V=XKCF_z z+jO1sGcz--3APw!;w*e$KlNB1spZM@tZt;3I-Z)8d<%uaOb0O6Xx1ISmrSG3&m1}h zPCwRiTn?TOQdmlzKTXeH zrbVZLlC-3Z*-*)G+iPq#lF+e3|FNC0KOU^bMhCLaa3K?6S{BoLXAi&Wy_Jm4J+Il% zU4%b7A4X;bbx-mu!3>cd3n~=+_wuol%wNhopris53KgcN>*|?azNsjWz(M?};&kYe zLXB*e@*vXI>|mr29O#xzZ?NdpvF6Qi_?a&1^Nt009X@}14_^7C0`xm^qc$I3mT7pf(<;|1xqu)4^ zW@;w?{4MSB^F(HAS%>uz$I6xKJND3@%XqK<#fXFqP>_q9eNpG6S!ZBWmnSbWIs8Di zXINR~OCF7A{^`nqq(Z?`&iLhc+nc>cJNl7)UIP?`T1*CQ-%vvXB znQf0N(`DyL zjPhS>ts0`vL3OnoiKBTza_Z=vSU(tpW@6V3fa=evr|GIwtWfbvWA6=a1hkgy>w10t zTcsaKBKqAo^t%DCYGL>>q)X5ZB7}%r<1hCRy8%;KT8hbfj-&9J=0CV9_(o&u{!H4% z<^-*40fSO`U2~+vNjPb6I+}}pXNWxhFJ5RHuf}Kr*Vyz=MvyFqNv7le)aJU+Ms&L&CoydIbcVW>PfmRghu9HFb zg_(3e?l}evd5Zf5fp^M=O_gpo40bbLKPf11q7kwEV9?k9vC~vtU=~L54ilrUuuRL< z{N*?%hNNT*_Nw_#bK-?KDXDAY$sF~g%s<{|Y50o<6bY@`KUA|OLV%hm_W8RsVniZu zdSTc8wTRYEfmvE&*I`{{(ZpvoHhUNE&U%e7{L$7~-AIN7_<4xs6anrdRH_(A)VegS+uyj3DaY+cTy5q3HVW zahJlvO@*k-$=@Ao=D{y;sf*aOY`djpx?NWnZIfI^L%|$ATA<(&3)^f4*-xrpT2RH# zOoVu+!z4N8pc@PBMu&M+BR6H19^x;WB)4AF&1Djv?7S z<@CjBK{h1240h28k91jBK08IvVpoU!z1$VNP<^N7^lak@zb}cKJrYbKgpmyYh3${g 
z;^fP13=21y8&UQ7D`vpuEzVJxrKhOf`qpNzHI^?3ml|#W-s7y{qVttMlIqD6&C${A z=hdQxKAS^%RH!TKdLbITTUfij@1Z!wToPidzWk?8zhxbB(l}K|V7p6uCV)~X+pFTnW^DK@EWU=U1kY4d*zxHL`5@hOR8Eb(1*Rx}9shUm>F zHTA6{k%d~jRB?tAe%z_RcNhqUFTqrX0iThC!(R#z{&Xlp4E-1*WOs)5cTKO52t@A{ zQR@m1`I%|*O1m_vFt#4m5zFutpI3?nkdvs-HNnRnz-<23mvH3q7&E2RI)9pC9q9k2e(opA@q%Jq#Ea14(9v_q{KboP=;TmO44?2$Bju50c za;o?FIBISKO0CsQjyc+VnvhiGUH;U@qi;F;vOEKdO-@2Q!ZoQEYX0&wf!dtIJDK#; zzC5hTm0@ZO_m4lQM{^RM{i?k)qT?K}yv>uk@ zx4Y5Y_vmdMobQyI>zItON}^~K(%KGee)1!a;wvK)Qi*ugk)K+Ej~IYPp6{rluM6Mw0%GHij;>BKfq;6ErVF}1 z6jk4@%m_w(L;fJ|I>@*G zR>$G}b@7t6#}i}_U!Y7luI@Zkel%fjJuC;P}^!`>`hLpkkhgVSb4 zSV!a+IBkdbcKAK7VtzQqHztTU#)XaSr);=-Jm69t9k)@aeRx|B#x5s=4zjPzEW5$6 zSqDd{Gd`I^vk<=cr6~q~$O{_y+Ilz~r8Vr)CuP{>LR#T{BpN2&anJnGUdv(HnFi59 zlIzc!e}0^9)q-;&b)cEfp9=jC**I*Ne;h)hav}J$3-rY$8f@i-7{2*L&gc3ftDr1w z<_d%g3WVe&dKg!^wAdUDatl^Wg(f@LV9SUoslu3>flzO+-S=Yw)I9|5{XFC3EE8M3 z^z{v{&kCkLxxh&mL&CJ;4DBJ%+5|&C?uIN)EGKbX@9ehGfAcsdbgKAq^^dP>h0IO} zl+#GE!>eDmGx(NbKru(*00l&QKE}m z^WkE(Y~8YOUsb;%j4>rKz`EFOg_MCc|A=c9ng+*`lk*&C$AJ8F!;WX2OgX+(ql$$^ z?#zDvy91gQ+%2!It!Jj9%0yia{TbbN{n%mOsnkt$cDHk*jZ(9Ig<5*=j=b^WgNip$ z#^1>zoYeu&wpA|!MMdj;q$M` z+4^V^Vi(FQwf8g0p?^&oDbY)D-rb6pqRD6E8J*FPI%rkw)HT8`H}{{KoMYq6(YfVG ztL{dONPcQN+J7Zrvki-yV{}_>k{59+mhF<)E!WVXn zX4HhxJD|E#xwt?KDj$BkpOFV=tMSP0U)~tKq#m zP<55>Ao&1ISe(Ccj6wjg>wUB%QjOiOWOoY^IJ@P&&B3?hnByq)K|}@(n=O8w-N3oe z^Ch8!f42HQB|*Nx5>zl&e=Cp-}|fR=j^1$WaH@C5J0!4vD02dey|;^>bALZASgkHQuyOWCjulNZ^XoD}`h&?3}gRn+WF z2uPQ72?)|4ASo#&Dc#a>=#XxZE@=*Rq`SKtq`MoWyBogky`R3mcRxH2=Z`aItu<@b z#BXNJ#!;(Y4Ad;S6uNXC-)P3VM>7(5xiZi60Udv8inh)S1nRc{5!xJ1L2ETuH1&^- zw`GEmRCIQG(OXr0`mpWHUq_^1IA6+O%RW0#ULvCoF)xTk4qjV9VUzOC zYl?mqR;o-Ph&lCmb3g%Fy{QTLwK#+hwHJqWL4|?b(B+poHYVoS54M^wor_Wx$w+QF zpN#ol$4ekSJdq%j^nL**j_%3fy{mW;jUomd$J!-~wh*Ibgfd0i)lE+6*@N4->?aNn z%LxwTY0-o)PX+}@T5i<=Zr13+%t9z)a4gRXWTbcQ80|lD5qmAhTd=?1e!&|$ zUmH$-JW2ReLxhRyUgAxJghZdZ^WNccz1r2@^8isZF%HLUzT(?1l7A<)h{z=K3qKD? 
zx#Xtx4hCRkvjtWB%7vls(SC7wJosgD%vUDva%Umk6k5ZT8w%O74Z-lkT`!SzrE;1i zSXLZ=4-KGpyr&Tuu6}7T2VUZHK6l_bYM~1H(dz8SRDi^wx!yZN`qB`GVgDf7*{B$- z_$hsw!{;dIdIavFP4yT(!_m}MOu#|AeGkXe{jgoon1mt&CXrpXxAR(g6=4O@Xu7xG z)(r%Eb6E+Wv;Z$=HrK!D`EUtQ)xDb>cXBT*P`cs!>LN`P)N{A2b~Jzd<9+B;VgVHT z5>eY374(iM8k({S-OV1Xko2B`*soDDIGyH|NHS06Z-9<>WQ~N_lpVt!Q+2V8pa_&` zsxLx?BCSsE`_^5f9q(&G3)&}lrBlMPH6Hjnt<;W=r)i6vOz(TtJs)Av(T5V1o!!1# zbWwy_c~HQ>NfG=gBKk(FuHD-*o3T@p`mzIIP$7V&kY2Q-YBJU)&gW5tf+7R@;|z{z zc9NK+3l5Ue^lB12f1{NQ1#P*3wUwX#SX~?M@tpdQVKHgO>OMMrnR_|e69vkhsxpo9 zi9Ph9O?cW=15*v->HLDqr)hNEx%ie{>+QZ8w#pSs`D47em0ZAialh%{6H-#e@r+fR z!O^N_z-A!gn7j{mggfa^##P8)v;xeDvzvjVl!RPb-g+{6;AiF4bDWnnb*D*CqzyC2 zF5z7Y`_|W^x|uYBG(U9DUZCpSNMm@NFTI{Fnq+ZTJJLzfgK72b={kD6@>Y9m(q)6= zwDhWNFrUuzWa+gE1HG)yMof{$(RtPLSB(yqyC=~uY?w#^gN~ELY7{7C<+fTRs;7?id|MYR5;3A)8uGXpbcs~7kf@ET-G;tcr_t+o{o-9#1sCS} zZ|}5~H>lC-YLl2@15oHqc5o?zqpTB%6Jg!MqRdzK+#{H_sJ$;$ZlcTUBweAZ+tLthlZYvO0%^ok^-Zpj;+dBOlkmU%q>z_SqW;zoeQ`;#Qx#5DcCuK z<-MV?+>Grp3G`rE)Z?w|rVIkAOg7j-00W*mG+iJKs0;@#x^R#-%|NCEe^%2wNGJVR z+r>jA=95p{ue7QQaXUuQ-4z~E4Co3>HOHDwH4xi}+h4A&pPM%6B$9!Lyj;Ofy&3-*51-y7w*cYICA(vFu2{%lqA@kbkkNS@i6 zg}#DUPHpHPfNe_{oIG7HLH!zlZ2u)kcG6}9U`=Z3nGt&hK-< z0rzMPY10!oZ{n#=8&w&lQ}GAQ;v01kR2U}SnZyTd>y($>|L`+Z`1PQW2Wl%b!vVY+ z+W`kxS+;@5v{%lSmbeo&!FReFxX7wZr@&3O;~5|jV#}eqS%w6Z9q|$EbExkGhZGr4 zcquu=h14gQ-7Fpoh2;%-8rwIiu8-xSbh)0NpS(i}ehhpwT{L)fUYVPJ1Mwp5vM<#D zn)iv#$8uM5Hk9ZZ7Kg+Qc+}ZeM+kmiOI~}OzK_*uu0&&Yv{Ih|6_P|7wS7*T$n&ZD zrkt>6vM`ZFw*5ODTvF0A3hcz-iviEp;Xx_L4`L-%0Gpv+A&VP7zsF%{kZ#YiB?oYr zp8EyME?4_wbFHx7ZW)@cX>~30k+h%pMtR>??bU2fMuw!)%!-Z$6Q4nc+7kuTn4W-> zd0vj1vNQ(dkqcSs0#7XJ0XpG^Zk$jlD)sJq7%D4?rsZSCZ1Bib*DF51)xc`aL{ zy6Vqq;l4jG7dNQ%O1J+F&dRMZRC4Y8$-R^GH@6oIZ?6TmI;uS<2nCpQeh3X1P!Iv( z_D}kSxT~^|*s(cmx$Dhqd$hz`tqC;v2il2P#0B|q>F*K6c$k=RwO=K#Trh3vnm-E) z(PaH_4Fbx792maj8I0dZbJu9$QeikmMR^^4?%Tu<~sNze7Pjeaj)^{I(^u|cy;hqxo zPJmX?6B7;5KK;I~x zekF_dTk#6qH;}pFQqAH}P4Ii3R(ck-^ama`T3Rd|z9zmRl^#`w!@&Xi227Q0nUUql 
zjel0&y#4<4Gk_7s4b7D~GF9SJ(^-7DwHMX*DZXqPL78gb`{UD%BpiN=rCp#>ooFo& zdBMOpT1SDFo)rolY;l>Y+rRnkGdl>l8GEsb+}^Y00Lu?U!ru38#Poz{qxI8 z6q~qjb+7Josl`;5NZ)_^>}IJ+SZ@^>6chx{31H!?X&A3ZN)@sCV}*&dOMkdZJi!lT zz4+oXF>J0*K<@3tk(9{(rMrOsq6i_}qrtZi8%H{Ar+54K%-|Xt44aZq7iyfWTS6$FPVLF& zJ*+x<3M~?l##r6jcdyf{Mt>`Zd@z5!s#Pe+3a$@yRmAG(GgJZ*ewI&v{c7+bj4-N? zMw3fboF?&p@`z}Xwak1lTYZXKcxvo}b* zfy4UrIu6Tbq_&q|M#w1JxBv4NWTO{Uw`YeML(L-0@!im#F`Q67FGxl2?MBZ!R;z1I z$JG+7xkK<+=6eS1i``u*AT$Ls3F~}xRxPrT2U@g2~1!HD`m25TcnK zbvSIFL3{fLd^kwsM4+41x!wu_j2k-7&W>G_G)#V#2p!ehOGHtaB)RtQl5l|IdAMeK zNeJXXHpb^9hhtW}2d^J{c#y@Bl%nfX5TTpS0)oMczUpztO{>D+oj2`B=o9u$O|fQM zW`sUw3bN8hu>zZ z9rY#(F%j<73mEUTd#mT4$@`WTbLxrJs7;?=4bvTlK7A=L?o>hc5j2K(D*;O;!4D>- z>rbNyXmQ3UnMq!6@7t9kD5@oS(-848k>O>=PIE@hpE*6{&+G?*lm4fRG z0|Gu_+w-c2L=!+YAG7h-m7oVeXQ951!m!bTqFcQ-nkGkrW3wC*9C|}^U>P3!*h;bs z1P(sEY3#OV2W>>aJE7ug&jLT1zjD8F6tHSujw~r_i`sxe;J2}Ph~41ce+5PN5lN-^ zWTPRDKa6acv4$L!iGw3A5XKNimK={|ob zOO+OQpQ`d*8c|I)hYu0nyZ?KL?q>@8pN5fJ6ZZ=wP4y*JLdOdHoDHs8AI`FZkDovo z9}CXxt=@*Kz5_Lp2}9+?(ZZ%m!^R`dmWZ@L@}aO$VA6GSVnO9;M1E_xA|khxFBYrF z$DwuDjnri!(amSrEPm#94vc#9ELks9zmaDM=x?XPbI7-F!(cSA4@8%ql(PcJtJjDa zg?yrwlyzwvvzNi@NDFR9740p7yvlcipV#-^B^Y%J+JgD+ z-o;LPb;wC3jG$O=DC9xZ>pi1lMR|pAU&F6VRmMZCR?h~T>|t+d+JTwn@{{5~KNnp3<8r)+KL!u2svIHZ;2owD6+s03U3*@hi*iiWZp6KEm5WJxb#J=K(CkM{~oxR$` zym4-?*}$)EhAQEK`y4iXVGq?t4QNWcNCk90`i!6SliJvrHMx=k8R&jPLzOH!Upfc# z>xkpi-mxbzR+4_AB~J&{v3z2S2S#tZ?b(ixeW`dgDvbJPmY zCK_$bo(6g|o`tW~A=x>gw#&kYms{y6MJ2GA;&geX6HqB5cD1i&BfiJJ1VWj2iO1dDYd&cu1Re&5>6&%1S?-%(r#eazAF@lhS|{%tbDCc?2=U8a8) zbbVJ1A42myi3Abz?8SG7e$@eP#1icjf4CE|9mXE(-IXzBeZQi0+_o!jjV}ykLion> z0XdVVo%MB0+;zu3e9a-#$`0__(x#`H@XWV&s%{dnALpT#7xq@~y^=fPQ@6Z<3>sMI z9@|9gg8J{nk?2Yi2*2@aYWU+e;md0%;7eVN$9N@c2f;`WH&N`<)_Y`_ zY%_4nb33c$Z&B5>?$!*7mrU0Myv#e89?v?-25jG?xy^OlYck%PHCZuWj)@ZlNJN$( zi$|B67L-Ezq(1dnn)hBaw5B)Cuv}zWB3G)tPYKTR`gtwQmEn%zTR8!rJdGCo?932e zxgD}XBdQN0;K3e#RRRS7>>ow*c7Wms%T%mZx2cY4Z58!pvTUu-%>o3b*=81+>&?Zu z76^44Vq`tx!z-@zx3SXD<}1}FTdqXGN+71mBT~HQZGnlKo{vHdbqrqnt(zUTZ@#z; 
zw3}P2+xUz8c)72%6=Es#yj=%a9VW*EMaAo?e0sv|!mdevWLj+bJY3rKRmx2>bkSe=@AoKJaWNC#5gtciKVt;a)o@;cqBlK-1|>00JP6rPm|Zgn2*$K z)fmIobvNA`prVzm4Jin^?)uGHiUr1T93c19Xr$GlcE{@RUC6DR^z;D3v!H{ZfNsd zORMYYBM9YR^>*)NPPtAt#8HU((=>I0R|GIGl__W``KM$E@^d7{IVLYVs*-A|Z12e-!5xSF3eT@ogb)E>MVup9whyuBoF%6Q{ols< z4P3FYl56VK8ry|U2;y9&sHr^9-H}q4gchsBddnzGkhz#_&SKjtOB1;~8!m)I2xq0S zO)c@b?1XiDNr}RTtmaBr6d7ueiFr3yk3E0s_kAE!(OLtVS0jCGzDTw2g^mCKjyq9U z6T;91Phh6iJU82ry1J`9C>8DO%$Aa>?XHS9Xe4pYw=%G(;Y=R?S}ihoqutsniF(OT^dWhP48{BnheDGZ)P_ z6nC$VTaMR*wST@Vym+REjjvL2O-QdL!7W{F7av&Df& z#G!eU+*;ICMY}my$k=&J*q;fwo;pv{f}PxWR^Voxz)YBt7Q@xUaJr*L-k@w`_8n0s zMPqfWgLyD`{%8bwj?29S(U|w85&&tS8g(0Q`>A zwTDCKPW6(Sg9G_D_-ODpCHh4_KV53PvqP~DYO?a8hbEAaGZjUz0F1&G2@?PT3OjX? zj6O~Zv!Mi6A1BQ=4Rtg-y(l-%{!(=vVb2HzpFC_X)H+fQCmOFaS}I6D{=zDBgoj~` zRx7Fs!Bl-N;+upB7+&AaTL5tuwLrSBqnv`Ub(w1ZKT z87&s|fv%VWxFa*bt34tbFtd55?I@{6on==aNzi0dY;7CbZ)e*9=k;{4koxJk^(F4} zYXSdQK`BjwrttCJe#cdH$sdK{>wq5Bz9&B5HjTR^3@I685PSwK^V*axPl;B+#yFUV zTIMsW(Ew6gl{Z^B3>mS*$rA@c>e_h9YgI|Ej)jGilbd@yE@5Hi<_}(PQdk?T7^Kpk z7w(@pO*d7w3tmfWuE$lwb68cDJ=l@qY8B|+8KUCg_|HN1f2X;{*oQ=<&AA_iDdrdtr1VkX%%T+@9U-Rp@cgn|A=PJVPsR^hEDCL?Yv1TK=?IFGb5YD>RO&3Eu3F_ z??*!7YDbcbOY-vD>)b7qgYE>B;f|X5f_SaY;)bNXXnn5Gf?J>Ek=960gXhFTU`&y0 zwi$urldd<7a*6)z!BT%xQQ_9w=C;6az-Q`Zru$LeTgrIvrpHexjKMZI20aD~$PeKz zFMpmD?WT20_x6Q6w<^E9VSZ{9hSFZbdOK|BYnVIC6V!EQU85xWx&2o#p$fyfUb_*Y zS%H14)3=|49V_;Ns{q!PG(6J5Tn+T{Vo+c8J8HZsuPgCgy0>IEA=qg#1CwQ%#qnRd zYc5b-0k^H<$9|k15bzkw9WTsdM`Y?g-cw`sopC?uk2&aCGzSde?WonL_0HZspukW? 
zx9!5?nRCtp(3Fo+pXUNlS;yj86rrqoQ)Uqr0JJ%!uQr-%OQ(bZ5M(bdPCP{dg);Xj zviBdETRdoAv9mYMx|NL_*FFyof_Rv~`RUBI_7g8Sj>|9^V|*SIPC#u#GjQqq-K{^h zRedO-1~6DqUK>fiaiyZL4KA@UUTtb!b|cF!Y-^J8NMv7IXCuxUtU7GfF>yuXy7te2 zq6`2fa%*=-`|}SbW@G>e7xi(LLR)(#({!u##v}+~*Ympz;Y%8P0gdiBsYkU6!6D6C zi-CUWgZZk?_BDgY(vdv?kN)tb^K8IFja{ev=ltSGXslq7>iW-jK2AUia zf|iFVd8yVj!4fvP=5l0P%t)eraF?f~aXxU4ttzEpUjpp%b^!>dax6LKdG2u!lwPey z5c!U>&*ggF1y3+Vq}U*j;{pJQ{d$#$?(pQs@qznsV`Lgv`JtMQK*9{>pe9+^N{vIV z{eZ93rMWmt@1(ocZ+R1iE~)Q`Y_vxH;pm(nxz#EGrX8v?%Jrvw%f#Oe8kQ!b4p%LA zPK&scmmHgcqpj&N_5o}JWv@1S3bv`m(}n#jB-%Rn7W5gwVjfV$dF>V2%fQIBm;$Xe z4iJ{}xTwRX} zblpW$pbnz|UnBDeYb?Rfu@MM1Ewn0)4Kd;X%sOIlNtuXZrJ!4aph<$Ds@iMfT`%S{ zkvSkgudkFE66HWAHbo0m+V`I52^#%Io6c!JDzJ!ZN2G6BQK=a81`T{Ri8_O~k zCUpt;+f;Xahv!gDC>GhDk_p0A4kOIY$k7l|hFhJ$F>Uykk%F$DVy&ZZ8w`{~<=_Ti zG5Jh$0Q5&>lA74^&#J10KrNi_{2HBEU_l;PK2Y-4{r&vVuY`WoK~Rwmb`d-%4;N1r zB@KI;DDtsVP&`-g{uWX*{FIEP0F=D}b=YE)1;AT)y1P!n7EOC}aP8h_>HKEYY`c-e zqNO5Ig(CKaK>e9d$MQdr4z>A8Y(`a?RTAP_-6> z7SP*pLjoUu6g!4%fo61`e02I~-6Ave7nC+SIOr}{`+hvxs?P3wzV^#wkzyju%30^Uut2b zaZiVCzQ7_;Wj;5UTOVX#S01q6&=3(eu8`NzfJ@S(@_dvg49+^3(9kJJy1j+0lyp&5 zD*dqTBNiopQRdR<*9+nNPW(+BkDY~P`&nEWD$n~V3RJ3KEE!o3YYNXV&rgf=Bkegi z$K&ED3IoPV?b(!N`6FJ^t3%4|?T-4WpBLEh8};Ob2mFitKO=YjUUHtdu;9-l;(!%^ zats=}ACI+9v#S~~AW42|LrI4Y0)=0Y8tdc^^PLX~V3}krun2|&5u{PZWtmm`i zPe9|_&h4X<1kr$L|ECA-+8dzu0yWu@%M^se<@_s#EsE(Y8aj#XNxM8kt2?V=hJKF{ z2~b&;SE~5V!EX1tCba%DzaDTe!epeL%Y9Oaf&5;}jt*a`%sm|)xQbh+Z+nW1arFBL zGB<+^?ny(LO(skG&Yw3Vq1GSTU@>9bDpADEB!|PD zivcqf@j?WS!ZwN)^4;hKXp5ah3h$1^wkZUxI8`w~1MA&G?USohVRP}VvxpVI+WzOs z5PA)cE;p3CkR_a0baD3*2f-RAvB!KYgkAHq#gxsRdJ(W`zc0f68C=s9!<=<`$4l38 z9*#jRql9^qaF_QKS^xuNAJy17z0*inHJ>Xv3lD#ut7}=3fV~&`DxZN@-A3LfL{$@- z@u#;P_OX#6EqlCSQ3~Q3ODcr9>4$x)Pc_cf)HN|uLcyn zoQ_k>7p26ob$RZBnwU)tk=OP&<)PwrjbZIUVWT*sz{R^+@cN|Ru0?j#JQVo}dw`w} zsH&K!vuKEZNc7s>aU2`d9$mVci;6*el^vD=-u{GmFuSpyy`pJqaT_}i7WwnAzXtZy z*OPaCVggrcp)Yo3WZQw=b?Lm*pzj+_X`!ea15;-Xty{er3Z@Hr6s_Kkz!QJ)%rs9> 
z??55y?WT3O0-5lqY6l=xImZ&k+CaU$VEm3!*E#^@831=Aczv#GG*IIIoC-`#tWTWpG8*bDYdOc0 zhqlkt^x&aFeDg+_DM25v2m`PH6{Z;Uh%vT0wz#*xn}B-2owu>P01I?mtDBS2bmILk z&2;aly3GF0z90ZR5!yJs7}3n+d0pV2qZ0q+%^%Uo2oq5d)0d~rDR?`tkgufDrv-(rnCNQp!=;uTwHdk}t@CNy!~ix7B>CZ#7h91!;Im>X1pA0f&^)lMhY@YrWyIH#O+xJNLLp44UpFeZ zu0_y}cfx!111aV#GFb?WU#fi+qd9dh-C>aiD;G;pP%tmA=t1*(*G?CzeOL($<+>|&MXzmY znK@^GWh3W)qLL7d@qQW^Y2#yPl*HlZYAP_sn4#ohNzrGqSNb|4B7!;6lu%#AUo6k- z>OUG|LmDZ+0p+QalQcjX|H@WrT1A;rE_h~xI0y(FY-`2>L5fYx)Of*?>>S9%JnH}n zazK07U)@Ks$2$;^{!>ldq<6POM3}J@D9A|!H9B2L6jP=}V=h4NgE8gh=QvKfj>nxX zi$gwtZcpXBFE>q#D3H9MJuk}E)*p~Xjy_EemCSncbo&=&k^!b;Z_7q>sJFOe{mV^}zR#h@E+7iZ{DJ-xJS=|lwTlGo1T z!^HHcP%9NI!n?WM0MQ`X4A}|bGEcv=iUnL#2r>8lZYJo>L|FW#@&|GNfJMGu*$BLQ z7i_Em6F&4iBc$ob10;rxS)%_)RFK_x@^`-SFK;IY?C3`YQ z>WC^McuV$j>~bdl*Ih-lVi92l8O+lG1TgHpPFbKmh2m&Nn2f9 zSFgY806wsnQ7N)+O@v8k4n%QK#t-KcQ-1#Dt=_MoxY1)e<}4gwDQ>6t`p*o1|L+e5 zQ79{t99fxhPmM2mR58vR;(XAhW*o>Ppv#O(G?mFU8m6`VuQBQ_uNuY(ur+xMmJU;I zXTP+=8XvcEH_-p+3j&MTTMPG~#0atd*BG)*s*Z25RHT$oBsAZw_negv`6B%0?|>;w z(3XwJH%8=q{ezgliv~Wxrvd)O^h`|}&slqVj`M?!9Dj{^tT&iEw(Hj?)5ASmUIkE8 zf&|$gsrmgMl|ML(C?DyN$1~?v+Pvj|zHgKlyrwK!2#y}&!>l3-2*WjK{jasyh-6go zrE<(`B-f-Jp!PPNRWAWAvb`!IA`M-z8o9A;i9af{sAd2&^mzVNw7~RCSA%+%NV^X$ zB6^Z|8yh2QHq9GF5fh=?N;t)8X3$i5GHh!*fAJXqJJv6G^Ts*;oTIi~HKz|LBZs(* zy6{jR3k@Upe^hc(xyI1W!T!`gO8b`!clm&u?`CisQP9@XlZCZAv|STQ$bt+nnpHrU zT3#2%!!f9Tl>A>5Kwxksn>so+?z|bMb{|VrGg&%w#?~`2P~+ z8cK?7G+SNAp1xgZY~VR3WcW?vwmsD_9aWvNP&1YyL7_0{0aStiedM2GB?m+@IHwH3 zy$d97Ne8a4fT_WH+o*lp-;Y&XD>IeLj+%j+`*(7ttDoM8YN;lVI69=5T94Rsi~V>3 zU8$rXVjl35EmbeKo|D!!q}l6V(b*G-rX@hVo?Xk|aD*I~?(Q&j@rRJIER=gH1(L=N z;t-_XH?Z@dY2sJ463jNn$wBTs@ znFBL3%O#JRMv#tq`jujZH`sE(`SE3KW~EeBt^(_qf{lLxj8h-8JYMnwq>w&85-sgB&d!ISU*{QinF#<@5&}z1XJoo2UQ9 zxm{6%3N~;MCxq+n>Pd|Oi|OJ?^0ycnjvtUJza<)Z9~#q_MMQh~-&7!nYc7&nt%=Fm zapyY=)tRU(;p5U@(3PqCz5eC_Ki*?H$X$qtsIpQalT!VcSe(U5_vJ-S znuL~*L<5=#s$hc8t;VyaSqdbRZB#TF`0?L`Wk*Q|&vw0g8+kZef2~x0rH%{D)?1^m z*wdFIvS=CBBVv*t`CrTnVNUzLmX^0b`n^%sq4v7!m3gZXCdXVfHC7A| 
z*>@2+9ebL1IXN{oCf+k8d#eW!d|?r{v@pZ!-Dvhz|1SmRCnBD(#0omdpD>Z*fLK|3 zwH5+2h2C>_2|fOo-b<`m3d9F!p*4m`C`tdE4op;=s62~E|IFUvUq>--<({6T%dd4* zpxK~oqRnRN%J<5zWXED9`X&Ak7m?`GY$#P3KG?0!0%;Vf^5+WVL`!!ALpO(Fi*iO6 zVA&w1|05S`A_f)q64^@!iM$aPUqv(}ZBdNy0BQPe7J#RflWJk+C?}TT`(MT>1G=Dc z0hG*Ia``iV!-SBCaM_f(`(3P2o5kVzhFbs%rpZ@jzGdt<%f74wrptH#u1o&u@%eB` z{X6ls-pZ+$(+FQBTV?No$C}0>6fM>dn(tT=fRCqosPRd6oRGh zQkEK&HTbhenDrHol1Oky*T-)!u{eX>@Me+Sj){4%JshtE(6KBPD1wNZd#}&MysLrN zV2vC>weIcK-*#+Do*pXhnlV`^cUBMsU6zDPY067vjm#X|69@6|0crmKk4mz{V4~*% z+gf?l8XKN5v%0+l7-PVh<$j9Za;@b{{deg^N-0`4jgp^5Q!;;upz7YUsuL8>6lMxt zgUb!Mt96YADoB2hGC6gl-8d;VMrGfk5XD_+|ChqX{LO^ehaCbBrARX)xgIRNO&H2W zDoJcV7#(SdVPxF0prN7ez_f4p96PYi#t5|V|KG>P0pOY0nR0G!ZX~hHud`5N9loU64O7x!A78L;{WHJ9wE=K~Fam0xG9m~wl_PRC&>B#j>g9Pfr&f2uEC*s5IUI_?O%4YEw_UU6(5Ocrr#(DYLk zCjX&?nr;y34?0>@X#PAnqW$yNrD&mBiLod;umPvTa+-J?X#H!3Vgh*DtlLPUU<_f6 zRJcE+Wz=!~tmZp+N=~L*&J6Ksp4Y3ba_UQ8-_v{k=1T#HX|YkN`Kybh0Ax5x{-1*| zUGP2NN!m5YblX>UmNX{l+7Z$@7k#W)bHs`p{2KX&VT?-p*@0Jw-?i;nHjc;3A9 zQLK2d?LBkT@!iO!q&G2AD%V6b8J>*M^EPTP~s2%yQKfwcUE{a9kk&@aQTJ?cN>c~6xn#dAka;9Hre z2{gryn-m?@eh3feXmAfh2m!mBY`Jve0i9pY* zY(T7M%%}cWtP=|?jVE|6yAF!Dlll|w|B;v?mEc%{-F=Iu$~t`B>we@&S&b!oN|mMG zGShlzd#J?O1xi+<-S*Nb7wZvKF5HC^e? 
z9O&k<>Lv9zvf1A`=QAk&yU4Fmc`oV`gOq+Aeb~WTE{l6<$<+nFr^@2bc$MFLWnZ6y zuP%v?oLwP}OkO+;^JlJVQD<;qEiE&xd6w_DbL$G02iwda63ZKQjf?Nv4=!PSzG`#k zI$0<(X(h3Y7}?wP{j-sAW@fTv9yX=6tO@;O&!++h;F^YQ6}3x0V7kP2w)^^Rr4jYo zXR{Y6{87>a+$F4kwPPK>6aX34R@zP;4->ncA~=lzds0{w+%El+{K6Fk+^k=>sLx0N z>#$SPD}zng|3kjh>&#}yXm|DLetWlThVn$>2;0u|Z_(@so% zla?|v&7XnWieH5$v*oa(IaMh|3S3W)=1uX{b|w-1HXaHJ+tb&58}^E42l|tVHUF`> zy4;dELpIrlU)&!m?~-?8n{*vU4lAe;D*c#m6=ib`?(8FJ-kNL=DUKz+1N|d-c5Lq~ zQM}ff&Jz;0>G8fi1U>};qlFqfcq`{Hc{Em7?L#<;9g|rbb!+ODLOQpVHHwb@ib9!2 zJ;gaPru}+}Wj~Q{td7T1z7JwyvQ_ST7yr3?w5K|Q2HrO#jj3nh<@t9u9B(s)B?>G5 zEE9?WM=mLJNa%8CIO49H%)Qonb}wzgQs#ej<=wM+bDf@GljUTU0o(aqwt{+2vDJUZ zm1M9Q4(Qyv4TqhY!8+{+le6Hm>dNC|ZzS_fscp0;pIZUMLR`Tf1m2u`=9o5taG86C>e4f1Yn_p6S`-c>2 zk+!}`x(jP-u!V1ZM?cGbz3uw0LxppRR!i@SsQ1fWm^s=z{@L88y^cIv>Zs^{^~oaQ zX2@WmLC<6a<6dEiia-^UiZPkZLtKqb-jHJnTNdy#nyl7@;l$8?j_9IQ6wtAF zIVbhP^&z9E3;dX2}H>amO zHj7tLDu%|L3)~8sOz%YQB4`Ok4H6s0g? z8Ws;9bw`a=DS`RCMZlU%1|dM(;5nUtd(4;c~&|!ow%^aL+##B@U^O4uUvSQ zJ7xJ-FgPh=;>vGD&kqvF99m*VX3!p28!Zh~IMsvc2l;!}$O1E-s5jTO7pna>6u0uT zzj1de8~-8JrIIo-G<03YZ@gw&AaDM{TRz>(N{&BRvR93lY|<>Vb-Vm!^YA?3A|^OC zrRF~#$*NdvKWiiY`(A-^MYqQKY+LrU@^HO0aVI3VbYRVGN-H2frPv|6J(mJcG6=L01^@a6;fUzE`s8+kU{)beb6- zrL8$=KwA5OmDMUm#$>4MA2Xnet<7rU|9N?-TJ*t=r%1{kpc3FdY{Tit;=MR$& z&VghO)6HNI`MGE(Hi{{9?nh_;&qcC-RqV~%C@O9eIsCV+Cy5(F{%Vgm=+7k%W5D=9 z?FaCK5s%8%=8v2MOQ~j#uChFYsAIn_3%=xjJk?l&|HHr6-!RtlxYd~kuK6qFJhf94x~)U48#rXdB^RI$|Y3_2eGwYhsU0;E{J`U_g9C1W&-+jWVgE_4dIj*`&xygBE|#D&1#jg%0(q5FDDBqhc;$BK6r4 zI4%kQOxz)~YjAg$`~1QHeZebf_-E$xHf>MEe!Dp9vT*CIbI)K4v$3T~0FiRwv<=F| zLO>F%WF$t&l*^ytb#Z`|-?|HtK96q`xBlF`j2Yo4NU3N5Wv?MsF6io=Ekp(KU+Y|~ zAy$5-P5uH=y=>49S=nRyLtoY)k}wD^|Md7%wN=-PE!Cpw{9t0JQiZcv1jV@a)Ae0z z-dBhI$%hPzadk!HJJHcig`~Ri|EwPAT+hDfi>jy>Fz1c~P|3UNQppN1=J zRN>)|K1E9;cr48JfIJz~f)y_O^YY){ew*2s*ZfCc(hp1)*)FrkWuh;ZK2hLu4z~hw z+cYIp2(6m9B!vDbWOjehWaq?T?&-NtZRw;FS=0F+f2i!i$exu{opqb%<2-XMYM;|M z=<>jR#XLlFsagZjw?CtwxW@Rht4d*ihekef+P`yo$F59dV;rrhygHZ-z<+I97- 
z=zn(6{WB9Yc4o}oT(i^MvYGii&v@qim=OUS31BYsWut1p&b?v@2Jt2j6PVtU9`37N zm+a5f)M-nO{v!w<7`lh%ocoDeO=_=@BUMT78;@TAAxxEqtv^_lDe^(taa;Sje01rM zCauvFU-%`Ylj(pd;iTN)k7X~b0e0gnyKzgc7vyj|N00dSzSWA~#9_j$ht;55yd~b+ zVLtq@cLe|b!EuXDW&uy7viD-6gxs=fw{qv`Nn2{5_ z-yG}Nra+%{8hGYdI|v^_HveO(x@6_wy|lUC3}uaz><8y~-)xTZTuv#dYq?&1^6K9! zsqQd#fO?u67`g8icoqjItz;TSq?MRBXtTREPF`BoV(BjYi6C?xBuxz2<5|Q793rm@ z=zc}zJ~uvpo02g{r5|LyxS6RO%C1=S*efb8EtR{~_lm3`-%;iLM;vQaSS%~d^|!_ zm#@#TH9E$Mmrjt!FPgJ^In^PTM&SDk>^X-@#jO?#S^f(1!mIz+-F1I8nKo@*TwMjR z04maUrAZfQ0tQ%$xFEfn5SsK7I!MQrB288hBGpAYA#@1E5HK&j2>}8LA<_vQLJ>mX zd-fby_iy;l$vOFLp1G%7bIsgOCfzyW_KY!S;1`jKuy}XS%}(6`PX5%aXxpW+_!(#) zUDz-Rb!G4Ey|OtWI81NP8V{QRHs%hv6GB&L?hvRrzqQ}(v*$M`RW-;fVw@5-!dtiE zpptJvRJk@OQ@K}dZtdT{ute!s3B-seQm!4+x2Ja*2n+SM>iz>lp6&~``VE|g1q0lT ztYn@A9`hWJDZ~6&!V=ruq5@Z~ZUyK)GY$~MZPyS{&H>kU2>#|js&BbyN{TkXweXxH z%wfw)fxcl+6nC*3w6wDIFUl_-KwhlXWENO zY(>h$`?{n8*7V6+^6jy9YS{{#%9%G!1E#U&=BCA(7*nTxR8PN0%jo!uZ~8SD$&Ng9 z!%)tg^zQ^9@_w3~gOH85xO(60$>BA3OnK#jJ&a2XcgiMqE@{Ie-O z)Q(W1ECLAMk#d2LcYhS3h&o~5G%2{lO^{;t#;z~B$Kvq9_iHWq$F6?2%M;q%*uKx; zr5I8(n&y$OcWIZ#N`hm?5Q<=Mt`g9ced!R@}!YBV=kW2gzHdp2k@Mcl<#HwR%S~_y2 zeOkT5pVEkN=BR68#7DQer3N0YOzcklS$!~h(D9m!s|U?-vCduW_qxs>z+#&BPbcQg z9xIkUo22ZbxKPQlA;!m%cPYmu3DCwB(B=wgJ23=-bYSiqNDrQMo;AuGb8c2cf|CYC z!18lvUYfi+k(Ds0nb5mJtMyUiXdhCYx^r+$uD7TJlu6pPW1|gYddhnX6La?mx-T^= z{1456cO^nHMlG2Y&t%Az1EH?270sw2U*%EZ-ACpdh`jBJRXz&qOdfS_kQ5bkZFT(k zbi;xvbH7jgjh+X`uz!GGF@sXYu*;8V7tvAR%^Ql8A0_AIOw|9*sh7VyeL}Ze(LgkN z`rPRodoxHk_|*ItZY(jOI|lO8Oo|#uZ8lQOci)+SRXQH;sN4EH&LyEAIX-^)$cZ>g$}OkL#a=HuL)>#bVmS{Mzu3HR z+1I5tqoEzK&vLyemrn9RhnK}C70UQrzaA?OpeP4~)`CB_*fk8+$+G!tFy`Vl0Jkdt zWj_(~pUjifohGwx4u9UH&_=NB8A=x7#3HMg3czUKD(cqjiMxB0ibz@7*6sdG_XLQ zb>xydml8T)R@)zD4FCgG4CqSuM(&YG4Y@)0GjW39JI_wuk3t*JclrRDIWttz zP7t{&|1!|D5o2MnK}M6R1n=k*CdJ!5esDz~$J9n)*Fh+>(@|L|yYGCB|lmPN@A({bEKU+lWpj1ChKH-#5+pbPXT1!s*~^K%R*Ld-)xmglz*voxk@v=U?Nn7k4RqyloiIAf@Jy+B 
zTOpf)eu-tA*l|Z!j^5mW!*;qhB~qT+p-r1HMcD+}kXo+>e%r$K>&EaJ3pdI|=B*^)8ie+h_rURmfy5)r#Y2{&P)K!D;o8b2kQxXIwsMSV8dz3371&D92WF%rkS&iNfFl#aeOIHuap zkH_@lMX5>eV4l#x?7{g}j!b4)Twm?_Ds+QPATQow`ks0#JjH3Y_KZwUmZ=&P!e=^W zY-<(Jxf0=9>+bIuji{8WkX%m1y$-nsP4;re-k)KaAlivXk+8+{Osv&+P9temwL> zgST-=LWz?{ved{id1B^EF8Q!UMg1xrhQLib$@DF>xzz`2OOG}po3W_e zBH|cv(v9&TR0}bhcq%kvo);*a>1FTw99(c>{gkR(8#`}oo^%*dwvQ;-liyqbTVA^; zWo}CZh2wax1SvS=LDhWV&Gi>(3aH)P2AZlnI0m100R#hZg!feFw*e6|yB720Msi8_T*sa%5_s)CqjCAH12csiK3~w{Pmkwcrp8k# zu~+Hvm`gz&Pj27(V&wStWY-?>k^h4aHqY-)Zr^@wSnJQ`8k+gAv0ss(pb)^cr+5kc zC;0w1GgZ_5v&@EBwEUW)eHoP8&ngHxs=>-q(bI-PkDrUY|HQ&F$cJ3V60p|M_DIZ$ zUM8r2T|bvF$^j)bkk%)wL#-?QAdpP7)HgE-u;gg;N34$maB3#}p`^>u{$WZM7x$8U zu&(5jPNS&Fqmih^8Pk0m_ZJVaagq&dg9>cC<5Sn&Ne9D){q4(#Vsv_LKZ&b;T}8!p zS1wjo*ZI^v=1Ks|5lOR?<*pdKNH6iy$$11BU`{HcR#DQ2ZZDLrGfZ_hMX;3wEOCIi zgCUSY)@3a|yvOzW+(Z&ISBDFlGgW^Ji7t3fscyP)Ds$|Un{EeHgEzZ(#G$6KYd+^| zl)=YPJK>ujKZ+GlW82&DI8(WwFo|tUB9(Vm&aTZ62Ee$rJ-N@m`iXG*{XXA13EjX|M*;7N_?mWGQHz1OnRi{k zt!qcy1HutKt*sjY6QQpbv}8u!^)T<5ab#XXziRPAiXFGCi)#j?=rJ}}d|B}l2}3|m z{bpUzGjD$nE;n3Y2ujl`aNw-3qSUu`)#_MUCbY9u1y#Lcin&%;wtk_U?KQh8a&J!} zU_5j^qIZhZFpCbQtR7DJ2GOmJAi?3`6%|DV&s}!|jWJ{i)z7SF>CodHn0`OCU-jPT zo7+AGPlH{3m00g&_s&NzQYX?qB{Q|=BD>}mlI+T0Kf^AFoF9vvn(72UE84jjD(3Eo z&I6@zt@fD6`6_0Gl3->&W|jRoZkMXMgOCjJJS%sF=4CMH-=OYZu=}k4(5Ky&*F-q= zUaGC=FhQD)SvoN~vBTI-2pH;JiF)=Z#CvGj^qZ;m18l~~(3Vp(X>nW8?Ui*3s-UiT zKY$O%z}0B12gmX(udeR+)y8n<77EBLd5I`}G?S)!pIWW*E}Qm4BV@OCi?tm~cTn|v z2|+MqW+>-tNMHF|O~dY3a{-Q6cy+q50g{z*T2jg727;OK+^>;K>r>=~&E^npGi`l2 zA~?Q%YR{qo)9bLYG2({&tr?5E<>p_1kzW6ICo(8ox7{7ZD7deB3IdhMtiUr7@mf_* zb3TC}UV9L!c=4ucQU^mzN%0_vOAaBm<*SgUm0CLC%?r7&E|6>*z<>hE%jS(k8jsVM zdz%0Xx3VAj)9)36BjDnIdcNUoWYx=H#Dh#o2KY#sqw{-YIkUlPNNN-nLtG?n;nSx@eK~^QV5k`6+8{6AgGJc$eKmO1cG~iP)Uf z&u9y$xm_)BxxUb@%7q6X&f|+6dwG$TxHUtPWd)Ia2VeS1rEcn$s4?Mnw3Dl>dhx_{ zoqpAZ*XKWqEL|ICQ2I!i^Ft{8qlEjH>D|qtx2Mqr%eMW)AxtX4B#ysEzjP@uZ|bRb z-d{3y0xwdwYVqv4id@sRw$UjCTKAv>Y(581a!2%cv|u@2OxOPmZ@dH}3}jn*q;V%< 
z3%^hLBfD3;8tMF?JECw1xH@Vf%~y6DjFhQsMCUB1q)2^pt(Ivdm3FQeer7IO_I|`# zL=xc45OA|hx>3J2pe;m~+igoMcY;?A(iT+f zWj<)pIH^KdrlR5QD0P9%fiZ1L2zu(iwzKK&+et;pu*a?59o82op9Jq8<4xQ18rg6| ze9}o|)|S7eE!u*3uNO1wsdS00^5M^&V<7yY{=0Eu#3zN!eBYM^waq$4J{-}W%Y`$( zkBnom7-K@16ITCo!Wsg(9*1QG>j8DZsT&lF;L+=$XAcO*q#(8AXMP z7P}eCWwMc^uO`hxQHA$aO`<+&X&4yK<10Kgo@ZA0XxxZ9+Rc0y9>ko_>Wx(5ky8!( z<`{V|jBZhGvMsDE;n7L;DWL8;49xCZv8aZ856050sIx75Yhm<|$~FIkhR!_$OnBW3 zPr(e2vC+2^socHDun@CDDcC3;lk6&Aq`d59Nola6?88t9`ovKPLj;*WN`LGyczk{} zh-_uGc7p1mWpC^w=X~Dc`?gUxkB_Us(A`G3>WAU?n1kqFb%gq@uDvL<`A}GL*?HXE zFhC)4+Vg{7j9P`FTTnj?C&Ch<35#^0-t-4*GpYazOK;eRv3dq=czO~7dc{7rVn!PWKH3? zw>ve5(ZnQd5#LY#EOES9fjnOP<(z4}Na!FYXJcMu3zXoT;oTSzJ|fwWwvZX>7O=h8 zFxvSI#e*0HHOMHfbG8s9TZX77x><@myOu{mAtN;Z$)F>s(=J%NbMH0TWBrIG1O0`r zcCxEShCHhg*TKC{jJA>WB}wmOeqNFQ diff --git a/images/LAION 2GPU.png b/images/LAION 2GPU.png new file mode 100644 index 0000000000000000000000000000000000000000..d154a526dba66592c26dbb9ce4f1c60876150034 GIT binary patch literal 51428 zcmcG0Wmr_v*RLQV2$B*iJ;)%P(jkqsh;&P-bPkQQG()#EDj^^pL#GU-DBUSBGl0YZ z_l)v?pZnbV-sgU~_uCxKK6|gd_KM$Hdo3b0)fMmFd2r{(jT?8BmE@k^xN)=N#trPJ zc(^xiV39_IpWV38|4CWy=?gEc-3x-BMsJLN-d%9MNhuVM^NUK8NV&<1T~n#k(7wX~ z>dV%)$QXHqJ@i<4>zwKB*`vo4Qdnx^E}E=r<8aQF)E?#+FAn0nUzSZ~$lde#rJKmm z7vrlee9YMZt$NCK4=K_9^-sX@#b=vu!x`y<{BNxNuQoFD-^}%0T{IJ4owxrG=O=FX z!mOPB2Aeo-=f|&xcY)b00J-XQP^v%}w!m2WTX20UtXM5`Jd}WH9I?_ju{YN|adxyb zu{M-SzC^`sqWVd= zCmLNh-dDA79f~?HcUOCR{{ESuo+IAK@phI3EL!9tSDN~*@V3o)JTxPgEHvtj|1J> zATzMW-HTN=x-zKLAr-xvX8_W~s{L{iUSjCbXyuY&sBMo8a(CG^P!-m}_CUTsXcW7M zLHM#t3{>Fi{&1gpXwK=d4W<8jRU54&bK6QXyRN9Gcf;erkOgWXg5qZuvFxT}13t&U zm1px7j~8olFMfBc-85lTjHOXN$yv)J?Vivl45KD-V}dlU*O~SQ-Ml^iK{k9`Q4*PO zSO}IVoA^b#|3x30UG((l7t5j7W-J@kjtgyQmtT-!RZ-qlbS%k|nX1@4dbPw?(fedYHelc5R%9Km;`VBtpn~5={GS8gjn>)R3e_)R;ZpH? 
zXGldcX@}-hG0}~j+Hjas_H2%IsWLaVhYPo;E13-#uB>?cTA?7DhWLRn5?^)C5H;xd zhOFLsrFYq~W_Enldw;%FUR1K;qku-94cIj|<2D%{?N|4*E|OSCA4QOgqcgo|ENKos zC^oLqe~G|UDV&&o?+gZlZ=4?Bczd|G@$@L(b~X9l5u&SHUyHc+Y3gR&M_PCTE?tk|>%oJ9=_J1G20={zqc> z+;Eacr;Lf`Iars%R_`1Sc;!p7&u)708dUCm5KK3zBtlu82<)?^T3B{k38wB~St;-t znmx-i`h?O3T>nM)!H=?ufs+=*!J<63x7Jzx)$zj1%TzfNsRcMMe&p>(zTa^c9=~(q zme`BhSduSv9Kv%G6%kGQS>JxKJ4EK*yl862!idOY7EKc^2o`~NHNH$C-*iJF%_`R; z1GB|7T7MOBdbm*Wpzgf|!?hhf>@cU*4VL7Eh*cB(qP!J1Il%ck{No`5u!pPf8&c^e zhb=ZotZe}=_L`0~B)s1j5l6%_tJIUTuw7kFT~72L#Y_(5UZG8k79bn#H2U}i)Z9S{ zqL7x4`rg_&zgLTu>KbUo8hf%Ql3p8&rH2#Go@|IUEHCIT-$bbCNXD&w_U`8&57KQF zrZw`W^}d)H?u$!qdG3kyAR<;69bRZ5G+qgyj zam{F+4Qr>ny~4}>Mi6hGyVD@&f1h3frsuUgPUY4*qAqwbM|P zz(Kl>SUY3@BB`e9;n)vI>hVIz_WbW(G4~IJKG<$4WsBo|M2l7UtgST5HQet~bjVC< zqMupQ&U4=>Rx8oSRo{T;Ru-|Xt>^OXCO@W53XrMN&2exh-(7Whwz@PcT23GWpW1KP z>+`r>*1QM1JGbq@v(?D>UGBE_k8aWQ!6y8erHwnAizi#yZO4&sI;YAFb$w+?{){%J z#*&y^hWU1)0?6_#(>pGhx!J9e(_h~Z^v`a|Qn|glv1<2NsC{H7$_3ek+PDEZX@DD! z4Y;wG9^vST#@_EF!@FmKR^_TX9hfB<$I5BZA{Dngv>AxGx{NrPYCHbAC0GejyT_Yg$u25l@Zbh24rlGS z0!>h^LrVi$NrSI$nr%m&2!|25Dd4HFn2M&VpoMOmLrg@4i3@@gl;{!AJu^339dJSX zxHy(rBc!;T9f75yG7^z%`qnt}1uJNgW5jHDrHocQt~KN90MY;Uur(=`CiaxrZ&P+c zO=#}LY#Fx8+uxBZKOT9OxGXv7`qHF{nauuNbf6Y`ylgF7xM3~p4+3?fEN5c|yOvvY zFGoQRwn;mY8r2H-6C?+7x}CRa36rF*P*VH8DiT$)`2{N(>Z=slD#E8*ub4%JL^)#R zSH~aP74Wl$`wVF*irBB3kFIt?AMF*}18!#&B+5PbZ6eHMGHTVE3X9w;)BAR+BLVI~ zlRa?+&ate5Ovmn)pP65!piYmdAF6P+WU)7 z4dhCOn^db^%>}h-#dosB?q2LJ+G*yRWrN%g=5kw1C(Z--{#^3Fi#5y&iE`S`UXR8F z($2<>M)%Eh@XrTbOqz~$GzKWGJFSXr#A=;ZkS*zu<*F^az7X@=cmq-Bde$#YS5VL!=% zvUocdudnv7zC7?%pKN-$?YvI=~?(-xVJEzR-C2E{;@|P(ea*+VbhmpI*_~5ABz|}0xYoxP6=KX35D&&<~H+UZ;g2mv)av9g+vZoVlRp?LkQ9!_;tY>Md0Szf-$04pCOvlN%Pr?;hk8Y?+^nA zeV69M^Z92rI4|9JvOBdwlI3ZhB((3`c7&IgZ(6zO)sMXQ`Hrv~Y@pAclGlI64~%p2 zMb{9Di{|ToJ(Uecx3FnUk~<@Km?|Wm$O1bQ(*9gj8wxNtC5IYH=W&n*t1J4MAbJ9& zKYvj|TQBiVOrMsTQ}ljWaQnX9!a{y==`#wz9yWZG?wXbd4(D=Ew|!dg4LW(USCfm~ zwvfo=Wg=BMGr)~%!wl^~B8ONjc9rU2!(W`=9_#$-5Vu{_X)~qyPIhT2b!@rSbG31G z|7i^E!@xYxZ?|tk 
z;1Al?i?ZA$Hw2&GDjHUCI8p)tn$oZELG*sWuGHORZoZhw4#%sa+!z<}x+Itq?dPeYylaJfrx+3RM1wBapXiX$ZYwJLD(5B; zW?IO~xZ{2Q#fsXkvU-Y0qP9!>t;J6!E#h4`6)EmczhAGI>W*ZKP5s(Je#*kLW17P` z%q(NuGzuA!&vzBcRoHmGUrofqz?`$)?M|~2scz)+Rw$@~cGltR_X;jRVz|ogtJ&O! z+xC6e@u?+hJKb{M;hjsui~iA}{e1fM4tZ7ZPEE!@62KS_L%$-$?Zp%7H|^UE2R(@YxJkMk0D#7PvBQ$ zQeR)-x!=7sH^x(&FQB)Rmd3bh^?rp!bYI5=!k%^tA-hxS4xm`k9u6_s8Na&WgfzlV z46Vx)l}ZaIy+ZHlt8u8M?lr!ds$lwdd2t@)G|q%@SIbg-FjB_N;8N%-`3ml+ns>8_ zM$$K}P`+t2&ILq6yVCb~KvtxXU|=UAc}14~Ow)TJbN-$(i$~V$+lnQ<`=Z@lM6qr> z2$9c*j;(?hy^d&wvHKW^=*C7H*G(9i-UI-weCPnZosyyrgBF!Le_p?a5yw!bkfS=9 zG6Fr;rEMUi$f;g~{kQoPD=cObte!BO&bb0#IC7)O$9|TH z#EFbshI^*Bk!6uNXJ_xLbCk5%5|~%R<&@DUXIApJIkAcADT<;iwz>nywd_ z?2<%pZiJFfs&j2~-5AM_A=S{=atKMF{E*!!*Cfs{Vn=+mPPQgbjVJ_Vg?A+6SYDdC zCbSuuHnudxS|$@|{rF5z`1rI93!eWkbSy4{7cvGsY5gXJu^Ob53d%G6{?C<~V z<~N6|k>#>8MaM-QG=4bg#x_Url6R)%`5HLRjQ1%tu*51;ijw5+!175uH$tg@YdtyA z^i*xynX05};(JUoj^d>p+(54mK;^wo@8bHJygu6gF8>08B#UTkW!@g1IIz1b$r7Bi zNlIp4q`I8u>r`OB^P`--Q)x)h?j!`)h@?W9-im~BLr0WEinux+DlGbBA12O$D<6^^ zmk@^^cwpYnX3i_UEbFU7&p#-|5@PkO5`|);MF#2XcwFB9+8-x*DndXCedH)h7pLBm zU%tw%!a=ikU@|p1!1SPVb?6ay%}0BP_^C_wrwUZ{#;)&c=*fQtQ;y>HaX2ZY4J)x? zpw^H$YGO}GV^>@ zHY$q=B8!K?07f1=F7+*e7-hL04Acxx&`uH8+pF6dQ zIHB>ZP(i!mSj8(Oz&J=s1tTq=_a=n(;EcdgJe?@~e)lQ7;cnlc)xDiUEinHZRXI$CXfaG=4FmIqlq7#9tias4&*e! 
zUSYv7HX+WJ*>CNWDXr%@F4t8DvG*tpGRzQUM+WdH$GV{05E~^2a7Lrm&X6(@_>{4E zP619HL;>}hsk1Lbv;P)2!9;f6=Q9f?wb^le$n;gClcNxJ-3pF#Rd+f#mLzn06RN{veI@1Mw`W(S~P z0;09!UMVl9S2Cyt&9#6#12cFUms?DiV(ct#IlZp{>hvvqek~OCO(VC-1o&LrvGI`p&F+n>+o^4qKz<$Xhce$#G3J10K2_2|3ZMU!HQU>wAH`fw6_Z_ShJ>q+4T zGgkmZF8nr%DGN^GN_Uk^uGmRZA+q7xfu6vbQo8)L!~_$JNb;mY5 zCmrIlWMpvqmJ!aC2@A;H;k^$8o@OpOxUsMwin5s-t-OLX02@p0lEC z3bSny5p2~-qYA%uiBqe-v-Kd}0aTTX2!v+C&@sRO@%}(R1GYKqN~TyK{!AKs7sE)$ zh0=7K*Q8ElQ=PAv@M}|HFTfZbhkSgM()PmgBiSFy;*Z@lx^xSpd?PH9zt^2pOe%i^ z<^mG}AC4;enM!b~J;K6wdjy=%tPTjo%n%fWH$2A-fZ4W0I`ragVjOdW$RYSrNl~A` zjvwH}Gv<*g^{U#T)9&r3-yGE|y?WSRmw=Z5BQBzOW=^JIPbRf;A5N{XPyso?Rc|v+ z)b;gwHPtOE$Fp<-dsfdq_^`JBq=yNO$DbUmz0PPJ{4BfXp4^L<$XVD-6oA}-S>0kjWqA1 z2DRMf`>Nbx1JCKj*fGeF2@zJzDVgIYr*IIQ%>$m=EbFrW39qI(WIsoJWP6eXrAO*Q&r zX;lC;$CXI>@~ec6ARMkasPnf9!Ty<_l8 zxpk1G&C|?ZUq~p)wi?6ep%m7WSo|D3VtCAz1W$}=44(>pmO5Is>nBHKfmY8)_R%AU zt6-_dQV``=AIlAt-v=-+ZH|g6VJC8cV$0b`+LOoRnK_1-9F&nlYtp4HEJ7$UeG&^t6&U4sGhav8!3f5`f-I+42+YYj@|ip!^|PcMap zFMGHyFF29N{r8h9!dC%9B5Np_Qtm6=rhdhX4=M>x?;Iz;wO=6aNR&ys5$H4-qqx)1 zro8rRQjx6S22B$a>m+$; z-Ba8)gY_;B4n=a84Rw7SNpB&1h3l;(WE`n@8`mu-Cumr1gMu6!yE*`GRAAzur0hhx zR(zV47w(k0=nA>@n0e|U?a*3}=W}`Q!$%xfZxUJ)Gi&+_UGhCXRSrif+ z%5HSf5am$8eohlrm{Ol0IXM9>x#040e}24%3GN$*)>8VR2j(>CJd;DZ2p(IF9P#@- z6j0vW?nyP;)vgD;qq6-&-O?H?#8U}j_@VA`Pt46vUuUraexGLwq@jrZ@w^%7HONJ< z%R+zlL0A7}rHOG!-(GO#QY80WR4u9E!+4~7;m``Q3L;|KfKNkf@;&IJU8z2~Ela4* zb~Um=n5-8kYCmAGD^j+zY0l>>n%%G8@KK{+q`;bdgzws!UR8cuS2!`)L}%+g>^CPj zcx2@#dq7Sxn{jtvf;O;Dm1^XLZIp7NiZPdN7{|N+F*#Cm7M~gT)GZkOWqoe^Pz-xW`a3cl4 zj6fRdv-R0QpKka=HbAui_>JH4oI)1x6iWBuqBVm zG}LUaR&Ec`oGcjsEaW2>7P!h%8k=7gFkoHTCEH|9*kZW<&12dL!S zCe%rQP}n<+~2=tu=$u8?w*)}|AeKjJM@BBsCTI&ot6gV$A-Etg5*g)=P>S40o;gXL;e zt}>(AsyS0QkMl}%xjwZ861hXTs>3|$0fcEzXd`^`kbw{?P^PHB#7#
    T^_P3>vZIm@(USs_?uhXV+E?4^`bt3KOWT$k)9A*#j1<{ zV%147FdROZL}p8i1$NqLk_%T1@*OgYL}@=jCeJ>rJ`Vap>V#ci zq4Gir`v?%%of^I(90SEX7i`CwI?ikn*W!|Dgw<9;sW0tPjd%3%f$=W5iVV~$%L_yMKSk+B)nepRz z?ts&wEkDz2)*NERug!%`wIgEOfD0rwoiX4|S>5#D zBOWjyXRhN3Pb_%>lBsGI?2ocAAWvCwI=TlCXOm(7`tPnXfP{l-?f5C70A;-Ab1&ie z0A*Qjt1SYS3+!=)1OW(y+wq)d@z=PEAh+FVi7nVClPscEAidq2IOC(UmWgpUqrxU&zXeJ=l_(H*Gq9GfCrf-{wst{@>z{JKTDY?#_tF-UE5I@$WXTG_nu22)UonpR1d_u*OD(t{|8=ShzP-O+p5u4r}z%=#80;c!Gc? zm;4m4V;e0CJv2dAU;T0#PVrRlD1MInq(j))`pb}&2ie!(h5I;kFBg8g5N9s0+Jl@fRzi)&VNQ$3K(aj)TAwD zAY-Dw_#hhzl{$rPsX2OU$0SW?xBw2&Z4nmKOL2|^o!K(XqG`a#lw}I$mN~z3|1kx;dT+LB$u%SzAbQ@Rw%b!M5V@q zBBQpBP?t^cDqu$CX@P_gXGOT*N+UOXO4jjkuGQyHRS(7KNxMfI(R-?k(7hz(U-t23 zs%CW%wSf2_b=g1~T78`80|kAAje?*SvhNFXBvS_^o!T95Cf+e&B6d}yS6yLuYQ5yB(^?0v%(cl=63Zp6KtO!V zPC3u=bKOdyh}~-3`ZI5oT2=o}#*fn%EIbSuSuL63)JV0+F5;GNPq%1f$YNevvl6N!6P}p31M|BY>k2sbK9>NJI3pt zP8X^?^eUANz$Oq2LO_2uEr&0Gj9a{Tcjubxf3gH|2GG<1p1~rsKH%!o4L}8*RIdcB z35M@dBbO##o*EQdS}2@tCY})4e;x&@f!-s>Zbhl4I}3?$Q)QnFnmG6-#2I7FSZ~L< zDCHXBQG&M>Eq3}64mag|^8E_&xqzG``-!HhzUO%8BB;9H&XW*r(>B(8AyX06Ov{~N zUbt*qX{M{*I3@F8>`4O)*I8t(6QM)*n1Tt410WcsUWB?&gBN`BSCc@aB~SMsF!P)YQlbrBrspq_vIoOHt8Lm7e-jzQ(St10# z$INDtkx&82|Ens@YT!1}hN>5me<|%Si~r})J1)J2s*!Fe-2x>oKw%ZfgCs4k+pUhR zM?e1MPBPoI9BV`QWJcLToLIP_@hGIR%e3Fc*-`#o4z5B=GVQ$|=O=RdG(wK^Mn=_L zjCcJXi^u!Qgh_5jz*59jou<15v56hLLp3EKgYM8v-GC$AiF)6mdb#mNajxGuu^nB5 zIVyyCzS44fgx1T{wN{TcSe8*eK~4E>to+=dxVVD`NLZUHH8VUjH+AKGcTv5qYz}wl z;3-N#OLg9X$yA7OcpmH(zpP7K5W0z7$! 
zJW`Vole!qPc;I>T8TNyvmmX2ifUGY7G?_?7@i1|t!hgPuabj$fcsK%`m>5(Gm?jX%Kv^0zJLsVG`0qS=IKU$En^rRb z{sbs0XU-#au&NjMbg!M`$IguMsXMPfg5#>Av`=EsxhIasZ>HYQQ1QNhUPLWAo?Pl- zKlg`Fk`WEOUJXGqb?U7q{Py);VjhIpWAvbY(v+I%A^Vlm^ZM$t%v$u~3pDzbj)A{@ ztf`Z%qrVmxl|MGGe`E#PkZV(oW_PGDd4@&`o1%R_B~{~otw&2$_UX(bzaHRRD zLwGx!e|J5+m-=>eWp#JuN^+NZavc81seGr&N-Up+@2Drl;4Ea}i+n_ZFAnWvF@IPz zPX#`_3vZvLFwRtEcxYszfU480U=P_4NVbsjS|l zFgN%7YNT&tj(qX&yIl`^>$8Zs(1R8b-e|WkC%u9_3YjYlmqDJy8jn z*mIDP>V`M;!j}@CJh^fB$ci+T0Mbv8+Tu&;=#NMD*p)f!q$Q_Cb;OQ9;hxDUyepsh zZCO1{YU)#(Bc{z%RJ7YEOEp`L8(N&R-AVbpU>_7f19Fs9qwu20Hs+WKZR))`5n-j!o=~JER+P9=U(~H+SISQ( z`b$bwQMN{&Vu=-o51ZGvDAn~hwbsVu=?JykekypigL)}gKCovshKX_O$r&Aj6%reM zlB|Ytjmzb<_z&JKc(8@^8Ucy7Kq`1csPi4pl7=GQ92zux>}ndx#Ib(b?x!9IeM9&} z=DdEs9$}8UZL^H2+{)TG{Wwx4#a+OrrbF%d>?h0(`jIze_duV`!4$AC5vD=hkh!6%qyRma2Z)_IMU5 zY8M+&h&SN0X;bA*Q&1$zZKBUBY8={FGts=1!N%R+Mfm1BZ^(Q1GFBQ{d=6_f(l!lI zH0YHTiQYj9zjEzbrx-Y5A#1tKJDiZeLEsba>XLfgIr-c%PV>*c{ zF;!mAsHT7Q9dF+WnMnmda&_y|NaOdPDueEcDb-`y9#xC$?(SmTQ$dWMk@IJq`L$F} zxd-#+4i)h>KDcg@T|5)^wuFf=C9v#IiMJ>v6-Dg`EBNcJu7sRKu$RQ_2m@s!_T4(P zLw4rzD%@cSR35eI&QvX|ASw9JYF5iZoNrQs%4!T8Yt=bRcT0y0bQsm*-Y0kw>2NCk zo=?0k6EItI7Zakpoj2de@KP=yu?^~H_1^Lpt*_9OuNPx@yo(91Q4iBy{}3YPNBHy!zKw*~*;xApD?Dd?q3U=!GcS<7Fe zTIkkC>p3hQ2E5{5uzZ5C$$>cQVSx%>4o%~j%U!@me`)aCV}pP-;zHoVe{DNM&=S6FNVtuS`oGaIB|z9gOZ+u|2~aZg0%T?B zSGx9JVwri!uwo9BPKJSe0)}4d9DsN)kM$&s^TEQ8mIJKe4N3niLH|H8e=&h-l;MXE z1k?ikQ(THQ?f_+tM*oT=2fsvHzy7{Ipa5}?o$T3`TPh9>BhjV z6Z;oPl(ri>i?%)VeOrk0H)4~6;GGM9&-&Q5VgQ>1zHSXifv=M~jYolFy5+>#F}mYu zXNtVtaqu)p!smTY)()n8ATpPHHT;W4u4HyBt=P+?4vldwI^5u%D-19;s^CJPQ@{vk z=4d1@O;Q|hrxOG9y>UTKAVM?LVzwv7U+EJR{k_mGg}8aBgH0c8NBwGAR-cA9`rtF! 
z@<3F4jz|8~Qdf8s&%9as&Uj=4cWiEJLGEJYC^+eU(>tN|4-uEqogqOLEP=#c}7 z>^xY4gb8JA(65P$KRb+R;_+Rg4M)6po0K=?vDlPr%es%CUaTH1T|ISP#5!%v-O zrT-w=@NxhwiHLMVeG@8Cl-Smi7w~5MAatgsPQ7n?dw9s3>b@V4p#*rDfFaElM7q&9 zMkw3SIYQ7uFcKO{-%+}zID)E1z7dyW9^?irT|QxpjL8Za;qyrf+_a`nTb=KQMH)Gh zBSZGF)r0lHW%Ls7Obb`krMa}Y%c zzygu7K3q(=vy-5IbM|Je7Dgl$01QKim81U%gG;2b3jnVYCj)P}mR1Z-Q4F+sc`dE1 zfV38j^8Zs8B{K{bAze(@xt7*DfV8q64zpiNYY-rgcXtM*>C;r(5NknnHjfVEJk9>-ir6W64OkubBsB z$|KFWXSefLW3Yf*Z`%+R>6UACRecZe<9T7Z;=g4ynMTt{+nknUrfcoAwZu1+jHsw6 z9B1ksH2{ZiLh5=Q+%lnm|EjUWNbdOCnYwa35Z8Exl0aA>5L6dE6mmG^Y-F3m^q2s> zCaG)k0i8fY2^44(N&oR#U)yN^=NHiKY$HFA{$tq%G^^wAV5w^;&7{p&q$AwLHL@vE zwsd8&Be>@i9&4)>k!U<_Bzn^YD7RpGyU^yh)t>|N638RETz0pn_KT0V!IJT)H z0Ntk|5RVd#g6K!|k(kx;K3OH`dY~XoD7*Ryz5V?YzNPAPM4zS{?PQGe)64JeXS!Q) zS0`~3xw%)UW$&N$Jh0jXq(-*r*L>FL`&&$yCAWa6)w%Jv(m8M47_M9K+s@MPC+0~@ zPy)NoQ3bCg(y7gG(lB!Oq22QQ#ZRO}E@o`1$rx z2-dpzbg#&}OC88wOsl6W*+v4_w2YTO-4+srOTCrCHON86+3Gjt>8G1T+2`WxA2Zf_ z1N2}KJNWMrK6o@*O6u@}pWv?v0y5kpfVO6NR(<5`=vwh^VZ}Jzx)*${X;471 zb8xJ709%3i%ZtB?y>OL#1(vwF#a$|Gu9*(wjPsI1@JFsWRsL1Ty!)8dE9d<@y{8;6 zUh*HVw~r5{Dg7UPX8FL*$b0Oq{@=aTC&s=_ zOs86+p8X8y0aF2T5Gp{zWv38RZ*BnO07QL`--4r|fO5N6z_Uc4bq;e)9MS!NqPvYd z$w;(>B@j??6$-xjQ3zTqI2O#!L!pN9>jih}{=m@0h{bHOr z6z-91aCiV-Rt13OY?PHlsoa<%s(VvX=QUE9^Owiev z82CK^!|CMd%(x^B68e4a9DBkO0aWm+0r)z`LR9fAxv4V`%H2yFNzET!&&J4Uc}v7) zRcRW~WTC>q>fiH3xKI3fru!tX(K*If?FW^Wca}YNG(Mpd^!ZmT+iKq-Aa6mL3D_L3 zFTB&<*Dh@l5c6Pu{_tz9Bn7lkfyW@?S_zzh$By)L*?&4ZiU%kqh?)~9G{q<)k1$|F z7Keu!|G9kI5WrWGMpFWpoEUdZoduJ5umtyC=MRP&z-Ua*r>7{ZzCY0izJ;w~yt@A~ zd2X76@!8(%BB`K-%iV>&-2Qw2CKkY`rYw~2UxGv_&m15lEXW}1nn*5y4CCj;*S1VR z6-bfrXhvN>PCmk5i2{1b-0{?_S}yvDe+e0CJl_jPKi#IS;>=!6FTwpurJ5F zle|(?CL6%8%9AA+=A|(QmBX7r*?xbr<&&FDk<)Ig8-I!g-_RM9bz&|`=1#`XQ+n3S zF%^1S+a&=~Yv!krSK=pU^+jJ*+&>4P3&xM>&vKYfkje0(iT3mP+Kbzc+!Y!vD}R7{ z2gwiQ^pK%7tsfmg|5?6RpZ3*K+C(E?6clzlSvTLchj5fO! 
zy`z1pFZ#)yom)J14YZz2z4`H3xfz(QFOHr&@oKOV{p>8N{UqveDb8QK`Y&$hxrar# zfVkQbl%K~`8a5rL|C!|4=)Ar6EHeigGQIVu?YD^hbj`kBz_j=I1epR)r4LHmQKdQO z>Nmxjd>aL6Q6Q-OWMpLBVu2Q}rbYZPR!w8!JMVGAt|pZw9fIyQh$oShsDL`>aTu{I zwFv^~%_S&M^6CdN$jffOoPpY#9zooDn0%~ri|?t6bF>hlfXpvU4rZi4b(a;OdYq)hIMkUPiZv!9w zWn-&+4*&jTXzkL#6{~UcWs%jqHR-Q4d!Vk;)8ZhpESGOE=g*j_udXPv*H7NSP2RQ- zbM2$>C+LlXhH3Nh{IdA8%MoyY)|s2bN_Y`V-_$A$HmyCMevSq%kun&H7=fc%;(=|F zDtH0_F4TZ}oTu0;zlBMnm*OS_D1c|OpM4+8x11qf7{w=|c#tHUKaARdQ$3|NGhs{s zS4d#MICavEdZ$kgD=9H1ry1VM$ATNw_0?vzD8tsZlNOV4CWzjuM2p*-7LG>AQ%GUg zjqs!ul&BIpV%C4*;vIeTTe^n~Jhh}1hMv?@O7QYc(RWy!8|6^Az+X*b2-0Wd@c@al zGfe&`#-qtMpi6M(UtiLZNPZ(mW))NVT6((bhXH{?c8X168=GrVob7);>Irey#vPRY?E{=1f*FhEA= zj?2S;v9>)Upv5X0rT>f~2Mu6uhMLjrrl-$)%H7tVd1eQ*F0478UV7N?I2`nP*i zCP}z$YKl57!Z=C|Hcw9B0g`hrL!A5{_Ek*J#t&2KTdY{zT7moIiPU!VTKeeGH%EV0 z?;ds&UJv7apjZ0K6z|?0Aa&5b`iT3!(1gNJF2zmNf`DbPfBslqTuqe}x=V`21`(6e z3V2k7p@*L4r~Bb?%@tZh3+(|{rShsAR{>`lS9TWeF$QC&4XUkbkzX{pirVTQ_zm&1`SRvEA?8*O-^zSLpsU zjNQ@8n}`fd`I4Ek0Zn0u*F+%DQCp#ENGI1>Z0fB z5XP)`D5nYUe&T?ESO_|pXs<^OAPEvNL=M759r@hti&t9)PQvizDUR9}$Du z&;rFckLi0Rk9P?fKMxmEP4rLg3abE3(dBo;bje5YbY0VjG%4?ytn~!(P@W3AjJ$IO zY|nD~KBhdWNF|jlirieuf1ZGnz~S^ITRkhJC%^k$UuW3ZP*;N=O_YRto{-&Or>or{ zgT#?}X&KaYdr3T6$iBNY0ldB3U#>L1A!FRZL^&qY-if3aZ2wIoN6hitEoMkd?!AHl?QIOu~JYE#9 znx0a(Gi{SxR^Kn+5+>)H|HI559Px}C26#~niQF*r=)ZtgL7ae&fW+LYs~4MsK(9L? 
zR$R%KA+ic7qolX_oVjhE(`~)~o%|`aT!?(afgTB>mleGH9$HjC`lteBNp0r#D#!-$ zDRGo@5Uj?1Q&pTE_x+(3@siFN`n&0c&N9Kn%O4A8hRlzd^;}2^_ha-g&AB??ac*eY zY|<#|t*wokw#1w18NG5eXI@T|e>-1TAni6cOQvXvO#;E z9A5o!y!fH?d?&(u=+X3q8r8UhaRLjW6J6@VEMJbb4K2s{g%J71EK9fe#Ru+GL4R7; z+}Ab$#Lq1SmKr3M?IM1P5+TV9-vlqOh+naY zrYYz@Fk}5zXB%SEd_}2#?yF@O(mQoDY8@k={51RP6IGRkk81O^ZeQld0K}+hV~c<7 z*ggQhD!kL0<=S;^0uHKxzZU2EQ4@HSHRp$3qiQ#7V!gAZxz>d>VQ>AEf2lVhVpGHR z?ht&M{x1l}R0rMtR~__s2~B7qZUYM|^e>tP5?{=DSPqHBR@aZO$wqHX;aKtgTMwur z5`d|V*K%Axs$oA^%*a#y&oyQuFq3jd3eeerftsbt(l?FBlLoFMZ8cz~zokq6$P&c2 z4Se&C?>Z>XQwC;Yu`@`y2I4FLiE5fG{{iB_On*z9{<-N%fW!*?!B^KDVgY8#EVR?V ze#`(!{QtOFYr$V1bv})!^V^IAIGa7w2}m-z9d3?cN=B>@*HNi4r3WU_24u}@fpE4K z_^bLX@UL1XfV_E1vNQSwQR{zk)^?OKB)fnK!Usn*ynEKRSjmmcu7H0N0-3Bv0Rj0v z`|u7-X8I8F4p1WSc+#(UYoKY|?pubSd|SWvck9E_L5&<2AE0+v_h2_gayz~I%jX&4 zomanMvAhP~kArU$Q(ov7$wEHEGQTJQLgCdILxY!PDfj_KdXMZlc)A+i|`{1mhzkQ1%{pwt+w)e{cR{ za@v~)CbfE5GCVS>2T-sEB>^*Tnl6X`stsZ>14Kr^ww4+yA9pK028&=1aK1083tZ?b z3few4;8{8sIk>H5-HHLa#i}`KgKBHtfnt-PCv2<1$`H$^_q&ao+{##EQKM1SbzEOl zsbq}mzVf`zOQETY zNWRYVI?}_G*D-xNvg&*<$>qlJkY@;2z;V<1##N+wzH=ikT1X}xRJynv#CmaN2=%Zh zG%`o()EXDH{(a?reO!74)q(HYAj~xI1MH??KNXW*dp{Cl<~_hanB!OK?)lc>!m|Dv z*jW10@~C?oa~%=ELZhHEx3eJOQTyJaLS*ohzn%s^aSoD?`YT92FT=#U%{negK30d{ z_w!Aaath>g$0O@bYx#h=a0A~5cJpIIU)XKgAq~kL_4trIZ;8rkQoC`p6L?X ziJEc~oCr!%uL&8VNml;oWhPfpB3fCV(JVk?0%3k5>%L0P$_Ne1&#)#T@p!`m9c}XS zn_YSP)tQ4iXsbWosSC+5=os0^LS{p?b%(aDHN}amiPy8qQO&1~=z9pUs~hz}7~-k@ zWi&^bK{nZP_t!qxPeTh!Aj5H&!aVm@wr%vVirXdt6M6)cZe38r zGV}?7huc9BLN%oX!pVM(#6y6K{`{8N1KqE3x=-|OZ@meNU`2pPazuBRIYhu^4jy?o z{dos*&c`{mlsFFcHgRM@eRI-~qv|D^H@>p(xBn6!qmQs=K#iA08*Nrn_j)KTQ&lsX z248O1HxrTb%is$?X^0q@xJHAUamc3n*lA6&)NHfhKF zCmZ#>aKF1tLTc6yVy1Y)Vfng?1FsEQZA5%xnlnq^_?v$lP`uY_8SWc(o&TN+$|C8Y@A52|X*d#aur9xPExDE>^0=8;F}h&&xc87(sdRUnpw zq&`o(sBr!Ugd+D-cqP&IoZcG#bD*TgZ#dk(HmfHiYtD1fI8Lup5;bG-Q1uA5UWSy{MJP6M-t>+B_EoRBj+jJ{AS)StKlkxMH_DojAUicZjwa^` zZHhoMm5ytB?7S?)es;7_X0JV?xaT(7Q}3wPye&?kuJ%SP?)Q6y3bYNLpG_zE$PVUR 
zlXm)c*h-VLvM8)cQlH8MZ??mbf`(1LlAF4JH59qzIIzGE;9~8=4D`))Ud=BMObd=; z{s;|4C!eChAw^Fn=SPnr3imANMSXyvU)gQZy4ox~o8|b6hm9t-rZ>i7{V!Jih1VMV zA#mfXTTUBQPB}~n-rj~th1_E2c9M=qYwu?6lG|Ns-+LDNv%^`lv@k-4-yps;nW}ea zM3O@8L$<*cNIK=%m+#tAD}vDwmNBW4{^VN1XdyagIN_Gdn-64OhIpUyU(oSP;y1Rfj=#R$HZBJ=PI@CGmE9sHAvi!^uiWHj z$F~VR(k_zA5toobH6B!7>^PzcWnS`b-D^D;UTzh~LbhWn*nOb2(9_0=XMR(hw}vQ! zjUJ~f$_;fb(_K$@Z`-9kbeGzb7?rj)TB1R|&_)s1cJ3CPMV4yr>qkLctfJDI?#s`T zdypZ*v+_rklJ@bf6qAAk^(<*l0T+FcuT}8zZZod3rU}#GxdtEFI(^j`&|ZZO>&AxO zFyFrTT_N%*))l$EIKrdt`i9k~;?llbS?+JL)zX{JBRw}>b5jJ~ zdh9vi_bJ#QER<^@Hl*jTogL~yw_h_lP4!6i81Th=edD;}ES95v5r80`b-sl;^<+1n z>&}+ZC5m7`TsBzRXN5=emTEFaP^K%x+jy3`%eI}n<}T&P9P7Tx`BqRoQf^?BUe&cH z58t;IV6-$+=zUTNnY9n-HO*DN+dIz3w4$47BSzv3(Hk6ci#)fm=q2w*X(Vb7>}N{a zjDoxpxYWobnuI1H4umSJ?DuUtnV*elCYI?-#?ij>^{if}EgU}=?KTu$^GI+b_ zy4-Ne(rg{?(VX0p(~Z*6`g!v#?kLmmtiYTx&ZyL8rAcOg6~470k#{*~HCil0rwgHB zTE3nXQ{&B=m^{fx5mIj}xb1z^t7tJ$c8F-FD0Z&)w)OQ;mHSHB&X8#{?C`RUx*vz4 zuQ%;3TYpdY^XyxAI+J}Ux?JfI}cMAUpBYWnWT>&y=>ZcUu-b8%K#RSe=WyGm?E47!yJM9ROLKNx-^2skwy-HO%}g)W8g0Tuz`M z@YJFLp$4*zR@YjxB#a5=FG|^uQ=LUSEOQ#Npos-04RGj#L&t}W?-GKs!V2!^o&7kI0@#8~ z(hAu43Q|TI4^Cc`#C7)LWz2_47(0Ny69T+gOkj}(c7_hX;V$lj%Ksyey)Xc9rmDYw zepdnUNv?pJUW?B~_b>$!D*(8#0y2Q|bwt-)SG6QOHcUXS!ae;J5vw;u-1pF9tlV5t zvk$}&BPu8uBq~enyCj_#Z;yc(#8Yk})@fix4o1<7J>A1p*zDIzySHw8X`In>=I{pmH%OSou0Z%8rhpsechAED33kdB9 z=ym1)wY#nkqC2-h_UM*}JqAx7NpAC@2|y)U+q$=Bl+Bn0|Gu2ybAVoHm42W@ z-R%RcC=pvp@-DTR4Codxi4t6`IeAkCt<6iO>8HD@*&6?nSTls({&7esMzhXI=updr z&zhQoB=l{;7epbDoWRR%%=5Ca{SVZ#5=Ic`R-p;V!@)%YKy%>pFwWx0+##X_kAVy1 zn8vg%o(R=@B-H8FLq%=^>h@nuk5ETE=`-gZ3Kh65^A;80?e8)rHr0a=ze zDZgS+ov6dq#3oBb^Q&qK?-{&~l^FQ1P8REVRH)S9>?s2wCt*&g{}0Q(+$)6 zqRqBiT^JyYm}lqpskro$!zF@Y8D>n#WzlDEgJ6=mo1d2pKq;E7&-EJ#wJ9C|o8G1XHqQ^#x47%6~Pi6iCIC{oRe)7VYpfQZGC7cpR2YI4qIkBtW)_)y9e2f}qx_x>}iOcDh$ z$xe$Mi@~_62!s$+MdcK}i%UGr4*b)ot{|*TS_JYm)!arIdpHJ!-8;<`jWf~M4+OKL zGcTX=*`Y=%u;xU$f5TT^V6<2Ct1G{qaiD-k6fAy&#+<>Odde%=u+F$07~)vL?K1@L zd21llYs8b1*s#vHDsaZ>6N#}Q?;cx 
zo-@584`#+_eyaK`GcI@yjJPnX`V29n`v4rt|L_^d%(wKp{|6VoaU|G#*|gJ(E-cO{ zAv^Rpeyrg&F3|AW&FlBPu7Z#~p7)yj#0w)(R5@aLH+O$wgsJT1m)~nv2hesnBY&Ao zoV`^u3`|g`@<;{ttqj4EzJDU5i&bxGVC0CwcedCmpaqBgCZwmYjLF;r?V9Gor;lCm zV6ZMEkU;F6P#~7U$YI!x{jtmCOxz#mkS6~}9-GI#hd~8_SpLfbOjx2}gM^Ccs^t@*+Ze2!*2lj0De7=%3srj1!L4!Zt675~wkP*s9|OW!_Owp_Ir>Dm1JA_nubF0gg{ zR*(&HF_MJjdI&){E0Tp22{Ic`uR{DOw`MlAKdubrlgO#+<)SVULLVGgb+)o%jh(*F&$9kFr!kGXr7|wc|11fQVinZJgNEQc{M{n$*qhDtOyC$&0Ouc! z4RwJO8VI`73mTZZ2(~IgBO+c^PoEr33#j>-bozwASVG`)o~-VZt~zz!tHvc>WlE(+ zQAX0$Y0*t94Xr`E1!pYq(&&}HK1(uBY)8&E9VVYVYutU@R2?6rdYga3hndg96kVuu z1|D6+%nCq>)6uLSx*@hRJZ}thqf|~`zP|cEgz8A^=W0Z&_gNl33o{<)vX~BfQyE3UFrswc zfXn3Jm53f8c*BJ5!pfHOpyGa-V)`DNfT^l-C5OQJ6#-9W-_b&;b8;Cq{7l=zISih< zWry(O4wOZXW@7YK_NRi~$#q}%G@2tSA6aGt=c)nqgp6H>d>r*t(QRUVE{bj@>zW`+ z3ID~T0gH~shAuT4*NjRtV!Y8W~0VztMmD?piC$PdOIt^yS>f_lJ>6feiCXyNwOyS|@-zX{OO&1HA=6-xG_rh&!;9 zTUbHL3GS2wa8X1AD;^C%!^{3`v4ZXejI`OG2=$3(`x+}7&1?t%$2jX8vqZX zecfqyhI_3Hzzo5?am{dLE0O2*lWDgrQ=2?ZgKif;tAimme>x9z)?q?E{0i{Bju%qC z3tl{v4g1VM;d$Js>(M@YW(lwo`n)KLK2R?ylKVV)=y>({k{{P_Q_w|Fw zc=QV<_{B(SA13qQV=GcK(%tpx3=EjP-LTQS7NP>Q=em~ajXAmgZQz=9ug28$&jQd( zhc0Ux+O(ONqJh1Xw4ADHm7p`i>(>t!pY=r5}7lv+m#L>!7tX?LUi}CL?=Mabxm>rIB(cg$rntn+2 zWD|JIB#n+C4g$pM<+k-^wpt6XD7s~(Sck*?=bt}*-)_n#$C;C{dF4AYik?*kk(=%W zp*k9?Euj>^Eafm;{}9en^&WuX7nBQ{lIm%Aih2OcMy+B($?e?w_3)Z4mK1^Pw7d31 zu4dlO0vt3y;S3QX=MfzpgX6&CemeA0>L7<|9zk>t!UZv(Uyff^iLSqa&Z2n5A+`Up z`xbu;E3!*aR@`*BcIRb`7@Bw%YDkHMpa@=KN{Cq^#m1yPh*khXv222b9WHQv@C=y5 z?VnEYT>c?Q>;zZ02<>X;n2}zo_Hu4hGRR$`dB7VImp@JwsOHK0ErE95j7pw~MtM%} zf^xC@V3g(CS;&>==9R5P3McbEhXJ(pnKg+j&{W0)j$Lj#9*gyNrFp|KjFE))cb8;+ z_Ic*|lf4~$*7S!kEcWQVPN~K)S+O4pL5^bVV;xHrTNMTB7`57e6*HnPU0%JLZF?1h z!R_KCS`+M%6ee-$-sJ@yLR(I}p{5#gALs*?0ypJbQ&||z(^P&8%+IFBsAW#*c!qmI z`!wSOO*|!fx8DQyrTUZXPjFr-fu#kB=}LL;`Tg*Wy>KG#XiZ=I#Sw_Dk&=K<1pcYr z1$icIjCZ@%Bdvig(o9^n$$J5PZ)9_#20C(cbV0Zw-R|6e-s{37li`aPX^LXj=*Ef3 znX9u1A-pTruyQI>0jZR9<_>`e4Ra9|;%A}(1hPzc%5QiB4iLe3lzg&hN<&AX5t-!z5)V1`qJA>RnKn+Ju74m9&G_t$H$f+`02YE 
z@1TmIM{dY?Vs$!pLCtf0zod?vs9%@8PUK6&wSS~okbL?cyfk2O&339^pO$#|Ct-q~ zxvW}em{Xbvu(ss~<6llgh0-b*g+EADgjM+RK;i2i**!W__*cMO@vRVjK2!J|EI{Fx z99Nwo!1~XF{gvbr^zcminqd^aw%_}KH9@4Supnp~b!*>~MHD$$u5b(9C zojZkUj=g(4h#_UnnF*$tNEDSacSkVHVrZZ%y>{dVch-#WCy5GEHEnXKD~&Ymy>eNR zYr2Pj_h((FB=_t4iG%yQFT-*>;?SBuE5csTK4RSd(NVjJ8O)FX-uA|M)>e2wX3S?z1qE2p>yfyZi^Zu3_rN8L|k5w#e8 z>$;?IN}Exopda+%~cVY=wl^7oV{XLc*nx$9~T1~WJ&Nh09misfQU-45#oa{ zSC2{Vrv#PVACtYq4{AJ#&=@o?_+|y|^&O@SAQq(DA=mH&Jsx~CSu|}ojoftG<)gbr zdev!Fsg~x zBnu-?y6^~MK23*cA7kD?jO*tn-@DTiyg);~YMG~exk50jb5K8sL$1WD;UOyYw1xPP#hOT>CS$rNT6%fm2 zYkkZ|!YC3T)GmmMGvjp@-<{gtU(i_U^jp|{s=w-KDau%BvG!fPJ5ZA8S#v+Z-lr6i z;LRK(Qq#mB#Y>x*5b(3aD0cr@p-|Ik_=1yA$$aMr7EMcF#?WtC?C)nZx##bhU$08c zsmDZ1G*WSpp-WkpA@c?sZbDtPV%>bp8%p7wLlc!~_}*UGp*@1+QsB$!ir(%uw`fL> ze2=DyFVS1dO-G^j>q)PzV0~|>>b=p5`89m1d!tv{ZWN@?-z_2r^H}^8^+g~=B+_g` z0h!ZbGRNh-mU0!LQdb0>19qEnu0ZEqQ3WpyJI00TY%Y;5Pz7|W6PCQ~g-KuleY5#3 z0hRJas#YyuR;Nb6lz0f?e{mr=~9{uwre~cK{^#gFwXg-UDYw z|4n%hZG(H{Om3^}o(HhLhKa*^0|G_~`hX9#ol4;2TAPzI*`53ny$AO>o)Ei1O0xow zRxq)~n|M7|lXx4BjHB%fqKagj5=6#r7@bF^z%9aSh&uZhk)k7)2R$&c)Hh1_POM{E z?{|DtSv(!Yqv&T0BqRCCC`lbWYN)<^*A+9YJ`BO?+p*gwB?L7+WIAG4y?7O9&3xtQ zx>K2LVhWCEtJdslPG#+FOc3q`8juNNge^@HI5K@db#|-f|7jPVI~CF7 zQGfS?UJTGI4f8Y!5#ngSzGXxHLTIO4u=wv1Gn`K~-Y9zGj7^%WImf_#Y^ORs# z1kHOELnmP0SZn%miL?@A;P7HDh+xeVB(P=R1O+s|(`c9oc1-Lh)*2APq|tDTYWC9vcN*0M-H0)zl zFMibN?dj6tXk5Hjri`h^PJMXrwHxE3+;8yO`MPG49RR9#@_-f*)g1oX?J!-Als`rQMtXErb%Wio*})>Hp*Efr zQ%$7?8O@37tJp#_6--7`>L~xj#ZqFM;$(xuN?vxuaHe#?f0pQ4#Bt8H!^Q zOBbKx?F=cAy6k04A}wWL7ci+Wd|H-Jm8b2+arpYi^hG^k(PI!W#e0=uIj1)#p{mw4 zd}KHM)2{77>aBD>{i@rqrcAkg4pB@}x2p;l_7@$F7Yjd2T6bW|Qf^o$1>bH0zWe3J zqi5+&=`|wW1YD!HmZ}nxXxKN!jw4GJBAv{MEcZ1e2qP~-ehb8Y25HP=!6>tgUk|;BLUj?DS zc&;Gano<$6t4;A1knX!87Z!U`hXCSi2q4OaJ{RkdG^993QjJ|z)dLz*Q_`6!Ykjs3 z%95Q*2)}&*KW;^fV%=DN9R5Snai3*j&t&wY!Xj?WfNSP8D~cdK9Gs{i*)iU zFSFgRN^yy7rie=syaA>;WN+*u1oO9;LZ<;CO%|%u@crKxG4dEfSp 
z-Y8^RkCFxtPQ>OCZJr8Lf}pkCVDT+l_%-yxMTOVghV-pw+shWj8TnN7z90U<`bX$%cw@c2^7Li6*^kCwKjz%NRr{`0_Xag?m(FJ#uVm!qOC7CjPkjou%^>jbuais% z-IZc6NdUejI%kkgz42*~%&KY6e!pvW)B9U(pO5{ff1Q)}-4Rz($K}DYUMGq4xR!?K z4d}b+x2yKH+g^obhqk5qM;U;la`5E9_@sC(J7Xj{LnpD55V7ZHa>GV#tjaIl-*<^b zK6dMEk2TZx8M!!Hqy2t5ri37e!_ISk@O|U2!t{f~-vbLGP?%Z`G)PZ#Q*3#wftLq- z?rT>A|DtQb=@eI_Aqb*4*eEm!Qs;jbEua(T$79kmt0ETG9VI$Tw1QHBD1~kk$r-cN ztn-QD@G2ZeoU;KPEb88vB(E{^cx@1OS7^{GaIa=F@CHCX%8hDrtiY^ruaIXM(Wtw` zxXIJC-@QR1vf1)I@Y=jX~8M_MH&L_JJrAN%OhhBHES@ExExJ7{0W-Wbzl zyxZO)+z4Y+@2(Y21#0WJ<@a^2FpPJ-0T&c8nVK7p|H92a{6J#=f|QQ z*$kz!)K}iu^Ot%CdjrK2GemxOtbL9`h0THDNa^tgfip#NWfqgC65gZMf|zoJ>#W9T zCYw#!ecDvG;RYLH79UY$wq)*7BFSd%E3)Ap-4@EB{1$O_sZ~!~vtNnSb%NU2-z)JX zix+VYAsmh~X_Z~Jm{_sf$+aTMkaMP&V>cC&eV3#NKaNwsmslrL}63)mcx-Nn36`kbG?-)3{Ra2!JPdyQhF-@y)z-E+uSTKK( zm4(ME-Ys5s)sq68>McqdQJQi=nly6*Ydj;0r9h>rBv{PhY}^~>)(|)OhB$VbQP5~+ zIS{daWwV2Z2^8uM#%CPPUsQfiXG}`N8*48?8Ok`oANl$5)HY)PiRln%&trRG5!=z1SMuQYpIU zX!6$(+-^V3ssv4I5lSVHKl?%g6l%za5=!m0tk6&IDa0jsd+HNKQfTdn7&vCCbJG9J z&5x#Zkuyt+O%fb5`*e`VErq`g`Gw}P*%G?wC2x%2U2DEJtDn4@ z%cp5H-xDJws;vK+u{FD-U#gF3X`dr=bRyVDD2C-_=O_kQ&~SrZ&z#*?=?B^3EQB$r zgO$w~&dM9jOFFw>K9hKbMwnEew}y(wCP%H**^niDYqPZW;$g3Q<69z2f4*e7n?W6| zOcq*+sU!3&*?jj9ae4YslE-?oOs(77z6bN})B*an-|R;q>lO1ybf9O?&BhtZ5$HA| zw)%=x3_7yRJ5nCjxy4?}ZD(ZZ%eauxRy1~}9l z{ZjXa;`re5i}XD*opSk<>i03wwHKutaT54N>1|&=iWS>*k)TVea!T9<`pVP3tUV@v zQ*I#=M{hIw3E)jOYu#+9gyI-4aV@V5@!%tDwX^AyQne*lYb+4h`<{95?~j) za;9K;75W9io!X@N)J|P0HjPlw4WM!L$JF#-QT!P84rfu-v#=Rpy*6AvrMW(freYlt zkSk~*TE?e9b`PID=Fc^NZrdfms%Bfu@az#FxQV@pfHjjE4iUJE^(wG04w7V~)cwR~ z)Y|{t{_G-1yTJmc&Bzc2Ft(u!mX_yfNsAA_|378F|b(YfC%4YtR}^I%PR9UjF?qwuv~-@ljO zx%eri5_Hw@@9s1me`}>4=KunBE5H*m?DqKD#+dY@b}1cQ9^=D1={9c1GiX@jco{=e z^HTD$n>-X3K3$HmL5}|2zw6rG+@bO z15m`f(iKh-ccZ{{fV!+(1O66<1;a(v z55sn?DlQ}DdxT`H!w`5n8Dx^|B)&41Ql!T;D@f3=eL4-8C6wuPw7%f}D&`P&|W$ktWx zLF8tZpj)nKDT1{0jE$mBZ7(9b z?$r_^geziH`= zo8;gMNOG3>%O00n(lVJ#cEonI zD2PnOR?Ts_)VRrrEs&Z%(=m-Gh>3n=i-0?gTQn8J9{|0XChiis)z!-jts{25Bjs8i 
z_nN1nD>tV9by2<1e%)sVdhwdYNTrw`w&y*$Z^FHc@C`jK_0~Mm8BHnex_c2Bl1R^c zPd#1P_)A$)q)c1CxZktBp)$vZz{11}rCelQQC57PqB8-J?Pr)JK5xfGvK`wf_d)L1 z9>u+dd>;8|)dGG;r<%ic-ykeidp@6G;wI!KglV%dsG_$$@fSDx3jD_BxHXNA%H$gj z$g4Yw3=Lh@!qL79mJeX*C0E*rd}O9~oi?%!o(ugTpgO>nq%)61tkU1p#hZz0k9Y2} zB4=LMsU(7KksYlV+xMF?L8=2W1!JkgkS1sft!t3yC3Bh|^JM-qpUU9yISkhT05=k! zq9#r9h)lvp*~(KQqnHWkI^$J6COMq1N{CLmMvCsR*xDDAJ&f)YP`+E1ZR@HtD}lbXgIZ9{EW$YaY`40YO@wSHS`CNiwvI*KeTb$WWRAkf*lNWzg( zb?x%(hO4fkQp2?9kbwcVX|+04qkf3P}GV z<$yYb8}Qvb_nN(#;CyEyM|CZg<=w6jC~ME7^QletnXOco<^ik*?=*iTxkG2~rN(_V z3CYo^HcI!^77s>~q;jQQ>ji~M3U!+4PZupZLo%XZV)9!;aryL%aKDE)Zu{z%BXS#H zUXg*OqQ$lqaoj)T+aXi^bf%>pQQ7sEJXmeh&3P~4syNv87TZ&n%UJT>VKlH-kG#0( ze35=Io`9g9Gg$(WP>^rnzTE#^_Zlrk1g0>MXYa`h&1VHO#<7sa>dfO3T@NjGG3g=7 z&*N54ZyFarKphHm9Qahc(x(dyxl2p#*tGrtQaNG2QYg@5gS_T5U>9|T&(?VsN$;aY z-Zw<*)C|@ee9-?*_U}vsljrB#b~;f$CDq5 z-!CBtb)% zu9rC~a9j00)FdstuGJDE`IQK%P;75BxQ)2Ag_17kYMCuQFDkzb`#~m{woK?k>`$RS z9p}yZ;o2w5o_%=-{7E_z$5#~A?nLwk>4$=htF{-oB4{T<5FwyzO#+oynw`K~cD=?xG@_ zQV#{Z1|kLl7U|^eS6IS!MeS_xY~DAIt%Q8@zzY@&v#!17co*D%tj&jCt;~ob>Aia3 z`d+WO^M{E@(M;~nSV@PXcemHfg@l(^4sbdC^5^O`P9&|M>h8|`#@S?Gzkk9#a3x)7 z!Dd@KQU?oL*g-Qt_N{;-+k;@14&Ci_~@6K4o%E zTL&FE)TDKzTfX~I06Vt|oQ1PiC|7Vrp{`fl)eaU+<+P^0owN!Z$~M_{5Li>b%$63t zxNyM2A04>N_sx-N!wHq>G)Lr4^yO5x?lg# zc-jV3q2#020F@h=3_Ki<{rh!i;DX3zW;LB|Egc46xTz6eovrWVx|Djwznrq4n2|*L zl=bq>vnQ!(iML7h)Jziczjv!_VUd;0{`SD}dLNe(zmf5)HbVsFVQ_+u^mi4*Kc1;c zBk?kRpZm!ewlLt22l(OiB^Ec)$f$(I4XI;ppFM#aUp%%MkHbE%XvX+zp%(6Ep1I_| zT}gigFjN?6N0SnM%~JSYovd7ySGA-Fl6x(E&njd|M@?&KLWGxg!j7t<4(-}+Q7X6T<--5|8;O+Lj{7_{8NYb zd(@}A@@jub=l}d)3Dotu1a!)L7XD9PIx`WR04>QEKMx@UN*AH=?)HB!CScKjIr9%u{-2eJZH9rh`BzooKif%~2dpOO6R-7qt1%?OpCFo`TtkoZnkh_g_15`C89wYgF3B%(QQo0L;&j~rs!|Y$e|ddd6%6wW?GCsB56WP z>$8td6!cB{iznNM>>-!m6b@MqCZ*VoRdU%)H#YPKdjxI8K*gVhjf28Ls-&hYXGTTf ziLU>syrRBV`|oufQ!pugX(GBSwJjnQe>^q)>#>}3r+5pO$xzuaD~<^C6=)B_>s-1z zk0hP|_}ba8@?L2T09RY;cpp*k6z<#F(%`akhw*cfGjj0DCtL;bw5ct!bGlmZeqrFM z9zjE(A>!Xgt}ji}#)QROS_&J1@76+W2z*c-*cVKL%!4>0^z0=`=OviE_RYp@%2TYt 
z?`_?G=iFWAfKdgx( zS!fC?@MM31_2GFQ7Y#%5+=dC$0(7x)Ww{3LV^(s;T%NVmE8G7?Cf(%_yv9*u-;Izp zi;oU$xhn`|NEc>-L1B2aHBQm-(qk1f^RfJcPCa=qS@%6IQ!y!~F{7FJqRHuf(aa^O z8d56*!DyltgsrIUR}F(sdYiDe%}fKMVd`iiM16m}V>%L))s%2bby*L^Rc&ig9Z{um z9T4Yx^yjy|!30JIj9`g>G3I8G*9d7B4JK|QKyH1N#khT7I+72T<3P01dO1pP&W1_d zY`c20C+i%Xjw5U{Osbn-zDYX8fIwZvZWlc&|BNGsGyj@P*C;QU+z(?Ys(U z(3kF?&xK9U;@oTZfZ4b1>r>I@f!qE$=gH6Y5~^{ZC_Zg#CaCYa+*0@z(1{5z@j~fD zf5Oz}6}wVB(wJ9Cvb+c9O{tQZxR{o3I&lceLbF%7=xiJ*5%aN*?FE5#{V#=ymbk3E zZQm&wc>Mw`X`eN21Pi#xt~Ubm-=t8fI%mo7cwxWNKp6)6WhJjP<8v}JI zR)~-@-c8WTgqUY8#R`GPT}DezWOL!)qNXyXc{t5PbR0%z(UR57_370J@)99u8uM1H zSXya)q@%~z4aI%fb-4Ja?wB6$^y)6+F4W_fLyTK@twvieGu5HypYgMegbL0fh(Oaz zz1+^)AB)zKo8jt_W=~OVBfe`DYsvKr`3d@OjZk!nNDVi2 zWkP(W{##H*c#L3ISkDorbXk~-ezx#)d^$RJE=??*b58nv4BjUNspz%Pmy;GLqz!!MEw^|dcCgIXRM%r$?y=y|-_SUv=ArpsCOxZ*8^iXG)`G{AJ=tU*xX0w#KU;ME%ozd1|K>}UKWzf)~#|mPnELF`Z(M(kFEqriThvD z;W119SOGxgSyTHLinmBN4-}7SEj`)d2&GDQ=tmH}|LE?w>s}UC#4T`swq!P8@qDj# z(;JyN=cA9IYLn>f;|lV7rk+Q3u+G{SK{HxvDv+jnf{RFq&xfdhw4pZ(WcN>C&lkkv z2|jmXvB78G7@+MelxT^!EpS!NRK*WLq>tzNm3G6@R_nd{{{0;SZl{<3;!*men*sBv znq%>M#f5*AA67vE<|T=iKa@^?)TZ?{j5>9X>B80v{c|B8fpQ7>L%8%u2=qV17#}|v z6aSm}#SZlBzqp_NnaIq~7`tFnlAGhtUCyKmV2dLD7Y)^)hZkd1cHLvG|7NiP;wtKY zkwpF0DZs!1X8s@Q)PMeusqO%}{10E%9~%RZty2GCV*T^~^VZM6ES@&j`{xzTrWkOy za{M87{g+_GE=bjz5)SWQ)5=VsxA#!laj(GD(#fimkuT_tP^tP&C*bYg&ex52<@0=lm{|ssf6LOUN`32j};m-+bjQI@O)&IA-{U3b= zb>Tk!3jNAKa%nzKMgzR+fN^UkG|$2C*K2d0>41Olbui0U<-xRbJ>%nB3S>GqsoQ)) zzv7^M(pPS?mo(CJt-)hAVu(6@BdGkdY0H0^-&1~n>Ir=L?myFY_5=SEj6q60nfvTN zE@11&$9!6x_W|hoU+WA0{De7?vdo_N-%M+Eu!>?b1H%7tfszU4MBh!ZLM%w`&u4lS z*qt@RPm{0(2!BQ=f-y+h>y)qj#|$WC5@Wt!jmrPxnPE2hbIb{m0ab1+#P83&RDmn- zuigg#jIQ}MIFTN1e@FS>`VMn1>j1FtsW0$h>6-t%za==aaoZ*S78WV^&%LCnF+w8K zn(jZRQ4n*&Dmzu{&VSwVf0m?x&2fKg!DX&JysHLegS0{vX8@b=5GXvJbLtT4Q^$0U z$5a?AVX`)uYz~HiyjSM|?_ah6hfb8$X!8NIE{J#-!z49Tcns{)6#xK{2PpNiVH(QV4WMxIKmp^Lr#vN9!= zP|SW}dGs4xosCVvheiwn8G+SAZURsw&5_dBGGW?6#gEx1I*Cj>r{LM&s{-(>mA%AB z;;)$I%O*WsrM$o6rOt7#Gl8LCtLr 
zlbt}**=3TCwNP1IO;3O3??t@6Ky-&t{o}U82Ng*R(@4;OrK#ZhTt799#bxiwI@g7n z#*yV)Oo=;L@I{hd!P_3K(Q=lc92GJ@0QHn+dl8UZ~O^&*5F&i|~zJVA9t; zGG;6AXcG@RgfE=t42ve%$7II)$q3V&UE1_y#OB6%D@6|QBxfT<bP($j8H3q9 zVz;y5TbjFBa-mQ2%a^4eBeb^j-^MkL#Gw=97$(vFcw!>Y%_rJTd9{@zA9W2Am$E#- z*2iK*L31VXrxI$?nu_fJ)+cW-sNty5s;@hSAT+|t&Wobkm&hrWf>6*+Hu6rsm>4HV zA~TdaoJn6k)QB8M?NY$;Jh1W~0%W@C@{_kgZPc=o*WG{cLT8C0P@wZ^BN{|&B>0E? zPA2@+W4tF=uaCsT{oQ-hoYN%Iu8ql!uM&CAt(c2(L^bAR)W-Seq>bq3M@!f&hgS5D z)H}x7P@Bi3UhsyeuG!08R~k`m-Ctl=8{K55Fvsb92krC-mYYl^bu{PIq(If*;*1yS z*5x2Grw@IuF2niz3py1}*Ur6m4rudiPx$fFS`PNrr z&^%75)*5m}?r5r!b%=zEir5Fm0vkm=v97WMPTn0!6kVUL_IH7%qvWBeS4}oVpH|ez zo@9!tUqc)_5k3Ve$H)^DFr8hx`KmA2pg`|K!hB&S3M%y!m^Jzlyf&sr8JZJS8Ba?t zFQS6HpCqe--!6k2YDVckQ~Yic`P zCuqST7Zp!Btbb)$+2HtQ#*NEq`MnG3Y&XT*dH(&T)~x>1z|0Bq%h^+&!>GmO=61N(83D)H_Hnq@>4cU2m-`bKa)QzQ*E#XKim`qPGN3G z_LpuNr6ZHB8~`_ZWD<6VYatopAF~R)AkUv2g zE-MK;9ws(+VUDc#oCNNGl&&W#RQN+gsV2K=tp@w|;)mBwBjvg>Zy2Sa5|Uj%bfXRa zEy;)2`z1Ia%(exi54Km?VObU*+hBcV+m*G>)U&ofJze9A_A26Kr;6TiQd{Ndc#HWI zuSN@x`?d?Au9dhr_h-YAU9c{JzB78n{Gj_nI@s^I*q&Rf5tp3v4^z2cuoJfqYbk>CLRr1IVt+lS!v`|<{H zxAd~(%4`2#D9q*kAmrsp0aN%-lehW*Uh%(St_%iVIOw0)tsL%euNa2%_U(O3B0x#xGZW~}bFE#@auxLRU`O9Y?6rBD)^bQwBV zIn4eqW9$<>Fit(!2JSq*u@x7SO9?PZ*6uU(>;yxC04V2po{j;7?mj3gp^D}Qp=N_P_H$d6pd-vK zFmRVE0D}PB!)yrycp3{}yeCZbwvLwtm}qN*Al9j)MdIGdY7ZLT5FcT>2C=-x{k`By znXmEhuj4m?kd%NZ5~OoFl}JbHYc=VKvr?|~C*!CM1G=E|(GEmv^BBT9z!}8DV35X{ zW|?NSC9L!7Bd|^fL4f>xw3%27U0!{~OfyyT_5ao0cSbe2bz3VokS3rMiA0J5(nUys zP^1_X0TF2e0Zs%&k&bi_=~5z!gc5=RQUvKrk={ZHAVn#H)DWt4DN^o^=e);rJ>U4o zxMSR(mtPr#@Z{NhKYPzL*IIMVk}jFBXqSV-+}s1G`L@p5z#LzhDO#n?H9h7$slzx^ z%2WVqrYyZJ3P9zPrG>!5$>wOD1)$hqVkYF(s`Q^VJg9EY#~&Bj4-gndDlhh3Bt1uB zuhV9ss+5?*b{8fci5O>+(jEB`nEB@0n|$A(LMBV+<|~*;$;6w3L>2*QpMrx61|6Bp z(+n})6;T2thE z2|{W2-iEs4u77@4{uqyVCqjP3@cHT>m-k$(krvW#babl2TZZ0K*9>+H-knNoW0-p7 zlThm#MIYnO{ix}xIgge&{iqFfV%iC54cmr(fkJLj#KOXct9^r6P*Z?h#y=XT=9wfE zOg2tM^KzQjwJBHihw3u8>b>TLa^CO0MKfkwdX**725+SUQ;sjj@QQU8#ZN+Bp`_yi 
z$=6rt1s#{hLT{id?pnjPD17M})^7`&bl2_@VhqR{_2JdcnGXY0}XYZ9Gl5aY3Cr`4ntxQ0?NP#t98E@Pj2tIl_W`?RnHOf zxV<*2me_@P*i+>To#<$5?krorB3~TM#=O5uSL99Ky85EC>(IBMwCwK0wP}sh+jJJY zr}Y<84*bfiD1}o;Odc+}d`GhBsHES$+Viv-CAp)B;6qorq@&=4deCEcQo~o{Y1jJ( zavpwJB3U-dZ0Fld#g#<5*Y!K3Xi&>e-uxV$(NN}`oJ`&t2g((7@VfU{#E*ea6zeW< z-t$8}TnIYcboH#As@fUiq0iI`9i0`I~$o6NR{d%tN z;*X!m)ZmEZb6TA14yoDKGM~#31}_R)satN8VH=bwHY@(Wr#5?o=q%Eh9Phy_Du!>zLm4_N~4$br@uAjKnJmUvzEBF z*uc=rYiVv<)q$E%r=)}6xPq~9bdPP$32EDr6_JsfxYeenXBjoNaaTK=p3*y1)u#K7 z_q#LxSbMzEpMR5$!GoI9#-$CG(VznrTe~n9*fGt%Y^!R6&UK?Wq7_{~fDGM&z=9lZ z#bXcy3;b{=VzkNEmDmzKRGIjOp&4)jWL(#m32OjL^{QlO15Si|xwG&PN~>PVvrxa` zb|^k3p8U2o1fOHPEOt`Qh6hyhJ!Tk|R|5sHS^2@a$g?Yr_u8LMBnj#CQIShNdjuHORqP88q;5Rg7PxX7yvxy zFTfq)ZW$SYCz?(Xkz++_oAJX{YM{B4;%tzidA1q@TJ+VA}YN*@4GTavBV`&ZAyuOVzw z2lpJ|AaeWnl{rYEq)m6bN~NK@OIt2IFdwPi_F?jAu`g8T&ia36F$B_8KVFy|pkG`# z`s0s-<>iH8faS}igE>f0_wO%ElTF3EG(0KFtaiXs?x8>$t1e|t>+L+x+DW(HPG4&u z*?HM=%Sz<$rw82fT@HWy+k=ck%+%n4xm@=za88s5cV4#m2j{Od@~`0qv@%@%ztM+( zK1clEnj5CJ|1HT&x%;zVK#tM;DVX|eH2B0f!2FS`!XLc+ogJr;<7dZy?a6 z8l`_rtvotR;j*7D{c9wgwZV*j`_O;%_e(1QR-A#E>-QyV{tYaeP9L@}e}hgwaTFCc z&s)@VYtm0U+h7DIEx-Og_XWUy+Oiu2a!(MWH{z|)>c?Ud)b4il-n`d{S&UkIB7Qrt zn4-%v*C$_oKb}OrqH~P!wnS0nL79)U%vHxo(G0(hN;TQpfjyNG-S}Kc>AI0y+Hj}e z!$Dw-CXCda^xV)uB|a3MDW3?z&S`jw?lqdR%DUWlh8_7)5V|+K8DlTBEh5H0Yr3(C zJhiJjIIOrl=rY^qYo|7dNG3DKo%auutEFgF13l*32`BHzzWY%ZM61>h@IN*Pvr1+9 z6v&;hB7Z>&O6eRNNVl8{D-o4R_<~w}w^O!0_5969EI+N?2c z>vpQ#TgHgu40s&!zwh>Txs0Ew#&%RMZXOSj>GYme+|e1_!t5*BEVkjFM^!kQF{yb_ z1ZZBbNN4C)@2w_H&LyvQ23_WSP3&b-m;sNC>IR=?SF41VCbojcy7 z^zA_Ek7cv1N}iqhXLkAJ%da;+=^tXfudjFs*d2WZP zc7_e~k0do_O3NNO)IauB+*$RKgK?^VShVca{Er1jeB$BTs^;TbOO&VuL>u~W zJm@(QsslL$xCCOt!Pjw$Lfq$~DsRKrS?))i`kO)Wr(vPy&oX;Qk)!F(eVvg3kArJv z$L_h`HuAk^QZ;2G5hWNk{BF){)~Nm6+{Ve}vH*1t_@}bb($!vBw~y*>J8$y@anr+M z?hgqqZL1_Q91r(^ulMEp*v!z(6R@^5kSf=U|*zQAq&K-wGj3S>( zCV)K3T33}E-gvDs-Fp54KJfY2=FMu8?T_|v-EV4he8H~Rd3|rg+M$H|N_3<7nd5jU z>!)}fWLYTTW?O;}2$?MV#^?yq>NtHAQ}Bd0#qkF4;(@Czx|6`mCIct~owQ^M?y01N 
zl;1#olm_jmcRcczsxn*4AYpTc{G!+(*W@a>Gg)j!cN%!dJn^1(1WBP3u0fTgX=TJ? zEo$w=tKl!9vO4EsRPH=cH#&f30WBe3d{%Azs%x?2Y~V16MdV~ph5hM`{_|83;d|5* zKH5Bfph@?(!ePQ@t=6YhyE!-#w zRMUZEgz)_zAQr=#ZhWY5Y)eERZ8L_HuA>Kvu$4=KH6Sgv4UmOZK)UVyT)vBG(>$i7Hw(c$`l#x8q zSm6*eFn7kQE!$ux*?w}2UCWb8dR9jJ4Q6lrgZ@bt7*nZDG_A0QG7;I+#hI%IlW1mP zxCA?YlBPJ4GCJia#;@Mj+m_PU*NxDP*Z78GjzXI<2$_+$WHN>dRkq6W6xFS#CuLt)){E^?h$=!Q%8f|rs?)xh&$y&)(b9*6BghyIMMZ`F5 zaxA3%lST(bTY?Es2i<=$BBJodYii@$uA(8XJ86G2BOGIkonw?wkOj^>^Qeu0B(c76 zKOfA_xSW@4Z80WesJ_+NY)x{e#E)#dJ@{_p;S&KvVfje%zNe!V)Asa7PTw-~Xt|B) z(1v|ZmJqGw<>{TH$7URs%9fdiXn+?5D;0~|V@3%4#+Jla_a~>#-M0aG^SK!0=XiVu zUPZb?ba566E7uM8R5|z)_{$JdY7UUCQVEX}tx#2UC=@-#`%%X_?_#Ds$bdHXpgKG} z!?ntm_UntG@>z!}J#KgOklm>wojX|2Q3#`SVj*u5cIH=6Av^3KL<>*N123o6re&(S zECV@w&%Z=>ig`&={9n$V_B)0TK9&ubV z+iH=PeV)vD$^lajC)-?vV)gkxUZD(M8-ux0Ee-LAH6|mo^04(1U?wzZKPS{VHqb=v zx157^@M&9I4LN}{>d9nJgZvx(!|-7r z6|$YppW4kox1Q(cYjk@q<8i^ydo{K3qEp(?53lD=ea(w%4nGuvhGn|6=`t*)69gVi za^^aUuVxHSo8@|alwL=7<#|KaPC)Kez0BSzSPR_{0Nx#Or7S~tQk{{{Fx0Cqm)P(X zE5}a^l1wM5RQ>2-gP!kJiV`s;K=?aK zvx)*Zp6D8X{P)nQCLiGPkaN}wzeizlG60xM_E}!|SHV&}sQKcG#cloL-~W7RK1C^) zF~Mf#|MSX}xBj{j$!CjhzGSyxU^ukcaVMy2vRG`A_9 z?rZ6=N}Jvkq8U$3T;;=s+d!?#Rj{c|hZ-X5B0#B7rg*!goHGa>0e@Dg z)Lla=AY#ecO_7TI>Dk-QL+lHxX@`yAzLn4AQsk@BHI4^_a4${4s%3mO>!DiqUSGZh zx7cyfy&wFEa{8atz>K#-~iVp&NOpEXNt zq6U_i6oGxP`|@atF7+tx_Minm@o+DJsxqS!;5SbUD*m8*eS4Az#96;A z8h%YQE2V)R;syw60>#Gg2`i|vfh_>H!YE@oe0uE?C~$Aw*ST2_Oc7ax`*MtWvYzf2 zO!^%JK*Tt3_10Zr7?9+y?aA_J1TXJ(ya`B88O3gbFe7ZggpYpSXZ$^#b|0;KV;o-> z2s?TT(1h66ia`5{qVOzX#nH2*=gCqE^spqjWO($@g$S#*IkjlV+7a`H{t4=~m5O7G zX!rfST{Xl6TNKH49e~KB{raY=XshTWR<)uHFv3#k+4926qo*DpG^SAgm=}MrgTTPVf&Q3CcfHAs z<^f+@lt-;JTOB)rUmNJdOZa*h^nl!mfo<@rcermyX?<;wK|~9VW~_kqDT4*E5Eb*| zW$4=5vyUe7L92Sl5dR5ZxK#w2Z^~D}%18`SbOhhQNpwHbqt)QUA7LR~YHO>a&N$M) zInG|MSmKyZDZsyDRLVPduH&Fssd%3=H=gszRy&pZ#SM;B^)uf+;=`T3+7pk4c%O-M ziK!Y2~LXQAtsc_7~1HJ}saY zN@c1UUA@E{LLbl+%sPg{JZ*7zuiDvR>M(*=#xxv#EJ3uo%n_wzTihShxXDKs;vSu> 
zz<@0B%iR%l*zZke4?JKyp-Ue%e7V_GyDg!H;Of|$k4A(l!O7`p>Us2TX#R?8px5;? z`XtQv`Ur!eTjr*s;fhBL&yTytgDpJ18Ddg2Mw+-WWDyZ5ZXwJAd9i)i0ph;>=nzr& zU7-UuIw%gKRKSiN9YyW!w>ed*RR`OMF0ToX2t$CBx5KloIFxu4CzA)N6U$;Qyt3~d zL2zd#UWvFLD4_vIq9Tm!uGAQVNwKoE9k4CvQY#gVwJ(m_AntFv^ZJa%iQJ%HYid7Y z_sk-%)Z(ZLBx18!bgW+zQf4Tp9x}|Um36+xgppkGg?<<(FWfetts#B#eOUwO5fp(I zNknlJHON^xJGFHd>>}1la-yS+Qs|g>argO}g!?M~AT|x&;9Jq8It8{ap_fFJ8*mPb zY50Wz7IVi1es)EaxK-+W(zx)yT+u4s>>;b#Cj{pmvl4UPyUkj5FPjk_}IJ2ab*DMUQvzhP|}nXxjgAz{T% z<=W4vo9l7Y5q*;(fMpuiccv&n(sT_XTN|!GW&5Wp|KALx{}DiARwyMs(EW{H5|#yz znHu!qdp}W{rE26(j*{Y7o8`D4^24s8R`ta})=s3}mU4V*Jz$ z3={>(jRecnoJq*#RXC2Abq|w}Z8JkDS6}QLr=><(oY~*d&!Fpf`_U}54wp8E@w~D?&bYf^75rwgwfE5mfY?*&;Z4 z4^41keF4-#RL*GHpGGcIY}_LJ!O8MdF*05AIQee3+q!5SC`<0|P7 z+wOR*_YcAH)pNHYWfG4hKWg$YJ`km~6hHO`9@C>WTW&let~~~u_a+@>(Ygd%g%g%9 zn}_r?v_INR|He~S;!jiaEKxo}Z#zsXa@ojD)FQl8r-9wt_6qbBwU2gLFVjsaUz&+8 z{$)mWGBaKSlH(5ff@VJz)FZ)D47(VtVSbr9|FUJ#Q+xLQ(fqMmkI@XS{?etipd}EF zo|l%!O*A_o@%$F>!`$#kwpwKY+On;=*Vtxc3JWX(0vsaRnG=nruf+stIi_8gF7y6k zvlsJH;H(#|bxd;JW&1#0PD$x&ldp3eVp4KOm6NE)rVTT+x(S^OYI4-s%><(5@gC z;sCo)8e>xV3+;Ug?0AciN2#FFh&ZcxJ*~eZ^^9_82F|Wrt$e4M-8`~|?Am~ofpnd- zQ>~3nq{|PjQpH#_L0n(7rB>!!qP{zy%wqdWa?!CGYH0p(ZZjIT$Tl0{fzRL;o{~W) zjufYxtfUB$&N>Qrmr5Af=P6jCv9pP%L0%Xq4Q55T0xB*;dBI@(_5AZ(fvUWaI zw+^^0T7E(4!wm&jsgXRn+4yE1ecLE>DyyaQozzCFkVtNV(`jigYS$uvYu?)6$7`g> zqc)+0cpfubffguf$|6vgf15O>r`_A!d6a*R+urCSZCLq}a8di-mdGQsok5xx%1>}H zu;!FStvJSl^Od2>7H%WST;a?w(~8U!kgS6T7d;JGm$PPR%WmbE!D8JFr*Qq#QyrJ{ zJANEFx@4`ZJ@P#GdtJ(qm`RbvIc*fZh=LJ8}&qy$-{_9vz1ZkpxQ>I20=*>B1wtEji0E#lCx!t6GP3$4KSP6!gIrw(T>f>;{$@ za;+Q$iNWNeS6rJ#!66R1RPxRicEL9oVAhY-0x?eWQV-FlVJjg8hr}}&n#`x^{pOi{ z^sJa&JwW#0JQ4&mPC0L{Qc0@4v`uqteF|q~)39gvluqDg_v~&sDo`g=cu)wNNN|(O zxGwFvT!6&4a>pP|jHv6k>Sa1%AJWjP5gnGh>PU|=Re2A?q}98;oV_S!PLDM~+HF;Y zqwSjkCC57~E$74OSrgG6HjLWWXjC545}b?$Q-?5?T58-W-$bH0%FFGnDo~vRCOTt*=s*br=$pdMO7rOLUNKtpVzP=@T+s5?Vnz{Ko z*|&`>;zJ0hmqS1CZO{=^JSEnrCyL#a-6oS_+63leGa^^nVQYE={!cCy2Wc1HK#4}f 
z>RQ(~IGv-#qDbGJ+BKuk^DVMX`K)OS!NVPE*!e`+R<3+8WLxn#&biGdO>z|zL&rF` zT8TWe?g(8DdMVan_es_m$-kmY5Pz(t=aIdgd#zg5H3hnp%E;lieD#g&mL^EY{pu@c zjXJ;ykv806WOJ)PvmB+CKC)$%NyjgJj?_-i$r{*h7lTajW}%2iMz$YKRF)1+BluhC zHp(Pg;n3f{7u9wKT}(VNSUdG$D9UMIIt-0*re(`z8f=-8aO0r~5P87;0RF)?f74K4 zp&b@AT|?M0S9Ook+*;!EVWFD%C_k&D?UKSZ?s*jv`P%*?4Xdqes!xoX+1M>{ypzA! z)`>w4;|~(n*Pop~jE@K!eLffVHGmqLMO3WYp~O_X;eN|v-O2k~$r%~)3pN2K^@Kzw zSS#bvwr^C}X(JLZW`r)f(%CsMeur47mgyP{|`<=<*nhc3dDBBDDhtE!` zV0@Xig_^@}_)bGPoo3A^>W~)b=LUQz&$crPD=EipJ{~&LuhcXZx5A@n6Gr=LXGgZ~ zLqXUk{3bLJ+WOM=Fgesfw7>|h+ZO)5i=PRfagNK!21)Jdq*;~8Kdz3cVXm~eEbBgH zlfC_}*>IV4msux9#gI!Y@k3-ubI8}}T3Y#3zy4PV+y#+B6 zEI!LgBU?#A+n3@}kr8>+;Z8E$eHEjS)kqgt*S5CSPK%E4i(T@q=|d>!f^~$Nu?^Jw zu08b@hop|^f(kWq@x)+^*#<4fJ*t;;;)O!0?>2_htc#&rhkr=Tk+*a%q*KDF4<=t%iZ=%p6W4u}_qicxqV9RvmFW>* zW>+$CRPXb#D`_IMo>MCndDiNATu&!_e@(~1ds+oro`Tmp3Uv4V!zeY%?DvG8)y$um zeqWF^xKyw_&Dt^b>fWI)jB3gD0C4Xfw!P?-)g5$+Rl2G#35)yDC*_qwwB z;BW4E=~@(L8GOh*W~aA}t)E+Y+g%yOH7PWw^O?K#8!Oi`i5G42Q6W<@_K%2X@2~Mx zLHa~0a8dW;V#=N>-g)Xf)5oaiswQ64(y=)>{t)&>dI@z}yNAl6> zz?xw@9lvj_9oO>w)IR9snFmtiEtCW=!udYf(pdu!aQ3@jD>$o(%CwRmg9x9@7lg%@ zO?%^<_=VNi;hj4gN_+MPX}et#1^weGhp?pS>y+P^DL(NF072`}B$)k+vkIngRxU4| z{GREp$pm2u^4wkDe{x4ZlgS(u&Z~)zpq%OYK0ZoBdi<%KUdYXRCAThAO9C^&je@y literal 0 HcmV?d00001 diff --git a/images/LAION 2GPU.svg b/images/LAION 2GPU.svg deleted file mode 100644 index 724035676c..0000000000 --- a/images/LAION 2GPU.svg +++ /dev/null @@ -1,1518 +0,0 @@ - - - - - - - - 2023-11-30T13:10:10.501490 - image/svg+xml - - - Matplotlib v3.7.1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/images/SlimOrca 1GPU.svg b/images/SlimOrca 1GPU.svg deleted file mode 100644 index a7861f7ad5..0000000000 --- a/images/SlimOrca 1GPU.svg +++ /dev/null @@ -1,1424 +0,0 @@ - - - - - - - - 2023-11-30T13:15:28.212061 - image/svg+xml - - - Matplotlib v3.7.1, https://matplotlib.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git 
a/images/try live demo green.png b/images/try live demo green.png new file mode 100644 index 0000000000000000000000000000000000000000..540ff5595feae0169b7af326cde9758ec347d2d7 GIT binary patch literal 15262 zcmZ|0by(C-^yt5eh=_oIl$1y*sUTe<(hUnpt8^oq{N_;(-Pg+L(qa%4u7y#iowD)Y~Yp zub3SiwdNQdJ@&}2ytd+apRYC5h?o85!H0Jm6-D;%@aXBMOjv|*uU9?09k99uOOxcx z+#8cwD#OXY3myowIEk&EPn{(RyDB&PLd$tGVk`K}qRaWrpk@3Soxa+Ym5iE|{4Ap> zwxRX{Q?yfV75TnH6USE^2IdTtLc$D_eXuVu7Z^4y$4NpTXjOVE$H$;qLc-wP<)^7u ze}6p&|5v(ck}bWz0s>OL;p19=#ru}6a5oq!cQ-gs88^H@5ihtvG5o7w5+#w7?o}em zg7_VTSaxO_Q}M1Mtl@&%7D?;zvzoIqyPEUFM8Vrm!CWRlomH@Z>JDUnh&{47HD56D zgH*NJo|OO3KfzyKv(TV+QlL-7zokaa=)Q2*`jH6J#KGn+x6(J zqb!!VvKJ;qoHroPOi)1~9P!j~#oku0Rw^uUb&O>1XQ1kE{7S=VtdTVPIM8zc}&cuB{2UoITSSA^fRzlII&D4tR6N$yt~msjwl?w!)|fVFtGh)`oi z8gFt0F63oO5o3V#utB1bNzcVFJ)M3^>@{sx^T8biZCL^ObR@TvaPH7hY5~9G0TDfD zz3_b`Q-Ymrm+wyIjaIE5CCUq;tahF|2xj5_H@y3yL+AFMxtsI(-OMnrV%g1k{ch>t3z`mq8I2QA&t3pu)}FNLxO zy>BV=BPpNMk z1pP@0){3Rmec5pf^61eUvU?N_j%vpNq&y@zb}P}sflceT;w>BMs^0E>LI2UYr34ld z+|)(=xb61@NgPk>%!gabICTatyRoY(+f}YIGwa_lbsjZ4se4H`*9%4^Hwt?H(tGjs z`OPdp&E-I{i@jeJ=dPo1p)3|RArLBdl0Sp4GvXGVboLLQc`@OeQ27mgDc5W-a9=r% z#i<;QRb;}{!+Dd*WMb>>uv=!nlCk71>u>P2X+l!MVB%L+xbIbeLTB0mZUO|-gdO`g zM{-o;gI>6MH3g1K#aPktx}E(&D6y%P}Y^?#SO?uT=w7+mbycS+3qa$mNYi3 zdI>Bs2Qlrk%LbNkn~U(w-eVgs0cr?jUgoENYY-i!(8k!wva%0nM4xrj?cSpimQ|4t zCavmb$yg8w;gDI`we#AhsT16B6z9D$j{dyO_U6~K<%K?414DfWLJ433hmX7dY<}aq zdOyA#E`=S)hojz#1D%gH4Wku3L>Xt~hZ@()X0J34bHCJPDA0XKMLavd!}4nfE6YZv`f$F+6v_+KtBkZ- zEg^D>J3Hr#_n{H+^;wH*Php#Mq5Jp!Z+4)I`}W>~_SLkJ3f$;+uP8pLv9H(LEsgG% zI1q@~t3=e*ROA6IEmdMg``6+#r9JB8Q|k=~AcxU2W#l4VYuCauIl6oTda-lr9lLAa`!X?tn?#j%zL4M`c8LaXWBM z9QER{brEtrvbhQQ2zjsb?CohMdD~pouQ9O_73^O0@ygQgS}l$_sFM}*JD}*TM_s1h zu=u&?e24y|7d;6s^zur#liNMPYTz+3quj!8lv$KqbbOMdp9PvKd&3eAMTNX5b zumgcP&GQn!VhC#x*^3pu9oA~)FUp|Nim9T*X8!Xp6F*<}6sHg?>#_h5ddQDpRa4nJ+0_HE@H z8_wyyHt{=PQXpZG{eREWry3SQV|pKP34Fop_^z}r%>u>ic4wTg*F0cUQlyOg7KWonMs(fQ9wZ zFovB{vE+??th6dd&%w@2_i{aFvX=H-1 
z@J}x0KlsQ?!R&Q+a!+X^4+E<}*+lmkZ+*+(uGSP$`Ef9tyQ1&~#;$np`ON>E zBs&*pReSds_~qqJBs)udZ#Vx7UI(#%t}_D&epnG!@mz&6Wgj*5=nU6XeWZ;igYs?p9TuK2Hi zI`|gjV*H-pGywt|W=f(BCSuArfgCjZiaXgv)Rn4rtLg9IULW22pTJptG?L%$^|1TE z=1`C?-HRv_QIyxVmq6vsRi5@r5m^|bU}<}El{oI^>Xa#t{e+GYU-AYX*b|@09!K3<>kNcFbrAD) zY_I<}pf}|mU2i)l4EZ_1z=6T$9hkKHZj4y+WX{L_!ZZSLP{I%8ZU}Uh~wZ8^bR8=;U47|Czk#SBoT2{$8 zh$*`d$7f;?8l)h&-CJKdlmr&~M_PC-&rbsvtX01@WMm=&aRv$@*DRE3B|oS94lt*h zG>qU7Dj@hz`dYt({1X4le~lIWj?A;55E#>>X{;$Lf9^ku4VvSClQQu<$|h51hlmu%=UvbvUoliSO^20QkyD3d8Vm4 zbAPSeISb=JZlGo+2AF4>{z@zl{4L`IwW!}k)7akbRmlx5$lkID&4Pgr=I@}w;Pkea znu&f+1|#39|N9mfHAA^i%!jqw9=oVLfKlJ!i{Y0#Mh9AO6spL$*m;oV#Xo@G5`4lJ6FVCZ<3-N|qD=9gz^SQvojs}}z#x=YMY z8~S>1|I^)7AMq6Ipo61d^5C&w*mW_sz8#_XbYk#~copm47W7|RxL_JGR=nox-1xxW z2$@GFPwacmPmjG33?jpfKc$CR*MXmtujI1Dzedc~;yEOju7{n=7%_L1{+f^1HBR#a zM?Bi&lxolw$Z8spa^b_jbX74RbLdgX1`k<>k?>aoaM2(}*dOF;Z zzOvs{sCROiJck>%^cJyY{`g*b9(uuKVz(AX%zGfqMUb%ZTF03X~r=%uRJl75l zSJ1w!x%N#n))+am{ENN}Vn8>0Uk@7M6}5+K5WRdU7aJldO22`ZuTU@SaC4W(ec&6~ zHLXC6#3Mi{H^N6kcg@T3mx!`3q=_MqQUebmlZXr)?EZND*KCC%X3nVb;Bl3aI84bZ zxkk|YmN^~dt&nxw@J+BE)aWNr?20cg-p=m#k@%9r#>}XA?g@c>?@4%GF04l6N3yHH zEzv={t{aYry9x~QdqKAOyvT&Q%KWVhRd>Cp8N!j-ksMZ>~Q_Yw$f4$ONfegLb1c>-yM#?>#Yn% zK0L&S%;Tcqypo-7c4szh3Wf78t2QE2ONS2w*iCsXpJa6E_GO=+e{`9N{|5ima!$q#N9rb^!#WmwY~gb|3ze$ZrxHojNcgg zf-E!*(IWYCJ9~WlvaWVmFn%C2?8Qj7YZqazZ}My(?kCGHM=I)->EY0*KRi&a*CFz_ z;aU=+WVtc2!?w`MH&uK1&(w zu8wT=OqSQ>tVDa{ZhjQ<%ADNlTR~O>abP);BiY}fF|-5M7bo>GWpYy1Q9`5oi{y>2 zRS_mV5wQiOHrBY6^@7V(lXiR+7I@t|2Kju@6p`yJSCddpaS9P7{2=+FYR-Bu%4I1Q zWKEbBhr^Y~wc}B}ga$Obw?NL5igh?t&*-3oe0Yv!1_|y}>sNm#k;Z6nV0v-s-LpTJ zwrag)8(cP~LXcO_I6P&&qrriXuC1MWZtDLjSX6vF7rk;wx=#ud!@; z7DGgo`{{hW{w$8T_3=o@_Xn_{j|=EZhSi*WSk_3;pPS`1f4)+s>_&IIORm{Wn__y%JD6ZBK&X4!!u8NSjAqv^8ETe zMd-CC-==(dW3l>WzDfAmRL+bYU2y4~wPVsHyKh(xAFPAP51Dnvs+bWVl`FTmj^6eo zfxgoy_sDU}@+fx8l595>t<+fLftomEU!4a8%6A!o-D4fsvYSIq8>S&Ukk_^Lc^e^QeScykEYrlOm!NzrDQ07`{!N@@#l_$as3W%oM(8+}&h 
zm1Q5Wk6o8v08JYD>y$Gu8<#r4NWtQmFj(~L8in^)m%Hyb58yv)_cj8VY)W`EYV^i51zQ&)$8-t zPGe+4n&!!l0@GmkeMW;b_1}1d>prlG$rbC(^2+J{3}UlPHno{*FsJl9hju zMMgh==FJQ~7kN~{;Leib;1^vt?hXOTFv+l09RDmTmt~F*ja+cHYhqS6D6VJ!u_k+~Ey}bu*GmZSU21kkCUkm=-&^`TPADKrvoY6}pu<(JK&qjX} zl4bwIK-9{HJ6?jBZ%yt_=rE1_a!_IB@`lS(Hd|`r`m@z_Zy(RSacg_ZD3}uRYFx`) z&aEdbWav_x*oWyBRpdOArt>;?G!-&S_GD?~sXH26qfgw|qEF%$BNmJK`9?HxQ;E>2 zb(E}aOONR3(JYc&JQ+9hWpqv6>(`teR8c7rCb{BMXBEiP+IZBOOSM@U96H+3(K^PT zy_>xk{<$h~C@?qUuhX;#jrGom=F^qR-@o%(OH35`yaG*>`n(yiX8uUFj*KvSKP}7^ zD(JDO9LZobN-iZSE$6c2L569NaKmZ~YI>72HgZ*t*Yo;j-ww|@@-6qJ6MN<5v~8tD z?mjfXFT!{+nJ96&^wnMWq_Ez4OaJ7aq2P8D$I`1M=xO`H#MDI`zemo?M5$N3l_hPHTeMRV7;wG*j!;WQig&iLHbd zEcq^HGV`!zF_~C(nd^HQyH);F-DrSM6C;f%q+1Iw8+J2)CkS&C<1(W9JRMCwT!G-*%Fpg4 z60;M&&_a@CxlANwu$QK0{DHwbAGMBA6Gi=`r4!Tk_C6P%5%NjpKTOLgmE6kX8e@*# z%H|p}E7ofqGh-%zIz{1FlPY?yD%Pwu+GTCLnYH#cjye0WzlOcTG!{@dhDg^BoK>JL zSn{gV@EOm+VZm+blGfh>&e=(KbTnJ3VM^1;#fMuL>X2QbIPvwn$yROKEb^}W#yi|G zsj~%(ud$a7Wa0>jT3PZCdcwJWRCWH%GDB`-L9e~_o4!fMCZC6rhTSJEBa7Ut$f+` zExwvqc3Gxo)mLPRdj^-D*uivkH@1d2j`ei`ypO&0K6TCt8UL7NBZ+j^!W))GxvvpS z&go}sy^Iv1rBRN)zX&Z7LgPHd1*M}5+3Ei2Xm6>N>0kPz4F)RC_z~Z|lLSSJDB}o+wY%<+bM4=1N43U@obFHJPT)?KqA6hw zaRK!^CMwQoR~H&nDJB5>|_j-_~IrmXg#*&dv*sQ*x^luaVe?F2dNw3zr(h zaKtwPI{vdSVDH?We|u?}{qfsU*Q$m>q;AzBe&wirbmvbf#nG=oX=FSumVuAAXVm8l z*i%N)cH`W$0plF9{?&c#DBu>)WBlft-4a$LaV!9u7(Gz87hI_3*C?uSvbNP zP>-rxrY_#cFwQYkra3?2hoQ749*(QcaOz?}J@rlq<#4CI5w~<7X&E#-K&8 z$Oof%#D-%zDtmHeM}dC*ulBNVTUArRbR-7e$QMD?=jTYxPs2d&EMR#=n({`E+ z9&ZDLOX*?=vqt>bpyX>|I+Aq^p|(maB%-JHl0|&(|WQd*C1{(m>S>8 z_?8pg=$_Lr+){Rnh6XGJ_jXe?dPT}*6sLJ^bOl$}97GJ5(3_Ldp2#>}9o`Jg9T9a6 z*Ty*O_w$%HI;kDbG%pS zdWqa#P7dja_&V_1vc^|(@_bpNR>_Eadwnq__mzL3=tOGwknsCv7`UN=^VvfSNjG(v zXYeG{sq@e^LOu;jebf>6$A*RMQzgGnN95=B%lK~MxQRa=`tc$r4=9+XA94&mUgcvM zJnm&n_7WIZbJgWVQ%C6wOQwld9wj?xb$P%&NZ+l$@~ORuX#e7cKo^_k@NCjlGFa!; zyo$YxDM_)*!A<@ok#`d%?s8TJp`$Rpg!YPC?F$L+ATN7U&uhbQAU zN8o)lr_AB9eg!X)-DNCq6HmIGV}vjM-XgB>Mpqe1!(-_zo_g$Lrbs5y3Yyl;OG-kV 
znaLt>RcrdX0?+Ql+wN@Ib;q%(HVrMQ-6hHD`oLKUa9mFKu`32b2k@4kZ|d%fDKtzk znVDI;Q*12JuLpfd|6^P@+4iT!h%=J0D|H}Z04*M0dfJPf^5zK4CW#uJtj`T31y1Kz zVBg&!mgDA`CIf1D2FpxQHOzM?D`PP%I@em5<~)#bVcQGn|M6%heta8lYH{cyYPF8u zootGyrA?#yeg@llF+{#}cJPaK`AVKP{m_Fnz=mXFy@+M3Dx|#fsGpqZ!Ot(puJ+3} z+%P)>CNMrp-q3Bq9h?^RuXo}!rX1=d5ynz~)7TU$moU5s8L~z*ix46^63* zYZm;QcAE;01$BEia&DFUa<{J)z61pv4i%Pl{ya=(j=C|`H3>tnLP~N7$L!tl-bdEUjq&aeVLxXW%7^4$l+O$IQ>h z-2CQ>_~U=iI#Pn|9lNt(J*))u1PuyBcBT|l`E`;3(ozUZv ztULZ&oagWX-S3*VJ?N|YTv6^^x|I?a^Qi<2(qV>{ z&U#E|CY6DBh3Bzx-G0?ZU&VS@O!RI<=WD{0M{}ReVa%If5l^P5o_~MJ@L2lux}yG1 zy*XhWo=h?Ur?fw~h+X$%=2FF=zN)wT5%s87el_+rX(n<=$=iB8p7OyHc69WHuu7 zf4V(W_~6GXV(hu|H90EqS9gFPtD$+mhPI32q?W6$j&n^Pr;ojFaZctNoqKJPy6vo< zRFo+?gGeKHI1Z7_sJ+x&)aS7yW_Q6Fg2LR*{3q&@lTGsYh8-9!gnea#MJTmzq3DBJI@sEG}N3Ba+iJ85OArQWZD(_s>x=;RVQinZN)D@kAnp3TA)E(w zm=^htywWUWn1k5uV6?!mH{rSGZRN}6Hy;U~Cv+2&QfC4%BM0mvHF}f9_BBo+$WBM6 z?+Fo>0y-T@Nrqo70#{6Ot+wS~Ou&&Y(BVMCPHeAc@j- zz31D0XWkdO*Sx0wVdY--o(x^semu`#lrTqO&o8iqFqd{5K8mmR(%gSEgL<`@xJ~V; zX}ZqIo4^j8oXoybtLe))iT(Eeoa)ft$Nwksa0!ui;=_B((q}u8X2`&OvusTbdu?rI zLhh|{nmt3Q)OP}Ltot-N#v@q%neoTTu7uQu4N*LDl{HIu+`mrgJIS92MSB176)}m; zk&x;=C&cMfo$E^anZ#yEUbWLRgo7W)6g5|C6kau17SZ^^H%vD|Own}_t2%RO@~7&& zH=~|>8+5eksr2v6n&VJHH+rym_nE%umyXb$XV$i5BiUy%JW{)7P&(@CdOAL`uJ1ln zi=y82W=R_w%dbU0JG`hWwBjH3nVAXXQmN7r6&##awSFyD8lar^doaV}+TMLg2{&m_ z^la!tkhM8Ls(y}B*rJMg_3QTsL!W{W;pFXHsv5-Hm}|+Z7WnUH;o7D8w8ylE1(Ytg|4{d&s~T zsu?Tq__cRjiHF&|-oY_&Qa#t`T@3Q8bN;pIbf_IR{<^d;>`%TGR1u9GJkv$YabltZ z-7xc+a=_E%;o#yd5;jzNwaOrAp0nCEGp|k&TLD5jg-Dq|13UTg2jJn!VpY%ngPpai zyJgEKyx3<>qX79rC*@S;g5VgJfVLyeY(#9~Xm6DQM`!ZePV>7^E;4co<+r8B%&492 zG5zly%wuyKQFcSfA?bG-1x%QE_;j)!&(SxB-PV@5wV>Z@G(KfyKnO;^HyfGPo`=%u zOPMa{)TIJ{CbmyC5Cp*!mXV`xqr4*s!bHC(j$Unh?Y4`xke9r`8< zrPm{(VMnotkyc%sOz1#&9_qWvju+~t7a(A%tLmCXH4}TSSEfnB_oh*EQa!fptbM7_ zvVB4EO{`VItxav_*`IB{GS@fHE|-_G=$3odju!4;y}mii#^kwaN6*&Cb1mx8;9=a! 
zUpB|&>0#)*FPwW)WW}^7ckS?A;YSKV;Rugj@-;Ih9sgT$Ph@Kf1#lmDKqnbw=UfMM z{uhC&2a)knrAsiAsX^bq#GKkEWoTNK)6$Esa^yGm=Zvd*HCbHA0SvW_sB#8=*}o#k zb{&=*%$7%;Rk_yq|tyz-vg z)VLib$QC_Qobs`Njby>A&!bn|@rqp2x!X8&Iw=}g$2>NRA0Oqjdv9W@@?!!BXUA*IC?M4bP7VWyg zSW{*L@{+rg5lsBz6Yth)U6?BmHwcpX@hJ)}MTA#`Y(>)9C(^cLXsNB-4$7ozj%GQy zhHFkdhe}$GSVcvoXJVhLyS|cA0kEVMhKPk%_6M^+tBZa3{_gHMHpWo z%ImQrX>(#x)j^KWZS3;Ia(s$TynV)$9+O&Is!SXYmTJ#m!;90d-A_R@;KuC6zXxeV zLPT|_6OQkDdtVsau@Zk^InpMag~eRdah~BQksYB%g zOYFGZ#4RSwl`Kfk{R9wCLI<9Ut6qQDWgx8ccUls<7WdS$CY?=inF`LWeI>R!`{gcd zZu?~T`k>!|#C--ZVL5$B6%Y12St-Ry>Dj=}Omo@V+2TbdOwj9|d10-50si0KH$_LV zAgqSw!K9~swT~P+ZGQeLl9iroDeTa2HBHM+HP+LOVX1r|}h4 zb(GBq`X{fBRA?uD6ii>h+h2Xpy@Hl0&8Y2WjyCiXuvLABevM0ESLL_!R!V%kEVp0e zI#uDD?K^j!TH)xYLx@YM|a;wuK~hdWRB zPvEh)%aIomd~GF__S=QAn*%;3pFc`Xkc=#yI?vD`4`usfbxtR%StpdFN(nn)45If+ zehsurat@73#-MD&6m3f3bJbfgQ!A+tH>_>W9--jmbg)Jza{L#*#cr#j;(|pk?ik72 zYlj+gJ}c`jtsK^-C6=4&-ls5W`Nn?5Az}08+QfF$rqBCGuSN)}saMKsX{(C!Ft<#! zi65GBxLy%?bgxi>2FamINB6J*P@tL-=&YJyLCulTbeP|^UAC${LhOgq)h@GRfl0lh zpo_bM;@w~&UuSN;`k(c38_f~rqT=NbEJYvgij_#CFNRaO3sg*{ej%fGpO8{MK+#hrVb_N$MqfXCYis?(VT~z3iSDAzJ?Hp;vzl#7q z2!aZjndqk{!0DAx!)+%A=r)`T0f#Wh!2~FN1z)>!K;@NT+2~r;*H+lw>aA{Vc z^N4srjc=xJhWgERf^t9kk{>3o%*7fs{505mldBM{EEs~U#MxXTOMph8P^HV)9+fy~BV5fQ&}scwIfZdUZpCS}wVG7mx1_(hh*AuO(BsmnDNWV`>>q~x zCNouW`(}L)6gSGYMdht-2RaHn1O*(^7>EhK zmvm%ZGYY`4i%(J|NhTsZVj)j~@NvR_1uN<>@_5778K>fpBoy%wf3hUaxzaiT;sz2V zX5xEa0O(OrPH=qW-(>#5Sk`i*tI>A!>YXI;p&{zAjuLkw3t_LZ0a~=@`cDj@%WzUd z+oCcV(JVU*&pSf30D}W=#M17s| zd`$G^81Qy43rl2gN3oAi*-vlyX>==~&UbOiJ_816je`tj2yZ_7`TLyFAqazwU(`>d z*zXd8DbyiD>BH;KkV&52gZ;l_H$7HA$AEN&CmHbb!Ft5Hv^ z{;z5OLFQs|CD9m&de-s6kVN1&CWXc@*{v%H95|W&r-l@Fw8>Kxbk=zq;M%_r1M;{G zx> zAD!Qx*#JMDF|x^-!8e!kdzMHy93KAtSz&T!fNg)7Uy%m;dP2(t27V_B_Z7F<5LNAf z=l)sKQ9!*(4>}oo!+3uBejM7MyHMuw z|7R5lf#iw*E2|(2E9;FOY%^hJ$NPE$@Zetm51{1y$5v!YgA{WLZ~ zmKEDbF9r`M76Ba*7J-=ClL+L>_;{J&9MSi@WZBm3WzbyTdHaFo?X zMx)P$i+c!56#)18>|2Fif2%uB3g7|@?j1@qIUK91YV+-Z#~<-BKo5I;0t4mXXX=BF 
z;3dRQ-aGttdU*5UuEH&4Dsb(nU5Q`Rqs3&Z#moa8sow%8ResdmJd_*{jDQ~Bh5lyN z4ISzAaF&Xh#?l5Gk!Iin$KS@^k89YtlF+!KknN%t3~pNg2%h+7_b*U1C){bk$vY33 zCC?@Dtp80MlBttQqhN3llBhuG8|oLiNy7HnF(TbSaSK27Tm(5pU6GfZict>1fa!&xmWr^BkOPcyK|6!|99vvS7NY z4<=i>2MV27XJqDIOklWgd&I2o6u6w*gC*Ym$O|Ojk+8=TJ&UGvh@!zUAMY8E~}2z>ng84&82p1)`g~@vDH*4R5M zqJ3~<0dTwtj;$OEKX(mpDbqoR2H;PO#g|ttG>-a7dSKPy%395$hjzk{#COtw$1wkP zkHEI)TL@ccXb{)K2c$+X7=_lqhO|bf{OSj1$Rc}shqHKOMJ6=oW9w;HrKmC8qll>F` literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml index 070ae2572e..09dd118414 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ exclude = ["images*"] [project.optional-dependencies] huggingface = [ - "transformers", + "transformers", "datasets", "sentencepiece", "accelerate", @@ -70,4 +70,4 @@ colab = [ [project.urls] homepage = "http://www.unsloth.ai" documentation = "https://github.com/unslothai/unsloth" -repository = "https://github.com/unslothai/unsloth" \ No newline at end of file +repository = "https://github.com/unslothai/unsloth" diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 100625fac4..e9f8b5e65b 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2023.11" +__version__ = "2023.12" import os import warnings import importlib @@ -35,7 +35,7 @@ ) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" else: - warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. We shall set it ourselves.") + # warnings.warn("Unsloth: 'CUDA_VISIBLE_DEVICES' is not set. 
We shall set it ourselves.") os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" pass diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 175caab0d7..67ad306fcf 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -43,7 +43,6 @@ def _cross_entropy_forward(logits_ptr, logits_row_stride, mask = col_offsets < n_cols # TODO: Fixup int32 locations to int64 - # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 label_idx = tl.load(labels_ptr).to(tl.int32) logits = tl.load(logits_ptr + col_offsets, mask = mask, other = -float("inf")).to(tl.float32) max_logits = tl.max(logits, 0) @@ -88,7 +87,6 @@ def _cross_entropy_backward(logits_ptr, logits_row_stride, col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols # TODO: Fixup int32 locations to int64 - # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 label_idx = tl.load(labels_ptr + row_idx).to(tl.int32) if label_idx != -100: diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index ee1b20f88f..99a7a50f47 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -35,7 +35,6 @@ def _rope_embedding( mask = col_offsets < half_head_dim # TODO: Fixup int32 locations to int64 - # https://github.com/Dao-AILab/flash-attention/commit/c79de85ffa0d19b80fa468f90c5086e837499d72 rot_position = row_position % seqlen Q += row_position* Q_row_stride + head_position*head_dim @@ -48,8 +47,6 @@ def _rope_embedding( Q2 = tl.load(Q + half_head_dim*1 + col_offsets, mask = mask, other = 0) # RoPE repeats sin and cos so 128 = [64, 64]. 
- # sin2 = tl.load(sin + half_head_dim*1, mask = mask, other = 0) - # cos2 = tl.load(cos + half_head_dim*1, mask = mask, other = 0) if BACKWARD_PASS: """ @@ -62,11 +59,8 @@ def _rope_embedding( where R.T is again the same [ 0, -I] but the minus is transposed. [ I, 0] """ - # sin1, sin2 = -sin1, -sin2 sin1 = -sin1 - - # tl.store(Q + half_head_dim*0, Q1*cos1 - Q2*sin1, mask = mask) - # tl.store(Q + half_head_dim*1, Q2*cos2 + Q1*sin2, mask = mask) + # RoPE repeats sin and cos so 128 = [64, 64]. tl.store(Q + half_head_dim*0 + col_offsets, Q1*cos1 - Q2*sin1, mask = mask) tl.store(Q + half_head_dim*1 + col_offsets, Q2*cos1 + Q1*sin1, mask = mask) diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index df4b85f0b6..34906a5a1a 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -13,12 +13,12 @@ # limitations under the License. import triton -MAX_FUSED_SIZE = 65535 # 2**16 - 1 +MAX_FUSED_SIZE = 65536 # 2**16 Solves https://github.com/unslothai/unsloth/issues/7 next_power_of_2 = triton.next_power_of_2 def calculate_settings(n): BLOCK_SIZE = next_power_of_2(n) - # CUDA only supports 65535 - 2^16-1 threads per block + # CUDA only supports 65536 - 2^16 threads per block if BLOCK_SIZE > MAX_FUSED_SIZE: raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\ f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 8ab451f7d5..bfc0e26c0b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -16,14 +16,15 @@ from typing import Optional, Tuple, List, Union from torch.nn.functional import scaled_dot_product_attention from transformers.models.llama.modeling_llama import ( - # apply_rotary_pos_emb, - # repeat_kv, - # _prepare_4d_causal_attention_mask, + _prepare_4d_causal_attention_mask, logger, BaseModelOutputWithPast, CausalLMOutputWithPast, ) from ..kernels import * +from ._utils import ( + prepare_model_for_kbit_training, +) # Get Flash Attention v2 if 
Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() @@ -37,7 +38,6 @@ # Tri Dao's benchmark shows xformers is faster for now. HAS_FLASH_ATTENTION = False pass - import xformers.ops.fmha as xformers xformers_attention = xformers.memory_efficient_attention @@ -55,12 +55,9 @@ import numpy as np import types -from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig +from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig from transformers import set_seed as transformers_set_seed from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model -from ._utils import ( - prepare_model_for_kbit_training, -) def original_apply_qkv(self, X): @@ -92,10 +89,6 @@ def LlamaAttention_fast_forward( ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() - - # Q = self.q_proj(hidden_states) - # K = self.k_proj(hidden_states) - # V = self.v_proj(hidden_states) Q, K, V = self.apply_qkv(self, hidden_states) n_heads = self.num_heads @@ -112,8 +105,6 @@ def LlamaAttention_fast_forward( if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] - # cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) - # Q, K = apply_rotary_pos_emb(Q, K, cos, sin, position_ids) if position_ids is None: cos = self.rotary_emb.cos_cached sin = self.rotary_emb.sin_cached @@ -130,10 +121,9 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - # no_attention_mask = attention_mask is None - # Ignore attention_mask - - if (not HAS_FLASH_ATTENTION): #and no_attention_mask: + # Xformers doesnt support backward pass for GQA (yet) + # TEMP fix + if (n_groups == 1) and (not HAS_FLASH_ATTENTION): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) @@ -143,18 +133,17 @@ def 
LlamaAttention_fast_forward( # Grouped query attention if n_groups != 1: - Q = Q.reshape(bsz, q_len, n_groups, n_kv_heads, head_dim) - - K = K.reshape(bsz, q_len, n_groups, 1, head_dim) - V = V.reshape(bsz, q_len, n_groups, 1, head_dim) - K = K .expand(bsz, q_len, n_groups, n_kv_heads, head_dim) - V = V .expand(bsz, q_len, n_groups, n_kv_heads, head_dim) + Q = Q.reshape(bsz, q_len, n_kv_heads, n_groups, head_dim) + K = K.reshape(bsz, q_len, n_kv_heads, 1, head_dim) + V = V.reshape(bsz, q_len, n_kv_heads, 1, head_dim) + K = K .expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + V = V .expand(bsz, q_len, n_kv_heads, n_groups, head_dim) pass A = xformers_attention(Q, K, V, attn_bias = causal_mask) A = A.view(bsz, q_len, n_heads, head_dim) - elif HAS_FLASH_ATTENTION:# and no_attention_mask: + elif HAS_FLASH_ATTENTION: # Flash Attention # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) Q = Q.transpose(1, 2) @@ -163,37 +152,22 @@ def LlamaAttention_fast_forward( # Flash Attention v2 auto supports grouped query attention A = flash_attn_func(Q, K, V, causal = True) - else: - # Uses Pytorch's scaled dot product attention - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" - ) - pass - # Grouped query attention - # K = repeat_kv(K, n_groups) - # V = repeat_kv(V, n_groups) if n_groups != 1: K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) K = K.reshape(bsz, n_heads, q_len, head_dim) V = V.reshape(bsz, n_heads, q_len, head_dim) pass - # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! 
- A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = attention_mask is None) + A = scaled_dot_product_attention(Q, K, V, attn_mask = None, is_causal = True) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2) pass attn_output = A.reshape(bsz, q_len, self.hidden_size) - - # attn_output = self.o_proj(attn_output) attn_output = self.apply_o(self, attn_output) - attn_weights = None return attn_output, attn_weights, past_key_value pass @@ -227,7 +201,6 @@ def LlamaDecoderLayer_fast_forward( """ residual = hidden_states - # hidden_states = self.input_layernorm(hidden_states) hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states) # Self Attention @@ -245,7 +218,6 @@ def LlamaDecoderLayer_fast_forward( # Fully Connected residual = hidden_states - # hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states @@ -308,7 +280,7 @@ def LlamaModel_fast_forward( if (past_key_values_length != 0): position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, - dtype = torch.int32,#dtype=torch.long, + dtype = torch.int32, device = "cuda", ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) @@ -326,11 +298,7 @@ def LlamaModel_fast_forward( inputs_embeds = self.embed_tokens(input_ids) # Ignore attention_mask - if True: - # if attention_mask is None: - # attention_mask = torch.ones( - # (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device - # ) + if attention_mask is None: padding_mask = None else: if 0 in attention_mask: @@ -339,7 +307,7 @@ def LlamaModel_fast_forward( padding_mask = None attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + attention_mask, (batch_size, seq_length), inputs_embeds, 
past_key_values_length, ) pass @@ -403,7 +371,6 @@ def custom_forward(*inputs): all_self_attns += (layer_outputs[1],) pass - # hidden_states = self.norm(hidden_states) hidden_states = fast_rms_layernorm(self.norm, hidden_states) # add hidden states from the last decoder layer @@ -466,19 +433,13 @@ def LlamaForCausalLM_fast_forward( loss = None if labels is not None: - # logits = logits.float() - # shift_logits = logits[..., :-1, :].contiguous() - # shift_labels = labels[..., 1:].contiguous() - # shift_labels = shift_labels.view(-1) - # shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + pass + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) - - # loss_fct = torch.nn.CrossEntropyLoss( - # ignore_index = self.ignore_index, - # label_smoothing = self.label_smoothing, - # ) - # loss = loss_fct(shift_logits, shift_labels) loss = fast_cross_entropy_loss( logits = shift_logits, labels = shift_labels, @@ -547,13 +508,14 @@ def from_pretrained( load_in_4bit = True, token = None, device_map = "sequential", + rope_scaling = None, ): gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() statistics = \ - "==((====))== Unsloth: Fast Llama patching release 23.11\n"\ + "==((====))== Unsloth: Fast Llama patching release 2023.12\n"\ f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ f"\ / Pytorch version: {torch.__version__}. 
CUDA Toolkit = {torch.version.cuda}\n"\ @@ -570,9 +532,20 @@ def from_pretrained( assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) - # [TODO]: Determine RoPE scaling - # https://github.com/huggingface/transformers/pull/24653 - assert(max_seq_length <= 4096) + # RoPE scaling + model_max_seq_length = \ + AutoConfig.from_pretrained(model_name, token = token).max_position_embeddings + + if (rope_scaling is None) and (max_seq_length > model_max_seq_length): + rope_scaling = max_seq_length / model_max_seq_length + logger.warning_once( + f"Unsloth: {model_name} can only handle sequence lengths of of most "\ + f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ + f"{round(rope_scaling, 3)}, it can be magically be extended to "\ + f"{max_seq_length}!" + ) + rope_scaling = {"type": "linear", "factor": rope_scaling,} + pass bnb_config = None if load_in_4bit: @@ -589,6 +562,7 @@ def from_pretrained( torch_dtype = dtype, quantization_config = bnb_config, token = token, + rope_scaling = rope_scaling, ) tokenizer = AutoTokenizer.from_pretrained( model_name, @@ -596,9 +570,22 @@ def from_pretrained( padding_side = "right", token = token, ) - tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}); - tokenizer.pad_token = tokenizer.unk_token - config = model.config.update({"pad_token_id" : tokenizer.unk_token_id}); + + if not hasattr(tokenizer, "pad_token"): + # Fixes https://github.com/unslothai/unsloth/issues/5 + if hasattr(tokenizer, "unk_token"): + tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) + tokenizer.pad_token = tokenizer.unk_token + else: + logger.warning_one( + f"{model_name} does not have a padding or unknown token!\n"\ + f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." 
+ ) + assert(hasattr(tokenizer, "eos_token")) + tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) + tokenizer.pad_token = tokenizer.eos_token + config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) + pass model = FastLlamaModel.post_patch(model) @@ -607,6 +594,8 @@ def from_pretrained( layer.self_attn.apply_qkv = original_apply_qkv layer.self_attn.apply_o = original_apply_o pass + + model.max_seq_length = max_seq_length return model, tokenizer pass @@ -668,6 +657,8 @@ def get_peft_model( random_state = 3407, max_seq_length = 2048, ): + assert(max_seq_length <= model.max_seq_length) + if lora_dropout != 0: raise TypeError("Unsloth: Fast Llama patching only works with dropout = 0.") if bias != "none": @@ -727,8 +718,14 @@ def get_peft_model( pass # Patch cross entropy loss labels - model.model.extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") - + # Fixes https://github.com/unslothai/unsloth/issues/10 + extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") + model.model.extra_ignored_labels = extra_ignored_labels + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_seq_length + internal_model = internal_model.model + pass return model pass pass From 54ca9035b85eaab430dd640dd266d1bbacf55979 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Thu, 7 Dec 2023 00:13:29 +1100 Subject: [PATCH 0061/1088] Fix generation --- unsloth/models/llama.py | 96 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index bfc0e26c0b..0a89f6bf30 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -74,6 +74,91 @@ def original_apply_o(self, X): pass +def LlamaAttention_fast_forward_inference( + self, + hidden_states: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]], + position_ids, +): + """ + 
https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406 + Fast inference using KV cache. + QK^T can be computed in 4 chunks + + [Q, q] @ [K, k].T where q, k are the new tokens. + [QK^T, Qk^T] + [qK^T, qk^T] + + Since the attention mask wipes Qk^T, we just get + [QK^T, 0] + [qK^T, qk^T] + + Since softmax is row-wise, we get + softmax([QK^T, 0]) + softmax([qK^T, qk^T]) + + We then multiply by [V] + [v] + softmax([QK^T, 0]) [softmax(QK^T)V] * + softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]] + + But notice * [softmax(QK^T)V] is just the last attention. + We just need to compute the last final row. + + This means we can pass in a row of Q, but we need to + remember K and V, which are called the KV cache. + """ + Xn = hidden_states + bsz, _, _ = hidden_states.size() + K1, V1 = past_key_value + + Wq = self.q_proj.weight + Wk = self.k_proj.weight + Wv = self.v_proj.weight + Wo = self.o_proj.weight + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Qn, Kn, Vn = original_apply_qkv(self, Xn) + Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2) + Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K1.shape[-2] + 1 + cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len) + Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids) + + # New KV cache + Kn = torch.cat([K1, Kn], dim = 2) + Vn = torch.cat([V1, Vn], dim = 2) + + # Grouped query attention + # K = repeat_kv(K, n_groups) + # V = repeat_kv(V, n_groups) + if n_groups != 1: + _, _, cached_len, _ = Kn.shape + Kn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Kn = Kn.reshape(bsz, n_heads, cached_len, head_dim) + Vn = 
Vn.reshape(bsz, n_heads, cached_len, head_dim) + pass + + # Attention + A = torch.matmul(Qn, Kn.transpose(2, 3)) + A *= 1.0 / (self.head_dim**0.5) + A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) + A = torch.matmul(A, Vn) + A = A.transpose(1, 2) + A = A.reshape(bsz, 1, self.hidden_size) + A = original_apply_o(self, A) + return A, (Kn, Vn) +pass + + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320 def LlamaAttention_fast_forward( self, @@ -91,6 +176,17 @@ def LlamaAttention_fast_forward( bsz, q_len, _ = hidden_states.size() Q, K, V = self.apply_qkv(self, hidden_states) + # Check for inference + if use_cache and past_key_value is not None and q_len == 1: + A, past_key_value = LlamaAttention_fast_forward_inference( + self, + hidden_states, + past_key_value, + position_ids, + ) + return A, None, past_key_value + pass + n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads From 08787b7021601bd0812ef5eb6f5b85c264935ecb Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Mon, 11 Dec 2023 23:58:36 +1100 Subject: [PATCH 0062/1088] Mistral, GQA support --- pyproject.toml | 16 ++ unsloth/models/__init__.py | 1 + unsloth/models/llama.py | 36 +++-- unsloth/models/mistral.py | 307 +++++++++++++++++++++++++++++++++++++ 4 files changed, 347 insertions(+), 13 deletions(-) create mode 100644 unsloth/models/mistral.py diff --git a/pyproject.toml b/pyproject.toml index 09dd118414..3520b7dd93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,22 @@ kaggle = [ colab = [ "unsloth[cu118]", ] +cu118_ampere = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only]", + "flash-attn", +] +cu121_ampere = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121only]", + "flash-attn", +] +colab_ampere = [ + "unsloth[cu118]", + "flash-attn", +] [project.urls] homepage = "http://www.unsloth.ai" diff --git 
a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 4df2e937e4..2b1ad6df0a 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -13,3 +13,4 @@ # limitations under the License. from .llama import FastLlamaModel +from .mistral import FastMistralModel diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 0a89f6bf30..1dc69be042 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -217,9 +217,7 @@ def LlamaAttention_fast_forward( past_key_value = (K, V) if use_cache else None # Attention module - # Xformers doesnt support backward pass for GQA (yet) - # TEMP fix - if (n_groups == 1) and (not HAS_FLASH_ATTENTION): + if (not HAS_FLASH_ATTENTION): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) @@ -227,15 +225,21 @@ def LlamaAttention_fast_forward( K = K.transpose(1, 2) V = V.transpose(1, 2) - # Grouped query attention + # Group query attention if n_groups != 1: - Q = Q.reshape(bsz, q_len, n_kv_heads, n_groups, head_dim) - K = K.reshape(bsz, q_len, n_kv_heads, 1, head_dim) - V = V.reshape(bsz, q_len, n_kv_heads, 1, head_dim) - K = K .expand(bsz, q_len, n_kv_heads, n_groups, head_dim) - V = V .expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + # Xformers does not support backward, so we have to convert + # GQA to MQA by cloning K and V + K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + else: + # Xformers does support the forward pass though + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) pass - A = xformers_attention(Q, K, V, attn_bias = 
causal_mask) A = A.view(bsz, q_len, n_heads, head_dim) @@ -258,7 +262,7 @@ def LlamaAttention_fast_forward( pass # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! - A = scaled_dot_product_attention(Q, K, V, attn_mask = None, is_causal = True) + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) # Go back to (batch_size, seq_len, n_heads, head_dim) A = A.transpose(1, 2) pass @@ -403,7 +407,12 @@ def LlamaModel_fast_forward( padding_mask = None attention_mask = _prepare_4d_causal_attention_mask( - attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window = None if not hasattr(self.config "sliding_window") else \ + self.config.sliding_window, ) pass @@ -812,7 +821,7 @@ def get_peft_model( layer.self_attn.apply_o = apply_lora_o pass pass - + # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") @@ -822,6 +831,7 @@ def get_peft_model( internal_model.max_seq_length = max_seq_length internal_model = internal_model.model pass + internal_model.max_seq_length = max_seq_length return model pass pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py new file mode 100644 index 0000000000..4d91e98371 --- /dev/null +++ b/unsloth/models/mistral.py @@ -0,0 +1,307 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .llama import * + +from transformers.models.llama.modeling_mistral import ( + MistralAttention, + MistralDecoderLayer, + MistralModel, + MistralForCausalLM, +) + +def MistralAttention_fast_forward( + self, + hidden_states: torch.Tensor, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + padding_mask: Optional[torch.LongTensor] = None, + *args, **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + bsz, q_len, _ = hidden_states.size() + Q, K, V = self.apply_qkv(self, hidden_states) + + n_heads = self.num_heads + n_groups = self.num_key_value_groups + n_kv_heads = self.num_key_value_heads + head_dim = self.head_dim + assert(n_kv_heads * n_groups == n_heads) + + Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2) + K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2) + + kv_seq_len = K.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + + if position_ids is None: + cos = self.rotary_emb.cos_cached + sin = self.rotary_emb.sin_cached + Q, K = fast_rope_embedding(Q, K, cos, sin) + else: + cos, sin = self.rotary_emb(V, seq_len = kv_seq_len) + Q, K = inplace_rope_embedding(Q, K, cos, sin, position_ids) + pass + + if past_key_value is not None: + # reuse k, v, 
self_attention + K = torch.cat([past_key_value[0], K], dim = 2) + V = torch.cat([past_key_value[1], V], dim = 2) + past_key_value = (K, V) if use_cache else None + + # Attention module + if (not HAS_FLASH_ATTENTION): + # Xformers memory efficient attention + # Also has Flash Attention v2 dispatching + # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + + # Group query attention + if n_groups != 1: + K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + # Xformers does not support backward, so we have to convert + # GQA to MQA by cloning K and V + K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + else: + # Xformers does support the forward pass though + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + pass + + A = xformers_attention(Q, K, V, attn_bias = causal_mask) + A = A.view(bsz, q_len, n_heads, head_dim) + + elif HAS_FLASH_ATTENTION: + # Flash Attention + # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) + Q = Q.transpose(1, 2) + K = K.transpose(1, 2) + V = V.transpose(1, 2) + + # Flash Attention v2 auto supports grouped query attention + sliding_window = self.config.sliding_window + sliding_window = q_len if sliding_window is None else sliding_window + window = (-1, -1) if (q_len <= sliding_window) else (sliding_window, sliding_window) + A = flash_attn_func(Q, K, V, causal = True, window_size = window) + else: + # Grouped query attention + if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = 
K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + pass + # Needs (batch_size, n_heads, seq_len, head_dim) + # is_casual and attention_mask must not be both set! + A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) + # Go back to (batch_size, seq_len, n_heads, head_dim) + A = A.transpose(1, 2) + pass + + attn_output = A.reshape(bsz, q_len, self.hidden_size) + attn_output = self.apply_o(self, attn_output) + attn_weights = None + return attn_output, attn_weights, past_key_value +pass + + +def MistralForCausalLM_fast_forward( + self, + input_ids: torch.LongTensor = None, + causal_mask: Optional[xformers.attn_bias.BlockDiagonalCausalMask] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + *args, **kwargs, +) -> Union[Tuple, CausalLMOutputWithPast]: + + if causal_mask is None: + bsz, q_len = input_ids.shape + sliding_window = self.config.sliding_window + if sliding_window is None or sliding_window <= 0: + causal_mask = xformers.attn_bias.LowerTriangularMask() + elif q_len <= sliding_window: + causal_mask = xformers.attn_bias.LowerTriangularMask() + else: + causal_mask = xformers.attn_bias.BlockDiagonalCausalLocalAttentionMask.\ + make_local_attention(window_size = sliding_window) + pass + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # 
decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + causal_mask=causal_mask, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + shift_logits = logits + if not hasattr(self, "extra_ignored_labels"): + # Fixes https://github.com/unslothai/unsloth/issues/10 + self.extra_ignored_labels = torch.full((self.max_seq_length, 1), -100, device = "cuda") + pass + + shift_labels = torch.hstack((labels[..., 1:], self.extra_ignored_labels[:labels.shape[0]])) + loss = fast_cross_entropy_loss( + logits = shift_logits, + labels = shift_labels, + ) + pass + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) +pass + + +class FastMistralModel(FastLlamaModel): + + @staticmethod + def pre_patch(): + MistralAttention .forward = MistralAttention_fast_forward + MistralDecoderLayer .forward = LlamaDecoderLayer_fast_forward + MistralModel .forward = LlamaModel_fast_forward + MistralForCausalLM .forward = MistralForCausalLM_fast_forward + PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + return + pass + + + @staticmethod + def from_pretrained( + model_name = "mistralai/Mistral-7B-v0.1", + max_seq_length = 4096, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + # rope_scaling = None, Mistral does not support RoPE scaling + ): + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = 
round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() + + statistics = \ + "==((====))== Unsloth: Fast Mistral patching release 2023.12\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ + f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ + f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ + f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n' + print(statistics) + + FastMistralModel.pre_patch() + + if dtype is None: + dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16 + elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16: + logger.warning_once("Device does not support bfloat16. Will change to float16.") + dtype = torch.float16 + + assert(dtype == torch.float16 or dtype == torch.bfloat16 or dtype == torch.float32) + + bnb_config = None + if load_in_4bit: + bnb_config = BitsAndBytesConfig( + load_in_4bit = True, + bnb_4bit_use_double_quant = True, + bnb_4bit_quant_type = "nf4", + bnb_4bit_compute_dtype = dtype, + ) + + model = AutoModelForCausalLM.from_pretrained( + model_name, + device_map = device_map, + torch_dtype = dtype, + quantization_config = bnb_config, + token = token, + # rope_scaling = rope_scaling, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_name, + model_max_length = max_seq_length, + padding_side = "right", # MUST be right or else attention fails! + token = token, + ) + + if not hasattr(tokenizer, "pad_token"): + # Fixes https://github.com/unslothai/unsloth/issues/5 + if hasattr(tokenizer, "unk_token"): + tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) + tokenizer.pad_token = tokenizer.unk_token + else: + logger.warning_one( + f"{model_name} does not have a padding or unknown token!\n"\ + f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." 
+ ) + assert(hasattr(tokenizer, "eos_token")) + tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) + tokenizer.pad_token = tokenizer.eos_token + config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) + pass + + model = FastMistralModel.post_patch(model) + + # Patch up QKV / O and MLP + for idx, layer in enumerate(model.model.layers): + layer.self_attn.apply_qkv = original_apply_qkv + layer.self_attn.apply_o = original_apply_o + pass + + model.max_seq_length = max_seq_length + return model, tokenizer + pass +pass From e699f1d09e38817cc10621031d7e92edc799a456 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 12 Dec 2023 00:57:06 +1100 Subject: [PATCH 0063/1088] Update llama.py --- unsloth/models/llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1dc69be042..9e9c3cca5d 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -411,7 +411,7 @@ def LlamaModel_fast_forward( (batch_size, seq_length), inputs_embeds, past_key_values_length, - sliding_window = None if not hasattr(self.config "sliding_window") else \ + sliding_window = None if not hasattr(self.config, "sliding_window") else \ self.config.sliding_window, ) pass @@ -821,7 +821,7 @@ def get_peft_model( layer.self_attn.apply_o = apply_lora_o pass pass - + # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") From 28f3b971d21e469fb985f384db10a03982c4ce12 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 12 Dec 2023 00:59:30 +1100 Subject: [PATCH 0064/1088] Update mistral.py --- unsloth/models/mistral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 4d91e98371..0c13f998f7 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -14,7 +14,7 @@ from .llama import * 
-from transformers.models.llama.modeling_mistral import ( +from transformers.models.mistral.modeling_mistral import ( MistralAttention, MistralDecoderLayer, MistralModel, From 2d5d88487463e76f75002be3b2704267ec96e68a Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 12 Dec 2023 02:40:37 +1100 Subject: [PATCH 0065/1088] tokenizer pad fix --- unsloth/models/_utils.py | 43 +++++++++++++++++++++++++++++++++++++++ unsloth/models/llama.py | 33 +++--------------------------- unsloth/models/mistral.py | 29 ++------------------------ 3 files changed, 48 insertions(+), 57 deletions(-) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 32d20aee5f..0ccbe9117f 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -19,6 +19,14 @@ import gc warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") import bitsandbytes as bnb +from transformers.models.llama.modeling_llama import logger + +__version__ = "2023.12" +__all__ = [ + "prepare_model_for_kbit_training", + "patch_tokenizer", + "print_unsloth_message", +] def prepare_model_for_kbit_training( @@ -59,3 +67,38 @@ def make_inputs_require_grad(module, input, output): return model pass + + +def patch_tokenizer(model, tokenizer): + if not hasattr(tokenizer, "pad_token") or tokenizer.pad_token is None: + # Fixes https://github.com/unslothai/unsloth/issues/5 + if hasattr(tokenizer, "unk_token"): + tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) + tokenizer.pad_token = tokenizer.unk_token + else: + logger.warning_one( + f"{model.config._name_or_path} does not have a padding or unknown token!\n"\ + f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." 
+ ) + assert(hasattr(tokenizer, "eos_token")) + tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) + tokenizer.pad_token = tokenizer.eos_token + config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) + pass + return model, tokenizer +pass + + +def print_unsloth_message(name): + SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + + statistics = \ + f"==((====))== Unsloth: Fast {name} patching release {__version__}\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ + f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ + f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ + f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n' + print(statistics) +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 9e9c3cca5d..a3a91ab33d 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -22,9 +22,7 @@ CausalLMOutputWithPast, ) from ..kernels import * -from ._utils import ( - prepare_model_for_kbit_training, -) +from ._utils import * # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() @@ -615,18 +613,8 @@ def from_pretrained( device_map = "sequential", rope_scaling = None, ): - gpu_stats = torch.cuda.get_device_properties(0) - max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - - statistics = \ - "==((====))== Unsloth: Fast Llama patching release 2023.12\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ - f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ - f"\ / Pytorch version: {torch.__version__}. 
CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n' - print(statistics) - + print_unsloth_message("Mistral") FastLlamaModel.pre_patch() if dtype is None: @@ -676,22 +664,7 @@ def from_pretrained( token = token, ) - if not hasattr(tokenizer, "pad_token"): - # Fixes https://github.com/unslothai/unsloth/issues/5 - if hasattr(tokenizer, "unk_token"): - tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) - tokenizer.pad_token = tokenizer.unk_token - else: - logger.warning_one( - f"{model_name} does not have a padding or unknown token!\n"\ - f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." - ) - assert(hasattr(tokenizer, "eos_token")) - tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) - tokenizer.pad_token = tokenizer.eos_token - config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) - pass - + model, tokenizer = patch_tokenizer(model, tokenizer) model = FastLlamaModel.post_patch(model) # Patch up QKV / O and MLP diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 0c13f998f7..8b1fd0fd84 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -231,18 +231,8 @@ def from_pretrained( device_map = "sequential", # rope_scaling = None, Mistral does not support RoPE scaling ): - gpu_stats = torch.cuda.get_device_properties(0) - max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - - statistics = \ - "==((====))== Unsloth: Fast Mistral patching release 2023.12\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ - f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ - f"\ / Pytorch version: {torch.__version__}. 
CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n' - print(statistics) - + print_unsloth_message("Mistral") FastMistralModel.pre_patch() if dtype is None: @@ -277,22 +267,7 @@ def from_pretrained( token = token, ) - if not hasattr(tokenizer, "pad_token"): - # Fixes https://github.com/unslothai/unsloth/issues/5 - if hasattr(tokenizer, "unk_token"): - tokenizer.add_special_tokens({"pad_token" : tokenizer.unk_token}) - tokenizer.pad_token = tokenizer.unk_token - else: - logger.warning_one( - f"{model_name} does not have a padding or unknown token!\n"\ - f"Will use the EOS token of id {tokenizer.eos_token_id} as padding." - ) - assert(hasattr(tokenizer, "eos_token")) - tokenizer.add_special_tokens({"pad_token" : tokenizer.eos_token}) - tokenizer.pad_token = tokenizer.eos_token - config = model.config.update({"pad_token_id" : tokenizer.eos_token_id}) - pass - + model, tokenizer = patch_tokenizer(model, tokenizer) model = FastMistralModel.post_patch(model) # Patch up QKV / O and MLP From 82dcf5298048d7536e84546891a6794102211fcb Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 12 Dec 2023 02:54:54 +1100 Subject: [PATCH 0066/1088] Update llama.py --- unsloth/models/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index a3a91ab33d..e4b5118e62 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -15,8 +15,8 @@ import torch from typing import Optional, Tuple, List, Union from torch.nn.functional import scaled_dot_product_attention +from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from transformers.models.llama.modeling_llama import ( - _prepare_4d_causal_attention_mask, logger, BaseModelOutputWithPast, CausalLMOutputWithPast, From badf6a4980311948a023fef7b71a4730a4723505 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 12 Dec 2023 13:07:38 +1100 Subject: [PATCH 0067/1088] 
Update llama.py --- unsloth/models/llama.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e4b5118e62..1f0046ec06 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -614,7 +614,7 @@ def from_pretrained( rope_scaling = None, ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - print_unsloth_message("Mistral") + print_unsloth_message("Llama") FastLlamaModel.pre_patch() if dtype is None: @@ -734,6 +734,7 @@ def get_peft_model( use_gradient_checkpointing = True, random_state = 3407, max_seq_length = 2048, + **kwargs, ): assert(max_seq_length <= model.max_seq_length) @@ -759,6 +760,7 @@ def get_peft_model( bias = "none", task_type = TaskType.CAUSAL_LM, layers_to_transform = layers_to_transform, + **kwargs, ) model = prepare_model_for_kbit_training( From 50f55b2a38a931dcd0f807043ad1a94f8e75acfc Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 14 Dec 2023 12:33:25 +1100 Subject: [PATCH 0068/1088] Update README.md --- README.md | 189 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 172 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 9c44e2628d..988dfa8ca6 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,17 @@
    -## 2-5x faster 50% less memory local LLM finetuning -* Manual autograd engine - hand derived backprop steps. -* 2x to 5x faster than QLoRA. 50% less memory usage. +## 2-5x faster 60% less memory local QLoRA finetuning +* Supports Llama 7b, 13b, 70b, CodeLlama 34b, Mistral 7b, TinyLlama and all Llama archs! +* Mistral 7b [(Colab example)](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). +* CodeLlama 34b [(Colab example)](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) does not OOM is 1.9x faster, uses 32% less VRAM (27GB). +* Kaggle 2 Tesla T4s 5.28x faster on Alpaca. [(Kaggle example)](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. * 0% loss in accuracy - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) -* [Flash Attention v2](https://github.com/Dao-AILab/flash-attention) support via [Xformers](https://github.com/facebookresearch/xformers). * **NEW!** Works on **Linux** and **Windows** via WSL. * **NEW!** Experimental support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290)! * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -* Train Slim Orca **fully locally in 260 hours from 1301 hours (5x faster).** * Open source version trains 5x faster or you can check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**!
    @@ -22,9 +22,16 @@
    -1. Try our Colab examples for [the Alpaca 52K dataset](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) or [the Slim Orca 518K dataset](https://colab.research.google.com/drive/1VNqLARpE8N8eYwNrUSDoHVjtbR9W0_c7?usp=sharing). -2. Try our Kaggle example for [the LAION OIG Chip2 dataset](https://www.kaggle.com/danielhanchen/unsloth-laion-chip2-kaggle) -3. Join our [Discord](https://discord.gg/nsS4V5Z6ge)! +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | +| LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | +| OASST | 1x | 1.19x | 2.17x | 2.66x | 5.04x | **14.83x** | +| Slim Orca | 1x | 1.18x | 2.22x | 2.64x | 5.04x | **14.82x** | + +Join our [Discord](https://discord.gg/nsS4V5Z6ge)! +If you trained a model with Unsloth, we made a cool sticker!! + # Installation Instructions - Conda Unsloth currently only supports Linux distros and Pytorch >= 2.1. @@ -39,10 +46,12 @@ pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" ``` import torch; torch.version.cuda ``` -2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1 +2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the "ampere" path. ``` pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118_ampere] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121_ampere] @ git+https://github.com/unslothai/unsloth.git" ``` 3. 
We only support Pytorch 2.1: You can update Pytorch via Pip: ``` @@ -59,7 +68,7 @@ pip install --upgrade pip # Documentation We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! ``` -from unsloth import FastLlamaModel +from unsloth import FastLlamaModel, FastMistralModel import torch max_seq_length = 2048 # Can change to any number <= 4096 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ @@ -91,16 +100,13 @@ model = FastLlamaModel.get_peft_model( trainer = .... Use Huggingface's Trainer and dataset loading (TRL, transformers etc) ``` -If you trained a model with Unsloth, we made a cool sticker!! - - # DPO (Direct Preference Optimization) Experimental support [152334H](https://github.com/152334H) hacked Unsloth to work with DPO via TRL! 1. Hack the model's `config.json` to be llama model. [Example gist](https://gist.github.com/152334H/d8a68b51b83bac008a02e69ecc81d5c1). 2. Use Unsloth for DPO for both base and reference models. [Example gist](https://gist.github.com/152334H/4847f3a8cca12894877e6b30698b0b64). # Future Milestones and limitations -1. Support sqrt gradient checkpointing which further slashes memory usage by 25%. +1. Support Mixtral. 2. Does not support non Llama models - we do so in the future. # Performance comparisons on 1 Tesla T4 GPU: @@ -147,9 +153,158 @@ Two Tesla T4s on Kaggle * Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. 
-### For replication of timings: -* [Huggingface LAION DDP reference implementation](https://www.kaggle.com/code/danielhanchen/huggingface-original-laion-oig) 60 steps on DDP Kaggle 2 Tesla T4 GPUs takes 40 minutes and 46 seconds -* [Unsloth LAION DDP fast implementation](https://www.kaggle.com/code/danielhanchen/unsloth-laion-chip2-kaggle) 60 steps on DDP Kaggle 2 Tesla T4 GPUs - **Unsloth only uses 1 GPU whilst Pro plans use more.** takes 4 minutes and 34 seconds **(8.64x speedup)** +# Full benchmarking tables +Click "Code" for a fully reproducible example. +"Unsloth Equal" is a preview of our PRO version, with code stripped out. All settings and the loss curve remains identical. +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | +| code | [Code](https://colab.research.google.com/drive/1u4dBeM-0vGNVmmO6X7cScAut-Hyt4KDF?usp=sharing) | [Code](https://colab.research.google.com/drive/1fgTOxpMbVjloQBvZyz4lF4BacKSZOB2A?usp=sharing) | [Code](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Code](https://colab.research.google.com/drive/1ANW8EFL3LVyTD7Gq4TkheC1Z7Rxw-rHp?usp=sharing) | | | +| seconds| 1040 | 1001 | 525 | 419 | 196 | 67 | +| memory MB| 18235 | 15365 | 9631 | 8525 | | | +| % saved| | 15.74 | 47.18 | 53.25 | | | | + + +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | +| code |[Code](https://colab.research.google.com/drive/1gjL1TaKwc_xv2TcxJC8QWEWBG1msh3g2?usp=sharing) | 
[Code](https://colab.research.google.com/drive/15vlPjMr8xDj5BFhGdqunGaOQSMqXPEXU?usp=sharing) | [Code](https://colab.research.google.com/drive/1zPwvf-BmHyHlPMBxDsY8zS0BnQ-KKbCc?usp=sharing) | [Code](https://colab.research.google.com/drive/1X2uHy-arRsZxqWHvKHwwW102JaMwChD2?usp=sharing) | | | +| seconds| 581 | 631 | 361 | 315 | 82 | 28 | +| memory MB| 7763 | 8047 | 7763 | 6441 | | | +| % saved| | -3.66 | 0.00 | 17.03 | | | | + + +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| OASST | 1x | 1.19x | 2.17x | 2.66x | 5.04x | **14.83x** | +| code |[Code](https://colab.research.google.com/drive/10NzDreFbuWELGUuBv0MOoC7y3MBewaNx?usp=sharing) | [Code](https://colab.research.google.com/drive/1TwdkJ1sHsuEH-kgeCPqSFeCpOnCfz6Ou?usp=sharing) | [Code](https://colab.research.google.com/drive/1AkwjUkOF0XeRBMT_S8Uhh74kitEsZHla?usp=sharing) | [Code](https://colab.research.google.com/drive/1roMkp2UjbeK2t3DkNz50cRs1MT92RPFT?usp=sharing) | | | +| seconds| 1852 | 1558 | 852 | 696 | 367 | 125 | +| memory MB| 26431 | 16565 | 12267| 11223| | | +| % saved| | 37.33 | 53.59 | 57.54 | | | + +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Slim Orca | 1x | 1.18x | 2.22x | 2.64x | 5.04x | **14.82x** | +| code |[Code](https://colab.research.google.com/drive/1UNo1xsMl8YH7xnWnIVjDFnCAPfc0RGgu?usp=sharing) | [Code](https://colab.research.google.com/drive/1zbphER-SKhbSWGjHTfnBLPFyTgIVvaeH?usp=sharing) | [Code](https://colab.research.google.com/drive/156si33585iv4Uh-VILFglUmIMrNCNuc2?usp=sharing) | [Code](https://colab.research.google.com/drive/1_mhZy7dfl9jEnJRuJBZJ5y3OwW06jgQA?usp=sharing) | | | +| seconds| 1824 | 1545 | 821 | 691 | 362 | 123 | +| memory MB| 
24557 | 15681 | 10595| 9007 | | | +| % saved| | 36.14 | 56.86 | 63.32 | | | + +### Mistral 7b +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Mistral 7B Slim Orca | 1x | 1.15x | 2.15x | 2.53x | 4.61x | **13.69x** | +| code | [Code](https://colab.research.google.com/drive/1mePk3KzwTD81hr5mcNcs_AX3Kbg_Ha0x?usp=sharing) | [Code](https://colab.research.google.com/drive/1dgHxjvTmX6hb0bPcLp26RXSE6_n9DKj7?usp=sharing) | [Code](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | [Code](https://colab.research.google.com/drive/18yOiyX0T81mTwZqOALFSCX_tSAqju6aD?usp=sharing) | | +| seconds | 1813 | 1571 | 842 | 718 | 393 | 132 | +| memory MB | 32853 | 19385 | 12465 | 10271 | | | +| % saved| | 40.99 | 62.06 | 68.74 | | | + +### CodeLlama 34b +| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| +| Code Llama 34B | OOM ❌ | 0.99x | 1.87x | 2.61x | 4.27x | 12.82x | +| code | [Code](https://colab.research.google.com/drive/1ykfz3BqrtC_AUFegCzUQjjfUNlxp6Otc?usp=sharing) | [Code](https://colab.research.google.com/drive/12ZypxQh7OC6kBXvWZI-5d05I4m-B_hoR?usp=sharing) | [Code](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Code](https://colab.research.google.com/drive/1fm7wqx9MJ0kRrwKOfmLkK1Rmw-pySahB?usp=sharing) | | +| seconds | 1953 | 1982 | 1043 | 748 | 458 | 152 | +| memory MB | 40000 | 33217 | 27413 | 22161 | | | +| % saved| | 16.96| 31.47 | 44.60 | | | | + +### 1 Tesla T4 + +| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | 
+|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| +| Alpaca | 1x | 1.09x | 1.69x | 1.79x | 2.93x | **8.3x** | +| code | [Code](https://colab.research.google.com/drive/1XpLIV4s8Bj5uryB-X2gqM88oRGHEGdaB?usp=sharing) | [Code](https://colab.research.google.com/drive/1LyXu6CjuymQg6ddHX8g1dpUvrMa1nn4L?usp=sharing) | [Code](https://colab.research.google.com/drive/1gsv4LpY7C32otl1rgRo5wXTk4HIitXoM?usp=sharing) | [Code](https://colab.research.google.com/drive/1VtULwRQwhEnVdNryjm27zXfdSM1tNfFK?usp=sharing) | | | +| seconds | 1599 | 1468 | 942 | 894 | 545 | 193 | +| memory MB | 7199 | 7059 | 6459 | 5443 | | | +| % saved | | 1.94 | 10.28 | 24.39 | | | + +| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| +| LAION Chip2 | 1x | 0.99x | 1.80x | 1.75x | 4.15x | **11.75x** | +| code | [Code](https://colab.research.google.com/drive/1EtdStADehE4FVJnU2Cu6O8p9jDYdqG2L?usp=sharing) | [Code](https://colab.research.google.com/drive/1Ik4jO68odUiQIJ_szZ3xok5fk58WpA5Q?usp=sharing) | [Code](https://colab.research.google.com/drive/1E2nR4V3bXIWBQIUE7uR39lYPr3UikzqH?usp=sharing) | [Code](https://colab.research.google.com/drive/13jbj8D8FOt9KyXwZt9Yf2MsYkD8CyCVR?usp=sharing) | | | +| seconds | 952 | 955 | 529 | 543 | 229 | 81 | +| memory MB | 6037 | 6033 | 5797 | 4855 | | | +| % saved | | 0.07 | 3.98 | 19.58 | | | + +| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| +| OASST | 1x | 1.19x | 1.95x | 1.86x | 2.58x | **7.3x** | +| code | [Code](https://colab.research.google.com/drive/1aXzGgEM3yYB6SWy_XR81nQFWME40ksSy?usp=sharing) | 
[Code](https://colab.research.google.com/drive/1-5MdIOp0cM0scC-CdRZhh8OYhnGHqct4?usp=sharing) | [Code](https://colab.research.google.com/drive/1n-fgduZhRUsSjgpqNtVkXA3rSfE7iBdg?usp=sharing) | [Code](https://colab.research.google.com/drive/1z_GlHr2M_bB4lQrPhdWC7dseZv23cBIy?usp=sharing) | | | +| seconds | 2640 | 2222 | 1355 | 1421 | 1024 | 362 | +| memory MB | 14827 | 10391 | 8413 | 7031 | | | +| % saved | | 29.92 | 43.26 | 52.58 | | | + +| 1 T4 16GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Pro Equal | Unsloth Pro | Unsloth Max | +|--------------|-------------|-----------------|-----------------|---------------|---------------|-------------| +| Slim Orca | 1x | 1.21x | 1.77x | 1.85x | 2.71x | **7.67x** | +| code | [Code](https://colab.research.google.com/drive/15yLlJx9IE84kzx7ikky45pRcarPyUtEs?usp=sharing) | [Code](https://colab.research.google.com/drive/16IShIBmjKULWy87I-xURpj4nztTkAF13?usp=sharing) | [Code](https://colab.research.google.com/drive/1CJG3XLg_OQpCz71eB7Uqx7wuK_n2b-a8?usp=sharing) | [Code](https://colab.research.google.com/drive/1UmwuWHtlrC6MAfl9mX7A_TRfo5iSHDa-?usp=sharing) | | | +| seconds | 2735 | 2262 | 1545 | 1478 | 1009 | 356 | +| memory MB | 13933 | 10489 | 7661 | 6563 | | | +| % saved | | 24.72 | 45.02 | 52.90 | | | + +### 2 Tesla T4s via DDP + + | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| Alpaca | 1x | 0.99x | 4.95x | 4.44x | 7.28x | **20.61x** | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-alpaca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | | +| seconds | 9882 | 9946 | 1996 | 2227 | 1357 | 480 | +| memory MB| 9176 | 9128 | 6904 | 6782 | | | +| % saved | | 0.52 | 24.76 | 26.09 | | | | + + | 2 T4 DDP | Hugging Face | Flash Attention | 
Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| LAION Chip2 | 1x | 1.12x | 5.28x | 4.21x | 10.01x | **28.32x** | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-laion-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-laion-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) | | | +| seconds | 5418 | 4854 | 1027 | 1286 | 541 | 191 | +| memory MB| 7316 | 7316 | 5732 | 5934 | | | +| % saved | | 0.00 | 21.65 | 18.89 | | | + + | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| OASST (bsz=1) | 1x | 1.14x | 5.56x | 5.09x | 5.64x | **15.97x** | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-oasst-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-oasst-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-oasst-bsz1-t4-ddp) | | | | +| seconds | 4503 | 3955 | 811 | 885 | 798 | 282 | +| memory MB | 11896 | 11628 | 6616 | 7105 | | | +| % saved | | 2.25 | 44.38 | 40.27 | | | + + | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| Slim Orca (bsz=1) | 1x | 0.97x | 5.54x | 4.68x | 6.88x | **19.46x** | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-slimorca-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-slimorca-bsz1-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-slimorca-bsz1-t4-ddp) | | | +| seconds | 4042 | 4158 | 729 | 863 | 588 | 208 | +| memory MB| 11010 | 11042 | 6492 | 7410 | | | +| % saved | | -0.29| 41.04 | 32.70 | | | | + + | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth 
Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| OASST (bsz=2) | OOM ❌ | OOM ❌ | ✓ | ✓ | ✓ | ✓ | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-oasst-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-oasst-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-oasst-t4-ddp) | | | | +| seconds | OOM | OOM | 2719 | 3391 | 2794 | 987 | +| memory MB| OOM | OOM | 8134 | 9600 | | | +| % saved | OOM | OOM | | | | | + + | 2 T4 DDP | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +|--------------|----------|-------------|-----------------|--------------|---------------|-------------| +| Slim Orca (bsz=2) | OOM ❌ | OOM ❌ | ✓ | ✓ | ✓ |✓ | +| code | [Code](https://www.kaggle.com/danielhanchen/hf-original-slimorca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/hf-sdpa-slimorca-t4-ddp) | [Code](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | | | +| seconds | OOM | OOM | 2990 | 3444 | 2351 | 831 | +| memory MB| OOM | OOM | 7594 | 8881 | | | +| % saved | OOM | OOM | | | | | + +# How did we make it faster? +Manual autograd, Triton kernels etc. See our [Benchmark Breakdown](https://unsloth.ai/blog/mistral-benchmark) for more info! 
+ +$$ +\begin{align} +\text{RMSLayernorm}(X) &= y = \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ +\frac{dC}{dX} &= \frac{1}{n} \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \bigg( n (dY \cdot w) - \bigg( \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot \sum{dY \cdot \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w } \bigg) \bigg) \\ +y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ +r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ +\frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) +\end{align} +$$ + # Troubleshooting 1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: From 27bbd6b2cd927b7cb0866a03dec41efb04470501 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 14 Dec 2023 15:06:01 +1100 Subject: [PATCH 0069/1088] Update README.md --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 988dfa8ca6..45f235f6c7 100644 --- a/README.md +++ b/README.md @@ -297,8 +297,7 @@ Manual autograd, Triton kernels etc. 
See our [Benchmark Breakdown](https://unslo $$ \begin{align} -\text{RMSLayernorm}(X) &= y = \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ -\frac{dC}{dX} &= \frac{1}{n} \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \bigg( n (dY \cdot w) - \bigg( \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot \sum{dY \cdot \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w } \bigg) \bigg) \\ +y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ \frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) From c931ec40c6dd9d7cb9578be8f53ca079ca668d28 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 15 Dec 2023 12:57:08 +1100 Subject: [PATCH 0070/1088] Fix packaging, readme --- README.md | 7 ++++--- pyproject.toml | 4 ++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 45f235f6c7..422abcf3b8 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,10 @@ ## 2-5x faster 60% less memory local QLoRA finetuning * Supports Llama 7b, 13b, 70b, CodeLlama 34b, Mistral 7b, TinyLlama and all Llama archs! -* Mistral 7b [(Colab example)](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). -* CodeLlama 34b [(Colab example)](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) does not OOM is 1.9x faster, uses 32% less VRAM (27GB). -* Kaggle 2 Tesla T4s 5.28x faster on Alpaca. [(Kaggle example)](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) +* Llama 7b [Colab T4 example](https://colab.research.google.com/drive/1n-fgduZhRUsSjgpqNtVkXA3rSfE7iBdg?usp=sharing) on 1 T4 2x faster, uses 43% less VRAM (8.4GB) LAION dataset. 
[Alpaca T4 example](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) 2x faster on 1 T4, using 6.4GB VRAM. +* Mistral 7b [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). +* CodeLlama 34b [Colab example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) does not OOM is 1.9x faster, uses 32% less VRAM (27GB). +* Kaggle 2 Tesla T4s 5.28x faster on Alpaca. [Kaggle example](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. * 0% loss in accuracy - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) diff --git a/pyproject.toml b/pyproject.toml index 3520b7dd93..44abff8e61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ huggingface = [ "accelerate", "trl", "peft", + "packaging", ] cu118only = [ "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", @@ -70,16 +71,19 @@ cu118_ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu118only]", + "ninja", "flash-attn", ] cu121_ampere = [ "unsloth[huggingface]", "bitsandbytes", "unsloth[cu121only]", + "ninja", "flash-attn", ] colab_ampere = [ "unsloth[cu118]", + "ninja", "flash-attn", ] From 399f8ed56f40df0919208d1ffdee64a31a1b22c8 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Fri, 15 Dec 2023 16:35:24 +1100 Subject: [PATCH 0071/1088] Update Colab to CUDA 12.1 --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 44abff8e61..b93dcb748f 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -65,7 +65,7 @@ kaggle = [ "unsloth[huggingface]", ] colab = [ - "unsloth[cu118]", + "unsloth[cu121]", ] cu118_ampere = [ "unsloth[huggingface]", @@ -82,7 +82,7 @@ cu121_ampere = [ "flash-attn", ] colab_ampere = [ - "unsloth[cu118]", + "unsloth[cu121]", "ninja", "flash-attn", ] From f380cc1170447800c112dc8568bdff3dd34c79a3 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sat, 16 Dec 2023 13:14:33 +1100 Subject: [PATCH 0072/1088] Fix Mistral BlockDiagonalCausalMask fix courtesy of https://github.com/Rypo --- README.md | 2 +- unsloth/models/mistral.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 422abcf3b8..a348e48143 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ## 2-5x faster 60% less memory local QLoRA finetuning * Supports Llama 7b, 13b, 70b, CodeLlama 34b, Mistral 7b, TinyLlama and all Llama archs! * Llama 7b [Colab T4 example](https://colab.research.google.com/drive/1n-fgduZhRUsSjgpqNtVkXA3rSfE7iBdg?usp=sharing) on 1 T4 2x faster, uses 43% less VRAM (8.4GB) LAION dataset. [Alpaca T4 example](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) 2x faster on 1 T4, using 6.4GB VRAM. -* Mistral 7b [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). +* Mistral 7b [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). [Colab T4 example](https://colab.research.google.com/drive/15pyLgRN97B_jA56HS0esx56knA9I5tuv?usp=sharing) * CodeLlama 34b [Colab example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) does not OOM is 1.9x faster, uses 32% less VRAM (27GB). * Kaggle 2 Tesla T4s 5.28x faster on Alpaca. 
[Kaggle example](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 8b1fd0fd84..7eb651cd33 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -37,6 +37,17 @@ def MistralAttention_fast_forward( bsz, q_len, _ = hidden_states.size() Q, K, V = self.apply_qkv(self, hidden_states) + # Check for inference + if use_cache and past_key_value is not None and q_len == 1: + A, past_key_value = LlamaAttention_fast_forward_inference( + self, + hidden_states, + past_key_value, + position_ids, + ) + return A, None, past_key_value + pass + n_heads = self.num_heads n_groups = self.num_key_value_groups n_kv_heads = self.num_key_value_heads @@ -152,8 +163,10 @@ def MistralForCausalLM_fast_forward( elif q_len <= sliding_window: causal_mask = xformers.attn_bias.LowerTriangularMask() else: - causal_mask = xformers.attn_bias.BlockDiagonalCausalLocalAttentionMask.\ - make_local_attention(window_size = sliding_window) + # Fix from https://github.com/Rypo + causal_mask = xformers.attn_bias.BlockDiagonalCausalMask\ + .from_seqlens([qlen]*bsz)\ + .make_local_attention(window_size = sliding_window) pass output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions From b0f5ef5526384224e573321f94887afffe7446c0 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Mon, 18 Dec 2023 04:23:16 +1100 Subject: [PATCH 0073/1088] Torch version, docs, readme, general loader --- README.md | 31 +++++++-------- images/unsloth made with love.png | Bin 63453 -> 60119 bytes unsloth/__init__.py | 4 +- unsloth/models/__init__.py | 1 + unsloth/models/_utils.py | 3 +- unsloth/models/loader.py | 63 ++++++++++++++++++++++++++++++ 6 files changed, 83 insertions(+), 19 deletions(-) create mode 100644 unsloth/models/loader.py diff --git a/README.md b/README.md index a348e48143..872bf7fd49 
100644 --- a/README.md +++ b/README.md @@ -5,23 +5,20 @@
    ## 2-5x faster 60% less memory local QLoRA finetuning -* Supports Llama 7b, 13b, 70b, CodeLlama 34b, Mistral 7b, TinyLlama and all Llama archs! -* Llama 7b [Colab T4 example](https://colab.research.google.com/drive/1n-fgduZhRUsSjgpqNtVkXA3rSfE7iBdg?usp=sharing) on 1 T4 2x faster, uses 43% less VRAM (8.4GB) LAION dataset. [Alpaca T4 example](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) 2x faster on 1 T4, using 6.4GB VRAM. -* Mistral 7b [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) on 1 A100 2.2x faster, uses 62% less VRAM (12.4GB). [Colab T4 example](https://colab.research.google.com/drive/15pyLgRN97B_jA56HS0esx56knA9I5tuv?usp=sharing) -* CodeLlama 34b [Colab example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) does not OOM is 1.9x faster, uses 32% less VRAM (27GB). -* Kaggle 2 Tesla T4s 5.28x faster on Alpaca. [Kaggle example](https://www.kaggle.com/danielhanchen/unsloth-laion-t4-ddp) +| Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | +|-----------------------------|-----------------------------|-------------------------|------------------------| +| **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | +| [Colab Alpaca example + inference](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) | [Colab T4 example](https://colab.research.google.com/drive/15pyLgRN97B_jA56HS0esx56knA9I5tuv?usp=sharing) | [A100 example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | +| [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 
(59 more examples if you scroll down) | [Kaggle Slim Orca](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | + +* Supports Llama (7, 13, 70b), Yi (6, 34b), Mistral (7b), Tinyllama, CodeLlama (7, 13, 34b), and all Llama / Mistral derived architectures! * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. -* 0% loss in accuracy - no approximation methods - all exact. +* **0% loss in accuracy** - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) * **NEW!** Works on **Linux** and **Windows** via WSL. * **NEW!** Experimental support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290)! * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). * Open source version trains 5x faster or you can check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! - -
    - - -
    | 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-------------|-----------------|--------------|---------------|-------------| @@ -35,7 +32,7 @@ If you trained a model with Unsloth, we made a cool sticker!! # Installation Instructions - Conda -Unsloth currently only supports Linux distros and Pytorch >= 2.1. +Unsloth currently only supports Linux distros and Pytorch == 2.1. ``` conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ -c pytorch -c nvidia -c xformers -c conda-forge -y @@ -47,6 +44,11 @@ pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" ``` import torch; torch.version.cuda ``` +2. We only support Pytorch 2.1 (2.1.1 bugs out for now): You can update Pytorch via Pip (interchange cu121 / cu118) +``` +pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ + --index-url https://download.pytorch.org/whl/cu121 +``` 2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the "ampere" path. ``` pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" @@ -54,11 +56,6 @@ pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu118_ampere] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121_ampere] @ git+https://github.com/unslothai/unsloth.git" ``` -3. We only support Pytorch 2.1: You can update Pytorch via Pip: -``` -pip install --upgrade --force-reinstall --no-cache-dir torch triton \ - --index-url https://download.pytorch.org/whl/cu121 -``` Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. 4. 
If you get errors, try the below first, then go back to step 1: diff --git a/images/unsloth made with love.png b/images/unsloth made with love.png index 9bf7ec93680f889d7602e5f56a8d677d6a58ae6a..20dac04f388d8eec89066d81362359994e855786 100644 GIT binary patch literal 60119 zcmYIPcRW@9|G!p3*&|tHkL)5@DSL0Sg(92m8Oh#-%7}<;+2gv1l)bm>y7tAr)^%}> z-;qAQ?;joy?mffJ9yqvyo=2jMACN=u_LUuCIan8Ms39N#OvBGH&qin9?EE=|t`9*scdt2k4@cHw5 z!ro(30nuO|d>wpzg6|_(8d7A!65`x)Fnu08J^gWddVc!cZ_^{7xmvHax?VB|wI7(x z)xYBLCJFxw=&?ko2*`X6BZ{_*A=OHcc~z}s44ci)mfQSQ0(yin1z z5)JwU+QJ=zpFjKBxQ(xU(~2RK7$gaDBGB%xzY*=jjZ|sTzW${GJE65d>Iq>Rq`v&; z>(LafNO90q(ebfp=s0KsRJ0s@x9S}{TR!!Ey)g^1)veG)WX>7R`#p8w;=6B?@D=jT zOF*aiR^PE}A|S{4X}q`cj4=U}JRFzGcLq6$<-h~Qi5U*&}Ps@TEnVS9-=aiyt1LUC_ zB=mRI*Hzqa&np};dt>)iQh;ZeQR6(EuGaPX-F8;~1L(CK7Eq4o z0VNf1#Z&oH6;L^j(B1zgLJsHN2hEYfZXWh)-Qz}5Mc%8=3BDG3_q)uktBE8<{a|ck z0%hY2_2njna3hy;oIq>1Mz*Q^&7cFV zTD<=@65kPYDhsOEf2~9Yd@T%WI|F?y=)w872fClgKxVkGHGP&S`^SSp%YUx9Nr$rV zY!O}EoTu}CJ!uoPELIucdRgX&I{uDO=;EKiq^q%!Q-Hm$KJj~i9=rzPtkRsB08S0mSyK_A>_>Z^MzyF1U9C1#W+qf9j*Q}PI5_@wzM=Uy1 z|ArOCb2iuH@Xb_-f)MzUo)omh7~1~FP3XVpau|X#NUd-PrKD~(l9ACv96@~rmH%d( z^o4$FLT^$x=N%y%CU=yiO%Ft>HHPtt%j+(LfhDKx1dp6w%FVYsfT3FGqW4MLpA!raw79vnO6Oz@C(p z+zc==9dmj8`?tKJ$)jbawDjZx=3dpo$KL+${L8u72o;`5aQF4e$tj8vi&l@HX+CRQ z|704P*T1U2A4mL}|Cj+>j!e?Aj;25Pxx zr7!QDjW3_KwRt#Z1)jU@Z5{OVrkL24qNiy|Sv!fsm}A$Y8eHh(v49J&+KTR#{Zgkv zABW?Qso8I!Ofkb+E`D9>>6T$Qepy4pj+Ql+*jCQJ`^Fw|MA7ygWVoA8_zn)eMlY|u-hbxuzM$`s)Kld{)XyPv^du*T=t{RKUT)jU<1r|tM2%|WhiN+b+G&@;w zit-=yJXvq`_8UAKDV2)j-3nV&u)Ae3MT1>Q=XL~fAN`G8WrX(9;Uc8p)8%Ftm;iWI zR7w*?o8^Zzc8aH`NHs?vSm6vY$EwkoQEpXj7avSI9D<^r{eWIn+KiW%ex2;I;MFvT z3qB(%gH7IBttQQ}GrNX`&8%Trbcg8KNSdGPcZ`NTNd-=3hQj5-i} zYf_7R{VVo754t0f9-_ZcE!((3e`~agkud$B<1L)I2ZDO@Jm+s_WxtEUd&uzkfmXRJ zHRhVW`e)&5Z5N<}*xw1?xcSS)?#3k?4olmd&Q*>+eX15JV%DD z{$HsBXY(xf&9Enj^Qne`RBHZ9Q37XfZYo*^Wea#+GJn&Iira0_DQ#Pw(Jed_7a7$J^7EdqMl{{b8426EYG2MA<8J~(P`mpSFUdkxMgtdPm44m84sLsdX!MHs(1Tiq{?-ZBY%3{jS8 zH|0<*&ru6E6#flgI~&jKZYcPBFs`1(Tr~rjv-(Ft 
zmTxWO!LBvTE|${jx;t4;&Myzj6W&%Z-cqBDPP=G*F@?j4{g28va|Im7QdN|4^^ zb$KdO+Y4G5EF8MZZ$v$eAg*uzx#0J-7$B?<2zHo5b19&;`8Gj61zMVeIAmsaMG>7! zIq!lf{56V$L<(T_7l@V$s5rsGoi0mMiMT&_F+cQ=zZcDs*|? zD(cC<5WC(xpEWR1fFM$P=8h_l66|O-rS|4`I0~LLY-zFMS>2x7VuOu>ab;u_q+dUo zLJ32>6d|?dU+e?yQ*mdAaLzmgAS%&+3WD5%E2Bcqxya2;4G%r|C;M z@bDn{zV&C^fI0K*zVuxNozrxxI2EuS=H~UE*D81Z|8Ps z3pdi_$;i-5oKIavXNhTAZr*nbqP^E;Zj;>De=qeiap zd0_M;kZ9smuh)Y-ZGPZi@IK^EourQ%$y$GNC?yV0IT!!Hf_g#q2VHn8aNzakz~t3m zTO5fZ4!(uhY>Kp09xH9Va(Jz@GO7zTa^>jBdgV-4TmkhlRZP9n%5!v5wf8x6hxupg zf@-xStiuAkBo0>m8s7X;CgLi%qHRn-W+2#xzJv-7+u!9Ph4Fysg{NxYKIUD3GzJl5 zFC5@`i=n&%Hnxut(=!9SCd|MC_U&ORINLVYQmO7n`(G@52q^f-|nBYz`qeJ)H0#MSfi=iNqWBXTbr| z)s2Zf8I!W@xV*FW6GZ&P<}?eNC3rzt;tm46G+Fm`m{ZS%!^I?)JC79?A z$*~!Mq@qY2!#5kox9lDtC8m1^c2lP-1sAd;(+)Jn<~CzEyeex@?eS!+2~@AX8YO6mnS z%{d_il!wVX<4^gb%H(UKLEbY)t`B%%^-v+x*^NXM2y@QvNPLdP)Mom=_WR`UUTl1K z1Q>%k{QbY-fB3WBuJy+A}Auv-MW*$-iU$=pWTD5GIWql=?=s9i)_fqgjN+X?R~8nh7kkC|?%i$@zJ^;G;mcWWlR54i{q zy1#wS6rw$L02Y+p}0)U&wIN& zj9g#pwi&H6XsP?w({osYWe6-;%EYnvH%4>WkheUjIX@ik#=`Xe9h6(c#ISS_RP`NI z(mnY3br1^YcXW7AG!KA&4X)c#fekjb;$@eCYQ_oS&~-TwBQMsZ$kwA#0>SWHp(YZ>Ilw|jDAJVqHj1Cq{ zk(*1z-=PUjaIX{;Ucs81?hYDQS}Tn*pCBIa2~n-+I5^YgS3q4Q`s>g(EAo!bZVKSw zXOaiMc{=Bx78Y~2D*bN?KSIL+kl4n*h z16ywnHM$oGFTqIN_SJoi&G@q{PYz7$Q^-M{^bF6DcM|Ch$hPq9Nt8HI;{p6CjZ9S& zwY4L2h*Vf(u>zo0e4+cKha4}Q?5843jRK>`1H69~)n=iv-7`igzFYlYdp>ah*hlxS z0&3rACEv)_>L2b^!&2eNbCgVt_J2q7`d9eRkT-G!S@d03QC0QgLFiMEK0ExQvc7xH zO&AHLPqk;SJz>1TXS?3s$yr#V-Dk##j4%(qs$gO+DKUj{QF zmXa#JYhK!M75&v#cu!vd+-l05r!I9i= zfSV;IfA+(`u58Bt>!)LQTyUi9?R2CLU^tAdfEIf&xQ0g~wWer{GHX^IIB~A(dvtB}$ z))D74C%$z{eOn~d;pB>-8#c^M!v)!U=#|;sWog@-s@8wCx60EFws8WNVph~hrqs8Mt0*?U z!#`DsYoD+-3_K!)EVOsYN;a=&72NE8_Cu@6%asYpuL|I3P~CP(q#SYZTa(!cUIhB@ zaUL+O* zvm;dy`jL(!cjzSnNvg0IP%0U0!Z?@Q6K9!r_=qs5?T@$juFbOyCVFN1X*@*&_^c}C zm;aWFgZ^eERareB0^Q3=-1Sky*XhXpVUeC-MO!)p_rC>uorH@L?AkQ$u5Mt#waTU* zOaL<0ZA<;~pXRb5@w7$^fCk`~ObT=5L`WxI%cdTZc_Ae+%vMjHRlPqvH}MYHc+5R8 
z@bub9QhIVkdqlz1{K#9R1i^a7$Jk&$j`ft(LJOnGk)rGpSLY|5rhEv~&%2&yn)gbl z!(6>xI^MlAM@U<y!%LE-=#T@Y%?kzGaKPgi0 zmY9I=Q5Q_1)~74|#}gxE_nx;n1~41{CGm8yUvod>m-5Y=Js%MU$evsqZ3 z!5}*N7}d8&Sb`o=+bCPVjN{&l8QD7gMjBxHc^K}Idq;HE>NQ^Vx7OQ<&PU7@u2_|E z>m4S+3(sXg){pXFTEF^*hgOsV>knw@>FKHe4eHbBGVKw1R}8Vsi_F2_`TTGyv1oVe z8_UkzHx{P?Sa)m%^A1gGXq7TXYLTm`OkO%pdR?=wj-<|?Ugp2J zq0BQHOssEDP;!?wuW@1AQqoTv4zWE8Y-#jT&^<UUp)v@{a3F7Q4dIC7;OC^&6YqE_nNgk^M z|83^_M#~VzL<)f7b0utzusn0vc(=r)k>3VIoV;ShZdVsky~*hWVB=x)?DbQdp~e4S z;Vpcj+ikym>CwKFn32I?(ono5D@)ddqgs?HmS3j)Xxh-R85V?Yg(y_lnh&-5i?*Y* z9VH!DG7?^+P#Vts#qZ>T6~%fyh=;f;zVe20+3U;;*PSM3G?^herk7Yx$%;&P zCZYI~NpDr(we91+r9rn%pr&4=XwN(NVY?Emq-ZMcRT7nt9mHW=i6;5=2^dX1Zi5Wz zAshU6)I#$rL!epJJ0R)lE=fD=9_p=Qhk<3`%m9Bm@x9LPwK}{vb&42Q4gUxRp z-mnV(8p66Bw!fz#dFc`8A}(?_p3Xf)D6|4(+X>{~_d(x}78b*qN~UHsFETBgCDQGP zgESR6AH7`3)zc(=Z@U7I8VSxAX=J%QYx4dJD&u2#>A_L3T5VljUO|CE^k8Xe=@$ii zJySl-Yz%8_$uwhCkUIOfbTZ7hb-gPyHi`RAL?tOs<(-LeCNg*{s0{-;j72BNhbv7A zw$08O$*Ye&<=ToLC?H1V;a)CVj(BL-wIS$%?({C)hhld|rtXV?Jw!?PEBWGs@5{d?U`*iu&It69(iueXLA6z^BIQ@I@_YVox z;lipaii*|~-a6;mceRd_ymuis-K0*?mS;r$EiFOR~M$fa1vNAp5UX zBmb@)gv58#xz{Z|k&I=-3CsjdtX5L!Tuhx1s7%ZVP`3KRzGzAdmwqAX@z@hCZ;)(S z`4l^AtZQTxSyv~9-v8{{+Rj(ek|&QHDNxhV(_3OzN?#m&oJiEz*q9*`^vI=gh4y`V zx+(L9248C%p5t2A%EPW02s$T<#6Q6r|6g?DH74Z{!$ytcz3;JMQW7rgd9PyyOC}cG z>npYVEWvU$7K(Dr3?@_b5R@m#-)H?+eV9dt=*tAviiIC`Br>NvPfIPpCehvR-d*E3 zr1(~ZIThFp*sAItYW8uRbNhKS4LgQ-rHA79wpzcBc~GsB@-fyjjcixu*Y$rGNbcYg zB&iAyt-7^r3}kukDZzKMiNw8|yzUF1{9K3_K2CV&ViDGNM*4%$*=w1q?WknaD1qtY z->O>O!{*nXtFFoXj@^GBKru@uds?a5XtaH({1T<5y>;__Baax7nVG z7M*ch2j`bYV*xp%H1kwsXT#=%u#9p%cPn=vQ=7&6KeQC#E|cZvQ{VvcAJx?c_brI$ zoEKV|5@pXHwqlWz_lzqaM+Z)k?|SEu95f%Hjmk{yc#s9+vIWJmBO0*06&CnPGM9~S zw+L$gzdZOPY9NTBZOfj_?OrGN?F$x_!`G+t{P9QF9zpHdky}1INA1Um2ZNGBGssdT zn^2QYBQGeODp$-af+|(`}ygCW2Muy2nV#J zq{LwH(RUX?W`BwxKEu+s6ZGbX-*oI6%(SdBnSiUZ_EvOIifXAlv0iG`E8vy>U@%+@ z;3WeW8|E#-C;G1kH`jx*|4Wmg#)eEHtI}o-i?JvLIWPotq$C1~1=7xL{Ys9tL6AWv z7SXS_)6a<;=IosQA?;1y!TmU1-tLY0NNahWXY$4KY6Si~&$)DbzH43#NTOi9f)M=< 
zv(A$yugw;00nOZqsRbwAr_Y~*5*O~Q$WZIJ9=43{nQ&@l#}TpwYS;d%ey)kW2Dn@w zicyt6%6r~WXvqAce%-atru9V5yS)B)c46Cp#LGQn6K9ULUoO6`fdyn0O^utls`xcJ zZuOGPmlUHD07OfF+D09i9^A-{MFgzUeiw6I!_dc)ZeUa7N0=yeD3dP^XI*!9yIJ_& z2WQ16Ragsed2;6oEezvblh$w3PT;@w%n(&?BB z7vm^DNpTxB8~&N+vp3n0M+y!hQbMn9{}2%c(;U#ZUuhc(E5XoYkfD;J+gE<1EXYm$ zzW6O?=v*D6ma|#r4&bT)WVLKt5%3{m&Izpnf)jveji-`B2f=dX1cpU2`(M}4(BM^d z<(S%zLO*7e)dviRpfgVsVq)T*VI`KuP&`s?=!G^wwV6dH`@1s{@kOrjuf(+HM86K= zn~iOPY<2%(!0)}gp$@~hm+WJ(sW>gnW$|5MEZ(l&3r4{Xd?vTgs84@m5u&3b!rwhv zZuD)w{gZ_)PV-)59rmL@>#if?#i4Dd$NL|Olt8)2w?*~)qxSS?XO9Q{$g3pmhwd1( zxOTiHvQEsZ1pHjH-6gECq0z?glax1cHTR{24<0fa&M3t%SJiFwTRELT$*(uAm`|_}WHMC;Mli;(T^ivp6NUGKQ zPH`C$lvyDM!%D&dhfYrOO$Ns+T~sWpG2r>N{t1e8_p>u?d2rHaNo7sjU7 z8+lvTGThnz047&pEC z?tgFWcOZKZ`b~EAeltb(!OqBtZXC1p?MOzC>!gxfLh7IHt0-jrl&iBCuOVbX8pm^q ziFva4{nra{uLC|m+hJlUA_SbjSIOz=SxfP-|G@nEjyNbJ@t{rd=^O^X68Ew1;@rd4 zT(5=2)R+g~BySPvES&q=n#@k`jwkZ+KD{{AY^Yhlw=I16c5GIArnmpo12uFKuquNF zSc#0NBjpps_UV0-Ux?Q8Mk|Up&-C@*GE4gx<@vKIC3FIZBeBY+n^jP-MwaVil!+ch zFB)r9cdu*h!{!qx#1>W~5WZU>r{sHvDgAO~&9SyIgPww%;+A2+_<$1hoU$vsq4H)` zz+|9f1KLeNy5{-Zl$m0#}c;*@-K;uDL-1uS{FwOudC06*u-ZVSjJ z5hGYoAM3r~@?r?<#cDm)^mqFXAJMXqPo?f^UP_Rxd^OH%unnzd;>tVFJVKx=>izqi zKH3Yp!{z%jgz>tWd@_yV-gP*dhIGgp4s?8bH~$XJoRW2VaIZ!BE`c9 z1RCxvm$?95%|0XqPZkNyPQeYSIYKFAh0T>Z(N6s%rf@Zo9&7EMatzi6r-kABhwe{w z;(|Kac4Uh&{`L{C$@8Gi>i8zJew?63`7ako&Z*qARkU9J86au0ho*3-?ZCpx;M?^J z-Yiddl8=4&#tHoeXqT6lVZi6Vye7>fzb48&%9oxRorz;e4@oE#=dT+d5T0(x?>=w% zfxzv`GVaMX#(L?r-Hwmim(d4GsA^H#1Oe zrejE|$;}ccdq_{G%W(=YKH^Es5b23>a|T0OnW4>63!Ak zvd7D*2D@O*tVaZ3AC!w_1cOmv0y^Lo9C5@Ne*g(_nIg*29?>HT2qL)hIG!-V2thXU z4krneJnn+=Cl~2)FI#hK>5T-=$Y&X934T9`op9v3HWSg{(s;eDu9_0<5hAZ+1o$o> zu5>a*!z70z(U$}IA5f6<6Gwm>|7p%rK1P(Kxv~gghfm}w(TeZizc0GpN)1i*_CLd7 z$W*_?R9-YZzj^LDZUkuZTm9yVJkp*dy8}g|I+>&GN-$CXtCPlP`zUlRi+M;nD&YH8 zt@4brBpB`sJFnf}dA4GL$$rR%m}McGH*)N3wr;; zr&1cRXED>@{#ErRon?@-a~Z~cP>ZV*JZ^Kc)|V|FFmRa^X2b?r@q=TNfD#_>PW(qJ0+W^H+qgWm5>}h0nyEJA-Z?Bkph-`%)FlueaU(+oy$!$&rWMa 
z)xylcy(X4~3XA5sjD(fT)Omw%>pAv7rM;yY)oOaY2^7c)f{I%GB?mwA+tg(ai=1@# zrK!+fBV~+Q=}jk`e}n>Wm$9BjIW^=Tx zo3E~RbC@5~-MX|>FP9old073TzXKI=5fBK#p}oDmHh6301^Y;CBEx*mU;xt5vcj@3 zIOxRAn)N(>qR^67g?o#@LFu7Y8_4eS^>;SC*WlR{qe2BWfX+s46C`d zwFdKX(8<5L_3c|ZOAy4}1C*uSn{KvXG{!YD=1?ZXrY-!hvmbZHMGXh0N~0Q&FU7d8}-IWOTxF+_IA^N*s2#!aaICjSRJ z_uqFTrxM`lE7Vr6CUNZT9ov7*5Z`ZR6x^XVx;L*E{WRA9i>W5g=>ES%k@U!m?2&=Q zf{J@cHa1Parpc_qdF9@g6LeGh9-j_b*)4H$^bW;*YX#67!XQO=hrrSWu)W;;F2ddP z*Uo(canBCk_Xh=fPjrQa&~{J?i@`PFDSLP|?C7 zTXnLYZBNqH79KTlrl0n^^ARX=>;+uM9Q3M-FL%+t{mk(+=6Uzmslq^u^yELf4&h1sKl(vj5y_pZaA zl!>78<29S})8nGj(i_iOMvYhk0Mfj>KPb!h2I9SKVqgHNtQ?vMW*YfU2h?igyi}kh zkYYPQVv!$0bzkC!{|$raUBg$5bu49}6o!lA>tyRUKS2v+0P(Ie;GNjg$`VstQZx);4Tee-32i-r}kXD3ag1V<;rs`{IV8EgK zDUF#6Z}eOnG0Mq5Js8z0s;Ds9-UApgF0-wsKWFf$K2tiU5kv7O1~+{b z7RCQiWt|XKc48~p#kX0!v+lo87k*y0{p4VLQ+F*$9`cMh7^o+tzfgMg zVg(KW+|I0wO|r|=_gmA|go2RMA2CyEB(-k3TUoT27Z@JI)3wX|1gI@x>kfQsIyyo# zEf+=3fpuXkOkKtIR_fl$~Y+bI>{NeJUnH$wR=$v z#duI93yNO`sXOx@MyFRErDxSAa{~hds~=h2aTXRjL&bk;43bTHLRdnnQnvd7&B5*b zTYzS7?SQscK-qphMUVf)Wgg3UNB57VlSj>PZ#5dDY!lsN%D^nM9z zP2p|7Trbw-O5=c`IQ>KvCMLU!YG%B@>Eh`c;A?F(Cw6c>u9CvwgICMJh??)|Zuf@; zXt^($Qa*z;>5x zS+KgPd#sWE6zkzUvt}l6(98FW!~n+DuE&sb8o z^@H_yMFGss_y+rY42ZnTJ|ab+inaFTBTxrLJIhev32C$&B{pMh9tRT%Yz2W11(CEn zQw5O5d`ru~-L5Hst>jLTvL8tI&O~qhl;0x!#5mMJeY##{`-d{qAMw4|+~Urj$PYK& zWAZuSz*o8nU3K65TN;8sCN@mAuKk6N>qOzPPl6nZ;K8h+6n|d-x->X?(m3AbK^Kf% zv%^qyc4V*;arO2|r*XT*lF34tW0!)9KP_M~(kl5dS#2KW_^s0SWlHE7Y~PRhWvZ~1 z)$|6a55;DpP>>5|K(~toA1xA^HXoNTv+Xv^5VEiMDj&4AHVz9t$U!MxLBut-Ie$s} zCo~upy4ny|tg62FwgCnYsKx88<%SIO8Ih6bxB`RtbZaN-L`~(rWP?lq>sBxGxHf@dseSWHJ@>z zec%*F9#>?)6X!_`G?Eo5iSzKm#{uJ~j-nQ`O@i}NM&AfsoC)KR1J+ygH;A~=%v&#Y1QKm2JUrJ%^QEci%sK5`ux>THESOhIaFh?_BIe%ED5$ zWU82|l&AyIFP>!Q@dW$s_lvvt^4EXK&29I{ksnh#IB$BBnj#QpIsb!B5NWKabH$xA zIlIf6!&#}_bM&`S`4wlIvuV%rVT3#5rUkIsopC|{mR<4eM>UAe#(Wqr8&I}DA9@Nz 
z&TA(vVk;o$Gg=uBlfztJ`n5fL#XrW#ZNvu>kbAk#hlzkg7dWv(hmH!7>UVi`n_6IH&-;_gwUixz)B(@11Rq&bQ&YL;}O3k7<6(7|%e6vxsuf6!IpD8_$uvibGqNLjs_H?!u3Sl@)O?0%o9FA9ulIcva+ zc0SV*MYSBh{Ia%n%1o5-2*K?ox$<652(aR6Bf4Vg2ulw&x7f1iUJPc(Bj?N3KwXbK ztWjsX72}?i!LOxh!z+ATe-%qh3O9TiRl#)ziu_(wwRhc(p zOvSvJJOva*deJxFGDCxr_%ktke#TUy|9;MB?5p`JhL~5DI>`OZ-Vod`=G7;P*mw(; z)BLtpB_*2lrKA)KoJ7oJkt3W;W<7r9r{a6*j2y*Y1d7mqfX5$=h=9YNUphNVEJ62w zC9)*u<*^=Y&rZH18L5y?(#o7tI>qRHQ*WPZ^wcpid3Dm1d7%EmVGq8_@hXL1XSm19 zk4w$81+vNnr@?G&Y#^jHtuNR6?I6H}cGDrJT~*1M_Z=v^q*bWpBQI1C&j@EMo8jl& zpsqh2C!c|=@6cuPRPJVpJ@_NchrnWB{QS#6H!4Ti%18Ih%ShHtT^V*O3TZk26xX?i zVV@8ta)A1=@gNlL`~4u3vbD8^4Lv36$`H13JDh&G)P_faBQ)4wY=jVDn>U2FsyQ=KTTVXWF`#jkiP6ldUrQ_k$#pgO8O?-pxkOScaUTc|BZ} zDgncqh5MCjojNyQy%fE6jc)VVo-v^5lw8fQNy^G9^FLI3^VrV`au(=Fc8e&WfrP{b z+9I)h=1$Cm5C>?i!LraZV@R;HMewm*ru#4YG^N)%3wXPJD<(jPuE5;{sigZ0^FV`T z$pozLN3O*JSVsD)0+mF5RGqq= zCT9)qOYfCbcjOhHF2)MBY7b+LMP7|@U(%)Dy}eH(Ntt%wv4X!|hh*jz7Za1v3EbdD zMx=LVy-CiIjRN}X-2ReaKagqS^eA*GP&~=@SdwzU~2N%oQu;!I|Zy&@NNDQ2+O*J zG-bT(W@+{NA?dS&(GNo(fRVw{b6B_f^H90O0UNjP1Y?cwcJFUmUfjMonS|(@%E<8^ ziHnObU82c!u*4sLIhRjxdXxVtuY9g8=P(x&;K+l#*h)mx7|&Nyh3~0t{2Q<4@E`LB z7d9p6X$lNP{hJ<~C!dbU57eYU0P5#C;%D0PmY6+inAf;<@vi%V}Io%4&vWHVP zui%TLTCxUfg2NPVLZ+-Et!HW+(x`+(nC{c-t;sMMDKV(mSX|6*8k7jJRitV;wrF$Z zT@`WNJyq|xyROCTH`vnmi=~sWxLiyZF0}NHMp{f(STHzaj+Xeb<4@}u&15Q^L%^<( z6EXX`?+dNHOZ`56^~vwef^h+d99*-63|WMIQ(@Yti$$nlB)x-P;N1?f^@E`zeJx z5{$91tEzm$idj3j%|wU&{DRLU=S6Ga?HzCQ?fdn0C#P9em@D#1lp5M3m1IIs^u^~# zW<*zW{nsYH#0p;wu?x5;QZd7^OZ@R@a5?Fz+oZWqZhk)dvUJ*Epc13|+l_r)t;Z&`768Gb=(mWXO_63zePp<7XS=u; zOy%ZQ*w%HIErJRV(2or~@mfpf5j5z3@9K2o_&1{)dERsejmU3D2=Y#zD9t<|Km_KY zWjbcWXUgN@jwGwKe)3@P2MbJDJ`f8&#B4RFX7m13-3whg`Lj_Qyk_>ad(XlmS%Rko zpSZ4F1IX!zZ?2qXotDAC;x3(G7P`lcEX$ZczfR&053Wys10qvEj> z?Iin_n&{1mvd=3eb|UhpN075^S1OsT=Gt1Ckn{Zpy0i@| zIe9`9@U@ig$~g#w;g=}j@kHi-Pgy{CSR11Ke{%bOHXNH(X`ci*GP*(YdH{=P4)svX znb^80K_C~6RH7;lI1bcd%-g0mj`rJMosRj9cdipy-5r-!Maiqgzjkcc{l&m< z#DzkX*1XPFa));(+k=U6kTap*>I_*Hq!s>)pQSPj7Y<~>v-2Zc`ZJV|vx3IHMJjb* 
znYfHLF?hg;%u%NtGyW6kmE`V+*cb`~>xc=#+*ND694cNIjS;GjVyq^OP;w(ZEPl%r z>Nzq@5fKsAS(1gxb)Me3&@LGv&sFvDU@|240b;ylcxZ@xgqg_~_U4xGb9wEGWVnCG z$!Ly#&V{@}6Q~6kG4Iz-VjgOVI~97Ji6eig14Un%+}_R(eGWD!1v{&0p2^RBuAQV(RZ}w{ zJ_jlTF-wVZplpW>pn$jmQFn%RSoZnlZyKb?h*z26<=-T(a8Qx3I z7ew58rh=nE+{&qo(pbdsO-#0y2`j7s0^e2Q@t_R0lv3wS3p(mCaqCd^ORd12cEaMB zfYO+iX*t$C5sEw|PR{(H1)y6)7^wT)V3wMCNwhl?U_x>&_Fz|b{Y&^0=*#Gn3&4)m zOXW1AqS{r})6;8nA{y=oiUD#9q)g$%Lv@vOW#Y&D$~q{yg4Oyu?Xkc8iR4@oJx=C4 zcad}xPVMDs-@cg6Is~D&lfA&c7+2?)O%^|?1MzD>)Gt?_zt;JhT35|Q~ zJl~h(zA;nl_#1mq;ra?%cSy^L4!)puUjTCI@xcALg17%Qz><8JnbMYgFix0PS4+Pz zCi374?jOc{*A&;Y);+r?R#3v$1{4EsS4ak$4Afq%3L=7-C-YHHsT?L15#WlQ>9i2} zg?<0W^_f0?3uh9wv)bdhgTyM`n3~q`n>rOC>LCm3KBDsWF%XNHE)HC{WqQvUw5*1;RE1TkW~OZ*qi8Kf9K2Q`qF!n zwfi8z$*Gf4RPib=F8*;a1iS_@$!f4upphw`qDMe6LMdGzYfHokbo$)I{HlMxK9t)4 zlsR`gskDmjz17TOwRDg%TR!8EJ}vv*|GPtuLpmI)%iZcV3b(pCxa9#KNBA?Mbq_{H~YXLrZ3Cwu0L)iKBP`dDhEW}rgQ!AqGQbqWw9!w;x!)(;rAtnoWKr-1Mz1E54eRaF%ptWPIs z;q)v+TYhh+qx_bMeMR--`zEs=E2h;a>O})EFI<Hl8mqljxp*$ z{`n@apEO2h6dm*qO7fa{I%Z~!`qUN(@~ESPYa+vYKgnc^dQNuZs6*cOTo@ko^ZmkM zD%(`=Y%anGO`7W`{%6OL<0+(U7}>ES$u!d_T%8=dfa?CK?s_=TjU>hMMWYv!I5z9x zT-Z+=TDYG5!sD)NOp0&ff4u;rBy?zLZ3D9U=YLW-j2k@)oGpWjc#cfo-DG$tz7RNC zGoLjEB6n?~rFhLMwbuVRa!~fdOhdUZTbhAE+~vSjEc-j)mm8qBMPBsXqNbry*pA9) z$~~GX@C>uP*~vEYZU?Z~tLLiwYF=lKI@UN~yDN!TCALb)YUmawox7oRv?s_bY)CyS z&}J~`)%qm02~cEf*IAjxqKe0m!ABM7)&S=HiZIlO{t)K7KWeY6$W>o~g?2NvJf+oJ zdft=RU7v0fvCq%Xe+2M)Foj!l@`)Qc*)dSf4F~?Ap1BO{(^owL*$l9~N3Hca(m6waq*o|xFy{ELd%%t_Uk@n{dZrtTjcBeriF#E= zJ{fbSTvsPbgC5cn@SWHO-om_1%Ds``(ioA}%>}iK4wIap86Z|UZiR)B?3`>?EH$oW z>pihJ9Ziw@g3*b!r?3`CB{x)_2Q>nsLF;HaRZIWDNox?cIQXD2(<5fDv^zH4?DSA% z_l~UHj}GGRZGsA@=W;JzaFWU#S+(r-@cAxA(Cq+i93?dXNLdyeYBt#A9kvqH_LS|a;(gnmw9C~xIWRTe5Ya`GdiT=nZ zTR>A(BlH3OEhU#-O!$vU3K9IW5hSPG`+*AWvg9+)IlDRA(&ak8{TC081s1CJj@bx& z?cipdVI;)FV-Ho|eT*}I_Kg0r(Z*#jK#0ILy85?7YN?V$`kr`|^Q;)UzS46|b8Kv^ z2=XB&hL}b_8A90jXCd57@K59Zsrm09iG{cL;kM&F0a`M#9Liid-INWfm|U|-wMtE> zV>TuSA0dSjAADIAn7wnt3CE9{>qRZ4EE&H+Da--2oy4Q@qm_QP?Z0m)wxdfhGHc=z 
zpd6yGADMyb0^4*uN6=ubGg>JU^aT3nz%m+FZOr1_*bZ#SgWe0gW9|CZEKqqcxG`FY zCYhIi0balhVQkj#x2$vexZ)AXfBWzfn>8Mn#RJ-*5W>byOxm34e1* zVJ{VxI+x3`xny3YV<@KLBK@5n%}#hKdg3#syDN2VVI>#-tLI0X6_8)0&#K4X=Gz8F zgxiK?`NE?!r2Kw-Vow`vw>^_gC2>BsNZX}qo&{VFCUZz!ZXlsr+6Q&R-nXS@zp2)e zqcap@X%{a|6#)D$d%z^>ZW4(x1|19|P?x3p?zT60OT2t3orI=!PC63Th*FZHQ+Ke-cet2?pm zjfusP9ttQl@YHDe&zsHH(!d0>BH8 zD-UgwCZ=!g>PWIDD$+8H)o-ml0S*S!Hmm!rbNyn|X1S+RQ1Ws+Axme@`2kmV9eGbx z_s^r=PE`Fua0#c|r5C5 zzf_G)eo3XZ8>59wQU6ENS#U-9yRcCIu48Le(CqO;r?{7NHnb?<_fE z39_z!+j<8Ss}`1=l9eCkx_vhxq@x`0k`J5{a?s;G?06FU4Xf4+fe2b&g1{*7v?0Ga zY&o5{<-(s6U`BKQau4tS*?gn4{&K(}59HFbM)kS}%WBhQ3PQ%_{uG77m5IH{D!N*2 zk`+|lD!s;*+vBKN2I?|C$0m5nSzRovRkgTDdmfzl;jtJ~TQKM;L;l2#nfD@~zVHQL zR__AoEV)@)_T9npNasf`G{i~7txQe-Z4sGm>RM%FF{ppBKaHXuoTP@;&sI$>I_O-M zjjr~SzQ;bldNfoh?CnuIB0hHso3ai5WgHU>>}ofYUUEe19*;Q@Tq?yCn`w?8x`SHr zjU(~!?G73@LE>zp|N64+vE16=j^n3{s%pHDYun9a+;Ls@kY~` zlXmk6oWv@uj||RuRw0UI@+2Y9>2#6{>8Gjs4Ny!b%#Pm+EaZ(ngwcPUtblSK^f3*Q zaTk{#+1eBmE_&xI63b3}ukFPgnfvp@_yK-D@$Y&6{hwKPGQCgKA79P8E_s<-TVpjF zctPlLWVyaD+(7zh6u6rAc%5SzrB1)b^y|JVT}KVLQa4S~c&l8M=80!d!xBI%Ku_40 zj4i4B--r2{x_3O^<>F;~;LTzahdef6eG$fSd;Oc2VE3p*y0<7yFAyq(QqH2{=fvrA zmFxBlr=j&jA5zRRilN_hzxEM|JzV9c^2FA7g$;(?E3~Y3(}cyy*9Co|G%eEyBWVK} zV{r-t@|TU&Wp1DAi150&bt$G#nQp7-iLG?gRdL}oHYPM*;Me~p)^UsV3@HH#?M-G) z>@8#ScyJ#E+D1ReiVywv9+=o#iBJ`?Fj1|t1Dp};x9uNa7&LkC$o^_{eA z%?Fj$ygGkkL`?aYMeX(F1{^wnXZ5t9tuC{S9wmximw^m?5E2G?R>Gf{Bpjg;^og&O zk5?lm*+1G(e4^XFJon)6N9>dt7Mr-ihN$K zRenb(Q`^Jg<_Cl3l;ldJ6I^$=E6rlpHN4O8L=Qo5^HyF6&-gIcj6|YszWiTQ~QI zK9xI--8!(MJe%kjf&{Mf_A)Q_-rLEl?_+YeeB5tKyZnf;%Y+Fj=>EV`+@JBhKg^e? 
zg*cWw1ro<7Pe57aKks$9KqlthQq9^!$s5Z`uE)FG&f+cmb<`48-f)Q6-gK$$|8a7{ z7X*qx9^U3^#|W4d%*acc6U|OJ-b=iV+gfB=6O#@x`40JR!{o;SCm~@)NjcV4(D3_v z@ZZzqJ5kYPiH$LKAyAI-R~?1!J#&bUunODJa{uxK`O;PrR*&-|`{aN%Be3Z5N2@z- ziS|W)DKAVZj!Le%nVto^pD+XnBZk1_^8wYK@>$B{Zi#o;v`h-jca3Myt7U&kO}C zna1E0F(2<2d$B)dM@1|$NpbaU2*_9yTmdsynv@$BY!WHkE6eLcE&Z%AYC8PZXmrNm z)h8_jQ}EbP+tKvf3w%QbUkbRU4pTV!2ePPb$C}RIca3sKMm!?qQ86>Z@Q;3 zsd;_7agVW(;<5R}vSih!D`R-yt{r_Ka~kIC%(_!qLv*0u=J&htXMR5By#|hXW&O9? z)1^|7J`s-#H9nWPFWge6H)q>~M@1$MzKMEW@2rvIUT+ggvtIotOOaH!>zmQ?RWWVB zv1VT+BB7F6A`Es_?*9HAc94jid^ZK7O--Jx885AtiEZ1xOLw#hSZCzy@u{)m%1x)F zU*pF*vILPo9!L8y{C=rLN1nGD5!KgeZDJt33p|I-h9bIEq&^xSPnG{Vq0ipgi?NVL zmGw*p*PaU_5)FZLB8|3{FuO9~9#1~E-4Qqq)mE6X&=ehOj0${$&iqf5;=A2QRm9Qj9Rwrm^rqh_*ia{0<9r%tFh}uj3lW>7CMeWQF{~3hrl;YM~ zECv^7pOc%e*Kc|xGw>3O#$PxLbh8$w`GbK}YA@785*n_{XxUNw5udxsz#bz~PSUSz0M| z06illlE_o3yY2TY`h;J%q&2)WFr#ZM5o@H{~t@-ADU-<0?K zDJUpdH%+XFA&SL>kY_U@eAx2&I+eN+a&W&%`+0JX>l?pSrry%0p->Syw!D8~HMNS; z-Z$IKPpPe_$Mi4r4ft)h3x_HPxGO*jlnXCv^)!>~&WN=tl8h-%@*=40ceblWX*3~_ z6MBuOpmw%Me}Kv`^vMQKsTA9w)fofsWgy}SZEYp;9DZ<1!fW%Y#G&GhR)DrbWeA8Z za(SJ1CSor2a|ZmvTjhkC)QVTxh>PdBjgJ?E0_ybK2yh*wHPOmpJb>MYx z*h#5=&M~|Bm-^9wSeW0GHN&+)D)fN$1U*cN8qDy0x$*T2E;@r!*$kH4$eRRI+j|bP zrpfu%-TKj?a*gnK|KUJr90<+Di5!(H!$ikhL#gxsxsQ@aS8s2KPfeT{=&VQ zw|S|AU7%iGUY$|Ow0SwzXW#W&Vf<5-29->_d8NK4>_EiEUF(fhjc0)jq5|j_EXlNd ze0+H}a*IEV52el=>6-dviNhj@O42-L%Cv5*&;_$AHZL9iqs+=%^c?{N&*3bX4 zT@M#ukQ2Pjv#BEz#y-D!J#dVF={FS1jJEsM;@Z;n-s>FJfty|wqR~xxwUJHu;9FI> zh)Sr8TmOvAULh6}6-DX#>r8+OhhFn0GiG_%bj2fMr$mN;a$?wd=|EX~L5Y_DwPz z&Ry6reXf8}(BBX#d8q?bQ1?L@|+75ysLlT zZKDE2!o(Q1Xtk1ztKu&y>0-sT*W~Ui2tT~>TiaFQHt+VTd^1<|W2P)}9+lwSywVr9 z{Eh4NfNPt%C>B)YtaEXLd-FW`P>?)@j4TT|1DZ=HvM@leUly1VD%-dPo{JCy_v*5< zsD&1HJ~lQsH8D?iK%8>0S1=Q-_9bLIj#+6jaqLYH#B8|&aK33EsE9dIt@|FRh1uHY zpK8C4(~}0Nd{U}L#FGF*|KeKpp$5oHyi?Wre(n-5H(DON?&X@;h8-n9%>K(fk0ak0h>BZ_!Q$kMqhosR zb1>iN2DY{r%@=r;UPjr@GFfgiOycWzgpTm5Rm{mlgQge9&0nDy7R|6YI}SLSTVnioaE5tA^C){O9#*7e2e?^^)3!iaY?v 
zOEKElEQ^(MNuiB4U6Ey#eG71gM0D0!GF~mnv@8Kp?4eT13(~(CCMP}B?dgj8a#LAn z%*+e~p|x^3mt&CVs}Vv9`%EH~l)(ZKb0-h`HPEOby}9Sw+_W#EvXRfo_>x?I_TA^z&^{B)J%RY20W* zcC@*j5^XBv@bctUVyh98b&sQ1L%Z7v6FwMv+$|=x7RExkw#gNTn338;@#)owq<4Zs z4Kyvi&0F_R^k_4&PxR3(v^lo5en3GPhdTA~P>RiqF3kGdmzt-nh3dxvgQ# z%ngLL@yr)VmBq;Zd6LXR(LHb-VmVloRqW5eXQsh<_)@&J?y7V_>8Xa9>i8j`d^)>ODT!=-y9GEfa#5 z7>op*_!?n%!UIyIdPMj@-b-ViokAwn&0^XEkfyhKat}t>oC5b1R5~Y+_n4Y9Pe01U zD6|OISp}MN!z;*LufWxa6hX?Z;ls`=hmQ??7A^&4%BVSfU!@luFryueL$x!5-+v`* zU#BLWvov0)q*ZaMJnqHs$TUe$TSQ5s+^_h zcJU`(1HBHNI^%E|w(oy`s?1ud&%=O^5!l< zQejjaV>bwL&;!|Ux)rX{mhzDN$mGQgQo3C@!dR#Oeye`Aecb^eKFN}B zGdONU2_JmSSsajGcF<$!z-G>Sg)ZHKOltRsf7cW>T`aL9h z-wXrBdI-|%67!WH!E(*Lek56cd=447s0q%m5izHrG8^h`|~=k#OI%u*$PMj_YEepd);L@jGD6u1?<@#J~})S zc^8+{Zr5qZQnrYva5XJpRU2mTbuRk6hexZ`zg8KEPmmmQVziQlgv1%|CD10GzW}Nh z+YQsK5jpu({Kf~`B;Swl`aHJQ_)1B z{g;(!kg7sxbx$(VClg}>jHNItrm_C>AR=j|;{d0-7;Oq(R?>|G@hZVVQONq=$wGM4AjgZCIQWKxk zCa%#d`!bWr21;@54Gs>fF&K=wcS?zVWC)KjSN3oh8${j)V<_o(Du#n!okI_Xx}6dP zb+vE;@-rt+ds~0z<2TfwoIfLAExO}XwwjnQ zvO(k-`5Tr9`n)y=;4BUOfFa+zN%MFm zmDU0b8g7%j~7)^_jr zmv~!3P$dX}DD_c7o0Vci9)dX)^ee!eX%ydNOa;UZGFw>15}a!@Nc~|`keV79nKQ0R zp9zNnurMi|v$s#yPbm&vZjwkT4@#e#D8$vNA3h}q6^sisMnHSzPR?MlA-4DIYxcbP|D3%^?s_0U?$8Q~giVBL1*1$=_(FZHgfHZx8*2fcv zO)2P*;WuSNetN4I9)PFn<4<1#1Oh)+(p-Yr7|fz4!8^1Ykc}^PORo=_57~TFXiYqL zNA2eA`-<;lB?XNo@O+vI-^aRbnKo0n+Dhr|!e2?{7Kudc#F~h=Oc6wWH3_dG z9~*q16BGww)9{t0t>cvw!7gxZ{uG_o{Q-Z>oPLRrSCFeyG}hyzLfyJe{Q^nVc|@f? 
z-0brLT(g38{YJ(gX$%=cAUo;3UEN-1TD1a(sLGq~UwfQ`^6rNkGvC-7&-4)ecH6I= zwgW`9jL8DJJagDQv&$h5c5^DF&GpW2U<`v|si?}uufP;tH`DgHmYWbI58sx;kRwg5 zY5d6TzyRS^wme3YW6&Er%9{-FtWt=4hJQU)F8*J=DalC~bB5ioC3*m?iS9A5ONiDm zv?@dBfTY}Z^CO|t6{u0_KYys8yRLKwm3#}!=ckj+ysKDfo=k?HU10b0Q7Gmu5zZ#3 zn7R+^D5s`}{CjPq_slH!yu(mHrP2Vf7z01rMCb;5p|+-yyd<5A0cdwm3aqik3h(Ik zRt}dXy?RE^ zmgR)Ta7H9AD=A4pAlusLHFW8*nBw|x0W`^{FQhEYwr=8nZ#KP2O4$_IeA{QMMjFs3 zL9pTH!%aVB`Rx2q)(8i&A#UxbmW2^_M85Es6H?s9rEp< zQC!DBc82lora|D;6^Qsyc@McVJ!QGwoPzwqhW#1Hi#>lB0tey5z)zg;KKkP8}mps0vSN!bV zXePzw_Av$jh-*Wta8$Hcat=d2M+r(#)XU$Y**K(7_Z8v|BN*&Dq>v07EqA@2clq2^ zU-|zmKwkW6vnsapxqjvA{27sN#Ue4=r)xBiZ&YrBz80vU__iLgYlNT6*dqosZtSry zsJAy>S2Q1BnA+0qvCpowW2X7U+N+j?#8O(9X^P?zNorUoh*L_FS%ye_+R#*&w+Ztg zv)Qn+F(W;fsINQiVv4*@R~B^0#Js7>?Y#q584HYD){G-x9d+z{c~F-F?^2&NRrsJG zcZ+z3bXx*1qECV3-214ND|woBrwaEV>V_fb0=c3%P}4D4O%Vf5NtKdNC8p7xk$bMl zAJC7L0`tOaO#t^+5miq8Jqf5(T}iz zElmRx3^*Z>1}+qbDkFM&T$zVcC9Q2V-Sk@5tWv#S2n`gzr0asF0Y!$7=$HcONI?Qg z%zWeqySE?+z{$W%K`1EUDfPD?#>GSF0zNU-74OTdplZ0~Gsftn-)IyGe4R9*=(t!F zAOKsQL#0O~_(YF|Ee@yhM~JDDh|nE0%a~fpo4%_S5k8^zYl3mZm`{AJM}!LFY9qFk z!|-*lO^Vl(Ot7K;20=Yd^#w%Q z&d#D~9Cmj9QHdd86I*FwPgIblOKY#knLhK}HyNYSsOuo{Qzv5ARwA~v*$gp?yw=&u zYSm)&bDw|x*P1?UJp`AA%d&?SenciR8fzScfJT;0=Z_Hm>>JjKF{1O_m^1bGb-7KS zA%gag1*h@%tDVU7u|-nc#MZ8OoMFIqlX^L_%)h*X#6{gQ$Z41MvPgyF%^&^y1O*CD z*3NIk!!yG| z92DclG?*WiR0f6y_jf--RzXeX(1JjwK&=lR>ptF$xiD%~#4X-IKMvU|^O#V5;5@02 z4Q4Os#TZh_K5`M?#at0zm_YAZy9H3*VIU*20rfQvug7jc#>ef9&-sNcE1imx6xz9Q z!SCcG+$oomRk4{SSev5K=P;Ql7SiX!TjPTMw8g?d0I@0`q8 zQTFJ+c7o6k=r~-7`T-wN^5qH#S@dgc}CZ>Lr zMIduSch_h;Y+pur_?b)U@bO=A7XK;1JC76MWct;y%EhcpR^cx7a;O7@o4KK73mf$@ zHEz(wW5!+^FzmRbp%_S$H5eMuohaktW`~nV8~I_T8Q-od@D%y&bD)@rWy!SaP%@I8 zZxCL6BP|dhyX)vdgNj}ZaMEX|{pX&q7?I?C{UU$&vwgV5%i+1(aOqtUXlu(-(x2B8 zpUId!mnR8o8ZP6LqL<#jiGBPh(8fCS(0Qbk`jBn?9FACYA8b^rI;0l-{7}?)t#!Ae zSz{x%XOkEqDNFBQ#rn@G+{zJP@yJ{U*>q$Q(dM&rNwSPOe~A&tPCio`qgm`HzGmr+ zz6Crh{@e+Ez6=Ha*oaZJpM)<@P%9Frq`X`#G^s=Mc29MpMYj1;ehvCMAX$zONyMA% 
z5g?9egp8gacvw~$_7vZ);^+@xTX}Sl<)VKl`>a@e4^l`7yp(S z853>i5%Q7nGa6}mOS-IgLqLq8`=b;3h01wC84~1ku{XP#))m7-6IiJflLpj&_4Jp1 z4$rZgP|-yr6sMWK?t1E?@(J-X9k7FjHQdlSr7e+e)lfsClCf24=H@3cil5Xd?XA0m zT!AMD9cR-=zpdW7R?bb8ZAdoF(tI^Yu}3Q(C|YCgwNAVcEpH{BY}LMUDS1zG#=b-tz^BB%Mqhs}4CE%KQr_V~}bvb1;Wqqpp;?qA!}1)lXv z1l4_aG~Age&IrJ?Z9ON^a~hS>8qN+}iT1gp7Xfqs0cO;vr@v-(b#Ql8I7Mz;)8_r` z5SKaTd;xAjZMukdrf}uI9ryO)Yb;f2o~&@8GumVJgX89CmkQo)}`XI5{**X=;Qdf8SUBb76p)%FOGt*;go0B1TV(yQqS5#5G)NXU1 zWp7`CQAv1KkH$;V1RmCaqX;&hjv#>!0CBLPR4EDXzlyEEc}&+v2^f;_=Mt+GAKBpG znLPdc;@1TAQK+OqG%dQ&ZbA5f>b$n;L%)wh;jWQSdqR)CB^o&mw`j{T$PlhN=1{If|$Mrr! zY}&8;`?iof*cjTHN#xnK&NnL|I5ZNRkyu*l%Jb`#e&>9eKirDrfV_Yp5dY-~CQ2B{ zY2K8$KOQ!2Qm>r)&sm*~h2WFOUsy(F^?2nEar-|n=$E+saQPWJFu_zCGfALsDL*gJ zy&4-eRVqI38toxmM^A+l`-{c5$zx(ms+iD6=|2I2C*;>4Y$1B2f5 ztTdIyVUcIT3u~hTRD3K1svr_3SzIXlo7!{aLDZ2xvU2Ma!HUu>Mm!XBJUJOLZw}Pf zt12Am@JqXAh^@5HR;0{H&iZ*3v^zn^&6{|b`bgy>w}XD@t!J6_kU_~R^S$U2Q4p9U z4h2JR_I#XS>*O=wWPQ=(dPto@AK|$3D}`?xm;{mSQ&*os7l;!W21bC+Ad{@?fS&F! z$eOeJo}aL$fkw#5T%86tF)O$1i;YK01!BbQi0nHAb$F35Ell=ZE1Tha8Ft23wg__* z6YD7$Vs^{oLSSMVto!*U1!&1c;uxZ7llx%S7OvRdy>7?F&cVk!1^atbXbG*bAt3nB z)MgZTWF3HLJa%N%g^|G=?tIQ@iZ}BwNXtlc%C(8|6S9G(lFRi0JMvQaw2qRo&#ooA zMYz#7NP9c);Glnn2q$xcw4Wen_fn#!z8t_3%I3V#g;Kv_wQD1AdIkjch;#)?tG?GW z(oij7*KPBdZfy3azsFWKsek2$hV-qLRvn&@^u@bf$mm4FB4#WK-=#1P*^Q`uMPTb5 z2hoI-mhgE1{`H46#@f15LED(}{kuOpvSJJQ<&6a&RwgGm3zp3y&hJ>SPUO6N&NnHD z-nCjD`2FHhw8+Z125@O^>*Zo|Ni@TsJ|0ggn~Me4CB4KyljY^*eq7x0T9AAdS&ej> zCd+22U{+Lq#)I1lj0K21lPAIwtGT#wguX*BOO^KKyCP)B4r$=clId`_1iaxa*q+O73M?qG;K3Qg^>+T ziOwlD!eN45+#iO_$aLgR?x*{WEdE6(UN>iDnAcZZF z#9?(2;dr}SG6@tYb&3#B^P@~&X7x5SPa>!+wB$4v+1yUm!x>+k50PKLi@XJEHd#Yy znL*6+t*Ks|zgr2_hbZ#cb}ygD-p6O&1!j0%%vK+QSg(B_Rr0xh5<9XOMvgvjnJ)oy zN>XNXo{H*`qm&mN>s3^aKG%5mqCVj&93~%17K~1VHTHN>-$uMi@7AIwZ0`CR2+b=s zhlLUE%Rg3p9kgU*`)?FWHQ?2|`Dw*3iu_UsyT&hfG&c|CBIeduRSO#6Pm+JcsCSJC zCJf3?v(@Omou91Xv`$RF+F)lFzDWPv0l;(^d@Y_?znU=I}Y(S2-MO?}^ zHOzXA)%8g+T}VCcb7jM7c}p 
z4a-$v9(j#2kq!U$@t*l@GS0=Gpx_5?iH@}mVk4b_NV(o-qU9OQagnKK{`)ra3D@3$uT2zjCwH{14iDD5TTH*7{;Fz- zt2s1d*X6^TmB6whZ$@1i%}V|EAVXBLW&Re0|0yRvtzT_-a1R((z0E!=@Ul-@vl4-w zWr5<7tYw?+tu4IROAzoLPt??tda8XX%IseDTN=FuysZ{i+SAa4oPVKig#qWS666#T zZ}3}dXmI9a>^a5~MHX|_2T8*FR8?g^o-SU|_5gzco{639s4tRIMtHv_ zes`A)$(~?aHl=Gh@Vg(5_L5EWejT!H-&bk%GhDQkj3_-_sXHP*uMKK9%aUEHUKrrW#a z)ADV|U$&REzrd!r4fxR3H%qxrSUGJJf@@#-pazdcfJV`AgG^$0^O2u+i2NaEdX>7e zRac=Ab?AGuQ@o-z)|5d~SC+qjYrQIj&2{pGi3ow%GF0a++E^GNbNHDJ>oeLtcJRhBeRn@83^z>+ z*nN3hXzsUUoIqe{6cA)V*X$^jq@3Dn{2o3aC_WA>Op<|At1B|(0gxk;dTqdo=S0p_ z(FG2kpjvP;hrCi#TBq(ceAKV?y&!)ReB(j38*B8o`g}@f^DRxH+5}jYGNVLqw!#~` z06O|Fr;_W}R+_J)C|97}EkG~zM|kn+So3=Ko`g@HB}c34fQg?fxd_hA&X9Kq0$4eQ zF{r!mOYm|M8cNKP9>meFYDY?9_iCY@3E}U* z@z6QQZ1`hx;UR7*({J_Pw>_L!_V0gwiFAM41oBfboOM1dRf1hhHV3hWD(1<)Rrko9 zoNhw)f1_da^|`R$Z7e-<3#`N~^Wt}a!>+9X&uf$P@DM@^8m;G3u?*dyLU~t^VX$4W zx#-n$fJJvGe$LzH!`ZI3@L21-vBwo$pR6wnox~12dQhJ$5WO|XvxdA|0fxYU!>KmiHn_MIWi);O|y%|~dzuz%KC*li>1P(140}y-imd&I#63>Mvch??;#)>M- z|LKEvQNO;jRP>up98)$B({;3t9N^JIho0MW_3ne-DWZTyys`cx$wT*qT!}kPf`)%< zFDPnVe7^i^>G5}bDSzs(|E19#R68N+(RYh<@nq9ME)z9F5d4N#j3OsD+66rp9AF*` zyz>8rTj3KD3rz7_O@+$7{xFmHoRr^=NVcq@P3l-!uIoc?50;J)_!@cbkZX zfAH8Mw5LXc^BjuEn7(OeO$Pqr^i%B4$o z#YNg3^;`mTj#GXU;h@_^Ja zj6_Z+@%)fJ*hum-ws%hp=ikz=$_xHBrYbn_`wRNBGfkTDUXzf++T#0JcI*!mJeBWo z@8tcrsC0KSJq`q8o1&Va9t*)-Dk65LSvMa$qm7=u+M%M^72%weL3oV0*B9tQDcDQT z4T#TEYh{OtJa?V%UqiQdRwkZTvWMPfjh{TFN1hunbxlyCbmmDyNTGI%j~hdSEA>w8V&9AJrC9k;u%FdFxLi(MGN4IG^Shyj5l`SXoJ@uxg<#hzH7-?xE-l(KJ z=2{`4$aBBwhL%CiENStslE`_&7Sym$DUV_w9lU!TU?3*I&bF5hn$VkgmfuxsTYt_@ z7@MmY*&dJ$N!X`K-};qjanM`Inx7V`U}3YVrCC{1_mg|NxI*-Hc8|!DJ=L~VsAaJg z`J-moV#rFu9oW+Nj!bzK%L5R$!L+R7yv5E~9ejr!)R*=K~fHp7DR6^+J3_Jeoy7@#rpm&SJb^6r54A zpg71V4|xvcc7g;D(h_Z(wwdtr?G)c}NKYj5h%91O!nV)T%g<(eCmqIL=pA9=pF>M% z>l>G?aEzDukPU{u`)9Utc+I`v`PmG6BXww%o^>mS=u{qBt?;G`N|jJY*M*65$Ku#{ zfARMnzo-Yu$3Vukxe0uQeF!In5}v;G@aH6>w`4kAh$q^8l^ZNjB_l+`%HIa&*{yDEiVI+#yT@s$P`9P+7r}`-4zJul4Vl8oWN2VeQ2^(7FszR%-US1bx5FBvsfWty%Si3 
zwCu;6e!1w@JqwOfrJ=Rkw)lDZQx+MSsZrmn09zb{JyT2d${g@_|3br<;xNYe~0)7vFZxV z50PG)L^oH9NP5iCvTLBAj`9lG@FT4-TDlZjfi@Z6e_!op#6a3ANX~j>`Hq57z}=*t z%Dx*vM~z!HX7X}Ofjh{Ui2`Nj(dY4Y$&Z_ST0I|6`jSp}6Prrt)zJp8BC>DB5^be# z65i;Qwj1f|-O=csnr}s;<;dD9>5xRDeDBq^S!Ks0|4rg?5~f6kdk6dK8M0J zmvm}+OEnBJ+yK`z2DrsN9W@jY5w5+H+76u&>wFzjwjDYAiPZf8S`c2ZyLRM7OxRp4 zYP9{)tK6KEl7U1``ZU*N4Fy&rSst>_#*Jf>MLjH|iiOU*yi^qZnCt^OFF>Hpi{a0v z#B-W&MSdjy5)mGEuZ@oIMILf-<}T9#Q|f2S=ue+t*U=?NK%RIMf))<2$gmHxSgME| zJzV{s2l=%pQIa#Zi}Y8pKk~<>yRc!*hp8zIWhH7;yBu+B8rToV8&y+@1Zt5eT+qZF z+o}7f307~hoE8xerz9)7ZE-JGn0wneHEC6sdL^lPRBj=sC4b>?IyqU&dRH<-qqP)4 zpv|#)<P)+x{K?O8s2sW^wxv#(*&>+`ym?%~FQ8jY&ee!@+7(y<8=LZ$F)&ae^PF2&cS2duw+ zwHnX?X}c;8yp30E4C--(pHkg` zC?b2EkByBD1pDbnTR!ueXB0U_l`K$_;Szk*6{#+|5|q(WlZH6^4qaPY2+MlDOYkD6 zKkc;ZPhG!4puqi|4r)@Q;y5i@u9B#BvBtZ4;^2V*SWwEagt4Up&Q!G+A)t?;ApDaM=b=I&6^D2^dA|_hktR6GQGF|Gt5pnGbJ| z@;`UJlmLC!1&p^p8S|DmE7+~d7 zJ39Fd1jU-CVDi?}?lHrJM)Qp`hhKLBeXJh~NjZQD#eDkuH1qLo%|@3Yu3q5sgua~J z`y)_{B&254cKz@B_)~Iw=A<{0l60n8&G7D5xqJ$n4AO3XLnW&-e^E9nw$&c7A17;i zLqS-bn|_(jW_lBiDK+RunFE2Io}l++C|r6}c}NDJur|jgli6De7M7zgDqWH>ZT^rN zrIGr#CY;wKf2gr=*{pE9blhWlvbk`%IqXJK$^(AeVk+r!Aj|7QVUSJ@Gk2AiI= zR9MVpx-*6*$?ps0W%S7;XXqFB3`*&mS@lH2Qx~%wU2rWa<$i^nxi>zCg508V^ppM;y4G|)95&^uuFO3PZ0UEl(P3z!5R=dk*;IO; zveF5(kEk4fz09r%DfZ?$I_;&jUEf26!PkCy3oqnQ)3g}X@L8b z0VBnwv`tL8d^k1Z467;Yhw~551jDuZvPvm;5U}JkKuW??i!?`O ztLkfn)xBU{ zZ*-9i@vVeAPnN5Q5MLTFhaqt|G#L)Q5`l=gZ+Q~<->2|0PKdNOK5yGcdu>mYSxlfv zRaw$&h*`|`3&%?!`{t8wUack{Sk_cN|I(X(hlfQY+VT(X1~!K92CvKJCF0TN4Y{4m z!2=rr5hfw^E}Dc=z}ZE|x4o1ynN=!97h!j9_$eo=}vziJKsn;tGjMA63~`eNaBe(f>LR;Cx$*!z4Q5C5&7-6O z4?u#J*w1Vq0XYDX7af#~rWV;swW>hhzk3bb4P_dd3vp!s_fG+y?(64%geE>4r?}#% z>@6W~xP}zJcuYHAAZYk7T9$BYz2lJhdpR4psI9Z`C;3Xt`h&|zJM5B%@Y|_43-Z1( zC2~woFeO)0EVb*R`{o95kX{%79lvJ)wE-XfM1JWhLYjx|}QB@sD7(bJ~nqrkAAF z7cyFZ%wfOTpJvIkEl-1rFB=L%8J`Fyfws%w_tTKVu*v*9$ry7*GA&w)bN3|trcM|0gWfi2izPIO4*G9`F4f# z9$X7kgU;TQ^5Q`t^rCbA!Vi@iZGXX+XTi($ayPZe&zc(Aj^9|G(1%|g3OYyy?IU>2 
z(w&o7d@v}H(2v|hvbad`PL4EKyA6=F60uaJ!EY_xV}0)xzViFCm@cW)`kTnSrSV^A z*6)uQ_$;*Nnid_sza<(H)fc>k;*EW__W<&}lp4ae<(t{noc~6$B=|X#KFUOEaE6Z1 zOA$sZn;k;)rCO&yL#aT6d?AU~K51Q9t$I1-(94J4%+={CM+?x_aR;!WeyG+bK0kT5 z>V5e4(k^1o)igU=O_gl%eYz0;bDb`p4nh;`T8wU#w={W&h~1)tigx;evj?E5N=k+b zdP(w!lBma(LcL{^lsROI+}e z88$Yy6F6!yxcyiF`u$%BQv1=alu3D1Kl@hmsoN1yG_0@W_ANLdL4--15ZqiSSl>Xz`>^0S%6bRw`QQ&bNA%f@P7nbkxT5h5Y{d>%)tb%H3hDO3Lw7=j17uyfXE8L?oQ@ z{pzRy!Doh{&KuXq3n<^tFLpRHH^ns(`w&SjJaxZmBW}-sSx{(#JoR~%)aBu{E%ftt z3Z_)Gw~KhHetfO<;iJDw#TS$wUu}RLA!^+$d}+S+E^kHjz<%+*vP_Er%y}7&?@P^% zPPHkf9kX_MN2Xge=2j9RZ}4kZ>=_!mgHIG z^RKh1AH0%0K{@&G{}FZF(NzEczhx(ctn94HCS_dV8kyN4ZbDqMvU0Cg_9mOGYh1Fj z=e<-$g{*5`T(Vtz+>49)z59MYpYQJ+=lGY`dA*;n=kxJ=jF@f**U=vW4i)Rm)}rQK z^6L1lYPVHGk3qvz*PeJz9l&)&AqUtn8e;E;i?A21e1YZ+t9TtDmCvE3+_Wk8Ixkbs z(mi|mEpcr;#HCU@IQK|6m2#9H@nn}xr2BG+E%lPMw^Unmb=fy1v8*EwiSR0fIo~JQ z?&Kf#P64$8o`Fx9+QuL()HmM@d){bx1GpjG*e-Vv=rnV4QooeUOt}OUPP0uGL=XKb zpdMtZBs>E@`N)`OLj`3Pfk4@m8vWzQQMMh>V|wN&KW3Cy|a zw%#^qQjzPaM;>=`@K=f3cxZR`JD8JK^RrfR;AL^_zB-AZtYuAvKH1_SoB{Qz;4if` zWx(6vCFf=1RqeM2s{l+b{!ZSoZ*nP?vqbkz^y!0dd@3b6mF*mhaJmwPIMjMyBfd%< zGLF5~ewqC#47W&81nltm)Mv#d1XOgg{&v}s;@}nl?00Jgtd)L@69p>Pq{000w0!f; zydSH7($pkWP!v%jW^M970bR7~=aKt;y+nr}4$tV{#=d2M%S(>beA2JrR^<{oauT_s z_ORWcSvlypzh%p72mLE@bKTEU@Oax7mj^G`7r$-x_3Z2IrYARRm(soG)i@fPnwK^F zIuj(mUF^M=V~E&dQXNF~fR^9sM~cYuAP;pEfBb#bB7XkY-@Q1i znir@g-^6}4_+Zs~8&Egg|LYi7(ehcB(%(z<7?9Vy5({^z=d&n6Qk-nrnP1LE&zai? zsguP6O~SLrf;qhpUpl2-^k%s4zd88Ga@04SRSLS{8^Bri6F@BiYv7)+NLy6ltgBL1={YxU#VJh?Yl;fHC@Xg;VD0E|n%^KX2vK3I)^|lu-AZG9E#|xw9 z0;01@(+7DEE{dG6VjfyVPD|~cC<0D9webbX--0}5sVyg_oJ+d0^R8R;c>whINLb5# zV@z_YQT6yR95ZIsXQGZu?GoR1bZg|=8r&!2amnIrqx)&8kB+76x#Wg?+)KrB?rh+q zVTSqZl;n9KXmtK)k#!YBi? 
zxq;38t8-S_MD@3>TJ_&zE)}QsN$Fl(I`sFY<~H@Ud&6}4g^iWo7N2;EHfp25B55{;Sg$RjB`8-%jDG{F|@pwgLVpWi$g6m#VejZWRE=@ zVSB~@idzo#J!0KIZ`_0Jt|qQ`|7wK)S9-|cm}b4MZUl#eo&lTRT$YV{x@`Wl&E(p< zHF_hN%5GP$vC6e@Dm|ylnpB#&VyxF!&CJ;U>ESJM!UwHC6=s3}nP2MTvs*o0);y8V zGGKt<+`G)~g6$t*^%Y6!D;n}v!K>*r+U#J*6eRID3`-QIhD%k+iJ_05w*tkXPGS35od-V z(2?t&E21V#;j(Tb8zQgiJ9pB6^K4T}v!K?J4VTbl_ojX~;eH(nMJ(9@jhP%7>h`WA zJ>O18t+OPm&b>t-v3OU%@9>Rvf3ja6<*+#UMf*O5y~#H}Z}NOwn^o>UpkMA06-_}) z<~8VgqjfFWv=T!umK>W?aKxT0%S4V7Z>rXKmE6u^XSX~4BTu*mmb^=A;szB|zXMx@*iM#Z3 zBj0Pfoiyz_TQ%>~07+97m4)AT_(&DwxIqs(QR@$8?;B0tuiOjRY9TvTIoSA(9=JEX zBzvaf_JGuSL#Kd`xon#CT!SrmA~-~dmwMqn1;?4LRd^u5=F6;&v_}wao-FIc;o7ho zrEvzsO1}rCS;m@e{hZj!){$eCg*NBf0o|*d5sy}<^x;!4{SHzrSF{}80h_d{hx&%} z%7Oh=u_p-5pJCuk6O9g$(acjfN?%Wj&J2UWTb~eTCHK=sF09K0l%EcLSZhy@;-c~Q zo!_{SY|s&so_ggA`IkUo6Kh&Gpz9aBtEL(GSBmF+idwzAy-fekM-5wbq1JmopF4H! zRT^`-2BUgK|7w42<4OGHO-EyON3x8zN+ShI647p`JMi{zdS-1V7$Cu zKdOJw()#j>mq`3dfdhRg4}ib#StPg#`W>?f>10U!&^oJi=O>(}=!BPv**nOCY z$1&5{F6g3!iE1h1eQD?&2b&oq=#Qe2wNxQ@SaiS-rWqJ}hb-kYbz)`j0!UjhB_&9D z02jMtm2_N@^2ek{dtHwI$gBvW*Gqx2Bghm?UpRO&8Am?LN4&c`tHLfkqEcQ!0fIgI z`n3E#OR8@D=M#XnlE92u2Ut;zIDv)>+MjIvmYG`tB&W5$_X;`M@ZDSCK&F|cn2Q|{ zz|*VgWtS#TDZ2`Va0YuJ8C2j&le@Jkmt2YSOU#D*0=kvs>vU(Bm-Su!h-+LNWn24w z*)C}#ryCvnA7~7BRMqeE5nslLhXQ=`;avEQ?YcEhC%dUq4(hY5#Db3S_S&bPZL`5vL48{cU5{l zn;6#`d=Eh|N=&v+T$JTK{NC(vcUav0mU~KV@mTg{G7+_AE5pF6Hz}>%bdhwoo-~l` zi?$snPVadCzBPJEnDE&KWjtrwc=F!JMLcJJ7QbyV-!=+}wp)SC9JD8vtsaQW({MkP zXpWx4)cuq%cjkMl>!9Lz2 z?YBZ?+f%p4)*~yll-NccyYw`UB!jfU#ATP($AcMAQpp>G;z~={gB-riEyB@r&ey@8 z?@~Q>-hVXsW3Hvm-6%7))n9`UiM81%(U2x}b03cqs=0lexa z4!c<(k&R)svmTxZHn^2+?A+!`<_lK0Om8UTW@p}I9`r*pCN%S`4Ecf_MSnG>O zmjU`c)gq*E_@ue<;b@5y%R5LvKUqgRn_<8Mr~DavFbM7^()}diaKheRIrZnq+4Rf{ zS=+~s*Df56T|2%jfFvqu652cqX9=cA=vh~gAmy?q5 z?c;#XXL27V_NFv4u?B#3xE8xM-^V*-cLM*gc}N6%0*ug64v%(TZI;K3V$vIX@2X*A%!baBE$R*jNWb zb9u=5KgyGJgf5zH5}Az5Jq+5?jkOG`OQzK(`B6in?BX67gQggvWK5>+2!gg8YOx`I zC)%C*HLAo0GuF5Ie(I97#)gPUcdI1jGw^&vifn*j<`1r`68PigjjWd6u_6o7QUS`v 
zb8_k^!y7yU=NL=>8J`VFlbb@~{3!o$*j*A0MGku+Z|$4C=Oh3l;_c`7dWB_;wHc7L zH0N>yz&Kw+mhC(8ygNnfbAT=h0YEm@9r|)JFV)mXaY>&kCcOOT9w)>dydKVUw`O|U zJFnE`Jpgca1S}nL0T)mQX43AL7Jv=}e(()guHGGoY-~}ReyVm_jydnWzY~Et!Dv24N2fbuqda+=Ej#5+4>C^Ut?K_{G8JIOL^B)--Ka6pp;Q! zkf%>``K8$5Bh}AQlu)FW>}?IN1q!-@#Rw*ELZH4NtA&O&NcU*J;+y#u)DvyJ z1UfiOIl(kUuIcTC8BOiKAz;uKR;BKH`%PUN4+y_oiDs}j-y;schgX+5w&%in@W8LP z01>m)7x5M*z+P%~S0!#w{-YX79*U`}diz{54&QKPte0^iP*YG3D(tFPxMCl%`$xwp z9Bcf@Sw{$JSDSP17GQd=ncX~>;*1nrY?0*@#iVQ}!b^kPnHW_Rd2{2w52ltVN5n`- z#%WmU2I=EE+`HUmY75evgVk=a$Q8vehGW$g6L#KK02yL+6FgUq?;i(bDZv{f6H$OV zd10Y}iM}9$chtRYOHeG@@VCib;fOEuW{>9&mYW1`Z1nn=yym#A&J-U+Ut!r0y#mnL z`oHX2`~g;&YQO+~&0mSNUv)JCnEEv-PXYVQG}BO@;z2yHu2T2=3LvUFf5ju`z=Ep? z7<|ZUH~*5eQbvC@T?J{*bn~-3P4B$PI(H}XH;`o*eG0HrrKI;}I`=~97~XL=H+D;E zJa7TE?t4g9Egx{2D?jUKdqSL%1Rwe$S=zglK&jt^L=-&~7PJgsEmdf)6B=~NeUuy5 zWixEBTTAVu1vZ?8XYM?rTpHrXDz}+#Dj&WI0J7w0ay6MnoE*Z~;ZJX!q8&Y*hkYB30?9$<$f8g#ge)cl3r?nl*Wt?>$`2`V5Tye z+_v98Dq27iWc)|2CY8r5vyunNK;@B9t1@<1o_4T3x71CyzL!523%msF84cpFlM9!2 zM22G^Dc4z{gg2G4V=Cp?Y^Z|^W58H#Jlfr(M7GS#QjN2oHy5GX|KkOHA4{H<$iA$n z#D!|N@yYZ+C4ZJ06#bqNrjJoJn@!F0uw&PtljZb!02tb1J=(ho`jZ#* zyXAPmSY=DH7NQY$Pg3_~1$lH;uU7z7m9mnTiE?m>aN(76vjeYZ^ zBIo24-h+qDNwilVbE=ICkV7*Tf-m$+dO)EJN5c-(o$c(a-u9guHLl@h&|NhUYt2<; z&f)c=d0IYfLt6Yb;?dr+=lW(sE+B_s8pNn%kA^4&3hkUpw7wlq=4_5Yfd0yQhyt~e zBjF^ojPs>#!A3*=m>Y&pe~j;DOhBLU>eg6S5kf`rGyy+Fqpup005Bj5tF4&EUv;eS z?d4C|{08n){*C$x1UYvP z!|3M3gB>b0biBj|iJIX!GGLR*C2mO@J-uodgo#8kofyq1Y(-4(IRczkYB2Sl!nbz` zwyY($GuMj;4`_RT7mjx+QB9mxuGJ<$iez&t878w9o?u&ZS7Zb97U6zvnFvB}sbOJg zsYXilRmat7_cpyE=m{Bgign9UqWx-b!NCa5^`2N88cpA*ahh5Hw2g?&tznqjEp^F0$rfY3@2~yiV+34Cc1BP z(Ol&2yMZ^t^xD^hUMNQNGVv?jcaMUcqfN(oqu7!HysbtxtQEW9w{xyD!0hRZj7b3u z-7Lj>t)0EMP2zSp-b8k;x3$!O4SKv@F(Ivr;aBZ=Wyjbc)AU$Jr|ymNu)1m* zvwiIk+7Uo0i<`E|SfffllY#EtcSCi*kg=V4f*+rB8pkg3v0r{J^-jEySgR9e>wrklXRg1$#Ph`VsqNXNcEsosN;NV0&->$S?dm zr4h*?&tIJ3HDE;-X%YzDIjAWqOodIzK34S(s6ZP+x~A)~ch(_>k6xZ$l!6j;R=-*=|C^X;ulyo*r`R 
zSdL3dRWk`}!^h1@J|5J*dr$kB2wApTj!}UFx}y`%Yc@Y$8n{l%;tQ08K6RX7G4$h| zOnyF^q{1_OK=n3H5<$%P89B}m6Gsp@vY~bd8qN&R9VEjS&ko_Lkb&9{{z@j+E*De^dj;Vt0c0=<_jbJY40|E5MtbuGy+j@wMEa++3w({;L3M!*4B}^hw%q07`i% z^x@?tYXC&a?d5ke+#%k#t9LLS*!}iM@jVWqbR%JO^3@{rtTEXMIKh#5-t3Hpr{kZ? z7px6hnh@KsGUYwMy8~K^-?jW3C3AtjW6`#;4+-U5;>f<7c> zf$^gYX!l&r6m9$hg!QbU!RVkTPIX4E$+c)a&CGdcZD<;o#84xLAl@3d*u8g4XDL~u zP~%!5m0eT*_G~NF=lIpST4vW0zw7#6fnsF~{6zD3*h||yq|*>iWdvAGeKc#oy=T{= zm=Ertm5M(#dvl7q%|0AGsP8%W$|AY}rm4xL+G$m+?^4NAcItX0$eDS zoVRSVgyM+(GH|udWZPCpP&ETBn_^;594-Y$+$^~I@QGX|th!`rdUUV2vx$TJdJOzO z7m~GylXAQOj9Hg8+J(j1V0qz_7jR8AYlUhA{~xj*^G`c^l&N z!yP{Svu#U1fOIHL)^Hm?!0x9gSU%BiAy+*qyHMI-;RZ{)pf-((&$`@)?Od<>v=uwsEr*{&C}Q4U=xC;@1aOiMfR2wV3sA)C zTLYZaf5ooxZK7ttofIaflfLSV9S$W ze#uEN+2`V`gZLZyltBx?;^^;NdUf&Vp}*U1X=fH(>gK_q0AjzjG4)~*>muU!vMj{p z9PO&rIO^h(HuN6A_#>3BFc}dI+qQ39{PXOqx!35b$I#%al7V%R7@mHbdR;u#45kj;0^-l#tY z>^}7AK*#w`uaFu}=NI;xfrNY5=~hz=HrGGFv=LB6NG1l4hxYU@3?>$Qb((`l480OM zPZTsbNqdQA`NBxplSGN(DtrxCRb@*idV1qpNv)w3Yzf)b0@o$W)1Hoxa3Ew2h4RuD zV|9A3CABwZ;41+H9Uno0&;3WVx+l1i;8v8Zo{?_IiGo71Mob&+ z^F;N-YB{4l5PPdcF3m zZ*S-Do!AhLx)yT(J}Io>yzidKXGa>H#H$x1!FO5sint+nj?xPu(r|`#X;8p# z(urztMyuzZYwO;DT zG-9Vfx6Poq&Sj18n*besXs~4}{^vz3=m^Fhc&Fiq%vstCXAgDuz&0C{c(=c$ho@u9 zFGk=!3u$DPjD>maq?vMy@02t6!9#7L{l;_$xhjwD>~bno{M*|sy7Q4#cuzMqXZV#* zJAmfKL4MnfW)4x#($4-?l$uL~uC;puY;cB)BDc(Ql_r;d-5s*BJC8|H7y=%Zd>b;> z&E>`Av6%G=y5AzTG*2gJdLzSg4;S2pj3EHf8FNDv0UUs7#z{+(0ng{-t7XRkN!siC zDLIeCUx~_Jwc$<35kKB^=Ms4RuV{D}m|MU*UXc)RRk|4Dk*D&>)8kvE3k<{Fv+ZIp zGM6$--oV(=V(7ROf3?I7?<1p9E4R)5&llTDCn~(7-(IqV#W7t*HNOD$D8zkh&(hD6 zirp$IoyggyEvP>HsA&QrR`u6EetRyM>(wP zq#&kGtHWHqg=v@Qgi?C$QQhVnJ*iNb2TU0}*E` zJO22|*J|Wr{EOE8(e$+vw~`jISHKeRud+=(`cyTP!I=mU5lY^L1$cf8No#U1VwiL;8`!ZPc1a zbtHU-2XAF%S;|Hrhz?-JM?SXwLXKLvZArC$VL56etZCTYW8(t{n3aT7R^iGTaofQ= zf=j3?{-9)~8^r%!H0#{u@N43f$_rdQ!wK}mCk<~x+l27zy>>nYcR-@Ylj><_?>p5n z3ZMedUY5GGVP?-YbGxIG!U#9xPo0pV_AD-od*=2uifymoK4?z#aHsrKT&Y)*+`$N> 
zv$9kX6F{U=`ZuI^IY9cx$#;W5KJYoPsW2G|tk`lbp1y(B>1hCPQDfOpk>op1lMW0x(!lo96Nr(m$ricE%}tuCW1u7V zFS$&H9)IqBO8n=p-F(wE1UO#^iE`_PQdm`Tdb3Tr==lt#Ol+4_%V+U_GW|`ZgHe!d zT)iM4RA;@uTd=5`l#u2>prO#VVuFl39CRUN{mee_U?|(svf`o-t8z{c>GP>K>z)A%Vi}6u;YNy=vTU<69Y2 zG-l5M>BSEA09Ov&y-hm#HJ(-=QmagJF!%6;1u}x9NjK7zDnkx(T$**YMMlHA&v8u0 zWPrg1Wqr+4y}l@|X_qZoy{HA3EUh3Gg!Q^oHT6-7ZrR`y2LH_Gc~bpbMrE-QKa!!x zB89%F*v+qwD);B*njU-hX-GDQ-18NVp`U}^9H}2J&mA$Dcw%4|eytd%>p;7wZSL$6)D$IkJ|v;Ph!t?bEXYaSg7T>XLwPM58G?YDFxEiuYfAeW0C zY>%HumGDTd$d2w*#PSYn&bd z3^MQ!X!*{w-CIRw0sU+}U|hgb&&gf!Cpz%-WaAKv7Z* z?LlLv=EJmdGdx$^B`dcC^|EfQ7@CReYMd)DUW>VACyO^H}Wj-;vNR+L918gPhidVS87;y-$E+Nz87uP9lu z3fOe;f+XP7x|(>5=CF6Wlq_*7n_u-HU4%Fc{QmNo?D+6nK}ebJAlCx~4wsGNs%g^P z*b3TOKeLe~p74=~gJl2o&>3O_3SgP3!Hc_d-?Guh={lxV9Bs) zbc|#+_aCD-C|O7HoF$9YnR5Q1JIwM+5%9vFZ&+n{zoL^nYrKbF+myJvfB2j5MBn-{ z_?!nnTJD&3fU+){-oXE*Wj@DM_MllA2EY8bF)8Wute(64Sa%8CVK_PW*hSMyV<*p2 zGVkZ{S{}nBpYLrEJWaWi_;cB;)5DI**L3ophx#VSmZmbbr5l&H%64zROj8DO=fPOe zm8S*Io-KrvRgVVf%pu23q#QeJ=FVlKhq^ERb@Mkov_JXe>T~7c_r71eto?^`0`8d-qxBE@@KrhlD2q!K-L0*3hVWa z2S3qfu7a@fMw@f)(=Zd<0Vl}Fz=hI(+TdiJHe36Sp1~Rs!Pg1QiYnl_@-aDEQex%W zAf#QbPR^hCZRPiF1M0v;1TL>aT-4HI6+38CY^>>K;Qi{z%%6VmuaAV2i=180Rs47* zhu(+#hRy2+Ir3xapETpnD`Rj7q?!Mw@FoM7hEB(#rmhzg(Vns%aCS9uOH2qD_TrFuTvj$#W+7+~2P_p>zV5~$4Fd1cD>wVh7c7FuOo%L7u;y9@&A7GE| zDuU&~{u=jZI2ev%#TE!8nANHL6w<~G?nvo#&{A*Y=~3Sdq+?6`QhOA#2JxtGaglc3 z7?$O+tN*GabIa*4@iV5$cd)`ISRY|iS!r&(vz0_r)!U!BdhlTz-m+3R|Ib^-M7)JC zKj%t(-mP>T87YXtD&$AkaSxubC^bH}>24~!VZ3|3&Z1YeSa^<$o7N>r+Zo<`uwe*; zZ%H|9kvH!A$R}|fHgR_7EyAvPF&*s#?qUhU0_jgxHzukF8D3?_vOdz}g7eXGKlpJy zRdPL;+x+RCY_!nA3mGMM*4ggV*3%EGuG>daS&+!mggXpQyff0;_Bi^J{Z}KAoyhl( zbozDD&%A!PdOJqLzY5HGjmRO6MEE(m|ESFKOVJf7Qo^QsxxRohb)y$Qj&(crCw`X6 zhveP$RF*UJU^#Lop^`UH>A*w2en16Nh8_ckTK^fcTV*`wnQi(n3JZ!tUJEhI+PG+1 zqduYS4`$aP5b^vHdrgPN?p3DL>jbpfvZ47-!_rUPEG|o8(#y3D=NS+>?XfUT_QXi? 
zot5k_AQuIOg8lXIvj$6?h2e>-q4??G#-%y!@8;?#2Q5nU8Urhl6A{};&G2vLWPv@g<9#G}O}p27c`P<8gGH7oI=}}@$bt|1 z@e~ZN>4lHv2L|ky%(3v{w`2+3m{AIc(%%2CmyR_Q`6=H5xKjiflbXW(Qu(^sJ{(k^ zRh%(XraHL^IaYa52ciYI75p#F-VgjZ1yQD~`m-qua~NPb{7g!1acQD~w>v2|90lvG zCropx5-ev-XLNo6u2x`RBZx3ihUmtjP7MPJH=C*sOOP2{1#;b}=k7?2H9L}wGj$-J zJ4>rs9;unWu?4{ua=6LR zXCb&mH$oW$vvl7k$ea;qGtshpo$XaoT{@V8gC&H>&m ztS!GO=0tU5XRGYE>z@M%|NerXfmR5Cuxv`b-U9KMJor;;Hb?)|_0LPpwZr3RKN*ij zxn&FFpvXH$!tkTdgGcze^uc0iNK<~9cHYVJxBS07SlacE#QgHGquG9H(YPwChx z0pxSfj-zOd6#_lVM0jMuQH_~Err>~D2C8gCOIuDq@;$HDT{N&TgeQg14U1uC8vd3- zb@0aYw513qKfAO)B`w=z$Glbm$PnVB7l0NY$?|c5XM#WAZd(xoo3h2{vu`iUA^RFJ(-X0blB*5pyeX4 z^71LTy;?`q*vij}^E5;FS5f6YkvWM$9jzpBV*)Kjfai$@3H=n^RBC{AUK2I+7@y!k z-~cf4ow8jCu8-*LoUOpR^Z8h;_$JLP)!cD5ucTJ6C5#t8ZCO@XoQfS<5t|jU#L}`2 z&9QzH*2PT6?VIod#l)Npm0l`DAvuWn2eS})FT>!b4yK8=ept4^rKk>$m{?FK)p#zR z`{&ZR_f*8&>O#Q{{ADJhO$^(UxtvD)gJ2?e{evy(XII^(>Vj~x0W*iR001cxFmfh~ z`sAEcx;$v18UJabf3+dDJ$pD$&#~*kn3Rw|7pD+R!pOdef<@x!R>kuZSON-NYzlU{Mfk4$ydpgr*Qvt8f z+9g#MvSgGSjJCv@bPX955Iz*ygTqK08f425i?}o({*k1K@w*IP0*S55+kJ9y%yfxF zC~E4fP05O5*Ja*)BpthsdBSurQ}agly%CO`K=6i~Yky_P;PnquZZ?e#bx#lMG?qZN z(LQ5J$_2=!^7V5`EBTHWnd@a5e(p&yAwxP}n1%R`Al{TwDal8C123&Xqj;0At{Rma zPiHtM7<-UKkUx#T$Bu21qvf2;!#@S1l$LR2D9#wze*x4I>bEY(I>oL&^b1_=>R^S5 z1tG0C+@GWDFt4*ygVc6i{m*n7m^M8+m1&`2E$^CU`b>J#8+X%F4Q$4y7X);rJ-JlK zx+{Aa#=GfluB1n-?{!(fYTe$w?S&btT(9faSx5QhQA)Z{6Ds6kO0V-I$bvpBP~9^ypYwqI0EV|9JNpXl@VJj0i!?aPsg;aK4q#>i>Hwgcr zMU?G&;I@?3$^5SKzpNQb1vaXp&WQO39@`!3HPtID4HgZ;_!u`g^L^OC(bZi7;DJ9K ziW2PLU=uJ^=7;EPyqnYNV@VhVd$YyGZ{O6V4i{(-|h0E8m5CmrmolJGBN}eZsiyv^BBVY?5X|jFKQJk zVkVBZa1Yf^j~EcMz%)$dHzqvjp0RpsbL$=Gs-Z_v|Np+r;2rwm*1CKrD=SyobHGR*kLnFG zegkT5u+9)U7KnxoM0Yvr;D{RQ-OM6RnL z?0;7(_>4!Gle4JVfVJ>iHd#YwX+g-TBS2+pR*oLBScyVi* zzBa3(JRx&5DRx^T4Vay;lE-HpxRDs_)&T!xquO%9u+d&Ig}GN%rlrw z*85iX@9X%ZD-(Q(IyIy_7@f5T(qgtzdz)@1|bS_uX(U(D?OL0ttk4*4{Y*h2INngw{7yYN)dBXy~#=b zyjVsvJ+#1nL&a*MDNlD3drs5LF0G{a$d0#ULaVPSMlIi0(6|a=b69Pzdr+$$pi$N! 
zcO_6z&%5R-!7b9UcupY-##S&Wgg?iO{qGO|7&{o{w2|+;B>?!??{4vs4%OLYl{y;g zLeS^ic89j3x9wuSG7>(#%L}50*W+S;POo~?6D>8MAxKKNBJLwF-n($<5-b4uhT%zF}5W}6Os$BJdpb6t*|GY>pYS`%4>qpJ( zxq4uQ(3;)SeCjiSfsM8KDZ1gTpTrNBf1P!THwfW9k#uI5WW7npzVl5G@kJ4i{+O)+ z2g)hf!or4rZ-u1zc$sAdJbm5#B?yzWUc9?=|^`& z@ZSYzEHMA@bR6dS`LdY65V(RZP~IgN&ZR5DG{a)wmz?cS#H@uTBCvpS(?|-;N2OE6xKNEQV{V*kHFMM7g0L2jUmitY&t!z#bfeaM_rwT)JWJ@ehvc zQyw=`KDGU)*7df+2;SXOl(^VnS0Irzp$Q;iaEw;PzsO-t@~+ ztzfgbcA3FZGXH*wN$D&8U!<^%g8~cTpC$@PtdtV)9u9;}(I~RQ@}!6lXZ*v{Sv2rA z0GTJ8Ssg9AoB%MMi)A#au-KZtc(RPrf(#jo{K z@AP!*G=CcHo1q zb1gnnMr3OQ3Zac??EUKIw?Qs{HmeC$hkL46b$b)k>l-~kn9KF`S*G!>NV5$@uOVsw zM8|GI^KJw&)Z}4D@&rprS%d=%!_y~Eqi`F~QC0eqBhuJuf$QE(qguXwa%sHkpwIx- zqQ;FL*he0NOz4r>fWH|Qg^%~p`OLDQTzr&2xNVxZ6)CQcx*2Pv9zc#jgH+-OKM|mk z!#s1c03c^gZ2NzIyX-?6hB$sivF*0@gD$1!50O=Od!uhhp?(MA!cDy}rwPW!KTrV$ z9aqkNP@ZZ{j5+F6I1p;(AadIHfeHG8!8MWHEQT83uAVj#k&nKkOTJq_m0!q~-G|-6 zd3`Z>5`UcL63EwQZ!kVpj@q!(uIGBL2)+MHt;p9$a9|anIlQPuT^|NMyc9nZW5tbE zSc&g-;DDZ&9V`emuS%SESPZP85%&ngVr2fmMr8fcQx&jz7%F@D`cWKMCLyvX#m*rW zT-njPH8utrdgS?AC1*Xwt-QGkuoovQp6`RRJ5a&(ewPC%-e^&3@0a`HZp6B|wqj*H zeS&FM886<>><UKArZQrJcK>||53tH)nyaXj>uLk>JiEV+l%6U(3Llbl*`T!p2^l8*A(z1z+T{o= z$kx$yFqZ-MdJAEr*9N?NIr#Peza~GMqSne@Z$5tjK$mh1oKJo-n^DUualgZh>)?vZ zZ=h{<$6s0pY#+uWa$kp`m`>Tgl0lLMVH)+I`DgpJ11mMZRJ@kBTJ&46xiuc#enisQ&QtyFGfL()kF zx(BmdH_!V~6nhBwFx)XS1}4|Bj(-~@R*l`K4+tJD-RA(-l9W^e8J(mh^0Wsw?dr9< zL4W_G`0#FL2&>=XIOnJbuwmR!*hCJP4Gn680(zX_>H4G1@myY7r}V}d8-xRGnzjr> zJMr#HnSt^c$A%_jm))^(KZ^!JIIBVQZ-R(`rN@5=?eOxSHo#YDy~CWWBxIPUzfL%S zal>_nfc9Zv9EoN!+!5TI{jihsVfX*fl6+*hNdRF4D>~&fTv=@Dl(>GZmpNhO!z079 z%lqt>^0cctHW8oSA@=9nE62BL8KECwJIgi)|4A_VL7G)iH z9*SuibPuw66V@xze9f%dn2M#K_z8NXJ75Zqu=y=&f|+*N$N|v2wSy=>p;rd~9&rYC z9{-ve{_)`w7lv^d9$xlvz|MC_6GkY`Lu=6m$qvF8{cnbf0Y>gYN_M_|y%gA$*gXC0 zhNCQo%3ipnaAv;Ubhxo^UcL#9XRrQeyB<@jU9|Y!R}f800~iM-Qc&M%;e*y8{KMOWwS_q4ay& zbXHnvLU(&f%64cz?3?H=eH_JQ;gIMBGTDo_7(6KGD3lTynf(-RGBBHT(%8JY&+`4N zI#^}-&3z|D-mEK}TF-gHL%&}U)RlVlLXb^JDSf*N3(s-fLXB6MwjEVf<(5@pO$p}@ 
z^-Rsp&3CY4CyVAgmOI(xYHhB+Hf$=)-mlqF>}>p5PPsC}9Q15#ZvAKv$$)SBV?jLL z{)BEPJVRH5&K}>!-~DV(zsWyMHP?JXbYIG`{?zdRx<+AOlOGqC$usHOGv(bg=biOy z>YUK{02j+0G4T?<*Y*PE_;$S6VQ+nels2uoSTvnR?g|7R9 zlfxWv-^eJclbMt2a}jcLvzi#{@vbXY@oy+V53hb8jfKNn{&0ayDK1>-w6C#UB7D6@ zWH)SXa!)<%eUFns%qg36OV}BZ6wto~vZB~Y2fgU#g@RAj=pi!Y=F#3ipQO=$Ebn1X zd5^Rw7F_P7p?8)}u;WiAINy9t+^lt{vcm7F0W?H9a&BkqL5cIksx&u73(ZR}Vn* zjQEQeE(DK0_uLjOydPHhByGnn?Jx|Q5wOGTINyYIL%HpUw*E?rRyw}>S)7g#SZ^&k zxmtiL>N`02dE98jEwmmm=eh7SAY=-E$u%W{;D44w+wyDV%>UL?{v5+OXa-BM4#9C2 z1gvBy~I8E$nt&vO6N?kQ}##P%YFP#;`fFu}#dzPQHuJ@AuyxkJt6O_Ikfg z&*$rXJ+AjPYy~R>Mu^&^{3XbC(~fB^e}-vbVD@wRxHx9hg8bWCLH<0(xC?5~te&f- zqN4J$q5XYy8qquNfk|HB0F69Ez#&IZ9_xU*t)upJxf^0~O-+3lbOD2ZS_HXtp!TqN zlYI_mT}2vf`yg>Ilr%z*>fHFGxpgAfBXS4%eJJ5J`X_-}+PzNNh(klu5o6-Iq{X&Z z)*d@gey3$l`k?#dIT7q_Y*i#UlhPd&p&D)0YE?X?8)CH^M4D}Fc!v@{I_qmgmU@2X zza_jdEzW%FQvij;<~RYAKHYfdCS@O_l<};Iys)fI?%*{LHZ?y)z?R=5dfrUDV=WEw zAFY#CZe1X285dv{V99ObZuJ~}c#`tWN+gBjI@+&lQE8H2G{SFRq#P%#?|!4{t)9^2 zzZR%+TRJl+lRO1fU2#n{gFyddfe0P?V#Bd~DQA45ueAY(S+;)Y9x4!b>*~;Ed7~fN zP2YQAP5>(zjs>9b20)C=?mo9DV`AG%@CTwJmcnn_)Er}1(B=g=VP&4xKVF;gQ__+t zGYeUyTmlX1{3`f-!*yDAV;QznmXP}x3Kac zd0sCX7x7VmY!V}18Y|-#+-L5st=$|jMrH|v1lIM_&0|V~RTK-bo zp1m}u#d(j-i-c3vwg{%w&p;>Si`S(e6iMU#r-MJ`+oSr89IyH=VXWrb2a|TRY1=Te zsxPYBXvS=dNQ?quv>s?)5rsh)#a_`ZVJ#hZhFVMF$$$x1_DoZx2sY2Dco7kPni6tI zz${tHA%izB3?G-d4)h$ZlVK0*WrL_!3qY>3@m`c;y`(#?7!Cf$1*M(;p0hXXU>S4q z9{6B{{3+3lF?8zb8@-`u5gXXkF(5v=FA!9FKD`{X;H4h5Zr^N-Dl%iL<{m{&2xC0| z(H?tWI5U>8f314jc)!z(sH0y6+@E#Sq9on4)84mPKeQXEK5;q!9-$*Id9-o6Jm4rM z1dP4j;P}{Q@wv252gvo!YwvkvbsiJqhzUVpTuN8HS0ti^Yuwo9;GW$mH-}U~P}nU= zK6k(VP9bB5dPgN+(BQYqWA4QkDczs)tocPyNF~Lh+PBKpl4%$fpfKAr?!`Wm<%<}w zKvx}OuanWzOu2P8Ci7vuOzZtyZF}2JVl;xnP5SI(+X zYpmg-*8-&i2;RKt*UXCE@5#)F-yWF7nURtU9)cW{dg3pXq@lU6L-@pQAf!=~u zLjL7BTy=#u_=~7F?gZBx+-enmh=&W8{ANwbXSFpVD!q#l+RR>OD-VRhISd#!>!{Nu z75}qR=tn~?&?|I!BpR*Qy}HWd5GTguR8p?K+M@1__*#V`Qy7sPtz(%LG6*H@r;Bes zhCe7dvN(S!I=c4)sGfLvE^(*sJ#8B(q5h(JKSpHzd7(QJ_|SKgl-IN#r3eY%^^kWJ 
zkvS`Ul@p6({Kif$yWhQ}z0rJZQrsFjl^kRxFKXS%5YIclXpY2Y3+6*Y+U{6&u%KU? zq~ld~#BNCd0Rvq_4n9ipf5vba$dNW4u~_yxyEy-01nkdbe!p0r`n!&FsHkv0X7hs2 ziRqOXpLG=Ag|)TWuWO%5FXOEAIe(g*O3R)9js0)?Tp$`Vy#wC8fJV@F6vuu&e`6Y> zOpW!GB&(>~tP7w5h3%V)gWw`&-C>l-1~*2V0|gX!n%MVO{0NJTTfDpU;Ed)>X3(eX z>Bj<~cT9Z|_8JM8rczZmBB7)~ZpMieshs2&_yWIfKpva8PmYsw;~M=vrAgBYg8(i z!kCj94XD?#p|O(ELTXyBN9=yb72c6@X5z7&9A>d4Y^%7(1gChnzSVep^FtuNDSCQM z6?ZgFOpG&GbRyk~0@YZ4#4HR~*kJoG!3d3+L)5)&ybd;#0pu5TxZdpjPjV0BcYbGt3m2-a zYlafV_1B?SQRX-aCwx(xlU^H#o)+uK1GXf2Ys;j{6F4(FY?Hk)vNEYKQ)-nr3hCd+ z^u!MVeBc;{_u%Dg58nMO62@1|?5MH;|48atPY&h0eow>XKleyUs+}LU^v{86L@>Uc zD*;m>5-)39JE6nMIizCB8#p%Vx!0pK@L(^3VA!4Poi2K8dNScoCQXSdVrl9*`8`qh zD*JgPSpxw>C$E`r9cHytsaNP#-9y)HUY}9}-+V(AEN+uGvK*)Rqcl7x?kROGRUEfA z@?G*K@pztKfT|Fjfc;HaxAg9wzy>7<9@eeYQGG4Znw404 z-(HaTrFJxeR~PXcJLTkO`}5dB;46E_*)%%Ej&6GTPfwL?&9LfaZgan zbh|`~#tE-N?8B_qjQ+tdzOOTQzz?nL-P+3D4akb;r77+%?~?$NqrBYAv!P|n#5JBl^bZG%?X9X zaTRF^_SIE6lUBxk*JHf@OfR=VDKy}E8gj!_Q* z?G?)&rV>LnN%#_`uJYCC&ciFb;N|!ecWZ3xsrtf@P2!#+9ZV^yZG6YOcRz9fuWa!S zT%J_nMmSXu^$`S+l>I}n8TmfDYr)#u*sMXCBQ-+^8pl>=srShx4TrZ;E>%VNpj?BE<}Nbk#mM3d0I) z`?ehFvW@K%uEfoAC}8h{Vj>`s_I6)YFoOIS-Q7QU?;)2;02|H@zfHvkvI9|fe_oeV zzu*@E&PmIi2+RJpd`CFEb}cw$%R+*Cl0cKIgJO;RSzCw+(wfRtnBPMVPKx_zJd*N5 z^yJgEAAzqwCK$GLh!|D8?Jtry0}OEJf_zu`4>E?bTyKa-1&Q3gAvzS zRkg`)l=yko>m$hhP4sIr5j+qbXN8>gs0_Rq-h(EoRx8S89FM!H3??}J!D3`_dy~H>{>|Qu2)a?t6VqEBOWp+L!XruZ@B5RNJ zG2ddPuXv9!hc?LkQaUpEb#@rPaZ0B%h%};^7u)K?9UgKWv@J3y~Xy~%QA zptydbPKSJv-dLSI60$`cx=#o`#Gxxm<>v+N8w4rPRt^B8l3C~>!|V-{BjSp+(N0pX zoO&e~?orvnKUs1fd)u7h29+J_>MvbR5uwQU{Lnq8KyzD!QAgS{I3|ObY`E8)3PAzd zm@$>Ue=2|2s%QYzYZGbOl}UVm#lx)tAnw7(O0fU05xwZuSH2)Ml>on!)kMBoTD422 zri7Nt??rt%nwUDeM4mM0o%&u!XN;a?pKIkCq4EA zUJ?Alkp9%6H|XRplN48O;HU+h>=@lH-^xA(@_&toN&hB8TKj7Rx;F9lx+a;&-uBrR z0GjH_TdC@4%~G zH1hl%0~qN)YJI$_D=fgFB)26G>B-Oi`N^19fV2tS=ewElH-NsW5+i3Nq>kjQCalqi zG;Iu?nAsvX$VySx@i}^HU|Hq*)~lK>E;6Gr(EBtYBTm5B5=SYBvDc-yirtIB|2YBI z%})=K`wHqDx>=ysj&s3+^0WOf0BCfEP2{(?)$e(yI%}S3Z!i z#vjmJZlePOtpBq95!6nS2{~2tawoso 
zUx!R33i_S(U4O=V&&znR)x}iv_qJwx>ur;(9JVU=Fd&ueoh+i!cg-rX2jvXq$_e9I z{wE#gp0@C~TM_*cGBKJvo-zNYnGJ~Ti?C%j+h^D_{R+gxRRIX){H=~yg~>9XHLcUy zgGNUY!xv)Fns*b7!TKEWhDF-0qD3zg@McM?yS=1$7{pnlD(HU6Yo3X8J;5xQmR?!v zP_5u~j@sdeeC_8Fi+X)W1?3J)9UjcrMa;=J;K5xpJO!$1)2oVB%8r>`VIBzmAiump zM+6g=%Yv)pyP<7G$}z#KnzU)PwZ7Vob)W+;(cO(hhCSS_%OW9yJxjlJJBJuzyF=xp zJROW|R!csvw8Znq7jUJ{B291c4Vr{u;G)u_PGemg9J>+{u`^>j%)M=Iz{6t3q?eh2 zP--o53-f504$vtxwSAuPw_NpX-@LG(N4#kv(Q2}bWZI>_So&zCrM^pt;aHY_`4I=3 z$mO2m!w>|J^IP}z?yK^0JH^$39`N42eCnxsTV)e-+s&=K)kH3+D@0rpJ1*%_raciA zkz+0NzToA7=3?V)|+b zp3l>8EaT^(fLl%SbdAwmeL^Ex=3e*I8;w z{BE!ORu{Akq2H5!zk>NaVU95jzotP(8x3;M4?r_e$zq|!C=n@4jj`&Mf8cJ9nqR!W}spWr( ze-Ak^-oWA!l2fN$}cgvF)}m!TIsF%`fD2(d@j##_%+C%E$;Q4RqOozGElFP zeEF$rI3E>56#r30y%w*cvdIo|!P2!#ubRGqN6%R^-31*!qvhScs=~O4*jJ|d`pX#vV&unU7#fQMG zX=PD$T9A6TQLDA7+-dCZ7?+BAqk+{_d+|(1{z<#74E|+poHQcMYC`$XY+cSF+{Dud zcfE@>=Hgvs<)s3F`z5AiK$ce1<>z!IC|jv`<{ zeizkqDqVaLtdxjWkYN*v35UO9i93H{HF7D7CM$@1Dli^bk|s3kMC#=m4)ohv)Zb^= zYs)K%2Inp6pF5eHBlSKZJ?Qu7moF`wFaK*5F>_@wa^#hk$G;t{7sz&U|Fl3)3?hJ` z`1h0L_3UTJAE*0~iVp^TSZU*Y;U8R@O?#gc3FolnV@+jXTyiPggDbSe=#uH}C@L}Y z4ey=fkeq00}l|!Va3R4d3V_i6^w&as7Dhle~DpO5M;v{(|b3{w6LPRUzKNqYf zzazjbY~e@#=aYwENRcTlith%?r9AivQUFwqc6V$GWo;Y#p;Cf|d1>ewm zDGY@~TpIPuS`23En_DWE$+##yXAfWP!^yE}Jay2A!f0_>3@UgSl70Ww54Vc@ zwTtx?T>&TQfZxBS4WV6zYxQPGhsQ{V|NpqJa{j&&3sQvRewO1qH2G4WgwyVHU2WLu z!qJ?y!|W?ACT!?TQ{PhapAufUdvW*~z`}F?_xQ0b;xtwCu0~`xuW%FAc#LG)3*Be? 
zQzO1gxjU?~`IE1<68A&z- zOJ3DbpdiHMJG{+9hW&k&p=lh|`~&Mk?|8;w9-fh?~I}4a#_H;N&fOmWAZM9NG8fxMi z`GWXocyYk%Mmawg<1djxzwt^_rC=PAm!WvQk%3sCFnA*!afOX!y>r;U(Z8!!<5YUb zYCi{D^VL#vwi>)nsq~;&ZOQ%VoSN<>mufhtmN33pxMAGoz-o8u`dUvks}4?f**`#d ztv?ZN0i(z1SVrHaBF{Oyu%U%hP|Ftgqp!ruAM(x0OnR7G1EUw|40aralUu6y1;b`M zdVQCNC@%CfGr|vkU|@bB;Unxr3d^)b-=pwOpH#4mK)M%>={ts2YODAk4zJ<5ZxrPx zeU+=+G_Gn=Y-8=`_}hl6c;EexJnyng2~%VE!K$l1IcI;3KAQ*E;7VR1HS-lhryAhH zK{e?NaOD1Ab^Vv6_qQ(=o{J>uN#A1O&MVvC=0=8KOS~so!szX){u!S1TKF{+*U=`s*f1?4hC#uhHPB(GCmYqVV#riI$tfDOFtJWiP~ zOcloAJWP4Izbj=QofpxN<@BH7P1fVs2O>L(0|Y#^=b}=g@=+W0eLM%hZ6+@6tNd=j zKf-t%R(amDQt~jw7eBQ8fyln)J~z9KZ#%smBKQZHhylG>y6{U=!`)PQx(_{58PG{DqpBJhLoAGw2JR$?aJkHbo^)%uvd;%6zFTTj69o1A;!D4 zi&CoM#MOdCALOjT<&5pTyuMLBCIJ|ZQLoY^e&9cq;*TlrDMDj)E+Sj>()jrX@k_xm z2r2x;Ji%=B?*AT-xQL<>0rsfJX3?SB`1!OGu+Ph0kpBO9bO-kU1Q?d92x5(K&Q=o~ zUJ&(lS3>X)P&fBB_l;1EvH7%PdslQ1YeFpxJanuPO2lLYXB?^>l;1CU&U5t?=_ z?|6i*-NxT`&kprMZ3%=&ZuI??iUp~p+H|FueeCE2@6iVF^@a)^Ocha(D*Rntu>uIm zTLgquVU=N5bk6SbK#IzE}&PZ*w@|dakag}v&cTNQF%!gj*L_M;AK|fF7ELB5@)z3dnBVHQD2p^Kzz9*JjyXqvO^mEM$qLT}bM*v@xhF(h?R_}p k_raXWj`fv?Im>JM6Rn>$X}XYtn!0jK1`wAjN(|su4Eh74hxe#OHg4k8^IecbK9^ zIY(!r7G~_!Ejux(DJk=1r-faTS74yA5vDte|A&tizlt;wkTB1#3 zi2H@CgBbMV=JfrfotU3J=qY9qW)*D*4yfMSDs=iQjKk#Xy%*ry(wI$&Jtwb4u0P); zjej7EsKlv~KW-=N&;$))d{~ZR4*P`4>PaAk>oF0U;rV^NIc)M`)j^p?c z|L-t-0?6-M!jAQu=|`%p5Q1I_NH|-a*qsim+fpW*=zpvG{2=T?@(3=uZ*$DiLmuvB z(d!6OxHPrIu!1!F*{w8Q7&kBIEtWfOhcCv}_um}y>IjM_`aU$52^jRh`Clsh8?;Yc zdc|_94JYs=*d@Z~mB5I_Hxu{M!pOMuB|hP=*`yUUtMEJQKqi<16fd~Jh8NP1>@3B$ z^XuyXld_|QG3dWhcuF6q&Vz3TLG4!1>!U!FWLxaL3*?0yG{!lb@ZS*4#3w#+w~zg@ z-4n*K!Y?NZvHVQ2p7oh}W&Pa}X}qN3>+3>LE69v)*J~4)rkNgZi3FsI5u1Wec=>m| zifQ9ZYF(#QQj*zu_b5+`|W=bu5Cdw4aacNsvZSf4v*Ra80A z2_$6ueHgI&9W(#85ydyDL`!@iaE*h{$Cr?7@zt}H--Vd>692bKyf7{)EEru!Ehz2T zuf6kFg3{LdWfva7d~ z*?uv}-Xy>P3ATb8LXZg7#{U@uZ=X`-+TfSmw^c8Ka4#^V4*AP^*Et*SpS42^&&fKD zZ+I?yu+$M=ff^w6aY_U0UoQ-bwOZB!>4MI(+<2<1o_bpR#$VzBAvI?Hx-zMBd~D#g z=bA0Nof$tM`LahHw505da~0s60s7A)1~hinnor$sN7$m(5q;S!7zix0O6L~G|2{UT 
zqVF&TwRX-kNO7W96ih)RpwIDqG_U?ySjlb?*HO^V`nM$6zMtS}H3v-gUnb)+?lAFKgfkm!S1g?_Q5UL0ao^A|G3Cl_c5>_v zllQy*dJA2ZTA|f|vJY0)R?=BjU1bt6(yDK%KML0-LfQ2_IQ#o$Ag+wzH6JxHGE1)z z%p6RahY{-WnSJSMe@!`-^bjP1c}C}_BZ<8nml+v&f~l=la01$U?5FfwlE$Q9a=rW8 zCxfzuHVSPbmzliQ_et3zB2!-%`G+U#c-@M94pQr9W!hNIPOEa8m}ius8T?hed1_1t zwI9{vrirOMoZ@vkZ}dYRdW}lzEp-BSEDffs>FKE$ zwOZZbLVMm_k5v5)ef4n3gGW4a*{;?bhBFvDPA7VolOs?5-CU(%SCB7gPY9eR#)&po ze`5XqE8Mxe3BL(Mp4YqH9d10o@3cextG=kQ%Ldu187MzZv`0K&qHk zL8l{(;a<4FjGmAEY%mfZS}oZQTr52KtkuTo@7}GIu;Mpq++1g4fsqLB2*~u+|Mo)>S+Z9CN9?Zh5PjeVW&A~@fT{ez+ zQ%NS%W7bA(o`lSNXKw?L>lMbQ0pxP4OnQqHmkqnhrTz!sbk_Z4Mj0qw{bI8yQt3;?&s~ zUMljj(~W0S`*QAL{xBgCMQ1ui5#qcVcs0t3$j+0a*?@k3*i^A7`cTN1;T@VN6nZJlUL2p|$FpnDU z9-Ci45TQ_T+`Wt6>B&pEyo1%epJyepQauGnU>8+Jdr*72zzEQLjmlYv8qS~38yTWY zHex8aalYD^+wR>G%Kfv}KIt*Y2X{%TmCIfoaao)ur0r@#vPAuDT7jfk@V2*M(j7cb zJ~Usgkvc0u?MZfQU?*_}ITnC)#=Ny{aqpd!VJ|XgE}1`(qt@Nk${yh)?6A;y0e>M_ z02RrYu_k@!wprSk2Ikt6R{ymk_wZ%sQ6)5EJ}mPDjZF!`W^@?-_E< zx+v+{1>tt}&r+iut$yHCERR+;>-o-b_--y>p1CVTG}x~$Cassi*$E+*VniKW#vxx! 
zHJa9L$%6koet{9JXG|SvXE>QHKl(cCa1qDR3^OEI$Qy2nWnuyc)izd+qq0T?JyPiwPFV|JbXe4H;#Qir-KMqJCOa>yl7+gnrDwz#t@pH>VxOL$s z1u5EJh=}lXMQr(WMX%w&wIx}1RyNCJJ?fO_gXwfDB~nIN0;jM|rF5AINIUIj4vWPb zLF=b8xbPi8OzfMqbIOaq0gDY|0`!cyBhoqjs*d8%40rveL6n|mib}y&?Y?&7{O;yE zF*A#G^GqtcY!zAgJ7s0RmcLEnHq7zgEo!9YQ4M0>l&RG9c{IY;A=h@=ox1ReRVtk& z`!6?PyQ5UW!9I*sF8r;=4Ix>&cCv)1UDFnQ=k8%|>qFJ?$;2xB zXDXDMu-_p_Jb}+CTNQQl3Z#bh{h&iH9@9fea-jw@raUd;xWz8O;dDQ|RDrVW_&&z2 z@C=Tf22H(!EqNEDCgfDd77Sz1XKhHHscE6+UBE*->whgDWS_CX50pYAo)Qu>p(aVfJOX2+_yupVo%is{pwf-?z z^+8P(JTVDu-kv*y|1V8?9@r@>~tm=Ot9;v4g>X zi~{ZXQfJ%)Ic9bu+Ypm&579fEJS=V3y>DJAS}0Hkqd+r0re&Se#leUidWD%HCtm%A5X4$}WCfUHlL| zaxg8JcVFOZMHAVrksuw@2b25Gr7^m;V%hzLR*&r0#$w z;Gc~*X-C&Dq@RDA}&3p>C6MT!tNV^MY@c-Q-HrfS*toEz$n+G3%>Vm5RP z_MYh0O#0zrw;UttYf}dpsAPzO@bl7@FENM8|rzQf#Kdv$#Zv?ZWilzXw+us zm}}>WK{dUq5!9^oI>*m5y&n}6G3NUB7#P7c-W-b9pO7we{#zHXUr1X1chKAKK#pvF zYVpu$t(OQA+&yf+6UKWH>w3m;tj5~|+GO-x)|WBYx%ENO3KQ;oKgtJ6l1F*$XeqL&y%0lG>Y63*C0wTt;x zUhT^EU8bj4%!Px0{v7+T8Njpy^OWs006S>w)JK>bK^;xm@^@H5OGzU8I4dMq)5>I_ z-;M5#*aDt>Y@aJGb*htR#DF7puPd2G{>T14_3r+R&2pc7`< z!B#ieKd!gW^&bc&>fK!efwcRdeeHjQcEa%9G}UK*G=*LaGxR+t9NcPPud6fuu(Byr z{^L{r4&vJ+UD&VB$qicBgJAq%psN^3)`K?|8u@24-i~wZ`x#eh)te9f#-hT$_q*WV+2!;Q^%saGyhdab zFJUJ=eC-9KOODfH`e)u-%D@JpQ+i)k4CeTE4(B3GI#a3&m1sI&s?I$nJlO z8tq*_rS=unUwe|;bIl17X7^|S+!x>V@sp*3Y^lz!IbQs*QryUuy81p^y;z1Y&&=2- z)iqeud}mpqrPG;G1Nl2$RL5x^lgQ^JO-o|t&JgySw$p0P5$?S7f9xjgVapZ!!P>*_ zvWF3{l%X>u#>Q{|YWDz_7B^J09Ft`?+5n&$5uOwn+s`T(arn*zAKBAX6iuIEU7r!5 z(+z2^^7KyV!I7kv`-h|G{y-@jxNq9k!UZb9_he}4A-NjhY3Bs-@d zKMry%s$s|Jmuo!7rwlp%C}r>%y8B#L&PQzZtrfSz}xj0rCh zNZchMlE`if?`CUuwEhWu8nisJR5$ziT>uhuB&#NUG+wm*aEIg=`Ur}yHOOe$aW}6u z%Gf$E*TnjFmC+OO@bwL^$pgek`f}U*h>J(2uvPjOY;Fz5+SFKu6+ja8>9HVdmw7=T z&1he2r!L)c;~WW!D+=+ca265LFF%%FefxGf3nK&-#25I4+WXt`zeVCTF?IYh$&#&% zxEu#v^#PIM4%p$0Hm2-?ug4&!D5YUsk^;T5xuQ46hp7Rc7=D!qaJ^VxE3_fhE1GY3 zxO-LA>#OcdE*vVio+{KZ=ZL)uLv>3&{tro}@ERwgx6@g_T;})AHh1^GeG67*#M$u< 
zUd-)?enWoze$^IUyn82O+LE%SrsjVKRu_9`m*2zXX&Vn37vnRpb-9Ev2Y!Zv9xF81GpRwnoHUsS~H0Sv4LXI0EFgLL=zC7Tj z>vN+G>J~ljy|G?+y)HKO_}oA+_dWK_(RfQZ9sK5DK}r2$q*F6G%J4!>wgDMX*?{_& zKak@PTw}tA?Go0tA-9x%NKZ3*d^z+GTxaOp(Q6eHFiit+Kc_sm#$2nzN_86vjE#Qs z-Mz<9$p|UEJ=1C->W0;zpVf_Vyh^BGp+nz9r(I%&L zSaet{XAMIK9>}g$sMv)3RCC0K+V5H%aO&zls$0WWxCg$<{O{A?#Pmb`=AY}<*149d z%%ZDQUwAl}LK06qI~gv&e680fJKCu|j7>=WW8QS@MdyK-qlOYKS9L5islZDGd8yV+4}E0My(9lke~ zCT5%0r%1KX4)_@=Vt*-+Bn_P{0%oL1P^=Ng>M-{YU64)K`P$#gwGiC4cUJQJ?92Nw z6g4Bk-_*lk4x%$RtT>V0vXriWnyWp>$IVKiPfH8jJ23ssR}P^#e|ggg7Ia^t%eYZC z8~hM+z7gDqUTC3mdk)DAyp2JidLyb`cMPm-Y;vosH9STJ2epFQTO+iszsE9j^7A*g z-AzrAI$ESbUROm=d4Wd2Mt<9ouMl&-7LMr#fBm@slU`qWP@CiL{1gi`qRDxbgYDuK zrT|q(rpaf+%FxSe45JmmwG(NkZi3vN@}3xOwdu|czczyArPAWkgR)#Qw*kH)2&&LFb*~F8Gior zb6C#l%#yy`Qs9&qka{b97u#*M6nl5xF6gv26x6g%5syM(F3EtL$f2nwq^EMAEHZ{a z$U{hW%wL{esh1Hal)qyE5TVGepA0hLqF-4m8BTb2!uihEP*39~rlxZ0>$N>@FSaVF zbQQ^V2&j2pxVY5&GD6c%?dKbc(dn+)KPM)F$_*PIRWkXUVQfuT_LVm-h7SxVk8|(D z8v2dq8*`zx5%3E`cAk}1G;1K;Uj~&Yq*J{c6dM}bV>E(wFLUF-9ax5~Dhl&oQ~J&Dfo> zjH!R6IF6Vr=|QTnyhLS7fpqyZ{Z*f1e^y`Q=G^#e6*h`^$l@bgV?T{t`BXH8Wl)mW ztzaL$Op1^6cGFMO`@1W$KeSnT+h8kF$#*7uEWUkO0BB(Z9M3>oE59yNOM?kfsM5?K zcm&@>(1f_5W3H83MAIVPv04g|yNS7Ot0U$uf2qybqMTFLxT1?e)pd z9Uq`F(Y2R6`xh$7Jk^w^n(5h^PbiBY?)7C$bqsdL^!+@c>b(9N&*FnwW5mfr=s)5~ zp084jlSS-zmaJxYH&z=wh{e7m9m-4V1LZayu%i$(4x>Dc*Qip%9LZL9eJZll&t$0dHj#9~A4NGWqkyHtmZ#gtUULof79(P;0-34URw>-}$$ zw3Z$}xZSk5pIj<&KH-ge=C z*mPeU3yz9%IqS|j-S664QBO>-G^#h4(E0;j`$Aqj^(hvaQtKH`i3ll+L{gEf>y!By zmsR{QsT?QkeEYYpBN=!Q{(+rQXO}*ju#?K9+Uh(9yEj0ZGT#YEx`kkxb=U>^oLxD! 
z!O1I1`sG@df}pT6i!#pv()*A|@@h{;?n?xf>tHhP8F0OlULE>Z-@}I-kG!Yxs&x}_ zI2k2BhF+fZKAAQOlp&FzgkH>ubxLbFB^f#J1F$ zwX_o@wGvTY+D&>gN^xvI-yKuh2*}Wya}IhCY(AtNJ-{6!=j_Q&J;+v=phskko*T@) zIea>vZ6J}$*>~{Cr`M9T*KaS&e5Q7_wBEc4vdM;0=PJbLSlnjDXU2sIS}!C{uN&$v z$0Z@nry8CV*@bQG(yJXVPW^Is|3QCM-S_T?fZXa|vjb(1kkG|z0>4|D5&Vco*e=1+ zr)SyY!dEaSiY&v+TvmN1Uz)3#zR3t!#?NY+3mVssp6Iruc-J_VDG##a_zm9gZzC^S@qzITy_}jt6U4$^YaaX?m9CQ)`gQ$`kW_>eKj7!cxQQM|TYG zyDc*fw%51>J|#c?iSL!mXrb{(^xJ(}VRDnS>fT707St)nppj*GZ zO0aKpzvKQy9WEcRGcA{QC5+Zqy_uzU9K~JP{i`d53d^2Kg3Od#*SND#H|`)?+BBC?{7{PC609O>1)!uzG`D@qtM`&zKIq?cLZP)B-$?< ztSuOHUI;`M*?9aG}?+S*FbDj(T_flKcfNGys+@$i*;*+rZ{L7A8dlar|A zOe^KB$u-v64&t7MZ~t&Z!2;~vXcoCgaFa*bA36z;_pW6}II)i~V=WCoguj&f^xJQv ztd(kQr>Z)K^xst$%$xHKeFb%y39yScl}A;&EyS?A5j`L?3Ql^dac5^e+qYsOHe1m0 zlco>xLl((o$lv4DyDmf8gjnsQqy#Zv?fTwo#bYNYCVnw&bWql8TkZ~7*~pF2sac_> zZa$2T^%@s!BrSBI`vjml7wRVIm_WdCP+9jnk6da}d-;=f_6it_coepdyB>*|&GuXq ztwpW$Beo|q{)r`|}CT)1aZFU-FvMBcgkMWg~(g^5jhiUfJ()pTcwoU_QH z&!s*4^F>@H?=Mzxk_--VH!qQ?bJ?Crg@G?G0|l%HDA~0tu9mlEsx7OC;f7PY6`h$X zZ&A4=zksSiM8A82Km0f|NUNDprwI{A?+(KvxL7C?2(UaWKda z=_V4n)7#2$!0x>{QQVN)A}1hzv5}i>Im){7?0hvL_YvW*-Jzr3rsV$xi=Aq$SpVrI>!>D$Ix{w9wfI6q)zBwwCTK30f z{CR{vIgRvQPq5(}ThAaR`N--wZy`}O^vuZ34K&@eES_blB~xxuKb=e6>k!nYwtK@! znLlU$a(U+v{-*DJ^!QP!=)=4={&(dhSBR45w^imtB?M_{|{mySJT z3Y@0izXr|BOcutT!0(NJV)CEIQIn1dh(9Vb#$QUN60Xj15Hm0^NKCo#U}j^8MRtlT zy0a3C@4e79h2@v?JRL~6?7%EfdjV&8ioo+)ZkTt_iuOWkd5kX1lDb+ZNxo$ASdFW? 
zq^^_}?wR9>U|c38UD0`!_LyrM;zg9oac;~E7w+;HW|!5-Vv-h*+*oF;W;7A+xn=;d z41P%tQq-)tO!kesNPhccsXj;Y!I?D0+V2nT&F*fu4Y5HUUs;T6fB4osm3BJg;e69D`at z;YofXb8p!H0NSx^P1~B#0v?MeWcBWenckIQ7*HNCil)pYZM8*94 ziJ&BmQkOCe$`i6kN<^}<6m-i4wSiQT8vk>%o!hD!C@%aPRCz8peKNPTzyyoW3F4T*tAi=9f&RPjp3RUz9z?6~ z$@H0hOq@A~+^e^10Cr}V!U+t}$0KCS#i4Il$@m{M>O5xc3GkRUcH{Eu+0j9KW~C~_ z$lGTCTN6w5#%ncn`q`)7;>xWR-&kCJ_5Po;#6SD)EB82ap{FxWmw|!saOb@ ze#-OalNH-0#%a!?4a+7O%h(U&Jt7*+NT5UwD4*|3+ebp;x5YDNTm+(v@A-wdx>f`u zX-vKSBlwyX^=G4dgh*(lr7~#D!1W^USE!abl5p6#el)FTm3lL~ucg{hg7HNt-~Qs% zCds1r5cfP+>B*-cDZgc`Eu9{f$j&25G4Zs06P&L1yEj4+;gI|K1*~})AmP#9HQ#-T zl(YE6+6i19)0sY$Bu?x)-y9}Vl`v!_ZP)7MiypW!i>O4+JF`>YkzqP!{n%ahQIiMc z{%SN(l!nmt#k03PLWVbS2Q3^6S!EZ=4-xPyyIx*6k);MnAnYPG&&}UnGSL2pNOB>r z^C>grcusI8slm>!(r#U9y()hAGoqM3x{`jwU}BI-qd$+^w_~cpm`*$(636uZv|vXIqvD8uMu=Bmczt+}2g8w9n`pEb z;kUQp*etJEDwTuo88wf&V%Yh<)FJkn^xy|deUAs62gMA~pq%JoU(Sf^#On59Uw8Z2xp|LS)siyh>vQNEC&Z$Mk zg%X9%n1^s#OJZ~7KZF-%+y$LW^Ro<>JWj@gIU*L0S)Nd1A^=a3dO81ARDaj1?(qDDFFdAU-OaE}?8i5lZ;;+Yg zx(2u|LCalCWCfb}+rz8>@Bq7*lHTU*6-bA7>w0&*mg*G3O5jP86jeChC5 z{tsrb{i-$m4$)FplfA*FF*2iTyt%pDolkz_9FRHfpZJTSQkp|CA6EpQInyK%AtJWV z2id&9=HXyp&qKEdZO}5$mq({y)3vCB^b^NTbJLN+7^}04qRyNbr@NKKD{!lRa`xq# zbPtgFm~PcmkGz$m5jguA*F&7N&x)@R23UT5tuhOry(hDnJCY zGG~eJC8~@e@-hFwv7N$exb`Tpr49;c7fIoSI+&&hcVuM{Doh7MsXuBzq@208tZPtP zAap!QH8xl8ZyO!_y0wH^XlK!u2Gf+<$Og5I%cVP38>}YS$e9R*Xlnm%-l}j~6=b`u zw2*L+?poF9?Yu?eFg zs72O$%=cOTtZb1aAao?#%bpFQq#wh8(9P==SJUbmYUUilgY&F-!)!fX$^ii@8VXE_ zMPj37AHbzD#ODt(ByP@D8mLH@P8A>Yfgx z=7b!RbOHXyxoti#A6$*OdTLY-7wnEiSL)vbeH5y79Q>m6tZmEDxu4(FTMSU>1MMoB za7uM6UD;-qr8l}|JLWy^e6AN1<8xo}? 
zenOvbHKpcqQ|m#thDO}eyj21#TcGft>>|8faFyE&JfS}#mH}~qy-p+72SHMN)J669Z0!X$N~oh{6-SdJ1OWV zPk-^A9r)gS`jwsgwTY|8Q%&*F0~6il2gxEzMsI^dI~~hf1A0>DnV(!7&xFa4srnmS z#XVq|E*C?Cv(suh>i!bw@36m| zyVLBNE=t#yO5pQiLIyrkX3{oX-#%&P|K*A7Rb#zfN>bn!E`3lgHxB2NmzeC#_wZV8 zkJvL$CFK>_&mpXf<%ol*23*PR7gbx0wDn2~oe$B07UfN=@hXY3I`XE;KVUVx826B0 zIW#3OuTw5|+~0SrIgfD2_U$cKM+_(MAF;TW$I88xa78)J)!AyOwEk+qX-esPsMw1s zoMZjkl{{;?!K9VeHG)isWq$a_{D8I*HFjB0LFRNvo>BtODF-VSE^*|$303OL9@|R zd_}lrH|me{+MbCT=GE7r;WK(?OXhZ2J1ThL1~treYq7t`j*={7)ug-q8p0^8txfeu zilw9xBlzL|VYT|+@$YUfQ{3B57bl2O-%ZgFk4}(8!WJ3%IgP6rP!`xOk!+yQc;<6; zHbXRn`R725dY4B+8A+_-W?#3t3r9a+yEk~vQZpj9E8Yzyk-Dn-4IoFaHe#UWc35op z-U;vF2daw)i#W@kEqOxzz3qCFgB)zcp~p8OpsdJm#X0QgUaX}O&f}!V;ed0Ueun#KU*SRs0;n)P!df{%zPaCO^ z9g$JdleZrlV?ci@AI+{iC5hefA4Odgt=7$ldk7$NZk1+e;&q){tK5csd|$(BA~ag0 zU%*Kc=(r417%dx$UlXuCq_sMnS8EKavd}U?n`627x@x+wBVCT+R z7)MqSJ*zaLEOWNYKU1YWv4s5_%y^r%Cs1+dht_D37ZY#e)yf#}ln-P_XK1EIy7o1b zZe_HaE?C&DbGoma4z`}&w3H5;x_kpUE>GL9t0(x7A`hrY5N$XXL>~!>4!W)|Zb=+! zBe&%5k3(3shJ>j=>r5Q0%*-Y}buH3Eh^R!a0TKH7d(*Upn8v;=e*&-F^v@W5VT}zu zqioZ{o>TbDVU`QcF3IcB*7y7wOc?2MIE&go;X$|~drtlkBac@PRy&nLBQ``1rmCF! 
zNjNeBNaG)ewxmNN!?1w!s8lVqQD`rbYP#?$Z)y7`L>yIbQv2g!h2+?ntHa9X32L;YZlY(MLeg$<4m~W7gb~g?L9E18d;82 z>$fW-UQyja2PXaQP0_I&>%)_#xwA}O{QQzyL8K_-?Vg&rnTAJN9E{ZdLJqD_-=k55 zv=LWEjQBgFug_(GM*uUwqf{Yg!Ls^63EN8U`D}L&(;Zi=`a!^W*M>uS2)jRS6$CD}kxOv&gG8k?28Tmm%IBR@Ug3>#aS# z(cjX%rhPgo-zPJ>8yE!OX6E4GR^xNB)@l1~bkU^kJ)t8sQ*?!v)ys^rbTx9SY> zKkBo^zJH8ylW4m@q{484*2~+o-RpY#?pQvG&!_u8??U~M#i+qw|rAPYX?2C*7ZeWr5xAn-f zD1EWqrx*5b?G;xQg?i_~aE3p+X(E@_hs&+$ia^bWyPLsG1*_YYwnd$#+VG_^zU}~} zVpfwegv`fRrizpLgApRNoK)%@|;yCj4ds z>%I!qF?$ebT!SJdnNHs8eu}t^d})f?WpV=xOP@pD-njL~FX?=r$9EUAYoQPXvu{R8 zCVY>piY%>czaNN0B)z-XW`8NxHZPF4PQ9cirK%uf=QOb3ukduJyFU{*M?hHkU7L-q zxHNf$v2D9wsLCDSj?za~HaCkt`t_UKKlClxny!gQ{_?DxGt&3=`V8Qvyb3mTFlOjr zSBO5cM(=^I*v{sfnEo@4pgh7zv98G=xk2S zdNvkjM%kld{bgcqatPR@M;JS`-Bz&S30jA*j5pfYx>$tEzK|>pXX-kZ@T7D51P9Py zvM5K2>Wu9kY_)&0G#hD9V(_xOC0<}y;DmIhta^-+?+R28Y&8PRH65Ql{Gx~DunkDR zGMt8=hr5MP_#o{b&#kZKBaA-Smh?HLQ$M;@!^AW-35xA8=Sb{<8e}p^ke8KJ%e#Ag zs3NQC(o&P4>I@~1nLO4{DhdpIb)r?MO4I_@7l@&8?Z3Mde?umnGr-in!(#;D*O&g3 zKPXWKw(YW`JAu*JSvNy%#ft9KXA-n)*ow6YOh@?c?0cQmzyD_AOuPiNXQ?M^nR)HR zlU)N`vum#2?sac-96J-W!?CJdTV@A|8QXypij?4nJ;ez1J?f3nh8iHa$;~!#x%Fis z&M4le-Iu;2Nt1=AMCi&l)&=CPUshAXg1mOyrXi_6L4R%vp5E`nKPs!4l^5wqWdwiw zGqgEg!o$~hw_xLHQVR_?-Y{r{h`+l_PXwQFnapDf21aScG{K?DMq`Q!m>PHKHPHT< z(_GxygN@XiNbj9AY+9lN9!*SEOg^|-uIQMv z-n$0PpEBK$A}i-qACY}Wx=1S`@CH4ZqG2g@n;iLr(4^)Q@CPHS-SFLisMW~VES&i@kdwO^FzGalEy!fTS~L5=={@1!}2O;Ibb(W!t4Ve-N}ru zO0XsVk@lS*^O2o#xX=a+Q4Vt6POM?iEFfgJB$ydH2joQk^f={=n#oGWfV3`#$WXj{ z#S$hAp0_*n^h4Zc2lNDoEH1h!2EM1G=+Y#UGEl~}nHlPZ#eL)97jif!hX z4yo0$9CaAW@dM_dXDBu}+Ewlu>V~ktFPZOjHG!AAs=j(rsKR77V3$B2?`POCXV0YF zTyW1MZuGP#&9>njm5mFNe=CoZIX^U%pgqyX<9JdRXsT=1Uo zq2NxzP3wGnChW2gvjuT?Z{0Vxm!-+=JwN&RtgW=MbSUe=Rr5U6@f`{PFcsSO?J879tY6JQ&z?^4ngbA zQ7LE>b~xFsF(;wX<8J zzC2Zf<)j_kvk7+IOa8dzLax%9l%os7zWSfco0y0nJh9WI-Gd(El+D$%vQ?OX*-6AI zOx6Q-g{vA2_4i+|Y`PqyBWgZ6zJ~8B8+(bhpP>lYO$>tCQhOy0_}w2@wCChtc88EC zSX*1a8cE=LqpQ0D^a%Ae*w25cJX`!4VyOtZDyOQp((fi7Dl?Hu@dnf=y)QuIgsLqP 
z0~w3Yw7tGx7574+{GIYpr|RKB6&|=lki?a_Nj2pOMn?F&$7&ey$L*RL9JAx(BB8W*^+OH$zD%|)_4BE;(Y1L9!zW-eiWtd@ zcLHmW#a54o*~KuYRf6t|<1PDnm5uM`5~oUGsXrQKVmbfU3$O=oM>G+uxY|$S{dpcj z9>EssL@tA-CR>YpD|&h`^M(8@j*oJNd8KWuqQ%0K_z57!@{|Q>`I=m~)^Iukp3{sL zOZ)ze$!oj1iW-b!cnF}jV>8_kv^;`|@QYpZ5Xvh(g;RRAqRu#U(B}ak02F_EJ2&P0 zb;jlVJDR*_TI$Gg6OXIf7AxU5_iq6)Si0=w#Ws*ahJ_IL#4t)wHhOV2vHEt%_6)YL zk-fPUOz_-^U(S%l4TDPo#nx(g7n5FE4vw%Lm*Qo-xC)IYn`% zJTNyn#1V0A8H{%Y^oa`LWRrrI^g_g_PCb6VnFpS7KA9PN}v^NrvZr3X_Y~(AemZebIbVES;*uCjTttLyf2fD%;fUw8( z&}EyUP(v;ncvMkUn~#6H&}_uIt$#m;6#O8phi$Mi^o@uG00w%~y#YHts zr-zjt``IzxG-f`2c`xFZhK0Lwiq{jD<&ZP;zEAfO8^#;7A}$)HaPoHuM|OpurJRWK zM@r)7&bd{$>YR5yNFCZK+rEIlz5H>a|EEnu7LdWz4Gh8;THJC0A$;Jn!yl`9MDp?r z^il#nZvjODcryA`2KQGj9tSITJ4Hp?i2*x~RNVIzz!yC6gM)*Wzx$0JDE>d5zA~Vy zrfZw-?rsF7K|va%TRZS;Y|LYp@&z}+g=z!_~5!CK+v_BhUU`l=xr<$W>y7RkRT1%|<4lGNL{8SW-SAaru z+MCJ6!!vglp?G^q@7?B-#8A1Y6%l%z#A1goNCCT;gxK5KBEF0W_s_^d&G(4D?&g}X zA&wu;C%~;^$`dO+uJr+yI&h6pm%o*9F4B`B5=JZcq?;oNmZNG}Yo3RPXA?A?AJraU zH>({}+^Th+oapj&#iM)rpXsbr0gr0a>@ZP&Q6l7cFA*E$a0bKPB>6@q;fI>-GMY1b zr)p*a@B6>~Ll1JUbg=&&yg&(z1=_bpd64j|d9^G$H7sQl)JC*2VC zf97|KP6uC|Rb!Vb#WrPGdu0#4kyPV?C;cf-DF=w9Rr6GXrvg7Aa5Dcq4^3P=r-miI zZuiBtPS};%b_}oOiOA-Wy)Yp#!^}*rk1U;|ER`%f+mK~E_KJ5uO8#VvjD7i6|Ibj; ze2d4=11<@%oN$KcR$RY9sS5{;*YNZ~j=sLW0!1|2pf6ECuKHmpqRi_%WfaWNXGN%9 zX@@;21nX|LxzB5NoP(s;_;`BiH9%u8pt8G_Zkq(09Y1IBqsnu9% zZ@mwJk6unqW?!IPwR;YqFtd28H-SOBk_BPn9|8^CZyG+moGcEiit$^#3O^8Gf7wBJ z{lhk>#XhfZXV|H6{!~n57Y*-3?V0S)m?>3n7Rv8Qlj)+7d8?|XMgW*lbQ0q<8FfSa zLgU|H_P|j+32~=hrL8VDdUkZN9&i!dx&2VWAZE&=-iIZv*1eU+Pg+{1JHk&3bvol+D)M{FY^+9; zxL{ac_}<~Qq)wFFo&SHv!x!`Wj{GK6d-7gI-G_D8-q+qsuf&*|fEVMH^4fRnIpp8# z+~*R|wexl%tEwW+#aQ!v4aKz8*1=WNb}3~CKh4Hn=~e^YGqo*n=X0yQlaOr_fK01i z_X|G;WC^yOr^;wItJ3|>?&**0C4b~Q{+%4K_bWdXC+Ij4G}TCml-38Cn-)EhTb+oM zJqkQ4#=P<#-<>!)m{!UaF{PnF4W6_-PnWzmZ}alnk6dKyTwYdgf0$ls+(Wb{QKuAH z9s){C<*y#wX^gC_(Yvb>8G(T?QmiL9LZ<_?F^qq%*6ZJ~=``U%AP|En>HQN-Qtpu% zj(BaQ>Rq|aC*#7B5@fr!o1zV6!w|ZN!%y?n+v>y$0H^z&L3%nm#y#h)b@We82pe5x 
zX~=Lq^+N8E;gec$)B4?qJZ^o5IgKR<>ji=XqzUvU8Q{w*W5R;RjfzjCs}c~zcTwFU zS+yt(BwMnYZp)l}mM!Gd)$*xIqNnwvN9);{V=w$uRX;`d zFDc6M1%_I^ZqcDUa?mBo3i{OxoOZxo1DR72^WEUkxbTzpr`#NxN~Dx4@=%z_S6Fd4 zR?3yzK}Sr1PkLXpFaGzfk$ySU%mizM->E;vreY9xH>q$kvXyg^IB9mE)55cZfI)5&3OE*CN?BYy6O^U8}&!`KD!BkB9ckXyQ*mTnx3>R_>J_a2iM_H zQyTU8BxrPg^fSvK{c)GsWiTMbY0tLSc3m(T>I%niOw(*InV=JwgX!n2SdC6ke&MYn zHurx41w_AgLet;Wsc8OXW6z>8wfU4E;SyT3rM>j+J;;Hma9j%=nyjdsMvR&G$~2uv z`Icg$7PW3JdZ&OjC<(`1EsWnc=YpIp%=3Z!VO%nM6f5`TUx*4TD{<_#%iyvfkhbZM zedUelpyV=@XSc}0|5RsW}r+OhU=&nljm zF=ih*3^ZiSdgV-<#N5MCu`Lk1&B!$_mR(QH_-8kACMnW;JU6IuuG^D#hp_+WE$eY2 zLs-Yp7uD1}o|0VlYqW`)MPF4W@UqcvZE`8KX!S}k_OX-`^ezHkZ(aDu2jD6DvSEE5 zY#IMUeq(tBq%DMdf9vw?@P~eu4+bOI$RxV`4k6qC5;|A9!{18Zcj^Q(8?NMN zTFp^-kx65}Liybc>rGVZ8OjcX1Z{D~!s9c602~MFKOH~DfA^_SP728r-gk5oHw6i= zHA>>TDFM{t`P)`?e)yJAf+h1aTOfi8CV9@LA0*#QHQeTWTm@uCHEy^N!$$n zR@f9#v;Xf&2Jh|~sa34XFDuEX5Z_zgkwn!l1J%8Zb}KjQn%Q!tjJA(7vH+?bbZzg` zm*Z}{Dx*Fy@Zvcjl0E~@Ge?;;gTE?T&W6I`d+2GO1<^?(SBh~XcU6~`SU`{oM4+IK z5Q338D|^P{d|UIy3%oding%WeLjF=_te8pHZNZGeTW-^1ZNi#mE~|h2kAr`%E)VV% z?xcyNnVsIZVMQ>VR1!QUN5VjhlD*o`)qVTJzB4q=t9>8Uum0Zf%gMx)3LhX{NwU4q z!pl+H((QFlXPoyY;Q`L%LF86|j?FtLX<<>q#MH7yjrErt(>V7}Kc(LX&I2!EnMcO7 zr}AALjfh`I<>&J5I@5^PL)jl<{^ubNhd?EN&-7erfjF2yx)gH`UT-v(TI_W8lU-VJa{?I@ zPDX_!Tqe@^!cQ|JKsgTu_Kc))Rhf>a$JXfE>VJ#z(*uSMsG!F zuEmgSc98GR+sTQm?dkC@2Y$JvPzZe8pz6 znary59myU0{_J~6iL&M2(m0L3zz*enwD?g*h#^n|XF|rK3GiKgbh?7tbY#C9Hlw=% z-(s^AM0dmwD#xafWTRAkk}eK{uHztmD!-fUa=(5*Sk|Rq9Cjyii&&%*KNw0YGg#X! 
zha`)*dMssaCcv+aGLra|U)%0T$}=7qn0K1>0X*&d#Qc1*H%~a;4gYpw`vYC$Zzq}f z#K#S0mX}|ryLoV~?!FC^XEhrINq#5|Y0x_Cudc}~0sdYG>WO;HG-6^RomqUHvq+Xo zCBu|n6pEusSGBO=E03Mb&}V%eC~Pe>TlBtK0!SAQ(==LJDLE@_9|lRODnn2SHvku` zQvK`dc;!=xlwTJz10%a+A&Cv6-1spaf7>ww$5pdjFMZ z0lJi-drx>|hPaTIe_=Rq>wyFC`r}R;Xp+7c;6W_6r)|5gCy%LTy{@11RDkbAB>=2m zGQZoYY??Nl8%2ZKYq>DeYwdVC$@q-cT#B~12Bi!%^R>u^Ib?^Z>yN#|8J`wsP+@Le zkN0N~uR@qHM1G1q$7taW-_7f;0fZ&UWH{Mk4F8ZNV%GDGAv{58YZ&(4+)$zJW#~_p zR~u56m&0;zFn$|-Cwk}o0GJgiK$P3dD7CPii1xx z%kn@V<+iq;Q~uI$KF04btt^$sx;&uDU)Jz*^qllnM1B(VT4q_jP*Ru|ZK+_$|25{+ z;>)DH%ils8?5u4%1JhrDQ*t)e^U8Ga@00oc2km($6*h}d4Bu=Xf@0zE>yi-l|8$Je> zIMQ7-EVT`s{sIa1O0sV&KGlyow@a^rW{yBmBe5=$v0_!nxHUnQrFbo3g=Mt!DPb^G zKK_3hGd)RID;0!ShJ$N77(Z6Hg}xm_$P~=kl%@QTtj(wi6NAqeL6r!3JcDm z`YgIh<81A8FTLJ!I&6>Dv=?-^Q#P2$%6kQ7o%g3@s@}>SCJ_TKPG7NdzJOVhAZTrq z+}+p`*%MDuo4ujD@RNU!9D5zq@eKr0zObzZK|#XilBG1KQStj6RTTeb8hyQ1hBn=0 zOj!ML64l_FHy?8gkDR;s#i#EM#alHk1TrRFKNFu{jW}hnb!KssJE3yqHB73#zCX`? z5*HH_>w)YiAY)5Z7dvdlNxhoOmS%0eP`Upb*_5{>{x8VKp40 z7mj~Lds=m7@9)d3$#giN=odV)UfLk?d>f2C4HFc8)Tm#mp`&>An}H|xOL=$hMD$|A z*SKk9{ysA!(MWU3Bb;~=+c+N%7f{{x2jn(J)E@A3#X*9Pejcn58HFIwMDkmV{CGmy=AKt)t9_2f}t!_W*Bw2)2CZ>a0&L}l1E z$qd#B0oEiqY7@+VeW$;>Ie`C!elRPsEy2-u&|l+p&GUfeD{w#nfYWnXig~;uSfk}~ z(W(KAT=GjUqih(n%~x6k-raRCqIOOe^RO>A!n@-6{xv!yIL(hCER_nG29uBU*#%y^b+Rmhwkbq713#QOD)t_LW27;0QU{ahc zAML(u>n`76Zo9B+5QFqbj*epTk=VX>bDIr$f$FU@=GwcDGA-s`2t`hF*QM)Iv37u#f1_w|6{(VRHcHewuG!&-LL)TF4NO)>Vq$3!!w zp-ggu?>u#N^+v7#Mf~)ji;`Dz2cgS^zO=rqHI!I!EiazaSQt(CBboxGWaOa^$*=1P zjqp4&BhuKfWw|=sovT38IV;b9(L?VC*`GOn(PI?$!|wqymxOlidU3qMpBe?);wM4S zvYL7ZYuo}A?dL2(uSpn^7o2we&heGDvI+&C~i>?EKCv>Kh4-^T$V z<$kr|#uBra%(l_?x~7r{(4-2fd*QSW75T?0mmF_KE9{?Q*6qk}q>EU?Yz?o}aTFU`s4(uyY8 zl6O=TFqX@!Qs0hDpskJ;YlpgmiQzSQ2Zo@&UNgBXOe1P6X^`Hh9sl1(htAt+#|XO< z0_Wkubg~z}GUoO0W{J=NvxQ``9}mD{Q(zYdhKD5mGH~1tqXY}2eK$ryi_wV0-=y$>l6EnVgRXx&>B@d`0gy#bkc#v|LHQo-=Q_Y?LDE|+A z5^NC=_*fG<_&p2w@e#|0M)%_2138TP7+0aga*Hvk#v8t@L_Ei?SliSJRhEi2Sx`Px 
zH&qrfZpgU%|Dt+B8T)Si8|vBcvS0@0HX$2lmYabmYSX2{-Qa)V?KAcqc05B*2rdC` zPgGw}S_|4lv;7_$ar$kx9a#u0ORU(E&9Lhx6?NmqmEy~fK+MS{nF^(zY#ALF%?5qd zvPp_g5woQ@EcR`Rwj;7_`-AKK+S3E&b34}Gs_O%pxjxO0(bnqh#{ISt&$fzU3R6Gx zL|Hl0)phjIsx@69%Edg%(fu)z(KAx;zy9kN?Dt?#!AS$xy4T0=#H=D;L1$I6(6(u% z-o4{mJ|KjyL59=}p5^FbO@Ufcxzq&f=B2)|O5RKRv-oVW ze3@*CeR+y17~c7e71J89p^>~sDl8n#TAAH zUo?3n$KBD+AslPMK?p@wuDe|-gqq_DHu4LhT}v$dnuVRpcc)KIK?a%6sUDzkqV0w| z<6e=v&efO<%psqcr_^!bL4ec|cr-RLANv}Q}hOvfdI_DR;`o?0g5 znAC{dj${@ENGA(($iI|yYb34X!nX}BnYDNZR;sOQ@WNE(ay#d4-41nH9z$ z((Lzse5#pJQWt*&5i@TJPsF`RBgPmT)(>Kmmr-teDFsy!#>b{4c;r)%&&xu5t_3aX zt*tC_a{l4xir9jBG#hO+Iz`q_VjVMYsP@aXPcwUGGMd(#!%FM(?D(BZ&HbOoS6w=q z6?E4Q{z~fVUanFH!&Uo3A;*4hr9RX#B`!?lM1e=SUzn)=KP`aMc!*PrZKVUIA<#RN`FR5F zUE>Ai8^|6F7SaT83hy?S3AV)m)CgI}z7w7o@&W3XZkshR*Roj5J<#oOA3p#mYAEmV zFc&{f=N#-9A|AZyuBMjaQKT%3HA6kly>aCsQus)#|9>Br3;$2PSMPYu@L8f{TLpXL zJRwd85BoFcXFL}v^5lbG&wfpjefGS0eJZuW^Wqy6ZtIjHKTCOS?gPc*vCHm6Ko&r? zwE$WT0Wy{nXc2dyeKC2Zqy&Ge?EWW7-5eOY>XIpheFe~oIfJ?FR^@@2#MDo9@b)0? 
zaCTNrE%FH<3sWrh%kO|v#Y`6g=&y~IWbxI0-kZ-LCZP;LJ6{p6W!n#Ir5+yUIAL7h zo|9JQau!tfV8iK&{NJYoY9%Nnv9S{4&0bw6Euz*EufjgBXJV?lJXQXIxGTS2fVE)Q zG0131IKzIIjuFI*<@xr!TO0!pHyH1`zizANuiv4Riom4FaRk_cI?vvW-=KLnVS!12 zUJ8nQ1phXC`Aw(Um3i0y_QK_0_7}j2#W4j=p;X;>i3uzXLd@I2KDhu|Fm_=$MpQnR z{hqPfBuIa!X>xl>NjQ8>&LMl(D!JFMX>zL)8L_j>vQi~Gg~7S^gti8Lkm}1k{H*_8 zhd%}ilat=cqIGOJ4+EqiVMe_@x4hI1MKn8m5n(i(ruyx3ia$@z!-U&E;~9}f>+9nhGMNUUKf3O_jWoej_?(I3`*pUR-zK|~4tko+c1 z(@nSKb4Mt4qfL15yj{Oe!Rq6!RW}P?B?{7iAEG=X(Ks(68+t37Z}bqw`nsP%x`l)M zco8E$Gyn)6u^a)4xSqs0XiC@%4<-DcrlWb_WG!qF7!cgQi~2FA>%}3)-@Hx%_b5VT zqhg0xS8sAkAuSl)Ij@|pF}u2vJn7Ekpx`R)Q(ay!Ug#gsr^;3hf%)(Af|1#A4G)># z#zuG|L@xAf3{ALiFVc&&Zj+X$pofm_rhyFqHe-?SH@P zz8W4H+Km;%knuRdc)Jd@zgnJAo0hq&o}CA$hk3MA&y)0r@CNp_y0ctJU%`I)bQoG;Pg$d69kr1JmU%Gag;?w$dXSY~#>AIuW~n zt(29yxjyUJb)*a?d##ub_X~zA!u}ZR3mJw?aZrtn1dmiX+&4u?+W?tV)T#%d0h4~q z`gUI(9rALm+M!b)cjQ9=zrH*nG`>+oa;xKnxPG~xh?ppohl#(}3gUKbw6;k()Y)_H z-7Z$=eK;UKK(6-=@P=<&W90c%We9Io86HiEl$a-nAe&?_2>L-LF0|fhAgpnv-0M|S6sdX%C132r=ug54aLppWuz)gNvlMpxF* zI{}=vkb^mE$n_MgttIR3>9%P=GMjZi>sI1IXdhXEy2w(qT`-7aU@&$Htz;6+g7W_61^>EMA|;s3?6-zg{0#VLo}9|6qqB61``;jz{`>Y8(zWVj;e~`t36Bze=O^Sm&3J%&4k-6Zz*Ghn_0BC_i7 zVPq<0=%iI3`yUr#_~OQi4o4;ON;N=rO#x0Egi>wo#?e40TFw>7!*){Kw>dnBW&AeQ zi68N={*WpymK%<2Is^kB>KN;F6=lmAtd`7?JbN^2-AYmzu%+yx9qh!rF7=r-`bf)P zB=Ad`Nx5jAC?|G~NC6JEtZ^B3VNWkP94QY;td0PJgH&G;SfR{bQReh$e|B@;1braEshx2vn3Sg9 z^M6vebF<)I@Pz!7{xEPB#qO{nKKsa_lXC$dAXEPvn5uH_j+@yI`cop$BM#32FfJA& za296t$g#{8;#JfkegVkABHHM;U2z133sA>yNErDpd!%+Pad9j(IcMPQSBD#c;*}2` zb5@*e?4a@7;yZ+RT);2Cp*6ZbGKK+aP{9o8%i+$eYp?)~M44JEcodC;*kr#Z(M1! 
zGs1cuzGswL!m@?HmFzH{0r!DMhd&Z^vr%&I2m+P`TzE)}JUUagw77EI%Z}HkBSb46 zB97GLJyn5Q`4kkeHGpk0J=_&}F%@;Xk(a(Ng0qh(&5&z90sG}P9^0Gw4lsBrE5&ij zP=oy(#4mBGEs|5lTEgC0wOeW7-N9~M-6&X{tk8AN37vEJ#kz|LLI-%mY+W`834&A@ zV&PYCnCzlQ!6FwW-?H{)dl3eMvF|)V+C<^uLOe#SN1u2#%m$m^4Jua=2<4FWp=Z{t z4hD11$0k5Nk<%Kh`Qr#!V`IH)e1P{w11P>kd=Zf1} zCOkB!%pva>-vKkpTYLSXVKEc`UC&qRZ`>sP)1i%5kf<&O+61nao#bi4PLKm>JbQ%5 zL4Y(kB{B&`6O1Vk?p%)LdJ)dIzSok!H1!bILT+RGT~&_o00WZ+9au9kAZn9@Zdi#h zZ2+J`A&_X5tV6hh_f|}E5g5F?=xV8^8>ud}^eIIU?Azg~MW}=`jAv~`5Hp0IZpL3* z&$9#R=ws59Q^1|D5b1q%MbrK()ffusE2#FDH5z?yxu<$`Q5nC-cJ?@g2WOX6U%@+W38-UumR4D>uL z%CI%u4*}#*AG{YHCfwkAC0_t&R&(U~-}$c%OHumtRPXH7Xt=^W_e~TSNEp1oS?Jeg zYii9lj~o&aOOX=iI293WCbyBGV=A;7kFdqm6c4IsUO8s`13=PSHw z_M6#bIZ;`kje*Xf6!*7&799XX*`RUeXM4@(-$!$XPRmFeoQJ(Q`GK_ER?bWRbvsx2 zQO&|_kBQIcEW~%|Z0jcL6cCVU@DtjEPtrqi)9>kyl4}LXNkkRvV$NY2nMV%wi~hiS zpO5mCxe~gxu=|dftvB5P@OA=lIT}T)qaAM#0WDxEgY15Y)PVS2f#x?1Br+q=4FEqp zsI!HZS&ZwpczBIhQa%ni-aPT1(JFqibvy_4iJMTl+zDo8sKwdA9MWyS6IB%r2u=9? zjf6Shf!(8{Wd@r@MoS5UF>o9w)6wezv=@hlK1(+B#A9|0K^4u`2z1x3_;5r$sWZ!) 
zSK7LCwXx1GKG~W1&UhgdtV#z~j?SjspcZM#QpG@<%+09ljeb#DL6R|s0?!TUh3o#B^sBI@e`y2nFEHuAv_=BbR&|jsGq`X z#QE_&}q4oJH~a~E!~bKsH)IAq3TOpgyTZrFH0?gU!zo1v#SJJ#C@ca`?5$kU89h}9cAJ<@g0-U)>w~(lyo6|T?^o* zJCl@;r-)LXN3=+B6VZ~PZ;gZ}qNNBa%x%ov{BpiX&kn9%5m>>F%GcLTjg|UJd7*fx z`#t3C>vnW^_#lfa7Du+g7+VNG-8)G}aKgvxqg^ZeWw411vlRDZk^ODT7wSUE^Wp!^djhMXvTJ@9TSm^F>Gf)sm8uu}f`ZCe9xzalS zdu_ND&9-f_@GnuE00{lYQ3a!!D%9E{VuP&Q$RE&+d$!G%Res z2}}-7_dhY^+QfH&87?Ehky_iY_48&le{@`)Jes|-5oX~>A#5)UB6`ovd4SaqkH}GF zL%H;VQGTGhpOFi107KvL8jxKc`_{BMAqSLw{kvUM>o#~FMtzoY+&uZ!YF?{Mm_7al zDX;00sQ|r~1&59^!lfc5=!lHt?0BGXyj#&g+3Yf8yo;3ki zhwmw1&n~ShTW1pxk;0$pPO0B>v4|Z=oL3S{nt>?=74{Fddd7z+(V|Gz1Cq8kR7kOe zd5AuB#%J?Iq{gWYn%%s*mB!GHFQp-w;zgVU?+Hba1N7lM*D|QfXI;5dT&im`S4^6n z_u`!!(W7R<4~8^!TG>9%IEx8|7bY=Ogp_OahV)9 z1>k1Ziw@HUpr9`XfwRXeopc?k@T*ui(ZUaVN*4eI{PWa*yb8D&}bTu>+KD#a$;{o-n+KC#pK!%=s)04Jf8Wk)#_A?$QLn^q0uH#2PG zV!x-y=_(_sm8({6EZ)6>gk6A;{9>cG&#TSM^x)bGv~yVidB_YlPYo3>C6ABdJ!Q`- zlngF!YaxGeyt7}HTq)C(?3~x*R(nIo(99iaWOo-5>+0bZ-R9lekQLEOuBtq<2{-I(P()SMRsm z22Q>yO4y*SNgZ+As**rg9JP8v3|SWRkx~?LcAlZq5wf>V@Wzc-^G28}hl0yS&&a6h z=M=Qq!+;O@l_x+cJYcy_Z8zMjkN(E#d8Xh>8aB1!=RZ(%_{Pi@nG>a%CgVgW4#`#9 zp}QsqAPm*rcM%bOzql43>On%R0eJf-&(luCsa4KueMQ(gk>&0jR)8ZD9N1xG`-Ajk z&K*x@moz??SFJbO85bZCGNZOkZq}rSH3#Fua+ZQV3Tc**rlB$zH-IVNzW40Uu%HX6Ci$3xj7WEl-~uVgy(^iXIeD+{bSzYfOk_`7F&Xd(_1w z`l^3w0@q?jrzcjPo0&e6oJ^4HA+@%^f6^2w^X7M5jA)9^X1o-IM=-qNnDCRgoN2gj z(t4?>+V-0S{|$i(o3a8d{_HWiQ{0p&lY`B*sf6`AH2P~ise{5JHxxXEWc`A@N-C;# zdMd6j$@1o0>2d-f8uO(cxzBlK7eM{w&UePm<{myszk%rj(eF&Ed7^Qj&t6MUarpN? 
zj9|Q{2&t>qg-gT(A~&1aOK-p%nz3O0dTB6OAaenh>jaR=EnI-iJuZoLFkdMGYdSqZ zLFugSfc;XhaG6({>b%1aZLS?KQ`?8uwAZxqM1tX15ly(|h39xHkw+^Xj>s2{dLNpj zE)OS$?b)!>5Jh{VNa8e%6XS^6K(?MHT(o1TA+J?VUXk@l=0lPEyNJQw9=>M4MFG#yyGW(LswgBcP3Jl_r#EBGu)Jx_|bp}|y z0?|c2E*jDvFoLo+n~x z$u+n9Y7S!@j_p&D_knJp*EV*pjPsFNARD4lfj;(<4%2_v1%2gcJjb<=Pth(4hy6B9 zyzUyTCpilpMgwS31wyI>AZ=|-2;Ji&0}2c4Lyz6u(CWVh#HXzt0;!+TN&5tFvGlte z2)0OHeUXPoeh2r6<&A%eMuPHvyG<(Gjq{2f;m&YL_Pe=_JRcj)X+xUGSrqDPyR_}( zS$;6%tT{EYHE2=~8^OH|G!!`p?zc%$AIEH6%>@xvX=v92&D1K#0~jwsX7_+}J;i!P z>|s|HT5)FZJ7{RU2Exrg(3-LRyp)q7cS$tpds*rN!H|C7Sm!9kl_-C(v9sydn=UQ=)>ZyBb>CD90H#tw#_$0-D7>X0T4C61U^w0q}t% z15I%*@YquER38m1;e}9*r{i)nxot@9R*YfZBX3#pAo9S7zK;F#QnhHp;#+3&*)?XG z^Suzb&;gaed&Kw#=K~!~7EEbl<*UT;GNqDwV`EBNU0GbePw%iZ-Yi&dRc~j)I?7Im z@8HDx1pfF`qFfJS+#AR7?ufs7Q_Fg>CcL&%tPZi-kJdl2l-@xR3X)(R6vhlc@)hvW zW0rRYIk1f>Ob7c3#+M$F-PT!)vjmH?WC#AS4^5t%);=%+F|?4jV2CTvULrNsx`aM^ zgf)Bh0umn|eM0XE(E?j6UJdgo-1b95<&Vt5&TMJEpLJD>pbo^5PQfK9g2w;R!53-AL4|$;_>P z@V~7FmYXmP-Yu*bV)BGkBaHn17L=AEzao$p7zYZlltZw?H7^lVJf}zNEfO?o5T_)j zp}GqF&b9`b=lvZqrmWxdrETB=2>vElGJtsv?bD}yb1SD-rcX;R;dE<#vP~%)gxUvo z@}tw!y<8CV9@IrTQ2h&V6^|IHWecZ5@jHw$w%vdmQ%Z6+1_z2?#HS3Goilh80n5MB ztPUXYl9vv)xp)^O_LZ*t=j$R^i&gUnO+#h-2siUd_z6a7=5n)`hLXy88?tEV}SDPtHME0#1xu_AD#!R!8 z9&v<6`#{wQQd6!pTVEh3>|wm-v7YZJxCfGN0~fLXZH7ZM zP7@A04)ZPG))#s_=f{BHM7sha;bDm2*VhLux8}nt-%)Glb=6j~1?NO;_|e2CV1-je zJ|nV1h{~vjkn25dR;JcFgVIJY$}M6!xLr^_#(kJJ*&n>%%^~t9{sm;yvVxu$0}GlR ze{<%9z9dlXyo;ij_N@anlMas%C7ja8sneeq-{)?k%n$Yl3o3N2Qq7n=@GJ8d@FIBIrjD&igr-z zZt4R7xcL@8LPOJ);jY3p!o(vHwcGpAGx(nn@7KeBh3!*6)gCHR6|RF1#0>eK;9no^ zZ`Lv3bL_ID*`sKZDbTow3H`dy}$aV%Iyx1 zH02ADA=^4Wm45`9XOVHSX@?-@w&||-1|7IH#K?;Bt4KxAZ{2?IrG%wopt2EV@f^{o ze~X3e(%Dz!S-1-z_xQ6a?0$`n^x}?}Hvhe6e2SPwYs+|!|05X1`)mniPaFMIe>=-a ztJky1d{xSMS;=mKznvwo&;G1TVE_Ay8ZBbD&|(WP9|d;qJP-O`Qw-z-eY!Wn%*<5n zF+I`!mqOE48t*7xE0w0x9kK6??F;kbr9d9&uYSMD{b!|{c*4-8XEH`(#$h5*X+P9+ zPewG6d(Tzts#mggUePD5_gBiL*Q-0UEUU12u~Ds?o~kJ*Vz3^;AgD14JL2qC+2Jd$ 
zxc%PaG#mSR__ao$I|84H%%;~0Me;u_04{>6?lSHQjP!+F^AUIdC4vqENJ`jF-#mY{ zt{T9q+}L84Zw1SFt!N08gjtW;C`g|ic2=~crCLqK2eMIo}uaWg1?(NKNG$(qJo=>buxEu{O*`8c!@SPc4vh#q(@6ONFC+ zqmD%44h?u>mDY_2CJo|{RgWQYoz`FX?ih}CFOXLzwUuVdM~nV9WGK$g9LSxz%vwqB z8>QU~N!Y0Jxxv8LLg8^!&Dg&Gk}F9z$N5cvyD(3piA8Y+Di;rKO=uzlG%M?vUEADL zM3`x9s!*MT(&R)=v{WeyFWa!OjO|rg>0pGjI+OKtO^y-kk2V5ZrGcA&=ChW$!;!0H zbn4Zr#Kl<2qVTtXVm?+Obv>Wj(!AfMPz&$Y+#A3(CdmCiY*+XetBep)KlpEL3Km0S zb@>0q*uQKI0~F@Zj1P1)X}pUx>8=Vl^4tAXyqYKLwdfziMWI*vg&x~Yg~&@<$G@d9 zJUV=xUj25SeygXqaaMqWQoM7bKFyljr;FtSUAYcAQhiM)9w=zhM8tKWO*pj*Z3_<; zqAmUqaQzO)%eRY_U2rpz(U3+cd8=I-I}PdM8EHW)>3{k$!Fd(5=$c29ojtR=4NKBh&YW?=Lv&5+x%)y@kcBEuj0My4 z2k;d(0&a%C(F#!kDn4cq30&{tvJ?FYN*-F74I%VHbHU`^jj~;K8T#tbEqN8BWUkz| zU~oDi;{DxgHWEnb+!*;1M4VRWh#Obh#~NJw4P|*OXZYr-OJ1qNN}3>SnESaCEYjT+ z9dg!}O3n^H5Qh0}eie$r!bt6uS7_B{craToZ?Xf&3IMLJB)jz8AOdbdsH=lTM?PC9 z)8nLpUS#J@$MB$nNO+>^)~bw=Y`~Ym!83y=E%paCkwZ7zgRk1GbMx~WHhk-L*4D<5 zE!#ejm!Y{b4}_#V2J>-7`ru-=!@vEMN?-`^P&?R<+9nqAeyb`@1Rc=y5t4*Z?ZH`A zdSzj`Q7(IGbRKWX^7lYNO}#Stv~v4z+<|{rpV}k(W=dw=NMC<{+$v&|t8^+2zNgGqZv2Uz4M#B9sQwVIr3yMpm{^ z-H8BsiY`*f=v%zi6n!Brh-e(UJUxL#N>XZ};8T`w=B?A}Va5MwxL=n3glB3m_tDyb z&PDKTQX|Z}+}J-JzfH5y*B7H^{hqNJCVc*Ne^`x-IpA=KcKP;Fd?b~lJ1!r9yFUe; zF2AZ2zjZl=TMiJsobQf%_%Vg@0sI8-tR-Foyhh5IRUN^;Dbbktx+1ymo0i7Lg`-ex z#J7%F&XY3#ljX?NiEMK~LMeAgc~|c>+BkZ?RK_2jb9_w@ zvG*KGd>LJaot>+`KHa96C6ESRwY+}K8Y}dkW(%(DJ1@K=N>LBnTTLPMY8_wWdbjD1N-d;MaqIEF(Vb!+X-_H}AHc(rNdmc53BWg_%ci3_C<6~-$971} z?OcHnixXK`0BOBJ*kPlm1=7&z@KH^zsFeav!oIBq=8y@EZVJ8+nw<`B0Q0TO5p71N zLcmul#pL#;SfL4V)U+x-_ zGY=0gp*Pfp4NDf#O;V?p3q%4VU%ULXs9x}Vgo|~a$u1}ZKX6_cD1Bs>GRzhB;;9R7 zTyC54S6AiZWAZiB+JnA}sVszpenv@Q;^lFbu8z-rDG730NRwwduHSw71=)GHtIZTI z^(QuIps?fGT@~bsXyvw9{Bh}G3wK=k-dZ7*J;21ruH~;}I+t1W4!TXIES!54^F)bS zV&L))<^;;?G#C{syN-AW*qe#R#6f?&sjmjBFQI&=bt{uZALan0l$P-bFs1SRGx$Rw z*N!IMHh{7RU3?ns~kfWtYyhnvx7L=6E$)4g4w3WaKugto74)>Rb zdpo2(%yWv#^7UoH8Wb~7t&=%wnhDu1zkOzJ|LiN_svJJ!Nr1*@dvUbwGN&tUV^dD+ zh3@pOva`g;){>L;3h$JH^H;KJM-teB310Ihp6bn+FqcEPj%!%9h4KClY82S+#x3WC 
zwPXW(AlFj_gsi-DawYWnTMi3Lfwroz7MAQ5Yqn3lQ4T6$V z4v%7kMbIP>>?u*aWoL}54yB8|ZqC>>d{*i=s z;`&J>b5|&iW2bg8FV|C3-gH@p!%7R1RPKIkFh%48?sntlvUgTMN6gr}qfakt^MQ;c zWcoFq>F;i+xO21@;0ylxuB+gQ-e&{?2?1)D-Wlmc&TV@4{2CjH69Rt5ahdtfrL3k( z6OU);*~OKElH*HHUZ(?WtZeM-@on;FaSt1-U0`W=JjN*wz4o%>ioCuBVz(Vu@zn+O z^;3z+4KbFKN1tM17>)nucI8L>;*z*(_%2E2nnCqedO?5$hN8Kc<4oKNzZ9E!)bHX4 z(AABdnH$A0rYze$Y%Q4VEByd!UMB4mwPY zS<(;)e<}%!FwIJTBmxoFaCfnXXe!Q~=u&m$xg0j;QW!0pNom_mECH==pbFozBa!W>~`|lBZ-4)The#kezD3N;AzEAg{qj zChWCeV#qyG)it>2V^Y!~I;s<*3glAEqupEU$)p?(cyw{4p9l9BGH}i0ih%r>f;e ztAJ0Q?erlF7T~HUoX#VyX-iu*k@#f6WzJlEF0T8cik^!|;19mw<%n#K8YuDeZSLPN z49MP$rS9#;ko&(47|zw7uhZirqI@F@1ckgT36fHr2><{(;h{!!OXn!n>NRAG%{b)3 zAJEtv2rizHY1r18nk3Ovbr307i1{R{Vyd8{hoyr#sUY0UIh$6uxO_9}BHdwW(UQ$cPJ zP^7spm`%Y(@m-DLMo$@pR>2kas>Ed>!*#uEB(AZd{>Rr^aI>q5pME@?f|k@HT&>IJ zxl5@<2Q&Vn7TL|)ygY@*T?Wa#giQjx4Ul_Rf7fn#(dV}ub}cIq=jqvE*Ir=TuscAZ zSqy|+M^ZnZY9t1fZQRXA=z2ij^gd--6MZIOQPW|J8$Kp;{ZbWEnr^Yah2Vl6q}MDo z1rzg}^Rua0WPhS&5w1TSPS*z<^iw~BE{wL=37&5NZ06l)=@cr&tQ-r{cg3I69PpTq znI%vJ=pbQ+{odq(jKY344N`|cLqcX3zfkGeR_WbjY8f)eCB+xx|GWA9IMpIvr7rQb z``lz7>Tl8MR$seP-ekVyucdrE6NG&!Uad}DvSIc0r&i)~!HIMqzgl>i^dOd#s2v{%8OOfJ)4U1uKn1oG4bD=QT*bUvf@hP58*3v0=Uj*C)sDikw zS4P02m(>tG>yPPKSeE`dH9VD>>2=TPzT`=sb0od*QtP_tT2@CK$j^d3Nk3|1xMJeX z(AsB9KeRLc{30Cx&>jf^ifPQ~qwWDS+0JfRUMb+4UBC{Yz|{Ms(K}T29(z*8*JW8`NpCkB=v2i+wO99Z~F zkd?#Vfc_tD{e}3z?yQJ}r}zWQ5KaDCYl0xoJhPzFV^yjRyV4ZJvqS?e&2 zrZ8Sczxu%*;7!1G7+J@GI(u}`N$9+qbDh>)7N@>aE58ai8tueXA%v{-+mrwo_<2)( z=)eIGdMgFuJ$$DhUZQ|l&<;>}Q3Q-U^PydupVe&hr0L$fCZG>SRChy+6QJcxRtygg zRRoZ~`oeNFh!!R}SDRdiDUFqc$!3!2Eh~$;__UdXl?3opQSf06Y)1^b>#ekVJImk* z&##TEdn4UHDwywHZRbr4by-*jBsFm3RcAa`b29njI6b?m(wsfIJzo?h*IiWkEta+VoP7iKcx_iG%I&L9n3wp{=}HwXu%{BCCv-=s4S?ByH?8yFhqQZ0`Kk?f7+Ip3_Ue!3@ zkdXki=a?_<-2;pz>4zv1@@Uo4pJ?wIxF{K{EXh1Q^9daVDBn>p2gL^~vUOq;9u<(YzswP2C1}e!+8tjN-8msWRo5P`hLeV?C zPj{IbmThB)#D)j?gKquOI6gG$@lyO0KvA2yql+6V0C!qdU7|akt1l_+DCG!Iy3K=a zI60)v5>(e=cvROon}H=+nVe_{d<;!Pr(aka$8ecWgcE}GGtsW}Lh)vjPCElycB!Zd 
zSt6?ZWyot|~~Cxu7_^`$Hoy@6o<(EM;- zz&sDNpGdx0&2p;v&CD0bth4>1wlxg!-+@e9H@8*G_91GP2K0ZuG;nB{s3vr95$Xmf@%q+u-T9~#^6!v;i zXLZ4M&P$?jwkyvnAzgjvBGzcbZjR10%fz+41W32o2e_`9PF-!0a79&ojjlSvnld5U2rxp+V`#bs_K9xNYZmwEG2wc2>~Zg9|lGn4*$*Z^FA*i=Q#;+3+0Q6AQ z;Dhf@aVc8{I5>tDL7+K7g9eC=Kz?l?VESurS^>5@6MDH#faw+XrQLe6HJVdC3uY@0 zvv2TklGAgu7{h$tO{on15e5B3d^v1(6t~p_pfWO(&^>N(^#7UXAv}LrX3(-OV(4S# zwW2czcXtRxG23gnxPk(-y>fAw@&MecofyprzYy+85w;Q1$(LIdfI*NJ#viv6Fv z27NjmtiQSn`i}AF^i{>|_ls!pGsTUy5Q&pJhdq+%a@fFvR?|yKOEo^A7EwXdGVlZ& zXsmAA{ECIefDYFLKuxxpURe4C!~uYt$H-XkDgGh=fzE(034ZAnb;o#E7MrEhwP4)f zoyl{41?KRr^JmNFAac@Uw4k)B4velBcVjX)?5Y^jV(m9pz(9zQs=N7DEi@rl;L2dL zMC_i3D=L5ypHRdDGXKqrd_I=_E;pwIRa|o`B#54TcK7d;+fvRfj6H8)L38sT%pI+J zjviLN@oK~Z_j!65p7r&qN49f;)=-bwZx-H(?I(q=HbFzo(I#SfVH<;M z#ZweEExQ(3Qy_u~2R z-fl7Ul)z$)vITDYj%qon-gTw5gRrGhkqZ0kjo0Z8k0Kqk4}5-sq~I&HH#Y!Bf%b|Y z6K4@f>IIA_x}_G&v=b@wjG&ES5GxC%y^_iH`Pw($X}n|Trm)l`c(#B#HW%N?4+pfH zfyZ_c_@ko{lsXzi%1t&FH-2{Cah#-aus6hBs*^iz&)+D?^jw#xd`cHM)8>$69-UQ8 zol_>}8Bh|>CWtQlQ~tVQ3kbP9JyhY7ve3{WAAsFH65S>bQHr?W-OEhK5f+#NR`X%W zj4!PewuFvh4Cv@(Kux{97$B|~YwZD62hYH}3RBo$Va?S=KkyfxvPUQ)2UU7gLTb>^kWaIgqn^7)I*8>3Q7?m^szn;1vAFFYe{ zlXOIMw)@z9zG+L60Jx;lc9d#xHl2s1-~pRp-XMu_g|CGA(m2x8ODb zisjj0b}z8QR(>Gl)shU8I>hhjb=^Ir%3xw66=Pk;vSg%Ru~j#i*m5X8cv-wk;5o(J zX+4xR`zl;vt?(Tu%g_a^B|p1}z^0)Ms7mx3YJG^+jNZ6LI#@5eqka5ChZcGLklvI3 z1qdlmofk*Yv?u5)H0y1iyzA{^gMk5fICegaR+wXBT3HAlES64W(Tky>n>pk(!6kQ#Q+2i4Q#xczcnX`PCCE5d!QDh}cGg?DVPucxFcYcf(4hgykKL%e|It{1FD{ z?s0$=-xfeyFENiKEpkCCw^wOht4TanLZCV?Op|Py{)V1v0q$6$0=c#(ofb*;tE#F zw9%pEoS(}MXc$NTK$u-5O9Fz|3%HMRw|tHD=Lwxa0gejK6bF1fBlN>OKCm~2YqGAm ze@<_G=vv3H8Hx4lCB>PXfN6QLZNhcc%RLcv-b)5NyjHaKz~!hVP3{5BcV^r7@U8*T zXcL~0g&VEcZ`*AXUcSI0r7gCxF^>bZtL()0pkf)FCsiFMKj_uN$;!JF^DN=M&gm@; z+}eh27KWOYo|T5ybx$kYyB{ETVPi}#CoH1(`)3ooKoqb9u=FQ+znx?|A6a+LurL-+ zzuS~s=1g|ZQ~OJ+93No|t4rHad^|mlCu#bTA_zQe3}t$)r7pq7+|&Ja#3;TP5#!K!9052y>pQEUx>F4U;64bX;s zY%I0-ISzv{Fq8{M=j696aB$$#J$d6hpe4$iWG*H%LD2Iy#UD37KA^tw12FxXt|YH? 
z1#n&{HmQo~pIHJ^VYRC`EerZ!9GWmC7MlcjpXPdCNxP&00?`Bu0ON8Zo!UTAprwpB z*h%0wkB`OR$Az<6QM2&RmQscch-|DZ5 z3fu$vq-xiH&5@ro*O+udKjY#qjcx~TGP!nbBb%)DL{q^k&mF0>vF!t zf>za$_iOVwU`5iKheC$EMXDhtsev`JhIjM)hPmEYHaSuNi>{W01O3^J2RYt1(HRdO z{k8Vcy&1q=v$q4uX(a|GBzPk93h=xbkpqd?-pB15D?MJc*|y1j4(+zqHaxes!_a!&0R+DR)QK1wcJH+fL!aX%%zExJEfyHBfAj^^hC#!JX8`ky90ZsiKb}cZ zxwSC!*v!(jb^^*GS4)O;*{TU3r(;F} z7WO+=ByI&Wxc+v07dW#k29rnY=&A|`3byDtJ4ZFO5qx0z9kvazW?t@*Xy)WsI05>HSEyGcu(MNPZ<1pt=N6XRsL0RAA}g_ACEh8SF! z^GQtW$jW%hUpcL2ZsZ($Djlu}EPTS&iDwgz3uT9Z`7>|otP@z%+U4THI1^Sw9^s!l zCJ8J)IQf==Lew$0f#;YouX=yaFlJp9KCaPGj!EP;fP3|naKNPyhqw2`=bXq{&Ea!% z4OVM_b8r8${rzY6+G*@00S$DVxMx4*c1v_!F0imp@si_+zsG_^e;M}6u5E0_Llq&{ zA`j~c$@`?v;4>hK?zR*Asr`i#>PdZb_|}%)9!N*f!@bptEkFnu^cDwldELAuGOK+x zy^ZI9b$7yWgjLArSP4=>82_V;7f!Ypaq8-gMX)R(*LjVq*?$T#hBZZ3qXxfVf?pi^ z(+(P&3O`lu(N)x!q>{a}NeYuke}n^=ps}9skZAFd7Ng}h%pEUj%J;EgW1>f6v=B4= z*9)+8bGt97hJH(Qx_Ppsh~9JFF3E!ZD1%x`iS_)CQWW;%$MI-XHhA^1J(4o;=a2Kd zx#b&8J?R&!x{2bdo|Ui(QBIxgecH{BLtpSDn96@t$m6Ze<&@}rRFotxd2->G_>6I~ z8PGDfHLjc< z05Q}2J#qd^cMPq`HZs7xCUF+{o=%I4RXK@BcDzJ2onLlAjWi&eKAVm6E8tcZX?J_p zb_+0=ce&VP(b3a;6Ux54DtFsd(7}$x2YuApW_3e#u1*@M$2hle<{UN6Srh>slplvH zL=*g>KOnhhY&u<#f^X)w?Zg&u^l4PwwnLm;iL}uo z7&jzniVF)_C_`g$;Tz_xhx!<4J>&tr!Ho>(lC&JLnzq_r{ zlmyLy7hFtJepJU#7L0!LnuHn9wKb<+ySSD3c)!`gofI^ZS-16=_<4RlUQcDB2>mR> z`EMx1?$JZ+$9WU)?bbj(te_xt23bV;94fnj zJM(4C0w2Q-0Kn$0Mt--J{PnotT%RD<71M`u%i#}j2c?L%X``YtceKTpDSL=EiJ zb2*Llim|X$_9Zxx;GV~?(RS1ELBhs>NCS&zPF_NuB0nnX!6M!{cbEHV+KtA zKt3N_23DU7mEk-Z4q`5^W97GO?lMOAP}Lo&jJPbn62^tk~&83gO-J*GR6 z`pkXoDQmxjA&fOmGs>Zd{cMAgZwOp=^bF$K&oDu^hi>`KRGI=pTmX|w+zkYTU0a-h zjBKg7TJv2%OS6L+uW+u$PQKOh1eQ;xpBM~*h|*0V`J?ODbsaurXd|<5sEvDNZMSDS zPj#?3B1Bu2;M3)US z!ar=Nd~+Xqg_07EM*jfps#l7$40$BMg!mFn;a&|U{ubg#>BXl({@~{cWC1U`4wKVS zgR>lS@SDK;_8S`MrzM9>@f|)H2XsEg1SxZBWP`Z>~h7y*? 
zDJn=Lhj?X;mPuc{5o-dJxQ6SuSY+B|BPyD7^z->l$n$c_??Hf0TWd{v36{Ee@gvesm?r7FAeGCz(4?*o-}LI5d2!xP!`^W-%r*s_;c~sT1ae( zVPo#Qk>q4f9{~jTQ+?^bbTA0g3D_DFLxJXom-9)Ek0~i%$%pHH{KP;LY>UAc@MeLKG_x319D_93(sA?;IuU!~}3mdvJ6^6awJL_?D-s@s!UJt0dEP9(20?v{*Pk}~D4sk8E>H1!D<%`QUcd7NlX z7AaXDMumQbL+O6S&xMB|$n%XQP6{7E|3T{kOFwRjFC-<`orlYuiI5BCFPtkh@f3cN zrt$H&k9NyMbn1fCS&`G$%1+ErZX*YEd_Zbs3g;a2Q1-E(XHnmtMBfU8_mnE672vkj zxs@0F_^9CI>X^8&l9*DFuGIN>--V&E5OzKP43i)0{zuHiV;mLR|EoKl#gXg$bu44kN}cN zY=Q55o(&J()p?k9-hVicTf^K@#oga@^dR|k{PU=4_v82E-OOc9k>(X|B7Hq_Je>)0 zPkhjr;W6x5mCbnoqBjo-kXIaJKz^P4MVaZ$2Qm zV*BvYBU6T93U!s*ij}++$qL)g9o;B-2Q^AtQq`UBFYE+X&gg&fWU&0t0Atgi%WZ!Eu@>-`&KtO8 zHQet4`k>BibM*V6V&&54=4wpkRJG6Z$(wVbq*|cH;kY&$*Jd!!#aw#zzWXEbZdOo)(BrttV1o&pyYFsv9%!k! zKTXTb{5MnI@nJ;YHah+?QlQUtg>N1KYFt`#v{vuHPt~2fpVQcZ2AfZ>>)&EBuKfJn zNe9Y7uc8!JmYsqlVoanYC%C3-zl*6tARa6w?{MWDlqWOUMwkcMipa$! z3p|;I3eqa%nvIM{QcjOAEdCMP?26LioQM*u>q*Z!Nvx@>N>>8GE)w8Eg2Kv4cjYQ} zfIbI1pURMsM99())i&<+qU%|#2x){>@VNA%^71ebvGW-sD`zc8A$wBGAdC-%x#qpx zeh*!Ayu%qH?^(ZyajZ`FJIqYQgf{@=wfc}*=SXdM>ODwS zIg2`EkB4DfTTHfV>a|ucnwHw7P~Gd#bYmF_|f*VQ+T+Z zSMst0Y|<}RFMD~}lL4c4C71Le;5Hk?9X4>_c!$c#WV7rJBWc#$6O3)APWQimHuqX@ znkeS!FID<2ZQb7%&SjOePfheK`}i;7Ywn|!H|W1TfjB1%dWwLIp77^u9^Ex(xS9)n zXnsBSy$iK`m@F;ACSXRfyxwt1t$u`BQ^K59UYOs?Je~ z=cEDLYTN||kD5NFZr4c{ET>b9+!zcLqwGL2#s&8o6K;@eNxS(frtgXYB@+=?G zM8HEa?Rq(FPsP2QG_4hginKcjONBgMC<_*B%jHr8AT4v-=Jwp`#?J}9xe-0{4-HKX zk*Y;+YoT!6wLm9)N7i^jm+yym;?t9p$Mvpm@CCh zp2a0p8o15kKEG`jRij!Z$4^!xn-8dSxlL`SY$=f>`klOhGq!o7?EfOdRz+uz2T1*Y=egLA> zcy3>{VvWVlRD*EbRX=*;#P9d;W^;eFr#z~EIg%xMSo`d~k6)5A2jpb_eUAzHJF&T` z4}%_hG2=Nb9o})tg8XT{L0zj8nB+;y7JB2kY&W$}SDf0q^kkXiW)=s3vNd&wvojAc z=gG>3r&L5f?jhq3{7z&kG%RZYR)G;(nh5)pnn!vpw7I#ZRp1&M+OMSEU(?Aj?`>Na zCj?b_qlUru=)1R4>HrdBm2c3A7jsh_025K>RpN$T*zV3Q1Q2P0kSPb(!7XStGwwe_ zUfFlI81{GV01JP+2=Onin`x#-kzr<*=>=mHq(hj2>}MXjMo&<^1AyccC3X%E%nL}dz?Bswd`APCypxe=_8L;lD0E5Q=rePboA6^>}n0-adD_l!|&2` zB#N>}7ErYZTt-Qq6LGAs&j3iSson^dVH!?%V))qmMB1;vpCN!?D>` 
zwR$=_--dWMw!hmRlpn4A8RL{z=L&ym%0qD|6$@7_^#dE2@+P|NzT(`{Sq5<;>V6s7iZ~wy{4w9lV3O$=Nh5;mzFW_?|iEt0{E>~+eTTI@B)blaGppi;F$4wd-=n8O@ zO^Lui=$QkL(FKIYDM{Vs=OTfgoM-koXr9XG=AMn>aPMsA{Nu%yDi{S=Xvpi8Ms-4!~l;H7793}G3cv}rJ=1K4B-0g&xqk@ec`)FRKRid?AlfC z`zw*YdREm~wex9x=iSi?$*E3`7ta{t$g~v{PUa?}pVfUxNBq_P$7I!X5oAEZY$JGv zZlKBwB2j5VNnZ(C@G{5FOq|gLI0jiXFk5Zp&Ye)7EZnd<2BX2hvw6S6`sIe|7rZs40Jm-fTFwMBI0+Mtj?7P>dhV|KBqu3$X6a)Sl{mWZ;`@Glj zdHT-sD%Cve<%>OEKL3A`RKfCw+_zy=v+o)j^M3#M_``&MksN2#y~*vD0%b;h2FHgD z(o_Gc0j4_?vx#Wh z2jggnXR??EB6XOxf|n;L_9QI3akMyh^k>YcnBNlLr5(9+GPkg8!zPr2U*Wyy%V+Hl zO5@lr^(3MobsC5`zPhRpt5Ws4H|m(jRk@ugxrK92vw z?kR+|%8f+C@baK=CYc(|^F_+f-e0mTk`MbTpCdnDSeQq${%eG@rG5713^S&*P_w3BvSI6L|85=Inwf42BvhkqwTw@eLSNj`u zU1F`%pU)iPA}VAoe&DfeKeRaNL8pI)UimD#2-{&<&q%mgTN5p}yrmQWyy-1o4ikKz zmon?*Q13Ty`_inm0ojq4&#bz(wI6mB9@UZB+y1wW@;H z_Jc%Y+++~Oc3H{Sh54TbM(Qf&hNJ7mVkQ_-`g>uwO!M#4;@r{%PHJ9l-%yFlPT+$_ z$Aw!4dP=i2S?{>VPVB!S`%;n=%87s=Ks*SeWX%8Tj%h+zJOSd(TY)4a3_Afe(bw_?;CHtmsD=b0H4~ z7G&n&Y9Lytnbf{EMp|8nC<`2tkQgwf%#1`joB=owy$)>KN4M>SsXEP}t|Ay1Oh4QV z%uYqCXP1(frmwhJuLBC8;+G-q>R&TznHqVQThG5_A0?Z!b0VMC758#Vxz`z=b1b)b z&=I}dlj>4kR8_F`$h|vIkNLg&HaMj1xD56@@MS}#&~*Bu;gm5{A&Vyf_}Kk=po0;{ zkAGmsXtoXA3T_P5iITKfn-Q&j^1xo+oM9vfaZI{VSmx zz}vwHH=9T3OfE)c03Aa+b{Q^s)Ts%ruC;Iy*iLueoe@O(jq#6N?yd8CwY7jIES%L( zH&Df6uj|S<)l0K}Uz2&Ep%1=N^+v*CphiIqpSh68CCa>+`BCWJ3=ZVY{Jh1kDX%!9 z=#Z%kLBAHUYNQ#r?}jO$FK3r)y!oE`Rw?C^N{oZmgay32(adlvm8Zyh5-`+c z>IMJ#_Q7~w7wZxmOCq+#hRGG*hac5D`r-bkNEHeXVTq`9D5tFk=pK0`s{5jon!zrz zlu*=-7{q$L4(7kg`r$Wb_HR>C>;a$QvSYK4%BE?*nzuHPdP5%ci5#TdLqzY{{sEs; z7txOH=J)8(e>Y+VbHo5)?fU&7#A)-#JG!?M|192rr&LjG{9T{fBD;QcAnx-n z>EVcRM=-;&89^Vf|3zugkb)QY6Z)r17RUS9T#=( zj>Luv9fW24%b;?>Vk}alHW@xCanzAKn=g|R<6?_y!cvvM*)4DEqnWz1>Moz@_P?B? 
zzrQ3x68khs_(WxiUgLDTqJ@2YRwQkwMSAlMU5^g1cJTnD6U~=j*C~4wd;rw67chlW zW7QjChDu{u=Np9Btk(3}zxF@1fdJVdGxhzMUa%%$sRL?r7pmXY0Rqd5MKaPMQj7Wq zla7rbbh100s5}PdkaV1woh|bpK%QYNkqE#0B+FkH#CahQWCzkXTmCF~KJFGnlPQGB zx&W~gWqJ1y3Q^B`4fNH2#LuHVjp!W!jh0PEnvyRXbbSSm!3HzSRm$LC!nZ@Fd&V5d z%bU3ub%mZhZDB!mj)kRZRKWJ2(%O(|)W+=9_XcG|OqjH}4l`$$VdjodPLU#H-(dMzwAF$aXQB5Gr3JQS6YRp}tPj%YHY9 z@^r?New7y>%SpxXYh^5;Nt@-%cRMP5(F%atHG;r zA)Pls^6<@5Pdv59T<+YGe6l zvwRVhpQbeA#mHhil=aX>>IKMp(*WXfWI?0go?u{A!vI6@qUGgX8X$n=(e;bPBqc@rU|fe9CBcBvg*+0boTKZ_R=oF6tu zOZ~aWUTW43lQhB9Bqk7Zb29?{3?hFG4d?iL13PDz;4kg>`Zq|f9m)XNFWwwSJN+{w zxy=P|MJR$vR-Yg{7fb>gl-2iZ;48^2=btgQPl3V5&NvWWZ1;F|P zJb?WlWy>vn>W@QV!#O7fx&zb%X`l+DM3twFef+X1Ayh}KJeY;;AK<5sjGePP@ZY)zJmPjZ;DvIS41z3_0L!5V zlemD5GkYK{dXJ44l}>VrSzXE$6Y36p`Fn78>2NYzjVd4AY|`CaWSRQ5S+GYFm+nG%N zC-fOBswiHi>2eB>?YcS`DvLQYH>aCy28GLn?3JSVVFj@K`03SHzx)FfSuxjEEKR}$Hk-Rn)>O(Nu)DgV zi||vwewLVMwSC|hWdJ$BdiDaBn_Zf2>WmR3j}BeCe)Z-XmQW4A#sNH>&IYdN>33>> zfeU6oSN&kaYI2JA3b|PT3|OP3i>@7U*q!+*Qp$4eQfOFDe*pJ?xrVV0mp*Fs8qTXd zIWM0lL8|Z#!rJ4bcNW=aP?Z?CscGnNmOy$%O9Qn$STbieKm;UmiziM{8j)<@qu#(J zT+z%UduEC-SW2^4-eX^Aaxpabm?#6HCy}X`EN9gm@4K!qijpTJ;;+y5nAuW;qK$BE zVB7HVm?C6llec zjczdVAdPbAcOKt~;r!PNP=A#nY7u*`TI#>#$18J5g2im?PqP_1*^kbQ#XP{oeVC#A zCNa+kF_qKqg&?&*w3kCPZRFZ-%-Dbr12Yo>1fyGI52KUjmI*wLB-V!c+*TwMsvn}s zIH`CH^XNEt4+J2B-v#zUghe`uE9>I1t5WSDBLklog)!a&OyGcyd70XAXR+Vy-Y>W< zIy0cc09K+uY6E~1Fc=GmX|j~#am6x$vH-?>ZmRXj+z~C9+D!7;SRDw1Lk;jG$}lcH zdfD*gs2{PNS16L_lZL*Zg4vE{yv6u9gjc@CAbV1X+ejtfhS5$|ut?UuXaLu_3cBm3 zc`n~gjoX+?Wt~usad33+;2U0%6!17l(|hKRJy~{&i)(qoiGXF{f|jbrMgNQ9mD$py zp`+xQtHS%D-CA>!<|dpfBXdh-3Sr)Yf7Q8}t+qdp@Q$RfgOq!wyq3prMjHl_Aqj@} z)cif-hppj2SHT!hHcMJ0gJ9?nW=N}z%HU9i9CzyqQZ#!ToCU7P*K-uPcWPlX~M8ZNz7qV98g-o)#SB@0;u?L0~SyN;Cl z_#YcUj@uNtnej3hM2TNdcu!yF@2^n6CT*<?%==WW%=KS7O7O;P+|pxgP7yTvgn-? 
znfstOU!ltXrp{NkzBjfWqW0hY+F}?_grJm;le=yX-C}mi55sq(Tz5MbzLEU{fXO@G zk!R(i{W@IrLonGsn#D|Zk1ywBK<|bK2an4jF|0tW>|Su zOPT%-P#!U{4-UOJuvzuPiRgX`mjf0nT`m6jAea+Qlfvt!tjY;7ABIjT6{e-Cu5hn$O)xm{0{(_svWjo&Z1Zzc_LN zaw+nkN>E)kLBZELy4Gw>awd*a@viV9ReRBdH*s+1xKp^?iW7Z@je>vnfb(!C%m3+B zC}%4K%cUg!0?1%Yevi$A9wC2e+rM=)I~DBebPQcz92#wh;6``NJGE1#Pfeckcp0_i z?5^>MPw{LH6FH{M-fI99o-$kH;_jY{v(eFQgbO=qK;CIP?+CL(i6NWz#dUYi&9rO> zlLq4_m31YM(oq=43&&lAH`livU4E1rU2SFhHqVuV{~+SPp9mCH`8Nviue{ip$)l@? z)Mgs};jGlf-j<$oOo_T7W!rmo*1`7yP+2!yi>kXVOcYbd0+qvLKSghv|P$VThZc_#QBhD|KEqy$732Y^GrnLFAh|A zw1e@dyVEm_?4`NJ*HfVamZd*`dVgj#cmh;i!0f83oG6|Z{yAv>_4_l_K`)XC-qHw4 z#f_;jbZnazBPqTg{Dh4C{m%^f<>We*37I8o^S18%>-O~Rvw&|(_YtnLJBQ|g#^>to z!l<5i_CkLS&N16JNT$K_W!vT8ZS7T%s3)_YQpW;F@|p7K%xO@M!uU~4=B{Jh={Y|V zQp~hv-D$BB1+P*+&)CnMn^D}P@)MDVG32#$V^+Am{o#+)$$_d;AoqXgLVOO!u6Oqm zAvGHobzM3C=VJ3fRtRS? zM>NZRP5E?-uD~ySezMTKB?eHY=<4#?Y;;*GIHLz%%tI9RISfI0w|7=&@`YaOYm$={ zAm1WNXW_}E@1r!C`cV)w#;%k$60QhIsZcAm%86)bg7`YFHSrbL4vW+N*!J&;>GpeC zNj7BVyDrQ1pt_oh?P4&)qrsW+vskFnQ`D3BWs6s%1RvIwz*SWwijTJg1vBH3btDZ! 
zzUu06)lh;cXu6LJU>dDRg0Q`^%Y>8P;PWRCj z#Re90#5O|Sa#Gwht++8akR7?(+^p)~F&u#N8o6EGY)Ww&wJ0H)dw4ivLhrohr|EA4>kmejqh@DrEn7-2BM@Mm!F) zo1!?ktL(foKJhxr>ArQ&=!d0i0O#DUV&)sB=UB{KXs#)d*VmtIIp7VRxC#qDmAaS` z4e2)>iO$ZWy3!`?!{E+&1UmoyN@iifyzA^y-L?^tJ`Bx^a!`8`%q0W1!xTk+N`=}fB>8}%_nyviptJeTM-#d)LW#&x$wIF>FS)43`X4=Tl%O5ctcaC*MoRlAIOmt7L`Z`LviNgxv$y#8roy?SSx+ZB8@`u#W0nA!w%Nt5peFs#<7>2T zl*8M0&Y#acNL&za=h5=53evWVgs+2VfAWJ*h9&9FuH?<3NmsuT);zDe7AW&1qdW)F zM~Q8AyV8C)KyH*$E*#%!uA6o!+4*+Xg4Zc)AQJJ%D0}a|WOdft!#kqGg36SsTd6J*^{!9#aX15tCpG*mnxE!irkRePg z6uyfU=O!Z`EY-QlFgU?~>w9cy_{y1|(j!W+%Z5P5oUD-U1WErO9%+*-*&46E6K=nj z7HZy~`F(&dF6ec`XR7}FsYM}B`Z|H~!Li|b*JihdGyo=z zK<57cr-E1RUau>$pzOh%QbZh{ZBjrDT!6ZYs{DTYE)8U&fe*WnW0@y;ptfP&J7Sj} z$iXdeOSZn7n;Ee17~uR)#vH85CdWB@k>9OX%3AYpb)`*8RcFrk-t?{&HGYHM?@Yr$ zq4qdTf{jcTjd@DxFIyJh2>M?;-YIQ z*be@lf7X@c%dw|pvAo@|e2@x%n)r?l7FttP$G*t^^E6C_b=LsFM)v=Ty7EA#|2IA) zN+n+;cc|P}j+C>M`zn->Eg>OyZbPS=91+4dhE~py9NUyTq1L@kp+cr|;>1w-uGQAZ>nQ|=6z@>B7{D<*Ko z4Z*tpR@Tl@+iFDj0z~o#D%fVZ^jqKfy8-gG52+Y4R`qGQXW{)v4kAG1wgcAp zt#FlU-7bcmM%e`;;leM@@ll~xh_@}{<6c#p<^AjTPl8P&l`DI#PwDqmWIAx(Kuj-2 zO&!Mew3;EuRe>uQXIRc7%)t@^m_2!=pe)=0CS+9!=ZoSi0rB0`cIby<@uu*UZ;hGs z&XLjIq&7Ja{`tyk{`6FXZy(%^Cl(mWu#7vbsT=cQv;#$IVZ-KQJW`88Z3YE)ww!3+ z=_j>F$aBU7j+x|7K^-8y-SsZjR#?EH)2bz_?`N0Zo?FEXyr~|`$fKY9Oga#q@r;yM z({%6Z*(I9I?qz;UUbr2jJ{y$Q$a7itj^EuuSx;KJ_LfS1bTvj>h9WJq?pdcJ+K_g& zTAClm`-;d+dO`0T`@7YIaLL%c>Hg`Jr|7;TAS%cCN|c|@{g03K=4K>+^ix4`c&DJ; zlH1PAJDAEA8(vmnQgcy(YtfbE6dX?@M(@~pcrAN%5ek=FVm?fG$D%s%3HaLJ=qr_N z1rc#*9l<-~8GQUAu!;Msc?%A;8K%79`xXL~Ay?kedIvffp=jqS;xh*-T}7!gdW8M( zU*CW$M(h>Oj#-YX*^qeX1~z^e21rdf%g zn#2hWtu1_^%F7yxL{(*^Jyd|(#)GuA=|%UfWKgj zlhMZNDP+^-Lh=?GG9&n2i*Q%uzYRV~32&feMf@C@sk;BxL0q%1Q1!;tqP|Fsed)v&uekoC_FuJgr}nDt;^?4fq#FA)zOT;5P-)6 z*DAO*YqJ}70ek3onv$o{n^E%4q!`>?bkyx}J~6hHWt304lRZg~6b}jfYD8$5%lDX_ zP~u?@T6dUs>^_xKCkuCrAb9%L?7|jzi^Xqh@6XK%_emM1cpWx_G)*rHJClcL_}^{|btO+r%G3IzNK)6_0raBOT8 z=u+=Y^bW3T{TeBpD7SWA{tuY*^#|iz{iSlN!3F% z3Y8cBk(boEM>-=esA+oZ7W?k)4#Y?>B{${qS0%<=xUEf*j;ze4L3tb3={tVj1h9UK 
zgvS%_%+sOQQGEKa^zn2Wls{T|HNGgMzuG;IFsT(88UpD|7&8~lA`i5+1G1^~$IsXrcR?6p6!E~V^)~%tL1PT2t_m@IOuQ6_Dg_AF@)E!v9R*XT zLnPjHg$Jk(?tadtoR@W6>4Bxw&9sn+8>4kAr90yV>mdz}3Om8Wrh1tOKj6N7&5ses zNa_keL+uBm3%w(eeBR#&-<4}J#ii>Cs`?w#x9h_}(i^^(kRd_+7xZ37_riGl%vn=T zj7ndmJx!kSa{|3vaxU9iARsa#{2*P5nt|QF7RjXj>(V9|C4WPcIQ?*MgXuI=AGSi1 z>(bsWoQsVM!Yw=pWxW4b*tzBqWZM>s-meqBQG<<4?i~U zPKPfgld9}l#*`lF063cJtJ~`PT59;S zgy7I15FrF}66ZOsu3t-sKN;@H!p71uuuhaF*fl$sdBUcU+I`AZ>7!bM6ZhJi|JJ$?zmj(Dz&E ziIC1KQV0{+UTbOrrhu)JgmT$LnvwX5V zc&phddWeGYe4isr34pHPDPjz}#X1}PbnS8$Fd$3_Nm^KZ@aI3ajMV&P=F!N_vw?n1 zoNc0v?T7ANq99OF80-O%bdHAgJIIb!d^jE_e*F4rAQ7;U~8qnV$V8D>0t z-l8XqZkFd;r2ajqc*AUCHAq!=dg)OS4A$tpuIMSdWB1ik0WKbdPj1Py!5s8a*H`}@ zPJR_6)rLs3>J%!g(i=!_HSeA>jK7i`oOxYIEj_}1c@TjN5ylDUTN7cOgI`<64HQ{bCT5j>xmpq0C3<5i>u7JmZ%AW@fs-phk(F&r6{}9=nrF4 zb<7up@7tVJ|7sgbNxd&7WH8OOF&q?9;VfrRQN^jiL^#=CdtcG5w6F{ta8@Gk9}OV^ z^}GeZ4MdXyki#3wp=>lBfaLLS_?&$? z->~P>BTiawLMPsFBLD-Q73c1dO3{iA;}o)I4x>IAREpvnt$zp_(y<l#WNc(SR*=8J#!NR*dh?IYW#8#M z(ILT+j5fc)PLuG!4tA&DP-UJ)kU-I0Jik-R=`DCR{pp9r8_tlsPUTg4WRR&AV)Dk= zB!_2c)UedvseaSex~6NTL$Y;?*K>9fS%yNHGr8b&poUr_sJ}6%`2O-{Y3rAf2*GQi z(v))$y#%yquRUe+44EJ!>y8C+pOL42VBvvjdk(6y%D$%J*vXPDt@H>a4?=NcbzwwE zqrc?019tZEKz+=q*vpU46D}DNU5?Q70Uuv1Z9D96Bj%X01u65RBGG&6E#G$oEr2DI z>B@IhNo6fg>g%=v z&3`buGtcHG5nvg*2B?d(bL za+J8f=Ns8uOF~8qE?ToIS`0^}8uz<0R@~VhA`6p_bUQ-77$ea(rU<$^Zs8H7i`P?i zCRD!k(3)%^6V%8{lpgKeRN&X1mT&6V%oS%?=P;yLmx=x$nfMHOcC5pOvM zqVCevmBB0Ily4qH*M~(uVkyJ38mas2RoSbd6C;D>53B9HF(J*Oh&;ixwkyLaUz60m zioTYlNdpbRYfV{M)j$E+u zvzJBAoJfbTg%-kDEcMj!n9-WGk4pDoLu#;ZYv_X)(FL1l}?rM7M$>7N4=h zsFHsa{n790dM!TRT5cU{%GqIyO?z=$-z-7@w6z&_ck*%=Hr2_0Zw5jR33ppr=3#Nx zYRl3#JGq2XX1+UBlh`McUsGXYEi$;x3Z`2cb$OdXOAqxi-n&{RF0KQHJ+Pu zU{I6$erD^5T#q8OU1k*9m9q-9ZKvu)1Bxr^LQjqx;VS{-tr3w8h`iKR+Y~bNc@np@ zk@T|TkR?}?cGtia{OrWi^P-Bhf0oi-UD$ei4J76w9bZ$-t_JdRfw8)uT{6*amHXp8 zC7)>I2-A&ieRdC0ZV?E)ARH^UhC>K*2D7;zzw!pw%Pb2h}P zIOP{tzehNG6nXxOFZ+@IEX?J;7ZH9fZ0k-eBSLt+I_YSiLhXP+59~28aUj%onS9G? 
z>>mI?=&I?JOLlk1eqT2CCmw(mS53K6dGURgwwpNH#KCgQ& z4ks2$vF5@mX7EsLOyB;9XuCa=;?071-D6Ue@U))+Hf~{66*m9Nxgkx-tnKb^n?E&? z#xgz-sHj!gDv01_{1-N_|$hsUcyj*a|G%V*m#NJ;T>+BztH)i4xTa>7ohsVDy2DSJb%2o zf%Vnl)A>uUOp&4U7*aI&B*oVbnZpX#%>D5`ENJYp)U4I3eF{`XK$K{k{HckYp&vX5 zr?|adW76jou8_CRB*aMjIQ7-M%GhWNNo1lv(xXyw>!D@0(2@@0Ca|wIw7Nsf?iKgP zV-W?>ekEkjg9s;-uL!>P4Lsq!`C04ir@qr)vu6}Pu~pHF9VUOR1r zwOVG4Fgit9?U4AR{t!C*aMvG8MCKSf%!4)#%Fqe&w=m{A`b{&s+;Wp$V|n09i*F%5 zrRAMhZv$TQh7ivkjTjKvjy3&IMsXuu?Z}{fzW?&-^|#Rim!2F}ddEHTj$Eo;&46gb zz!uJbe7TV+AfO2g|9`$T13e9q+dK%~FDP){t9-BOUcn9wR5>|sNC z`fYk$EtqPwW@z+~xJMKmJ`JSeRxFl{2pJBC) zcl??cWiupagM&o6dO3H^7R(J8}gNRU2cf*Ai_4!S$E@cJ+?FcPnJ~KNib$ z*MHhBP0gt57tCaVLe7w_U7R0@6z#$r?MIys^CCW<^8GiMl3G8QNuR6ksj%-Zw^?%? zzH0sQK@{pLuxm2N+yBx>DL(pJIIlu(@P`43GK54(aUp;1AV{wEGYN`&HxlIxmV9>j z6lD}3ZVCrGk<8(!GikrcV}gBoR(KBeY$l606K#$;j7#z{>rD70xYMf6%&ke#$JQMN zPc?G9&T_k4ZM!DK8asipXX@K}`)SQ4J&?KzBcGwZ9Tv|vX4{=co1Yc5J8Cy{I^qKn z<5MBqIaw$JqtGKBn%Jz&y9(FlOzxgDs3y5I|L9f-Mo!+pt?GN}!;I70{~`;&&2lM@ zOJ6>@0}$#;oaes`JDJhC!f-g9B;)UJEA_ zQ^hY$KBCZE1=Sy0qLn2SCf6%|N^@-uARd1uQf*a&y-}YsVJ|jguTZ2ut(i_o_durX zfC)c%9(JLX%Cbhfn{2=V3xh2NEP0~N7ye-!K5|R@LP5J6bx4&JPVzjk<$mwxn|wrT zjyR<{a^X@GafxwucA%4Vk`w6^Oxg0oRhSoVMU!yO$$oP0sNy!{`2a>NqTsrMU*K7wEPzWtLg;Xo&rSLL%iay6J4|xV|dzn zS-ktY*jWIKi3z1UMA2c9HO#e#yCb8K)%JbT&cd{Yt+I!O5)iIT7RtEK9K40ru`(vS z6Qm@(cPPxdp05g&e8(3`S9+;J^X$MR>c(2onYX-;WxlpTHeQR-{DH88g_8c5riYT9 z0Ry7X258}j>d@`+@MK)k)&EGck+-^?0raTHZlGOhXbfYWWB7WEXD~&)3^$0n;Rbf^ zLQlSdS2So&6Gtkg_o)<3J_-KRp(AWHOy&ssYT~u}qNlGht>T+DY&#&?=zLtSiSe>C zOeG@%S@wr3OqM47I--&TvTNtQf1mDj}c?O8t+vZ38~=k|#wu zn{rx@we4}9xyB-e){Y!P|J_1p6kv@T`S$<82J$bkZ>qEH zh{d9wG2e*@vexBnOL>2&c^^YHi*_waGmn(GJ9C2{th)xe)#AqmI)vYRt#Y^Cd13p+ z9%bNuWCK`%6I?#W!(Tf-OL|bgGo1g9g<+TAS~6Sj5g0c%sKyp-LdGPmQLv%r*4S{; z85znJ#`+yF&z@zwRH20q{AEOhl{n|8o9vNIe)%teMUUQ)CV-SJT`RFFo{K)LlcV2AE@tLlhol_~TjD|nq0fT4R zV6HWfl1)lVG1^>jA8weX>|e`Pzl#Q0BH+`EA8H?GnnaHw1K#4LPe6!UWpg}g2EY#uTc__x zvU1cgt?cWM=gt$(Zp#oiI^g3rX|Mj~c%D~nu)HXdPFt-nfxzwau`IJtQs%K@XY??< 
z)k!VYpYcieG2CXG>GUEy;gQc_`UB`MqRnGx{;#4sq6~b;fkp;Qij?CQtOv z=h=@G9Df*M^bBLZ9>eqM4g6a(@cFrcVad~W-{EM&NR!~lHv#GozUiI0Q*FC3FUTfO z>8@~mc-=PGBNY;YWzy+w`7Wf;kALg|)vT!dBXEDQIf=5eA2Bt(piudYou~W2hC}W? zyPIksS8)0Y!R+6vDI&UeVJ{MCuEPK1)P5Fu?6ughUyUIDCp|4!k zhIFpKjwZg&%-SjLF5re-tJAS3{sGWU0T?&(>=E6`E<@reNr1lwCz3(SAQ`Wd-LO^p zNte-T%tOS&Xc4FKqE9n#VAR1~B_A2Mn?qwCcH~tDP)x52idn(`IOzYGK6>73J`t*o zD0&9A1CxwN+Dl>kIzjx2yOO*C5EwxsR2hXYs0 zNHwMs@xtaIV*X^Dq#U6&mX*ObbK9^j(`~6+oNrvULC#WlcbYA zSY+>;-L1=vwJWoRVvrMEbh?0*&RyNNFA|%NKF@jNC!et*Jqb>p0~#&Q7%u&}oCo5$ zyNq*y#uOAx+`K6+`)eKB4u9$0TmBxxFA4tRr*wAGF^j>-L*8a?`6u8`CJY^=|0*i_dI;%mb-V*r{R=gxtG zorTKf`JKWZL?}|XFo{S-CUHeM{7%X%dI-3gJhaL9zY8lRPUeo)WDC20%X;!=pKkq4 z2~ON;0RFIBdDpArsVIGAq+AB+i>}tuIqC0U3*~58gFmq?s9zLMn-f4sbD%tmFfZY# zIH78 z0o9U?bChtQ?!w}`k!x3wi_cyy?iq-0UOO88cP+CmKp5j=NC%kv3xv%e-W7mm%|_NE zE!@nMoeK}9torUo(rBv0b0e2i=NPDKzXC$sOWL9AVY0Ch9SZq18W?-D_y}$opf`bc z7I#Wq#tS)u7Pob#g1`POQ7I0yjIVcT>ji*e*as2919ElRLW9)ek| zEioaYrb9DQ)7a&K8DW3Kh-22p7QA-``Dn}T3*xc6i??gz&ZPn#oL?97)@G8GG=b=< z-CT9=Nx?0W@2sA$CjAJ;Ft+s%5vu}}V~d~@p+{t><~TI0KZlKWDE2x;?@4Xjo~4Sw zC!phaYHPgC@!y`J+cygiT5Njcdu}>VR4m%txxk{EwQRguvkZ<0XfrkuUTm1d>g*5h$>}}IJqsIGL&)@#< z@KS}IUGph}qZtyzDtD0If_MxUP;eoc-x_gK%w=cEA`udLdk)d}O1okE@Pcu3(_ozD z(1Q)_epH#QY5CWm1xrVVG@&n<#(>6+tWaJfV$0XDF5hhq-Tnu|9s)>nC4q;H5B{kH zPG$cR+@YW+>ri_eLd)PyLn0T;*3;v{Wz}j2{;Vf4;5MCP!@&W>xeH5&lhM>{ZOg;G zejG2w=Q&ROW@fRwDZB-jYDvnhpVKngb}qzfruMP@+K53{UNr~JTFh@;@^}+TvIWq3 zpbeO}|DRv~dQ>%0$E6jYBEZ=F)Wf`eb3YErPP450jeSz>sTQC1NPyjV_g-dPp#DuO zc7MTG<&Bo(c>YVj;8EBSY6s%Ql>iFo#?K#@qt0JEH_dPb2&FO-PW)QpkYFPR?My6# z)^PaP7&_-M3Gk7dg%kHBshPjuA+L){LEgGj3*^dT1g@vuz0%&*Et!;0x_E z9{B?neLx^TAeytS8_?>8w%-4X@7@0GIYKL9UmFMH?I?WVnn&!I^decc)(Zjp!oMkm zknRrKlZy40qs%`G)YloYq$i2Jrq0ertNuEMl&*WZuqV~fjOl-|Nm9?P=t*O)@OHJx z-%TtjP>;j*L}Fi-hlKr$=GAefwQ?Me=g`| zN?`YogXQNmPV1z&)v^|BJkQp~Dp=5n4Ptx6d7E=kMv&Bl>_7bG)$+L|ISD1|oLN*;?7# zU;wMLjgoBs0>}eDX`%|7E*%uat0YQK=ORrXfLCm)+nAO#AKqMQ1t7cHin(T4e>~QQJ ZuSd-5iUz*Tc>wFr)X?fmmBH;t{{zYkEKC3Z diff --git 
a/unsloth/__init__.py b/unsloth/__init__.py index e9f8b5e65b..e39afddfa9 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -47,7 +47,9 @@ "We have some installation instructions on our Github page.") # We only support torch 2.1 -major_torch, minor_torch, _ = torch.__version__.split(".") +# Fixes https://github.com/unslothai/unsloth/issues/38 +torch_version = = torch.__version__.split(".") +major_torch, minor_torch = torch_version[0], torch_version[1] major_torch, minor_torch = int(major_torch), int(minor_torch) if (major_torch != 2) or (major_torch == 2 and minor_torch < 1): raise ImportError("Unsloth only supports Pytorch 2.1 for now. Please update your Pytorch to 2.1.\n"\ diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 2b1ad6df0a..9322049dc8 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,5 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. +from .loader import FastLanguageModel from .llama import FastLlamaModel from .mistral import FastMistralModel diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 0ccbe9117f..de124c9dde 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -20,6 +20,7 @@ warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger +import platform __version__ = "2023.12" __all__ = [ @@ -99,6 +100,6 @@ def print_unsloth_message(name): f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 support = {str(SUPPORTS_BFLOAT16).upper()}\n' + f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. 
Platform = {platform.system()}\n' print(statistics) pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py new file mode 100644 index 0000000000..f75ebbcd5d --- /dev/null +++ b/unsloth/models/loader.py @@ -0,0 +1,63 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .llama import FastLlamaModel, logger +from .mistral import FastMistralModel +from transformers import AutoConfig + + +class FastLanguageModel: + @staticmethod + def from_pretrained( + model_name = "mistralai/Mistral-7B-v0.1", + max_seq_length = 4096, + dtype = None, + load_in_4bit = True, + token = None, + device_map = "sequential", + rope_scaling = None, + *args, **kwargs, + ): + model_config = AutoConfig.from_pretrained(model_name) + model_type = model_config.model_type + + if model_type == "llama": + return FastLlamaModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + *args, **kwargs, + ) + elif model_type == "mistral": + if rope_scaling is not None: + logger.warning_once("Mistral models do not support RoPE scaling.") + return FastMistralModel.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + *args, **kwargs, + ) + else: + 
raise NotImplementedError( + f"{model_name} not supported yet! Make an issue to https://github.com/unslothai/unsloth!", + ) + pass +pass From 275483f7de1ef8dd982a9bc1ff86a9555b1cdc2f Mon Sep 17 00:00:00 2001 From: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Mon, 18 Dec 2023 11:32:46 +1100 Subject: [PATCH 0074/1088] Update __init__.py Fixing typo extra '=' --- unsloth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/__init__.py b/unsloth/__init__.py index e39afddfa9..5b0b8d325d 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -48,7 +48,7 @@ # We only support torch 2.1 # Fixes https://github.com/unslothai/unsloth/issues/38 -torch_version = = torch.__version__.split(".") +torch_version = torch.__version__.split(".") major_torch, minor_torch = torch_version[0], torch_version[1] major_torch, minor_torch = int(major_torch), int(minor_torch) if (major_torch != 2) or (major_torch == 2 and minor_torch < 1): From 725e581539a0755beb23aaff684608db4e42160a Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Mon, 18 Dec 2023 12:25:47 +1100 Subject: [PATCH 0075/1088] Fix typo --- unsloth/models/mistral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 7eb651cd33..9a91a8fc1f 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -165,7 +165,7 @@ def MistralForCausalLM_fast_forward( else: # Fix from https://github.com/Rypo causal_mask = xformers.attn_bias.BlockDiagonalCausalMask\ - .from_seqlens([qlen]*bsz)\ + .from_seqlens([q_len]*bsz)\ .make_local_attention(window_size = sliding_window) pass From de855c2afa68ada67d8b3caad4eb9bb592bf4a4f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 23 Dec 2023 04:22:48 +1100 Subject: [PATCH 0076/1088] Small fixes (#48) * Fix generation for GQA * Update _utils.py * flash attn * Update _utils.py * Update llama.py * Update mistral.py * platform * Update _utils.py * Update llama.py 
* Logo changed * Update README.md * Update README.md --- README.md | 16 +++---- images/unsloth made with love.png | Bin 60119 -> 63453 bytes images/unsloth new logo.png | Bin 59415 -> 60119 bytes unsloth/models/_utils.py | 43 +++++++++++-------- unsloth/models/llama.py | 67 +++++++++++++++++------------- unsloth/models/loader.py | 5 ++- unsloth/models/mistral.py | 12 +++++- 7 files changed, 87 insertions(+), 56 deletions(-) diff --git a/README.md b/README.md index 872bf7fd49..4e2ebe3c63 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ If you trained a model with Unsloth, we made a cool sticker!! # Installation Instructions - Conda Unsloth currently only supports Linux distros and Pytorch == 2.1. -``` +```bash conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ -c pytorch -c nvidia -c xformers -c conda-forge -y pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" @@ -41,16 +41,16 @@ pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" # Installation Instructions - Pip 1. Find your CUDA version via -``` +```python import torch; torch.version.cuda ``` 2. We only support Pytorch 2.1 (2.1.1 bugs out for now): You can update Pytorch via Pip (interchange cu121 / cu118) -``` +```bash pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ --index-url https://download.pytorch.org/whl/cu121 ``` 2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the "ampere" path. -``` +```bash pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu118_ampere] @ git+https://github.com/unslothai/unsloth.git" @@ -59,13 +59,13 @@ pip install "unsloth[cu121_ampere] @ git+https://github.com/unslothai/unsloth.gi Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. 4. 
If you get errors, try the below first, then go back to step 1: -``` +```bash pip install --upgrade pip ``` # Documentation We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! -``` +```python from unsloth import FastLlamaModel, FastMistralModel import torch max_seq_length = 2048 # Can change to any number <= 4096 @@ -305,7 +305,7 @@ $$ # Troubleshooting 1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: -``` +```bash !ldconfig /usr/lib64-nvidia ``` 2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. @@ -315,5 +315,5 @@ $$ # Credits 1. [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support 2. [152334H](https://github.com/152334H) for experimental DPO support - +3. [atgctg](https://github.com/atgctg) for syntax highlighting diff --git a/images/unsloth made with love.png b/images/unsloth made with love.png index 20dac04f388d8eec89066d81362359994e855786..9bf7ec93680f889d7602e5f56a8d677d6a58ae6a 100644 GIT binary patch literal 63453 zcmYIvWkA%=_w@o&(k)U_0@9$Oq|%|JbeD(-%Sy-6NOwsq-5pCVDH4KoEi5buNG#p; zeAMsn|GeN0JG(P;=gzt3o_i-uT~&eT?)|$U5Qs=gQC1TK!a4tn!0jK1`wAjN(|su4Eh74hxe#OHg4k8^IecbK9^ zIY(!r7G~_!Ejux(DJk=1r-faTS74yA5vDte|A&tizlt;wkTB1#3 zi2H@CgBbMV=JfrfotU3J=qY9qW)*D*4yfMSDs=iQjKk#Xy%*ry(wI$&Jtwb4u0P); zjej7EsKlv~KW-=N&;$))d{~ZR4*P`4>PaAk>oF0U;rV^NIc)M`)j^p?c z|L-t-0?6-M!jAQu=|`%p5Q1I_NH|-a*qsim+fpW*=zpvG{2=T?@(3=uZ*$DiLmuvB z(d!6OxHPrIu!1!F*{w8Q7&kBIEtWfOhcCv}_um}y>IjM_`aU$52^jRh`Clsh8?;Yc zdc|_94JYs=*d@Z~mB5I_Hxu{M!pOMuB|hP=*`yUUtMEJQKqi<16fd~Jh8NP1>@3B$ z^XuyXld_|QG3dWhcuF6q&Vz3TLG4!1>!U!FWLxaL3*?0yG{!lb@ZS*4#3w#+w~zg@ z-4n*K!Y?NZvHVQ2p7oh}W&Pa}X}qN3>+3>LE69v)*J~4)rkNgZi3FsI5u1Wec=>m| zifQ9ZYF(#QQj*zu_b5+`|W=bu5Cdw4aacNsvZSf4v*Ra80A z2_$6ueHgI&9W(#85ydyDL`!@iaE*h{$Cr?7@zt}H--Vd>692bKyf7{)EEru!Ehz2T zuf6kFg3{LdWfva7d~ 
z*?uv}-Xy>P3ATb8LXZg7#{U@uZ=X`-+TfSmw^c8Ka4#^V4*AP^*Et*SpS42^&&fKD zZ+I?yu+$M=ff^w6aY_U0UoQ-bwOZB!>4MI(+<2<1o_bpR#$VzBAvI?Hx-zMBd~D#g z=bA0Nof$tM`LahHw505da~0s60s7A)1~hinnor$sN7$m(5q;S!7zix0O6L~G|2{UT zqVF&TwRX-kNO7W96ih)RpwIDqG_U?ySjlb?*HO^V`nM$6zMtS}H3v-gUnb)+?lAFKgfkm!S1g?_Q5UL0ao^A|G3Cl_c5>_v zllQy*dJA2ZTA|f|vJY0)R?=BjU1bt6(yDK%KML0-LfQ2_IQ#o$Ag+wzH6JxHGE1)z z%p6RahY{-WnSJSMe@!`-^bjP1c}C}_BZ<8nml+v&f~l=la01$U?5FfwlE$Q9a=rW8 zCxfzuHVSPbmzliQ_et3zB2!-%`G+U#c-@M94pQr9W!hNIPOEa8m}ius8T?hed1_1t zwI9{vrirOMoZ@vkZ}dYRdW}lzEp-BSEDffs>FKE$ zwOZZbLVMm_k5v5)ef4n3gGW4a*{;?bhBFvDPA7VolOs?5-CU(%SCB7gPY9eR#)&po ze`5XqE8Mxe3BL(Mp4YqH9d10o@3cextG=kQ%Ldu187MzZv`0K&qHk zL8l{(;a<4FjGmAEY%mfZS}oZQTr52KtkuTo@7}GIu;Mpq++1g4fsqLB2*~u+|Mo)>S+Z9CN9?Zh5PjeVW&A~@fT{ez+ zQ%NS%W7bA(o`lSNXKw?L>lMbQ0pxP4OnQqHmkqnhrTz!sbk_Z4Mj0qw{bI8yQt3;?&s~ zUMljj(~W0S`*QAL{xBgCMQ1ui5#qcVcs0t3$j+0a*?@k3*i^A7`cTN1;T@VN6nZJlUL2p|$FpnDU z9-Ci45TQ_T+`Wt6>B&pEyo1%epJyepQauGnU>8+Jdr*72zzEQLjmlYv8qS~38yTWY zHex8aalYD^+wR>G%Kfv}KIt*Y2X{%TmCIfoaao)ur0r@#vPAuDT7jfk@V2*M(j7cb zJ~Usgkvc0u?MZfQU?*_}ITnC)#=Ny{aqpd!VJ|XgE}1`(qt@Nk${yh)?6A;y0e>M_ z02RrYu_k@!wprSk2Ikt6R{ymk_wZ%sQ6)5EJ}mPDjZF!`W^@?-_E< zx+v+{1>tt}&r+iut$yHCERR+;>-o-b_--y>p1CVTG}x~$Cassi*$E+*VniKW#vxx! 
zHJa9L$%6koet{9JXG|SvXE>QHKl(cCa1qDR3^OEI$Qy2nWnuyc)izd+qq0T?JyPiwPFV|JbXe4H;#Qir-KMqJCOa>yl7+gnrDwz#t@pH>VxOL$s z1u5EJh=}lXMQr(WMX%w&wIx}1RyNCJJ?fO_gXwfDB~nIN0;jM|rF5AINIUIj4vWPb zLF=b8xbPi8OzfMqbIOaq0gDY|0`!cyBhoqjs*d8%40rveL6n|mib}y&?Y?&7{O;yE zF*A#G^GqtcY!zAgJ7s0RmcLEnHq7zgEo!9YQ4M0>l&RG9c{IY;A=h@=ox1ReRVtk& z`!6?PyQ5UW!9I*sF8r;=4Ix>&cCv)1UDFnQ=k8%|>qFJ?$;2xB zXDXDMu-_p_Jb}+CTNQQl3Z#bh{h&iH9@9fea-jw@raUd;xWz8O;dDQ|RDrVW_&&z2 z@C=Tf22H(!EqNEDCgfDd77Sz1XKhHHscE6+UBE*->whgDWS_CX50pYAo)Qu>p(aVfJOX2+_yupVo%is{pwf-?z z^+8P(JTVDu-kv*y|1V8?9@r@>~tm=Ot9;v4g>X zi~{ZXQfJ%)Ic9bu+Ypm&579fEJS=V3y>DJAS}0Hkqd+r0re&Se#leUidWD%HCtm%A5X4$}WCfUHlL| zaxg8JcVFOZMHAVrksuw@2b25Gr7^m;V%hzLR*&r0#$w z;Gc~*X-C&Dq@RDA}&3p>C6MT!tNV^MY@c-Q-HrfS*toEz$n+G3%>Vm5RP z_MYh0O#0zrw;UttYf}dpsAPzO@bl7@FENM8|rzQf#Kdv$#Zv?ZWilzXw+us zm}}>WK{dUq5!9^oI>*m5y&n}6G3NUB7#P7c-W-b9pO7we{#zHXUr1X1chKAKK#pvF zYVpu$t(OQA+&yf+6UKWH>w3m;tj5~|+GO-x)|WBYx%ENO3KQ;oKgtJ6l1F*$XeqL&y%0lG>Y63*C0wTt;x zUhT^EU8bj4%!Px0{v7+T8Njpy^OWs006S>w)JK>bK^;xm@^@H5OGzU8I4dMq)5>I_ z-;M5#*aDt>Y@aJGb*htR#DF7puPd2G{>T14_3r+R&2pc7`< z!B#ieKd!gW^&bc&>fK!efwcRdeeHjQcEa%9G}UK*G=*LaGxR+t9NcPPud6fuu(Byr z{^L{r4&vJ+UD&VB$qicBgJAq%psN^3)`K?|8u@24-i~wZ`x#eh)te9f#-hT$_q*WV+2!;Q^%saGyhdab zFJUJ=eC-9KOODfH`e)u-%D@JpQ+i)k4CeTE4(B3GI#a3&m1sI&s?I$nJlO z8tq*_rS=unUwe|;bIl17X7^|S+!x>V@sp*3Y^lz!IbQs*QryUuy81p^y;z1Y&&=2- z)iqeud}mpqrPG;G1Nl2$RL5x^lgQ^JO-o|t&JgySw$p0P5$?S7f9xjgVapZ!!P>*_ zvWF3{l%X>u#>Q{|YWDz_7B^J09Ft`?+5n&$5uOwn+s`T(arn*zAKBAX6iuIEU7r!5 z(+z2^^7KyV!I7kv`-h|G{y-@jxNq9k!UZb9_he}4A-NjhY3Bs-@d zKMry%s$s|Jmuo!7rwlp%C}r>%y8B#L&PQzZtrfSz}xj0rCh zNZchMlE`if?`CUuwEhWu8nisJR5$ziT>uhuB&#NUG+wm*aEIg=`Ur}yHOOe$aW}6u z%Gf$E*TnjFmC+OO@bwL^$pgek`f}U*h>J(2uvPjOY;Fz5+SFKu6+ja8>9HVdmw7=T z&1he2r!L)c;~WW!D+=+ca265LFF%%FefxGf3nK&-#25I4+WXt`zeVCTF?IYh$&#&% zxEu#v^#PIM4%p$0Hm2-?ug4&!D5YUsk^;T5xuQ46hp7Rc7=D!qaJ^VxE3_fhE1GY3 zxO-LA>#OcdE*vVio+{KZ=ZL)uLv>3&{tro}@ERwgx6@g_T;})AHh1^GeG67*#M$u< 
zUd-)?enWoze$^IUyn82O+LE%SrsjVKRu_9`m*2zXX&Vn37vnRpb-9Ev2Y!Zv9xF81GpRwnoHUsS~H0Sv4LXI0EFgLL=zC7Tj z>vN+G>J~ljy|G?+y)HKO_}oA+_dWK_(RfQZ9sK5DK}r2$q*F6G%J4!>wgDMX*?{_& zKak@PTw}tA?Go0tA-9x%NKZ3*d^z+GTxaOp(Q6eHFiit+Kc_sm#$2nzN_86vjE#Qs z-Mz<9$p|UEJ=1C->W0;zpVf_Vyh^BGp+nz9r(I%&L zSaet{XAMIK9>}g$sMv)3RCC0K+V5H%aO&zls$0WWxCg$<{O{A?#Pmb`=AY}<*149d z%%ZDQUwAl}LK06qI~gv&e680fJKCu|j7>=WW8QS@MdyK-qlOYKS9L5islZDGd8yV+4}E0My(9lke~ zCT5%0r%1KX4)_@=Vt*-+Bn_P{0%oL1P^=Ng>M-{YU64)K`P$#gwGiC4cUJQJ?92Nw z6g4Bk-_*lk4x%$RtT>V0vXriWnyWp>$IVKiPfH8jJ23ssR}P^#e|ggg7Ia^t%eYZC z8~hM+z7gDqUTC3mdk)DAyp2JidLyb`cMPm-Y;vosH9STJ2epFQTO+iszsE9j^7A*g z-AzrAI$ESbUROm=d4Wd2Mt<9ouMl&-7LMr#fBm@slU`qWP@CiL{1gi`qRDxbgYDuK zrT|q(rpaf+%FxSe45JmmwG(NkZi3vN@}3xOwdu|czczyArPAWkgR)#Qw*kH)2&&LFb*~F8Gior zb6C#l%#yy`Qs9&qka{b97u#*M6nl5xF6gv26x6g%5syM(F3EtL$f2nwq^EMAEHZ{a z$U{hW%wL{esh1Hal)qyE5TVGepA0hLqF-4m8BTb2!uihEP*39~rlxZ0>$N>@FSaVF zbQQ^V2&j2pxVY5&GD6c%?dKbc(dn+)KPM)F$_*PIRWkXUVQfuT_LVm-h7SxVk8|(D z8v2dq8*`zx5%3E`cAk}1G;1K;Uj~&Yq*J{c6dM}bV>E(wFLUF-9ax5~Dhl&oQ~J&Dfo> zjH!R6IF6Vr=|QTnyhLS7fpqyZ{Z*f1e^y`Q=G^#e6*h`^$l@bgV?T{t`BXH8Wl)mW ztzaL$Op1^6cGFMO`@1W$KeSnT+h8kF$#*7uEWUkO0BB(Z9M3>oE59yNOM?kfsM5?K zcm&@>(1f_5W3H83MAIVPv04g|yNS7Ot0U$uf2qybqMTFLxT1?e)pd z9Uq`F(Y2R6`xh$7Jk^w^n(5h^PbiBY?)7C$bqsdL^!+@c>b(9N&*FnwW5mfr=s)5~ zp084jlSS-zmaJxYH&z=wh{e7m9m-4V1LZayu%i$(4x>Dc*Qip%9LZL9eJZll&t$0dHj#9~A4NGWqkyHtmZ#gtUULof79(P;0-34URw>-}$$ zw3Z$}xZSk5pIj<&KH-ge=C z*mPeU3yz9%IqS|j-S664QBO>-G^#h4(E0;j`$Aqj^(hvaQtKH`i3ll+L{gEf>y!By zmsR{QsT?QkeEYYpBN=!Q{(+rQXO}*ju#?K9+Uh(9yEj0ZGT#YEx`kkxb=U>^oLxD! 
z!O1I1`sG@df}pT6i!#pv()*A|@@h{;?n?xf>tHhP8F0OlULE>Z-@}I-kG!Yxs&x}_ zI2k2BhF+fZKAAQOlp&FzgkH>ubxLbFB^f#J1F$ zwX_o@wGvTY+D&>gN^xvI-yKuh2*}Wya}IhCY(AtNJ-{6!=j_Q&J;+v=phskko*T@) zIea>vZ6J}$*>~{Cr`M9T*KaS&e5Q7_wBEc4vdM;0=PJbLSlnjDXU2sIS}!C{uN&$v z$0Z@nry8CV*@bQG(yJXVPW^Is|3QCM-S_T?fZXa|vjb(1kkG|z0>4|D5&Vco*e=1+ zr)SyY!dEaSiY&v+TvmN1Uz)3#zR3t!#?NY+3mVssp6Iruc-J_VDG##a_zm9gZzC^S@qzITy_}jt6U4$^YaaX?m9CQ)`gQ$`kW_>eKj7!cxQQM|TYG zyDc*fw%51>J|#c?iSL!mXrb{(^xJ(}VRDnS>fT707St)nppj*GZ zO0aKpzvKQy9WEcRGcA{QC5+Zqy_uzU9K~JP{i`d53d^2Kg3Od#*SND#H|`)?+BBC?{7{PC609O>1)!uzG`D@qtM`&zKIq?cLZP)B-$?< ztSuOHUI;`M*?9aG}?+S*FbDj(T_flKcfNGys+@$i*;*+rZ{L7A8dlar|A zOe^KB$u-v64&t7MZ~t&Z!2;~vXcoCgaFa*bA36z;_pW6}II)i~V=WCoguj&f^xJQv ztd(kQr>Z)K^xst$%$xHKeFb%y39yScl}A;&EyS?A5j`L?3Ql^dac5^e+qYsOHe1m0 zlco>xLl((o$lv4DyDmf8gjnsQqy#Zv?fTwo#bYNYCVnw&bWql8TkZ~7*~pF2sac_> zZa$2T^%@s!BrSBI`vjml7wRVIm_WdCP+9jnk6da}d-;=f_6it_coepdyB>*|&GuXq ztwpW$Beo|q{)r`|}CT)1aZFU-FvMBcgkMWg~(g^5jhiUfJ()pTcwoU_QH z&!s*4^F>@H?=Mzxk_--VH!qQ?bJ?Crg@G?G0|l%HDA~0tu9mlEsx7OC;f7PY6`h$X zZ&A4=zksSiM8A82Km0f|NUNDprwI{A?+(KvxL7C?2(UaWKda z=_V4n)7#2$!0x>{QQVN)A}1hzv5}i>Im){7?0hvL_YvW*-Jzr3rsV$xi=Aq$SpVrI>!>D$Ix{w9wfI6q)zBwwCTK30f z{CR{vIgRvQPq5(}ThAaR`N--wZy`}O^vuZ34K&@eES_blB~xxuKb=e6>k!nYwtK@! znLlU$a(U+v{-*DJ^!QP!=)=4={&(dhSBR45w^imtB?M_{|{mySJT z3Y@0izXr|BOcutT!0(NJV)CEIQIn1dh(9Vb#$QUN60Xj15Hm0^NKCo#U}j^8MRtlT zy0a3C@4e79h2@v?JRL~6?7%EfdjV&8ioo+)ZkTt_iuOWkd5kX1lDb+ZNxo$ASdFW? 
zq^^_}?wR9>U|c38UD0`!_LyrM;zg9oac;~E7w+;HW|!5-Vv-h*+*oF;W;7A+xn=;d z41P%tQq-)tO!kesNPhccsXj;Y!I?D0+V2nT&F*fu4Y5HUUs;T6fB4osm3BJg;e69D`at z;YofXb8p!H0NSx^P1~B#0v?MeWcBWenckIQ7*HNCil)pYZM8*94 ziJ&BmQkOCe$`i6kN<^}<6m-i4wSiQT8vk>%o!hD!C@%aPRCz8peKNPTzyyoW3F4T*tAi=9f&RPjp3RUz9z?6~ z$@H0hOq@A~+^e^10Cr}V!U+t}$0KCS#i4Il$@m{M>O5xc3GkRUcH{Eu+0j9KW~C~_ z$lGTCTN6w5#%ncn`q`)7;>xWR-&kCJ_5Po;#6SD)EB82ap{FxWmw|!saOb@ ze#-OalNH-0#%a!?4a+7O%h(U&Jt7*+NT5UwD4*|3+ebp;x5YDNTm+(v@A-wdx>f`u zX-vKSBlwyX^=G4dgh*(lr7~#D!1W^USE!abl5p6#el)FTm3lL~ucg{hg7HNt-~Qs% zCds1r5cfP+>B*-cDZgc`Eu9{f$j&25G4Zs06P&L1yEj4+;gI|K1*~})AmP#9HQ#-T zl(YE6+6i19)0sY$Bu?x)-y9}Vl`v!_ZP)7MiypW!i>O4+JF`>YkzqP!{n%ahQIiMc z{%SN(l!nmt#k03PLWVbS2Q3^6S!EZ=4-xPyyIx*6k);MnAnYPG&&}UnGSL2pNOB>r z^C>grcusI8slm>!(r#U9y()hAGoqM3x{`jwU}BI-qd$+^w_~cpm`*$(636uZv|vXIqvD8uMu=Bmczt+}2g8w9n`pEb z;kUQp*etJEDwTuo88wf&V%Yh<)FJkn^xy|deUAs62gMA~pq%JoU(Sf^#On59Uw8Z2xp|LS)siyh>vQNEC&Z$Mk zg%X9%n1^s#OJZ~7KZF-%+y$LW^Ro<>JWj@gIU*L0S)Nd1A^=a3dO81ARDaj1?(qDDFFdAU-OaE}?8i5lZ;;+Yg zx(2u|LCalCWCfb}+rz8>@Bq7*lHTU*6-bA7>w0&*mg*G3O5jP86jeChC5 z{tsrb{i-$m4$)FplfA*FF*2iTyt%pDolkz_9FRHfpZJTSQkp|CA6EpQInyK%AtJWV z2id&9=HXyp&qKEdZO}5$mq({y)3vCB^b^NTbJLN+7^}04qRyNbr@NKKD{!lRa`xq# zbPtgFm~PcmkGz$m5jguA*F&7N&x)@R23UT5tuhOry(hDnJCY zGG~eJC8~@e@-hFwv7N$exb`Tpr49;c7fIoSI+&&hcVuM{Doh7MsXuBzq@208tZPtP zAap!QH8xl8ZyO!_y0wH^XlK!u2Gf+<$Og5I%cVP38>}YS$e9R*Xlnm%-l}j~6=b`u zw2*L+?poF9?Yu?eFg zs72O$%=cOTtZb1aAao?#%bpFQq#wh8(9P==SJUbmYUUilgY&F-!)!fX$^ii@8VXE_ zMPj37AHbzD#ODt(ByP@D8mLH@P8A>Yfgx z=7b!RbOHXyxoti#A6$*OdTLY-7wnEiSL)vbeH5y79Q>m6tZmEDxu4(FTMSU>1MMoB za7uM6UD;-qr8l}|JLWy^e6AN1<8xo}? 
zenOvbHKpcqQ|m#thDO}eyj21#TcGft>>|8faFyE&JfS}#mH}~qy-p+72SHMN)J669Z0!X$N~oh{6-SdJ1OWV zPk-^A9r)gS`jwsgwTY|8Q%&*F0~6il2gxEzMsI^dI~~hf1A0>DnV(!7&xFa4srnmS z#XVq|E*C?Cv(suh>i!bw@36m| zyVLBNE=t#yO5pQiLIyrkX3{oX-#%&P|K*A7Rb#zfN>bn!E`3lgHxB2NmzeC#_wZV8 zkJvL$CFK>_&mpXf<%ol*23*PR7gbx0wDn2~oe$B07UfN=@hXY3I`XE;KVUVx826B0 zIW#3OuTw5|+~0SrIgfD2_U$cKM+_(MAF;TW$I88xa78)J)!AyOwEk+qX-esPsMw1s zoMZjkl{{;?!K9VeHG)isWq$a_{D8I*HFjB0LFRNvo>BtODF-VSE^*|$303OL9@|R zd_}lrH|me{+MbCT=GE7r;WK(?OXhZ2J1ThL1~treYq7t`j*={7)ug-q8p0^8txfeu zilw9xBlzL|VYT|+@$YUfQ{3B57bl2O-%ZgFk4}(8!WJ3%IgP6rP!`xOk!+yQc;<6; zHbXRn`R725dY4B+8A+_-W?#3t3r9a+yEk~vQZpj9E8Yzyk-Dn-4IoFaHe#UWc35op z-U;vF2daw)i#W@kEqOxzz3qCFgB)zcp~p8OpsdJm#X0QgUaX}O&f}!V;ed0Ueun#KU*SRs0;n)P!df{%zPaCO^ z9g$JdleZrlV?ci@AI+{iC5hefA4Odgt=7$ldk7$NZk1+e;&q){tK5csd|$(BA~ag0 zU%*Kc=(r417%dx$UlXuCq_sMnS8EKavd}U?n`627x@x+wBVCT+R z7)MqSJ*zaLEOWNYKU1YWv4s5_%y^r%Cs1+dht_D37ZY#e)yf#}ln-P_XK1EIy7o1b zZe_HaE?C&DbGoma4z`}&w3H5;x_kpUE>GL9t0(x7A`hrY5N$XXL>~!>4!W)|Zb=+! zBe&%5k3(3shJ>j=>r5Q0%*-Y}buH3Eh^R!a0TKH7d(*Upn8v;=e*&-F^v@W5VT}zu zqioZ{o>TbDVU`QcF3IcB*7y7wOc?2MIE&go;X$|~drtlkBac@PRy&nLBQ``1rmCF! 
zNjNeBNaG)ewxmNN!?1w!s8lVqQD`rbYP#?$Z)y7`L>yIbQv2g!h2+?ntHa9X32L;YZlY(MLeg$<4m~W7gb~g?L9E18d;82 z>$fW-UQyja2PXaQP0_I&>%)_#xwA}O{QQzyL8K_-?Vg&rnTAJN9E{ZdLJqD_-=k55 zv=LWEjQBgFug_(GM*uUwqf{Yg!Ls^63EN8U`D}L&(;Zi=`a!^W*M>uS2)jRS6$CD}kxOv&gG8k?28Tmm%IBR@Ug3>#aS# z(cjX%rhPgo-zPJ>8yE!OX6E4GR^xNB)@l1~bkU^kJ)t8sQ*?!v)ys^rbTx9SY> zKkBo^zJH8ylW4m@q{484*2~+o-RpY#?pQvG&!_u8??U~M#i+qw|rAPYX?2C*7ZeWr5xAn-f zD1EWqrx*5b?G;xQg?i_~aE3p+X(E@_hs&+$ia^bWyPLsG1*_YYwnd$#+VG_^zU}~} zVpfwegv`fRrizpLgApRNoK)%@|;yCj4ds z>%I!qF?$ebT!SJdnNHs8eu}t^d})f?WpV=xOP@pD-njL~FX?=r$9EUAYoQPXvu{R8 zCVY>piY%>czaNN0B)z-XW`8NxHZPF4PQ9cirK%uf=QOb3ukduJyFU{*M?hHkU7L-q zxHNf$v2D9wsLCDSj?za~HaCkt`t_UKKlClxny!gQ{_?DxGt&3=`V8Qvyb3mTFlOjr zSBO5cM(=^I*v{sfnEo@4pgh7zv98G=xk2S zdNvkjM%kld{bgcqatPR@M;JS`-Bz&S30jA*j5pfYx>$tEzK|>pXX-kZ@T7D51P9Py zvM5K2>Wu9kY_)&0G#hD9V(_xOC0<}y;DmIhta^-+?+R28Y&8PRH65Ql{Gx~DunkDR zGMt8=hr5MP_#o{b&#kZKBaA-Smh?HLQ$M;@!^AW-35xA8=Sb{<8e}p^ke8KJ%e#Ag zs3NQC(o&P4>I@~1nLO4{DhdpIb)r?MO4I_@7l@&8?Z3Mde?umnGr-in!(#;D*O&g3 zKPXWKw(YW`JAu*JSvNy%#ft9KXA-n)*ow6YOh@?c?0cQmzyD_AOuPiNXQ?M^nR)HR zlU)N`vum#2?sac-96J-W!?CJdTV@A|8QXypij?4nJ;ez1J?f3nh8iHa$;~!#x%Fis z&M4le-Iu;2Nt1=AMCi&l)&=CPUshAXg1mOyrXi_6L4R%vp5E`nKPs!4l^5wqWdwiw zGqgEg!o$~hw_xLHQVR_?-Y{r{h`+l_PXwQFnapDf21aScG{K?DMq`Q!m>PHKHPHT< z(_GxygN@XiNbj9AY+9lN9!*SEOg^|-uIQMv z-n$0PpEBK$A}i-qACY}Wx=1S`@CH4ZqG2g@n;iLr(4^)Q@CPHS-SFLisMW~VES&i@kdwO^FzGalEy!fTS~L5=={@1!}2O;Ibb(W!t4Ve-N}ru zO0XsVk@lS*^O2o#xX=a+Q4Vt6POM?iEFfgJB$ydH2joQk^f={=n#oGWfV3`#$WXj{ z#S$hAp0_*n^h4Zc2lNDoEH1h!2EM1G=+Y#UGEl~}nHlPZ#eL)97jif!hX z4yo0$9CaAW@dM_dXDBu}+Ewlu>V~ktFPZOjHG!AAs=j(rsKR77V3$B2?`POCXV0YF zTyW1MZuGP#&9>njm5mFNe=CoZIX^U%pgqyX<9JdRXsT=1Uo zq2NxzP3wGnChW2gvjuT?Z{0Vxm!-+=JwN&RtgW=MbSUe=Rr5U6@f`{PFcsSO?J879tY6JQ&z?^4ngbA zQ7LE>b~xFsF(;wX<8J zzC2Zf<)j_kvk7+IOa8dzLax%9l%os7zWSfco0y0nJh9WI-Gd(El+D$%vQ?OX*-6AI zOx6Q-g{vA2_4i+|Y`PqyBWgZ6zJ~8B8+(bhpP>lYO$>tCQhOy0_}w2@wCChtc88EC zSX*1a8cE=LqpQ0D^a%Ae*w25cJX`!4VyOtZDyOQp((fi7Dl?Hu@dnf=y)QuIgsLqP 
z0~w3Yw7tGx7574+{GIYpr|RKB6&|=lki?a_Nj2pOMn?F&$7&ey$L*RL9JAx(BB8W*^+OH$zD%|)_4BE;(Y1L9!zW-eiWtd@ zcLHmW#a54o*~KuYRf6t|<1PDnm5uM`5~oUGsXrQKVmbfU3$O=oM>G+uxY|$S{dpcj z9>EssL@tA-CR>YpD|&h`^M(8@j*oJNd8KWuqQ%0K_z57!@{|Q>`I=m~)^Iukp3{sL zOZ)ze$!oj1iW-b!cnF}jV>8_kv^;`|@QYpZ5Xvh(g;RRAqRu#U(B}ak02F_EJ2&P0 zb;jlVJDR*_TI$Gg6OXIf7AxU5_iq6)Si0=w#Ws*ahJ_IL#4t)wHhOV2vHEt%_6)YL zk-fPUOz_-^U(S%l4TDPo#nx(g7n5FE4vw%Lm*Qo-xC)IYn`% zJTNyn#1V0A8H{%Y^oa`LWRrrI^g_g_PCb6VnFpS7KA9PN}v^NrvZr3X_Y~(AemZebIbVES;*uCjTttLyf2fD%;fUw8( z&}EyUP(v;ncvMkUn~#6H&}_uIt$#m;6#O8phi$Mi^o@uG00w%~y#YHts zr-zjt``IzxG-f`2c`xFZhK0Lwiq{jD<&ZP;zEAfO8^#;7A}$)HaPoHuM|OpurJRWK zM@r)7&bd{$>YR5yNFCZK+rEIlz5H>a|EEnu7LdWz4Gh8;THJC0A$;Jn!yl`9MDp?r z^il#nZvjODcryA`2KQGj9tSITJ4Hp?i2*x~RNVIzz!yC6gM)*Wzx$0JDE>d5zA~Vy zrfZw-?rsF7K|va%TRZS;Y|LYp@&z}+g=z!_~5!CK+v_BhUU`l=xr<$W>y7RkRT1%|<4lGNL{8SW-SAaru z+MCJ6!!vglp?G^q@7?B-#8A1Y6%l%z#A1goNCCT;gxK5KBEF0W_s_^d&G(4D?&g}X zA&wu;C%~;^$`dO+uJr+yI&h6pm%o*9F4B`B5=JZcq?;oNmZNG}Yo3RPXA?A?AJraU zH>({}+^Th+oapj&#iM)rpXsbr0gr0a>@ZP&Q6l7cFA*E$a0bKPB>6@q;fI>-GMY1b zr)p*a@B6>~Ll1JUbg=&&yg&(z1=_bpd64j|d9^G$H7sQl)JC*2VC zf97|KP6uC|Rb!Vb#WrPGdu0#4kyPV?C;cf-DF=w9Rr6GXrvg7Aa5Dcq4^3P=r-miI zZuiBtPS};%b_}oOiOA-Wy)Yp#!^}*rk1U;|ER`%f+mK~E_KJ5uO8#VvjD7i6|Ibj; ze2d4=11<@%oN$KcR$RY9sS5{;*YNZ~j=sLW0!1|2pf6ECuKHmpqRi_%WfaWNXGN%9 zX@@;21nX|LxzB5NoP(s;_;`BiH9%u8pt8G_Zkq(09Y1IBqsnu9% zZ@mwJk6unqW?!IPwR;YqFtd28H-SOBk_BPn9|8^CZyG+moGcEiit$^#3O^8Gf7wBJ z{lhk>#XhfZXV|H6{!~n57Y*-3?V0S)m?>3n7Rv8Qlj)+7d8?|XMgW*lbQ0q<8FfSa zLgU|H_P|j+32~=hrL8VDdUkZN9&i!dx&2VWAZE&=-iIZv*1eU+Pg+{1JHk&3bvol+D)M{FY^+9; zxL{ac_}<~Qq)wFFo&SHv!x!`Wj{GK6d-7gI-G_D8-q+qsuf&*|fEVMH^4fRnIpp8# z+~*R|wexl%tEwW+#aQ!v4aKz8*1=WNb}3~CKh4Hn=~e^YGqo*n=X0yQlaOr_fK01i z_X|G;WC^yOr^;wItJ3|>?&**0C4b~Q{+%4K_bWdXC+Ij4G}TCml-38Cn-)EhTb+oM zJqkQ4#=P<#-<>!)m{!UaF{PnF4W6_-PnWzmZ}alnk6dKyTwYdgf0$ls+(Wb{QKuAH z9s){C<*y#wX^gC_(Yvb>8G(T?QmiL9LZ<_?F^qq%*6ZJ~=``U%AP|En>HQN-Qtpu% zj(BaQ>Rq|aC*#7B5@fr!o1zV6!w|ZN!%y?n+v>y$0H^z&L3%nm#y#h)b@We82pe5x 
zX~=Lq^+N8E;gec$)B4?qJZ^o5IgKR<>ji=XqzUvU8Q{w*W5R;RjfzjCs}c~zcTwFU zS+yt(BwMnYZp)l}mM!Gd)$*xIqNnwvN9);{V=w$uRX;`d zFDc6M1%_I^ZqcDUa?mBo3i{OxoOZxo1DR72^WEUkxbTzpr`#NxN~Dx4@=%z_S6Fd4 zR?3yzK}Sr1PkLXpFaGzfk$ySU%mizM->E;vreY9xH>q$kvXyg^IB9mE)55cZfI)5&3OE*CN?BYy6O^U8}&!`KD!BkB9ckXyQ*mTnx3>R_>J_a2iM_H zQyTU8BxrPg^fSvK{c)GsWiTMbY0tLSc3m(T>I%niOw(*InV=JwgX!n2SdC6ke&MYn zHurx41w_AgLet;Wsc8OXW6z>8wfU4E;SyT3rM>j+J;;Hma9j%=nyjdsMvR&G$~2uv z`Icg$7PW3JdZ&OjC<(`1EsWnc=YpIp%=3Z!VO%nM6f5`TUx*4TD{<_#%iyvfkhbZM zedUelpyV=@XSc}0|5RsW}r+OhU=&nljm zF=ih*3^ZiSdgV-<#N5MCu`Lk1&B!$_mR(QH_-8kACMnW;JU6IuuG^D#hp_+WE$eY2 zLs-Yp7uD1}o|0VlYqW`)MPF4W@UqcvZE`8KX!S}k_OX-`^ezHkZ(aDu2jD6DvSEE5 zY#IMUeq(tBq%DMdf9vw?@P~eu4+bOI$RxV`4k6qC5;|A9!{18Zcj^Q(8?NMN zTFp^-kx65}Liybc>rGVZ8OjcX1Z{D~!s9c602~MFKOH~DfA^_SP728r-gk5oHw6i= zHA>>TDFM{t`P)`?e)yJAf+h1aTOfi8CV9@LA0*#QHQeTWTm@uCHEy^N!$$n zR@f9#v;Xf&2Jh|~sa34XFDuEX5Z_zgkwn!l1J%8Zb}KjQn%Q!tjJA(7vH+?bbZzg` zm*Z}{Dx*Fy@Zvcjl0E~@Ge?;;gTE?T&W6I`d+2GO1<^?(SBh~XcU6~`SU`{oM4+IK z5Q338D|^P{d|UIy3%oding%WeLjF=_te8pHZNZGeTW-^1ZNi#mE~|h2kAr`%E)VV% z?xcyNnVsIZVMQ>VR1!QUN5VjhlD*o`)qVTJzB4q=t9>8Uum0Zf%gMx)3LhX{NwU4q z!pl+H((QFlXPoyY;Q`L%LF86|j?FtLX<<>q#MH7yjrErt(>V7}Kc(LX&I2!EnMcO7 zr}AALjfh`I<>&J5I@5^PL)jl<{^ubNhd?EN&-7erfjF2yx)gH`UT-v(TI_W8lU-VJa{?I@ zPDX_!Tqe@^!cQ|JKsgTu_Kc))Rhf>a$JXfE>VJ#z(*uSMsG!F zuEmgSc98GR+sTQm?dkC@2Y$JvPzZe8pz6 znary59myU0{_J~6iL&M2(m0L3zz*enwD?g*h#^n|XF|rK3GiKgbh?7tbY#C9Hlw=% z-(s^AM0dmwD#xafWTRAkk}eK{uHztmD!-fUa=(5*Sk|Rq9Cjyii&&%*KNw0YGg#X! 
zha`)*dMssaCcv+aGLra|U)%0T$}=7qn0K1>0X*&d#Qc1*H%~a;4gYpw`vYC$Zzq}f z#K#S0mX}|ryLoV~?!FC^XEhrINq#5|Y0x_Cudc}~0sdYG>WO;HG-6^RomqUHvq+Xo zCBu|n6pEusSGBO=E03Mb&}V%eC~Pe>TlBtK0!SAQ(==LJDLE@_9|lRODnn2SHvku` zQvK`dc;!=xlwTJz10%a+A&Cv6-1spaf7>ww$5pdjFMZ z0lJi-drx>|hPaTIe_=Rq>wyFC`r}R;Xp+7c;6W_6r)|5gCy%LTy{@11RDkbAB>=2m zGQZoYY??Nl8%2ZKYq>DeYwdVC$@q-cT#B~12Bi!%^R>u^Ib?^Z>yN#|8J`wsP+@Le zkN0N~uR@qHM1G1q$7taW-_7f;0fZ&UWH{Mk4F8ZNV%GDGAv{58YZ&(4+)$zJW#~_p zR~u56m&0;zFn$|-Cwk}o0GJgiK$P3dD7CPii1xx z%kn@V<+iq;Q~uI$KF04btt^$sx;&uDU)Jz*^qllnM1B(VT4q_jP*Ru|ZK+_$|25{+ z;>)DH%ils8?5u4%1JhrDQ*t)e^U8Ga@00oc2km($6*h}d4Bu=Xf@0zE>yi-l|8$Je> zIMQ7-EVT`s{sIa1O0sV&KGlyow@a^rW{yBmBe5=$v0_!nxHUnQrFbo3g=Mt!DPb^G zKK_3hGd)RID;0!ShJ$N77(Z6Hg}xm_$P~=kl%@QTtj(wi6NAqeL6r!3JcDm z`YgIh<81A8FTLJ!I&6>Dv=?-^Q#P2$%6kQ7o%g3@s@}>SCJ_TKPG7NdzJOVhAZTrq z+}+p`*%MDuo4ujD@RNU!9D5zq@eKr0zObzZK|#XilBG1KQStj6RTTeb8hyQ1hBn=0 zOj!ML64l_FHy?8gkDR;s#i#EM#alHk1TrRFKNFu{jW}hnb!KssJE3yqHB73#zCX`? z5*HH_>w)YiAY)5Z7dvdlNxhoOmS%0eP`Upb*_5{>{x8VKp40 z7mj~Lds=m7@9)d3$#giN=odV)UfLk?d>f2C4HFc8)Tm#mp`&>An}H|xOL=$hMD$|A z*SKk9{ysA!(MWU3Bb;~=+c+N%7f{{x2jn(J)E@A3#X*9Pejcn58HFIwMDkmV{CGmy=AKt)t9_2f}t!_W*Bw2)2CZ>a0&L}l1E z$qd#B0oEiqY7@+VeW$;>Ie`C!elRPsEy2-u&|l+p&GUfeD{w#nfYWnXig~;uSfk}~ z(W(KAT=GjUqih(n%~x6k-raRCqIOOe^RO>A!n@-6{xv!yIL(hCER_nG29uBU*#%y^b+Rmhwkbq713#QOD)t_LW27;0QU{ahc zAML(u>n`76Zo9B+5QFqbj*epTk=VX>bDIr$f$FU@=GwcDGA-s`2t`hF*QM)Iv37u#f1_w|6{(VRHcHewuG!&-LL)TF4NO)>Vq$3!!w zp-ggu?>u#N^+v7#Mf~)ji;`Dz2cgS^zO=rqHI!I!EiazaSQt(CBboxGWaOa^$*=1P zjqp4&BhuKfWw|=sovT38IV;b9(L?VC*`GOn(PI?$!|wqymxOlidU3qMpBe?);wM4S zvYL7ZYuo}A?dL2(uSpn^7o2we&heGDvI+&C~i>?EKCv>Kh4-^T$V z<$kr|#uBra%(l_?x~7r{(4-2fd*QSW75T?0mmF_KE9{?Q*6qk}q>EU?Yz?o}aTFU`s4(uyY8 zl6O=TFqX@!Qs0hDpskJ;YlpgmiQzSQ2Zo@&UNgBXOe1P6X^`Hh9sl1(htAt+#|XO< z0_Wkubg~z}GUoO0W{J=NvxQ``9}mD{Q(zYdhKD5mGH~1tqXY}2eK$ryi_wV0-=y$>l6EnVgRXx&>B@d`0gy#bkc#v|LHQo-=Q_Y?LDE|+A z5^NC=_*fG<_&p2w@e#|0M)%_2138TP7+0aga*Hvk#v8t@L_Ei?SliSJRhEi2Sx`Px 
zH&qrfZpgU%|Dt+B8T)Si8|vBcvS0@0HX$2lmYabmYSX2{-Qa)V?KAcqc05B*2rdC` zPgGw}S_|4lv;7_$ar$kx9a#u0ORU(E&9Lhx6?NmqmEy~fK+MS{nF^(zY#ALF%?5qd zvPp_g5woQ@EcR`Rwj;7_`-AKK+S3E&b34}Gs_O%pxjxO0(bnqh#{ISt&$fzU3R6Gx zL|Hl0)phjIsx@69%Edg%(fu)z(KAx;zy9kN?Dt?#!AS$xy4T0=#H=D;L1$I6(6(u% z-o4{mJ|KjyL59=}p5^FbO@Ufcxzq&f=B2)|O5RKRv-oVW ze3@*CeR+y17~c7e71J89p^>~sDl8n#TAAH zUo?3n$KBD+AslPMK?p@wuDe|-gqq_DHu4LhT}v$dnuVRpcc)KIK?a%6sUDzkqV0w| z<6e=v&efO<%psqcr_^!bL4ec|cr-RLANv}Q}hOvfdI_DR;`o?0g5 znAC{dj${@ENGA(($iI|yYb34X!nX}BnYDNZR;sOQ@WNE(ay#d4-41nH9z$ z((Lzse5#pJQWt*&5i@TJPsF`RBgPmT)(>Kmmr-teDFsy!#>b{4c;r)%&&xu5t_3aX zt*tC_a{l4xir9jBG#hO+Iz`q_VjVMYsP@aXPcwUGGMd(#!%FM(?D(BZ&HbOoS6w=q z6?E4Q{z~fVUanFH!&Uo3A;*4hr9RX#B`!?lM1e=SUzn)=KP`aMc!*PrZKVUIA<#RN`FR5F zUE>Ai8^|6F7SaT83hy?S3AV)m)CgI}z7w7o@&W3XZkshR*Roj5J<#oOA3p#mYAEmV zFc&{f=N#-9A|AZyuBMjaQKT%3HA6kly>aCsQus)#|9>Br3;$2PSMPYu@L8f{TLpXL zJRwd85BoFcXFL}v^5lbG&wfpjefGS0eJZuW^Wqy6ZtIjHKTCOS?gPc*vCHm6Ko&r? zwE$WT0Wy{nXc2dyeKC2Zqy&Ge?EWW7-5eOY>XIpheFe~oIfJ?FR^@@2#MDo9@b)0? 
zaCTNrE%FH<3sWrh%kO|v#Y`6g=&y~IWbxI0-kZ-LCZP;LJ6{p6W!n#Ir5+yUIAL7h zo|9JQau!tfV8iK&{NJYoY9%Nnv9S{4&0bw6Euz*EufjgBXJV?lJXQXIxGTS2fVE)Q zG0131IKzIIjuFI*<@xr!TO0!pHyH1`zizANuiv4Riom4FaRk_cI?vvW-=KLnVS!12 zUJ8nQ1phXC`Aw(Um3i0y_QK_0_7}j2#W4j=p;X;>i3uzXLd@I2KDhu|Fm_=$MpQnR z{hqPfBuIa!X>xl>NjQ8>&LMl(D!JFMX>zL)8L_j>vQi~Gg~7S^gti8Lkm}1k{H*_8 zhd%}ilat=cqIGOJ4+EqiVMe_@x4hI1MKn8m5n(i(ruyx3ia$@z!-U&E;~9}f>+9nhGMNUUKf3O_jWoej_?(I3`*pUR-zK|~4tko+c1 z(@nSKb4Mt4qfL15yj{Oe!Rq6!RW}P?B?{7iAEG=X(Ks(68+t37Z}bqw`nsP%x`l)M zco8E$Gyn)6u^a)4xSqs0XiC@%4<-DcrlWb_WG!qF7!cgQi~2FA>%}3)-@Hx%_b5VT zqhg0xS8sAkAuSl)Ij@|pF}u2vJn7Ekpx`R)Q(ay!Ug#gsr^;3hf%)(Af|1#A4G)># z#zuG|L@xAf3{ALiFVc&&Zj+X$pofm_rhyFqHe-?SH@P zz8W4H+Km;%knuRdc)Jd@zgnJAo0hq&o}CA$hk3MA&y)0r@CNp_y0ctJU%`I)bQoG;Pg$d69kr1JmU%Gag;?w$dXSY~#>AIuW~n zt(29yxjyUJb)*a?d##ub_X~zA!u}ZR3mJw?aZrtn1dmiX+&4u?+W?tV)T#%d0h4~q z`gUI(9rALm+M!b)cjQ9=zrH*nG`>+oa;xKnxPG~xh?ppohl#(}3gUKbw6;k()Y)_H z-7Z$=eK;UKK(6-=@P=<&W90c%We9Io86HiEl$a-nAe&?_2>L-LF0|fhAgpnv-0M|S6sdX%C132r=ug54aLppWuz)gNvlMpxF* zI{}=vkb^mE$n_MgttIR3>9%P=GMjZi>sI1IXdhXEy2w(qT`-7aU@&$Htz;6+g7W_61^>EMA|;s3?6-zg{0#VLo}9|6qqB61``;jz{`>Y8(zWVj;e~`t36Bze=O^Sm&3J%&4k-6Zz*Ghn_0BC_i7 zVPq<0=%iI3`yUr#_~OQi4o4;ON;N=rO#x0Egi>wo#?e40TFw>7!*){Kw>dnBW&AeQ zi68N={*WpymK%<2Is^kB>KN;F6=lmAtd`7?JbN^2-AYmzu%+yx9qh!rF7=r-`bf)P zB=Ad`Nx5jAC?|G~NC6JEtZ^B3VNWkP94QY;td0PJgH&G;SfR{bQReh$e|B@;1braEshx2vn3Sg9 z^M6vebF<)I@Pz!7{xEPB#qO{nKKsa_lXC$dAXEPvn5uH_j+@yI`cop$BM#32FfJA& za296t$g#{8;#JfkegVkABHHM;U2z133sA>yNErDpd!%+Pad9j(IcMPQSBD#c;*}2` zb5@*e?4a@7;yZ+RT);2Cp*6ZbGKK+aP{9o8%i+$eYp?)~M44JEcodC;*kr#Z(M1! 
zGs1cuzGswL!m@?HmFzH{0r!DMhd&Z^vr%&I2m+P`TzE)}JUUagw77EI%Z}HkBSb46 zB97GLJyn5Q`4kkeHGpk0J=_&}F%@;Xk(a(Ng0qh(&5&z90sG}P9^0Gw4lsBrE5&ij zP=oy(#4mBGEs|5lTEgC0wOeW7-N9~M-6&X{tk8AN37vEJ#kz|LLI-%mY+W`834&A@ zV&PYCnCzlQ!6FwW-?H{)dl3eMvF|)V+C<^uLOe#SN1u2#%m$m^4Jua=2<4FWp=Z{t z4hD11$0k5Nk<%Kh`Qr#!V`IH)e1P{w11P>kd=Zf1} zCOkB!%pva>-vKkpTYLSXVKEc`UC&qRZ`>sP)1i%5kf<&O+61nao#bi4PLKm>JbQ%5 zL4Y(kB{B&`6O1Vk?p%)LdJ)dIzSok!H1!bILT+RGT~&_o00WZ+9au9kAZn9@Zdi#h zZ2+J`A&_X5tV6hh_f|}E5g5F?=xV8^8>ud}^eIIU?Azg~MW}=`jAv~`5Hp0IZpL3* z&$9#R=ws59Q^1|D5b1q%MbrK()ffusE2#FDH5z?yxu<$`Q5nC-cJ?@g2WOX6U%@+W38-UumR4D>uL z%CI%u4*}#*AG{YHCfwkAC0_t&R&(U~-}$c%OHumtRPXH7Xt=^W_e~TSNEp1oS?Jeg zYii9lj~o&aOOX=iI293WCbyBGV=A;7kFdqm6c4IsUO8s`13=PSHw z_M6#bIZ;`kje*Xf6!*7&799XX*`RUeXM4@(-$!$XPRmFeoQJ(Q`GK_ER?bWRbvsx2 zQO&|_kBQIcEW~%|Z0jcL6cCVU@DtjEPtrqi)9>kyl4}LXNkkRvV$NY2nMV%wi~hiS zpO5mCxe~gxu=|dftvB5P@OA=lIT}T)qaAM#0WDxEgY15Y)PVS2f#x?1Br+q=4FEqp zsI!HZS&ZwpczBIhQa%ni-aPT1(JFqibvy_4iJMTl+zDo8sKwdA9MWyS6IB%r2u=9? zjf6Shf!(8{Wd@r@MoS5UF>o9w)6wezv=@hlK1(+B#A9|0K^4u`2z1x3_;5r$sWZ!) 
zSK7LCwXx1GKG~W1&UhgdtV#z~j?SjspcZM#QpG@<%+09ljeb#DL6R|s0?!TUh3o#B^sBI@e`y2nFEHuAv_=BbR&|jsGq`X z#QE_&}q4oJH~a~E!~bKsH)IAq3TOpgyTZrFH0?gU!zo1v#SJJ#C@ca`?5$kU89h}9cAJ<@g0-U)>w~(lyo6|T?^o* zJCl@;r-)LXN3=+B6VZ~PZ;gZ}qNNBa%x%ov{BpiX&kn9%5m>>F%GcLTjg|UJd7*fx z`#t3C>vnW^_#lfa7Du+g7+VNG-8)G}aKgvxqg^ZeWw411vlRDZk^ODT7wSUE^Wp!^djhMXvTJ@9TSm^F>Gf)sm8uu}f`ZCe9xzalS zdu_ND&9-f_@GnuE00{lYQ3a!!D%9E{VuP&Q$RE&+d$!G%Res z2}}-7_dhY^+QfH&87?Ehky_iY_48&le{@`)Jes|-5oX~>A#5)UB6`ovd4SaqkH}GF zL%H;VQGTGhpOFi107KvL8jxKc`_{BMAqSLw{kvUM>o#~FMtzoY+&uZ!YF?{Mm_7al zDX;00sQ|r~1&59^!lfc5=!lHt?0BGXyj#&g+3Yf8yo;3ki zhwmw1&n~ShTW1pxk;0$pPO0B>v4|Z=oL3S{nt>?=74{Fddd7z+(V|Gz1Cq8kR7kOe zd5AuB#%J?Iq{gWYn%%s*mB!GHFQp-w;zgVU?+Hba1N7lM*D|QfXI;5dT&im`S4^6n z_u`!!(W7R<4~8^!TG>9%IEx8|7bY=Ogp_OahV)9 z1>k1Ziw@HUpr9`XfwRXeopc?k@T*ui(ZUaVN*4eI{PWa*yb8D&}bTu>+KD#a$;{o-n+KC#pK!%=s)04Jf8Wk)#_A?$QLn^q0uH#2PG zV!x-y=_(_sm8({6EZ)6>gk6A;{9>cG&#TSM^x)bGv~yVidB_YlPYo3>C6ABdJ!Q`- zlngF!YaxGeyt7}HTq)C(?3~x*R(nIo(99iaWOo-5>+0bZ-R9lekQLEOuBtq<2{-I(P()SMRsm z22Q>yO4y*SNgZ+As**rg9JP8v3|SWRkx~?LcAlZq5wf>V@Wzc-^G28}hl0yS&&a6h z=M=Qq!+;O@l_x+cJYcy_Z8zMjkN(E#d8Xh>8aB1!=RZ(%_{Pi@nG>a%CgVgW4#`#9 zp}QsqAPm*rcM%bOzql43>On%R0eJf-&(luCsa4KueMQ(gk>&0jR)8ZD9N1xG`-Ajk z&K*x@moz??SFJbO85bZCGNZOkZq}rSH3#Fua+ZQV3Tc**rlB$zH-IVNzW40Uu%HX6Ci$3xj7WEl-~uVgy(^iXIeD+{bSzYfOk_`7F&Xd(_1w z`l^3w0@q?jrzcjPo0&e6oJ^4HA+@%^f6^2w^X7M5jA)9^X1o-IM=-qNnDCRgoN2gj z(t4?>+V-0S{|$i(o3a8d{_HWiQ{0p&lY`B*sf6`AH2P~ise{5JHxxXEWc`A@N-C;# zdMd6j$@1o0>2d-f8uO(cxzBlK7eM{w&UePm<{myszk%rj(eF&Ed7^Qj&t6MUarpN? 
zj9|Q{2&t>qg-gT(A~&1aOK-p%nz3O0dTB6OAaenh>jaR=EnI-iJuZoLFkdMGYdSqZ zLFugSfc;XhaG6({>b%1aZLS?KQ`?8uwAZxqM1tX15ly(|h39xHkw+^Xj>s2{dLNpj zE)OS$?b)!>5Jh{VNa8e%6XS^6K(?MHT(o1TA+J?VUXk@l=0lPEyNJQw9=>M4MFG#yyGW(LswgBcP3Jl_r#EBGu)Jx_|bp}|y z0?|c2E*jDvFoLo+n~x z$u+n9Y7S!@j_p&D_knJp*EV*pjPsFNARD4lfj;(<4%2_v1%2gcJjb<=Pth(4hy6B9 zyzUyTCpilpMgwS31wyI>AZ=|-2;Ji&0}2c4Lyz6u(CWVh#HXzt0;!+TN&5tFvGlte z2)0OHeUXPoeh2r6<&A%eMuPHvyG<(Gjq{2f;m&YL_Pe=_JRcj)X+xUGSrqDPyR_}( zS$;6%tT{EYHE2=~8^OH|G!!`p?zc%$AIEH6%>@xvX=v92&D1K#0~jwsX7_+}J;i!P z>|s|HT5)FZJ7{RU2Exrg(3-LRyp)q7cS$tpds*rN!H|C7Sm!9kl_-C(v9sydn=UQ=)>ZyBb>CD90H#tw#_$0-D7>X0T4C61U^w0q}t% z15I%*@YquER38m1;e}9*r{i)nxot@9R*YfZBX3#pAo9S7zK;F#QnhHp;#+3&*)?XG z^Suzb&;gaed&Kw#=K~!~7EEbl<*UT;GNqDwV`EBNU0GbePw%iZ-Yi&dRc~j)I?7Im z@8HDx1pfF`qFfJS+#AR7?ufs7Q_Fg>CcL&%tPZi-kJdl2l-@xR3X)(R6vhlc@)hvW zW0rRYIk1f>Ob7c3#+M$F-PT!)vjmH?WC#AS4^5t%);=%+F|?4jV2CTvULrNsx`aM^ zgf)Bh0umn|eM0XE(E?j6UJdgo-1b95<&Vt5&TMJEpLJD>pbo^5PQfK9g2w;R!53-AL4|$;_>P z@V~7FmYXmP-Yu*bV)BGkBaHn17L=AEzao$p7zYZlltZw?H7^lVJf}zNEfO?o5T_)j zp}GqF&b9`b=lvZqrmWxdrETB=2>vElGJtsv?bD}yb1SD-rcX;R;dE<#vP~%)gxUvo z@}tw!y<8CV9@IrTQ2h&V6^|IHWecZ5@jHw$w%vdmQ%Z6+1_z2?#HS3Goilh80n5MB ztPUXYl9vv)xp)^O_LZ*t=j$R^i&gUnO+#h-2siUd_z6a7=5n)`hLXy88?tEV}SDPtHME0#1xu_AD#!R!8 z9&v<6`#{wQQd6!pTVEh3>|wm-v7YZJxCfGN0~fLXZH7ZM zP7@A04)ZPG))#s_=f{BHM7sha;bDm2*VhLux8}nt-%)Glb=6j~1?NO;_|e2CV1-je zJ|nV1h{~vjkn25dR;JcFgVIJY$}M6!xLr^_#(kJJ*&n>%%^~t9{sm;yvVxu$0}GlR ze{<%9z9dlXyo;ij_N@anlMas%C7ja8sneeq-{)?k%n$Yl3o3N2Qq7n=@GJ8d@FIBIrjD&igr-z zZt4R7xcL@8LPOJ);jY3p!o(vHwcGpAGx(nn@7KeBh3!*6)gCHR6|RF1#0>eK;9no^ zZ`Lv3bL_ID*`sKZDbTow3H`dy}$aV%Iyx1 zH02ADA=^4Wm45`9XOVHSX@?-@w&||-1|7IH#K?;Bt4KxAZ{2?IrG%wopt2EV@f^{o ze~X3e(%Dz!S-1-z_xQ6a?0$`n^x}?}Hvhe6e2SPwYs+|!|05X1`)mniPaFMIe>=-a ztJky1d{xSMS;=mKznvwo&;G1TVE_Ay8ZBbD&|(WP9|d;qJP-O`Qw-z-eY!Wn%*<5n zF+I`!mqOE48t*7xE0w0x9kK6??F;kbr9d9&uYSMD{b!|{c*4-8XEH`(#$h5*X+P9+ zPewG6d(Tzts#mggUePD5_gBiL*Q-0UEUU12u~Ds?o~kJ*Vz3^;AgD14JL2qC+2Jd$ 
zxc%PaG#mSR__ao$I|84H%%;~0Me;u_04{>6?lSHQjP!+F^AUIdC4vqENJ`jF-#mY{ zt{T9q+}L84Zw1SFt!N08gjtW;C`g|ic2=~crCLqK2eMIo}uaWg1?(NKNG$(qJo=>buxEu{O*`8c!@SPc4vh#q(@6ONFC+ zqmD%44h?u>mDY_2CJo|{RgWQYoz`FX?ih}CFOXLzwUuVdM~nV9WGK$g9LSxz%vwqB z8>QU~N!Y0Jxxv8LLg8^!&Dg&Gk}F9z$N5cvyD(3piA8Y+Di;rKO=uzlG%M?vUEADL zM3`x9s!*MT(&R)=v{WeyFWa!OjO|rg>0pGjI+OKtO^y-kk2V5ZrGcA&=ChW$!;!0H zbn4Zr#Kl<2qVTtXVm?+Obv>Wj(!AfMPz&$Y+#A3(CdmCiY*+XetBep)KlpEL3Km0S zb@>0q*uQKI0~F@Zj1P1)X}pUx>8=Vl^4tAXyqYKLwdfziMWI*vg&x~Yg~&@<$G@d9 zJUV=xUj25SeygXqaaMqWQoM7bKFyljr;FtSUAYcAQhiM)9w=zhM8tKWO*pj*Z3_<; zqAmUqaQzO)%eRY_U2rpz(U3+cd8=I-I}PdM8EHW)>3{k$!Fd(5=$c29ojtR=4NKBh&YW?=Lv&5+x%)y@kcBEuj0My4 z2k;d(0&a%C(F#!kDn4cq30&{tvJ?FYN*-F74I%VHbHU`^jj~;K8T#tbEqN8BWUkz| zU~oDi;{DxgHWEnb+!*;1M4VRWh#Obh#~NJw4P|*OXZYr-OJ1qNN}3>SnESaCEYjT+ z9dg!}O3n^H5Qh0}eie$r!bt6uS7_B{craToZ?Xf&3IMLJB)jz8AOdbdsH=lTM?PC9 z)8nLpUS#J@$MB$nNO+>^)~bw=Y`~Ym!83y=E%paCkwZ7zgRk1GbMx~WHhk-L*4D<5 zE!#ejm!Y{b4}_#V2J>-7`ru-=!@vEMN?-`^P&?R<+9nqAeyb`@1Rc=y5t4*Z?ZH`A zdSzj`Q7(IGbRKWX^7lYNO}#Stv~v4z+<|{rpV}k(W=dw=NMC<{+$v&|t8^+2zNgGqZv2Uz4M#B9sQwVIr3yMpm{^ z-H8BsiY`*f=v%zi6n!Brh-e(UJUxL#N>XZ};8T`w=B?A}Va5MwxL=n3glB3m_tDyb z&PDKTQX|Z}+}J-JzfH5y*B7H^{hqNJCVc*Ne^`x-IpA=KcKP;Fd?b~lJ1!r9yFUe; zF2AZ2zjZl=TMiJsobQf%_%Vg@0sI8-tR-Foyhh5IRUN^;Dbbktx+1ymo0i7Lg`-ex z#J7%F&XY3#ljX?NiEMK~LMeAgc~|c>+BkZ?RK_2jb9_w@ zvG*KGd>LJaot>+`KHa96C6ESRwY+}K8Y}dkW(%(DJ1@K=N>LBnTTLPMY8_wWdbjD1N-d;MaqIEF(Vb!+X-_H}AHc(rNdmc53BWg_%ci3_C<6~-$971} z?OcHnixXK`0BOBJ*kPlm1=7&z@KH^zsFeav!oIBq=8y@EZVJ8+nw<`B0Q0TO5p71N zLcmul#pL#;SfL4V)U+x-_ zGY=0gp*Pfp4NDf#O;V?p3q%4VU%ULXs9x}Vgo|~a$u1}ZKX6_cD1Bs>GRzhB;;9R7 zTyC54S6AiZWAZiB+JnA}sVszpenv@Q;^lFbu8z-rDG730NRwwduHSw71=)GHtIZTI z^(QuIps?fGT@~bsXyvw9{Bh}G3wK=k-dZ7*J;21ruH~;}I+t1W4!TXIES!54^F)bS zV&L))<^;;?G#C{syN-AW*qe#R#6f?&sjmjBFQI&=bt{uZALan0l$P-bFs1SRGx$Rw z*N!IMHh{7RU3?ns~kfWtYyhnvx7L=6E$)4g4w3WaKugto74)>Rb zdpo2(%yWv#^7UoH8Wb~7t&=%wnhDu1zkOzJ|LiN_svJJ!Nr1*@dvUbwGN&tUV^dD+ zh3@pOva`g;){>L;3h$JH^H;KJM-teB310Ihp6bn+FqcEPj%!%9h4KClY82S+#x3WC 
zwPXW(AlFj_gsi-DawYWnTMi3Lfwroz7MAQ5Yqn3lQ4T6$V z4v%7kMbIP>>?u*aWoL}54yB8|ZqC>>d{*i=s z;`&J>b5|&iW2bg8FV|C3-gH@p!%7R1RPKIkFh%48?sntlvUgTMN6gr}qfakt^MQ;c zWcoFq>F;i+xO21@;0ylxuB+gQ-e&{?2?1)D-Wlmc&TV@4{2CjH69Rt5ahdtfrL3k( z6OU);*~OKElH*HHUZ(?WtZeM-@on;FaSt1-U0`W=JjN*wz4o%>ioCuBVz(Vu@zn+O z^;3z+4KbFKN1tM17>)nucI8L>;*z*(_%2E2nnCqedO?5$hN8Kc<4oKNzZ9E!)bHX4 z(AABdnH$A0rYze$Y%Q4VEByd!UMB4mwPY zS<(;)e<}%!FwIJTBmxoFaCfnXXe!Q~=u&m$xg0j;QW!0pNom_mECH==pbFozBa!W>~`|lBZ-4)The#kezD3N;AzEAg{qj zChWCeV#qyG)it>2V^Y!~I;s<*3glAEqupEU$)p?(cyw{4p9l9BGH}i0ih%r>f;e ztAJ0Q?erlF7T~HUoX#VyX-iu*k@#f6WzJlEF0T8cik^!|;19mw<%n#K8YuDeZSLPN z49MP$rS9#;ko&(47|zw7uhZirqI@F@1ckgT36fHr2><{(;h{!!OXn!n>NRAG%{b)3 zAJEtv2rizHY1r18nk3Ovbr307i1{R{Vyd8{hoyr#sUY0UIh$6uxO_9}BHdwW(UQ$cPJ zP^7spm`%Y(@m-DLMo$@pR>2kas>Ed>!*#uEB(AZd{>Rr^aI>q5pME@?f|k@HT&>IJ zxl5@<2Q&Vn7TL|)ygY@*T?Wa#giQjx4Ul_Rf7fn#(dV}ub}cIq=jqvE*Ir=TuscAZ zSqy|+M^ZnZY9t1fZQRXA=z2ij^gd--6MZIOQPW|J8$Kp;{ZbWEnr^Yah2Vl6q}MDo z1rzg}^Rua0WPhS&5w1TSPS*z<^iw~BE{wL=37&5NZ06l)=@cr&tQ-r{cg3I69PpTq znI%vJ=pbQ+{odq(jKY344N`|cLqcX3zfkGeR_WbjY8f)eCB+xx|GWA9IMpIvr7rQb z``lz7>Tl8MR$seP-ekVyucdrE6NG&!Uad}DvSIc0r&i)~!HIMqzgl>i^dOd#s2v{%8OOfJ)4U1uKn1oG4bD=QT*bUvf@hP58*3v0=Uj*C)sDikw zS4P02m(>tG>yPPKSeE`dH9VD>>2=TPzT`=sb0od*QtP_tT2@CK$j^d3Nk3|1xMJeX z(AsB9KeRLc{30Cx&>jf^ifPQ~qwWDS+0JfRUMb+4UBC{Yz|{Ms(K}T29(z*8*JW8`NpCkB=v2i+wO99Z~F zkd?#Vfc_tD{e}3z?yQJ}r}zWQ5KaDCYl0xoJhPzFV^yjRyV4ZJvqS?e&2 zrZ8Sczxu%*;7!1G7+J@GI(u}`N$9+qbDh>)7N@>aE58ai8tueXA%v{-+mrwo_<2)( z=)eIGdMgFuJ$$DhUZQ|l&<;>}Q3Q-U^PydupVe&hr0L$fCZG>SRChy+6QJcxRtygg zRRoZ~`oeNFh!!R}SDRdiDUFqc$!3!2Eh~$;__UdXl?3opQSf06Y)1^b>#ekVJImk* z&##TEdn4UHDwywHZRbr4by-*jBsFm3RcAa`b29njI6b?m(wsfIJzo?h*IiWkEta+VoP7iKcx_iG%I&L9n3wp{=}HwXu%{BCCv-=s4S?ByH?8yFhqQZ0`Kk?f7+Ip3_Ue!3@ zkdXki=a?_<-2;pz>4zv1@@Uo4pJ?wIxF{K{EXh1Q^9daVDBn>p2gL^~vUOq;9u<(YzswP2C1}e!+8tjN-8msWRo5P`hLeV?C zPj{IbmThB)#D)j?gKquOI6gG$@lyO0KvA2yql+6V0C!qdU7|akt1l_+DCG!Iy3K=a zI60)v5>(e=cvROon}H=+nVe_{d<;!Pr(aka$8ecWgcE}GGtsW}Lh)vjPCElycB!Zd 
zSt6?ZWyot|~~Cxu7_^`$Hoy@6o<(EM;- zz&sDNpGdx0&2p;v&CD0bth4>1wlxg!-+@e9H@8*G_91GP2K0ZuG;nB{s3vr95$Xmf@%q+u-T9~#^6!v;i zXLZ4M&P$?jwkyvnAzgjvBGzcbZjR10%fz+41W32o2e_`9PF-!0a79&ojjlSvnld5U2rxp+V`#bs_K9xNYZmwEG2wc2>~Zg9|lGn4*$*Z^FA*i=Q#;+3+0Q6AQ z;Dhf@aVc8{I5>tDL7+K7g9eC=Kz?l?VESurS^>5@6MDH#faw+XrQLe6HJVdC3uY@0 zvv2TklGAgu7{h$tO{on15e5B3d^v1(6t~p_pfWO(&^>N(^#7UXAv}LrX3(-OV(4S# zwW2czcXtRxG23gnxPk(-y>fAw@&MecofyprzYy+85w;Q1$(LIdfI*NJ#viv6Fv z27NjmtiQSn`i}AF^i{>|_ls!pGsTUy5Q&pJhdq+%a@fFvR?|yKOEo^A7EwXdGVlZ& zXsmAA{ECIefDYFLKuxxpURe4C!~uYt$H-XkDgGh=fzE(034ZAnb;o#E7MrEhwP4)f zoyl{41?KRr^JmNFAac@Uw4k)B4velBcVjX)?5Y^jV(m9pz(9zQs=N7DEi@rl;L2dL zMC_i3D=L5ypHRdDGXKqrd_I=_E;pwIRa|o`B#54TcK7d;+fvRfj6H8)L38sT%pI+J zjviLN@oK~Z_j!65p7r&qN49f;)=-bwZx-H(?I(q=HbFzo(I#SfVH<;M z#ZweEExQ(3Qy_u~2R z-fl7Ul)z$)vITDYj%qon-gTw5gRrGhkqZ0kjo0Z8k0Kqk4}5-sq~I&HH#Y!Bf%b|Y z6K4@f>IIA_x}_G&v=b@wjG&ES5GxC%y^_iH`Pw($X}n|Trm)l`c(#B#HW%N?4+pfH zfyZ_c_@ko{lsXzi%1t&FH-2{Cah#-aus6hBs*^iz&)+D?^jw#xd`cHM)8>$69-UQ8 zol_>}8Bh|>CWtQlQ~tVQ3kbP9JyhY7ve3{WAAsFH65S>bQHr?W-OEhK5f+#NR`X%W zj4!PewuFvh4Cv@(Kux{97$B|~YwZD62hYH}3RBo$Va?S=KkyfxvPUQ)2UU7gLTb>^kWaIgqn^7)I*8>3Q7?m^szn;1vAFFYe{ zlXOIMw)@z9zG+L60Jx;lc9d#xHl2s1-~pRp-XMu_g|CGA(m2x8ODb zisjj0b}z8QR(>Gl)shU8I>hhjb=^Ir%3xw66=Pk;vSg%Ru~j#i*m5X8cv-wk;5o(J zX+4xR`zl;vt?(Tu%g_a^B|p1}z^0)Ms7mx3YJG^+jNZ6LI#@5eqka5ChZcGLklvI3 z1qdlmofk*Yv?u5)H0y1iyzA{^gMk5fICegaR+wXBT3HAlES64W(Tky>n>pk(!6kQ#Q+2i4Q#xczcnX`PCCE5d!QDh}cGg?DVPucxFcYcf(4hgykKL%e|It{1FD{ z?s0$=-xfeyFENiKEpkCCw^wOht4TanLZCV?Op|Py{)V1v0q$6$0=c#(ofb*;tE#F zw9%pEoS(}MXc$NTK$u-5O9Fz|3%HMRw|tHD=Lwxa0gejK6bF1fBlN>OKCm~2YqGAm ze@<_G=vv3H8Hx4lCB>PXfN6QLZNhcc%RLcv-b)5NyjHaKz~!hVP3{5BcV^r7@U8*T zXcL~0g&VEcZ`*AXUcSI0r7gCxF^>bZtL()0pkf)FCsiFMKj_uN$;!JF^DN=M&gm@; z+}eh27KWOYo|T5ybx$kYyB{ETVPi}#CoH1(`)3ooKoqb9u=FQ+znx?|A6a+LurL-+ zzuS~s=1g|ZQ~OJ+93No|t4rHad^|mlCu#bTA_zQe3}t$)r7pq7+|&Ja#3;TP5#!K!9052y>pQEUx>F4U;64bX;s zY%I0-ISzv{Fq8{M=j696aB$$#J$d6hpe4$iWG*H%LD2Iy#UD37KA^tw12FxXt|YH? 
z1#n&{HmQo~pIHJ^VYRC`EerZ!9GWmC7MlcjpXPdCNxP&00?`Bu0ON8Zo!UTAprwpB z*h%0wkB`OR$Az<6QM2&RmQscch-|DZ5 z3fu$vq-xiH&5@ro*O+udKjY#qjcx~TGP!nbBb%)DL{q^k&mF0>vF!t zf>za$_iOVwU`5iKheC$EMXDhtsev`JhIjM)hPmEYHaSuNi>{W01O3^J2RYt1(HRdO z{k8Vcy&1q=v$q4uX(a|GBzPk93h=xbkpqd?-pB15D?MJc*|y1j4(+zqHaxes!_a!&0R+DR)QK1wcJH+fL!aX%%zExJEfyHBfAj^^hC#!JX8`ky90ZsiKb}cZ zxwSC!*v!(jb^^*GS4)O;*{TU3r(;F} z7WO+=ByI&Wxc+v07dW#k29rnY=&A|`3byDtJ4ZFO5qx0z9kvazW?t@*Xy)WsI05>HSEyGcu(MNPZ<1pt=N6XRsL0RAA}g_ACEh8SF! z^GQtW$jW%hUpcL2ZsZ($Djlu}EPTS&iDwgz3uT9Z`7>|otP@z%+U4THI1^Sw9^s!l zCJ8J)IQf==Lew$0f#;YouX=yaFlJp9KCaPGj!EP;fP3|naKNPyhqw2`=bXq{&Ea!% z4OVM_b8r8${rzY6+G*@00S$DVxMx4*c1v_!F0imp@si_+zsG_^e;M}6u5E0_Llq&{ zA`j~c$@`?v;4>hK?zR*Asr`i#>PdZb_|}%)9!N*f!@bptEkFnu^cDwldELAuGOK+x zy^ZI9b$7yWgjLArSP4=>82_V;7f!Ypaq8-gMX)R(*LjVq*?$T#hBZZ3qXxfVf?pi^ z(+(P&3O`lu(N)x!q>{a}NeYuke}n^=ps}9skZAFd7Ng}h%pEUj%J;EgW1>f6v=B4= z*9)+8bGt97hJH(Qx_Ppsh~9JFF3E!ZD1%x`iS_)CQWW;%$MI-XHhA^1J(4o;=a2Kd zx#b&8J?R&!x{2bdo|Ui(QBIxgecH{BLtpSDn96@t$m6Ze<&@}rRFotxd2->G_>6I~ z8PGDfHLjc< z05Q}2J#qd^cMPq`HZs7xCUF+{o=%I4RXK@BcDzJ2onLlAjWi&eKAVm6E8tcZX?J_p zb_+0=ce&VP(b3a;6Ux54DtFsd(7}$x2YuApW_3e#u1*@M$2hle<{UN6Srh>slplvH zL=*g>KOnhhY&u<#f^X)w?Zg&u^l4PwwnLm;iL}uo z7&jzniVF)_C_`g$;Tz_xhx!<4J>&tr!Ho>(lC&JLnzq_r{ zlmyLy7hFtJepJU#7L0!LnuHn9wKb<+ySSD3c)!`gofI^ZS-16=_<4RlUQcDB2>mR> z`EMx1?$JZ+$9WU)?bbj(te_xt23bV;94fnj zJM(4C0w2Q-0Kn$0Mt--J{PnotT%RD<71M`u%i#}j2c?L%X``YtceKTpDSL=EiJ zb2*Llim|X$_9Zxx;GV~?(RS1ELBhs>NCS&zPF_NuB0nnX!6M!{cbEHV+KtA zKt3N_23DU7mEk-Z4q`5^W97GO?lMOAP}Lo&jJPbn62^tk~&83gO-J*GR6 z`pkXoDQmxjA&fOmGs>Zd{cMAgZwOp=^bF$K&oDu^hi>`KRGI=pTmX|w+zkYTU0a-h zjBKg7TJv2%OS6L+uW+u$PQKOh1eQ;xpBM~*h|*0V`J?ODbsaurXd|<5sEvDNZMSDS zPj#?3B1Bu2;M3)US z!ar=Nd~+Xqg_07EM*jfps#l7$40$BMg!mFn;a&|U{ubg#>BXl({@~{cWC1U`4wKVS zgR>lS@SDK;_8S`MrzM9>@f|)H2XsEg1SxZBWP`Z>~h7y*? 
zDJn=Lhj?X;mPuc{5o-dJxQ6SuSY+B|BPyD7^z->l$n$c_??Hf0TWd{v36{Ee@gvesm?r7FAeGCz(4?*o-}LI5d2!xP!`^W-%r*s_;c~sT1ae( zVPo#Qk>q4f9{~jTQ+?^bbTA0g3D_DFLxJXom-9)Ek0~i%$%pHH{KP;LY>UAc@MeLKG_x319D_93(sA?;IuU!~}3mdvJ6^6awJL_?D-s@s!UJt0dEP9(20?v{*Pk}~D4sk8E>H1!D<%`QUcd7NlX z7AaXDMumQbL+O6S&xMB|$n%XQP6{7E|3T{kOFwRjFC-<`orlYuiI5BCFPtkh@f3cN zrt$H&k9NyMbn1fCS&`G$%1+ErZX*YEd_Zbs3g;a2Q1-E(XHnmtMBfU8_mnE672vkj zxs@0F_^9CI>X^8&l9*DFuGIN>--V&E5OzKP43i)0{zuHiV;mLR|EoKl#gXg$bu44kN}cN zY=Q55o(&J()p?k9-hVicTf^K@#oga@^dR|k{PU=4_v82E-OOc9k>(X|B7Hq_Je>)0 zPkhjr;W6x5mCbnoqBjo-kXIaJKz^P4MVaZ$2Qm zV*BvYBU6T93U!s*ij}++$qL)g9o;B-2Q^AtQq`UBFYE+X&gg&fWU&0t0Atgi%WZ!Eu@>-`&KtO8 zHQet4`k>BibM*V6V&&54=4wpkRJG6Z$(wVbq*|cH;kY&$*Jd!!#aw#zzWXEbZdOo)(BrttV1o&pyYFsv9%!k! zKTXTb{5MnI@nJ;YHah+?QlQUtg>N1KYFt`#v{vuHPt~2fpVQcZ2AfZ>>)&EBuKfJn zNe9Y7uc8!JmYsqlVoanYC%C3-zl*6tARa6w?{MWDlqWOUMwkcMipa$! z3p|;I3eqa%nvIM{QcjOAEdCMP?26LioQM*u>q*Z!Nvx@>N>>8GE)w8Eg2Kv4cjYQ} zfIbI1pURMsM99())i&<+qU%|#2x){>@VNA%^71ebvGW-sD`zc8A$wBGAdC-%x#qpx zeh*!Ayu%qH?^(ZyajZ`FJIqYQgf{@=wfc}*=SXdM>ODwS zIg2`EkB4DfTTHfV>a|ucnwHw7P~Gd#bYmF_|f*VQ+T+Z zSMst0Y|<}RFMD~}lL4c4C71Le;5Hk?9X4>_c!$c#WV7rJBWc#$6O3)APWQimHuqX@ znkeS!FID<2ZQb7%&SjOePfheK`}i;7Ywn|!H|W1TfjB1%dWwLIp77^u9^Ex(xS9)n zXnsBSy$iK`m@F;ACSXRfyxwt1t$u`BQ^K59UYOs?Je~ z=cEDLYTN||kD5NFZr4c{ET>b9+!zcLqwGL2#s&8o6K;@eNxS(frtgXYB@+=?G zM8HEa?Rq(FPsP2QG_4hginKcjONBgMC<_*B%jHr8AT4v-=Jwp`#?J}9xe-0{4-HKX zk*Y;+YoT!6wLm9)N7i^jm+yym;?t9p$Mvpm@CCh zp2a0p8o15kKEG`jRij!Z$4^!xn-8dSxlL`SY$=f>`klOhGq!o7?EfOdRz+uz2T1*Y=egLA> zcy3>{VvWVlRD*EbRX=*;#P9d;W^;eFr#z~EIg%xMSo`d~k6)5A2jpb_eUAzHJF&T` z4}%_hG2=Nb9o})tg8XT{L0zj8nB+;y7JB2kY&W$}SDf0q^kkXiW)=s3vNd&wvojAc z=gG>3r&L5f?jhq3{7z&kG%RZYR)G;(nh5)pnn!vpw7I#ZRp1&M+OMSEU(?Aj?`>Na zCj?b_qlUru=)1R4>HrdBm2c3A7jsh_025K>RpN$T*zV3Q1Q2P0kSPb(!7XStGwwe_ zUfFlI81{GV01JP+2=Onin`x#-kzr<*=>=mHq(hj2>}MXjMo&<^1AyccC3X%E%nL}dz?Bswd`APCypxe=_8L;lD0E5Q=rePboA6^>}n0-adD_l!|&2` zB#N>}7ErYZTt-Qq6LGAs&j3iSson^dVH!?%V))qmMB1;vpCN!?D>` 
zwR$=_--dWMw!hmRlpn4A8RL{z=L&ym%0qD|6$@7_^#dE2@+P|NzT(`{Sq5<;>V6s7iZ~wy{4w9lV3O$=Nh5;mzFW_?|iEt0{E>~+eTTI@B)blaGppi;F$4wd-=n8O@ zO^Lui=$QkL(FKIYDM{Vs=OTfgoM-koXr9XG=AMn>aPMsA{Nu%yDi{S=Xvpi8Ms-4!~l;H7793}G3cv}rJ=1K4B-0g&xqk@ec`)FRKRid?AlfC z`zw*YdREm~wex9x=iSi?$*E3`7ta{t$g~v{PUa?}pVfUxNBq_P$7I!X5oAEZY$JGv zZlKBwB2j5VNnZ(C@G{5FOq|gLI0jiXFk5Zp&Ye)7EZnd<2BX2hvw6S6`sIe|7rZs40Jm-fTFwMBI0+Mtj?7P>dhV|KBqu3$X6a)Sl{mWZ;`@Glj zdHT-sD%Cve<%>OEKL3A`RKfCw+_zy=v+o)j^M3#M_``&MksN2#y~*vD0%b;h2FHgD z(o_Gc0j4_?vx#Wh z2jggnXR??EB6XOxf|n;L_9QI3akMyh^k>YcnBNlLr5(9+GPkg8!zPr2U*Wyy%V+Hl zO5@lr^(3MobsC5`zPhRpt5Ws4H|m(jRk@ugxrK92vw z?kR+|%8f+C@baK=CYc(|^F_+f-e0mTk`MbTpCdnDSeQq${%eG@rG5713^S&*P_w3BvSI6L|85=Inwf42BvhkqwTw@eLSNj`u zU1F`%pU)iPA}VAoe&DfeKeRaNL8pI)UimD#2-{&<&q%mgTN5p}yrmQWyy-1o4ikKz zmon?*Q13Ty`_inm0ojq4&#bz(wI6mB9@UZB+y1wW@;H z_Jc%Y+++~Oc3H{Sh54TbM(Qf&hNJ7mVkQ_-`g>uwO!M#4;@r{%PHJ9l-%yFlPT+$_ z$Aw!4dP=i2S?{>VPVB!S`%;n=%87s=Ks*SeWX%8Tj%h+zJOSd(TY)4a3_Afe(bw_?;CHtmsD=b0H4~ z7G&n&Y9Lytnbf{EMp|8nC<`2tkQgwf%#1`joB=owy$)>KN4M>SsXEP}t|Ay1Oh4QV z%uYqCXP1(frmwhJuLBC8;+G-q>R&TznHqVQThG5_A0?Z!b0VMC758#Vxz`z=b1b)b z&=I}dlj>4kR8_F`$h|vIkNLg&HaMj1xD56@@MS}#&~*Bu;gm5{A&Vyf_}Kk=po0;{ zkAGmsXtoXA3T_P5iITKfn-Q&j^1xo+oM9vfaZI{VSmx zz}vwHH=9T3OfE)c03Aa+b{Q^s)Ts%ruC;Iy*iLueoe@O(jq#6N?yd8CwY7jIES%L( zH&Df6uj|S<)l0K}Uz2&Ep%1=N^+v*CphiIqpSh68CCa>+`BCWJ3=ZVY{Jh1kDX%!9 z=#Z%kLBAHUYNQ#r?}jO$FK3r)y!oE`Rw?C^N{oZmgay32(adlvm8Zyh5-`+c z>IMJ#_Q7~w7wZxmOCq+#hRGG*hac5D`r-bkNEHeXVTq`9D5tFk=pK0`s{5jon!zrz zlu*=-7{q$L4(7kg`r$Wb_HR>C>;a$QvSYK4%BE?*nzuHPdP5%ci5#TdLqzY{{sEs; z7txOH=J)8(e>Y+VbHo5)?fU&7#A)-#JG!?M|192rr&LjG{9T{fBD;QcAnx-n z>EVcRM=-;&89^Vf|3zugkb)QY6Z)r17RUS9T#=( zj>Luv9fW24%b;?>Vk}alHW@xCanzAKn=g|R<6?_y!cvvM*)4DEqnWz1>Moz@_P?B? 
zzrQ3x68khs_(WxiUgLDTqJ@2YRwQkwMSAlMU5^g1cJTnD6U~=j*C~4wd;rw67chlW zW7QjChDu{u=Np9Btk(3}zxF@1fdJVdGxhzMUa%%$sRL?r7pmXY0Rqd5MKaPMQj7Wq zla7rbbh100s5}PdkaV1woh|bpK%QYNkqE#0B+FkH#CahQWCzkXTmCF~KJFGnlPQGB zx&W~gWqJ1y3Q^B`4fNH2#LuHVjp!W!jh0PEnvyRXbbSSm!3HzSRm$LC!nZ@Fd&V5d z%bU3ub%mZhZDB!mj)kRZRKWJ2(%O(|)W+=9_XcG|OqjH}4l`$$VdjodPLU#H-(dMzwAF$aXQB5Gr3JQS6YRp}tPj%YHY9 z@^r?New7y>%SpxXYh^5;Nt@-%cRMP5(F%atHG;r zA)Pls^6<@5Pdv59T<+YGe6l zvwRVhpQbeA#mHhil=aX>>IKMp(*WXfWI?0go?u{A!vI6@qUGgX8X$n=(e;bPBqc@rU|fe9CBcBvg*+0boTKZ_R=oF6tu zOZ~aWUTW43lQhB9Bqk7Zb29?{3?hFG4d?iL13PDz;4kg>`Zq|f9m)XNFWwwSJN+{w zxy=P|MJR$vR-Yg{7fb>gl-2iZ;48^2=btgQPl3V5&NvWWZ1;F|P zJb?WlWy>vn>W@QV!#O7fx&zb%X`l+DM3twFef+X1Ayh}KJeY;;AK<5sjGePP@ZY)zJmPjZ;DvIS41z3_0L!5V zlemD5GkYK{dXJ44l}>VrSzXE$6Y36p`Fn78>2NYzjVd4AY|`CaWSRQ5S+GYFm+nG%N zC-fOBswiHi>2eB>?YcS`DvLQYH>aCy28GLn?3JSVVFj@K`03SHzx)FfSuxjEEKR}$Hk-Rn)>O(Nu)DgV zi||vwewLVMwSC|hWdJ$BdiDaBn_Zf2>WmR3j}BeCe)Z-XmQW4A#sNH>&IYdN>33>> zfeU6oSN&kaYI2JA3b|PT3|OP3i>@7U*q!+*Qp$4eQfOFDe*pJ?xrVV0mp*Fs8qTXd zIWM0lL8|Z#!rJ4bcNW=aP?Z?CscGnNmOy$%O9Qn$STbieKm;UmiziM{8j)<@qu#(J zT+z%UduEC-SW2^4-eX^Aaxpabm?#6HCy}X`EN9gm@4K!qijpTJ;;+y5nAuW;qK$BE zVB7HVm?C6llec zjczdVAdPbAcOKt~;r!PNP=A#nY7u*`TI#>#$18J5g2im?PqP_1*^kbQ#XP{oeVC#A zCNa+kF_qKqg&?&*w3kCPZRFZ-%-Dbr12Yo>1fyGI52KUjmI*wLB-V!c+*TwMsvn}s zIH`CH^XNEt4+J2B-v#zUghe`uE9>I1t5WSDBLklog)!a&OyGcyd70XAXR+Vy-Y>W< zIy0cc09K+uY6E~1Fc=GmX|j~#am6x$vH-?>ZmRXj+z~C9+D!7;SRDw1Lk;jG$}lcH zdfD*gs2{PNS16L_lZL*Zg4vE{yv6u9gjc@CAbV1X+ejtfhS5$|ut?UuXaLu_3cBm3 zc`n~gjoX+?Wt~usad33+;2U0%6!17l(|hKRJy~{&i)(qoiGXF{f|jbrMgNQ9mD$py zp`+xQtHS%D-CA>!<|dpfBXdh-3Sr)Yf7Q8}t+qdp@Q$RfgOq!wyq3prMjHl_Aqj@} z)cif-hppj2SHT!hHcMJ0gJ9?nW=N}z%HU9i9CzyqQZ#!ToCU7P*K-uPcWPlX~M8ZNz7qV98g-o)#SB@0;u?L0~SyN;Cl z_#YcUj@uNtnej3hM2TNdcu!yF@2^n6CT*<?%==WW%=KS7O7O;P+|pxgP7yTvgn-? 
znfstOU!ltXrp{NkzBjfWqW0hY+F}?_grJm;le=yX-C}mi55sq(Tz5MbzLEU{fXO@G zk!R(i{W@IrLonGsn#D|Zk1ywBK<|bK2an4jF|0tW>|Su zOPT%-P#!U{4-UOJuvzuPiRgX`mjf0nT`m6jAea+Qlfvt!tjY;7ABIjT6{e-Cu5hn$O)xm{0{(_svWjo&Z1Zzc_LN zaw+nkN>E)kLBZELy4Gw>awd*a@viV9ReRBdH*s+1xKp^?iW7Z@je>vnfb(!C%m3+B zC}%4K%cUg!0?1%Yevi$A9wC2e+rM=)I~DBebPQcz92#wh;6``NJGE1#Pfeckcp0_i z?5^>MPw{LH6FH{M-fI99o-$kH;_jY{v(eFQgbO=qK;CIP?+CL(i6NWz#dUYi&9rO> zlLq4_m31YM(oq=43&&lAH`livU4E1rU2SFhHqVuV{~+SPp9mCH`8Nviue{ip$)l@? z)Mgs};jGlf-j<$oOo_T7W!rmo*1`7yP+2!yi>kXVOcYbd0+qvLKSghv|P$VThZc_#QBhD|KEqy$732Y^GrnLFAh|A zw1e@dyVEm_?4`NJ*HfVamZd*`dVgj#cmh;i!0f83oG6|Z{yAv>_4_l_K`)XC-qHw4 z#f_;jbZnazBPqTg{Dh4C{m%^f<>We*37I8o^S18%>-O~Rvw&|(_YtnLJBQ|g#^>to z!l<5i_CkLS&N16JNT$K_W!vT8ZS7T%s3)_YQpW;F@|p7K%xO@M!uU~4=B{Jh={Y|V zQp~hv-D$BB1+P*+&)CnMn^D}P@)MDVG32#$V^+Am{o#+)$$_d;AoqXgLVOO!u6Oqm zAvGHobzM3C=VJ3fRtRS? zM>NZRP5E?-uD~ySezMTKB?eHY=<4#?Y;;*GIHLz%%tI9RISfI0w|7=&@`YaOYm$={ zAm1WNXW_}E@1r!C`cV)w#;%k$60QhIsZcAm%86)bg7`YFHSrbL4vW+N*!J&;>GpeC zNj7BVyDrQ1pt_oh?P4&)qrsW+vskFnQ`D3BWs6s%1RvIwz*SWwijTJg1vBH3btDZ! 
zzUu06)lh;cXu6LJU>dDRg0Q`^%Y>8P;PWRCj z#Re90#5O|Sa#Gwht++8akR7?(+^p)~F&u#N8o6EGY)Ww&wJ0H)dw4ivLhrohr|EA4>kmejqh@DrEn7-2BM@Mm!F) zo1!?ktL(foKJhxr>ArQ&=!d0i0O#DUV&)sB=UB{KXs#)d*VmtIIp7VRxC#qDmAaS` z4e2)>iO$ZWy3!`?!{E+&1UmoyN@iifyzA^y-L?^tJ`Bx^a!`8`%q0W1!xTk+N`=}fB>8}%_nyviptJeTM-#d)LW#&x$wIF>FS)43`X4=Tl%O5ctcaC*MoRlAIOmt7L`Z`LviNgxv$y#8roy?SSx+ZB8@`u#W0nA!w%Nt5peFs#<7>2T zl*8M0&Y#acNL&za=h5=53evWVgs+2VfAWJ*h9&9FuH?<3NmsuT);zDe7AW&1qdW)F zM~Q8AyV8C)KyH*$E*#%!uA6o!+4*+Xg4Zc)AQJJ%D0}a|WOdft!#kqGg36SsTd6J*^{!9#aX15tCpG*mnxE!irkRePg z6uyfU=O!Z`EY-QlFgU?~>w9cy_{y1|(j!W+%Z5P5oUD-U1WErO9%+*-*&46E6K=nj z7HZy~`F(&dF6ec`XR7}FsYM}B`Z|H~!Li|b*JihdGyo=z zK<57cr-E1RUau>$pzOh%QbZh{ZBjrDT!6ZYs{DTYE)8U&fe*WnW0@y;ptfP&J7Sj} z$iXdeOSZn7n;Ee17~uR)#vH85CdWB@k>9OX%3AYpb)`*8RcFrk-t?{&HGYHM?@Yr$ zq4qdTf{jcTjd@DxFIyJh2>M?;-YIQ z*be@lf7X@c%dw|pvAo@|e2@x%n)r?l7FttP$G*t^^E6C_b=LsFM)v=Ty7EA#|2IA) zN+n+;cc|P}j+C>M`zn->Eg>OyZbPS=91+4dhE~py9NUyTq1L@kp+cr|;>1w-uGQAZ>nQ|=6z@>B7{D<*Ko z4Z*tpR@Tl@+iFDj0z~o#D%fVZ^jqKfy8-gG52+Y4R`qGQXW{)v4kAG1wgcAp zt#FlU-7bcmM%e`;;leM@@ll~xh_@}{<6c#p<^AjTPl8P&l`DI#PwDqmWIAx(Kuj-2 zO&!Mew3;EuRe>uQXIRc7%)t@^m_2!=pe)=0CS+9!=ZoSi0rB0`cIby<@uu*UZ;hGs z&XLjIq&7Ja{`tyk{`6FXZy(%^Cl(mWu#7vbsT=cQv;#$IVZ-KQJW`88Z3YE)ww!3+ z=_j>F$aBU7j+x|7K^-8y-SsZjR#?EH)2bz_?`N0Zo?FEXyr~|`$fKY9Oga#q@r;yM z({%6Z*(I9I?qz;UUbr2jJ{y$Q$a7itj^EuuSx;KJ_LfS1bTvj>h9WJq?pdcJ+K_g& zTAClm`-;d+dO`0T`@7YIaLL%c>Hg`Jr|7;TAS%cCN|c|@{g03K=4K>+^ix4`c&DJ; zlH1PAJDAEA8(vmnQgcy(YtfbE6dX?@M(@~pcrAN%5ek=FVm?fG$D%s%3HaLJ=qr_N z1rc#*9l<-~8GQUAu!;Msc?%A;8K%79`xXL~Ay?kedIvffp=jqS;xh*-T}7!gdW8M( zU*CW$M(h>Oj#-YX*^qeX1~z^e21rdf%g zn#2hWtu1_^%F7yxL{(*^Jyd|(#)GuA=|%UfWKgj zlhMZNDP+^-Lh=?GG9&n2i*Q%uzYRV~32&feMf@C@sk;BxL0q%1Q1!;tqP|Fsed)v&uekoC_FuJgr}nDt;^?4fq#FA)zOT;5P-)6 z*DAO*YqJ}70ek3onv$o{n^E%4q!`>?bkyx}J~6hHWt304lRZg~6b}jfYD8$5%lDX_ zP~u?@T6dUs>^_xKCkuCrAb9%L?7|jzi^Xqh@6XK%_emM1cpWx_G)*rHJClcL_}^{|btO+r%G3IzNK)6_0raBOT8 z=u+=Y^bW3T{TeBpD7SWA{tuY*^#|iz{iSlN!3F% z3Y8cBk(boEM>-=esA+oZ7W?k)4#Y?>B{${qS0%<=xUEf*j;ze4L3tb3={tVj1h9UK 
zgvS%_%+sOQQGEKa^zn2Wls{T|HNGgMzuG;IFsT(88UpD|7&8~lA`i5+1G1^~$IsXrcR?6p6!E~V^)~%tL1PT2t_m@IOuQ6_Dg_AF@)E!v9R*XT zLnPjHg$Jk(?tadtoR@W6>4Bxw&9sn+8>4kAr90yV>mdz}3Om8Wrh1tOKj6N7&5ses zNa_keL+uBm3%w(eeBR#&-<4}J#ii>Cs`?w#x9h_}(i^^(kRd_+7xZ37_riGl%vn=T zj7ndmJx!kSa{|3vaxU9iARsa#{2*P5nt|QF7RjXj>(V9|C4WPcIQ?*MgXuI=AGSi1 z>(bsWoQsVM!Yw=pWxW4b*tzBqWZM>s-meqBQG<<4?i~U zPKPfgld9}l#*`lF063cJtJ~`PT59;S zgy7I15FrF}66ZOsu3t-sKN;@H!p71uuuhaF*fl$sdBUcU+I`AZ>7!bM6ZhJi|JJ$?zmj(Dz&E ziIC1KQV0{+UTbOrrhu)JgmT$LnvwX5V zc&phddWeGYe4isr34pHPDPjz}#X1}PbnS8$Fd$3_Nm^KZ@aI3ajMV&P=F!N_vw?n1 zoNc0v?T7ANq99OF80-O%bdHAgJIIb!d^jE_e*F4rAQ7;U~8qnV$V8D>0t z-l8XqZkFd;r2ajqc*AUCHAq!=dg)OS4A$tpuIMSdWB1ik0WKbdPj1Py!5s8a*H`}@ zPJR_6)rLs3>J%!g(i=!_HSeA>jK7i`oOxYIEj_}1c@TjN5ylDUTN7cOgI`<64HQ{bCT5j>xmpq0C3<5i>u7JmZ%AW@fs-phk(F&r6{}9=nrF4 zb<7up@7tVJ|7sgbNxd&7WH8OOF&q?9;VfrRQN^jiL^#=CdtcG5w6F{ta8@Gk9}OV^ z^}GeZ4MdXyki#3wp=>lBfaLLS_?&$? z->~P>BTiawLMPsFBLD-Q73c1dO3{iA;}o)I4x>IAREpvnt$zp_(y<l#WNc(SR*=8J#!NR*dh?IYW#8#M z(ILT+j5fc)PLuG!4tA&DP-UJ)kU-I0Jik-R=`DCR{pp9r8_tlsPUTg4WRR&AV)Dk= zB!_2c)UedvseaSex~6NTL$Y;?*K>9fS%yNHGr8b&poUr_sJ}6%`2O-{Y3rAf2*GQi z(v))$y#%yquRUe+44EJ!>y8C+pOL42VBvvjdk(6y%D$%J*vXPDt@H>a4?=NcbzwwE zqrc?019tZEKz+=q*vpU46D}DNU5?Q70Uuv1Z9D96Bj%X01u65RBGG&6E#G$oEr2DI z>B@IhNo6fg>g%=v z&3`buGtcHG5nvg*2B?d(bL za+J8f=Ns8uOF~8qE?ToIS`0^}8uz<0R@~VhA`6p_bUQ-77$ea(rU<$^Zs8H7i`P?i zCRD!k(3)%^6V%8{lpgKeRN&X1mT&6V%oS%?=P;yLmx=x$nfMHOcC5pOvM zqVCevmBB0Ily4qH*M~(uVkyJ38mas2RoSbd6C;D>53B9HF(J*Oh&;ixwkyLaUz60m zioTYlNdpbRYfV{M)j$E+u zvzJBAoJfbTg%-kDEcMj!n9-WGk4pDoLu#;ZYv_X)(FL1l}?rM7M$>7N4=h zsFHsa{n790dM!TRT5cU{%GqIyO?z=$-z-7@w6z&_ck*%=Hr2_0Zw5jR33ppr=3#Nx zYRl3#JGq2XX1+UBlh`McUsGXYEi$;x3Z`2cb$OdXOAqxi-n&{RF0KQHJ+Pu zU{I6$erD^5T#q8OU1k*9m9q-9ZKvu)1Bxr^LQjqx;VS{-tr3w8h`iKR+Y~bNc@np@ zk@T|TkR?}?cGtia{OrWi^P-Bhf0oi-UD$ei4J76w9bZ$-t_JdRfw8)uT{6*amHXp8 zC7)>I2-A&ieRdC0ZV?E)ARH^UhC>K*2D7;zzw!pw%Pb2h}P zIOP{tzehNG6nXxOFZ+@IEX?J;7ZH9fZ0k-eBSLt+I_YSiLhXP+59~28aUj%onS9G? 
z>>mI?=&I?JOLlk1eqT2CCmw(mS53K6dGURgwwpNH#KCgQ& z4ks2$vF5@mX7EsLOyB;9XuCa=;?071-D6Ue@U))+Hf~{66*m9Nxgkx-tnKb^n?E&? z#xgz-sHj!gDv01_{1-N_|$hsUcyj*a|G%V*m#NJ;T>+BztH)i4xTa>7ohsVDy2DSJb%2o zf%Vnl)A>uUOp&4U7*aI&B*oVbnZpX#%>D5`ENJYp)U4I3eF{`XK$K{k{HckYp&vX5 zr?|adW76jou8_CRB*aMjIQ7-M%GhWNNo1lv(xXyw>!D@0(2@@0Ca|wIw7Nsf?iKgP zV-W?>ekEkjg9s;-uL!>P4Lsq!`C04ir@qr)vu6}Pu~pHF9VUOR1r zwOVG4Fgit9?U4AR{t!C*aMvG8MCKSf%!4)#%Fqe&w=m{A`b{&s+;Wp$V|n09i*F%5 zrRAMhZv$TQh7ivkjTjKvjy3&IMsXuu?Z}{fzW?&-^|#Rim!2F}ddEHTj$Eo;&46gb zz!uJbe7TV+AfO2g|9`$T13e9q+dK%~FDP){t9-BOUcn9wR5>|sNC z`fYk$EtqPwW@z+~xJMKmJ`JSeRxFl{2pJBC) zcl??cWiupagM&o6dO3H^7R(J8}gNRU2cf*Ai_4!S$E@cJ+?FcPnJ~KNib$ z*MHhBP0gt57tCaVLe7w_U7R0@6z#$r?MIys^CCW<^8GiMl3G8QNuR6ksj%-Zw^?%? zzH0sQK@{pLuxm2N+yBx>DL(pJIIlu(@P`43GK54(aUp;1AV{wEGYN`&HxlIxmV9>j z6lD}3ZVCrGk<8(!GikrcV}gBoR(KBeY$l606K#$;j7#z{>rD70xYMf6%&ke#$JQMN zPc?G9&T_k4ZM!DK8asipXX@K}`)SQ4J&?KzBcGwZ9Tv|vX4{=co1Yc5J8Cy{I^qKn z<5MBqIaw$JqtGKBn%Jz&y9(FlOzxgDs3y5I|L9f-Mo!+pt?GN}!;I70{~`;&&2lM@ zOJ6>@0}$#;oaes`JDJhC!f-g9B;)UJEA_ zQ^hY$KBCZE1=Sy0qLn2SCf6%|N^@-uARd1uQf*a&y-}YsVJ|jguTZ2ut(i_o_durX zfC)c%9(JLX%Cbhfn{2=V3xh2NEP0~N7ye-!K5|R@LP5J6bx4&JPVzjk<$mwxn|wrT zjyR<{a^X@GafxwucA%4Vk`w6^Oxg0oRhSoVMU!yO$$oP0sNy!{`2a>NqTsrMU*K7wEPzWtLg;Xo&rSLL%iay6J4|xV|dzn zS-ktY*jWIKi3z1UMA2c9HO#e#yCb8K)%JbT&cd{Yt+I!O5)iIT7RtEK9K40ru`(vS z6Qm@(cPPxdp05g&e8(3`S9+;J^X$MR>c(2onYX-;WxlpTHeQR-{DH88g_8c5riYT9 z0Ry7X258}j>d@`+@MK)k)&EGck+-^?0raTHZlGOhXbfYWWB7WEXD~&)3^$0n;Rbf^ zLQlSdS2So&6Gtkg_o)<3J_-KRp(AWHOy&ssYT~u}qNlGht>T+DY&#&?=zLtSiSe>C zOeG@%S@wr3OqM47I--&TvTNtQf1mDj}c?O8t+vZ38~=k|#wu zn{rx@we4}9xyB-e){Y!P|J_1p6kv@T`S$<82J$bkZ>qEH zh{d9wG2e*@vexBnOL>2&c^^YHi*_waGmn(GJ9C2{th)xe)#AqmI)vYRt#Y^Cd13p+ z9%bNuWCK`%6I?#W!(Tf-OL|bgGo1g9g<+TAS~6Sj5g0c%sKyp-LdGPmQLv%r*4S{; z85znJ#`+yF&z@zwRH20q{AEOhl{n|8o9vNIe)%teMUUQ)CV-SJT`RFFo{K)LlcV2AE@tLlhol_~TjD|nq0fT4R zV6HWfl1)lVG1^>jA8weX>|e`Pzl#Q0BH+`EA8H?GnnaHw1K#4LPe6!UWpg}g2EY#uTc__x zvU1cgt?cWM=gt$(Zp#oiI^g3rX|Mj~c%D~nu)HXdPFt-nfxzwau`IJtQs%K@XY??< 
z)k!VYpYcieG2CXG>GUEy;gQc_`UB`MqRnGx{;#4sq6~b;fkp;Qij?CQtOv z=h=@G9Df*M^bBLZ9>eqM4g6a(@cFrcVad~W-{EM&NR!~lHv#GozUiI0Q*FC3FUTfO z>8@~mc-=PGBNY;YWzy+w`7Wf;kALg|)vT!dBXEDQIf=5eA2Bt(piudYou~W2hC}W? zyPIksS8)0Y!R+6vDI&UeVJ{MCuEPK1)P5Fu?6ughUyUIDCp|4!k zhIFpKjwZg&%-SjLF5re-tJAS3{sGWU0T?&(>=E6`E<@reNr1lwCz3(SAQ`Wd-LO^p zNte-T%tOS&Xc4FKqE9n#VAR1~B_A2Mn?qwCcH~tDP)x52idn(`IOzYGK6>73J`t*o zD0&9A1CxwN+Dl>kIzjx2yOO*C5EwxsR2hXYs0 zNHwMs@xtaIV*X^Dq#U6&mX*ObbK9^j(`~6+oNrvULC#WlcbYA zSY+>;-L1=vwJWoRVvrMEbh?0*&RyNNFA|%NKF@jNC!et*Jqb>p0~#&Q7%u&}oCo5$ zyNq*y#uOAx+`K6+`)eKB4u9$0TmBxxFA4tRr*wAGF^j>-L*8a?`6u8`CJY^=|0*i_dI;%mb-V*r{R=gxtG zorTKf`JKWZL?}|XFo{S-CUHeM{7%X%dI-3gJhaL9zY8lRPUeo)WDC20%X;!=pKkq4 z2~ON;0RFIBdDpArsVIGAq+AB+i>}tuIqC0U3*~58gFmq?s9zLMn-f4sbD%tmFfZY# zIH78 z0o9U?bChtQ?!w}`k!x3wi_cyy?iq-0UOO88cP+CmKp5j=NC%kv3xv%e-W7mm%|_NE zE!@nMoeK}9torUo(rBv0b0e2i=NPDKzXC$sOWL9AVY0Ch9SZq18W?-D_y}$opf`bc z7I#Wq#tS)u7Pob#g1`POQ7I0yjIVcT>ji*e*as2919ElRLW9)ek| zEioaYrb9DQ)7a&K8DW3Kh-22p7QA-``Dn}T3*xc6i??gz&ZPn#oL?97)@G8GG=b=< z-CT9=Nx?0W@2sA$CjAJ;Ft+s%5vu}}V~d~@p+{t><~TI0KZlKWDE2x;?@4Xjo~4Sw zC!phaYHPgC@!y`J+cygiT5Njcdu}>VR4m%txxk{EwQRguvkZ<0XfrkuUTm1d>g*5h$>}}IJqsIGL&)@#< z@KS}IUGph}qZtyzDtD0If_MxUP;eoc-x_gK%w=cEA`udLdk)d}O1okE@Pcu3(_ozD z(1Q)_epH#QY5CWm1xrVVG@&n<#(>6+tWaJfV$0XDF5hhq-Tnu|9s)>nC4q;H5B{kH zPG$cR+@YW+>ri_eLd)PyLn0T;*3;v{Wz}j2{;Vf4;5MCP!@&W>xeH5&lhM>{ZOg;G zejG2w=Q&ROW@fRwDZB-jYDvnhpVKngb}qzfruMP@+K53{UNr~JTFh@;@^}+TvIWq3 zpbeO}|DRv~dQ>%0$E6jYBEZ=F)Wf`eb3YErPP450jeSz>sTQC1NPyjV_g-dPp#DuO zc7MTG<&Bo(c>YVj;8EBSY6s%Ql>iFo#?K#@qt0JEH_dPb2&FO-PW)QpkYFPR?My6# z)^PaP7&_-M3Gk7dg%kHBshPjuA+L){LEgGj3*^dT1g@vuz0%&*Et!;0x_E z9{B?neLx^TAeytS8_?>8w%-4X@7@0GIYKL9UmFMH?I?WVnn&!I^decc)(Zjp!oMkm zknRrKlZy40qs%`G)YloYq$i2Jrq0ertNuEMl&*WZuqV~fjOl-|Nm9?P=t*O)@OHJx z-%TtjP>;j*L}Fi-hlKr$=GAefwQ?Me=g`| zN?`YogXQNmPV1z&)v^|BJkQp~Dp=5n4Ptx6d7E=kMv&Bl>_7bG)$+L|ISD1|oLN*;?7# zU;wMLjgoBs0>}eDX`%|7E*%uat0YQK=ORrXfLCm)+nAO#AKqMQ1t7cHin(T4e>~QQJ ZuSd-5iUz*Tc>wFr)X?fmmBH;t{{zYkEKC3Z literal 60119 
zcmYIPcRW@9|G!p3*&|tHkL)5@DSL0Sg(92m8Oh#-%7}<;+2gv1l)bm>y7tAr)^%}> z-;qAQ?;joy?mffJ9yqvyo=2jMACN=u_LUuCIan8Ms39N#OvBGH&qin9?EE=|t`9*scdt2k4@cHw5 z!ro(30nuO|d>wpzg6|_(8d7A!65`x)Fnu08J^gWddVc!cZ_^{7xmvHax?VB|wI7(x z)xYBLCJFxw=&?ko2*`X6BZ{_*A=OHcc~z}s44ci)mfQSQ0(yin1z z5)JwU+QJ=zpFjKBxQ(xU(~2RK7$gaDBGB%xzY*=jjZ|sTzW${GJE65d>Iq>Rq`v&; z>(LafNO90q(ebfp=s0KsRJ0s@x9S}{TR!!Ey)g^1)veG)WX>7R`#p8w;=6B?@D=jT zOF*aiR^PE}A|S{4X}q`cj4=U}JRFzGcLq6$<-h~Qi5U*&}Ps@TEnVS9-=aiyt1LUC_ zB=mRI*Hzqa&np};dt>)iQh;ZeQR6(EuGaPX-F8;~1L(CK7Eq4o z0VNf1#Z&oH6;L^j(B1zgLJsHN2hEYfZXWh)-Qz}5Mc%8=3BDG3_q)uktBE8<{a|ck z0%hY2_2njna3hy;oIq>1Mz*Q^&7cFV zTD<=@65kPYDhsOEf2~9Yd@T%WI|F?y=)w872fClgKxVkGHGP&S`^SSp%YUx9Nr$rV zY!O}EoTu}CJ!uoPELIucdRgX&I{uDO=;EKiq^q%!Q-Hm$KJj~i9=rzPtkRsB08S0mSyK_A>_>Z^MzyF1U9C1#W+qf9j*Q}PI5_@wzM=Uy1 z|ArOCb2iuH@Xb_-f)MzUo)omh7~1~FP3XVpau|X#NUd-PrKD~(l9ACv96@~rmH%d( z^o4$FLT^$x=N%y%CU=yiO%Ft>HHPtt%j+(LfhDKx1dp6w%FVYsfT3FGqW4MLpA!raw79vnO6Oz@C(p z+zc==9dmj8`?tKJ$)jbawDjZx=3dpo$KL+${L8u72o;`5aQF4e$tj8vi&l@HX+CRQ z|704P*T1U2A4mL}|Cj+>j!e?Aj;25Pxx zr7!QDjW3_KwRt#Z1)jU@Z5{OVrkL24qNiy|Sv!fsm}A$Y8eHh(v49J&+KTR#{Zgkv zABW?Qso8I!Ofkb+E`D9>>6T$Qepy4pj+Ql+*jCQJ`^Fw|MA7ygWVoA8_zn)eMlY|u-hbxuzM$`s)Kld{)XyPv^du*T=t{RKUT)jU<1r|tM2%|WhiN+b+G&@;w zit-=yJXvq`_8UAKDV2)j-3nV&u)Ae3MT1>Q=XL~fAN`G8WrX(9;Uc8p)8%Ftm;iWI zR7w*?o8^Zzc8aH`NHs?vSm6vY$EwkoQEpXj7avSI9D<^r{eWIn+KiW%ex2;I;MFvT z3qB(%gH7IBttQQ}GrNX`&8%Trbcg8KNSdGPcZ`NTNd-=3hQj5-i} zYf_7R{VVo754t0f9-_ZcE!((3e`~agkud$B<1L)I2ZDO@Jm+s_WxtEUd&uzkfmXRJ zHRhVW`e)&5Z5N<}*xw1?xcSS)?#3k?4olmd&Q*>+eX15JV%DD z{$HsBXY(xf&9Enj^Qne`RBHZ9Q37XfZYo*^Wea#+GJn&Iira0_DQ#Pw(Jed_7a7$J^7EdqMl{{b8426EYG2MA<8J~(P`mpSFUdkxMgtdPm44m84sLsdX!MHs(1Tiq{?-ZBY%3{jS8 zH|0<*&ru6E6#flgI~&jKZYcPBFs`1(Tr~rjv-(Ft zmTxWO!LBvTE|${jx;t4;&Myzj6W&%Z-cqBDPP=G*F@?j4{g28va|Im7QdN|4^^ zb$KdO+Y4G5EF8MZZ$v$eAg*uzx#0J-7$B?<2zHo5b19&;`8Gj61zMVeIAmsaMG>7! zIq!lf{56V$L<(T_7l@V$s5rsGoi0mMiMT&_F+cQ=zZcDs*|? 
zD(cC<5WC(xpEWR1fFM$P=8h_l66|O-rS|4`I0~LLY-zFMS>2x7VuOu>ab;u_q+dUo zLJ32>6d|?dU+e?yQ*mdAaLzmgAS%&+3WD5%E2Bcqxya2;4G%r|C;M z@bDn{zV&C^fI0K*zVuxNozrxxI2EuS=H~UE*D81Z|8Ps z3pdi_$;i-5oKIavXNhTAZr*nbqP^E;Zj;>De=qeiap zd0_M;kZ9smuh)Y-ZGPZi@IK^EourQ%$y$GNC?yV0IT!!Hf_g#q2VHn8aNzakz~t3m zTO5fZ4!(uhY>Kp09xH9Va(Jz@GO7zTa^>jBdgV-4TmkhlRZP9n%5!v5wf8x6hxupg zf@-xStiuAkBo0>m8s7X;CgLi%qHRn-W+2#xzJv-7+u!9Ph4Fysg{NxYKIUD3GzJl5 zFC5@`i=n&%Hnxut(=!9SCd|MC_U&ORINLVYQmO7n`(G@52q^f-|nBYz`qeJ)H0#MSfi=iNqWBXTbr| z)s2Zf8I!W@xV*FW6GZ&P<}?eNC3rzt;tm46G+Fm`m{ZS%!^I?)JC79?A z$*~!Mq@qY2!#5kox9lDtC8m1^c2lP-1sAd;(+)Jn<~CzEyeex@?eS!+2~@AX8YO6mnS z%{d_il!wVX<4^gb%H(UKLEbY)t`B%%^-v+x*^NXM2y@QvNPLdP)Mom=_WR`UUTl1K z1Q>%k{QbY-fB3WBuJy+A}Auv-MW*$-iU$=pWTD5GIWql=?=s9i)_fqgjN+X?R~8nh7kkC|?%i$@zJ^;G;mcWWlR54i{q zy1#wS6rw$L02Y+p}0)U&wIN& zj9g#pwi&H6XsP?w({osYWe6-;%EYnvH%4>WkheUjIX@ik#=`Xe9h6(c#ISS_RP`NI z(mnY3br1^YcXW7AG!KA&4X)c#fekjb;$@eCYQ_oS&~-TwBQMsZ$kwA#0>SWHp(YZ>Ilw|jDAJVqHj1Cq{ zk(*1z-=PUjaIX{;Ucs81?hYDQS}Tn*pCBIa2~n-+I5^YgS3q4Q`s>g(EAo!bZVKSw zXOaiMc{=Bx78Y~2D*bN?KSIL+kl4n*h z16ywnHM$oGFTqIN_SJoi&G@q{PYz7$Q^-M{^bF6DcM|Ch$hPq9Nt8HI;{p6CjZ9S& zwY4L2h*Vf(u>zo0e4+cKha4}Q?5843jRK>`1H69~)n=iv-7`igzFYlYdp>ah*hlxS z0&3rACEv)_>L2b^!&2eNbCgVt_J2q7`d9eRkT-G!S@d03QC0QgLFiMEK0ExQvc7xH zO&AHLPqk;SJz>1TXS?3s$yr#V-Dk##j4%(qs$gO+DKUj{QF zmXa#JYhK!M75&v#cu!vd+-l05r!I9i= zfSV;IfA+(`u58Bt>!)LQTyUi9?R2CLU^tAdfEIf&xQ0g~wWer{GHX^IIB~A(dvtB}$ z))D74C%$z{eOn~d;pB>-8#c^M!v)!U=#|;sWog@-s@8wCx60EFws8WNVph~hrqs8Mt0*?U z!#`DsYoD+-3_K!)EVOsYN;a=&72NE8_Cu@6%asYpuL|I3P~CP(q#SYZTa(!cUIhB@ zaUL+O* zvm;dy`jL(!cjzSnNvg0IP%0U0!Z?@Q6K9!r_=qs5?T@$juFbOyCVFN1X*@*&_^c}C zm;aWFgZ^eERareB0^Q3=-1Sky*XhXpVUeC-MO!)p_rC>uorH@L?AkQ$u5Mt#waTU* zOaL<0ZA<;~pXRb5@w7$^fCk`~ObT=5L`WxI%cdTZc_Ae+%vMjHRlPqvH}MYHc+5R8 z@bub9QhIVkdqlz1{K#9R1i^a7$Jk&$j`ft(LJOnGk)rGpSLY|5rhEv~&%2&yn)gbl z!(6>xI^MlAM@U<y!%LE-=#T@Y%?kzGaKPgi0 zmY9I=Q5Q_1)~74|#}gxE_nx;n1~41{CGm8yUvod>m-5Y=Js%MU$evsqZ3 z!5}*N7}d8&Sb`o=+bCPVjN{&l8QD7gMjBxHc^K}Idq;HE>NQ^Vx7OQ<&PU7@u2_|E 
z>m4S+3(sXg){pXFTEF^*hgOsV>knw@>FKHe4eHbBGVKw1R}8Vsi_F2_`TTGyv1oVe z8_UkzHx{P?Sa)m%^A1gGXq7TXYLTm`OkO%pdR?=wj-<|?Ugp2J zq0BQHOssEDP;!?wuW@1AQqoTv4zWE8Y-#jT&^<UUp)v@{a3F7Q4dIC7;OC^&6YqE_nNgk^M z|83^_M#~VzL<)f7b0utzusn0vc(=r)k>3VIoV;ShZdVsky~*hWVB=x)?DbQdp~e4S z;Vpcj+ikym>CwKFn32I?(ono5D@)ddqgs?HmS3j)Xxh-R85V?Yg(y_lnh&-5i?*Y* z9VH!DG7?^+P#Vts#qZ>T6~%fyh=;f;zVe20+3U;;*PSM3G?^herk7Yx$%;&P zCZYI~NpDr(we91+r9rn%pr&4=XwN(NVY?Emq-ZMcRT7nt9mHW=i6;5=2^dX1Zi5Wz zAshU6)I#$rL!epJJ0R)lE=fD=9_p=Qhk<3`%m9Bm@x9LPwK}{vb&42Q4gUxRp z-mnV(8p66Bw!fz#dFc`8A}(?_p3Xf)D6|4(+X>{~_d(x}78b*qN~UHsFETBgCDQGP zgESR6AH7`3)zc(=Z@U7I8VSxAX=J%QYx4dJD&u2#>A_L3T5VljUO|CE^k8Xe=@$ii zJySl-Yz%8_$uwhCkUIOfbTZ7hb-gPyHi`RAL?tOs<(-LeCNg*{s0{-;j72BNhbv7A zw$08O$*Ye&<=ToLC?H1V;a)CVj(BL-wIS$%?({C)hhld|rtXV?Jw!?PEBWGs@5{d?U`*iu&It69(iueXLA6z^BIQ@I@_YVox z;lipaii*|~-a6;mceRd_ymuis-K0*?mS;r$EiFOR~M$fa1vNAp5UX zBmb@)gv58#xz{Z|k&I=-3CsjdtX5L!Tuhx1s7%ZVP`3KRzGzAdmwqAX@z@hCZ;)(S z`4l^AtZQTxSyv~9-v8{{+Rj(ek|&QHDNxhV(_3OzN?#m&oJiEz*q9*`^vI=gh4y`V zx+(L9248C%p5t2A%EPW02s$T<#6Q6r|6g?DH74Z{!$ytcz3;JMQW7rgd9PyyOC}cG z>npYVEWvU$7K(Dr3?@_b5R@m#-)H?+eV9dt=*tAviiIC`Br>NvPfIPpCehvR-d*E3 zr1(~ZIThFp*sAItYW8uRbNhKS4LgQ-rHA79wpzcBc~GsB@-fyjjcixu*Y$rGNbcYg zB&iAyt-7^r3}kukDZzKMiNw8|yzUF1{9K3_K2CV&ViDGNM*4%$*=w1q?WknaD1qtY z->O>O!{*nXtFFoXj@^GBKru@uds?a5XtaH({1T<5y>;__Baax7nVG z7M*ch2j`bYV*xp%H1kwsXT#=%u#9p%cPn=vQ=7&6KeQC#E|cZvQ{VvcAJx?c_brI$ zoEKV|5@pXHwqlWz_lzqaM+Z)k?|SEu95f%Hjmk{yc#s9+vIWJmBO0*06&CnPGM9~S zw+L$gzdZOPY9NTBZOfj_?OrGN?F$x_!`G+t{P9QF9zpHdky}1INA1Um2ZNGBGssdT zn^2QYBQGeODp$-af+|(`}ygCW2Muy2nV#J zq{LwH(RUX?W`BwxKEu+s6ZGbX-*oI6%(SdBnSiUZ_EvOIifXAlv0iG`E8vy>U@%+@ z;3WeW8|E#-C;G1kH`jx*|4Wmg#)eEHtI}o-i?JvLIWPotq$C1~1=7xL{Ys9tL6AWv z7SXS_)6a<;=IosQA?;1y!TmU1-tLY0NNahWXY$4KY6Si~&$)DbzH43#NTOi9f)M=< zv(A$yugw;00nOZqsRbwAr_Y~*5*O~Q$WZIJ9=43{nQ&@l#}TpwYS;d%ey)kW2Dn@w zicyt6%6r~WXvqAce%-atru9V5yS)B)c46Cp#LGQn6K9ULUoO6`fdyn0O^utls`xcJ zZuOGPmlUHD07OfF+D09i9^A-{MFgzUeiw6I!_dc)ZeUa7N0=yeD3dP^XI*!9yIJ_& 
z2WQ16Ragsed2;6oEezvblh$w3PT;@w%n(&?BB z7vm^DNpTxB8~&N+vp3n0M+y!hQbMn9{}2%c(;U#ZUuhc(E5XoYkfD;J+gE<1EXYm$ zzW6O?=v*D6ma|#r4&bT)WVLKt5%3{m&Izpnf)jveji-`B2f=dX1cpU2`(M}4(BM^d z<(S%zLO*7e)dviRpfgVsVq)T*VI`KuP&`s?=!G^wwV6dH`@1s{@kOrjuf(+HM86K= zn~iOPY<2%(!0)}gp$@~hm+WJ(sW>gnW$|5MEZ(l&3r4{Xd?vTgs84@m5u&3b!rwhv zZuD)w{gZ_)PV-)59rmL@>#if?#i4Dd$NL|Olt8)2w?*~)qxSS?XO9Q{$g3pmhwd1( zxOTiHvQEsZ1pHjH-6gECq0z?glax1cHTR{24<0fa&M3t%SJiFwTRELT$*(uAm`|_}WHMC;Mli;(T^ivp6NUGKQ zPH`C$lvyDM!%D&dhfYrOO$Ns+T~sWpG2r>N{t1e8_p>u?d2rHaNo7sjU7 z8+lvTGThnz047&pEC z?tgFWcOZKZ`b~EAeltb(!OqBtZXC1p?MOzC>!gxfLh7IHt0-jrl&iBCuOVbX8pm^q ziFva4{nra{uLC|m+hJlUA_SbjSIOz=SxfP-|G@nEjyNbJ@t{rd=^O^X68Ew1;@rd4 zT(5=2)R+g~BySPvES&q=n#@k`jwkZ+KD{{AY^Yhlw=I16c5GIArnmpo12uFKuquNF zSc#0NBjpps_UV0-Ux?Q8Mk|Up&-C@*GE4gx<@vKIC3FIZBeBY+n^jP-MwaVil!+ch zFB)r9cdu*h!{!qx#1>W~5WZU>r{sHvDgAO~&9SyIgPww%;+A2+_<$1hoU$vsq4H)` zz+|9f1KLeNy5{-Zl$m0#}c;*@-K;uDL-1uS{FwOudC06*u-ZVSjJ z5hGYoAM3r~@?r?<#cDm)^mqFXAJMXqPo?f^UP_Rxd^OH%unnzd;>tVFJVKx=>izqi zKH3Yp!{z%jgz>tWd@_yV-gP*dhIGgp4s?8bH~$XJoRW2VaIZ!BE`c9 z1RCxvm$?95%|0XqPZkNyPQeYSIYKFAh0T>Z(N6s%rf@Zo9&7EMatzi6r-kABhwe{w z;(|Kac4Uh&{`L{C$@8Gi>i8zJew?63`7ako&Z*qARkU9J86au0ho*3-?ZCpx;M?^J z-Yiddl8=4&#tHoeXqT6lVZi6Vye7>fzb48&%9oxRorz;e4@oE#=dT+d5T0(x?>=w% zfxzv`GVaMX#(L?r-Hwmim(d4GsA^H#1Oe zrejE|$;}ccdq_{G%W(=YKH^Es5b23>a|T0OnW4>63!Ak zvd7D*2D@O*tVaZ3AC!w_1cOmv0y^Lo9C5@Ne*g(_nIg*29?>HT2qL)hIG!-V2thXU z4krneJnn+=Cl~2)FI#hK>5T-=$Y&X934T9`op9v3HWSg{(s;eDu9_0<5hAZ+1o$o> zu5>a*!z70z(U$}IA5f6<6Gwm>|7p%rK1P(Kxv~gghfm}w(TeZizc0GpN)1i*_CLd7 z$W*_?R9-YZzj^LDZUkuZTm9yVJkp*dy8}g|I+>&GN-$CXtCPlP`zUlRi+M;nD&YH8 zt@4brBpB`sJFnf}dA4GL$$rR%m}McGH*)N3wr;; zr&1cRXED>@{#ErRon?@-a~Z~cP>ZV*JZ^Kc)|V|FFmRa^X2b?r@q=TNfD#_>PW(qJ0+W^H+qgWm5>}h0nyEJA-Z?Bkph-`%)FlueaU(+oy$!$&rWMa z)xylcy(X4~3XA5sjD(fT)Omw%>pAv7rM;yY)oOaY2^7c)f{I%GB?mwA+tg(ai=1@# zrK!+fBV~+Q=}jk`e}n>Wm$9BjIW^=Tx zo3E~RbC@5~-MX|>FP9old073TzXKI=5fBK#p}oDmHh6301^Y;CBEx*mU;xt5vcj@3 zIOxRAn)N(>qR^67g?o#@LFu7Y8_4eS^>;SC*WlR{qe2BWfX+s46C`d 
zwFdKX(8<5L_3c|ZOAy4}1C*uSn{KvXG{!YD=1?ZXrY-!hvmbZHMGXh0N~0Q&FU7d8}-IWOTxF+_IA^N*s2#!aaICjSRJ z_uqFTrxM`lE7Vr6CUNZT9ov7*5Z`ZR6x^XVx;L*E{WRA9i>W5g=>ES%k@U!m?2&=Q zf{J@cHa1Parpc_qdF9@g6LeGh9-j_b*)4H$^bW;*YX#67!XQO=hrrSWu)W;;F2ddP z*Uo(canBCk_Xh=fPjrQa&~{J?i@`PFDSLP|?C7 zTXnLYZBNqH79KTlrl0n^^ARX=>;+uM9Q3M-FL%+t{mk(+=6Uzmslq^u^yELf4&h1sKl(vj5y_pZaA zl!>78<29S})8nGj(i_iOMvYhk0Mfj>KPb!h2I9SKVqgHNtQ?vMW*YfU2h?igyi}kh zkYYPQVv!$0bzkC!{|$raUBg$5bu49}6o!lA>tyRUKS2v+0P(Ie;GNjg$`VstQZx);4Tee-32i-r}kXD3ag1V<;rs`{IV8EgK zDUF#6Z}eOnG0Mq5Js8z0s;Ds9-UApgF0-wsKWFf$K2tiU5kv7O1~+{b z7RCQiWt|XKc48~p#kX0!v+lo87k*y0{p4VLQ+F*$9`cMh7^o+tzfgMg zVg(KW+|I0wO|r|=_gmA|go2RMA2CyEB(-k3TUoT27Z@JI)3wX|1gI@x>kfQsIyyo# zEf+=3fpuXkOkKtIR_fl$~Y+bI>{NeJUnH$wR=$v z#duI93yNO`sXOx@MyFRErDxSAa{~hds~=h2aTXRjL&bk;43bTHLRdnnQnvd7&B5*b zTYzS7?SQscK-qphMUVf)Wgg3UNB57VlSj>PZ#5dDY!lsN%D^nM9z zP2p|7Trbw-O5=c`IQ>KvCMLU!YG%B@>Eh`c;A?F(Cw6c>u9CvwgICMJh??)|Zuf@; zXt^($Qa*z;>5x zS+KgPd#sWE6zkzUvt}l6(98FW!~n+DuE&sb8o z^@H_yMFGss_y+rY42ZnTJ|ab+inaFTBTxrLJIhev32C$&B{pMh9tRT%Yz2W11(CEn zQw5O5d`ru~-L5Hst>jLTvL8tI&O~qhl;0x!#5mMJeY##{`-d{qAMw4|+~Urj$PYK& zWAZuSz*o8nU3K65TN;8sCN@mAuKk6N>qOzPPl6nZ;K8h+6n|d-x->X?(m3AbK^Kf% zv%^qyc4V*;arO2|r*XT*lF34tW0!)9KP_M~(kl5dS#2KW_^s0SWlHE7Y~PRhWvZ~1 z)$|6a55;DpP>>5|K(~toA1xA^HXoNTv+Xv^5VEiMDj&4AHVz9t$U!MxLBut-Ie$s} zCo~upy4ny|tg62FwgCnYsKx88<%SIO8Ih6bxB`RtbZaN-L`~(rWP?lq>sBxGxHf@dseSWHJ@>z zec%*F9#>?)6X!_`G?Eo5iSzKm#{uJ~j-nQ`O@i}NM&AfsoC)KR1J+ygH;A~=%v&#Y1QKm2JUrJ%^QEci%sK5`ux>THESOhIaFh?_BIe%ED5$ zWU82|l&AyIFP>!Q@dW$s_lvvt^4EXK&29I{ksnh#IB$BBnj#QpIsb!B5NWKabH$xA zIlIf6!&#}_bM&`S`4wlIvuV%rVT3#5rUkIsopC|{mR<4eM>UAe#(Wqr8&I}DA9@Nz z&TA(vVk;o$Gg=uBlfztJ`n5fL#XrW#ZNvu>kbAk#hlzkg7dWv(hmH!7>UVi`n_6IH&-;_gwUixz)B(@11Rq&bQ&YL;}O3k7<6(7|%e6vxsuf6!IpD8_$uvibGqNLjs_H?!u3Sl@)O?0%o9FA9ulIcva+ zc0SV*MYSBh{Ia%n%1o5-2*K?ox$<652(aR6Bf4Vg2ulw&x7f1iUJPc(Bj?N3KwXbK ztWjsX72}?i!LOxh!z+ATe-%qh3O9TiRl#)ziu_(wwRhc(p zOvSvJJOva*deJxFGDCxr_%ktke#TUy|9;MB?5p`JhL~5DI>`OZ-Vod`=G7;P*mw(; 
z)BLtpB_*2lrKA)KoJ7oJkt3W;W<7r9r{a6*j2y*Y1d7mqfX5$=h=9YNUphNVEJ62w zC9)*u<*^=Y&rZH18L5y?(#o7tI>qRHQ*WPZ^wcpid3Dm1d7%EmVGq8_@hXL1XSm19 zk4w$81+vNnr@?G&Y#^jHtuNR6?I6H}cGDrJT~*1M_Z=v^q*bWpBQI1C&j@EMo8jl& zpsqh2C!c|=@6cuPRPJVpJ@_NchrnWB{QS#6H!4Ti%18Ih%ShHtT^V*O3TZk26xX?i zVV@8ta)A1=@gNlL`~4u3vbD8^4Lv36$`H13JDh&G)P_faBQ)4wY=jVDn>U2FsyQ=KTTVXWF`#jkiP6ldUrQ_k$#pgO8O?-pxkOScaUTc|BZ} zDgncqh5MCjojNyQy%fE6jc)VVo-v^5lw8fQNy^G9^FLI3^VrV`au(=Fc8e&WfrP{b z+9I)h=1$Cm5C>?i!LraZV@R;HMewm*ru#4YG^N)%3wXPJD<(jPuE5;{sigZ0^FV`T z$pozLN3O*JSVsD)0+mF5RGqq= zCT9)qOYfCbcjOhHF2)MBY7b+LMP7|@U(%)Dy}eH(Ntt%wv4X!|hh*jz7Za1v3EbdD zMx=LVy-CiIjRN}X-2ReaKagqS^eA*GP&~=@SdwzU~2N%oQu;!I|Zy&@NNDQ2+O*J zG-bT(W@+{NA?dS&(GNo(fRVw{b6B_f^H90O0UNjP1Y?cwcJFUmUfjMonS|(@%E<8^ ziHnObU82c!u*4sLIhRjxdXxVtuY9g8=P(x&;K+l#*h)mx7|&Nyh3~0t{2Q<4@E`LB z7d9p6X$lNP{hJ<~C!dbU57eYU0P5#C;%D0PmY6+inAf;<@vi%V}Io%4&vWHVP zui%TLTCxUfg2NPVLZ+-Et!HW+(x`+(nC{c-t;sMMDKV(mSX|6*8k7jJRitV;wrF$Z zT@`WNJyq|xyROCTH`vnmi=~sWxLiyZF0}NHMp{f(STHzaj+Xeb<4@}u&15Q^L%^<( z6EXX`?+dNHOZ`56^~vwef^h+d99*-63|WMIQ(@Yti$$nlB)x-P;N1?f^@E`zeJx z5{$91tEzm$idj3j%|wU&{DRLU=S6Ga?HzCQ?fdn0C#P9em@D#1lp5M3m1IIs^u^~# zW<*zW{nsYH#0p;wu?x5;QZd7^OZ@R@a5?Fz+oZWqZhk)dvUJ*Epc13|+l_r)t;Z&`768Gb=(mWXO_63zePp<7XS=u; zOy%ZQ*w%HIErJRV(2or~@mfpf5j5z3@9K2o_&1{)dERsejmU3D2=Y#zD9t<|Km_KY zWjbcWXUgN@jwGwKe)3@P2MbJDJ`f8&#B4RFX7m13-3whg`Lj_Qyk_>ad(XlmS%Rko zpSZ4F1IX!zZ?2qXotDAC;x3(G7P`lcEX$ZczfR&053Wys10qvEj> z?Iin_n&{1mvd=3eb|UhpN075^S1OsT=Gt1Ckn{Zpy0i@| zIe9`9@U@ig$~g#w;g=}j@kHi-Pgy{CSR11Ke{%bOHXNH(X`ci*GP*(YdH{=P4)svX znb^80K_C~6RH7;lI1bcd%-g0mj`rJMosRj9cdipy-5r-!Maiqgzjkcc{l&m< z#DzkX*1XPFa));(+k=U6kTap*>I_*Hq!s>)pQSPj7Y<~>v-2Zc`ZJV|vx3IHMJjb* znYfHLF?hg;%u%NtGyW6kmE`V+*cb`~>xc=#+*ND694cNIjS;GjVyq^OP;w(ZEPl%r z>Nzq@5fKsAS(1gxb)Me3&@LGv&sFvDU@|240b;ylcxZ@xgqg_~_U4xGb9wEGWVnCG z$!Ly#&V{@}6Q~6kG4Iz-VjgOVI~97Ji6eig14Un%+}_R(eGWD!1v{&0p2^RBuAQV(RZ}w{ zJ_jlTF-wVZplpW>pn$jmQFn%RSoZnlZyKb?h*z26<=-T(a8Qx3I z7ew58rh=nE+{&qo(pbdsO-#0y2`j7s0^e2Q@t_R0lv3wS3p(mCaqCd^ORd12cEaMB 
zfYO+iX*t$C5sEw|PR{(H1)y6)7^wT)V3wMCNwhl?U_x>&_Fz|b{Y&^0=*#Gn3&4)m zOXW1AqS{r})6;8nA{y=oiUD#9q)g$%Lv@vOW#Y&D$~q{yg4Oyu?Xkc8iR4@oJx=C4 zcad}xPVMDs-@cg6Is~D&lfA&c7+2?)O%^|?1MzD>)Gt?_zt;JhT35|Q~ zJl~h(zA;nl_#1mq;ra?%cSy^L4!)puUjTCI@xcALg17%Qz><8JnbMYgFix0PS4+Pz zCi374?jOc{*A&;Y);+r?R#3v$1{4EsS4ak$4Afq%3L=7-C-YHHsT?L15#WlQ>9i2} zg?<0W^_f0?3uh9wv)bdhgTyM`n3~q`n>rOC>LCm3KBDsWF%XNHE)HC{WqQvUw5*1;RE1TkW~OZ*qi8Kf9K2Q`qF!n zwfi8z$*Gf4RPib=F8*;a1iS_@$!f4upphw`qDMe6LMdGzYfHokbo$)I{HlMxK9t)4 zlsR`gskDmjz17TOwRDg%TR!8EJ}vv*|GPtuLpmI)%iZcV3b(pCxa9#KNBA?Mbq_{H~YXLrZ3Cwu0L)iKBP`dDhEW}rgQ!AqGQbqWw9!w;x!)(;rAtnoWKr-1Mz1E54eRaF%ptWPIs z;q)v+TYhh+qx_bMeMR--`zEs=E2h;a>O})EFI<Hl8mqljxp*$ z{`n@apEO2h6dm*qO7fa{I%Z~!`qUN(@~ESPYa+vYKgnc^dQNuZs6*cOTo@ko^ZmkM zD%(`=Y%anGO`7W`{%6OL<0+(U7}>ES$u!d_T%8=dfa?CK?s_=TjU>hMMWYv!I5z9x zT-Z+=TDYG5!sD)NOp0&ff4u;rBy?zLZ3D9U=YLW-j2k@)oGpWjc#cfo-DG$tz7RNC zGoLjEB6n?~rFhLMwbuVRa!~fdOhdUZTbhAE+~vSjEc-j)mm8qBMPBsXqNbry*pA9) z$~~GX@C>uP*~vEYZU?Z~tLLiwYF=lKI@UN~yDN!TCALb)YUmawox7oRv?s_bY)CyS z&}J~`)%qm02~cEf*IAjxqKe0m!ABM7)&S=HiZIlO{t)K7KWeY6$W>o~g?2NvJf+oJ zdft=RU7v0fvCq%Xe+2M)Foj!l@`)Qc*)dSf4F~?Ap1BO{(^owL*$l9~N3Hca(m6waq*o|xFy{ELd%%t_Uk@n{dZrtTjcBeriF#E= zJ{fbSTvsPbgC5cn@SWHO-om_1%Ds``(ioA}%>}iK4wIap86Z|UZiR)B?3`>?EH$oW z>pihJ9Ziw@g3*b!r?3`CB{x)_2Q>nsLF;HaRZIWDNox?cIQXD2(<5fDv^zH4?DSA% z_l~UHj}GGRZGsA@=W;JzaFWU#S+(r-@cAxA(Cq+i93?dXNLdyeYBt#A9kvqH_LS|a;(gnmw9C~xIWRTe5Ya`GdiT=nZ zTR>A(BlH3OEhU#-O!$vU3K9IW5hSPG`+*AWvg9+)IlDRA(&ak8{TC081s1CJj@bx& z?cipdVI;)FV-Ho|eT*}I_Kg0r(Z*#jK#0ILy85?7YN?V$`kr`|^Q;)UzS46|b8Kv^ z2=XB&hL}b_8A90jXCd57@K59Zsrm09iG{cL;kM&F0a`M#9Liid-INWfm|U|-wMtE> zV>TuSA0dSjAADIAn7wnt3CE9{>qRZ4EE&H+Da--2oy4Q@qm_QP?Z0m)wxdfhGHc=z zpd6yGADMyb0^4*uN6=ubGg>JU^aT3nz%m+FZOr1_*bZ#SgWe0gW9|CZEKqqcxG`FY zCYhIi0balhVQkj#x2$vexZ)AXfBWzfn>8Mn#RJ-*5W>byOxm34e1* zVJ{VxI+x3`xny3YV<@KLBK@5n%}#hKdg3#syDN2VVI>#-tLI0X6_8)0&#K4X=Gz8F zgxiK?`NE?!r2Kw-Vow`vw>^_gC2>BsNZX}qo&{VFCUZz!ZXlsr+6Q&R-nXS@zp2)e 
zqcap@X%{a|6#)D$d%z^>ZW4(x1|19|P?x3p?zT60OT2t3orI=!PC63Th*FZHQ+Ke-cet2?pm zjfusP9ttQl@YHDe&zsHH(!d0>BH8 zD-UgwCZ=!g>PWIDD$+8H)o-ml0S*S!Hmm!rbNyn|X1S+RQ1Ws+Axme@`2kmV9eGbx z_s^r=PE`Fua0#c|r5C5 zzf_G)eo3XZ8>59wQU6ENS#U-9yRcCIu48Le(CqO;r?{7NHnb?<_fE z39_z!+j<8Ss}`1=l9eCkx_vhxq@x`0k`J5{a?s;G?06FU4Xf4+fe2b&g1{*7v?0Ga zY&o5{<-(s6U`BKQau4tS*?gn4{&K(}59HFbM)kS}%WBhQ3PQ%_{uG77m5IH{D!N*2 zk`+|lD!s;*+vBKN2I?|C$0m5nSzRovRkgTDdmfzl;jtJ~TQKM;L;l2#nfD@~zVHQL zR__AoEV)@)_T9npNasf`G{i~7txQe-Z4sGm>RM%FF{ppBKaHXuoTP@;&sI$>I_O-M zjjr~SzQ;bldNfoh?CnuIB0hHso3ai5WgHU>>}ofYUUEe19*;Q@Tq?yCn`w?8x`SHr zjU(~!?G73@LE>zp|N64+vE16=j^n3{s%pHDYun9a+;Ls@kY~` zlXmk6oWv@uj||RuRw0UI@+2Y9>2#6{>8Gjs4Ny!b%#Pm+EaZ(ngwcPUtblSK^f3*Q zaTk{#+1eBmE_&xI63b3}ukFPgnfvp@_yK-D@$Y&6{hwKPGQCgKA79P8E_s<-TVpjF zctPlLWVyaD+(7zh6u6rAc%5SzrB1)b^y|JVT}KVLQa4S~c&l8M=80!d!xBI%Ku_40 zj4i4B--r2{x_3O^<>F;~;LTzahdef6eG$fSd;Oc2VE3p*y0<7yFAyq(QqH2{=fvrA zmFxBlr=j&jA5zRRilN_hzxEM|JzV9c^2FA7g$;(?E3~Y3(}cyy*9Co|G%eEyBWVK} zV{r-t@|TU&Wp1DAi150&bt$G#nQp7-iLG?gRdL}oHYPM*;Me~p)^UsV3@HH#?M-G) z>@8#ScyJ#E+D1ReiVywv9+=o#iBJ`?Fj1|t1Dp};x9uNa7&LkC$o^_{eA z%?Fj$ygGkkL`?aYMeX(F1{^wnXZ5t9tuC{S9wmximw^m?5E2G?R>Gf{Bpjg;^og&O zk5?lm*+1G(e4^XFJon)6N9>dt7Mr-ihN$K zRenb(Q`^Jg<_Cl3l;ldJ6I^$=E6rlpHN4O8L=Qo5^HyF6&-gIcj6|YszWiTQ~QI zK9xI--8!(MJe%kjf&{Mf_A)Q_-rLEl?_+YeeB5tKyZnf;%Y+Fj=>EV`+@JBhKg^e? zg*cWw1ro<7Pe57aKks$9KqlthQq9^!$s5Z`uE)FG&f+cmb<`48-f)Q6-gK$$|8a7{ z7X*qx9^U3^#|W4d%*acc6U|OJ-b=iV+gfB=6O#@x`40JR!{o;SCm~@)NjcV4(D3_v z@ZZzqJ5kYPiH$LKAyAI-R~?1!J#&bUunODJa{uxK`O;PrR*&-|`{aN%Be3Z5N2@z- ziS|W)DKAVZj!Le%nVto^pD+XnBZk1_^8wYK@>$B{Zi#o;v`h-jca3Myt7U&kO}C zna1E0F(2<2d$B)dM@1|$NpbaU2*_9yTmdsynv@$BY!WHkE6eLcE&Z%AYC8PZXmrNm z)h8_jQ}EbP+tKvf3w%QbUkbRU4pTV!2ePPb$C}RIca3sKMm!?qQ86>Z@Q;3 zsd;_7agVW(;<5R}vSih!D`R-yt{r_Ka~kIC%(_!qLv*0u=J&htXMR5By#|hXW&O9? 
z)1^|7J`s-#H9nWPFWge6H)q>~M@1$MzKMEW@2rvIUT+ggvtIotOOaH!>zmQ?RWWVB zv1VT+BB7F6A`Es_?*9HAc94jid^ZK7O--Jx885AtiEZ1xOLw#hSZCzy@u{)m%1x)F zU*pF*vILPo9!L8y{C=rLN1nGD5!KgeZDJt33p|I-h9bIEq&^xSPnG{Vq0ipgi?NVL zmGw*p*PaU_5)FZLB8|3{FuO9~9#1~E-4Qqq)mE6X&=ehOj0${$&iqf5;=A2QRm9Qj9Rwrm^rqh_*ia{0<9r%tFh}uj3lW>7CMeWQF{~3hrl;YM~ zECv^7pOc%e*Kc|xGw>3O#$PxLbh8$w`GbK}YA@785*n_{XxUNw5udxsz#bz~PSUSz0M| z06illlE_o3yY2TY`h;J%q&2)WFr#ZM5o@H{~t@-ADU-<0?K zDJUpdH%+XFA&SL>kY_U@eAx2&I+eN+a&W&%`+0JX>l?pSrry%0p->Syw!D8~HMNS; z-Z$IKPpPe_$Mi4r4ft)h3x_HPxGO*jlnXCv^)!>~&WN=tl8h-%@*=40ceblWX*3~_ z6MBuOpmw%Me}Kv`^vMQKsTA9w)fofsWgy}SZEYp;9DZ<1!fW%Y#G&GhR)DrbWeA8Z za(SJ1CSor2a|ZmvTjhkC)QVTxh>PdBjgJ?E0_ybK2yh*wHPOmpJb>MYx z*h#5=&M~|Bm-^9wSeW0GHN&+)D)fN$1U*cN8qDy0x$*T2E;@r!*$kH4$eRRI+j|bP zrpfu%-TKj?a*gnK|KUJr90<+Di5!(H!$ikhL#gxsxsQ@aS8s2KPfeT{=&VQ zw|S|AU7%iGUY$|Ow0SwzXW#W&Vf<5-29->_d8NK4>_EiEUF(fhjc0)jq5|j_EXlNd ze0+H}a*IEV52el=>6-dviNhj@O42-L%Cv5*&;_$AHZL9iqs+=%^c?{N&*3bX4 zT@M#ukQ2Pjv#BEz#y-D!J#dVF={FS1jJEsM;@Z;n-s>FJfty|wqR~xxwUJHu;9FI> zh)Sr8TmOvAULh6}6-DX#>r8+OhhFn0GiG_%bj2fMr$mN;a$?wd=|EX~L5Y_DwPz z&Ry6reXf8}(BBX#d8q?bQ1?L@|+75ysLlT zZKDE2!o(Q1Xtk1ztKu&y>0-sT*W~Ui2tT~>TiaFQHt+VTd^1<|W2P)}9+lwSywVr9 z{Eh4NfNPt%C>B)YtaEXLd-FW`P>?)@j4TT|1DZ=HvM@leUly1VD%-dPo{JCy_v*5< zsD&1HJ~lQsH8D?iK%8>0S1=Q-_9bLIj#+6jaqLYH#B8|&aK33EsE9dIt@|FRh1uHY zpK8C4(~}0Nd{U}L#FGF*|KeKpp$5oHyi?Wre(n-5H(DON?&X@;h8-n9%>K(fk0ak0h>BZ_!Q$kMqhosR zb1>iN2DY{r%@=r;UPjr@GFfgiOycWzgpTm5Rm{mlgQge9&0nDy7R|6YI}SLSTVnioaE5tA^C){O9#*7e2e?^^)3!iaY?v zOEKElEQ^(MNuiB4U6Ey#eG71gM0D0!GF~mnv@8Kp?4eT13(~(CCMP}B?dgj8a#LAn z%*+e~p|x^3mt&CVs}Vv9`%EH~l)(ZKb0-h`HPEOby}9Sw+_W#EvXRfo_>x?I_TA^z&^{B)J%RY20W* zcC@*j5^XBv@bctUVyh98b&sQ1L%Z7v6FwMv+$|=x7RExkw#gNTn338;@#)owq<4Zs z4Kyvi&0F_R^k_4&PxR3(v^lo5en3GPhdTA~P>RiqF3kGdmzt-nh3dxvgQ# z%ngLL@yr)VmBq;Zd6LXR(LHb-VmVloRqW5eXQsh<_)@&J?y7V_>8Xa9>i8j`d^)>ODT!=-y9GEfa#5 z7>op*_!?n%!UIyIdPMj@-b-ViokAwn&0^XEkfyhKat}t>oC5b1R5~Y+_n4Y9Pe01U zD6|OISp}MN!z;*LufWxa6hX?Z;ls`=hmQ??7A^&4%BVSfU!@luFryueL$x!5-+v`* 
zU#BLWvov0)q*ZaMJnqHs$TUe$TSQ5s+^_h zcJU`(1HBHNI^%E|w(oy`s?1ud&%=O^5!l< zQejjaV>bwL&;!|Ux)rX{mhzDN$mGQgQo3C@!dR#Oeye`Aecb^eKFN}B zGdONU2_JmSSsajGcF<$!z-G>Sg)ZHKOltRsf7cW>T`aL9h z-wXrBdI-|%67!WH!E(*Lek56cd=447s0q%m5izHrG8^h`|~=k#OI%u*$PMj_YEepd);L@jGD6u1?<@#J~})S zc^8+{Zr5qZQnrYva5XJpRU2mTbuRk6hexZ`zg8KEPmmmQVziQlgv1%|CD10GzW}Nh z+YQsK5jpu({Kf~`B;Swl`aHJQ_)1B z{g;(!kg7sxbx$(VClg}>jHNItrm_C>AR=j|;{d0-7;Oq(R?>|G@hZVVQONq=$wGM4AjgZCIQWKxk zCa%#d`!bWr21;@54Gs>fF&K=wcS?zVWC)KjSN3oh8${j)V<_o(Du#n!okI_Xx}6dP zb+vE;@-rt+ds~0z<2TfwoIfLAExO}XwwjnQ zvO(k-`5Tr9`n)y=;4BUOfFa+zN%MFm zmDU0b8g7%j~7)^_jr zmv~!3P$dX}DD_c7o0Vci9)dX)^ee!eX%ydNOa;UZGFw>15}a!@Nc~|`keV79nKQ0R zp9zNnurMi|v$s#yPbm&vZjwkT4@#e#D8$vNA3h}q6^sisMnHSzPR?MlA-4DIYxcbP|D3%^?s_0U?$8Q~giVBL1*1$=_(FZHgfHZx8*2fcv zO)2P*;WuSNetN4I9)PFn<4<1#1Oh)+(p-Yr7|fz4!8^1Ykc}^PORo=_57~TFXiYqL zNA2eA`-<;lB?XNo@O+vI-^aRbnKo0n+Dhr|!e2?{7Kudc#F~h=Oc6wWH3_dG z9~*q16BGww)9{t0t>cvw!7gxZ{uG_o{Q-Z>oPLRrSCFeyG}hyzLfyJe{Q^nVc|@f? z-0brLT(g38{YJ(gX$%=cAUo;3UEN-1TD1a(sLGq~UwfQ`^6rNkGvC-7&-4)ecH6I= zwgW`9jL8DJJagDQv&$h5c5^DF&GpW2U<`v|si?}uufP;tH`DgHmYWbI58sx;kRwg5 zY5d6TzyRS^wme3YW6&Er%9{-FtWt=4hJQU)F8*J=DalC~bB5ioC3*m?iS9A5ONiDm zv?@dBfTY}Z^CO|t6{u0_KYys8yRLKwm3#}!=ckj+ysKDfo=k?HU10b0Q7Gmu5zZ#3 zn7R+^D5s`}{CjPq_slH!yu(mHrP2Vf7z01rMCb;5p|+-yyd<5A0cdwm3aqik3h(Ik zRt}dXy?RE^ zmgR)Ta7H9AD=A4pAlusLHFW8*nBw|x0W`^{FQhEYwr=8nZ#KP2O4$_IeA{QMMjFs3 zL9pTH!%aVB`Rx2q)(8i&A#UxbmW2^_M85Es6H?s9rEp< zQC!DBc82lora|D;6^Qsyc@McVJ!QGwoPzwqhW#1Hi#>lB0tey5z)zg;KKkP8}mps0vSN!bV zXePzw_Av$jh-*Wta8$Hcat=d2M+r(#)XU$Y**K(7_Z8v|BN*&Dq>v07EqA@2clq2^ zU-|zmKwkW6vnsapxqjvA{27sN#Ue4=r)xBiZ&YrBz80vU__iLgYlNT6*dqosZtSry zsJAy>S2Q1BnA+0qvCpowW2X7U+N+j?#8O(9X^P?zNorUoh*L_FS%ye_+R#*&w+Ztg zv)Qn+F(W;fsINQiVv4*@R~B^0#Js7>?Y#q584HYD){G-x9d+z{c~F-F?^2&NRrsJG zcZ+z3bXx*1qECV3-214ND|woBrwaEV>V_fb0=c3%P}4D4O%Vf5NtKdNC8p7xk$bMl zAJC7L0`tOaO#t^+5miq8Jqf5(T}iz zElmRx3^*Z>1}+qbDkFM&T$zVcC9Q2V-Sk@5tWv#S2n`gzr0asF0Y!$7=$HcONI?Qg z%zWeqySE?+z{$W%K`1EUDfPD?#>GSF0zNU-74OTdplZ0~Gsftn-)IyGe4R9*=(t!F 
zAOKsQL#0O~_(YF|Ee@yhM~JDDh|nE0%a~fpo4%_S5k8^zYl3mZm`{AJM}!LFY9qFk z!|-*lO^Vl(Ot7K;20=Yd^#w%Q z&d#D~9Cmj9QHdd86I*FwPgIblOKY#knLhK}HyNYSsOuo{Qzv5ARwA~v*$gp?yw=&u zYSm)&bDw|x*P1?UJp`AA%d&?SenciR8fzScfJT;0=Z_Hm>>JjKF{1O_m^1bGb-7KS zA%gag1*h@%tDVU7u|-nc#MZ8OoMFIqlX^L_%)h*X#6{gQ$Z41MvPgyF%^&^y1O*CD z*3NIk!!yG| z92DclG?*WiR0f6y_jf--RzXeX(1JjwK&=lR>ptF$xiD%~#4X-IKMvU|^O#V5;5@02 z4Q4Os#TZh_K5`M?#at0zm_YAZy9H3*VIU*20rfQvug7jc#>ef9&-sNcE1imx6xz9Q z!SCcG+$oomRk4{SSev5K=P;Ql7SiX!TjPTMw8g?d0I@0`q8 zQTFJ+c7o6k=r~-7`T-wN^5qH#S@dgc}CZ>Lr zMIduSch_h;Y+pur_?b)U@bO=A7XK;1JC76MWct;y%EhcpR^cx7a;O7@o4KK73mf$@ zHEz(wW5!+^FzmRbp%_S$H5eMuohaktW`~nV8~I_T8Q-od@D%y&bD)@rWy!SaP%@I8 zZxCL6BP|dhyX)vdgNj}ZaMEX|{pX&q7?I?C{UU$&vwgV5%i+1(aOqtUXlu(-(x2B8 zpUId!mnR8o8ZP6LqL<#jiGBPh(8fCS(0Qbk`jBn?9FACYA8b^rI;0l-{7}?)t#!Ae zSz{x%XOkEqDNFBQ#rn@G+{zJP@yJ{U*>q$Q(dM&rNwSPOe~A&tPCio`qgm`HzGmr+ zz6Crh{@e+Ez6=Ha*oaZJpM)<@P%9Frq`X`#G^s=Mc29MpMYj1;ehvCMAX$zONyMA% z5g?9egp8gacvw~$_7vZ);^+@xTX}Sl<)VKl`>a@e4^l`7yp(S z853>i5%Q7nGa6}mOS-IgLqLq8`=b;3h01wC84~1ku{XP#))m7-6IiJflLpj&_4Jp1 z4$rZgP|-yr6sMWK?t1E?@(J-X9k7FjHQdlSr7e+e)lfsClCf24=H@3cil5Xd?XA0m zT!AMD9cR-=zpdW7R?bb8ZAdoF(tI^Yu}3Q(C|YCgwNAVcEpH{BY}LMUDS1zG#=b-tz^BB%Mqhs}4CE%KQr_V~}bvb1;Wqqpp;?qA!}1)lXv z1l4_aG~Age&IrJ?Z9ON^a~hS>8qN+}iT1gp7Xfqs0cO;vr@v-(b#Ql8I7Mz;)8_r` z5SKaTd;xAjZMukdrf}uI9ryO)Yb;f2o~&@8GumVJgX89CmkQo)}`XI5{**X=;Qdf8SUBb76p)%FOGt*;go0B1TV(yQqS5#5G)NXU1 zWp7`CQAv1KkH$;V1RmCaqX;&hjv#>!0CBLPR4EDXzlyEEc}&+v2^f;_=Mt+GAKBpG znLPdc;@1TAQK+OqG%dQ&ZbA5f>b$n;L%)wh;jWQSdqR)CB^o&mw`j{T$PlhN=1{If|$Mrr! zY}&8;`?iof*cjTHN#xnK&NnL|I5ZNRkyu*l%Jb`#e&>9eKirDrfV_Yp5dY-~CQ2B{ zY2K8$KOQ!2Qm>r)&sm*~h2WFOUsy(F^?2nEar-|n=$E+saQPWJFu_zCGfALsDL*gJ zy&4-eRVqI38toxmM^A+l`-{c5$zx(ms+iD6=|2I2C*;>4Y$1B2f5 ztTdIyVUcIT3u~hTRD3K1svr_3SzIXlo7!{aLDZ2xvU2Ma!HUu>Mm!XBJUJOLZw}Pf zt12Am@JqXAh^@5HR;0{H&iZ*3v^zn^&6{|b`bgy>w}XD@t!J6_kU_~R^S$U2Q4p9U z4h2JR_I#XS>*O=wWPQ=(dPto@AK|$3D}`?xm;{mSQ&*os7l;!W21bC+Ad{@?fS&F! 
z$eOeJo}aL$fkw#5T%86tF)O$1i;YK01!BbQi0nHAb$F35Ell=ZE1Tha8Ft23wg__* z6YD7$Vs^{oLSSMVto!*U1!&1c;uxZ7llx%S7OvRdy>7?F&cVk!1^atbXbG*bAt3nB z)MgZTWF3HLJa%N%g^|G=?tIQ@iZ}BwNXtlc%C(8|6S9G(lFRi0JMvQaw2qRo&#ooA zMYz#7NP9c);Glnn2q$xcw4Wen_fn#!z8t_3%I3V#g;Kv_wQD1AdIkjch;#)?tG?GW z(oij7*KPBdZfy3azsFWKsek2$hV-qLRvn&@^u@bf$mm4FB4#WK-=#1P*^Q`uMPTb5 z2hoI-mhgE1{`H46#@f15LED(}{kuOpvSJJQ<&6a&RwgGm3zp3y&hJ>SPUO6N&NnHD z-nCjD`2FHhw8+Z125@O^>*Zo|Ni@TsJ|0ggn~Me4CB4KyljY^*eq7x0T9AAdS&ej> zCd+22U{+Lq#)I1lj0K21lPAIwtGT#wguX*BOO^KKyCP)B4r$=clId`_1iaxa*q+O73M?qG;K3Qg^>+T ziOwlD!eN45+#iO_$aLgR?x*{WEdE6(UN>iDnAcZZF z#9?(2;dr}SG6@tYb&3#B^P@~&X7x5SPa>!+wB$4v+1yUm!x>+k50PKLi@XJEHd#Yy znL*6+t*Ks|zgr2_hbZ#cb}ygD-p6O&1!j0%%vK+QSg(B_Rr0xh5<9XOMvgvjnJ)oy zN>XNXo{H*`qm&mN>s3^aKG%5mqCVj&93~%17K~1VHTHN>-$uMi@7AIwZ0`CR2+b=s zhlLUE%Rg3p9kgU*`)?FWHQ?2|`Dw*3iu_UsyT&hfG&c|CBIeduRSO#6Pm+JcsCSJC zCJf3?v(@Omou91Xv`$RF+F)lFzDWPv0l;(^d@Y_?znU=I}Y(S2-MO?}^ zHOzXA)%8g+T}VCcb7jM7c}p z4a-$v9(j#2kq!U$@t*l@GS0=Gpx_5?iH@}mVk4b_NV(o-qU9OQagnKK{`)ra3D@3$uT2zjCwH{14iDD5TTH*7{;Fz- zt2s1d*X6^TmB6whZ$@1i%}V|EAVXBLW&Re0|0yRvtzT_-a1R((z0E!=@Ul-@vl4-w zWr5<7tYw?+tu4IROAzoLPt??tda8XX%IseDTN=FuysZ{i+SAa4oPVKig#qWS666#T zZ}3}dXmI9a>^a5~MHX|_2T8*FR8?g^o-SU|_5gzco{639s4tRIMtHv_ zes`A)$(~?aHl=Gh@Vg(5_L5EWejT!H-&bk%GhDQkj3_-_sXHP*uMKK9%aUEHUKrrW#a z)ADV|U$&REzrd!r4fxR3H%qxrSUGJJf@@#-pazdcfJV`AgG^$0^O2u+i2NaEdX>7e zRac=Ab?AGuQ@o-z)|5d~SC+qjYrQIj&2{pGi3ow%GF0a++E^GNbNHDJ>oeLtcJRhBeRn@83^z>+ z*nN3hXzsUUoIqe{6cA)V*X$^jq@3Dn{2o3aC_WA>Op<|At1B|(0gxk;dTqdo=S0p_ z(FG2kpjvP;hrCi#TBq(ceAKV?y&!)ReB(j38*B8o`g}@f^DRxH+5}jYGNVLqw!#~` z06O|Fr;_W}R+_J)C|97}EkG~zM|kn+So3=Ko`g@HB}c34fQg?fxd_hA&X9Kq0$4eQ zF{r!mOYm|M8cNKP9>meFYDY?9_iCY@3E}U* z@z6QQZ1`hx;UR7*({J_Pw>_L!_V0gwiFAM41oBfboOM1dRf1hhHV3hWD(1<)Rrko9 zoNhw)f1_da^|`R$Z7e-<3#`N~^Wt}a!>+9X&uf$P@DM@^8m;G3u?*dyLU~t^VX$4W zx#-n$fJJvGe$LzH!`ZI3@L21-vBwo$pR6wnox~12dQhJ$5WO|XvxdA|0fxYU!>KmiHn_MIWi);O|y%|~dzuz%KC*li>1P(140}y-imd&I#63>Mvch??;#)>M- 
z|LKEvQNO;jRP>up98)$B({;3t9N^JIho0MW_3ne-DWZTyys`cx$wT*qT!}kPf`)%< zFDPnVe7^i^>G5}bDSzs(|E19#R68N+(RYh<@nq9ME)z9F5d4N#j3OsD+66rp9AF*` zyz>8rTj3KD3rz7_O@+$7{xFmHoRr^=NVcq@P3l-!uIoc?50;J)_!@cbkZX zfAH8Mw5LXc^BjuEn7(OeO$Pqr^i%B4$o z#YNg3^;`mTj#GXU;h@_^Ja zj6_Z+@%)fJ*hum-ws%hp=ikz=$_xHBrYbn_`wRNBGfkTDUXzf++T#0JcI*!mJeBWo z@8tcrsC0KSJq`q8o1&Va9t*)-Dk65LSvMa$qm7=u+M%M^72%weL3oV0*B9tQDcDQT z4T#TEYh{OtJa?V%UqiQdRwkZTvWMPfjh{TFN1hunbxlyCbmmDyNTGI%j~hdSEA>w8V&9AJrC9k;u%FdFxLi(MGN4IG^Shyj5l`SXoJ@uxg<#hzH7-?xE-l(KJ z=2{`4$aBBwhL%CiENStslE`_&7Sym$DUV_w9lU!TU?3*I&bF5hn$VkgmfuxsTYt_@ z7@MmY*&dJ$N!X`K-};qjanM`Inx7V`U}3YVrCC{1_mg|NxI*-Hc8|!DJ=L~VsAaJg z`J-moV#rFu9oW+Nj!bzK%L5R$!L+R7yv5E~9ejr!)R*=K~fHp7DR6^+J3_Jeoy7@#rpm&SJb^6r54A zpg71V4|xvcc7g;D(h_Z(wwdtr?G)c}NKYj5h%91O!nV)T%g<(eCmqIL=pA9=pF>M% z>l>G?aEzDukPU{u`)9Utc+I`v`PmG6BXww%o^>mS=u{qBt?;G`N|jJY*M*65$Ku#{ zfARMnzo-Yu$3Vukxe0uQeF!In5}v;G@aH6>w`4kAh$q^8l^ZNjB_l+`%HIa&*{yDEiVI+#yT@s$P`9P+7r}`-4zJul4Vl8oWN2VeQ2^(7FszR%-US1bx5FBvsfWty%Si3 zwCu;6e!1w@JqwOfrJ=Rkw)lDZQx+MSsZrmn09zb{JyT2d${g@_|3br<;xNYe~0)7vFZxV z50PG)L^oH9NP5iCvTLBAj`9lG@FT4-TDlZjfi@Z6e_!op#6a3ANX~j>`Hq57z}=*t z%Dx*vM~z!HX7X}Ofjh{Ui2`Nj(dY4Y$&Z_ST0I|6`jSp}6Prrt)zJp8BC>DB5^be# z65i;Qwj1f|-O=csnr}s;<;dD9>5xRDeDBq^S!Ks0|4rg?5~f6kdk6dK8M0J zmvm}+OEnBJ+yK`z2DrsN9W@jY5w5+H+76u&>wFzjwjDYAiPZf8S`c2ZyLRM7OxRp4 zYP9{)tK6KEl7U1``ZU*N4Fy&rSst>_#*Jf>MLjH|iiOU*yi^qZnCt^OFF>Hpi{a0v z#B-W&MSdjy5)mGEuZ@oIMILf-<}T9#Q|f2S=ue+t*U=?NK%RIMf))<2$gmHxSgME| zJzV{s2l=%pQIa#Zi}Y8pKk~<>yRc!*hp8zIWhH7;yBu+B8rToV8&y+@1Zt5eT+qZF z+o}7f307~hoE8xerz9)7ZE-JGn0wneHEC6sdL^lPRBj=sC4b>?IyqU&dRH<-qqP)4 zpv|#)<P)+x{K?O8s2sW^wxv#(*&>+`ym?%~FQ8jY&ee!@+7(y<8=LZ$F)&ae^PF2&cS2duw+ zwHnX?X}c;8yp30E4C--(pHkg` zC?b2EkByBD1pDbnTR!ueXB0U_l`K$_;Szk*6{#+|5|q(WlZH6^4qaPY2+MlDOYkD6 zKkc;ZPhG!4puqi|4r)@Q;y5i@u9B#BvBtZ4;^2V*SWwEagt4Up&Q!G+A)t?;ApDaM=b=I&6^D2^dA|_hktR6GQGF|Gt5pnGbJ| z@;`UJlmLC!1&p^p8S|DmE7+~d7 zJ39Fd1jU-CVDi?}?lHrJM)Qp`hhKLBeXJh~NjZQD#eDkuH1qLo%|@3Yu3q5sgua~J 
z`y)_{B&254cKz@B_)~Iw=A<{0l60n8&G7D5xqJ$n4AO3XLnW&-e^E9nw$&c7A17;i zLqS-bn|_(jW_lBiDK+RunFE2Io}l++C|r6}c}NDJur|jgli6De7M7zgDqWH>ZT^rN zrIGr#CY;wKf2gr=*{pE9blhWlvbk`%IqXJK$^(AeVk+r!Aj|7QVUSJ@Gk2AiI= zR9MVpx-*6*$?ps0W%S7;XXqFB3`*&mS@lH2Qx~%wU2rWa<$i^nxi>zCg508V^ppM;y4G|)95&^uuFO3PZ0UEl(P3z!5R=dk*;IO; zveF5(kEk4fz09r%DfZ?$I_;&jUEf26!PkCy3oqnQ)3g}X@L8b z0VBnwv`tL8d^k1Z467;Yhw~551jDuZvPvm;5U}JkKuW??i!?`O ztLkfn)xBU{ zZ*-9i@vVeAPnN5Q5MLTFhaqt|G#L)Q5`l=gZ+Q~<->2|0PKdNOK5yGcdu>mYSxlfv zRaw$&h*`|`3&%?!`{t8wUack{Sk_cN|I(X(hlfQY+VT(X1~!K92CvKJCF0TN4Y{4m z!2=rr5hfw^E}Dc=z}ZE|x4o1ynN=!97h!j9_$eo=}vziJKsn;tGjMA63~`eNaBe(f>LR;Cx$*!z4Q5C5&7-6O z4?u#J*w1Vq0XYDX7af#~rWV;swW>hhzk3bb4P_dd3vp!s_fG+y?(64%geE>4r?}#% z>@6W~xP}zJcuYHAAZYk7T9$BYz2lJhdpR4psI9Z`C;3Xt`h&|zJM5B%@Y|_43-Z1( zC2~woFeO)0EVb*R`{o95kX{%79lvJ)wE-XfM1JWhLYjx|}QB@sD7(bJ~nqrkAAF z7cyFZ%wfOTpJvIkEl-1rFB=L%8J`Fyfws%w_tTKVu*v*9$ry7*GA&w)bN3|trcM|0gWfi2izPIO4*G9`F4f# z9$X7kgU;TQ^5Q`t^rCbA!Vi@iZGXX+XTi($ayPZe&zc(Aj^9|G(1%|g3OYyy?IU>2 z(w&o7d@v}H(2v|hvbad`PL4EKyA6=F60uaJ!EY_xV}0)xzViFCm@cW)`kTnSrSV^A z*6)uQ_$;*Nnid_sza<(H)fc>k;*EW__W<&}lp4ae<(t{noc~6$B=|X#KFUOEaE6Z1 zOA$sZn;k;)rCO&yL#aT6d?AU~K51Q9t$I1-(94J4%+={CM+?x_aR;!WeyG+bK0kT5 z>V5e4(k^1o)igU=O_gl%eYz0;bDb`p4nh;`T8wU#w={W&h~1)tigx;evj?E5N=k+b zdP(w!lBma(LcL{^lsROI+}e z88$Yy6F6!yxcyiF`u$%BQv1=alu3D1Kl@hmsoN1yG_0@W_ANLdL4--15ZqiSSl>Xz`>^0S%6bRw`QQ&bNA%f@P7nbkxT5h5Y{d>%)tb%H3hDO3Lw7=j17uyfXE8L?oQ@ z{pzRy!Doh{&KuXq3n<^tFLpRHH^ns(`w&SjJaxZmBW}-sSx{(#JoR~%)aBu{E%ftt z3Z_)Gw~KhHetfO<;iJDw#TS$wUu}RLA!^+$d}+S+E^kHjz<%+*vP_Er%y}7&?@P^% zPPHkf9kX_MN2Xge=2j9RZ}4kZ>=_!mgHIG z^RKh1AH0%0K{@&G{}FZF(NzEczhx(ctn94HCS_dV8kyN4ZbDqMvU0Cg_9mOGYh1Fj z=e<-$g{*5`T(Vtz+>49)z59MYpYQJ+=lGY`dA*;n=kxJ=jF@f**U=vW4i)Rm)}rQK z^6L1lYPVHGk3qvz*PeJz9l&)&AqUtn8e;E;i?A21e1YZ+t9TtDmCvE3+_Wk8Ixkbs z(mi|mEpcr;#HCU@IQK|6m2#9H@nn}xr2BG+E%lPMw^Unmb=fy1v8*EwiSR0fIo~JQ z?&Kf#P64$8o`Fx9+QuL()HmM@d){bx1GpjG*e-Vv=rnV4QooeUOt}OUPP0uGL=XKb zpdMtZBs>E@`N)`OLj`3Pfk4@m8vWzQQMMh>V|wN&KW3Cy|a 
zw%#^qQjzPaM;>=`@K=f3cxZR`JD8JK^RrfR;AL^_zB-AZtYuAvKH1_SoB{Qz;4if` zWx(6vCFf=1RqeM2s{l+b{!ZSoZ*nP?vqbkz^y!0dd@3b6mF*mhaJmwPIMjMyBfd%< zGLF5~ewqC#47W&81nltm)Mv#d1XOgg{&v}s;@}nl?00Jgtd)L@69p>Pq{000w0!f; zydSH7($pkWP!v%jW^M970bR7~=aKt;y+nr}4$tV{#=d2M%S(>beA2JrR^<{oauT_s z_ORWcSvlypzh%p72mLE@bKTEU@Oax7mj^G`7r$-x_3Z2IrYARRm(soG)i@fPnwK^F zIuj(mUF^M=V~E&dQXNF~fR^9sM~cYuAP;pEfBb#bB7XkY-@Q1i znir@g-^6}4_+Zs~8&Egg|LYi7(ehcB(%(z<7?9Vy5({^z=d&n6Qk-nrnP1LE&zai? zsguP6O~SLrf;qhpUpl2-^k%s4zd88Ga@04SRSLS{8^Bri6F@BiYv7)+NLy6ltgBL1={YxU#VJh?Yl;fHC@Xg;VD0E|n%^KX2vK3I)^|lu-AZG9E#|xw9 z0;01@(+7DEE{dG6VjfyVPD|~cC<0D9webbX--0}5sVyg_oJ+d0^R8R;c>whINLb5# zV@z_YQT6yR95ZIsXQGZu?GoR1bZg|=8r&!2amnIrqx)&8kB+76x#Wg?+)KrB?rh+q zVTSqZl;n9KXmtK)k#!YBi? zxq;38t8-S_MD@3>TJ_&zE)}QsN$Fl(I`sFY<~H@Ud&6}4g^iWo7N2;EHfp25B55{;Sg$RjB`8-%jDG{F|@pwgLVpWi$g6m#VejZWRE=@ zVSB~@idzo#J!0KIZ`_0Jt|qQ`|7wK)S9-|cm}b4MZUl#eo&lTRT$YV{x@`Wl&E(p< zHF_hN%5GP$vC6e@Dm|ylnpB#&VyxF!&CJ;U>ESJM!UwHC6=s3}nP2MTvs*o0);y8V zGGKt<+`G)~g6$t*^%Y6!D;n}v!K>*r+U#J*6eRID3`-QIhD%k+iJ_05w*tkXPGS35od-V z(2?t&E21V#;j(Tb8zQgiJ9pB6^K4T}v!K?J4VTbl_ojX~;eH(nMJ(9@jhP%7>h`WA zJ>O18t+OPm&b>t-v3OU%@9>Rvf3ja6<*+#UMf*O5y~#H}Z}NOwn^o>UpkMA06-_}) z<~8VgqjfFWv=T!umK>W?aKxT0%S4V7Z>rXKmE6u^XSX~4BTu*mmb^=A;szB|zXMx@*iM#Z3 zBj0Pfoiyz_TQ%>~07+97m4)AT_(&DwxIqs(QR@$8?;B0tuiOjRY9TvTIoSA(9=JEX zBzvaf_JGuSL#Kd`xon#CT!SrmA~-~dmwMqn1;?4LRd^u5=F6;&v_}wao-FIc;o7ho zrEvzsO1}rCS;m@e{hZj!){$eCg*NBf0o|*d5sy}<^x;!4{SHzrSF{}80h_d{hx&%} z%7Oh=u_p-5pJCuk6O9g$(acjfN?%Wj&J2UWTb~eTCHK=sF09K0l%EcLSZhy@;-c~Q zo!_{SY|s&so_ggA`IkUo6Kh&Gpz9aBtEL(GSBmF+idwzAy-fekM-5wbq1JmopF4H! 
zRT^`-2BUgK|7w42<4OGHO-EyON3x8zN+ShI647p`JMi{zdS-1V7$Cu zKdOJw()#j>mq`3dfdhRg4}ib#StPg#`W>?f>10U!&^oJi=O>(}=!BPv**nOCY z$1&5{F6g3!iE1h1eQD?&2b&oq=#Qe2wNxQ@SaiS-rWqJ}hb-kYbz)`j0!UjhB_&9D z02jMtm2_N@^2ek{dtHwI$gBvW*Gqx2Bghm?UpRO&8Am?LN4&c`tHLfkqEcQ!0fIgI z`n3E#OR8@D=M#XnlE92u2Ut;zIDv)>+MjIvmYG`tB&W5$_X;`M@ZDSCK&F|cn2Q|{ zz|*VgWtS#TDZ2`Va0YuJ8C2j&le@Jkmt2YSOU#D*0=kvs>vU(Bm-Su!h-+LNWn24w z*)C}#ryCvnA7~7BRMqeE5nslLhXQ=`;avEQ?YcEhC%dUq4(hY5#Db3S_S&bPZL`5vL48{cU5{l zn;6#`d=Eh|N=&v+T$JTK{NC(vcUav0mU~KV@mTg{G7+_AE5pF6Hz}>%bdhwoo-~l` zi?$snPVadCzBPJEnDE&KWjtrwc=F!JMLcJJ7QbyV-!=+}wp)SC9JD8vtsaQW({MkP zXpWx4)cuq%cjkMl>!9Lz2 z?YBZ?+f%p4)*~yll-NccyYw`UB!jfU#ATP($AcMAQpp>G;z~={gB-riEyB@r&ey@8 z?@~Q>-hVXsW3Hvm-6%7))n9`UiM81%(U2x}b03cqs=0lexa z4!c<(k&R)svmTxZHn^2+?A+!`<_lK0Om8UTW@p}I9`r*pCN%S`4Ecf_MSnG>O zmjU`c)gq*E_@ue<;b@5y%R5LvKUqgRn_<8Mr~DavFbM7^()}diaKheRIrZnq+4Rf{ zS=+~s*Df56T|2%jfFvqu652cqX9=cA=vh~gAmy?q5 z?c;#XXL27V_NFv4u?B#3xE8xM-^V*-cLM*gc}N6%0*ug64v%(TZI;K3V$vIX@2X*A%!baBE$R*jNWb zb9u=5KgyGJgf5zH5}Az5Jq+5?jkOG`OQzK(`B6in?BX67gQggvWK5>+2!gg8YOx`I zC)%C*HLAo0GuF5Ie(I97#)gPUcdI1jGw^&vifn*j<`1r`68PigjjWd6u_6o7QUS`v zb8_k^!y7yU=NL=>8J`VFlbb@~{3!o$*j*A0MGku+Z|$4C=Oh3l;_c`7dWB_;wHc7L zH0N>yz&Kw+mhC(8ygNnfbAT=h0YEm@9r|)JFV)mXaY>&kCcOOT9w)>dydKVUw`O|U zJFnE`Jpgca1S}nL0T)mQX43AL7Jv=}e(()guHGGoY-~}ReyVm_jydnWzY~Et!Dv24N2fbuqda+=Ej#5+4>C^Ut?K_{G8JIOL^B)--Ka6pp;Q! zkf%>``K8$5Bh}AQlu)FW>}?IN1q!-@#Rw*ELZH4NtA&O&NcU*J;+y#u)DvyJ z1UfiOIl(kUuIcTC8BOiKAz;uKR;BKH`%PUN4+y_oiDs}j-y;schgX+5w&%in@W8LP z01>m)7x5M*z+P%~S0!#w{-YX79*U`}diz{54&QKPte0^iP*YG3D(tFPxMCl%`$xwp z9Bcf@Sw{$JSDSP17GQd=ncX~>;*1nrY?0*@#iVQ}!b^kPnHW_Rd2{2w52ltVN5n`- z#%WmU2I=EE+`HUmY75evgVk=a$Q8vehGW$g6L#KK02yL+6FgUq?;i(bDZv{f6H$OV zd10Y}iM}9$chtRYOHeG@@VCib;fOEuW{>9&mYW1`Z1nn=yym#A&J-U+Ut!r0y#mnL z`oHX2`~g;&YQO+~&0mSNUv)JCnEEv-PXYVQG}BO@;z2yHu2T2=3LvUFf5ju`z=Ep? 
z7<|ZUH~*5eQbvC@T?J{*bn~-3P4B$PI(H}XH;`o*eG0HrrKI;}I`=~97~XL=H+D;E zJa7TE?t4g9Egx{2D?jUKdqSL%1Rwe$S=zglK&jt^L=-&~7PJgsEmdf)6B=~NeUuy5 zWixEBTTAVu1vZ?8XYM?rTpHrXDz}+#Dj&WI0J7w0ay6MnoE*Z~;ZJX!q8&Y*hkYB30?9$<$f8g#ge)cl3r?nl*Wt?>$`2`V5Tye z+_v98Dq27iWc)|2CY8r5vyunNK;@B9t1@<1o_4T3x71CyzL!523%msF84cpFlM9!2 zM22G^Dc4z{gg2G4V=Cp?Y^Z|^W58H#Jlfr(M7GS#QjN2oHy5GX|KkOHA4{H<$iA$n z#D!|N@yYZ+C4ZJ06#bqNrjJoJn@!F0uw&PtljZb!02tb1J=(ho`jZ#* zyXAPmSY=DH7NQY$Pg3_~1$lH;uU7z7m9mnTiE?m>aN(76vjeYZ^ zBIo24-h+qDNwilVbE=ICkV7*Tf-m$+dO)EJN5c-(o$c(a-u9guHLl@h&|NhUYt2<; z&f)c=d0IYfLt6Yb;?dr+=lW(sE+B_s8pNn%kA^4&3hkUpw7wlq=4_5Yfd0yQhyt~e zBjF^ojPs>#!A3*=m>Y&pe~j;DOhBLU>eg6S5kf`rGyy+Fqpup005Bj5tF4&EUv;eS z?d4C|{08n){*C$x1UYvP z!|3M3gB>b0biBj|iJIX!GGLR*C2mO@J-uodgo#8kofyq1Y(-4(IRczkYB2Sl!nbz` zwyY($GuMj;4`_RT7mjx+QB9mxuGJ<$iez&t878w9o?u&ZS7Zb97U6zvnFvB}sbOJg zsYXilRmat7_cpyE=m{Bgign9UqWx-b!NCa5^`2N88cpA*ahh5Hw2g?&tznqjEp^F0$rfY3@2~yiV+34Cc1BP z(Ol&2yMZ^t^xD^hUMNQNGVv?jcaMUcqfN(oqu7!HysbtxtQEW9w{xyD!0hRZj7b3u z-7Lj>t)0EMP2zSp-b8k;x3$!O4SKv@F(Ivr;aBZ=Wyjbc)AU$Jr|ymNu)1m* zvwiIk+7Uo0i<`E|SfffllY#EtcSCi*kg=V4f*+rB8pkg3v0r{J^-jEySgR9e>wrklXRg1$#Ph`VsqNXNcEsosN;NV0&->$S?dm zr4h*?&tIJ3HDE;-X%YzDIjAWqOodIzK34S(s6ZP+x~A)~ch(_>k6xZ$l!6j;R=-*=|C^X;ulyo*r`R zSdL3dRWk`}!^h1@J|5J*dr$kB2wApTj!}UFx}y`%Yc@Y$8n{l%;tQ08K6RX7G4$h| zOnyF^q{1_OK=n3H5<$%P89B}m6Gsp@vY~bd8qN&R9VEjS&ko_Lkb&9{{z@j+E*De^dj;Vt0c0=<_jbJY40|E5MtbuGy+j@wMEa++3w({;L3M!*4B}^hw%q07`i% z^x@?tYXC&a?d5ke+#%k#t9LLS*!}iM@jVWqbR%JO^3@{rtTEXMIKh#5-t3Hpr{kZ? 
z7px6hnh@KsGUYwMy8~K^-?jW3C3AtjW6`#;4+-U5;>f<7c> zf$^gYX!l&r6m9$hg!QbU!RVkTPIX4E$+c)a&CGdcZD<;o#84xLAl@3d*u8g4XDL~u zP~%!5m0eT*_G~NF=lIpST4vW0zw7#6fnsF~{6zD3*h||yq|*>iWdvAGeKc#oy=T{= zm=Ertm5M(#dvl7q%|0AGsP8%W$|AY}rm4xL+G$m+?^4NAcItX0$eDS zoVRSVgyM+(GH|udWZPCpP&ETBn_^;594-Y$+$^~I@QGX|th!`rdUUV2vx$TJdJOzO z7m~GylXAQOj9Hg8+J(j1V0qz_7jR8AYlUhA{~xj*^G`c^l&N z!yP{Svu#U1fOIHL)^Hm?!0x9gSU%BiAy+*qyHMI-;RZ{)pf-((&$`@)?Od<>v=uwsEr*{&C}Q4U=xC;@1aOiMfR2wV3sA)C zTLYZaf5ooxZK7ttofIaflfLSV9S$W ze#uEN+2`V`gZLZyltBx?;^^;NdUf&Vp}*U1X=fH(>gK_q0AjzjG4)~*>muU!vMj{p z9PO&rIO^h(HuN6A_#>3BFc}dI+qQ39{PXOqx!35b$I#%al7V%R7@mHbdR;u#45kj;0^-l#tY z>^}7AK*#w`uaFu}=NI;xfrNY5=~hz=HrGGFv=LB6NG1l4hxYU@3?>$Qb((`l480OM zPZTsbNqdQA`NBxplSGN(DtrxCRb@*idV1qpNv)w3Yzf)b0@o$W)1Hoxa3Ew2h4RuD zV|9A3CABwZ;41+H9Uno0&;3WVx+l1i;8v8Zo{?_IiGo71Mob&+ z^F;N-YB{4l5PPdcF3m zZ*S-Do!AhLx)yT(J}Io>yzidKXGa>H#H$x1!FO5sint+nj?xPu(r|`#X;8p# z(urztMyuzZYwO;DT zG-9Vfx6Poq&Sj18n*besXs~4}{^vz3=m^Fhc&Fiq%vstCXAgDuz&0C{c(=c$ho@u9 zFGk=!3u$DPjD>maq?vMy@02t6!9#7L{l;_$xhjwD>~bno{M*|sy7Q4#cuzMqXZV#* zJAmfKL4MnfW)4x#($4-?l$uL~uC;puY;cB)BDc(Ql_r;d-5s*BJC8|H7y=%Zd>b;> z&E>`Av6%G=y5AzTG*2gJdLzSg4;S2pj3EHf8FNDv0UUs7#z{+(0ng{-t7XRkN!siC zDLIeCUx~_Jwc$<35kKB^=Ms4RuV{D}m|MU*UXc)RRk|4Dk*D&>)8kvE3k<{Fv+ZIp zGM6$--oV(=V(7ROf3?I7?<1p9E4R)5&llTDCn~(7-(IqV#W7t*HNOD$D8zkh&(hD6 zirp$IoyggyEvP>HsA&QrR`u6EetRyM>(wP zq#&kGtHWHqg=v@Qgi?C$QQhVnJ*iNb2TU0}*E` zJO22|*J|Wr{EOE8(e$+vw~`jISHKeRud+=(`cyTP!I=mU5lY^L1$cf8No#U1VwiL;8`!ZPc1a zbtHU-2XAF%S;|Hrhz?-JM?SXwLXKLvZArC$VL56etZCTYW8(t{n3aT7R^iGTaofQ= zf=j3?{-9)~8^r%!H0#{u@N43f$_rdQ!wK}mCk<~x+l27zy>>nYcR-@Ylj><_?>p5n z3ZMedUY5GGVP?-YbGxIG!U#9xPo0pV_AD-od*=2uifymoK4?z#aHsrKT&Y)*+`$N> zv$9kX6F{U=`ZuI^IY9cx$#;W5KJYoPsW2G|tk`lbp1y(B>1hCPQDfOpk>op1lMW0x(!lo96Nr(m$ricE%}tuCW1u7V zFS$&H9)IqBO8n=p-F(wE1UO#^iE`_PQdm`Tdb3Tr==lt#Ol+4_%V+U_GW|`ZgHe!d zT)iM4RA;@uTd=5`l#u2>prO#VVuFl39CRUN{mee_U?|(svf`o-t8z{c>GP>K>z)A%Vi}6u;YNy=vTU<69Y2 zG-l5M>BSEA09Ov&y-hm#HJ(-=QmagJF!%6;1u}x9NjK7zDnkx(T$**YMMlHA&v8u0 
zWPrg1Wqr+4y}l@|X_qZoy{HA3EUh3Gg!Q^oHT6-7ZrR`y2LH_Gc~bpbMrE-QKa!!x zB89%F*v+qwD);B*njU-hX-GDQ-18NVp`U}^9H}2J&mA$Dcw%4|eytd%>p;7wZSL$6)D$IkJ|v;Ph!t?bEXYaSg7T>XLwPM58G?YDFxEiuYfAeW0C zY>%HumGDTd$d2w*#PSYn&bd z3^MQ!X!*{w-CIRw0sU+}U|hgb&&gf!Cpz%-WaAKv7Z* z?LlLv=EJmdGdx$^B`dcC^|EfQ7@CReYMd)DUW>VACyO^H}Wj-;vNR+L918gPhidVS87;y-$E+Nz87uP9lu z3fOe;f+XP7x|(>5=CF6Wlq_*7n_u-HU4%Fc{QmNo?D+6nK}ebJAlCx~4wsGNs%g^P z*b3TOKeLe~p74=~gJl2o&>3O_3SgP3!Hc_d-?Guh={lxV9Bs) zbc|#+_aCD-C|O7HoF$9YnR5Q1JIwM+5%9vFZ&+n{zoL^nYrKbF+myJvfB2j5MBn-{ z_?!nnTJD&3fU+){-oXE*Wj@DM_MllA2EY8bF)8Wute(64Sa%8CVK_PW*hSMyV<*p2 zGVkZ{S{}nBpYLrEJWaWi_;cB;)5DI**L3ophx#VSmZmbbr5l&H%64zROj8DO=fPOe zm8S*Io-KrvRgVVf%pu23q#QeJ=FVlKhq^ERb@Mkov_JXe>T~7c_r71eto?^`0`8d-qxBE@@KrhlD2q!K-L0*3hVWa z2S3qfu7a@fMw@f)(=Zd<0Vl}Fz=hI(+TdiJHe36Sp1~Rs!Pg1QiYnl_@-aDEQex%W zAf#QbPR^hCZRPiF1M0v;1TL>aT-4HI6+38CY^>>K;Qi{z%%6VmuaAV2i=180Rs47* zhu(+#hRy2+Ir3xapETpnD`Rj7q?!Mw@FoM7hEB(#rmhzg(Vns%aCS9uOH2qD_TrFuTvj$#W+7+~2P_p>zV5~$4Fd1cD>wVh7c7FuOo%L7u;y9@&A7GE| zDuU&~{u=jZI2ev%#TE!8nANHL6w<~G?nvo#&{A*Y=~3Sdq+?6`QhOA#2JxtGaglc3 z7?$O+tN*GabIa*4@iV5$cd)`ISRY|iS!r&(vz0_r)!U!BdhlTz-m+3R|Ib^-M7)JC zKj%t(-mP>T87YXtD&$AkaSxubC^bH}>24~!VZ3|3&Z1YeSa^<$o7N>r+Zo<`uwe*; zZ%H|9kvH!A$R}|fHgR_7EyAvPF&*s#?qUhU0_jgxHzukF8D3?_vOdz}g7eXGKlpJy zRdPL;+x+RCY_!nA3mGMM*4ggV*3%EGuG>daS&+!mggXpQyff0;_Bi^J{Z}KAoyhl( zbozDD&%A!PdOJqLzY5HGjmRO6MEE(m|ESFKOVJf7Qo^QsxxRohb)y$Qj&(crCw`X6 zhveP$RF*UJU^#Lop^`UH>A*w2en16Nh8_ckTK^fcTV*`wnQi(n3JZ!tUJEhI+PG+1 zqduYS4`$aP5b^vHdrgPN?p3DL>jbpfvZ47-!_rUPEG|o8(#y3D=NS+>?XfUT_QXi? 
zot5k_AQuIOg8lXIvj$6?h2e>-q4??G#-%y!@8;?#2Q5nU8Urhl6A{};&G2vLWPv@g<9#G}O}p27c`P<8gGH7oI=}}@$bt|1 z@e~ZN>4lHv2L|ky%(3v{w`2+3m{AIc(%%2CmyR_Q`6=H5xKjiflbXW(Qu(^sJ{(k^ zRh%(XraHL^IaYa52ciYI75p#F-VgjZ1yQD~`m-qua~NPb{7g!1acQD~w>v2|90lvG zCropx5-ev-XLNo6u2x`RBZx3ihUmtjP7MPJH=C*sOOP2{1#;b}=k7?2H9L}wGj$-J zJ4>rs9;unWu?4{ua=6LR zXCb&mH$oW$vvl7k$ea;qGtshpo$XaoT{@V8gC&H>&m ztS!GO=0tU5XRGYE>z@M%|NerXfmR5Cuxv`b-U9KMJor;;Hb?)|_0LPpwZr3RKN*ij zxn&FFpvXH$!tkTdgGcze^uc0iNK<~9cHYVJxBS07SlacE#QgHGquG9H(YPwChx z0pxSfj-zOd6#_lVM0jMuQH_~Err>~D2C8gCOIuDq@;$HDT{N&TgeQg14U1uC8vd3- zb@0aYw513qKfAO)B`w=z$Glbm$PnVB7l0NY$?|c5XM#WAZd(xoo3h2{vu`iUA^RFJ(-X0blB*5pyeX4 z^71LTy;?`q*vij}^E5;FS5f6YkvWM$9jzpBV*)Kjfai$@3H=n^RBC{AUK2I+7@y!k z-~cf4ow8jCu8-*LoUOpR^Z8h;_$JLP)!cD5ucTJ6C5#t8ZCO@XoQfS<5t|jU#L}`2 z&9QzH*2PT6?VIod#l)Npm0l`DAvuWn2eS})FT>!b4yK8=ept4^rKk>$m{?FK)p#zR z`{&ZR_f*8&>O#Q{{ADJhO$^(UxtvD)gJ2?e{evy(XII^(>Vj~x0W*iR001cxFmfh~ z`sAEcx;$v18UJabf3+dDJ$pD$&#~*kn3Rw|7pD+R!pOdef<@x!R>kuZSON-NYzlU{Mfk4$ydpgr*Qvt8f z+9g#MvSgGSjJCv@bPX955Iz*ygTqK08f425i?}o({*k1K@w*IP0*S55+kJ9y%yfxF zC~E4fP05O5*Ja*)BpthsdBSurQ}agly%CO`K=6i~Yky_P;PnquZZ?e#bx#lMG?qZN z(LQ5J$_2=!^7V5`EBTHWnd@a5e(p&yAwxP}n1%R`Al{TwDal8C123&Xqj;0At{Rma zPiHtM7<-UKkUx#T$Bu21qvf2;!#@S1l$LR2D9#wze*x4I>bEY(I>oL&^b1_=>R^S5 z1tG0C+@GWDFt4*ygVc6i{m*n7m^M8+m1&`2E$^CU`b>J#8+X%F4Q$4y7X);rJ-JlK zx+{Aa#=GfluB1n-?{!(fYTe$w?S&btT(9faSx5QhQA)Z{6Ds6kO0V-I$bvpBP~9^ypYwqI0EV|9JNpXl@VJj0i!?aPsg;aK4q#>i>Hwgcr zMU?G&;I@?3$^5SKzpNQb1vaXp&WQO39@`!3HPtID4HgZ;_!u`g^L^OC(bZi7;DJ9K ziW2PLU=uJ^=7;EPyqnYNV@VhVd$YyGZ{O6V4i{(-|h0E8m5CmrmolJGBN}eZsiyv^BBVY?5X|jFKQJk zVkVBZa1Yf^j~EcMz%)$dHzqvjp0RpsbL$=Gs-Z_v|Np+r;2rwm*1CKrD=SyobHGR*kLnFG zegkT5u+9)U7KnxoM0Yvr;D{RQ-OM6RnL z?0;7(_>4!Gle4JVfVJ>iHd#YwX+g-TBS2+pR*oLBScyVi* zzBa3(JRx&5DRx^T4Vay;lE-HpxRDs_)&T!xquO%9u+d&Ig}GN%rlrw z*85iX@9X%ZD-(Q(IyIy_7@f5T(qgtzdz)@1|bS_uX(U(D?OL0ttk4*4{Y*h2INngw{7yYN)dBXy~#=b zyjVsvJ+#1nL&a*MDNlD3drs5LF0G{a$d0#ULaVPSMlIi0(6|a=b69Pzdr+$$pi$N! 
zcO_6z&%5R-!7b9UcupY-##S&Wgg?iO{qGO|7&{o{w2|+;B>?!??{4vs4%OLYl{y;g zLeS^ic89j3x9wuSG7>(#%L}50*W+S;POo~?6D>8MAxKKNBJLwF-n($<5-b4uhT%zF}5W}6Os$BJdpb6t*|GY>pYS`%4>qpJ( zxq4uQ(3;)SeCjiSfsM8KDZ1gTpTrNBf1P!THwfW9k#uI5WW7npzVl5G@kJ4i{+O)+ z2g)hf!or4rZ-u1zc$sAdJbm5#B?yzWUc9?=|^`& z@ZSYzEHMA@bR6dS`LdY65V(RZP~IgN&ZR5DG{a)wmz?cS#H@uTBCvpS(?|-;N2OE6xKNEQV{V*kHFMM7g0L2jUmitY&t!z#bfeaM_rwT)JWJ@ehvc zQyw=`KDGU)*7df+2;SXOl(^VnS0Irzp$Q;iaEw;PzsO-t@~+ ztzfgbcA3FZGXH*wN$D&8U!<^%g8~cTpC$@PtdtV)9u9;}(I~RQ@}!6lXZ*v{Sv2rA z0GTJ8Ssg9AoB%MMi)A#au-KZtc(RPrf(#jo{K z@AP!*G=CcHo1q zb1gnnMr3OQ3Zac??EUKIw?Qs{HmeC$hkL46b$b)k>l-~kn9KF`S*G!>NV5$@uOVsw zM8|GI^KJw&)Z}4D@&rprS%d=%!_y~Eqi`F~QC0eqBhuJuf$QE(qguXwa%sHkpwIx- zqQ;FL*he0NOz4r>fWH|Qg^%~p`OLDQTzr&2xNVxZ6)CQcx*2Pv9zc#jgH+-OKM|mk z!#s1c03c^gZ2NzIyX-?6hB$sivF*0@gD$1!50O=Od!uhhp?(MA!cDy}rwPW!KTrV$ z9aqkNP@ZZ{j5+F6I1p;(AadIHfeHG8!8MWHEQT83uAVj#k&nKkOTJq_m0!q~-G|-6 zd3`Z>5`UcL63EwQZ!kVpj@q!(uIGBL2)+MHt;p9$a9|anIlQPuT^|NMyc9nZW5tbE zSc&g-;DDZ&9V`emuS%SESPZP85%&ngVr2fmMr8fcQx&jz7%F@D`cWKMCLyvX#m*rW zT-njPH8utrdgS?AC1*Xwt-QGkuoovQp6`RRJ5a&(ewPC%-e^&3@0a`HZp6B|wqj*H zeS&FM886<>><UKArZQrJcK>||53tH)nyaXj>uLk>JiEV+l%6U(3Llbl*`T!p2^l8*A(z1z+T{o= z$kx$yFqZ-MdJAEr*9N?NIr#Peza~GMqSne@Z$5tjK$mh1oKJo-n^DUualgZh>)?vZ zZ=h{<$6s0pY#+uWa$kp`m`>Tgl0lLMVH)+I`DgpJ11mMZRJ@kBTJ&46xiuc#enisQ&QtyFGfL()kF zx(BmdH_!V~6nhBwFx)XS1}4|Bj(-~@R*l`K4+tJD-RA(-l9W^e8J(mh^0Wsw?dr9< zL4W_G`0#FL2&>=XIOnJbuwmR!*hCJP4Gn680(zX_>H4G1@myY7r}V}d8-xRGnzjr> zJMr#HnSt^c$A%_jm))^(KZ^!JIIBVQZ-R(`rN@5=?eOxSHo#YDy~CWWBxIPUzfL%S zal>_nfc9Zv9EoN!+!5TI{jihsVfX*fl6+*hNdRF4D>~&fTv=@Dl(>GZmpNhO!z079 z%lqt>^0cctHW8oSA@=9nE62BL8KECwJIgi)|4A_VL7G)iH z9*SuibPuw66V@xze9f%dn2M#K_z8NXJ75Zqu=y=&f|+*N$N|v2wSy=>p;rd~9&rYC z9{-ve{_)`w7lv^d9$xlvz|MC_6GkY`Lu=6m$qvF8{cnbf0Y>gYN_M_|y%gA$*gXC0 zhNCQo%3ipnaAv;Ubhxo^UcL#9XRrQeyB<@jU9|Y!R}f800~iM-Qc&M%;e*y8{KMOWwS_q4ay& zbXHnvLU(&f%64cz?3?H=eH_JQ;gIMBGTDo_7(6KGD3lTynf(-RGBBHT(%8JY&+`4N zI#^}-&3z|D-mEK}TF-gHL%&}U)RlVlLXb^JDSf*N3(s-fLXB6MwjEVf<(5@pO$p}@ 
z^-Rsp&3CY4CyVAgmOI(xYHhB+Hf$=)-mlqF>}>p5PPsC}9Q15#ZvAKv$$)SBV?jLL z{)BEPJVRH5&K}>!-~DV(zsWyMHP?JXbYIG`{?zdRx<+AOlOGqC$usHOGv(bg=biOy z>YUK{02j+0G4T?<*Y*PE_;$S6VQ+nels2uoSTvnR?g|7R9 zlfxWv-^eJclbMt2a}jcLvzi#{@vbXY@oy+V53hb8jfKNn{&0ayDK1>-w6C#UB7D6@ zWH)SXa!)<%eUFns%qg36OV}BZ6wto~vZB~Y2fgU#g@RAj=pi!Y=F#3ipQO=$Ebn1X zd5^Rw7F_P7p?8)}u;WiAINy9t+^lt{vcm7F0W?H9a&BkqL5cIksx&u73(ZR}Vn* zjQEQeE(DK0_uLjOydPHhByGnn?Jx|Q5wOGTINyYIL%HpUw*E?rRyw}>S)7g#SZ^&k zxmtiL>N`02dE98jEwmmm=eh7SAY=-E$u%W{;D44w+wyDV%>UL?{v5+OXa-BM4#9C2 z1gvBy~I8E$nt&vO6N?kQ}##P%YFP#;`fFu}#dzPQHuJ@AuyxkJt6O_Ikfg z&*$rXJ+AjPYy~R>Mu^&^{3XbC(~fB^e}-vbVD@wRxHx9hg8bWCLH<0(xC?5~te&f- zqN4J$q5XYy8qquNfk|HB0F69Ez#&IZ9_xU*t)upJxf^0~O-+3lbOD2ZS_HXtp!TqN zlYI_mT}2vf`yg>Ilr%z*>fHFGxpgAfBXS4%eJJ5J`X_-}+PzNNh(klu5o6-Iq{X&Z z)*d@gey3$l`k?#dIT7q_Y*i#UlhPd&p&D)0YE?X?8)CH^M4D}Fc!v@{I_qmgmU@2X zza_jdEzW%FQvij;<~RYAKHYfdCS@O_l<};Iys)fI?%*{LHZ?y)z?R=5dfrUDV=WEw zAFY#CZe1X285dv{V99ObZuJ~}c#`tWN+gBjI@+&lQE8H2G{SFRq#P%#?|!4{t)9^2 zzZR%+TRJl+lRO1fU2#n{gFyddfe0P?V#Bd~DQA45ueAY(S+;)Y9x4!b>*~;Ed7~fN zP2YQAP5>(zjs>9b20)C=?mo9DV`AG%@CTwJmcnn_)Er}1(B=g=VP&4xKVF;gQ__+t zGYeUyTmlX1{3`f-!*yDAV;QznmXP}x3Kac zd0sCX7x7VmY!V}18Y|-#+-L5st=$|jMrH|v1lIM_&0|V~RTK-bo zp1m}u#d(j-i-c3vwg{%w&p;>Si`S(e6iMU#r-MJ`+oSr89IyH=VXWrb2a|TRY1=Te zsxPYBXvS=dNQ?quv>s?)5rsh)#a_`ZVJ#hZhFVMF$$$x1_DoZx2sY2Dco7kPni6tI zz${tHA%izB3?G-d4)h$ZlVK0*WrL_!3qY>3@m`c;y`(#?7!Cf$1*M(;p0hXXU>S4q z9{6B{{3+3lF?8zb8@-`u5gXXkF(5v=FA!9FKD`{X;H4h5Zr^N-Dl%iL<{m{&2xC0| z(H?tWI5U>8f314jc)!z(sH0y6+@E#Sq9on4)84mPKeQXEK5;q!9-$*Id9-o6Jm4rM z1dP4j;P}{Q@wv252gvo!YwvkvbsiJqhzUVpTuN8HS0ti^Yuwo9;GW$mH-}U~P}nU= zK6k(VP9bB5dPgN+(BQYqWA4QkDczs)tocPyNF~Lh+PBKpl4%$fpfKAr?!`Wm<%<}w zKvx}OuanWzOu2P8Ci7vuOzZtyZF}2JVl;xnP5SI(+X zYpmg-*8-&i2;RKt*UXCE@5#)F-yWF7nURtU9)cW{dg3pXq@lU6L-@pQAf!=~u zLjL7BTy=#u_=~7F?gZBx+-enmh=&W8{ANwbXSFpVD!q#l+RR>OD-VRhISd#!>!{Nu z75}qR=tn~?&?|I!BpR*Qy}HWd5GTguR8p?K+M@1__*#V`Qy7sPtz(%LG6*H@r;Bes zhCe7dvN(S!I=c4)sGfLvE^(*sJ#8B(q5h(JKSpHzd7(QJ_|SKgl-IN#r3eY%^^kWJ 
zkvS`Ul@p6({Kif$yWhQ}z0rJZQrsFjl^kRxFKXS%5YIclXpY2Y3+6*Y+U{6&u%KU? zq~ld~#BNCd0Rvq_4n9ipf5vba$dNW4u~_yxyEy-01nkdbe!p0r`n!&FsHkv0X7hs2 ziRqOXpLG=Ag|)TWuWO%5FXOEAIe(g*O3R)9js0)?Tp$`Vy#wC8fJV@F6vuu&e`6Y> zOpW!GB&(>~tP7w5h3%V)gWw`&-C>l-1~*2V0|gX!n%MVO{0NJTTfDpU;Ed)>X3(eX z>Bj<~cT9Z|_8JM8rczZmBB7)~ZpMieshs2&_yWIfKpva8PmYsw;~M=vrAgBYg8(i z!kCj94XD?#p|O(ELTXyBN9=yb72c6@X5z7&9A>d4Y^%7(1gChnzSVep^FtuNDSCQM z6?ZgFOpG&GbRyk~0@YZ4#4HR~*kJoG!3d3+L)5)&ybd;#0pu5TxZdpjPjV0BcYbGt3m2-a zYlafV_1B?SQRX-aCwx(xlU^H#o)+uK1GXf2Ys;j{6F4(FY?Hk)vNEYKQ)-nr3hCd+ z^u!MVeBc;{_u%Dg58nMO62@1|?5MH;|48atPY&h0eow>XKleyUs+}LU^v{86L@>Uc zD*;m>5-)39JE6nMIizCB8#p%Vx!0pK@L(^3VA!4Poi2K8dNScoCQXSdVrl9*`8`qh zD*JgPSpxw>C$E`r9cHytsaNP#-9y)HUY}9}-+V(AEN+uGvK*)Rqcl7x?kROGRUEfA z@?G*K@pztKfT|Fjfc;HaxAg9wzy>7<9@eeYQGG4Znw404 z-(HaTrFJxeR~PXcJLTkO`}5dB;46E_*)%%Ej&6GTPfwL?&9LfaZgan zbh|`~#tE-N?8B_qjQ+tdzOOTQzz?nL-P+3D4akb;r77+%?~?$NqrBYAv!P|n#5JBl^bZG%?X9X zaTRF^_SIE6lUBxk*JHf@OfR=VDKy}E8gj!_Q* z?G?)&rV>LnN%#_`uJYCC&ciFb;N|!ecWZ3xsrtf@P2!#+9ZV^yZG6YOcRz9fuWa!S zT%J_nMmSXu^$`S+l>I}n8TmfDYr)#u*sMXCBQ-+^8pl>=srShx4TrZ;E>%VNpj?BE<}Nbk#mM3d0I) z`?ehFvW@K%uEfoAC}8h{Vj>`s_I6)YFoOIS-Q7QU?;)2;02|H@zfHvkvI9|fe_oeV zzu*@E&PmIi2+RJpd`CFEb}cw$%R+*Cl0cKIgJO;RSzCw+(wfRtnBPMVPKx_zJd*N5 z^yJgEAAzqwCK$GLh!|D8?Jtry0}OEJf_zu`4>E?bTyKa-1&Q3gAvzS zRkg`)l=yko>m$hhP4sIr5j+qbXN8>gs0_Rq-h(EoRx8S89FM!H3??}J!D3`_dy~H>{>|Qu2)a?t6VqEBOWp+L!XruZ@B5RNJ zG2ddPuXv9!hc?LkQaUpEb#@rPaZ0B%h%};^7u)K?9UgKWv@J3y~Xy~%QA zptydbPKSJv-dLSI60$`cx=#o`#Gxxm<>v+N8w4rPRt^B8l3C~>!|V-{BjSp+(N0pX zoO&e~?orvnKUs1fd)u7h29+J_>MvbR5uwQU{Lnq8KyzD!QAgS{I3|ObY`E8)3PAzd zm@$>Ue=2|2s%QYzYZGbOl}UVm#lx)tAnw7(O0fU05xwZuSH2)Ml>on!)kMBoTD422 zri7Nt??rt%nwUDeM4mM0o%&u!XN;a?pKIkCq4EA zUJ?Alkp9%6H|XRplN48O;HU+h>=@lH-^xA(@_&toN&hB8TKj7Rx;F9lx+a;&-uBrR z0GjH_TdC@4%~G zH1hl%0~qN)YJI$_D=fgFB)26G>B-Oi`N^19fV2tS=ewElH-NsW5+i3Nq>kjQCalqi zG;Iu?nAsvX$VySx@i}^HU|Hq*)~lK>E;6Gr(EBtYBTm5B5=SYBvDc-yirtIB|2YBI z%})=K`wHqDx>=ysj&s3+^0WOf0BCfEP2{(?)$e(yI%}S3Z!i z#vjmJZlePOtpBq95!6nS2{~2tawoso 
zUx!R33i_S(U4O=V&&znR)x}iv_qJwx>ur;(9JVU=Fd&ueoh+i!cg-rX2jvXq$_e9I z{wE#gp0@C~TM_*cGBKJvo-zNYnGJ~Ti?C%j+h^D_{R+gxRRIX){H=~yg~>9XHLcUy zgGNUY!xv)Fns*b7!TKEWhDF-0qD3zg@McM?yS=1$7{pnlD(HU6Yo3X8J;5xQmR?!v zP_5u~j@sdeeC_8Fi+X)W1?3J)9UjcrMa;=J;K5xpJO!$1)2oVB%8r>`VIBzmAiump zM+6g=%Yv)pyP<7G$}z#KnzU)PwZ7Vob)W+;(cO(hhCSS_%OW9yJxjlJJBJuzyF=xp zJROW|R!csvw8Znq7jUJ{B291c4Vr{u;G)u_PGemg9J>+{u`^>j%)M=Iz{6t3q?eh2 zP--o53-f504$vtxwSAuPw_NpX-@LG(N4#kv(Q2}bWZI>_So&zCrM^pt;aHY_`4I=3 z$mO2m!w>|J^IP}z?yK^0JH^$39`N42eCnxsTV)e-+s&=K)kH3+D@0rpJ1*%_raciA zkz+0NzToA7=3?V)|+b zp3l>8EaT^(fLl%SbdAwmeL^Ex=3e*I8;w z{BE!ORu{Akq2H5!zk>NaVU95jzotP(8x3;M4?r_e$zq|!C=n@4jj`&Mf8cJ9nqR!W}spWr( ze-Ak^-oWA!l2fN$}cgvF)}m!TIsF%`fD2(d@j##_%+C%E$;Q4RqOozGElFP zeEF$rI3E>56#r30y%w*cvdIo|!P2!#ubRGqN6%R^-31*!qvhScs=~O4*jJ|d`pX#vV&unU7#fQMG zX=PD$T9A6TQLDA7+-dCZ7?+BAqk+{_d+|(1{z<#74E|+poHQcMYC`$XY+cSF+{Dud zcfE@>=Hgvs<)s3F`z5AiK$ce1<>z!IC|jv`<{ zeizkqDqVaLtdxjWkYN*v35UO9i93H{HF7D7CM$@1Dli^bk|s3kMC#=m4)ohv)Zb^= zYs)K%2Inp6pF5eHBlSKZJ?Qu7moF`wFaK*5F>_@wa^#hk$G;t{7sz&U|Fl3)3?hJ` z`1h0L_3UTJAE*0~iVp^TSZU*Y;U8R@O?#gc3FolnV@+jXTyiPggDbSe=#uH}C@L}Y z4ey=fkeq00}l|!Va3R4d3V_i6^w&as7Dhle~DpO5M;v{(|b3{w6LPRUzKNqYf zzazjbY~e@#=aYwENRcTlith%?r9AivQUFwqc6V$GWo;Y#p;Cf|d1>ewm zDGY@~TpIPuS`23En_DWE$+##yXAfWP!^yE}Jay2A!f0_>3@UgSl70Ww54Vc@ zwTtx?T>&TQfZxBS4WV6zYxQPGhsQ{V|NpqJa{j&&3sQvRewO1qH2G4WgwyVHU2WLu z!qJ?y!|W?ACT!?TQ{PhapAufUdvW*~z`}F?_xQ0b;xtwCu0~`xuW%FAc#LG)3*Be? 
zQzO1gxjU?~`IE1<68A&z- zOJ3DbpdiHMJG{+9hW&k&p=lh|`~&Mk?|8;w9-fh?~I}4a#_H;N&fOmWAZM9NG8fxMi z`GWXocyYk%Mmawg<1djxzwt^_rC=PAm!WvQk%3sCFnA*!afOX!y>r;U(Z8!!<5YUb zYCi{D^VL#vwi>)nsq~;&ZOQ%VoSN<>mufhtmN33pxMAGoz-o8u`dUvks}4?f**`#d ztv?ZN0i(z1SVrHaBF{Oyu%U%hP|Ftgqp!ruAM(x0OnR7G1EUw|40aralUu6y1;b`M zdVQCNC@%CfGr|vkU|@bB;Unxr3d^)b-=pwOpH#4mK)M%>={ts2YODAk4zJ<5ZxrPx zeU+=+G_Gn=Y-8=`_}hl6c;EexJnyng2~%VE!K$l1IcI;3KAQ*E;7VR1HS-lhryAhH zK{e?NaOD1Ab^Vv6_qQ(=o{J>uN#A1O&MVvC=0=8KOS~so!szX){u!S1TKF{+*U=`s*f1?4hC#uhHPB(GCmYqVV#riI$tfDOFtJWiP~ zOcloAJWP4Izbj=QofpxN<@BH7P1fVs2O>L(0|Y#^=b}=g@=+W0eLM%hZ6+@6tNd=j zKf-t%R(amDQt~jw7eBQ8fyln)J~z9KZ#%smBKQZHhylG>y6{U=!`)PQx(_{58PG{DqpBJhLoAGw2JR$?aJkHbo^)%uvd;%6zFTTj69o1A;!D4 zi&CoM#MOdCALOjT<&5pTyuMLBCIJ|ZQLoY^e&9cq;*TlrDMDj)E+Sj>()jrX@k_xm z2r2x;Ji%=B?*AT-xQL<>0rsfJX3?SB`1!OGu+Ph0kpBO9bO-kU1Q?d92x5(K&Q=o~ zUJ&(lS3>X)P&fBB_l;1EvH7%PdslQ1YeFpxJanuPO2lLYXB?^>l;1CU&U5t?=_ z?|6i*-NxT`&kprMZ3%=&ZuI??iUp~p+H|FueeCE2@6iVF^@a)^Ocha(D*Rntu>uIm zTLgquVU=N5bk6SbK#IzE}&PZ*w@|dakag}v&cTNQF%!gj*L_M;AK|fF7ELB5@)z3dnBVHQD2p^Kzz9*JjyXqvO^mEM$qLT}bM*v@xhF(h?R_}p k_raXWj`fv?Im>JM6Rn>$X}XYy7tAr)^%}> z-;qAQ?;joy?mffJ9yqvyo=2jMACN=u_LUuCIan8Ms39N#OvBGH&qin9?EE=|t`9*scdt2k4@cHw5 z!ro(30nuO|d>wpzg6|_(8d7A!65`x)Fnu08J^gWddVc!cZ_^{7xmvHax?VB|wI7(x z)xYBLCJFxw=&?ko2*`X6BZ{_*A=OHcc~z}s44ci)mfQSQ0(yin1z z5)JwU+QJ=zpFjKBxQ(xU(~2RK7$gaDBGB%xzY*=jjZ|sTzW${GJE65d>Iq>Rq`v&; z>(LafNO90q(ebfp=s0KsRJ0s@x9S}{TR!!Ey)g^1)veG)WX>7R`#p8w;=6B?@D=jT zOF*aiR^PE}A|S{4X}q`cj4=U}JRFzGcLq6$<-h~Qi5U*&}Ps@TEnVS9-=aiyt1LUC_ zB=mRI*Hzqa&np};dt>)iQh;ZeQR6(EuGaPX-F8;~1L(CK7Eq4o z0VNf1#Z&oH6;L^j(B1zgLJsHN2hEYfZXWh)-Qz}5Mc%8=3BDG3_q)uktBE8<{a|ck z0%hY2_2njna3hy;oIq>1Mz*Q^&7cFV zTD<=@65kPYDhsOEf2~9Yd@T%WI|F?y=)w872fClgKxVkGHGP&S`^SSp%YUx9Nr$rV zY!O}EoTu}CJ!uoPELIucdRgX&I{uDO=;EKiq^q%!Q-Hm$KJj~i9=rzPtkRsB08S0mSyK_A>_>Z^MzyF1U9C1#W+qf9j*Q}PI5_@wzM=Uy1 z|ArOCb2iuH@Xb_-f)MzUo)omh7~1~FP3XVpau|X#NUd-PrKD~(l9ACv96@~rmH%d( z^o4$FLT^$x=N%y%CU=yiO%Ft>HHPtt%j+(LfhDKx1dp6w%FVYsfT3FGqW4MLpA!raw79vnO6Oz@C(p 
z+zc==9dmj8`?tKJ$)jbawDjZx=3dpo$KL+${L8u72o;`5aQF4e$tj8vi&l@HX+CRQ z|704P*T1U2A4mL}|Cj+>j!e?Aj;25Pxx zr7!QDjW3_KwRt#Z1)jU@Z5{OVrkL24qNiy|Sv!fsm}A$Y8eHh(v49J&+KTR#{Zgkv zABW?Qso8I!Ofkb+E`D9>>6T$Qepy4pj+Ql+*jCQJ`^Fw|MA7ygWVoA8_zn)eMlY|u-hbxuzM$`s)Kld{)XyPv^du*T=t{RKUT)jU<1r|tM2%|WhiN+b+G&@;w zit-=yJXvq`_8UAKDV2)j-3nV&u)Ae3MT1>Q=XL~fAN`G8WrX(9;Uc8p)8%Ftm;iWI zR7w*?o8^Zzc8aH`NHs?vSm6vY$EwkoQEpXj7avSI9D<^r{eWIn+KiW%ex2;I;MFvT z3qB(%gH7IBttQQ}GrNX`&8%Trbcg8KNSdGPcZ`NTNd-=3hQj5-i} zYf_7R{VVo754t0f9-_ZcE!((3e`~agkud$B<1L)I2ZDO@Jm+s_WxtEUd&uzkfmXRJ zHRhVW`e)&5Z5N<}*xw1?xcSS)?#3k?4olmd&Q*>+eX15JV%DD z{$HsBXY(xf&9Enj^Qne`RBHZ9Q37XfZYo*^Wea#+GJn&Iira0_DQ#Pw(Jed_7a7$J^7EdqMl{{b8426EYG2MA<8J~(P`mpSFUdkxMgtdPm44m84sLsdX!MHs(1Tiq{?-ZBY%3{jS8 zH|0<*&ru6E6#flgI~&jKZYcPBFs`1(Tr~rjv-(Ft zmTxWO!LBvTE|${jx;t4;&Myzj6W&%Z-cqBDPP=G*F@?j4{g28va|Im7QdN|4^^ zb$KdO+Y4G5EF8MZZ$v$eAg*uzx#0J-7$B?<2zHo5b19&;`8Gj61zMVeIAmsaMG>7! zIq!lf{56V$L<(T_7l@V$s5rsGoi0mMiMT&_F+cQ=zZcDs*|? zD(cC<5WC(xpEWR1fFM$P=8h_l66|O-rS|4`I0~LLY-zFMS>2x7VuOu>ab;u_q+dUo zLJ32>6d|?dU+e?yQ*mdAaLzmgAS%&+3WD5%E2Bcqxya2;4G%r|C;M z@bDn{zV&C^fI0K*zVuxNozrxxI2EuS=H~UE*D81Z|8Ps z3pdi_$;i-5oKIavXNhTAZr*nbqP^E;Zj;>De=qeiap zd0_M;kZ9smuh)Y-ZGPZi@IK^EourQ%$y$GNC?yV0IT!!Hf_g#q2VHn8aNzakz~t3m zTO5fZ4!(uhY>Kp09xH9Va(Jz@GO7zTa^>jBdgV-4TmkhlRZP9n%5!v5wf8x6hxupg zf@-xStiuAkBo0>m8s7X;CgLi%qHRn-W+2#xzJv-7+u!9Ph4Fysg{NxYKIUD3GzJl5 zFC5@`i=n&%Hnxut(=!9SCd|MC_U&ORINLVYQmO7n`(G@52q^f-|nBYz`qeJ)H0#MSfi=iNqWBXTbr| z)s2Zf8I!W@xV*FW6GZ&P<}?eNC3rzt;tm46G+Fm`m{ZS%!^I?)JC79?A z$*~!Mq@qY2!#5kox9lDtC8m1^c2lP-1sAd;(+)Jn<~CzEyeex@?eS!+2~@AX8YO6mnS z%{d_il!wVX<4^gb%H(UKLEbY)t`B%%^-v+x*^NXM2y@QvNPLdP)Mom=_WR`UUTl1K z1Q>%k{QbY-fB3WBuJy+A}Auv-MW*$-iU$=pWTD5GIWql=?=s9i)_fqgjN+X?R~8nh7kkC|?%i$@zJ^;G;mcWWlR54i{q zy1#wS6rw$L02Y+p}0)U&wIN& zj9g#pwi&H6XsP?w({osYWe6-;%EYnvH%4>WkheUjIX@ik#=`Xe9h6(c#ISS_RP`NI z(mnY3br1^YcXW7AG!KA&4X)c#fekjb;$@eCYQ_oS&~-TwBQMsZ$kwA#0>SWHp(YZ>Ilw|jDAJVqHj1Cq{ zk(*1z-=PUjaIX{;Ucs81?hYDQS}Tn*pCBIa2~n-+I5^YgS3q4Q`s>g(EAo!bZVKSw 
zXOaiMc{=Bx78Y~2D*bN?KSIL+kl4n*h z16ywnHM$oGFTqIN_SJoi&G@q{PYz7$Q^-M{^bF6DcM|Ch$hPq9Nt8HI;{p6CjZ9S& zwY4L2h*Vf(u>zo0e4+cKha4}Q?5843jRK>`1H69~)n=iv-7`igzFYlYdp>ah*hlxS z0&3rACEv)_>L2b^!&2eNbCgVt_J2q7`d9eRkT-G!S@d03QC0QgLFiMEK0ExQvc7xH zO&AHLPqk;SJz>1TXS?3s$yr#V-Dk##j4%(qs$gO+DKUj{QF zmXa#JYhK!M75&v#cu!vd+-l05r!I9i= zfSV;IfA+(`u58Bt>!)LQTyUi9?R2CLU^tAdfEIf&xQ0g~wWer{GHX^IIB~A(dvtB}$ z))D74C%$z{eOn~d;pB>-8#c^M!v)!U=#|;sWog@-s@8wCx60EFws8WNVph~hrqs8Mt0*?U z!#`DsYoD+-3_K!)EVOsYN;a=&72NE8_Cu@6%asYpuL|I3P~CP(q#SYZTa(!cUIhB@ zaUL+O* zvm;dy`jL(!cjzSnNvg0IP%0U0!Z?@Q6K9!r_=qs5?T@$juFbOyCVFN1X*@*&_^c}C zm;aWFgZ^eERareB0^Q3=-1Sky*XhXpVUeC-MO!)p_rC>uorH@L?AkQ$u5Mt#waTU* zOaL<0ZA<;~pXRb5@w7$^fCk`~ObT=5L`WxI%cdTZc_Ae+%vMjHRlPqvH}MYHc+5R8 z@bub9QhIVkdqlz1{K#9R1i^a7$Jk&$j`ft(LJOnGk)rGpSLY|5rhEv~&%2&yn)gbl z!(6>xI^MlAM@U<y!%LE-=#T@Y%?kzGaKPgi0 zmY9I=Q5Q_1)~74|#}gxE_nx;n1~41{CGm8yUvod>m-5Y=Js%MU$evsqZ3 z!5}*N7}d8&Sb`o=+bCPVjN{&l8QD7gMjBxHc^K}Idq;HE>NQ^Vx7OQ<&PU7@u2_|E z>m4S+3(sXg){pXFTEF^*hgOsV>knw@>FKHe4eHbBGVKw1R}8Vsi_F2_`TTGyv1oVe z8_UkzHx{P?Sa)m%^A1gGXq7TXYLTm`OkO%pdR?=wj-<|?Ugp2J zq0BQHOssEDP;!?wuW@1AQqoTv4zWE8Y-#jT&^<UUp)v@{a3F7Q4dIC7;OC^&6YqE_nNgk^M z|83^_M#~VzL<)f7b0utzusn0vc(=r)k>3VIoV;ShZdVsky~*hWVB=x)?DbQdp~e4S z;Vpcj+ikym>CwKFn32I?(ono5D@)ddqgs?HmS3j)Xxh-R85V?Yg(y_lnh&-5i?*Y* z9VH!DG7?^+P#Vts#qZ>T6~%fyh=;f;zVe20+3U;;*PSM3G?^herk7Yx$%;&P zCZYI~NpDr(we91+r9rn%pr&4=XwN(NVY?Emq-ZMcRT7nt9mHW=i6;5=2^dX1Zi5Wz zAshU6)I#$rL!epJJ0R)lE=fD=9_p=Qhk<3`%m9Bm@x9LPwK}{vb&42Q4gUxRp z-mnV(8p66Bw!fz#dFc`8A}(?_p3Xf)D6|4(+X>{~_d(x}78b*qN~UHsFETBgCDQGP zgESR6AH7`3)zc(=Z@U7I8VSxAX=J%QYx4dJD&u2#>A_L3T5VljUO|CE^k8Xe=@$ii zJySl-Yz%8_$uwhCkUIOfbTZ7hb-gPyHi`RAL?tOs<(-LeCNg*{s0{-;j72BNhbv7A zw$08O$*Ye&<=ToLC?H1V;a)CVj(BL-wIS$%?({C)hhld|rtXV?Jw!?PEBWGs@5{d?U`*iu&It69(iueXLA6z^BIQ@I@_YVox z;lipaii*|~-a6;mceRd_ymuis-K0*?mS;r$EiFOR~M$fa1vNAp5UX zBmb@)gv58#xz{Z|k&I=-3CsjdtX5L!Tuhx1s7%ZVP`3KRzGzAdmwqAX@z@hCZ;)(S z`4l^AtZQTxSyv~9-v8{{+Rj(ek|&QHDNxhV(_3OzN?#m&oJiEz*q9*`^vI=gh4y`V 
zx+(L9248C%p5t2A%EPW02s$T<#6Q6r|6g?DH74Z{!$ytcz3;JMQW7rgd9PyyOC}cG z>npYVEWvU$7K(Dr3?@_b5R@m#-)H?+eV9dt=*tAviiIC`Br>NvPfIPpCehvR-d*E3 zr1(~ZIThFp*sAItYW8uRbNhKS4LgQ-rHA79wpzcBc~GsB@-fyjjcixu*Y$rGNbcYg zB&iAyt-7^r3}kukDZzKMiNw8|yzUF1{9K3_K2CV&ViDGNM*4%$*=w1q?WknaD1qtY z->O>O!{*nXtFFoXj@^GBKru@uds?a5XtaH({1T<5y>;__Baax7nVG z7M*ch2j`bYV*xp%H1kwsXT#=%u#9p%cPn=vQ=7&6KeQC#E|cZvQ{VvcAJx?c_brI$ zoEKV|5@pXHwqlWz_lzqaM+Z)k?|SEu95f%Hjmk{yc#s9+vIWJmBO0*06&CnPGM9~S zw+L$gzdZOPY9NTBZOfj_?OrGN?F$x_!`G+t{P9QF9zpHdky}1INA1Um2ZNGBGssdT zn^2QYBQGeODp$-af+|(`}ygCW2Muy2nV#J zq{LwH(RUX?W`BwxKEu+s6ZGbX-*oI6%(SdBnSiUZ_EvOIifXAlv0iG`E8vy>U@%+@ z;3WeW8|E#-C;G1kH`jx*|4Wmg#)eEHtI}o-i?JvLIWPotq$C1~1=7xL{Ys9tL6AWv z7SXS_)6a<;=IosQA?;1y!TmU1-tLY0NNahWXY$4KY6Si~&$)DbzH43#NTOi9f)M=< zv(A$yugw;00nOZqsRbwAr_Y~*5*O~Q$WZIJ9=43{nQ&@l#}TpwYS;d%ey)kW2Dn@w zicyt6%6r~WXvqAce%-atru9V5yS)B)c46Cp#LGQn6K9ULUoO6`fdyn0O^utls`xcJ zZuOGPmlUHD07OfF+D09i9^A-{MFgzUeiw6I!_dc)ZeUa7N0=yeD3dP^XI*!9yIJ_& z2WQ16Ragsed2;6oEezvblh$w3PT;@w%n(&?BB z7vm^DNpTxB8~&N+vp3n0M+y!hQbMn9{}2%c(;U#ZUuhc(E5XoYkfD;J+gE<1EXYm$ zzW6O?=v*D6ma|#r4&bT)WVLKt5%3{m&Izpnf)jveji-`B2f=dX1cpU2`(M}4(BM^d z<(S%zLO*7e)dviRpfgVsVq)T*VI`KuP&`s?=!G^wwV6dH`@1s{@kOrjuf(+HM86K= zn~iOPY<2%(!0)}gp$@~hm+WJ(sW>gnW$|5MEZ(l&3r4{Xd?vTgs84@m5u&3b!rwhv zZuD)w{gZ_)PV-)59rmL@>#if?#i4Dd$NL|Olt8)2w?*~)qxSS?XO9Q{$g3pmhwd1( zxOTiHvQEsZ1pHjH-6gECq0z?glax1cHTR{24<0fa&M3t%SJiFwTRELT$*(uAm`|_}WHMC;Mli;(T^ivp6NUGKQ zPH`C$lvyDM!%D&dhfYrOO$Ns+T~sWpG2r>N{t1e8_p>u?d2rHaNo7sjU7 z8+lvTGThnz047&pEC z?tgFWcOZKZ`b~EAeltb(!OqBtZXC1p?MOzC>!gxfLh7IHt0-jrl&iBCuOVbX8pm^q ziFva4{nra{uLC|m+hJlUA_SbjSIOz=SxfP-|G@nEjyNbJ@t{rd=^O^X68Ew1;@rd4 zT(5=2)R+g~BySPvES&q=n#@k`jwkZ+KD{{AY^Yhlw=I16c5GIArnmpo12uFKuquNF zSc#0NBjpps_UV0-Ux?Q8Mk|Up&-C@*GE4gx<@vKIC3FIZBeBY+n^jP-MwaVil!+ch zFB)r9cdu*h!{!qx#1>W~5WZU>r{sHvDgAO~&9SyIgPww%;+A2+_<$1hoU$vsq4H)` zz+|9f1KLeNy5{-Zl$m0#}c;*@-K;uDL-1uS{FwOudC06*u-ZVSjJ z5hGYoAM3r~@?r?<#cDm)^mqFXAJMXqPo?f^UP_Rxd^OH%unnzd;>tVFJVKx=>izqi 
zKH3Yp!{z%jgz>tWd@_yV-gP*dhIGgp4s?8bH~$XJoRW2VaIZ!BE`c9 z1RCxvm$?95%|0XqPZkNyPQeYSIYKFAh0T>Z(N6s%rf@Zo9&7EMatzi6r-kABhwe{w z;(|Kac4Uh&{`L{C$@8Gi>i8zJew?63`7ako&Z*qARkU9J86au0ho*3-?ZCpx;M?^J z-Yiddl8=4&#tHoeXqT6lVZi6Vye7>fzb48&%9oxRorz;e4@oE#=dT+d5T0(x?>=w% zfxzv`GVaMX#(L?r-Hwmim(d4GsA^H#1Oe zrejE|$;}ccdq_{G%W(=YKH^Es5b23>a|T0OnW4>63!Ak zvd7D*2D@O*tVaZ3AC!w_1cOmv0y^Lo9C5@Ne*g(_nIg*29?>HT2qL)hIG!-V2thXU z4krneJnn+=Cl~2)FI#hK>5T-=$Y&X934T9`op9v3HWSg{(s;eDu9_0<5hAZ+1o$o> zu5>a*!z70z(U$}IA5f6<6Gwm>|7p%rK1P(Kxv~gghfm}w(TeZizc0GpN)1i*_CLd7 z$W*_?R9-YZzj^LDZUkuZTm9yVJkp*dy8}g|I+>&GN-$CXtCPlP`zUlRi+M;nD&YH8 zt@4brBpB`sJFnf}dA4GL$$rR%m}McGH*)N3wr;; zr&1cRXED>@{#ErRon?@-a~Z~cP>ZV*JZ^Kc)|V|FFmRa^X2b?r@q=TNfD#_>PW(qJ0+W^H+qgWm5>}h0nyEJA-Z?Bkph-`%)FlueaU(+oy$!$&rWMa z)xylcy(X4~3XA5sjD(fT)Omw%>pAv7rM;yY)oOaY2^7c)f{I%GB?mwA+tg(ai=1@# zrK!+fBV~+Q=}jk`e}n>Wm$9BjIW^=Tx zo3E~RbC@5~-MX|>FP9old073TzXKI=5fBK#p}oDmHh6301^Y;CBEx*mU;xt5vcj@3 zIOxRAn)N(>qR^67g?o#@LFu7Y8_4eS^>;SC*WlR{qe2BWfX+s46C`d zwFdKX(8<5L_3c|ZOAy4}1C*uSn{KvXG{!YD=1?ZXrY-!hvmbZHMGXh0N~0Q&FU7d8}-IWOTxF+_IA^N*s2#!aaICjSRJ z_uqFTrxM`lE7Vr6CUNZT9ov7*5Z`ZR6x^XVx;L*E{WRA9i>W5g=>ES%k@U!m?2&=Q zf{J@cHa1Parpc_qdF9@g6LeGh9-j_b*)4H$^bW;*YX#67!XQO=hrrSWu)W;;F2ddP z*Uo(canBCk_Xh=fPjrQa&~{J?i@`PFDSLP|?C7 zTXnLYZBNqH79KTlrl0n^^ARX=>;+uM9Q3M-FL%+t{mk(+=6Uzmslq^u^yELf4&h1sKl(vj5y_pZaA zl!>78<29S})8nGj(i_iOMvYhk0Mfj>KPb!h2I9SKVqgHNtQ?vMW*YfU2h?igyi}kh zkYYPQVv!$0bzkC!{|$raUBg$5bu49}6o!lA>tyRUKS2v+0P(Ie;GNjg$`VstQZx);4Tee-32i-r}kXD3ag1V<;rs`{IV8EgK zDUF#6Z}eOnG0Mq5Js8z0s;Ds9-UApgF0-wsKWFf$K2tiU5kv7O1~+{b z7RCQiWt|XKc48~p#kX0!v+lo87k*y0{p4VLQ+F*$9`cMh7^o+tzfgMg zVg(KW+|I0wO|r|=_gmA|go2RMA2CyEB(-k3TUoT27Z@JI)3wX|1gI@x>kfQsIyyo# zEf+=3fpuXkOkKtIR_fl$~Y+bI>{NeJUnH$wR=$v z#duI93yNO`sXOx@MyFRErDxSAa{~hds~=h2aTXRjL&bk;43bTHLRdnnQnvd7&B5*b zTYzS7?SQscK-qphMUVf)Wgg3UNB57VlSj>PZ#5dDY!lsN%D^nM9z zP2p|7Trbw-O5=c`IQ>KvCMLU!YG%B@>Eh`c;A?F(Cw6c>u9CvwgICMJh??)|Zuf@; zXt^($Qa*z;>5x zS+KgPd#sWE6zkzUvt}l6(98FW!~n+DuE&sb8o 
z^@H_yMFGss_y+rY42ZnTJ|ab+inaFTBTxrLJIhev32C$&B{pMh9tRT%Yz2W11(CEn zQw5O5d`ru~-L5Hst>jLTvL8tI&O~qhl;0x!#5mMJeY##{`-d{qAMw4|+~Urj$PYK& zWAZuSz*o8nU3K65TN;8sCN@mAuKk6N>qOzPPl6nZ;K8h+6n|d-x->X?(m3AbK^Kf% zv%^qyc4V*;arO2|r*XT*lF34tW0!)9KP_M~(kl5dS#2KW_^s0SWlHE7Y~PRhWvZ~1 z)$|6a55;DpP>>5|K(~toA1xA^HXoNTv+Xv^5VEiMDj&4AHVz9t$U!MxLBut-Ie$s} zCo~upy4ny|tg62FwgCnYsKx88<%SIO8Ih6bxB`RtbZaN-L`~(rWP?lq>sBxGxHf@dseSWHJ@>z zec%*F9#>?)6X!_`G?Eo5iSzKm#{uJ~j-nQ`O@i}NM&AfsoC)KR1J+ygH;A~=%v&#Y1QKm2JUrJ%^QEci%sK5`ux>THESOhIaFh?_BIe%ED5$ zWU82|l&AyIFP>!Q@dW$s_lvvt^4EXK&29I{ksnh#IB$BBnj#QpIsb!B5NWKabH$xA zIlIf6!&#}_bM&`S`4wlIvuV%rVT3#5rUkIsopC|{mR<4eM>UAe#(Wqr8&I}DA9@Nz z&TA(vVk;o$Gg=uBlfztJ`n5fL#XrW#ZNvu>kbAk#hlzkg7dWv(hmH!7>UVi`n_6IH&-;_gwUixz)B(@11Rq&bQ&YL;}O3k7<6(7|%e6vxsuf6!IpD8_$uvibGqNLjs_H?!u3Sl@)O?0%o9FA9ulIcva+ zc0SV*MYSBh{Ia%n%1o5-2*K?ox$<652(aR6Bf4Vg2ulw&x7f1iUJPc(Bj?N3KwXbK ztWjsX72}?i!LOxh!z+ATe-%qh3O9TiRl#)ziu_(wwRhc(p zOvSvJJOva*deJxFGDCxr_%ktke#TUy|9;MB?5p`JhL~5DI>`OZ-Vod`=G7;P*mw(; z)BLtpB_*2lrKA)KoJ7oJkt3W;W<7r9r{a6*j2y*Y1d7mqfX5$=h=9YNUphNVEJ62w zC9)*u<*^=Y&rZH18L5y?(#o7tI>qRHQ*WPZ^wcpid3Dm1d7%EmVGq8_@hXL1XSm19 zk4w$81+vNnr@?G&Y#^jHtuNR6?I6H}cGDrJT~*1M_Z=v^q*bWpBQI1C&j@EMo8jl& zpsqh2C!c|=@6cuPRPJVpJ@_NchrnWB{QS#6H!4Ti%18Ih%ShHtT^V*O3TZk26xX?i zVV@8ta)A1=@gNlL`~4u3vbD8^4Lv36$`H13JDh&G)P_faBQ)4wY=jVDn>U2FsyQ=KTTVXWF`#jkiP6ldUrQ_k$#pgO8O?-pxkOScaUTc|BZ} zDgncqh5MCjojNyQy%fE6jc)VVo-v^5lw8fQNy^G9^FLI3^VrV`au(=Fc8e&WfrP{b z+9I)h=1$Cm5C>?i!LraZV@R;HMewm*ru#4YG^N)%3wXPJD<(jPuE5;{sigZ0^FV`T z$pozLN3O*JSVsD)0+mF5RGqq= zCT9)qOYfCbcjOhHF2)MBY7b+LMP7|@U(%)Dy}eH(Ntt%wv4X!|hh*jz7Za1v3EbdD zMx=LVy-CiIjRN}X-2ReaKagqS^eA*GP&~=@SdwzU~2N%oQu;!I|Zy&@NNDQ2+O*J zG-bT(W@+{NA?dS&(GNo(fRVw{b6B_f^H90O0UNjP1Y?cwcJFUmUfjMonS|(@%E<8^ ziHnObU82c!u*4sLIhRjxdXxVtuY9g8=P(x&;K+l#*h)mx7|&Nyh3~0t{2Q<4@E`LB z7d9p6X$lNP{hJ<~C!dbU57eYU0P5#C;%D0PmY6+inAf;<@vi%V}Io%4&vWHVP zui%TLTCxUfg2NPVLZ+-Et!HW+(x`+(nC{c-t;sMMDKV(mSX|6*8k7jJRitV;wrF$Z zT@`WNJyq|xyROCTH`vnmi=~sWxLiyZF0}NHMp{f(STHzaj+Xeb<4@}u&15Q^L%^<( 
z6EXX`?+dNHOZ`56^~vwef^h+d99*-63|WMIQ(@Yti$$nlB)x-P;N1?f^@E`zeJx z5{$91tEzm$idj3j%|wU&{DRLU=S6Ga?HzCQ?fdn0C#P9em@D#1lp5M3m1IIs^u^~# zW<*zW{nsYH#0p;wu?x5;QZd7^OZ@R@a5?Fz+oZWqZhk)dvUJ*Epc13|+l_r)t;Z&`768Gb=(mWXO_63zePp<7XS=u; zOy%ZQ*w%HIErJRV(2or~@mfpf5j5z3@9K2o_&1{)dERsejmU3D2=Y#zD9t<|Km_KY zWjbcWXUgN@jwGwKe)3@P2MbJDJ`f8&#B4RFX7m13-3whg`Lj_Qyk_>ad(XlmS%Rko zpSZ4F1IX!zZ?2qXotDAC;x3(G7P`lcEX$ZczfR&053Wys10qvEj> z?Iin_n&{1mvd=3eb|UhpN075^S1OsT=Gt1Ckn{Zpy0i@| zIe9`9@U@ig$~g#w;g=}j@kHi-Pgy{CSR11Ke{%bOHXNH(X`ci*GP*(YdH{=P4)svX znb^80K_C~6RH7;lI1bcd%-g0mj`rJMosRj9cdipy-5r-!Maiqgzjkcc{l&m< z#DzkX*1XPFa));(+k=U6kTap*>I_*Hq!s>)pQSPj7Y<~>v-2Zc`ZJV|vx3IHMJjb* znYfHLF?hg;%u%NtGyW6kmE`V+*cb`~>xc=#+*ND694cNIjS;GjVyq^OP;w(ZEPl%r z>Nzq@5fKsAS(1gxb)Me3&@LGv&sFvDU@|240b;ylcxZ@xgqg_~_U4xGb9wEGWVnCG z$!Ly#&V{@}6Q~6kG4Iz-VjgOVI~97Ji6eig14Un%+}_R(eGWD!1v{&0p2^RBuAQV(RZ}w{ zJ_jlTF-wVZplpW>pn$jmQFn%RSoZnlZyKb?h*z26<=-T(a8Qx3I z7ew58rh=nE+{&qo(pbdsO-#0y2`j7s0^e2Q@t_R0lv3wS3p(mCaqCd^ORd12cEaMB zfYO+iX*t$C5sEw|PR{(H1)y6)7^wT)V3wMCNwhl?U_x>&_Fz|b{Y&^0=*#Gn3&4)m zOXW1AqS{r})6;8nA{y=oiUD#9q)g$%Lv@vOW#Y&D$~q{yg4Oyu?Xkc8iR4@oJx=C4 zcad}xPVMDs-@cg6Is~D&lfA&c7+2?)O%^|?1MzD>)Gt?_zt;JhT35|Q~ zJl~h(zA;nl_#1mq;ra?%cSy^L4!)puUjTCI@xcALg17%Qz><8JnbMYgFix0PS4+Pz zCi374?jOc{*A&;Y);+r?R#3v$1{4EsS4ak$4Afq%3L=7-C-YHHsT?L15#WlQ>9i2} zg?<0W^_f0?3uh9wv)bdhgTyM`n3~q`n>rOC>LCm3KBDsWF%XNHE)HC{WqQvUw5*1;RE1TkW~OZ*qi8Kf9K2Q`qF!n zwfi8z$*Gf4RPib=F8*;a1iS_@$!f4upphw`qDMe6LMdGzYfHokbo$)I{HlMxK9t)4 zlsR`gskDmjz17TOwRDg%TR!8EJ}vv*|GPtuLpmI)%iZcV3b(pCxa9#KNBA?Mbq_{H~YXLrZ3Cwu0L)iKBP`dDhEW}rgQ!AqGQbqWw9!w;x!)(;rAtnoWKr-1Mz1E54eRaF%ptWPIs z;q)v+TYhh+qx_bMeMR--`zEs=E2h;a>O})EFI<Hl8mqljxp*$ z{`n@apEO2h6dm*qO7fa{I%Z~!`qUN(@~ESPYa+vYKgnc^dQNuZs6*cOTo@ko^ZmkM zD%(`=Y%anGO`7W`{%6OL<0+(U7}>ES$u!d_T%8=dfa?CK?s_=TjU>hMMWYv!I5z9x zT-Z+=TDYG5!sD)NOp0&ff4u;rBy?zLZ3D9U=YLW-j2k@)oGpWjc#cfo-DG$tz7RNC zGoLjEB6n?~rFhLMwbuVRa!~fdOhdUZTbhAE+~vSjEc-j)mm8qBMPBsXqNbry*pA9) z$~~GX@C>uP*~vEYZU?Z~tLLiwYF=lKI@UN~yDN!TCALb)YUmawox7oRv?s_bY)CyS 
z&}J~`)%qm02~cEf*IAjxqKe0m!ABM7)&S=HiZIlO{t)K7KWeY6$W>o~g?2NvJf+oJ zdft=RU7v0fvCq%Xe+2M)Foj!l@`)Qc*)dSf4F~?Ap1BO{(^owL*$l9~N3Hca(m6waq*o|xFy{ELd%%t_Uk@n{dZrtTjcBeriF#E= zJ{fbSTvsPbgC5cn@SWHO-om_1%Ds``(ioA}%>}iK4wIap86Z|UZiR)B?3`>?EH$oW z>pihJ9Ziw@g3*b!r?3`CB{x)_2Q>nsLF;HaRZIWDNox?cIQXD2(<5fDv^zH4?DSA% z_l~UHj}GGRZGsA@=W;JzaFWU#S+(r-@cAxA(Cq+i93?dXNLdyeYBt#A9kvqH_LS|a;(gnmw9C~xIWRTe5Ya`GdiT=nZ zTR>A(BlH3OEhU#-O!$vU3K9IW5hSPG`+*AWvg9+)IlDRA(&ak8{TC081s1CJj@bx& z?cipdVI;)FV-Ho|eT*}I_Kg0r(Z*#jK#0ILy85?7YN?V$`kr`|^Q;)UzS46|b8Kv^ z2=XB&hL}b_8A90jXCd57@K59Zsrm09iG{cL;kM&F0a`M#9Liid-INWfm|U|-wMtE> zV>TuSA0dSjAADIAn7wnt3CE9{>qRZ4EE&H+Da--2oy4Q@qm_QP?Z0m)wxdfhGHc=z zpd6yGADMyb0^4*uN6=ubGg>JU^aT3nz%m+FZOr1_*bZ#SgWe0gW9|CZEKqqcxG`FY zCYhIi0balhVQkj#x2$vexZ)AXfBWzfn>8Mn#RJ-*5W>byOxm34e1* zVJ{VxI+x3`xny3YV<@KLBK@5n%}#hKdg3#syDN2VVI>#-tLI0X6_8)0&#K4X=Gz8F zgxiK?`NE?!r2Kw-Vow`vw>^_gC2>BsNZX}qo&{VFCUZz!ZXlsr+6Q&R-nXS@zp2)e zqcap@X%{a|6#)D$d%z^>ZW4(x1|19|P?x3p?zT60OT2t3orI=!PC63Th*FZHQ+Ke-cet2?pm zjfusP9ttQl@YHDe&zsHH(!d0>BH8 zD-UgwCZ=!g>PWIDD$+8H)o-ml0S*S!Hmm!rbNyn|X1S+RQ1Ws+Axme@`2kmV9eGbx z_s^r=PE`Fua0#c|r5C5 zzf_G)eo3XZ8>59wQU6ENS#U-9yRcCIu48Le(CqO;r?{7NHnb?<_fE z39_z!+j<8Ss}`1=l9eCkx_vhxq@x`0k`J5{a?s;G?06FU4Xf4+fe2b&g1{*7v?0Ga zY&o5{<-(s6U`BKQau4tS*?gn4{&K(}59HFbM)kS}%WBhQ3PQ%_{uG77m5IH{D!N*2 zk`+|lD!s;*+vBKN2I?|C$0m5nSzRovRkgTDdmfzl;jtJ~TQKM;L;l2#nfD@~zVHQL zR__AoEV)@)_T9npNasf`G{i~7txQe-Z4sGm>RM%FF{ppBKaHXuoTP@;&sI$>I_O-M zjjr~SzQ;bldNfoh?CnuIB0hHso3ai5WgHU>>}ofYUUEe19*;Q@Tq?yCn`w?8x`SHr zjU(~!?G73@LE>zp|N64+vE16=j^n3{s%pHDYun9a+;Ls@kY~` zlXmk6oWv@uj||RuRw0UI@+2Y9>2#6{>8Gjs4Ny!b%#Pm+EaZ(ngwcPUtblSK^f3*Q zaTk{#+1eBmE_&xI63b3}ukFPgnfvp@_yK-D@$Y&6{hwKPGQCgKA79P8E_s<-TVpjF zctPlLWVyaD+(7zh6u6rAc%5SzrB1)b^y|JVT}KVLQa4S~c&l8M=80!d!xBI%Ku_40 zj4i4B--r2{x_3O^<>F;~;LTzahdef6eG$fSd;Oc2VE3p*y0<7yFAyq(QqH2{=fvrA zmFxBlr=j&jA5zRRilN_hzxEM|JzV9c^2FA7g$;(?E3~Y3(}cyy*9Co|G%eEyBWVK} zV{r-t@|TU&Wp1DAi150&bt$G#nQp7-iLG?gRdL}oHYPM*;Me~p)^UsV3@HH#?M-G) 
z>@8#ScyJ#E+D1ReiVywv9+=o#iBJ`?Fj1|t1Dp};x9uNa7&LkC$o^_{eA z%?Fj$ygGkkL`?aYMeX(F1{^wnXZ5t9tuC{S9wmximw^m?5E2G?R>Gf{Bpjg;^og&O zk5?lm*+1G(e4^XFJon)6N9>dt7Mr-ihN$K zRenb(Q`^Jg<_Cl3l;ldJ6I^$=E6rlpHN4O8L=Qo5^HyF6&-gIcj6|YszWiTQ~QI zK9xI--8!(MJe%kjf&{Mf_A)Q_-rLEl?_+YeeB5tKyZnf;%Y+Fj=>EV`+@JBhKg^e? zg*cWw1ro<7Pe57aKks$9KqlthQq9^!$s5Z`uE)FG&f+cmb<`48-f)Q6-gK$$|8a7{ z7X*qx9^U3^#|W4d%*acc6U|OJ-b=iV+gfB=6O#@x`40JR!{o;SCm~@)NjcV4(D3_v z@ZZzqJ5kYPiH$LKAyAI-R~?1!J#&bUunODJa{uxK`O;PrR*&-|`{aN%Be3Z5N2@z- ziS|W)DKAVZj!Le%nVto^pD+XnBZk1_^8wYK@>$B{Zi#o;v`h-jca3Myt7U&kO}C zna1E0F(2<2d$B)dM@1|$NpbaU2*_9yTmdsynv@$BY!WHkE6eLcE&Z%AYC8PZXmrNm z)h8_jQ}EbP+tKvf3w%QbUkbRU4pTV!2ePPb$C}RIca3sKMm!?qQ86>Z@Q;3 zsd;_7agVW(;<5R}vSih!D`R-yt{r_Ka~kIC%(_!qLv*0u=J&htXMR5By#|hXW&O9? z)1^|7J`s-#H9nWPFWge6H)q>~M@1$MzKMEW@2rvIUT+ggvtIotOOaH!>zmQ?RWWVB zv1VT+BB7F6A`Es_?*9HAc94jid^ZK7O--Jx885AtiEZ1xOLw#hSZCzy@u{)m%1x)F zU*pF*vILPo9!L8y{C=rLN1nGD5!KgeZDJt33p|I-h9bIEq&^xSPnG{Vq0ipgi?NVL zmGw*p*PaU_5)FZLB8|3{FuO9~9#1~E-4Qqq)mE6X&=ehOj0${$&iqf5;=A2QRm9Qj9Rwrm^rqh_*ia{0<9r%tFh}uj3lW>7CMeWQF{~3hrl;YM~ zECv^7pOc%e*Kc|xGw>3O#$PxLbh8$w`GbK}YA@785*n_{XxUNw5udxsz#bz~PSUSz0M| z06illlE_o3yY2TY`h;J%q&2)WFr#ZM5o@H{~t@-ADU-<0?K zDJUpdH%+XFA&SL>kY_U@eAx2&I+eN+a&W&%`+0JX>l?pSrry%0p->Syw!D8~HMNS; z-Z$IKPpPe_$Mi4r4ft)h3x_HPxGO*jlnXCv^)!>~&WN=tl8h-%@*=40ceblWX*3~_ z6MBuOpmw%Me}Kv`^vMQKsTA9w)fofsWgy}SZEYp;9DZ<1!fW%Y#G&GhR)DrbWeA8Z za(SJ1CSor2a|ZmvTjhkC)QVTxh>PdBjgJ?E0_ybK2yh*wHPOmpJb>MYx z*h#5=&M~|Bm-^9wSeW0GHN&+)D)fN$1U*cN8qDy0x$*T2E;@r!*$kH4$eRRI+j|bP zrpfu%-TKj?a*gnK|KUJr90<+Di5!(H!$ikhL#gxsxsQ@aS8s2KPfeT{=&VQ zw|S|AU7%iGUY$|Ow0SwzXW#W&Vf<5-29->_d8NK4>_EiEUF(fhjc0)jq5|j_EXlNd ze0+H}a*IEV52el=>6-dviNhj@O42-L%Cv5*&;_$AHZL9iqs+=%^c?{N&*3bX4 zT@M#ukQ2Pjv#BEz#y-D!J#dVF={FS1jJEsM;@Z;n-s>FJfty|wqR~xxwUJHu;9FI> zh)Sr8TmOvAULh6}6-DX#>r8+OhhFn0GiG_%bj2fMr$mN;a$?wd=|EX~L5Y_DwPz z&Ry6reXf8}(BBX#d8q?bQ1?L@|+75ysLlT zZKDE2!o(Q1Xtk1ztKu&y>0-sT*W~Ui2tT~>TiaFQHt+VTd^1<|W2P)}9+lwSywVr9 
z{Eh4NfNPt%C>B)YtaEXLd-FW`P>?)@j4TT|1DZ=HvM@leUly1VD%-dPo{JCy_v*5< zsD&1HJ~lQsH8D?iK%8>0S1=Q-_9bLIj#+6jaqLYH#B8|&aK33EsE9dIt@|FRh1uHY zpK8C4(~}0Nd{U}L#FGF*|KeKpp$5oHyi?Wre(n-5H(DON?&X@;h8-n9%>K(fk0ak0h>BZ_!Q$kMqhosR zb1>iN2DY{r%@=r;UPjr@GFfgiOycWzgpTm5Rm{mlgQge9&0nDy7R|6YI}SLSTVnioaE5tA^C){O9#*7e2e?^^)3!iaY?v zOEKElEQ^(MNuiB4U6Ey#eG71gM0D0!GF~mnv@8Kp?4eT13(~(CCMP}B?dgj8a#LAn z%*+e~p|x^3mt&CVs}Vv9`%EH~l)(ZKb0-h`HPEOby}9Sw+_W#EvXRfo_>x?I_TA^z&^{B)J%RY20W* zcC@*j5^XBv@bctUVyh98b&sQ1L%Z7v6FwMv+$|=x7RExkw#gNTn338;@#)owq<4Zs z4Kyvi&0F_R^k_4&PxR3(v^lo5en3GPhdTA~P>RiqF3kGdmzt-nh3dxvgQ# z%ngLL@yr)VmBq;Zd6LXR(LHb-VmVloRqW5eXQsh<_)@&J?y7V_>8Xa9>i8j`d^)>ODT!=-y9GEfa#5 z7>op*_!?n%!UIyIdPMj@-b-ViokAwn&0^XEkfyhKat}t>oC5b1R5~Y+_n4Y9Pe01U zD6|OISp}MN!z;*LufWxa6hX?Z;ls`=hmQ??7A^&4%BVSfU!@luFryueL$x!5-+v`* zU#BLWvov0)q*ZaMJnqHs$TUe$TSQ5s+^_h zcJU`(1HBHNI^%E|w(oy`s?1ud&%=O^5!l< zQejjaV>bwL&;!|Ux)rX{mhzDN$mGQgQo3C@!dR#Oeye`Aecb^eKFN}B zGdONU2_JmSSsajGcF<$!z-G>Sg)ZHKOltRsf7cW>T`aL9h z-wXrBdI-|%67!WH!E(*Lek56cd=447s0q%m5izHrG8^h`|~=k#OI%u*$PMj_YEepd);L@jGD6u1?<@#J~})S zc^8+{Zr5qZQnrYva5XJpRU2mTbuRk6hexZ`zg8KEPmmmQVziQlgv1%|CD10GzW}Nh z+YQsK5jpu({Kf~`B;Swl`aHJQ_)1B z{g;(!kg7sxbx$(VClg}>jHNItrm_C>AR=j|;{d0-7;Oq(R?>|G@hZVVQONq=$wGM4AjgZCIQWKxk zCa%#d`!bWr21;@54Gs>fF&K=wcS?zVWC)KjSN3oh8${j)V<_o(Du#n!okI_Xx}6dP zb+vE;@-rt+ds~0z<2TfwoIfLAExO}XwwjnQ zvO(k-`5Tr9`n)y=;4BUOfFa+zN%MFm zmDU0b8g7%j~7)^_jr zmv~!3P$dX}DD_c7o0Vci9)dX)^ee!eX%ydNOa;UZGFw>15}a!@Nc~|`keV79nKQ0R zp9zNnurMi|v$s#yPbm&vZjwkT4@#e#D8$vNA3h}q6^sisMnHSzPR?MlA-4DIYxcbP|D3%^?s_0U?$8Q~giVBL1*1$=_(FZHgfHZx8*2fcv zO)2P*;WuSNetN4I9)PFn<4<1#1Oh)+(p-Yr7|fz4!8^1Ykc}^PORo=_57~TFXiYqL zNA2eA`-<;lB?XNo@O+vI-^aRbnKo0n+Dhr|!e2?{7Kudc#F~h=Oc6wWH3_dG z9~*q16BGww)9{t0t>cvw!7gxZ{uG_o{Q-Z>oPLRrSCFeyG}hyzLfyJe{Q^nVc|@f? 
z-0brLT(g38{YJ(gX$%=cAUo;3UEN-1TD1a(sLGq~UwfQ`^6rNkGvC-7&-4)ecH6I= zwgW`9jL8DJJagDQv&$h5c5^DF&GpW2U<`v|si?}uufP;tH`DgHmYWbI58sx;kRwg5 zY5d6TzyRS^wme3YW6&Er%9{-FtWt=4hJQU)F8*J=DalC~bB5ioC3*m?iS9A5ONiDm zv?@dBfTY}Z^CO|t6{u0_KYys8yRLKwm3#}!=ckj+ysKDfo=k?HU10b0Q7Gmu5zZ#3 zn7R+^D5s`}{CjPq_slH!yu(mHrP2Vf7z01rMCb;5p|+-yyd<5A0cdwm3aqik3h(Ik zRt}dXy?RE^ zmgR)Ta7H9AD=A4pAlusLHFW8*nBw|x0W`^{FQhEYwr=8nZ#KP2O4$_IeA{QMMjFs3 zL9pTH!%aVB`Rx2q)(8i&A#UxbmW2^_M85Es6H?s9rEp< zQC!DBc82lora|D;6^Qsyc@McVJ!QGwoPzwqhW#1Hi#>lB0tey5z)zg;KKkP8}mps0vSN!bV zXePzw_Av$jh-*Wta8$Hcat=d2M+r(#)XU$Y**K(7_Z8v|BN*&Dq>v07EqA@2clq2^ zU-|zmKwkW6vnsapxqjvA{27sN#Ue4=r)xBiZ&YrBz80vU__iLgYlNT6*dqosZtSry zsJAy>S2Q1BnA+0qvCpowW2X7U+N+j?#8O(9X^P?zNorUoh*L_FS%ye_+R#*&w+Ztg zv)Qn+F(W;fsINQiVv4*@R~B^0#Js7>?Y#q584HYD){G-x9d+z{c~F-F?^2&NRrsJG zcZ+z3bXx*1qECV3-214ND|woBrwaEV>V_fb0=c3%P}4D4O%Vf5NtKdNC8p7xk$bMl zAJC7L0`tOaO#t^+5miq8Jqf5(T}iz zElmRx3^*Z>1}+qbDkFM&T$zVcC9Q2V-Sk@5tWv#S2n`gzr0asF0Y!$7=$HcONI?Qg z%zWeqySE?+z{$W%K`1EUDfPD?#>GSF0zNU-74OTdplZ0~Gsftn-)IyGe4R9*=(t!F zAOKsQL#0O~_(YF|Ee@yhM~JDDh|nE0%a~fpo4%_S5k8^zYl3mZm`{AJM}!LFY9qFk z!|-*lO^Vl(Ot7K;20=Yd^#w%Q z&d#D~9Cmj9QHdd86I*FwPgIblOKY#knLhK}HyNYSsOuo{Qzv5ARwA~v*$gp?yw=&u zYSm)&bDw|x*P1?UJp`AA%d&?SenciR8fzScfJT;0=Z_Hm>>JjKF{1O_m^1bGb-7KS zA%gag1*h@%tDVU7u|-nc#MZ8OoMFIqlX^L_%)h*X#6{gQ$Z41MvPgyF%^&^y1O*CD z*3NIk!!yG| z92DclG?*WiR0f6y_jf--RzXeX(1JjwK&=lR>ptF$xiD%~#4X-IKMvU|^O#V5;5@02 z4Q4Os#TZh_K5`M?#at0zm_YAZy9H3*VIU*20rfQvug7jc#>ef9&-sNcE1imx6xz9Q z!SCcG+$oomRk4{SSev5K=P;Ql7SiX!TjPTMw8g?d0I@0`q8 zQTFJ+c7o6k=r~-7`T-wN^5qH#S@dgc}CZ>Lr zMIduSch_h;Y+pur_?b)U@bO=A7XK;1JC76MWct;y%EhcpR^cx7a;O7@o4KK73mf$@ zHEz(wW5!+^FzmRbp%_S$H5eMuohaktW`~nV8~I_T8Q-od@D%y&bD)@rWy!SaP%@I8 zZxCL6BP|dhyX)vdgNj}ZaMEX|{pX&q7?I?C{UU$&vwgV5%i+1(aOqtUXlu(-(x2B8 zpUId!mnR8o8ZP6LqL<#jiGBPh(8fCS(0Qbk`jBn?9FACYA8b^rI;0l-{7}?)t#!Ae zSz{x%XOkEqDNFBQ#rn@G+{zJP@yJ{U*>q$Q(dM&rNwSPOe~A&tPCio`qgm`HzGmr+ zz6Crh{@e+Ez6=Ha*oaZJpM)<@P%9Frq`X`#G^s=Mc29MpMYj1;ehvCMAX$zONyMA% 
z5g?9egp8gacvw~$_7vZ);^+@xTX}Sl<)VKl`>a@e4^l`7yp(S z853>i5%Q7nGa6}mOS-IgLqLq8`=b;3h01wC84~1ku{XP#))m7-6IiJflLpj&_4Jp1 z4$rZgP|-yr6sMWK?t1E?@(J-X9k7FjHQdlSr7e+e)lfsClCf24=H@3cil5Xd?XA0m zT!AMD9cR-=zpdW7R?bb8ZAdoF(tI^Yu}3Q(C|YCgwNAVcEpH{BY}LMUDS1zG#=b-tz^BB%Mqhs}4CE%KQr_V~}bvb1;Wqqpp;?qA!}1)lXv z1l4_aG~Age&IrJ?Z9ON^a~hS>8qN+}iT1gp7Xfqs0cO;vr@v-(b#Ql8I7Mz;)8_r` z5SKaTd;xAjZMukdrf}uI9ryO)Yb;f2o~&@8GumVJgX89CmkQo)}`XI5{**X=;Qdf8SUBb76p)%FOGt*;go0B1TV(yQqS5#5G)NXU1 zWp7`CQAv1KkH$;V1RmCaqX;&hjv#>!0CBLPR4EDXzlyEEc}&+v2^f;_=Mt+GAKBpG znLPdc;@1TAQK+OqG%dQ&ZbA5f>b$n;L%)wh;jWQSdqR)CB^o&mw`j{T$PlhN=1{If|$Mrr! zY}&8;`?iof*cjTHN#xnK&NnL|I5ZNRkyu*l%Jb`#e&>9eKirDrfV_Yp5dY-~CQ2B{ zY2K8$KOQ!2Qm>r)&sm*~h2WFOUsy(F^?2nEar-|n=$E+saQPWJFu_zCGfALsDL*gJ zy&4-eRVqI38toxmM^A+l`-{c5$zx(ms+iD6=|2I2C*;>4Y$1B2f5 ztTdIyVUcIT3u~hTRD3K1svr_3SzIXlo7!{aLDZ2xvU2Ma!HUu>Mm!XBJUJOLZw}Pf zt12Am@JqXAh^@5HR;0{H&iZ*3v^zn^&6{|b`bgy>w}XD@t!J6_kU_~R^S$U2Q4p9U z4h2JR_I#XS>*O=wWPQ=(dPto@AK|$3D}`?xm;{mSQ&*os7l;!W21bC+Ad{@?fS&F! z$eOeJo}aL$fkw#5T%86tF)O$1i;YK01!BbQi0nHAb$F35Ell=ZE1Tha8Ft23wg__* z6YD7$Vs^{oLSSMVto!*U1!&1c;uxZ7llx%S7OvRdy>7?F&cVk!1^atbXbG*bAt3nB z)MgZTWF3HLJa%N%g^|G=?tIQ@iZ}BwNXtlc%C(8|6S9G(lFRi0JMvQaw2qRo&#ooA zMYz#7NP9c);Glnn2q$xcw4Wen_fn#!z8t_3%I3V#g;Kv_wQD1AdIkjch;#)?tG?GW z(oij7*KPBdZfy3azsFWKsek2$hV-qLRvn&@^u@bf$mm4FB4#WK-=#1P*^Q`uMPTb5 z2hoI-mhgE1{`H46#@f15LED(}{kuOpvSJJQ<&6a&RwgGm3zp3y&hJ>SPUO6N&NnHD z-nCjD`2FHhw8+Z125@O^>*Zo|Ni@TsJ|0ggn~Me4CB4KyljY^*eq7x0T9AAdS&ej> zCd+22U{+Lq#)I1lj0K21lPAIwtGT#wguX*BOO^KKyCP)B4r$=clId`_1iaxa*q+O73M?qG;K3Qg^>+T ziOwlD!eN45+#iO_$aLgR?x*{WEdE6(UN>iDnAcZZF z#9?(2;dr}SG6@tYb&3#B^P@~&X7x5SPa>!+wB$4v+1yUm!x>+k50PKLi@XJEHd#Yy znL*6+t*Ks|zgr2_hbZ#cb}ygD-p6O&1!j0%%vK+QSg(B_Rr0xh5<9XOMvgvjnJ)oy zN>XNXo{H*`qm&mN>s3^aKG%5mqCVj&93~%17K~1VHTHN>-$uMi@7AIwZ0`CR2+b=s zhlLUE%Rg3p9kgU*`)?FWHQ?2|`Dw*3iu_UsyT&hfG&c|CBIeduRSO#6Pm+JcsCSJC zCJf3?v(@Omou91Xv`$RF+F)lFzDWPv0l;(^d@Y_?znU=I}Y(S2-MO?}^ zHOzXA)%8g+T}VCcb7jM7c}p 
z4a-$v9(j#2kq!U$@t*l@GS0=Gpx_5?iH@}mVk4b_NV(o-qU9OQagnKK{`)ra3D@3$uT2zjCwH{14iDD5TTH*7{;Fz- zt2s1d*X6^TmB6whZ$@1i%}V|EAVXBLW&Re0|0yRvtzT_-a1R((z0E!=@Ul-@vl4-w zWr5<7tYw?+tu4IROAzoLPt??tda8XX%IseDTN=FuysZ{i+SAa4oPVKig#qWS666#T zZ}3}dXmI9a>^a5~MHX|_2T8*FR8?g^o-SU|_5gzco{639s4tRIMtHv_ zes`A)$(~?aHl=Gh@Vg(5_L5EWejT!H-&bk%GhDQkj3_-_sXHP*uMKK9%aUEHUKrrW#a z)ADV|U$&REzrd!r4fxR3H%qxrSUGJJf@@#-pazdcfJV`AgG^$0^O2u+i2NaEdX>7e zRac=Ab?AGuQ@o-z)|5d~SC+qjYrQIj&2{pGi3ow%GF0a++E^GNbNHDJ>oeLtcJRhBeRn@83^z>+ z*nN3hXzsUUoIqe{6cA)V*X$^jq@3Dn{2o3aC_WA>Op<|At1B|(0gxk;dTqdo=S0p_ z(FG2kpjvP;hrCi#TBq(ceAKV?y&!)ReB(j38*B8o`g}@f^DRxH+5}jYGNVLqw!#~` z06O|Fr;_W}R+_J)C|97}EkG~zM|kn+So3=Ko`g@HB}c34fQg?fxd_hA&X9Kq0$4eQ zF{r!mOYm|M8cNKP9>meFYDY?9_iCY@3E}U* z@z6QQZ1`hx;UR7*({J_Pw>_L!_V0gwiFAM41oBfboOM1dRf1hhHV3hWD(1<)Rrko9 zoNhw)f1_da^|`R$Z7e-<3#`N~^Wt}a!>+9X&uf$P@DM@^8m;G3u?*dyLU~t^VX$4W zx#-n$fJJvGe$LzH!`ZI3@L21-vBwo$pR6wnox~12dQhJ$5WO|XvxdA|0fxYU!>KmiHn_MIWi);O|y%|~dzuz%KC*li>1P(140}y-imd&I#63>Mvch??;#)>M- z|LKEvQNO;jRP>up98)$B({;3t9N^JIho0MW_3ne-DWZTyys`cx$wT*qT!}kPf`)%< zFDPnVe7^i^>G5}bDSzs(|E19#R68N+(RYh<@nq9ME)z9F5d4N#j3OsD+66rp9AF*` zyz>8rTj3KD3rz7_O@+$7{xFmHoRr^=NVcq@P3l-!uIoc?50;J)_!@cbkZX zfAH8Mw5LXc^BjuEn7(OeO$Pqr^i%B4$o z#YNg3^;`mTj#GXU;h@_^Ja zj6_Z+@%)fJ*hum-ws%hp=ikz=$_xHBrYbn_`wRNBGfkTDUXzf++T#0JcI*!mJeBWo z@8tcrsC0KSJq`q8o1&Va9t*)-Dk65LSvMa$qm7=u+M%M^72%weL3oV0*B9tQDcDQT z4T#TEYh{OtJa?V%UqiQdRwkZTvWMPfjh{TFN1hunbxlyCbmmDyNTGI%j~hdSEA>w8V&9AJrC9k;u%FdFxLi(MGN4IG^Shyj5l`SXoJ@uxg<#hzH7-?xE-l(KJ z=2{`4$aBBwhL%CiENStslE`_&7Sym$DUV_w9lU!TU?3*I&bF5hn$VkgmfuxsTYt_@ z7@MmY*&dJ$N!X`K-};qjanM`Inx7V`U}3YVrCC{1_mg|NxI*-Hc8|!DJ=L~VsAaJg z`J-moV#rFu9oW+Nj!bzK%L5R$!L+R7yv5E~9ejr!)R*=K~fHp7DR6^+J3_Jeoy7@#rpm&SJb^6r54A zpg71V4|xvcc7g;D(h_Z(wwdtr?G)c}NKYj5h%91O!nV)T%g<(eCmqIL=pA9=pF>M% z>l>G?aEzDukPU{u`)9Utc+I`v`PmG6BXww%o^>mS=u{qBt?;G`N|jJY*M*65$Ku#{ zfARMnzo-Yu$3Vukxe0uQeF!In5}v;G@aH6>w`4kAh$q^8l^ZNjB_l+`%HIa&*{yDEiVI+#yT@s$P`9P+7r}`-4zJul4Vl8oWN2VeQ2^(7FszR%-US1bx5FBvsfWty%Si3 
zwCu;6e!1w@JqwOfrJ=Rkw)lDZQx+MSsZrmn09zb{JyT2d${g@_|3br<;xNYe~0)7vFZxV z50PG)L^oH9NP5iCvTLBAj`9lG@FT4-TDlZjfi@Z6e_!op#6a3ANX~j>`Hq57z}=*t z%Dx*vM~z!HX7X}Ofjh{Ui2`Nj(dY4Y$&Z_ST0I|6`jSp}6Prrt)zJp8BC>DB5^be# z65i;Qwj1f|-O=csnr}s;<;dD9>5xRDeDBq^S!Ks0|4rg?5~f6kdk6dK8M0J zmvm}+OEnBJ+yK`z2DrsN9W@jY5w5+H+76u&>wFzjwjDYAiPZf8S`c2ZyLRM7OxRp4 zYP9{)tK6KEl7U1``ZU*N4Fy&rSst>_#*Jf>MLjH|iiOU*yi^qZnCt^OFF>Hpi{a0v z#B-W&MSdjy5)mGEuZ@oIMILf-<}T9#Q|f2S=ue+t*U=?NK%RIMf))<2$gmHxSgME| zJzV{s2l=%pQIa#Zi}Y8pKk~<>yRc!*hp8zIWhH7;yBu+B8rToV8&y+@1Zt5eT+qZF z+o}7f307~hoE8xerz9)7ZE-JGn0wneHEC6sdL^lPRBj=sC4b>?IyqU&dRH<-qqP)4 zpv|#)<P)+x{K?O8s2sW^wxv#(*&>+`ym?%~FQ8jY&ee!@+7(y<8=LZ$F)&ae^PF2&cS2duw+ zwHnX?X}c;8yp30E4C--(pHkg` zC?b2EkByBD1pDbnTR!ueXB0U_l`K$_;Szk*6{#+|5|q(WlZH6^4qaPY2+MlDOYkD6 zKkc;ZPhG!4puqi|4r)@Q;y5i@u9B#BvBtZ4;^2V*SWwEagt4Up&Q!G+A)t?;ApDaM=b=I&6^D2^dA|_hktR6GQGF|Gt5pnGbJ| z@;`UJlmLC!1&p^p8S|DmE7+~d7 zJ39Fd1jU-CVDi?}?lHrJM)Qp`hhKLBeXJh~NjZQD#eDkuH1qLo%|@3Yu3q5sgua~J z`y)_{B&254cKz@B_)~Iw=A<{0l60n8&G7D5xqJ$n4AO3XLnW&-e^E9nw$&c7A17;i zLqS-bn|_(jW_lBiDK+RunFE2Io}l++C|r6}c}NDJur|jgli6De7M7zgDqWH>ZT^rN zrIGr#CY;wKf2gr=*{pE9blhWlvbk`%IqXJK$^(AeVk+r!Aj|7QVUSJ@Gk2AiI= zR9MVpx-*6*$?ps0W%S7;XXqFB3`*&mS@lH2Qx~%wU2rWa<$i^nxi>zCg508V^ppM;y4G|)95&^uuFO3PZ0UEl(P3z!5R=dk*;IO; zveF5(kEk4fz09r%DfZ?$I_;&jUEf26!PkCy3oqnQ)3g}X@L8b z0VBnwv`tL8d^k1Z467;Yhw~551jDuZvPvm;5U}JkKuW??i!?`O ztLkfn)xBU{ zZ*-9i@vVeAPnN5Q5MLTFhaqt|G#L)Q5`l=gZ+Q~<->2|0PKdNOK5yGcdu>mYSxlfv zRaw$&h*`|`3&%?!`{t8wUack{Sk_cN|I(X(hlfQY+VT(X1~!K92CvKJCF0TN4Y{4m z!2=rr5hfw^E}Dc=z}ZE|x4o1ynN=!97h!j9_$eo=}vziJKsn;tGjMA63~`eNaBe(f>LR;Cx$*!z4Q5C5&7-6O z4?u#J*w1Vq0XYDX7af#~rWV;swW>hhzk3bb4P_dd3vp!s_fG+y?(64%geE>4r?}#% z>@6W~xP}zJcuYHAAZYk7T9$BYz2lJhdpR4psI9Z`C;3Xt`h&|zJM5B%@Y|_43-Z1( zC2~woFeO)0EVb*R`{o95kX{%79lvJ)wE-XfM1JWhLYjx|}QB@sD7(bJ~nqrkAAF z7cyFZ%wfOTpJvIkEl-1rFB=L%8J`Fyfws%w_tTKVu*v*9$ry7*GA&w)bN3|trcM|0gWfi2izPIO4*G9`F4f# z9$X7kgU;TQ^5Q`t^rCbA!Vi@iZGXX+XTi($ayPZe&zc(Aj^9|G(1%|g3OYyy?IU>2 
z(w&o7d@v}H(2v|hvbad`PL4EKyA6=F60uaJ!EY_xV}0)xzViFCm@cW)`kTnSrSV^A z*6)uQ_$;*Nnid_sza<(H)fc>k;*EW__W<&}lp4ae<(t{noc~6$B=|X#KFUOEaE6Z1 zOA$sZn;k;)rCO&yL#aT6d?AU~K51Q9t$I1-(94J4%+={CM+?x_aR;!WeyG+bK0kT5 z>V5e4(k^1o)igU=O_gl%eYz0;bDb`p4nh;`T8wU#w={W&h~1)tigx;evj?E5N=k+b zdP(w!lBma(LcL{^lsROI+}e z88$Yy6F6!yxcyiF`u$%BQv1=alu3D1Kl@hmsoN1yG_0@W_ANLdL4--15ZqiSSl>Xz`>^0S%6bRw`QQ&bNA%f@P7nbkxT5h5Y{d>%)tb%H3hDO3Lw7=j17uyfXE8L?oQ@ z{pzRy!Doh{&KuXq3n<^tFLpRHH^ns(`w&SjJaxZmBW}-sSx{(#JoR~%)aBu{E%ftt z3Z_)Gw~KhHetfO<;iJDw#TS$wUu}RLA!^+$d}+S+E^kHjz<%+*vP_Er%y}7&?@P^% zPPHkf9kX_MN2Xge=2j9RZ}4kZ>=_!mgHIG z^RKh1AH0%0K{@&G{}FZF(NzEczhx(ctn94HCS_dV8kyN4ZbDqMvU0Cg_9mOGYh1Fj z=e<-$g{*5`T(Vtz+>49)z59MYpYQJ+=lGY`dA*;n=kxJ=jF@f**U=vW4i)Rm)}rQK z^6L1lYPVHGk3qvz*PeJz9l&)&AqUtn8e;E;i?A21e1YZ+t9TtDmCvE3+_Wk8Ixkbs z(mi|mEpcr;#HCU@IQK|6m2#9H@nn}xr2BG+E%lPMw^Unmb=fy1v8*EwiSR0fIo~JQ z?&Kf#P64$8o`Fx9+QuL()HmM@d){bx1GpjG*e-Vv=rnV4QooeUOt}OUPP0uGL=XKb zpdMtZBs>E@`N)`OLj`3Pfk4@m8vWzQQMMh>V|wN&KW3Cy|a zw%#^qQjzPaM;>=`@K=f3cxZR`JD8JK^RrfR;AL^_zB-AZtYuAvKH1_SoB{Qz;4if` zWx(6vCFf=1RqeM2s{l+b{!ZSoZ*nP?vqbkz^y!0dd@3b6mF*mhaJmwPIMjMyBfd%< zGLF5~ewqC#47W&81nltm)Mv#d1XOgg{&v}s;@}nl?00Jgtd)L@69p>Pq{000w0!f; zydSH7($pkWP!v%jW^M970bR7~=aKt;y+nr}4$tV{#=d2M%S(>beA2JrR^<{oauT_s z_ORWcSvlypzh%p72mLE@bKTEU@Oax7mj^G`7r$-x_3Z2IrYARRm(soG)i@fPnwK^F zIuj(mUF^M=V~E&dQXNF~fR^9sM~cYuAP;pEfBb#bB7XkY-@Q1i znir@g-^6}4_+Zs~8&Egg|LYi7(ehcB(%(z<7?9Vy5({^z=d&n6Qk-nrnP1LE&zai? zsguP6O~SLrf;qhpUpl2-^k%s4zd88Ga@04SRSLS{8^Bri6F@BiYv7)+NLy6ltgBL1={YxU#VJh?Yl;fHC@Xg;VD0E|n%^KX2vK3I)^|lu-AZG9E#|xw9 z0;01@(+7DEE{dG6VjfyVPD|~cC<0D9webbX--0}5sVyg_oJ+d0^R8R;c>whINLb5# zV@z_YQT6yR95ZIsXQGZu?GoR1bZg|=8r&!2amnIrqx)&8kB+76x#Wg?+)KrB?rh+q zVTSqZl;n9KXmtK)k#!YBi? 
zxq;38t8-S_MD@3>TJ_&zE)}QsN$Fl(I`sFY<~H@Ud&6}4g^iWo7N2;EHfp25B55{;Sg$RjB`8-%jDG{F|@pwgLVpWi$g6m#VejZWRE=@ zVSB~@idzo#J!0KIZ`_0Jt|qQ`|7wK)S9-|cm}b4MZUl#eo&lTRT$YV{x@`Wl&E(p< zHF_hN%5GP$vC6e@Dm|ylnpB#&VyxF!&CJ;U>ESJM!UwHC6=s3}nP2MTvs*o0);y8V zGGKt<+`G)~g6$t*^%Y6!D;n}v!K>*r+U#J*6eRID3`-QIhD%k+iJ_05w*tkXPGS35od-V z(2?t&E21V#;j(Tb8zQgiJ9pB6^K4T}v!K?J4VTbl_ojX~;eH(nMJ(9@jhP%7>h`WA zJ>O18t+OPm&b>t-v3OU%@9>Rvf3ja6<*+#UMf*O5y~#H}Z}NOwn^o>UpkMA06-_}) z<~8VgqjfFWv=T!umK>W?aKxT0%S4V7Z>rXKmE6u^XSX~4BTu*mmb^=A;szB|zXMx@*iM#Z3 zBj0Pfoiyz_TQ%>~07+97m4)AT_(&DwxIqs(QR@$8?;B0tuiOjRY9TvTIoSA(9=JEX zBzvaf_JGuSL#Kd`xon#CT!SrmA~-~dmwMqn1;?4LRd^u5=F6;&v_}wao-FIc;o7ho zrEvzsO1}rCS;m@e{hZj!){$eCg*NBf0o|*d5sy}<^x;!4{SHzrSF{}80h_d{hx&%} z%7Oh=u_p-5pJCuk6O9g$(acjfN?%Wj&J2UWTb~eTCHK=sF09K0l%EcLSZhy@;-c~Q zo!_{SY|s&so_ggA`IkUo6Kh&Gpz9aBtEL(GSBmF+idwzAy-fekM-5wbq1JmopF4H! zRT^`-2BUgK|7w42<4OGHO-EyON3x8zN+ShI647p`JMi{zdS-1V7$Cu zKdOJw()#j>mq`3dfdhRg4}ib#StPg#`W>?f>10U!&^oJi=O>(}=!BPv**nOCY z$1&5{F6g3!iE1h1eQD?&2b&oq=#Qe2wNxQ@SaiS-rWqJ}hb-kYbz)`j0!UjhB_&9D z02jMtm2_N@^2ek{dtHwI$gBvW*Gqx2Bghm?UpRO&8Am?LN4&c`tHLfkqEcQ!0fIgI z`n3E#OR8@D=M#XnlE92u2Ut;zIDv)>+MjIvmYG`tB&W5$_X;`M@ZDSCK&F|cn2Q|{ zz|*VgWtS#TDZ2`Va0YuJ8C2j&le@Jkmt2YSOU#D*0=kvs>vU(Bm-Su!h-+LNWn24w z*)C}#ryCvnA7~7BRMqeE5nslLhXQ=`;avEQ?YcEhC%dUq4(hY5#Db3S_S&bPZL`5vL48{cU5{l zn;6#`d=Eh|N=&v+T$JTK{NC(vcUav0mU~KV@mTg{G7+_AE5pF6Hz}>%bdhwoo-~l` zi?$snPVadCzBPJEnDE&KWjtrwc=F!JMLcJJ7QbyV-!=+}wp)SC9JD8vtsaQW({MkP zXpWx4)cuq%cjkMl>!9Lz2 z?YBZ?+f%p4)*~yll-NccyYw`UB!jfU#ATP($AcMAQpp>G;z~={gB-riEyB@r&ey@8 z?@~Q>-hVXsW3Hvm-6%7))n9`UiM81%(U2x}b03cqs=0lexa z4!c<(k&R)svmTxZHn^2+?A+!`<_lK0Om8UTW@p}I9`r*pCN%S`4Ecf_MSnG>O zmjU`c)gq*E_@ue<;b@5y%R5LvKUqgRn_<8Mr~DavFbM7^()}diaKheRIrZnq+4Rf{ zS=+~s*Df56T|2%jfFvqu652cqX9=cA=vh~gAmy?q5 z?c;#XXL27V_NFv4u?B#3xE8xM-^V*-cLM*gc}N6%0*ug64v%(TZI;K3V$vIX@2X*A%!baBE$R*jNWb zb9u=5KgyGJgf5zH5}Az5Jq+5?jkOG`OQzK(`B6in?BX67gQggvWK5>+2!gg8YOx`I zC)%C*HLAo0GuF5Ie(I97#)gPUcdI1jGw^&vifn*j<`1r`68PigjjWd6u_6o7QUS`v 
zb8_k^!y7yU=NL=>8J`VFlbb@~{3!o$*j*A0MGku+Z|$4C=Oh3l;_c`7dWB_;wHc7L zH0N>yz&Kw+mhC(8ygNnfbAT=h0YEm@9r|)JFV)mXaY>&kCcOOT9w)>dydKVUw`O|U zJFnE`Jpgca1S}nL0T)mQX43AL7Jv=}e(()guHGGoY-~}ReyVm_jydnWzY~Et!Dv24N2fbuqda+=Ej#5+4>C^Ut?K_{G8JIOL^B)--Ka6pp;Q! zkf%>``K8$5Bh}AQlu)FW>}?IN1q!-@#Rw*ELZH4NtA&O&NcU*J;+y#u)DvyJ z1UfiOIl(kUuIcTC8BOiKAz;uKR;BKH`%PUN4+y_oiDs}j-y;schgX+5w&%in@W8LP z01>m)7x5M*z+P%~S0!#w{-YX79*U`}diz{54&QKPte0^iP*YG3D(tFPxMCl%`$xwp z9Bcf@Sw{$JSDSP17GQd=ncX~>;*1nrY?0*@#iVQ}!b^kPnHW_Rd2{2w52ltVN5n`- z#%WmU2I=EE+`HUmY75evgVk=a$Q8vehGW$g6L#KK02yL+6FgUq?;i(bDZv{f6H$OV zd10Y}iM}9$chtRYOHeG@@VCib;fOEuW{>9&mYW1`Z1nn=yym#A&J-U+Ut!r0y#mnL z`oHX2`~g;&YQO+~&0mSNUv)JCnEEv-PXYVQG}BO@;z2yHu2T2=3LvUFf5ju`z=Ep? z7<|ZUH~*5eQbvC@T?J{*bn~-3P4B$PI(H}XH;`o*eG0HrrKI;}I`=~97~XL=H+D;E zJa7TE?t4g9Egx{2D?jUKdqSL%1Rwe$S=zglK&jt^L=-&~7PJgsEmdf)6B=~NeUuy5 zWixEBTTAVu1vZ?8XYM?rTpHrXDz}+#Dj&WI0J7w0ay6MnoE*Z~;ZJX!q8&Y*hkYB30?9$<$f8g#ge)cl3r?nl*Wt?>$`2`V5Tye z+_v98Dq27iWc)|2CY8r5vyunNK;@B9t1@<1o_4T3x71CyzL!523%msF84cpFlM9!2 zM22G^Dc4z{gg2G4V=Cp?Y^Z|^W58H#Jlfr(M7GS#QjN2oHy5GX|KkOHA4{H<$iA$n z#D!|N@yYZ+C4ZJ06#bqNrjJoJn@!F0uw&PtljZb!02tb1J=(ho`jZ#* zyXAPmSY=DH7NQY$Pg3_~1$lH;uU7z7m9mnTiE?m>aN(76vjeYZ^ zBIo24-h+qDNwilVbE=ICkV7*Tf-m$+dO)EJN5c-(o$c(a-u9guHLl@h&|NhUYt2<; z&f)c=d0IYfLt6Yb;?dr+=lW(sE+B_s8pNn%kA^4&3hkUpw7wlq=4_5Yfd0yQhyt~e zBjF^ojPs>#!A3*=m>Y&pe~j;DOhBLU>eg6S5kf`rGyy+Fqpup005Bj5tF4&EUv;eS z?d4C|{08n){*C$x1UYvP z!|3M3gB>b0biBj|iJIX!GGLR*C2mO@J-uodgo#8kofyq1Y(-4(IRczkYB2Sl!nbz` zwyY($GuMj;4`_RT7mjx+QB9mxuGJ<$iez&t878w9o?u&ZS7Zb97U6zvnFvB}sbOJg zsYXilRmat7_cpyE=m{Bgign9UqWx-b!NCa5^`2N88cpA*ahh5Hw2g?&tznqjEp^F0$rfY3@2~yiV+34Cc1BP z(Ol&2yMZ^t^xD^hUMNQNGVv?jcaMUcqfN(oqu7!HysbtxtQEW9w{xyD!0hRZj7b3u z-7Lj>t)0EMP2zSp-b8k;x3$!O4SKv@F(Ivr;aBZ=Wyjbc)AU$Jr|ymNu)1m* zvwiIk+7Uo0i<`E|SfffllY#EtcSCi*kg=V4f*+rB8pkg3v0r{J^-jEySgR9e>wrklXRg1$#Ph`VsqNXNcEsosN;NV0&->$S?dm zr4h*?&tIJ3HDE;-X%YzDIjAWqOodIzK34S(s6ZP+x~A)~ch(_>k6xZ$l!6j;R=-*=|C^X;ulyo*r`R 
zSdL3dRWk`}!^h1@J|5J*dr$kB2wApTj!}UFx}y`%Yc@Y$8n{l%;tQ08K6RX7G4$h| zOnyF^q{1_OK=n3H5<$%P89B}m6Gsp@vY~bd8qN&R9VEjS&ko_Lkb&9{{z@j+E*De^dj;Vt0c0=<_jbJY40|E5MtbuGy+j@wMEa++3w({;L3M!*4B}^hw%q07`i% z^x@?tYXC&a?d5ke+#%k#t9LLS*!}iM@jVWqbR%JO^3@{rtTEXMIKh#5-t3Hpr{kZ? z7px6hnh@KsGUYwMy8~K^-?jW3C3AtjW6`#;4+-U5;>f<7c> zf$^gYX!l&r6m9$hg!QbU!RVkTPIX4E$+c)a&CGdcZD<;o#84xLAl@3d*u8g4XDL~u zP~%!5m0eT*_G~NF=lIpST4vW0zw7#6fnsF~{6zD3*h||yq|*>iWdvAGeKc#oy=T{= zm=Ertm5M(#dvl7q%|0AGsP8%W$|AY}rm4xL+G$m+?^4NAcItX0$eDS zoVRSVgyM+(GH|udWZPCpP&ETBn_^;594-Y$+$^~I@QGX|th!`rdUUV2vx$TJdJOzO z7m~GylXAQOj9Hg8+J(j1V0qz_7jR8AYlUhA{~xj*^G`c^l&N z!yP{Svu#U1fOIHL)^Hm?!0x9gSU%BiAy+*qyHMI-;RZ{)pf-((&$`@)?Od<>v=uwsEr*{&C}Q4U=xC;@1aOiMfR2wV3sA)C zTLYZaf5ooxZK7ttofIaflfLSV9S$W ze#uEN+2`V`gZLZyltBx?;^^;NdUf&Vp}*U1X=fH(>gK_q0AjzjG4)~*>muU!vMj{p z9PO&rIO^h(HuN6A_#>3BFc}dI+qQ39{PXOqx!35b$I#%al7V%R7@mHbdR;u#45kj;0^-l#tY z>^}7AK*#w`uaFu}=NI;xfrNY5=~hz=HrGGFv=LB6NG1l4hxYU@3?>$Qb((`l480OM zPZTsbNqdQA`NBxplSGN(DtrxCRb@*idV1qpNv)w3Yzf)b0@o$W)1Hoxa3Ew2h4RuD zV|9A3CABwZ;41+H9Uno0&;3WVx+l1i;8v8Zo{?_IiGo71Mob&+ z^F;N-YB{4l5PPdcF3m zZ*S-Do!AhLx)yT(J}Io>yzidKXGa>H#H$x1!FO5sint+nj?xPu(r|`#X;8p# z(urztMyuzZYwO;DT zG-9Vfx6Poq&Sj18n*besXs~4}{^vz3=m^Fhc&Fiq%vstCXAgDuz&0C{c(=c$ho@u9 zFGk=!3u$DPjD>maq?vMy@02t6!9#7L{l;_$xhjwD>~bno{M*|sy7Q4#cuzMqXZV#* zJAmfKL4MnfW)4x#($4-?l$uL~uC;puY;cB)BDc(Ql_r;d-5s*BJC8|H7y=%Zd>b;> z&E>`Av6%G=y5AzTG*2gJdLzSg4;S2pj3EHf8FNDv0UUs7#z{+(0ng{-t7XRkN!siC zDLIeCUx~_Jwc$<35kKB^=Ms4RuV{D}m|MU*UXc)RRk|4Dk*D&>)8kvE3k<{Fv+ZIp zGM6$--oV(=V(7ROf3?I7?<1p9E4R)5&llTDCn~(7-(IqV#W7t*HNOD$D8zkh&(hD6 zirp$IoyggyEvP>HsA&QrR`u6EetRyM>(wP zq#&kGtHWHqg=v@Qgi?C$QQhVnJ*iNb2TU0}*E` zJO22|*J|Wr{EOE8(e$+vw~`jISHKeRud+=(`cyTP!I=mU5lY^L1$cf8No#U1VwiL;8`!ZPc1a zbtHU-2XAF%S;|Hrhz?-JM?SXwLXKLvZArC$VL56etZCTYW8(t{n3aT7R^iGTaofQ= zf=j3?{-9)~8^r%!H0#{u@N43f$_rdQ!wK}mCk<~x+l27zy>>nYcR-@Ylj><_?>p5n z3ZMedUY5GGVP?-YbGxIG!U#9xPo0pV_AD-od*=2uifymoK4?z#aHsrKT&Y)*+`$N> 
zv$9kX6F{U=`ZuI^IY9cx$#;W5KJYoPsW2G|tk`lbp1y(B>1hCPQDfOpk>op1lMW0x(!lo96Nr(m$ricE%}tuCW1u7V zFS$&H9)IqBO8n=p-F(wE1UO#^iE`_PQdm`Tdb3Tr==lt#Ol+4_%V+U_GW|`ZgHe!d zT)iM4RA;@uTd=5`l#u2>prO#VVuFl39CRUN{mee_U?|(svf`o-t8z{c>GP>K>z)A%Vi}6u;YNy=vTU<69Y2 zG-l5M>BSEA09Ov&y-hm#HJ(-=QmagJF!%6;1u}x9NjK7zDnkx(T$**YMMlHA&v8u0 zWPrg1Wqr+4y}l@|X_qZoy{HA3EUh3Gg!Q^oHT6-7ZrR`y2LH_Gc~bpbMrE-QKa!!x zB89%F*v+qwD);B*njU-hX-GDQ-18NVp`U}^9H}2J&mA$Dcw%4|eytd%>p;7wZSL$6)D$IkJ|v;Ph!t?bEXYaSg7T>XLwPM58G?YDFxEiuYfAeW0C zY>%HumGDTd$d2w*#PSYn&bd z3^MQ!X!*{w-CIRw0sU+}U|hgb&&gf!Cpz%-WaAKv7Z* z?LlLv=EJmdGdx$^B`dcC^|EfQ7@CReYMd)DUW>VACyO^H}Wj-;vNR+L918gPhidVS87;y-$E+Nz87uP9lu z3fOe;f+XP7x|(>5=CF6Wlq_*7n_u-HU4%Fc{QmNo?D+6nK}ebJAlCx~4wsGNs%g^P z*b3TOKeLe~p74=~gJl2o&>3O_3SgP3!Hc_d-?Guh={lxV9Bs) zbc|#+_aCD-C|O7HoF$9YnR5Q1JIwM+5%9vFZ&+n{zoL^nYrKbF+myJvfB2j5MBn-{ z_?!nnTJD&3fU+){-oXE*Wj@DM_MllA2EY8bF)8Wute(64Sa%8CVK_PW*hSMyV<*p2 zGVkZ{S{}nBpYLrEJWaWi_;cB;)5DI**L3ophx#VSmZmbbr5l&H%64zROj8DO=fPOe zm8S*Io-KrvRgVVf%pu23q#QeJ=FVlKhq^ERb@Mkov_JXe>T~7c_r71eto?^`0`8d-qxBE@@KrhlD2q!K-L0*3hVWa z2S3qfu7a@fMw@f)(=Zd<0Vl}Fz=hI(+TdiJHe36Sp1~Rs!Pg1QiYnl_@-aDEQex%W zAf#QbPR^hCZRPiF1M0v;1TL>aT-4HI6+38CY^>>K;Qi{z%%6VmuaAV2i=180Rs47* zhu(+#hRy2+Ir3xapETpnD`Rj7q?!Mw@FoM7hEB(#rmhzg(Vns%aCS9uOH2qD_TrFuTvj$#W+7+~2P_p>zV5~$4Fd1cD>wVh7c7FuOo%L7u;y9@&A7GE| zDuU&~{u=jZI2ev%#TE!8nANHL6w<~G?nvo#&{A*Y=~3Sdq+?6`QhOA#2JxtGaglc3 z7?$O+tN*GabIa*4@iV5$cd)`ISRY|iS!r&(vz0_r)!U!BdhlTz-m+3R|Ib^-M7)JC zKj%t(-mP>T87YXtD&$AkaSxubC^bH}>24~!VZ3|3&Z1YeSa^<$o7N>r+Zo<`uwe*; zZ%H|9kvH!A$R}|fHgR_7EyAvPF&*s#?qUhU0_jgxHzukF8D3?_vOdz}g7eXGKlpJy zRdPL;+x+RCY_!nA3mGMM*4ggV*3%EGuG>daS&+!mggXpQyff0;_Bi^J{Z}KAoyhl( zbozDD&%A!PdOJqLzY5HGjmRO6MEE(m|ESFKOVJf7Qo^QsxxRohb)y$Qj&(crCw`X6 zhveP$RF*UJU^#Lop^`UH>A*w2en16Nh8_ckTK^fcTV*`wnQi(n3JZ!tUJEhI+PG+1 zqduYS4`$aP5b^vHdrgPN?p3DL>jbpfvZ47-!_rUPEG|o8(#y3D=NS+>?XfUT_QXi? 
zot5k_AQuIOg8lXIvj$6?h2e>-q4??G#-%y!@8;?#2Q5nU8Urhl6A{};&G2vLWPv@g<9#G}O}p27c`P<8gGH7oI=}}@$bt|1 z@e~ZN>4lHv2L|ky%(3v{w`2+3m{AIc(%%2CmyR_Q`6=H5xKjiflbXW(Qu(^sJ{(k^ zRh%(XraHL^IaYa52ciYI75p#F-VgjZ1yQD~`m-qua~NPb{7g!1acQD~w>v2|90lvG zCropx5-ev-XLNo6u2x`RBZx3ihUmtjP7MPJH=C*sOOP2{1#;b}=k7?2H9L}wGj$-J zJ4>rs9;unWu?4{ua=6LR zXCb&mH$oW$vvl7k$ea;qGtshpo$XaoT{@V8gC&H>&m ztS!GO=0tU5XRGYE>z@M%|NerXfmR5Cuxv`b-U9KMJor;;Hb?)|_0LPpwZr3RKN*ij zxn&FFpvXH$!tkTdgGcze^uc0iNK<~9cHYVJxBS07SlacE#QgHGquG9H(YPwChx z0pxSfj-zOd6#_lVM0jMuQH_~Err>~D2C8gCOIuDq@;$HDT{N&TgeQg14U1uC8vd3- zb@0aYw513qKfAO)B`w=z$Glbm$PnVB7l0NY$?|c5XM#WAZd(xoo3h2{vu`iUA^RFJ(-X0blB*5pyeX4 z^71LTy;?`q*vij}^E5;FS5f6YkvWM$9jzpBV*)Kjfai$@3H=n^RBC{AUK2I+7@y!k z-~cf4ow8jCu8-*LoUOpR^Z8h;_$JLP)!cD5ucTJ6C5#t8ZCO@XoQfS<5t|jU#L}`2 z&9QzH*2PT6?VIod#l)Npm0l`DAvuWn2eS})FT>!b4yK8=ept4^rKk>$m{?FK)p#zR z`{&ZR_f*8&>O#Q{{ADJhO$^(UxtvD)gJ2?e{evy(XII^(>Vj~x0W*iR001cxFmfh~ z`sAEcx;$v18UJabf3+dDJ$pD$&#~*kn3Rw|7pD+R!pOdef<@x!R>kuZSON-NYzlU{Mfk4$ydpgr*Qvt8f z+9g#MvSgGSjJCv@bPX955Iz*ygTqK08f425i?}o({*k1K@w*IP0*S55+kJ9y%yfxF zC~E4fP05O5*Ja*)BpthsdBSurQ}agly%CO`K=6i~Yky_P;PnquZZ?e#bx#lMG?qZN z(LQ5J$_2=!^7V5`EBTHWnd@a5e(p&yAwxP}n1%R`Al{TwDal8C123&Xqj;0At{Rma zPiHtM7<-UKkUx#T$Bu21qvf2;!#@S1l$LR2D9#wze*x4I>bEY(I>oL&^b1_=>R^S5 z1tG0C+@GWDFt4*ygVc6i{m*n7m^M8+m1&`2E$^CU`b>J#8+X%F4Q$4y7X);rJ-JlK zx+{Aa#=GfluB1n-?{!(fYTe$w?S&btT(9faSx5QhQA)Z{6Ds6kO0V-I$bvpBP~9^ypYwqI0EV|9JNpXl@VJj0i!?aPsg;aK4q#>i>Hwgcr zMU?G&;I@?3$^5SKzpNQb1vaXp&WQO39@`!3HPtID4HgZ;_!u`g^L^OC(bZi7;DJ9K ziW2PLU=uJ^=7;EPyqnYNV@VhVd$YyGZ{O6V4i{(-|h0E8m5CmrmolJGBN}eZsiyv^BBVY?5X|jFKQJk zVkVBZa1Yf^j~EcMz%)$dHzqvjp0RpsbL$=Gs-Z_v|Np+r;2rwm*1CKrD=SyobHGR*kLnFG zegkT5u+9)U7KnxoM0Yvr;D{RQ-OM6RnL z?0;7(_>4!Gle4JVfVJ>iHd#YwX+g-TBS2+pR*oLBScyVi* zzBa3(JRx&5DRx^T4Vay;lE-HpxRDs_)&T!xquO%9u+d&Ig}GN%rlrw z*85iX@9X%ZD-(Q(IyIy_7@f5T(qgtzdz)@1|bS_uX(U(D?OL0ttk4*4{Y*h2INngw{7yYN)dBXy~#=b zyjVsvJ+#1nL&a*MDNlD3drs5LF0G{a$d0#ULaVPSMlIi0(6|a=b69Pzdr+$$pi$N! 
zcO_6z&%5R-!7b9UcupY-##S&Wgg?iO{qGO|7&{o{w2|+;B>?!??{4vs4%OLYl{y;g zLeS^ic89j3x9wuSG7>(#%L}50*W+S;POo~?6D>8MAxKKNBJLwF-n($<5-b4uhT%zF}5W}6Os$BJdpb6t*|GY>pYS`%4>qpJ( zxq4uQ(3;)SeCjiSfsM8KDZ1gTpTrNBf1P!THwfW9k#uI5WW7npzVl5G@kJ4i{+O)+ z2g)hf!or4rZ-u1zc$sAdJbm5#B?yzWUc9?=|^`& z@ZSYzEHMA@bR6dS`LdY65V(RZP~IgN&ZR5DG{a)wmz?cS#H@uTBCvpS(?|-;N2OE6xKNEQV{V*kHFMM7g0L2jUmitY&t!z#bfeaM_rwT)JWJ@ehvc zQyw=`KDGU)*7df+2;SXOl(^VnS0Irzp$Q;iaEw;PzsO-t@~+ ztzfgbcA3FZGXH*wN$D&8U!<^%g8~cTpC$@PtdtV)9u9;}(I~RQ@}!6lXZ*v{Sv2rA z0GTJ8Ssg9AoB%MMi)A#au-KZtc(RPrf(#jo{K z@AP!*G=CcHo1q zb1gnnMr3OQ3Zac??EUKIw?Qs{HmeC$hkL46b$b)k>l-~kn9KF`S*G!>NV5$@uOVsw zM8|GI^KJw&)Z}4D@&rprS%d=%!_y~Eqi`F~QC0eqBhuJuf$QE(qguXwa%sHkpwIx- zqQ;FL*he0NOz4r>fWH|Qg^%~p`OLDQTzr&2xNVxZ6)CQcx*2Pv9zc#jgH+-OKM|mk z!#s1c03c^gZ2NzIyX-?6hB$sivF*0@gD$1!50O=Od!uhhp?(MA!cDy}rwPW!KTrV$ z9aqkNP@ZZ{j5+F6I1p;(AadIHfeHG8!8MWHEQT83uAVj#k&nKkOTJq_m0!q~-G|-6 zd3`Z>5`UcL63EwQZ!kVpj@q!(uIGBL2)+MHt;p9$a9|anIlQPuT^|NMyc9nZW5tbE zSc&g-;DDZ&9V`emuS%SESPZP85%&ngVr2fmMr8fcQx&jz7%F@D`cWKMCLyvX#m*rW zT-njPH8utrdgS?AC1*Xwt-QGkuoovQp6`RRJ5a&(ewPC%-e^&3@0a`HZp6B|wqj*H zeS&FM886<>><UKArZQrJcK>||53tH)nyaXj>uLk>JiEV+l%6U(3Llbl*`T!p2^l8*A(z1z+T{o= z$kx$yFqZ-MdJAEr*9N?NIr#Peza~GMqSne@Z$5tjK$mh1oKJo-n^DUualgZh>)?vZ zZ=h{<$6s0pY#+uWa$kp`m`>Tgl0lLMVH)+I`DgpJ11mMZRJ@kBTJ&46xiuc#enisQ&QtyFGfL()kF zx(BmdH_!V~6nhBwFx)XS1}4|Bj(-~@R*l`K4+tJD-RA(-l9W^e8J(mh^0Wsw?dr9< zL4W_G`0#FL2&>=XIOnJbuwmR!*hCJP4Gn680(zX_>H4G1@myY7r}V}d8-xRGnzjr> zJMr#HnSt^c$A%_jm))^(KZ^!JIIBVQZ-R(`rN@5=?eOxSHo#YDy~CWWBxIPUzfL%S zal>_nfc9Zv9EoN!+!5TI{jihsVfX*fl6+*hNdRF4D>~&fTv=@Dl(>GZmpNhO!z079 z%lqt>^0cctHW8oSA@=9nE62BL8KECwJIgi)|4A_VL7G)iH z9*SuibPuw66V@xze9f%dn2M#K_z8NXJ75Zqu=y=&f|+*N$N|v2wSy=>p;rd~9&rYC z9{-ve{_)`w7lv^d9$xlvz|MC_6GkY`Lu=6m$qvF8{cnbf0Y>gYN_M_|y%gA$*gXC0 zhNCQo%3ipnaAv;Ubhxo^UcL#9XRrQeyB<@jU9|Y!R}f800~iM-Qc&M%;e*y8{KMOWwS_q4ay& zbXHnvLU(&f%64cz?3?H=eH_JQ;gIMBGTDo_7(6KGD3lTynf(-RGBBHT(%8JY&+`4N zI#^}-&3z|D-mEK}TF-gHL%&}U)RlVlLXb^JDSf*N3(s-fLXB6MwjEVf<(5@pO$p}@ 
z^-Rsp&3CY4CyVAgmOI(xYHhB+Hf$=)-mlqF>}>p5PPsC}9Q15#ZvAKv$$)SBV?jLL z{)BEPJVRH5&K}>!-~DV(zsWyMHP?JXbYIG`{?zdRx<+AOlOGqC$usHOGv(bg=biOy z>YUK{02j+0G4T?<*Y*PE_;$S6VQ+nels2uoSTvnR?g|7R9 zlfxWv-^eJclbMt2a}jcLvzi#{@vbXY@oy+V53hb8jfKNn{&0ayDK1>-w6C#UB7D6@ zWH)SXa!)<%eUFns%qg36OV}BZ6wto~vZB~Y2fgU#g@RAj=pi!Y=F#3ipQO=$Ebn1X zd5^Rw7F_P7p?8)}u;WiAINy9t+^lt{vcm7F0W?H9a&BkqL5cIksx&u73(ZR}Vn* zjQEQeE(DK0_uLjOydPHhByGnn?Jx|Q5wOGTINyYIL%HpUw*E?rRyw}>S)7g#SZ^&k zxmtiL>N`02dE98jEwmmm=eh7SAY=-E$u%W{;D44w+wyDV%>UL?{v5+OXa-BM4#9C2 z1gvBy~I8E$nt&vO6N?kQ}##P%YFP#;`fFu}#dzPQHuJ@AuyxkJt6O_Ikfg z&*$rXJ+AjPYy~R>Mu^&^{3XbC(~fB^e}-vbVD@wRxHx9hg8bWCLH<0(xC?5~te&f- zqN4J$q5XYy8qquNfk|HB0F69Ez#&IZ9_xU*t)upJxf^0~O-+3lbOD2ZS_HXtp!TqN zlYI_mT}2vf`yg>Ilr%z*>fHFGxpgAfBXS4%eJJ5J`X_-}+PzNNh(klu5o6-Iq{X&Z z)*d@gey3$l`k?#dIT7q_Y*i#UlhPd&p&D)0YE?X?8)CH^M4D}Fc!v@{I_qmgmU@2X zza_jdEzW%FQvij;<~RYAKHYfdCS@O_l<};Iys)fI?%*{LHZ?y)z?R=5dfrUDV=WEw zAFY#CZe1X285dv{V99ObZuJ~}c#`tWN+gBjI@+&lQE8H2G{SFRq#P%#?|!4{t)9^2 zzZR%+TRJl+lRO1fU2#n{gFyddfe0P?V#Bd~DQA45ueAY(S+;)Y9x4!b>*~;Ed7~fN zP2YQAP5>(zjs>9b20)C=?mo9DV`AG%@CTwJmcnn_)Er}1(B=g=VP&4xKVF;gQ__+t zGYeUyTmlX1{3`f-!*yDAV;QznmXP}x3Kac zd0sCX7x7VmY!V}18Y|-#+-L5st=$|jMrH|v1lIM_&0|V~RTK-bo zp1m}u#d(j-i-c3vwg{%w&p;>Si`S(e6iMU#r-MJ`+oSr89IyH=VXWrb2a|TRY1=Te zsxPYBXvS=dNQ?quv>s?)5rsh)#a_`ZVJ#hZhFVMF$$$x1_DoZx2sY2Dco7kPni6tI zz${tHA%izB3?G-d4)h$ZlVK0*WrL_!3qY>3@m`c;y`(#?7!Cf$1*M(;p0hXXU>S4q z9{6B{{3+3lF?8zb8@-`u5gXXkF(5v=FA!9FKD`{X;H4h5Zr^N-Dl%iL<{m{&2xC0| z(H?tWI5U>8f314jc)!z(sH0y6+@E#Sq9on4)84mPKeQXEK5;q!9-$*Id9-o6Jm4rM z1dP4j;P}{Q@wv252gvo!YwvkvbsiJqhzUVpTuN8HS0ti^Yuwo9;GW$mH-}U~P}nU= zK6k(VP9bB5dPgN+(BQYqWA4QkDczs)tocPyNF~Lh+PBKpl4%$fpfKAr?!`Wm<%<}w zKvx}OuanWzOu2P8Ci7vuOzZtyZF}2JVl;xnP5SI(+X zYpmg-*8-&i2;RKt*UXCE@5#)F-yWF7nURtU9)cW{dg3pXq@lU6L-@pQAf!=~u zLjL7BTy=#u_=~7F?gZBx+-enmh=&W8{ANwbXSFpVD!q#l+RR>OD-VRhISd#!>!{Nu z75}qR=tn~?&?|I!BpR*Qy}HWd5GTguR8p?K+M@1__*#V`Qy7sPtz(%LG6*H@r;Bes zhCe7dvN(S!I=c4)sGfLvE^(*sJ#8B(q5h(JKSpHzd7(QJ_|SKgl-IN#r3eY%^^kWJ 
zkvS`Ul@p6({Kif$yWhQ}z0rJZQrsFjl^kRxFKXS%5YIclXpY2Y3+6*Y+U{6&u%KU? zq~ld~#BNCd0Rvq_4n9ipf5vba$dNW4u~_yxyEy-01nkdbe!p0r`n!&FsHkv0X7hs2 ziRqOXpLG=Ag|)TWuWO%5FXOEAIe(g*O3R)9js0)?Tp$`Vy#wC8fJV@F6vuu&e`6Y> zOpW!GB&(>~tP7w5h3%V)gWw`&-C>l-1~*2V0|gX!n%MVO{0NJTTfDpU;Ed)>X3(eX z>Bj<~cT9Z|_8JM8rczZmBB7)~ZpMieshs2&_yWIfKpva8PmYsw;~M=vrAgBYg8(i z!kCj94XD?#p|O(ELTXyBN9=yb72c6@X5z7&9A>d4Y^%7(1gChnzSVep^FtuNDSCQM z6?ZgFOpG&GbRyk~0@YZ4#4HR~*kJoG!3d3+L)5)&ybd;#0pu5TxZdpjPjV0BcYbGt3m2-a zYlafV_1B?SQRX-aCwx(xlU^H#o)+uK1GXf2Ys;j{6F4(FY?Hk)vNEYKQ)-nr3hCd+ z^u!MVeBc;{_u%Dg58nMO62@1|?5MH;|48atPY&h0eow>XKleyUs+}LU^v{86L@>Uc zD*;m>5-)39JE6nMIizCB8#p%Vx!0pK@L(^3VA!4Poi2K8dNScoCQXSdVrl9*`8`qh zD*JgPSpxw>C$E`r9cHytsaNP#-9y)HUY}9}-+V(AEN+uGvK*)Rqcl7x?kROGRUEfA z@?G*K@pztKfT|Fjfc;HaxAg9wzy>7<9@eeYQGG4Znw404 z-(HaTrFJxeR~PXcJLTkO`}5dB;46E_*)%%Ej&6GTPfwL?&9LfaZgan zbh|`~#tE-N?8B_qjQ+tdzOOTQzz?nL-P+3D4akb;r77+%?~?$NqrBYAv!P|n#5JBl^bZG%?X9X zaTRF^_SIE6lUBxk*JHf@OfR=VDKy}E8gj!_Q* z?G?)&rV>LnN%#_`uJYCC&ciFb;N|!ecWZ3xsrtf@P2!#+9ZV^yZG6YOcRz9fuWa!S zT%J_nMmSXu^$`S+l>I}n8TmfDYr)#u*sMXCBQ-+^8pl>=srShx4TrZ;E>%VNpj?BE<}Nbk#mM3d0I) z`?ehFvW@K%uEfoAC}8h{Vj>`s_I6)YFoOIS-Q7QU?;)2;02|H@zfHvkvI9|fe_oeV zzu*@E&PmIi2+RJpd`CFEb}cw$%R+*Cl0cKIgJO;RSzCw+(wfRtnBPMVPKx_zJd*N5 z^yJgEAAzqwCK$GLh!|D8?Jtry0}OEJf_zu`4>E?bTyKa-1&Q3gAvzS zRkg`)l=yko>m$hhP4sIr5j+qbXN8>gs0_Rq-h(EoRx8S89FM!H3??}J!D3`_dy~H>{>|Qu2)a?t6VqEBOWp+L!XruZ@B5RNJ zG2ddPuXv9!hc?LkQaUpEb#@rPaZ0B%h%};^7u)K?9UgKWv@J3y~Xy~%QA zptydbPKSJv-dLSI60$`cx=#o`#Gxxm<>v+N8w4rPRt^B8l3C~>!|V-{BjSp+(N0pX zoO&e~?orvnKUs1fd)u7h29+J_>MvbR5uwQU{Lnq8KyzD!QAgS{I3|ObY`E8)3PAzd zm@$>Ue=2|2s%QYzYZGbOl}UVm#lx)tAnw7(O0fU05xwZuSH2)Ml>on!)kMBoTD422 zri7Nt??rt%nwUDeM4mM0o%&u!XN;a?pKIkCq4EA zUJ?Alkp9%6H|XRplN48O;HU+h>=@lH-^xA(@_&toN&hB8TKj7Rx;F9lx+a;&-uBrR z0GjH_TdC@4%~G zH1hl%0~qN)YJI$_D=fgFB)26G>B-Oi`N^19fV2tS=ewElH-NsW5+i3Nq>kjQCalqi zG;Iu?nAsvX$VySx@i}^HU|Hq*)~lK>E;6Gr(EBtYBTm5B5=SYBvDc-yirtIB|2YBI z%})=K`wHqDx>=ysj&s3+^0WOf0BCfEP2{(?)$e(yI%}S3Z!i z#vjmJZlePOtpBq95!6nS2{~2tawoso 
zUx!R33i_S(U4O=V&&znR)x}iv_qJwx>ur;(9JVU=Fd&ueoh+i!cg-rX2jvXq$_e9I z{wE#gp0@C~TM_*cGBKJvo-zNYnGJ~Ti?C%j+h^D_{R+gxRRIX){H=~yg~>9XHLcUy zgGNUY!xv)Fns*b7!TKEWhDF-0qD3zg@McM?yS=1$7{pnlD(HU6Yo3X8J;5xQmR?!v zP_5u~j@sdeeC_8Fi+X)W1?3J)9UjcrMa;=J;K5xpJO!$1)2oVB%8r>`VIBzmAiump zM+6g=%Yv)pyP<7G$}z#KnzU)PwZ7Vob)W+;(cO(hhCSS_%OW9yJxjlJJBJuzyF=xp zJROW|R!csvw8Znq7jUJ{B291c4Vr{u;G)u_PGemg9J>+{u`^>j%)M=Iz{6t3q?eh2 zP--o53-f504$vtxwSAuPw_NpX-@LG(N4#kv(Q2}bWZI>_So&zCrM^pt;aHY_`4I=3 z$mO2m!w>|J^IP}z?yK^0JH^$39`N42eCnxsTV)e-+s&=K)kH3+D@0rpJ1*%_raciA zkz+0NzToA7=3?V)|+b zp3l>8EaT^(fLl%SbdAwmeL^Ex=3e*I8;w z{BE!ORu{Akq2H5!zk>NaVU95jzotP(8x3;M4?r_e$zq|!C=n@4jj`&Mf8cJ9nqR!W}spWr( ze-Ak^-oWA!l2fN$}cgvF)}m!TIsF%`fD2(d@j##_%+C%E$;Q4RqOozGElFP zeEF$rI3E>56#r30y%w*cvdIo|!P2!#ubRGqN6%R^-31*!qvhScs=~O4*jJ|d`pX#vV&unU7#fQMG zX=PD$T9A6TQLDA7+-dCZ7?+BAqk+{_d+|(1{z<#74E|+poHQcMYC`$XY+cSF+{Dud zcfE@>=Hgvs<)s3F`z5AiK$ce1<>z!IC|jv`<{ zeizkqDqVaLtdxjWkYN*v35UO9i93H{HF7D7CM$@1Dli^bk|s3kMC#=m4)ohv)Zb^= zYs)K%2Inp6pF5eHBlSKZJ?Qu7moF`wFaK*5F>_@wa^#hk$G;t{7sz&U|Fl3)3?hJ` z`1h0L_3UTJAE*0~iVp^TSZU*Y;U8R@O?#gc3FolnV@+jXTyiPggDbSe=#uH}C@L}Y z4ey=fkeq00}l|!Va3R4d3V_i6^w&as7Dhle~DpO5M;v{(|b3{w6LPRUzKNqYf zzazjbY~e@#=aYwENRcTlith%?r9AivQUFwqc6V$GWo;Y#p;Cf|d1>ewm zDGY@~TpIPuS`23En_DWE$+##yXAfWP!^yE}Jay2A!f0_>3@UgSl70Ww54Vc@ zwTtx?T>&TQfZxBS4WV6zYxQPGhsQ{V|NpqJa{j&&3sQvRewO1qH2G4WgwyVHU2WLu z!qJ?y!|W?ACT!?TQ{PhapAufUdvW*~z`}F?_xQ0b;xtwCu0~`xuW%FAc#LG)3*Be? 
zQzO1gxjU?~`IE1<68A&z- zOJ3DbpdiHMJG{+9hW&k&p=lh|`~&Mk?|8;w9-fh?~I}4a#_H;N&fOmWAZM9NG8fxMi z`GWXocyYk%Mmawg<1djxzwt^_rC=PAm!WvQk%3sCFnA*!afOX!y>r;U(Z8!!<5YUb zYCi{D^VL#vwi>)nsq~;&ZOQ%VoSN<>mufhtmN33pxMAGoz-o8u`dUvks}4?f**`#d ztv?ZN0i(z1SVrHaBF{Oyu%U%hP|Ftgqp!ruAM(x0OnR7G1EUw|40aralUu6y1;b`M zdVQCNC@%CfGr|vkU|@bB;Unxr3d^)b-=pwOpH#4mK)M%>={ts2YODAk4zJ<5ZxrPx zeU+=+G_Gn=Y-8=`_}hl6c;EexJnyng2~%VE!K$l1IcI;3KAQ*E;7VR1HS-lhryAhH zK{e?NaOD1Ab^Vv6_qQ(=o{J>uN#A1O&MVvC=0=8KOS~so!szX){u!S1TKF{+*U=`s*f1?4hC#uhHPB(GCmYqVV#riI$tfDOFtJWiP~ zOcloAJWP4Izbj=QofpxN<@BH7P1fVs2O>L(0|Y#^=b}=g@=+W0eLM%hZ6+@6tNd=j zKf-t%R(amDQt~jw7eBQ8fyln)J~z9KZ#%smBKQZHhylG>y6{U=!`)PQx(_{58PG{DqpBJhLoAGw2JR$?aJkHbo^)%uvd;%6zFTTj69o1A;!D4 zi&CoM#MOdCALOjT<&5pTyuMLBCIJ|ZQLoY^e&9cq;*TlrDMDj)E+Sj>()jrX@k_xm z2r2x;Ji%=B?*AT-xQL<>0rsfJX3?SB`1!OGu+Ph0kpBO9bO-kU1Q?d92x5(K&Q=o~ zUJ&(lS3>X)P&fBB_l;1EvH7%PdslQ1YeFpxJanuPO2lLYXB?^>l;1CU&U5t?=_ z?|6i*-NxT`&kprMZ3%=&ZuI??iUp~p+H|FueeCE2@6iVF^@a)^Ocha(D*Rntu>uIm zTLgquVU=N5bk6SbK#IzE}&PZ*w@|dakag}v&cTNQF%!gj*L_M;AK|fF7ELB5@)z3dnBVHQD2p^Kzz9*JjyXqvO^mEM$qLT}bM*v@xhF(h?R_}p k_raXWj`fv?Im>JM6Rn>$X}XYB>F(~1g=Lrc zJ;l%W_ou$czL~i*_nv#snE(|f8G<`hcR(Nzft;+=TM!8A3=+_I`L6TfHdLcAfCNjUzuqG_buQ@NCVzwnQ1y}yO`f3qCdHN>nQpTCs7 zqjDz=--{iTd{H!wm3>Pk-K88WeA}}RS0zJ`Pb;-8>doy(KE#o{{u4Q)G&>Za5D-76 z`KkE$Cpv1-2uPFQ1FcsDNF5`4nOU-quub&vjtK~bq=!E5|1!So{Oix(O2|eMgB$KO zKJ7iW1yO)BZ+{R~zAMLn?~Ezp<9Qp37$k|Yv*Edin}*eY>(A@Ag} zh3j=t{egIl-Ti=fYZnN#-d0BznKraXz2SGJM;kphhwT$X3SXL+Ina-q)~D} z&lKr_wfghHw@Kt4>DQ!Qwweaw$7!&BPNr{Hd=1(A@ta>?-ApT?^f%8@>aBxk%J!x& z!Kqi}xONO)!wN|W4xev`RW%D&dn~r(R9}8$~$mDP0 zePa&j>Ta6t|0cqe&WU43?Ue=Mv)_Q@x51sdj#)cvsa~ck(%-#VPuaI55!V`?m}1&T zqPN@NXW?rdVH<20NqhL+R;De?-0f7Y~tMMbJ2^Y=?~K zC3v4e8VlBAFctb=tbF@Hx;P6mDJ3h-QyX2GlwPc$ihyFwp7j#-jR{YC zFG`ZctLO5;LC{k$=-(1q$iT%n=aR3+nZBsZu%<9#TwuLyP!zWNH*o1<;>K$X=a4L6 zFz`6^3bcpykG9h+FKYZ- zNwoKtd+0!Pn8n6I^h`G!z(;C<610wq48$U*H?IyO$^`9U9|-!2{(Jf%+kk#{eO#f{ z#Jt)m^%6q_GtIV!_Wr-8uto;ZItc0ib(5}qrdFn89{XDIj@!+sZ}Vmr83>5(jH|X{ 
z1Apts3rfL$!bVH@?cb7-wibbuFrS2A(NRBO-YY9D^)3>wcXInZts`Jj>MW*iecfatjzYisJ61U@Fo%~M{1W;WNJB9 zv$0YNYE6c7g01n%_{bg(6=y zObFp>y-YEJZmW)|+~n@%NYzT*9pPwdNDk>GXi}f*KgaB8uH2L~RHDnhBd?;SQgbQT za8K`!{6HCuwub!sBi2L`zQ5;)$(m$)!T^Kj_QHEmk8^IaWDoRSej?wwrl-5C$y-97 z`wZ%*Z90}EQgjIFlArYyJzE0lITdYVUauLqDvW5gF>D`heZ59cRW9ReaiH<7x%#mu z1vk{*f2luWD||U0DSyIa69#daF1Vc9PWT&u@Ox%hUdN!iR`ZJmUr&PELm&atCQoy-E)%zdmnSyK!SY{CP<4TTgRnv&SJmB`598e__YDU1+V*kNCRqQ*?y*FHIN>!{$`ebrJ+1?@ zmIjPQN2=u~0c5@P>KEnU=fB1Z8&9U9H7g3Z+17k_@{#_o8$R$s1E2ndsPw{`sS8jy+e0m!dP)alrqc?wrZSUQY%{cS3QEDS zQ+X?t_=vB(l5P7! zThF(rDyuG|zb{SyD5X5z9igmMKUJATVf|Y-;;gIm6mVB5wTxhNS7B#&3U z+@^T~y_$kB#{mqF9D`R1`;7mV|$#e zXbe=k+2_ZHgDBr@Jk!d4iAw$7&kDuZDcudGn-`e8)JOa{1>)~*BpG;1=He_kseK*H zgE1flBqMGTu85SV*E5W|#;75jHh8LI6iZLHCB{rE()BEy>eyh6v(3$N&A7dN^wY;G zosHKce%jlMVR?Q5`1OgIyQJhAL?LP4e6wwID0Ot<%C+J@o`8bnFfMPk!637TwS`}K zOWt{srz)ra`<73CD*di{)HFKy>yk%pG0&}oY;ljTu*OJKh_hL*h&$|5U__1vN>Qn^ z@x1VKbH#i9bnFs_QlWImqV$C7!Uxw0Sg{o=^LiTRxr;!4PE`KJwFGU1nY0pDp=>8N z$0yQ-qHz*bX@UyqbO!G$C!Mi)%|=aBlrMIKFHx)_7g@$MWp*gNj_QMj-WR4V$YABn-9o|Ib25fQJ8I2cWQ$7kI$vgeWBw&A?&_6 zwieCPmTQaA(GJCh;2_eKN$thLlI~i)sXntK?g$_hbQ-oR-nQQZ=1q0s5m_%ka5l5B zD^?B`q}|IpqX}8A-Sr=uaS!~LycN|kUP#ddK0D<6_!D+^?oI}(V_vO~n#;K-^=Y`F z{u5z}!QnH0egR~`k=6rx4O`~!S$1nmYhCC@eK5-d3B3b^F*!d5iO9FzeW4Y~#hn!4 z#qtzJ)s>?7|3V$vXbO@6A^C=J#Fty){p*Rh7Sup(bQ^EwAI0y_YaVl?=!lU~Csdvr zZ|=5^mX#+*SBOl(P>n;KKnzT9qR3POPzTG=8npI7b{?t8;J0UsY`-%*Z?cHotH^D+ zfMbED7RYDvGgvRW4e)vAv7mAf65I=Nzt6twF-@xh=NUc_UeDY$B0~(|x&6qSFgxhd zRG5y}I-3p76ZmAt*8Qdq-zXrjYD;JOkV?@WS-$@z*V{;U!b~J|U+5n8z4M!Fa>$4E z0>ANH{L|BTrafiM112vTq7!E3aE|3NM9{%Tn_~K-vX9^-I=|=YD9*SN0tFMtElOvzJawVPZ|-p;ZGd0YT+{=iA~({~ znb-K4Gg#zdbBBU({g3WkyX8hWZ)K^DKP>Ctih-r~WO;B8`ajceC=#E=Vj}&d%}DWE zcdsIthYy>k&YO_uC*0PQSuj_|lAX5}wb@Hs1))tZN2)^HoOPku6?57nl4p~~HH1xS zm!~hsGoJpplVl4q?c9XIoNI#6mY|ufFCNZxIcnEi-Ga_WlgblQLqCmHjl=S)c(#FB z$bg4-ZXK26@Gp?62mnEZ3+Fz)eP~t78&f$wii&j#iZO-j?qPq)!% z(DPG#vG(gadtzA+aj#fsmzuCVeSDW=+ea8jxwR9b=Z%?qc!SHC`hFH*i|e()VN3I? 
zzyzprG_H|D^oQNuljgoWfkTika>Ntj(kYWK*TB}PGUhsaR5JuQWyL<}_?4LSgaqML zf?uzEoT^_g3~s6-MM$r|?(FSur*{o)C%=S9s@l+%mX%qw{R{G1>F3}kWf`z1hL_uY zW82=7DK*$~RZ&SzdLJ1W;iZ)XnI_H3pySs)=*>q%B4;su;&Tiep&pz#|Z8vbjt`Y`4?#qKG2K%jjTCh?VAtx{IHOu zFNKLt+8j6w=KXa%FKO^AZDfJXKd{{+R4DfTNrF!FbFNw7OOm>9RbkeQnP`~nSL<@x zA~$a2;XQ=ww-`t(mN1>qmrUCKh1ZDDOBCZGshcrrYpEXB>+zLY#`0wazFmLD{=mSH z)t2`@L|0U|cq|e4?cJsO8uYr93Kq5Zm$tAjd4b(4%)+4x?A&(aEp=UZeSyxs^_9gj z3^F_SDnt2&;lP_Ry3}Y7szs%~i+oRUmJD(y{NRs^T6+Amb#@j%YoVp0!9+?|G~b6a zm}|Ak2?w!;HFjioX#IqDVG`K20DRiVaLW+edFC(P;rPIN8fWioWSuuLhZy$1-4lQf zf?ySMljHKKwL!(a`Y)bwNVObUC)ynsr5Z2BB>ar246@aQ!YU?<%ae!yBn+DIA;=W; zf#)1VT(VvC`O?-8Ls<(Mk-FukiS%-;*?)Ce1WD0&3lx&WUz5YOS9Y;_N9hO%Y?~8x z%reh$L+jYuuUj3Xe)U1^+Ns4bS2B#9WSA>}BO-{EJ7BI^-&a?b=hAVFjIypi5C1z# z3tFHort`{iUf>6dFzoVhQHu{&?us>Bm|d7{D%(qIP4%Tg6WwZqzzFHouy+>1W?;k| zkTHoq+HW1rErV6(4?72MyVq>q@FTG>?$OU?xdCVZTV)OYe4uP>iEm?Y@Vq`-|Mw6i z;p`E`hEGhT!be>w^6;C^_B)QkxyODGDz92UMJe9pgTLs*zZQ8hx74524f_=dqbFG4 zkW1P9Qt#{?@`eCrxW|gjw1K`|+QL|Vu{+TJKr`Grz%@Dic}Xc21vfIqKW}_@esu^k z7>Ei7kERL|RWD2%Dr!+)n6)l$sHi9k{KJAQI+x5xzp|RMe+9x~e@dlK|amD zm>@`RJX$FPB-9{a1+9b&>2SD_U|`Ax+2gjjs4MCufh=z1<7d0926uf|on2c=nxx$B z-5icq<`~TTiFY2Hyn8jXj)5d@oTJ+aVvVbC;j&3UKj5$8ds?W^`iTI$csg@eH!v^u zc#yPocNd+@&iTqiy%06B_6vvaTzjDECuw40;`<)&eBBBwp!2f=`p@ZN&q6PeKdw8mImwwq;>wUWg<4FDz7iG=1n+}p>oD*8s7>Y6p23L6Cl#sqDh;>=9E2J1<$%?ggg-EXU!+6}6DWYQUQsFI=N z&i55#)Dfx4Up?>65Xm^wk{UIc(cB!3fCx_vSZmMM4M(u2h!>^E0Fm+mD!1C%xr@kf z%oV0T#eI@sN2ih6I0WJKcl`k|&6w%>>2<+sJij%4wOKlYskk#j1P@|=0?TLo7VRkQ zcTqV)GOe93uydTwCouovGh>;Z0s);4icw$ZGN`+SSV(_yovoN5Sqe^3J!QMmz}mMZ zNF^vJ_+P}hn3~W*`m}F&g3wK%U-DX(05aJWDRXB@(Gwev!LQd?+WMlhh<`umq%rWx zD{JO0Lz=I9rWUp4OIx+%G3?vmSns#NIUAvH;Zx~X5IUh1^7N+-10PmqjS-bX7jBcE zMPM6l&$Wm>Vwzt=H}X!FBIlOvT;}z@!zd$^zuY7vqmR$Lz@Rl(@IyHJUR(aTs8>4O zhQiy3vg6K3`h)gdgJJeUu?ZG#VAJ^Lg}r#3mP`x^w6_O;9B-Ovc;~sJJ=yZPZuw1A z5qWB%j*s}|MWy}ncxvnCim^nnYZVF1qVd)WiVLaDaT9|_ss}*^3$}+-3odFi;us5j zL??_no$)Ro2H)4!ntDh8wO;^$-rBZ$Q`}-;#1v#P3O_vxQ#y706gGN&zFb4p!W}JI 
zY>?&A+0)HWvoFPfcS>QE)6_IC`c;SoX|dyN>lal^*kX-!mi`Zy**tPZ+${?EMcH;K zx8%F`#e<1FV7%UG&~>i^|iMPHyKLc1w|u=K9yWi0Qw5_3ybaSr9tGkqzE?(kD%)Uoz5W$ zPT?U&l{1?AWE#6J_&coNjH~b^fDty{>E=Yv%Eg@5#Nb%jd}>u9*W-o#WVA0U8@&}& zz8`MjQ68hR4$h)CQD9rjIy~WLC z#e1xdle`-l|J!dX??7l=21wMKLchK>f_Uy7-;Nl?7ToPMj;aIa8@ga56g~uz&X}9Q zp8s95uWnJBly9~2(Zp~+6k6DYP zR0-$(j^o8TUh8c+TJ4EQOblvomqzHSsHFMSix5%q-InwteCeyONQ@3i#8te_J$&Z|AxY2_6M1!kuGV~C9S5#@Xn+g?DGq_CwLA29w&Qb{ zxm1=XN5N1)V=_sCXsdR~T}f)~A>!B%9(D_rkh2k!w?DE9eY<2~Zr;dIROdaKr(ryp z@mjvKNUxTHLp@(5W@Tx~$Rx+0$|jlg_>u9#@#AzJG7K!7yN#^@WZ?7F*dO`7YqUSU zF&0%qA3k@lHLN!4U>?++u{6^iGpnO=fO7pe&WFso5UeL$lV8`c$xI)6Wj)oKXs9ry z)7KSL&`OHn&)i)i(IDCxs>cOTkA&JQyms}^lH(GV;6%=?t*wEnsoa!?%Tt}Yi=!U- z^8OT2>WYerEAWZ0$oR;Jd7sXPPdp@4F(ld@YOMW2MSK8}G<9_`D%Rhr+|Rdq(1-uxDZ6>AzT# zVt)nA;4Or8(bRnmLSKTC5?92SqN^RZ!TZfQ9(dHLKta{aZ9$tJm z`A7kvRYS0Z2pZ>T-F{3gbDuF((z|XvTD33g-i^QQSZg#VJF;!ib44hAVHbR#x4GQ5 zx1fOiN}m*u^csl)vv11U_^#n2DttnC$Mt_BSw|@>LzD8G6n?=doi1eFr6p+20HhhA zXZ)GTKyCt%ZFd4u3qpEBq5ME*zO;s#pF z%uTZi&n*v-%VEpt{f3Tmi{bB`egq`8PI^DjNQ=ZhuiO>DJQe4U)mkDxVR|N^G4!cC zm>^afJ^61m-#H!mVSi8jE4VQup1b}N9#3H?%mb{D1ntkw`7rx-Ww-Di|FVqTj8vCtr zPP;a$R?C;W*_J)j=WK%{Ve93&YdT2uaUkno@!^l4K7)hYqc<=VzQHJ0Psu=*&m(_z zn$N@S?$qb&FMZO5ItxpEZaUkXW`0pvFJmZpYCex|c0w1Dv9qhhG|3&nR?_KtHVYK1 z>#M9=HJXOwMFwGE;#Zm`2|(*3>m^Knpfs~HONzKepLM8d==olrP7^`VQx0Ac&gjjG zyo3GBVP40UJNZ@l&?Ooqp zs8oat&1s5KDsv}B9Dxz)+(?N0Gyz9mGxH^+X`wXq6sH+yNsQ-kj~;qLLg#+)ijSY4 zodX{Ip^9;gG2p^{d8mwksK zQEp)!^lqWAxc~Jp0V$TC3+9OBSaa|^eIqBxH((}5Bd*$lz$X5zn#=u+9M#+?_JOO+9o|ldn2Q5VMBaGfEPLd5nJyhJiMhS-ind&~aODQeP-MD%$t@uHY><$Blw^T_5<^>(m?ZhT012 z2~DH76oGn1+a(%AsJKE1Z$`Bjz>H4)*dtQN0o*BMX#2T+Np-3Zs8)Q1`d6_l&H@B; zxRUb18Im4gMYGrW$6;b9wtCbSN4|E2d|Qu#qB|e=DP%m+nsLbfa@$-A>J=GDDsndC za9 z*~u<=0gm;ieI1&`neC+-hjvX)3|q^pQ`|fpIt6gc38rB7eNsn7I1`*ql}F;La@wY4 zXR>Ux)ST=1FgP!)ysyuV5p!>mR_JqMX9Q!(v`wvH5&X0{#ceN#^LNQZw<4RFK%z13 zM$eclp9hKR9p&)PADrb_)fkFiG(0qK`ftk<3f^hl4!B<~6@ZrTeEWrzKEUihFF-IS z^lV{i|8TIz_t*CFqpj(fj;!+JWEhHMRc%9Kh>r4}E-fdG=wPrTtG|r>< 
zzZ%yP8hVTVRR~DA#<5_`b3bBlgXgil;&hDh%avY@@r^7VP12{;XA1s27K?I<@PUc| zOv83k%|c>;>$#KEcoPgZ?|kp0umgFl>k&f&)8jc>rI%Kzak4qAs-v?#UsWv|)^225-e*N^y4^db1s^L?U}BUG zZhTFt=Tp`v#IXhE2B@589`3Y&R23D0>G`vDKJ3LEUzc;m;v0!QV{y}Yd&xNn%?mV4 z+{Mm&tw8|ZjS18uB+kqBQSp-9M=OQMm3hbun7HgadmVO>{@1&4@Zt3RCWvg zqNe;jj~9!0(2u+kd`SrkK{5- zDkUqdTrW+}MIVE3`|0g<1Rb>+%LPxv1?jl10|5n>_Pr<*v7z;RG2?Zu>Go{68gXQVj&mX>G>5wTdV{ z>WiDm5f9yYdfSC?_JH04X&8Nem1tYv1mNbc=l~5}xEi{0vDCIenOW6V%ETnIbU|TB z^l=tYg2ZJo)qF0m$GBPz);$B>^b!Q@xW3&~)%;c^LlYF}ok9FXdHY6JFX_x$ zedB>g4V|Zx;g;5ur6%3ab(gjTZ0Bfn>6{^wGdf(uMbC6>EBm%8JoIvlhn0kLZhd>p zBhJIq4=JM7(K&GJeX9F(TQpir{N0P(WzdYET?6r%`#%t76crDWM$ew;F|W5%+Xi>Y zg-cw=Tv{?QmQnHhk{Te!rVv4PN2JkvcOgnSwtStM*wf0?A3XYD?)r<5)6nNnG z=Amb_mDba1^z>&6(dmUa7NmKb6T)1uTun`m#ej;E+E2&snR{gMa(oZdcc`V$LX}${Lsc#fqPx?wEenE^tAaY~iBEeEM zkm^CnTCoFdw96$wVtTmBHvlOxl~T_&q%wj3HniCD`ikAE84p*3pmDZ^<&U@;zW?M} zg1xi`jK8NVM9#&0Rrt_an06;EtjYp8e25?99*o zHzh{t0nWlJ5SL9z()7~RJ&>oJ!&m*Rw)GZ+HRua1eUe`%?H+(j!zfXa^Xa9cnXcxnf+urP&vMnN-8pEP1qSQ0VXY;%jPz6qAL> z4WJ0}z@VV$+IrdugYQv6P~lTGeTH|I8rcnDpI?Vsna*m{Ok0F^i>FZ{@15On0p52& zJ)jp3@fD44z&u(oeVm0lJII~mu>^y|NmN@C7C`-lJum$0dstY5v zWYA}LWYe`Y25o;}wg7;i)`k;4JC;La~EVFZoE4$UN=wD+i^J5C-R85U-GcPx6 z0rPi?==4*_TrUhI+k@docDu!~nMeWX_H z+4ec#dV(~#c3i*e@ACL)4s`5a?%lD`+B1nO7~c#-@vX)hcr*d6(fjJUp`jsvT&?irpkde0=JDqW zCAsO`)*YFHcN-{TqO1t2>>6G8#@sTHVm%fIJ-L|99Hx0n3ltnx)QBFkS-b3a*)9AHz@qz*UGhz_Resg z%=zyAygNU_*)~&ypgO0e(IQJ7(N3xf=UzRP#MPYQHbO1uD3@`vJhynTt!Q}@AAnD!{>u(PuT zXdk<4amg9Q#|qkNw<&$v=H9K`cyDe8A8&8P*-6eAM9P`vPL2m=#q{3pRo}lh-~pm@ z-7Dpqojr~OZEzwAa56+jD)f0ypytmfvLeXE@wGPebiX`v4J};_W{P+=b{K7!D97SmSPS z?P!iY@O)6p^KG&?x0_$z{qVKO6>^z4|v|xGO6mJ}?$)LeF9(KD6RRQM14r zs#RvXxbY)Qef5YzH7UxEvF2_&t+0|}Pl@I%44WrEo{V9yVqD~=q_QL}j!&wR1Z^q? 
z?#ggOl34&l(*n?@+~qI%pT^SJ9S1#NNnAu#!wQgYJ|a+qw1+z5qBN9oCJ_ ztkm`vB5|52Tzt!#2$R`9p**u)p&t-+Ygu=aL)3DZcrGU@@{MP!KMBd%8G5hf@c37S zAv5zZM#&ugv<0#JrODw+I-Ej_DvVfyVR&L9ySc$AGc64Sh!Qm5r8{-pa!DjmXlcV9 z+}NmOCN);h>=ZC3sSLy3z(WB|eou(#Qm72(nrsQ>%-q$+;=UF-gKAYOcZf|?K#6oZ zxVe#$BK^N-=QgppTSsL8?kk$_u+zCeRh&L6)uZNUuee1J-jSxK|7t3v4fV@aq->26 z;Y8{WkHrPn2i*ymk&~1A9ainIZa)%Wc~HXy&~<}*4BFrZTrb0Td(&;5goCZz-M#ko zr=s>2!sfRenSdw`L0k>#cnjw!0V2PiXd%1st7^Q)y~7D+MijY*?w8e`*u+5RS}+1K zyJPrpq&H?Wc6&Ucm675CIN`(q+75x)cBP5C_=&0{B&I^lwhl+djUC=e#x<_DlKl-^ z$%i-tTVWP$aH^_0`fQy0rS&_kDW=M5S*L>XWS$x9hS7&tVnB;n=~r+S;J=Nt=exL? z%t%-Du|*tc=?@2G=qD>D7=hxWZiBk_W^N%wV6L%OU_Kaya_zbD&!*hI9K||S6vp>> zbIe~?3?Y1dgPg(g@5j*Qtxz|Rj9K?)M9pbYL5lIBcbE!C?OfYjH$aY8=o`bGw5OEl zaRzb|?;(v-OJ0p_5@3OF>n%QpaQ4z zuI{f;TvVtzSOL1V7$ArO)TlQ4E8w6A!>9lCQ?jed!s zLQcAW`HKlN)UoOv7%)V{DGK6?j}&WxmBx1MGxE7jRH+3^`gm0*0-g9S-NX#2jn znwEA0$?zu)X~09lTd2S8P5%i&M0stL0Gg2tN1z%i=GZ0!*uv;_15QKs6@aInK7b?#$mwjB;cK3h3$w z&a^G-7ljNCq8>HV)IdyAZ4)&IILBe2-B9P}B+DpHfT6s0rihN9|^EmlF#l6$X$;mCMe)+OH7&qAZdCj>a zk`=;|i{kw!`ff&yUp<7R1 z7?Fy;`V`5dQ^{H0!0qo=HV(K|7!ssJEV{b7n)YhxAzI}=a2CV2rvo= zT=c*EbO8uA!oNJi?biBJi&);bzL^gMh_x5BXG$=Sy{yLqfu?UScN6CAwE9cf`n&MR zkXsJ_+2oSQ0Gx;&|9<)^4{*Cql)r(W0*aOpM-7=O(QHlSZoJltq`2+s=q{%#_?L!98& zC_B-Ts%h62NUnO;!Nrs_k33vfi){I9*cHj_yqOtmHv{g_q8p?~?*#ub-DYi5X0=Ah zM6M-jCmbjtBxnqtGi^lj^fPQd-nls_co{9%Owqp>3BvWj4dPybiVv*8UE_hS9I7@e zRAC?P)~!8@;XbLs!-98sEnY!!(069TzEkw{^p%K=q4#Qu&gMNEtU2Jcs`M7|R~3V9 zqRB#zZwpWU=!ju-q9ey?ufG1_6w>u-A-NT>(J)$JirwidjeV?`L389zS^sy0SUe#3 zU;;cP1JHG;zJKpSsqg$K=Mo&Wwe?9cRZKJkMyXHbM)S=R1yUCnaXEEn=sBvSC@M@1 zGZrSeQ#Lc~3TW_5QQ>Z!c;i9jc*n+$jFA}rgsE(yX%U2@g|Pnnd6(VRiM+WpK%@GvUwtOQ9IzMQ8>3DU% zek{Qd-?%?RdPZ6db^fLM^ox}1p+8{M1g_aAEpN53lDogi?|G=WF+k}nxK?sXYRNse z2`E_DXcM(>eSH84t|JP zf=>yR-vZCl7RG{Ht$SSxkl&itTYO#e>hdIS;6x8IoHqv2VTbM1YMiG4)gN$miN-bFBWqBADt`PZwcFV4187F z)nB$Ox=i&K!O!{{fo$!^Q3#XZQh@9nc1Ls2R>TGbIL*1pm`pV6l=ghLg!1`6*Y9q1 z+?^Ud8h-kK;Sgz-;$qre>rho*zA&V~6Eab#0mf!=oO@(c>r5PkUq6}O=tfQ9i|58z 
zMESSPlDLo>S&jJKWLI6(=QC(^cz*D@msFC&HBWtH8r(GQk5aGDW%c>m^P3gB+^B)v zRr$++oNwb>xwKfaSN9blZ@X&59Jk)N8;h??NJcJW1(!D4dYnFold}^Nq=}tiV@QJzt5${NZPz8_CNStwBOUA_^tB~FL5lS3K@{_K z6tGG34xm)3)WtAA&1nSp2@V)hBll(OHed|Qpu@?iSEFQqQdf7!tzzHWCD^FK6F_(+ zroB`>6E3V^1nMl}Rv;`wLAH^rYR0=Bik5b%TnYlp>XGZg_;HcIv$kKCtDdo%vzcYxbXV7Y5Om5-{}4C+fn$R+6ycTB1pVYE$xOX;326GrEJeR-x)p#hT&emh(9JMAw5I;>v#DWrs&s_ zQYTW=##Rouk5*}oA(lkz%9A%C1bPP~ytmRAiijC|@;Jl5=SQo^@rEm3fF(uD$-;2$ zmb`5>!u_N5z0$ZfD1Qj#{Le$oZj{nJuwAaJ8?hR>NE#{6aZfd+j*FV`C+_4F<55 zO5Habn~*nQu5WaXMAn$bvy9O`VqTw`^9@3UuTq^s%{h0q6`%)XkI;9jHgLs=Hm0zr z{Z7|63MJp;iEy;iimRXq`K91MhXO!=XHw?!?(pwH3fN2@dQZ>jKM%Z$g)d;s~%3EX;r=3l4`3P3TJoy#xzDo6wx@ZUdkbJqcr!IhvC4DBo$3lx%4UuPwg_i7$ z=>K}8UqZ8rzOo6IBdWJ}i|u-F@@=yJ3{88Lv30aV9WYZK119$|?D;G$4r^A(P8gtq z#9%!v>;qgxF}x?h1qjOsPUKq_CC;vR@7oq7Dbug(ZV`$h7qTsvb$CB^5(9XU7TNE4 z^`nk>sw&L$*STiIw!$Z*j%r+#pB}s0i@A4vV1!Nef?p46H~-U2po@;BqU490Tc(49 zfY3Yub5A{lyjj|6kzdeq#y97>YS`yh44{1mv}NC6 zlDBwmnYSu-cNw|{xf?Ey;v!o!8CX-CGrHYvDZA?EZHsV9mKp%?=)9inq@ty@d9Iqw zYy8c`NP?tGIL5I({Ha?sHYs2PaPKiKtq&>UFSxC}7ytZ5-YMh&aIxpr>q&>8YrZW0=LMkhJvut_p^f;?_#Fnx zNqVS{r7zV@>;YuE7||Z!{>5WanS>b6;Kzn3JzzwH>!I;1m6fHn>C8V>F(_SMpgU&% z^--Ehb8+7UjA~lD3SS0_gW63$o|-soEIr^vioLRD5Ntf%w5%E~ zNV|)6D(w^dL)Zf-lGhxg$}%SmJK&KxpbqoeuXHY`Q}F2L`?vybv%nTg3{I`pOsg57 zsD1aMY~l&)O%%w}zR+e;uLGo_jj@76c`;URP0|55mzq#o;dW+yQEeugMSsT7MnFn^ z0LG6jw*~w=%FgtwG;->$a`&AVeJPDlC!2Wtl1y_RXzS4qK(ik#U+p~NzOOxnHyWZd zNZQFw5AF%*^4|YR83Np_EVg)ln!jUV-elH(MA=!Zu=Jj(zM?f_tIR{J=I?27&cQYX9 zE(awrKYqMbp_r%hRg~S;M{9ray7uQktCSE^^`zz0Yw0C1ui^l}r_kCtRUo%Trjt*q{Zx>PFeDt>`Xp0yc}Y@{2q4= zSTx_h3Gejz4Wvt7wLEpfX_X}8^!(K~NkYNa&r8yUQ-&tVjJger850F}k$oTGh(f6u zYOUS>b)@&sFsO8Np12(gDNtVU_b!ZJ&rkT$<4hk|th(J&rJoo>cGCejYPptpul7B! 
zCH3_5I!m1cP#{vCWb;1^!;dgADj7PLpSio-g{Us6x;s+kIstAaNj)EZC`AZ=@+@IL z3QLAe;Q?^gtTpaT>LD|YM!u$@kTiqK^eVab2@^oe-Gvzzj6bcd2M$DnYGf2y`JCJ1 z86RREM$czQdc1%c@cCx#$1YNUc_9Tno|?6gf}_*Od0)VPgxHva3%j1}(YnXT2wlVF z?$2br4tnoD1DFd(Bw4^F?yW6WK+DI*&i>ZWAu}_R{c~P!uDoxvy zxOJ!3a&SS}h`j5K*hHz3M2xUsCq?31?hH1@o|X*v6s1_%A|;-9YNa~*wKqj{qtO8K3=;WL_U1YJ5t zYGOm&N?|(I@6OZM*ylo~%a0qtm)T8s{H+5Gp}Z&!Ce(HBnW!v}8Qt_+Q*O z=k27hKVQw-fE9z5G;-B*l&m5Q_Du0i>)_@;8pQ#Rg#y+C_Po0E1#(>s7D9~?1~4+q zvV8eoSf@P&)rGx+b>=S(d1sFSALxMV_d>o!=)6R?rCEs!f)>xJY1D*WRetg;-lY_(qsa1Tna6}j!Pya z^EMT>rTaz&DL(hFUJ}0EiLQ+ZjFXkoQ+4EtINa^`)b?6z920- zIaQ@`J{3jdx8UKr`QcI{tk#*kWsc6Hb35v7H}(u$n@;=2L1j7-d?p$Elc(68Hx<|i zrDKnSs4%`t8%7l^l2-eDUHj>a9yq)MkR7G3nzomh;2hO#78^k>>Em(6Z^m{06aMw* zSvMSIexdF9NO*Ku^2c?AQJ?FR-ad!01(#KU_2QzhCS2x+52*ZvRC?lK6r{gl0n~x_ zaC;i*deTgpO-fu{R+#=PWki(ZWLD>aK0ehd*$K6Ah4Qj?)z9g!&nvlK z*L-;~)Fo49&e|GOGXw&2i9S+V--hfa4U4xZkDsK3d|H7y+R(Fc%{J7w7yD3xhWLx? zwg{CKOHLS3pX<ZF0eZbp zF-ha3mbu^d)qLFrL(-Lqf*Z`h=Ha)dbhjMYA4qTAG8aM!TabutuCq^lo!>om25(1L z8`Wd~9Z%h3NA$%LN*z{<=@GY3swvO%{&t`wR6`lM$WIa@h1;Ltd9!SchH-O6MH*}P876rQyv&bt(nfT_7i%H{?PU8KW63NStp>cz4BKq9!$)M< zDl}&RM=!n84&Fs}!Ym-Yc&6Jm6b-}4rn}Y=ISH%)3V0Y$gx{dKm5f*=p%|u6c?mAv zw*PD>_3)YHu`Mrz&>clgS=&|CZedX~!|(|HU@Jq)TAMDm>;`Hc00RrSIcP>&?UQ_S zqly5NYK%!KF@)5T{1e3T+h-;{X)xph0i4qYzD7 z>QFx$TRXO>f-(%lP0AgrntU} z_>9Yfx^ceYP=p_95V-;kUf$|$ij!TCYT(XHurULCBigMSn#Jp_=j)zCJu*6;?FSj2 zdqdTj+ySWS?bDi3h778BsUE7!LnnWgHCINMpJ_>r`70pdypZ!25IFTYC9BY(J%Bo< zxkUKVQRCVN0Wfs?Ae-2fzHOcxAg7dAl%ZRb={nUMT`%gYR=dIB+eejK4o8;vS+aZ) zy9zqv`R#|XtsttZen?su9pm)F2*#hMKi(vKJ7Ag?qz8AMkyqEr_7h~8(e5J>kmmI3 zZ<)~<$a2K!>Vx^L9wxv22@8rBY>W6DNCq2g#-<~q@DKX^GU}wCA>~;(jQ9+N9;E_p zIvJdeGw=1BA_jrxDP}PV5&0$AT!2$Vw)Y34yr#^~d=4%h9=hpt=Oec|;Q2Lg;elBr=Hg<515g9Bf*mLsD<}Z+}TX5c5Dh7rWmMWmI@k zQ$RkKz?3Bqu9b!93pLs93$EL8jm(Nz40+6^o;pu5y{XrqPC!PB*%ii6CDbVKlrbRrwsTxHpz1N^>l&Ok;K6XBiu@{Mn zd_L5Dm1rjA0;KkzU$L}0PTS1sG+&=(qJ)AYJm9znVr6-XPZaw?NwrZYL*8Rh2$jhR zt_gD~WIAmJ$9+<6KG|)JjK}8k&))SAqxgIuQ(kX!QQy$KB^p#3h>ZQ@)!#V}#%&!f 
z?yDREco{MPLw-&{Uh^teEG)3NQM`{%B#gh^aFvapw(&8{O(YYIw!+MCzgZ5WAN_+x zQbjIU8}}m^7I@bDPqFXRUsAP7?I`PP0i$9yxK#p-x9-hs5u6&*2f2OHPdF>2gO(X zU9hO@<8dQB?`*qVpS$o~LyXeO#Bf5rdE$Q-1^pR@n8h?lP;|N-TFvts{WrFBJ0{YLCOQcGhz!4KGafo5V{@x6SESlQxd!%H=Qe*tJ!EJ z@2NA995m_1E!=9*{iFd6US9l_BzO&Vh7L1y(NBXky)_?eeKcYu$J{Q3`pX zI20VFck8W=qE=Q`O6l>R;YdXz{;QnW>U-W0alMC#vlV0xHd2XT1WSB7!fNr|V}Z)! z*ZyJDYZddv2l`w4eoC@>R1|3O8yaz41OB!%9hd$5I<7|rD4mkud7pGW?(<7}A2H(d693JLCDFWE zPBK8Fnk=X){?|nm;V1EiAC#JfV;rHoMDb{7jKRAM9e4Vy#lK-muKM#27BsEh_%%v|2F|P^Wviz_SPR zK0AV4Y*j0`dzR}BBr&&z&wTEWBM1E1z zs_dUno&M4Zy-qUMn5bsn=%-T?mBhfZvqZ*OuJ!>kXv$6lm(NO0VcaZh5A0$Cys!6@ zO%|FLAGI9jl}oN>SQoVn4n?H4&jya$YTG#`W&WJ?@?_+a%tHMrBCaUo6J~g)K$s`I@ z;MiTe4hfG<${D>yyzV{--5aN8hik~@_F!I8eWYF|(NIad?CSl%Nte?U*+297 zt}HzY;n_+-e9j#0u-y;mpD=g%4OuKM%a5=N4fN3heDgXo`S`%KyV2V?_!(-&0xI*e zr;0udw6^VxQ|R~X;9sBAKZ$<|*Z_l|&Bs~vzrVmj`X|fxg93>f*CE;81^Dw64E`92 zAejF)u*A+|IDtOGWH=O6#KYJL^EPv#B!^+6$W&Jh){3-0Jr9!!@JWRPdhcum!@keQ z^!`LkLNUqv`qYG9i=D)z=fi9ZPBL8;d3bMN<|H`iqBDRYql|M`#T{I z#pGq78$c^5RZ4w>JN(peC)`jGm+m|Bs{_^93Uprt@tbl|-ZX9qAA`WN!Lo>- zO`h705?@Et)rs}?_Me2Tcp(9q*a2sl>c2yYb~#a>VfCY-q8-3iU9YVV*_Nf2=`-)m#qDyBKYtX9)eJWlJMenHHP5M(J=^U z_1S~+=X98(8TFghXN~h^p-B$h5n)Yy+>k&RD2qezc{U1V%Q=hWYH}1WKooDqUc)UsLEHZG%<`wHUKzaCs!Q!h2B< zTHwDwG#&8|3>49z*q}7)#(!4SUs?u|z4ql&?6DX_1Q?kI%Ip5rPV$sWV8`YWxpZjt zxhofhy{-iCVI6oC-TDVVu+g?{q*&7(xVy+lvdNz!-t%%iBnoBZV|>La#Dc6ONx;3I z2I{avg4@?^(p?I{GS=cB^@)e)AbQsWHl~cZG=vKSCXfSH?%^(ueg*t=KpA$a^pwO7 z_Vx4&lDhhX5m#SsR|tcDKGkIC=fn8jAq_(HInIwa(FsalXKiJf@oWy`&@@iV5kg+zz}X;jqcIZ1 z{8u7$Y(EiqUTIqP^=CJbW&SPnGF9*pFx-gA)+9aAiV>32m&xrGGm*q4 z3$~0M8}^>@-A15#uMMVw`XT9#_{0#4?2i0peO^#$rUI@(yp-_BXvj=MNc2V8FX!lz zpREB0tAD=xn++T(5xMj&A9eQpV=6CJN%h%Q49$lH1UlC~c*oLw*jAx$CP5E}7g}~C z`qQDgW;;qhk{SV8eqS!IckfBw3;YRRlNC>v8dl$8L*JB*5!qXls#&wP*!ZCai^34s zz#>zN&8ZBtfQF$zEiH7jsU%!>{g$p4_^Q#zs*OH2p2n^izlJgD2QP(|YimG-8@(&4 zxyC6P{%JQQUfc?ae1QhNhBw&+Rh4!>vX3o9GI1?WjA9*#G9Mh*Yywl#Ov2@$2-spb zgN+3B8x3;=xe#e+Y3J2empiPCBcN;IG@{?rpC2(9SO}}S$+zRI+LLU8pwChRrZb`g 
zefYuhklAFO3t2jvd9Ko7P-6cG{QXHx6q8MEBpZ7Y8Z$p}C+;td)n@V&qaIHXh^w%m z#mWgsk)a@TJM%8RRkFn&W$O ze%d&oX(x2h>*IDj(YZA+T}cNb@I!$SB)&d3OxHg46f*h*gRPZih9T(|Z)D=r`|UnF zm91Dj(r$vCvINhDGU3I`NVkMS$TsmPN;j-P1rSw^<<~M*=EaMROKUSGYMU8R_9C`r zK``9WJv?4gF%mv3$wl@Ul-D=wDL4t6AJFkZY_F=6lyZ+G8T^I5-<=N!r4?3+10kJ~ zCq(Sw==4WWV!MdcoBKchl}N=ExO}X{EC@{-j!3qxOYO4-3_(YB(PGn9EA;`?Rb8~y z>FY8liWB?D!E@&Fx~8q&tScoMSsWQOJnDWse0C5dO_moG5eX!uf(H>1BqWWtl2G0& z1e$M_mpwd$sVUzlvOM`2tIpMz%)NAH@&l!k{Tqchvf`IP&d?I=-Z|t>1q& zKL3}mGl3Gzn-`Vc@ZVw(A1a*tUfWW}CNk3tvd8?oj5ykQMIGkT+Mc!ez_=-^?dj7f z6F1CqMRD2&)sT}wv2qf0vxlE@YJ?O$(m&>IuVm{a%O2Ebq0%l12I?$zfruBC>w?iyaCR*4zu<4^>85IuhBH>EubCRUv;)iGB^NQUlRIPnHou%izZerJSsm2mOLA zPVxNN|FSAn7w&s(<4(o51}rk&Z-yU(v|AxoCTm?#a7{BLKTW|r7~(c#Gh#xheIWocbpR;|Yk7!^smBH`%K za6yb*qO$gDeBa+eikO&;!$!WF`%Pqo>aa57KyV3DZaMfJ;=ng*!G4H1P)<66W%+Bwxi^x~;oE0UZ?C>c zKV36YD;FZ>FMQq}N1$2?mDJsQp$u)G3w5dJqu)~he}0@KG)1xkS1zC&t4Hp`@ktN# z>QFsnHDOcs=-@LBnkVmhcz2Xmw->^iCvnK{n8=qi%1pPKM?MLNXqOSeqCFPthB~(@ z;);;hpNGJ|IHog0>OCdO%|l>#B_PTUS<^!# zQ(7(RPMq~e631OpJ+dfqHl4aPo1m8|-{|S?Jlm2nC>x(kJ}TKC;#2_$Bb7mc zJ7A;EB8~Va5ah09O*{RWfliLiTdI|*YfqKlufDPgrH@;j{96Z$zwbpB6MRbesGkV0 zpYkQ6Oa=4E@(KfCR082SnxL5ZDG>kEZ_yw@?2~|YcfxrRZlkv(WCm{ z;Q4QVmN;8(X(jS!>1%?^xYBz3WQS{^2~0P70}eS~j{v=pcW#C~63Qss?LHUGdE<$A zb>H3`{(e^MWeYG|v-ZUI2l5eW>kT)NfKl%I6Nf~}-=JsWhv~N-r zRSyz2EB@QDR(|Z`RrDO!(d*_9PqMs=bz2lK`k0h~rI+@_K!m@*E$9JrngMQGd^9x-YywLBG=Pp$uT8`b?eW?=GGm~Daz&p9`>=pyqsbS0u$EXexDlq z9+o)G&*4Mzg|GLxz=>ya%`9h{4x_Sw-B{5)y`Xt()>W~27P+PFJ%fMrU)#cVA`)-; z(~Jdc!s*{`xs*kSfh;z}?UE=Q|A;mT7lAMm`<5wRHK)?!EB{Lx$+g|SjbQ%vV>^Ah z%MYZOq1ISk*4K5Eaou9|{8oOPHvKSW#40-pB{OcEbm4VTWa|}E0T%sQ5?%+bOEBkJ z0jee;=qX2X9Q>)`e5WD~CJIy%qAH$VeYT;^rd`*wHF0vO)O|6v4WYQXktI0(2E@WT z{#_6r2WH(!PBMH15;J0bxKzFu6->?+KcB=du5lVa09!oxMc+bKt`DeUsN&6F!4~?& zV^vYn@nF^hc4h9p%D#nP{4DT44fa*-B0VR^9b6EHCCn`M(s%{uICHM0(FjcuB_BuwJq!F>ilX3nI6j7dRF#qA9=t0RtQT-liK%c zVzkzAd|!>i%;DbzK9`m0`@3O)2w$Lrl7Z$-#yH{dr&nI~>G zT&-O|R;6@A3Nhn9Jvp%koo4vb4vsPiba3}%g%oQ%@OjULwHpbNX`JWiK-z3#UOQTd 
za=4-9X6XC0xL%D4dyYvI&o^&}(EbBbYL4^e-{8>O1WhYyq`yAjyh$Y702MQxCm#C` zOaQr{`Yc4novE-_%t2H%FbU<@`Ym;{0u%^=X5u~~A(8R?(4PYyU3WwAG=P$@_x#yC zyfC~qda`z)8M?kQ!Cv_gafxXX@hZ}EkfApsldY|Y0OQ*SA||m-1w;Zm$Zf#!=utZv zB8|*5uV@Zv02+J9rnxgdW1KAt?5<$LLQQi|v}-^y4hidgV@{ODABEa48{+HKuSIU; z&2l|!C?q(4>6UX!^WPocPa%AGB#`noAc!jD-!Te_lQ`~u`b+FbETtynN!*-SX_%GqOKk->~>~WCemz!=T=`! zXE{f^z)@Nc(T6R>e9#QK0Cq}(gI@m-zM`TWJ%U1zSV!|Eg{^rKHY&aMy9zUNbC@Fw zYHKV~yK;)Gx9tY=m%XhWt-ksxuYuu+&b3vssRPIGnw@=dRZxm0end@l$sy`8&0W?SCIAqNr3u`(z6wS`480>HjlYGaX z;IFR7#@63%&D>L2N?GWJ66aG%`McEW{$Wo}Rg-rdsrSyS+r%({UZXAu)cc*4a3}*y zsoG?`#caL=VF}OC2kGWfo;o>hm#EBch)L@Amaf5dbLNgXYko`5b*puHV={`!DVp%j zr~W{`Hz8UG*LP@_P2<(TDrkF# zK|B?3N{#;2zo@A(Q2g5xqN19)yfVSC(0+KZ>$w~HaDGt9N)_NTA7|6YBz4Zmwum9i zMLe=aA(bS@YWxYk38^5*|zo$xnPkTt+S`e$cqyuDG}&bo`|~U>(7{ zwteAAOPFK|L&Pb2{rk03957)SI8ubTM75=9Za}EEpOY62j%kn}rL!Saaq*;nPn({B zyo6JrZ&F;$l>#m{T=mZio2j32Hxl5*25&?EM&Nyo0897R-$lA3Lxg5)vK;$lo8Ey6 z?=HD2*?ujFHpaO#g7H7aHR$M#B)h8gd@kJ?^tF39w)R}TQBRhcx$r-1;_4v<{zZ(z* zn-qL94imL(#nl$`3G7y&@!0Myo+~Ccwh&CMl^x2gb}LMre*sZsC~47k_0r=3X@|yxYhg#QT$%smO%MqQ*yxO6VU6lH8KAhU?Thg6-Tau z_jch@xoU-Tzu*=G&cNE};=3yge3x_IZP2%w`CMLj689?lrbW*reKBCx2`sJnG|woC zzXURo!XT`kz*I$xjWc&QZtX#OaJv3hJ|T>sh3#|-Lj3mb*tv6n@7A3Vh0Vnm+AtnV z;#bvi_}G3mBiUm~=nHp71K$Jk&zfI;UbLUOGZv;cm`Qi5{$Sg?ZhhbD{FG+^Mc#Lh z#0$e~r%tP3?Kw#gqYHLM%G9#OMqVO~Om!dmtWE)XU+-$BqJcWif}5^ee1TWs%44ek zIQ!=v^!SEV7^m3;oH0;S;`|%9Zd8DmOyU7Hi1Mwr>&>#Upt>x19p4WektnaG$-0D- zY^3taj5^}0X5Fc6Pf`#LaScJ-0bmZpLz$#v5+XxWSDO>5beYz40eBuVSIzW=U7Ie4 z<)+}N-nbF^5?PVpQ$OOQbxB9#nYe#-IG9VjHMlui?tyv-1$!*Q{tlLpq3ujyx`B~u ziA@5w>WJ{=3{Ku>D^HF3jvV7UVu(k9^4W}(cwe>d@ipyZX+?%uct>mgR%@lCqG7uR zB$M1ZoJ|HJkR9HSG9e!G;1;a#MLKJdZmrt_pdnPrBI%gK7uiy~S3(j6vZ56TcKpI= z;y9>*@*`drZ&s^+jcoqATNLZIbCGUMfvVg!C=sOIBnyFWx$P z%G{xjr>y<<7JkOQf+TEa1ow?vbO0PbhvmwDU(6U`-ubyV9|%oHe5mL^;knBaGhoEV zG33j80wqynOHgU4fO;If0w$>rQdCytvlj%EklzWQ5nJ~N1lJ5+w7QIc&MT`nTM=Tb z=3U(9r@DIoC-v=>)F<)+s)@1_3~1YoLhucN%b9CIN85J_4D^8s(`Qla){Ka{U=&bx~NXM9VVLNki?gsw& 
zVaT8^@@jGFc-$MgfP+d>MIo>Za;-KFPs{pk^s%W_pki!0^J!91hBA@Ifi-Czb3efa zr)%WA!uWGlz(Md@FV0c&Wa3nZ@8B#Sp#e8@j>4)%t@Oa*vPM9B2zX^1%N#ua!2zJ< zRXu6C8Dj4VI8T~fQ@r1>qtJe8c+Y$trEl%Ni0E0U^7hVxmpHmk0sYxr#du zv+Zs>v}bFo{8>(X>_T8|0!!0^IAR`gZl&@U+HLdveCy<4S?Q$PB=Huv5O*^&dS+HT%rp-#u_X65a1gjIj6N_pLjr|3LbuT(x=3;* zKmh36yjJiwKoMMfJ&*LnggViShx~|WsI&oM3fX1rlWWbGMHym9z5iHm@yvI*!p!L0 z57ddds{abQxLuP-z?2QEP#a6M8O|0+=g73BeYWXC%TaYxgF*@lc|CFL#6bHXG?-3w zj7A;x=0G$pxkB)F01MI2p_w`{>z$5{~32n{kPk7y?$D{vD z>_;D5BRHmGL3f^k)(sx;=06P%L+|ReY&Q0JBtRR;JrhN$TL3ctr@iL z^XLw&nOX1l8+zST`=M7RfEsp)?s3AggA_FVgTAS$Mz4+Acs{jl0xj3$`j4^f`>;Md zO_YeCZj;(-%gFOfK2drhLjx)3w@vs2%W5kbr~Vf=1M~km{wZmL@yM^U(&+SP4>ANU z|GLpCoR*l!!)#_q>LuvyQ)>E$tr6O4yDKrR>IAeIG)p)=+dEHyJ=OtMV$z66Nxr}f z-HQmkmR{Z&9?uWcany=_RfEziAgV*mj2Ol4b!$PrE>cCdgeVC4KLvXb_b zF@DN|`fdZ(aUjAUAkh2;=KEmfRi$ ziz;k{-X$RUp$yy{s6x1+h2`$?2)(L364z7sh#J!rW4$j{WU3TId9w9!QE!@i1urdH z@nI3R-9o!;+Bf+y5m}Hl5DK#~_!CT`@I~V^p@5Np;2jdGHNbRh8X~&Svle;MV&yfw zRz!oI6dD^w9oFyMn4Go;SMgNjQ%5_|#+7cCkuK|ydbv(9pQPkvKgd_AmtIMCH*PEv z_Qq8ZSka%U?ZCnLs@^B5xOjUq#=j%E0XWu&#WZGeC~{q}3K*KCUS8x1Yan5kGuI_7R~MB)vfXD`%^BN z+C}%zChS55y|y7Apqge$ZNpqDZCRWz45N7hAHLZQAATqDvv)Sc?P7P@pUJ-h zF@dI*2iRbxT;(yNL~V)#Y}#_lBj6DVJ>xS(Ee&c>=hbUhTtXSD-_@@K7b@0eT_Z}+ zykPp*HC&dLs%osVFCMC>o+kQO3tyHyiq5+xhE-KfqNFu~W-Q*iYvM~dv`gC8ga@Bz+GH747bA#iu7f#4cC`h40yu^~vo6 zPJvKaU=zxzuJcDG@R`5cn8k(4EXB=7IQ*BtP*+0-Fmusk1m*nN4r<&Py^z!oCLp8? 
z-vLd1sA*XrZ{lyaq!$?M-g_0nd16v9o*O`b1}oI8=Yy>@m!mv}{B@1*4J)D5$U-LI zQggz{N+)Q8`oMi9cZ>B^xTy3tl`zvkQ}DjP369?CS>i-v`0!v|@vOd#`tHVI|1zSe zv9PU0_%wOT&s6LE?awDohP`l3SVDQozALR6n52EIRIqF@#L;)TH7MY4yVw_5x=vHW z)gQ;7v2TApOzeCzD%yoS`%Lp6DZ=6n6JN6u`Kzm55_-)C$Kyk@{jf;uk`pYEpA(s_ zzgb#Le%IO$r9J-h9e*JN_6=?IE0x*&B+yDiBs52bxyF;e_`ZP4H{>7B%wcp#WZXFj zc86frf>6)u6M(y0#kvCznP8DiAU;E-MStmuWJYMbR0?Zxsk}e`{PmqjKAL&!iiu*| zXygCSmSZh0x0r5;*DjzwB!;)7!ck{0;(V}N+l08Q73#TL64~lI@zI+GaYnh}>y0BA z7z+xWi%3vx-vq+4LIpQ(G5{S)6oDQWEj9YR3z$yayYiO5Ddqy&);OK~Ls>{rb>_mtAk4EyAY z_27^3YgF7TV=L&!|6~6c?2K7-$Ryfh2Fgg^*0EO%Q&MF>GBW|JUVZ#EZ6gib&`R&t zuOtyqFuS*$^>=-DF7_O>c@|k9atr8z-h=Pp_TN@HNn(V@$=<=Z)k9M3bt5RPA9_w` zhm%XkLSt>=_P#OH61pTMz()Gkr&_a&E_KZFbS8+F{SDM02!_JOK?b>TwRK5*)!)Y9o(fo)B-kDc!aKaP_A54;M`eiKDkFt<(v zd`lahMFv`Wi&zOsFVfD}4ii>yGc)Jr1-tW3SQr9zj%! zY`}M?o6!7@{YkNOLCHq5Eg+Jin<}F|T7)SOfF4W=h4AT79Ls0hY@8e&A9&G*)UtT| zR1@da`RQ3ur?t5v(~;z9&Y5`qS5Ea*tI{ouHSOX#=bG*UoT*d#HXz#*(vX7sLvKxu z#1+=zne(_Vw#ZT#Gh+Wf>k86$-Kq@)_j(3llC5z3=&`Bz1)arkFa?FM5h^(UlLNOV?GUm;CZa7LHQnsR}f6xb^dx5$cy=R97)ljtnask zO=nF)guBG%NXhvK_R!|;F`j#3fLH~^m$yLM3_l#c5D?#AX}(*VA>P4qf&8#Q+7QR1 z1tzBvVe#BD3FqZkPnpsdjUzS}k)5@*3cNdWB+r{_4GMa!ru%E_-a%ctCJ5#wPcG&?kl-V1gaMt4W&I`{WslHQc z*Y_yQVv!ezX8ZBYObglo!$g{AW$!y*6yC?>NdY_ygi$$qiz(tg&CLfD`33sJQ~Ab- zvd>zsh%itLAor9?WhsOuU+opmbTZvTRz~;kkDkjrfyvRFI51+( zKB%Zfo(teCOszVae#>-pa8Q2GTk_&$Qdnf_!T-UH^=EvvvP}Sm=gjLLWE5gQ*0pUi z@bV0$S5q6E@#`sDXv-=xxP;vQHYgpo2P5BHK*@}v5Y7#dlITc9anR2e65r@?`8VVh zF`g_|W)>;;BU6qP{4|JkahVI%?*t8)>%J3g7|=qtjIBpCX^CEloS@FU8ZO;t0JkUU z7hpJqIroAPavdLmw2ZmJ0TQx%Qwtv{hFpa3IFTAKM;qZ^Pom>}saUig!1MeWJzUj~ zrD=F<>$izwxY7Wzbl4rc6kAJ5zMEp`XLX9D{|g*qU#J|C_r=KajBG!*1! 
zJcB~iyG=iJ6V0PiDgEQddbZ_@5`rhu^k|%`1eE4LleK?$-W6y?t&Ua_qj%Q7V%rMIlx=w~t?Zy7 zK-C7tAGf9pW$GmEo}h|8JN**yB7!&^6X~2^>l|`?g&Q_+ZGAT13oQV#EHCE-hM5s4 zW4ys|UV}(aADODck64qa^a+q)N#@sHo_B)(4k`Y$ zhQi@8yy77BvVre~^*`H~o0sBH@#8|BaNC4AO)x13(c9`OZf+AJx0@Oo5NfQeC)02H zpw9m{jWsE`<{OUptawX{D+Hh6`QrQqbF75ze=NX52nSsy!m_K_Y01WQ$7ik)Y1u}zNJNu3)Q8Sc(2HcbFJlzHukuyc$nsS5Y@1&FD?0h zS^THx=Ez`?Lfv~OnE3gWKFb{VdzmowQ4wt6ZQcb0sR#&>B7P9~0*@MrDlBFrGOV(c z=KMGL<7;ccJE(`kO|b5gul@ybGgBaMkgw`65Vd9{(er+xQl_)1^kFWv$d_T_Dr<;Q z3HX94ADZ-#m#|d58AL#Q(yAzaAakGkRnHIIlD}k{8#8ylNV7lai%98jiSSUQSl){q z7`<1JD`z|0k}cN!J-pY1b}cy^nD$z9OuG^{H7e64+&*~tcT+&|tX*@Jz0qhDMf835 zLObjap+ogKY`D4gKaZ2wN5J}tDXNlWCh&Z+IgcX3vlV1m>O8Yos!`D6@?ptqm8MX6HLsX!=`TSvBqcYd`8ouy56Nmt9%T|p8uZ_%Bioj?9)d;5e}dTLOcb$T$#!etg$Rxz zNjhHpfq8H=sb~n2M5nn8&UaK8TxB{SWf1vQ#rOXFhb`aUyy{$~*omgxIB8W!`DYF_ z+_@u?ie6#GRl=M-Nb#FEsY~R4{3oHAoIyibbr3)NizqMAZ~84P_v*tGyKc-ZFb9l1 zqJ6^88C*OsSJE_BO&j>V^y?c&*RYBovyDyy4Q+Fc{B|i0)RzJgKWFOc_k$mqdQSywpcnuX*Sid?K!1w%Ag`sCnwBlHePcxgs5E5qBjHANLUPOyG# z->iOxFNsuojl8`k+-)Pe zfL5Y?)y{`_vGPlcR7@ql0J?)+yQ=Yq)fw>VDEGx{o5RHajC+lLO_!T?Oh17Atv1*t z@t*uTMI`o9`4IQw3N~SyYLi|h-q(tQNFzXHRCu+2zkr}0LnyDWWCLn(MA!9e{5!7i zTM$Z!kc5MGK(c+`tu4U-l+BPkX%V7ey^^qgTSsM^&Y}dK{s6JCU;iMcUDs|Jjl{_HXL3lF z16fr~#wDnVgQbp>tb24<^$;7+45j`pVsQjoySjpb@>5VBzGWRm&PRd{h-m;<%K+9S zwTXxrU7TxhD0lGgE9O^?bs3;t;g75 zbUTVd9>Or)#u;HY=^rwh=^HVh#^^Z`1bJTzx;yG`fNO(dn3SwcdZ*n9P@KC}`Tnfk zWiX!!fa1{8^T7~>B#?P8Iq+O0Iy+C+`&m`!{V4odlijZ!`qrlAk6<$&E@`h4F3@jF zwmtai-9wt1j@Z6S!5oR}kpV|~xq8?U|6lY92BQF5SSrliRxZjZ*a3+&N{Nndz#jVz*M+g<q}jRR&09mfG^X0(gx{P5|}eE+_FD|DeNS=Ksc4n z68$)0DiS1XZu5a377Yr8peCdD6zUpAS>Fz!;s`J`VzAyE?6!t~{pu!C%E4wF^ne6$ zN>X!KT)LFZ)7r)8UDnJet0UK` zVUIKc#mrLpl_8I^(ZLFmnT7`ciL~AjszbOc;NWeiU&)ttnsaGPT#b+5dP7P+%G`h|DokGl^O^5tx(?-;Dkev$<=-9kZW zpj4*F;%(zU)J(LvT%A)sd%v1*M=$8g9#%%*cf6!rcYI;on5+h$^%B0RE!-;gl_xRV zG>=lZuH*?>R!ka(-g|tyoGj!8kjY2)U#Jkm_Qrp*31dAr=gBOc$H z9V?x1H!+J)BkJ*w?$wo9`ksDjSpJbp5SySAB<8R91S)Fjk^a7YM~4HzDElDskNI`< 
ztz5{&)5n?SSc@2;6Nq&-;B$H9;9QlNd)8CJn{Vfz%P2?lRUYKqy4<|U7OmMJai{t$7?mgbg%Z@ZO-WE6R z6}g*sHxF3%0=74NIp$={{p0lX zmsO;6uMfu_mtYQ`20okE+h@4m61M1r{$e<3w}sNFA|QH$^;OLm7MW)GH-A~l^vcPJ zRWwDQ%CDkAQR%9bj)=+bMAXT|Tat}vm;29q=oHRP-~OnjT}qrIGHuPQt)+cwBFX&I z`m*q#TvuXOTV2MH#g`0(gK@D=O3jCM4$mKX8_oYm)OE+R`F8JaU5eV;QnXc7d({dm zsF@nAqM~Tc8m*lORjtOZQL_?_6%@4QxXF6^>ghrTolw&W$DBHt895p*5q0mI5HeL{Co6Z`$jtSke z;fkD%j2<~|&a?jTr!NaSNI@WZN5Zc4BFnkf1LEcC4@ds@jsV*7`gxiYX?XS%edkAv zBT)M;PIbAk#Vz^~-^Y18#kR~QB-Wxh>pBeQ!s(t>~e>AN!u~&X49G z>E2XUImH|s8p~)*2=vy8oLtY-sY4d?^I5BY{214;U&SL7G@497!)*&Duwb!~{H;@0d`jK#kR zP?r+K>ia`azPGNimZaILuiYBtGkQ3I8jTE$c0o1=9Q~^f&oyNIy}8 z;nK^t>@X^@xiBTv#GeWJZ418;JL^_XOSwbPjjUd9t5V1Okobt2sV&z;H#U6ps;7Wu#NcP}q5x40m` zHGSa$onhW$&Saihm-*qs1rFYBllh^^#(fO%aOjd?;O&tHl;S@obia!hcJYoSBLnQ73+E$7hnNg+eKtd&G= zzto?J%hb}d6JKl1XlB+0kA%=ZkR*9xLgnLUDSz85ZsTDJzcS3p;R>!F>=d>`xN)IF zXF?vZZ0qm*ba=J+n#0E;8SP3) za`}y@BPFE5q_d&iSHFca^bPL)Ei9M1_wm(=R>FAYD6HK2+9hTeiTU9? zC#6EFelTazY>n3ENB*&`GQN6+$f5f}ogYr{()453!GAC85(i zf0?<71oXt%rc5{;dHmk`v~46E_f%bm-RU8;VqyDWih*6e{>#I+im!I3TUdH+9GKt9 z1;vs`IMgJdlffAD0`Aso4u`Ws2{Ms~pQ&3C9Y4FGM~{UCxV~t+RYT8AT8v^8kzOYC zZ`HtFIKd0+3T08Hbvoc-aoV6a$ww9th-y)+M;2=lT0BOcSOC&5==vnkI>W0=JXZa8{JSY8j3prYmvXN>Gd4oWuA4U!`0XEJ<0Mi z3(h}Y$J~+rmMrbL_O|-t&rkse5jG_YasX4~X-+e+S8vp@%s+0OnX|_)&0!60Xg5RC z13l)H>8pR`u}Uw-U#3<3wU+_8#_tDWP-DjHuA3c8zi+<8Sid5bb>(SFsX`Ro!I}H? 
z=p1PVh^(gaDHYy(e6KCow*rE{zVyiaU}}ggkaZbEVnNOPr2)^E-rvOs-)>~Q5ugma zpL*Bi`&11RA7_A&S)s+vBH}g*bNAxX-nK|>;S}wRch|PzB|zUApuJ(G$#IU3rjaMn zs!IaF>Noa$rhB~SR=V7w z^TSik3u2DjHd>Tp7#8`iU}4M~v4LkAEn%=ya6tgmr0m7*sT1dIPO`-F8#6NaCog`H z!wMav>!_~)Hjyfyv=Rh?RSRNe~t$K z{&`9=Bg5S3;IS1>zul}m?dv2Y+o>w&5BJ>Fz+15Hk${}iH`nhI8I>Qhc1V0W78t?b zyP;;9l&3H8E8-`cT=(5e+rG#6PAPNce2D!rIvz@&z_vgTv|u~OXNcnSL-Bh%rL`&r z%c{mz*UMiY$HTJbpR=<0@yTC$&~OfXOvr-*(q86>tP?;mo{>dvof&<))(ArC`SVB5 z2PcYpHo*Q~-oJz|;x4MhUJJec-ch^xfo-_C-MC}YJ1nyRXEG;~LR5oaUCDto-J^dY z>q`~ljsXyo^ZDai1XD0xQtzSu)3L2;>UV_a+jdj^vVJq96kGPR$QXM>?X-Vp+NVVA zJP(4G93<594e^y2NYV7J+9|$R1&%b){o*A`6rn!>8uHuo&(-aZLHoZ?bBEe+h1w*i z-V%BhGyS!t!Yk23(26sx?~jJiqaPEqJy({^`rv7MF3S%s6AI31H-m^<5TxlBd9R{V zEF({7M_k#9#A8=UMpwK6Wl0~PVEpX8V|$Jo7e;@x5b+f3*Sml2@0bE3K6)z{dl+wf zZTK3tKwhxFwN7F$IHK6(cjt$I)#z-`#m=I~R%#c5UC+IKeg1Ww&^r)!O!-ZEOGvW@ z->GkB&i-C{-p9N~*Es8ckvFUTGlcKKgA@U4WT52PXV&POi&UD-49+Sw)cA&Lx}KE4 zg?y8GWwX2M|9v|yhTdhD&*Culr8?6H2lv5mBo5j$D>ISo0JnqKkxLGvPu0%vg|myi zPUWod1N6eboG)HBB?z>zBRpekR-ZCmwCfFm0i*_z087ot49?P*(2!&NTVH9cJJ5dY zxiNq<+w7|zP62#jkUzO7#Btc)=R8#{92j^L5Ew*sH9upZpoeXHF^?h^GY!xtQhDM2 zdR`{F;ckL`sB!`7^WSI=ChqaJzd9_#N#v#bORfY)4#U-HtjsBbRzJk}&}5%`((SChQOUmd;{Mdgh%L}!0t#ECqE{AH-1 zg?V-O`Xj$SJ#}4Q`)#!!&}T@wA6Xk4^FY&?28_0xRpJUp{RJrGWmvM07~gZt;6AD+ z)H8Tg*zSD&D4kFaUkJ#5@P2m%FmH~FwPz}RXX%dnbU>x_H*y~WVsefg1Gl#mpABy< zO+Y5tgR&nIFTN_|7JwotrhUx2&f$R_Y^(RzBVJ@pB{2!+i!(4Le2DxtN=%Z`#BrHQ zG}kG3TxYn}_xRSEUs4Y-2Z##AkxH94PPTKO;vjcP|9n2*r&LAlHAWAOYZ2hZ>e$go z%?GocjC$5Rch*=oj>w3fBPT4r&Yq8ac1dBDxz@$yAwKVS3@8&j+I`0O2f2FY629?N z2+O1Nx7I&)I(&m(=N0(ojHiWqg<{S6SNT`)k6T4}#QC0dn-GL9vq5e+?#vfEK5Ekv z33ogH8kYkl40IAN`vi++QWHMfj&q%o5Lmb9OI1yOFv4&$QZaC+-rh~sHGI2Ve-c9K z&%w%foRATmE*_koDS?y!)QqEJVo^z7P@^*cd@9fCaCkAV@~;-?eK0Vb74oUgSKA=l zGN~uq8RzY0;MMPGeK%7u6teZ1VfuP@Xg39W>EaQ1d!Rs&PUbkeZ|?H)au>L?xk$o_VCSqcV53GKowj50l0A)-3smz zeGxuhE?P>ar7Ge|3PMm%OWssQFlNjo`S@)SnYy6}pmD=rzJk~1XsFeAJ2Xwo}4 zPOHnCkuA*8w73Ne;$iI{Ut-J7_$C`Qa(TS*P+Z-KqKal6F|MvyX~RVLQi8 
zkacu6;MERwSz;_thR%J*>sdIb<(eK{{pPCGuKwZ<5hrc%Ok7XJLQ zqf<$`SH~}(USungni*SiYt+A;qtG{JsjS!GKWmidgrX|?slGbA%93uR=U^})6XZ)F zGE+6I*=FERf}U%%b@YoXYzLW|(afSn1jGpIfkrpxoo1ehgR<>&#fw zwPhwM-8IvBVdk zyn21zCpHl`muN}vnZ2y84X;1afE;n`e~c!mUHo&e*JK}sZ=oVB0WXoMl)``aY?7|R>krApk`~&irwxH3gyLhZv&BoKS+ej;{ zo)KRxo{rq$L76n4jn>xRD1unNxE6h=YT=z;w~+vK7)|R(FwM=1uEV`mH`fm6bHZNe@LuJ12U6lGYWtOL?Aef<>x; zOm#`vmQ1tCmWHeU0#zZ_*rjJ0eRRmUa^{0UK@-b29L*zBphB49ff4TJC|Y*JfnLzl z9YcVge=w#;nIQHA*C> z`(3wFS2d$2=a(%Y#Lx`?y~g|PQf6{{%EET7nenm}TjM_z%*%H!j4VXK^9P?WT?M_o zJTA8?;T%5rDPZ&hr&tz1?_ah@2dJ|AJ?e$K0SsUkTu&=~?Gnk7s3zG}%Nqn#YHZUX zz!0!O>U;J=zI%=z7bgP&c6}cpL=haD;Gej$xzLjfq#@=Pl}}muf_7&D)&Ln(TVkyG_0i39!Du)PeYRing9&;QC{QI0VUaPZ`#I}`R-B!*-2`>1bBX{EZb==l$@t?O9QL(YnjYHotCpzx8MMwVoIW zll3FI`%z6+)1n55|ji7HF8j)J^fmia`GX{-rV?7MwVH}gj=pS zQCB5zMOS%m9>!~Ojcg00m!RxqSO<>XJEnu+FZkA7Yg$KZdbQOL$SGoLAX^mpfIg%? zCjy4BKvKZr>#9bo;(~wM6Ik*fVNT=9F}G8CTN)&`msc_>2OsmVjgDC6W#U3=YHF#K zzR$rcQCJlnPqkN|_RBk2_#sI_r;e;J0-+#@c?A<_-St?4x4Q?~|3B=?grX=(;sHOh zM)H17*dm+Xr-8oVPx8GdmtlB27miPep!0v?qoc}8?WDUprcr$FAfxi(9JXC0u*vLR z)FmepI^gd)*Rg6zjn79+)&Eq4kS_pbQSIXVoaf>}&pf^rEObrEOJG2AZ1wu}^lS4YE~;vIsx1M$@_h4f^q3)ut}- za?ze}QwkqT01^f!S;|#CbkU1=(B|&qlI!O?cJ&lxkguU}A`G1saReSbzP zDuZ65*#Gk3dHDX5%*wO%X3eOpMzAX4f00IV>a+K;=4749Un+k2gHJba?q1@BSjsYH zwX80S*nRkW3^QlF%+Vjz@YIlCuC@L@F2F7$uk||p@)Fy=+wfJq_1?4&`~~|qP)n0r z{nm6zK=S}#FtOf06T1dfWIpCV(5QYnmbQR^E&E?>DQ5Q>Q29b+WvG9EbxE#}6 z&v~@4i?km_Ju)jMt4`0tyg*LiMQ?R+fz5fiaN33XgRx}!SebW-vHnnLt9b#PSx_|`bBQM|joCX+uDi|JtX>-+%z`&e z!wtF05Rc^p-wgV6Z~3Ud>y!popPbb7Wf1&Sd6Jo&wIjOhc=&UsD{$WJD&WW>1K)aC zcGD769p;#|2arJ;I8L$&YDJzcAC&IC7XtO4WB0UDZ`UToYg@xcB^i0|OZ!fy`3xhN zNJRHH!q`Vf?t{N(6wGn+T{Zp{^AmFw7mfHza?($pe$8zT5$~cR@8Z!iqeSkCzoFQq z)z!Gt#0fTPDH_r7a)7~8`K&*IP$j_LP?%n$C@LRdUVpu7(^$*7-YHS2Lyg}590|Ml z?j9JY93*92>~OF7rNAV2-hbhn4HxGDYBlS*G~wj06lla%#A`|sK3{n&5;ba2R}QqGqJu7c2wA*A(dT~LY*M8EwZ)K8u8YBLkqM%MfK zcp1aBYk3RZ`!lH?O_1i9$K_C-d7Up1m@{Zm^_#e>ti6mZ&COW%^j-d#hI?&5=f?SS 
zF3)27oa^`rwm*=LXU7#vkN5&x#zr;r9D5DTX_p}?fy9K&T-+^>bxMQ z{oF@Echgv{fMdp9jXxJ zQW7nU_+X6F-iOzF-I$?tn4$YB^q|>ut0~i0PhM<`910DUqil_xfF)>1>;tmSE`RYo z0ql{}L*9Yot6VL|kYgLwHce+zm3-ZxyYZ{vJGMsM#m`*sP3?X1WRF`e;Kr)&FB{4# zIbBQIe(lz0^80l#A^=<*A-_Cs7y2-2v0}q0v#4I|mP>2qJiO#dbN4edZX zX6!V#*nkE)-1gOfPqT@LGTY->-m8Y-!>=vHm%4lA*9=GIDpbSdUaEuzdNX=0_7*x; z1>0dEU)76WXr;bFuokY2fn5|Wm&8RTvH>2YzPh*(I$a^qf!@At<@5~U)3*EcO>)g< zeAQBDcJYPS;y{goRg|(1LydjfK7v^7w_!U1Sbs=IQpeU=b%CzS7ocC00_67}>)zv@ z17jJz0N=WPV7S4_)fo5l^?1ysAD<3^1Sh+-RSO?Se$NTb0A}O7?X!s78acc^Vo#Du z%P7|*y&%=`Q%ba0KFQCQU0nJ43umu5U7)fY<*fiMBdrLAmjyPnWLKRklxb{g&`9w? z>kqpuQV*!Y{c4caN8*P$Q6L(OCy>GWm(1Rl9EqgH6PN_WiMIskx_df&|HDdM8cn+u z&8ve;gAHF?pDpK!_0?L;PB!bJ0Gj)Z6O&Z zXNKO@h#JK>#+-J(|m%yyG8haQzFXg7HQJwa}eC;N`Is zn}An#<`EaKh|Iu3&G$Wyp&Y3r|0ARIRcsa)Ac~k3XP)L0We1(h_XB%b$rndFx#gcD zj6#JLN4N{#oMfJbjpNuzkFO|sdi)S3$-1Q3X8cID7Hm-y&rj*u&=Hg>&gmCov{M4m zW-eS~Q3OX~8AwuDeqU`yTEpk^rm(u&lj%MiJZID>w{b6DBpJg7S!HEu7*Xx$43HKb zcc&x!jjz^nIt$!k9B0`)A$kqnqVgZ)t>0CKf6(5;Q~0~WkxK*?i8L&%kQQ-AL9W%F z9N$?QrM#$H0F%>8j~TsIu9w%w+jD>ui=5bt1oha=L6#vts}ugL3NKN*BARkz__i$e5mSXt&_L5NYvVZ_Qk~N z<3>A=8^8|PMVd#DIr6Hs*ZHSsHnQA4I-W|0#lJf zFTE>vyCS5D;hH^1gb`}=zMu%18+~|QqW~3WvP3Uyj{mu`Y^xFESZ4xE@=5Qw!8M64 z)<&p&&oYUWRaE3=^~+=gNwMtSrW!<*XY-TNS-j=Tl;vNMBfAR_Yq)nR!kCF2t7rWG zTzO^6AD4p{QWKdTI@ZM@wFXDbKgHsr!SY=J)SmLn+QJiCckZ=Zrn>1n^MV)Lc4QS_ zZx81D($=e&%S^YjloAuzzhGIP;W@7=C_o+0dp>2bs`yN1-=EGRb34N20O+&bIprZX z>5D9Gj~D6(besABg{aF(72x0NSBatZXo#)gb>8int+M9&JnX2}DQ<7tktW@t2i{Nv z<1SGJ42_on!l3bD&HAMT z`noOP1}2#8#Xp8lR^=kNg-3r-3Bpt-k+(?P3u3M@uWj z!;gv4HQS=8bo{T@dYBjCTN~VO;KT-}*zOJzW?WM<{%!mxW!1=}OiT%wPDuwUpoj(a ztRP@+g%`jM>;vf5vHgK{4Njs~4HQ`zwE!ZVax2`4g+4 zZeHQ{*mQE0N6@`W0;G(!`9v+%t;WaYnv~L#lk#X|5-c<6;v8>vF<686t)_DM${x?l3#cYPYdiJ9^MyGlwbH}x+DCe9OQ`~ zW-B*1_i1=}zS!r>L$3=;P=GxPB}BRh6MfGJ?vQKHtL@s)l7^LE zUbw(UrPr~D2>uW?F5b$4EV(W|WDws5)w*fpYA#Bv-VV48zmlZ7A@SGh!maem} zPrT(K=^PjrOhctyhMv=Bat|}BUM=? 
zN=qPyn^otC6ftKvH;(8{_3dZw^A`Lv3c6~P|B|189 zkupPBpfb*`kn^zNkxRuISsKl`@0)NeuOQ0;(Bfj>^5S>|X`s?K^qIW1p0F%UD?*8% z#K;OtkU<_$wbE)0Gb?$mPWEXj)jiROtmKkQpwnC{D^7Z`Qk`9KsW5URVZk|W;YIR- z-0DJV&)a{>xH*ebkt;+7g~dg7v!^VS8(+{va_d3`b7W5N?2(^91~sCT9Fb^J4&6jG z{9xOA%Gpzo?x?d^YlIegdtxFk@hI{yk3BE{s4>fXLiD!7Z@6ytp2O87yNrwW!Eh#C z?U3cvr!6YH0AuemZX00rVF<$OT9Mp(Lg_vntL4jXiHkDL{&#s;-&)p*y>mz)J%7%9 zf~}E|tjg1W*qT7JHPWX06D&T6xp@ckN$0doBF8GJC`W}P{j{sSY+NuUbpS|~~YD&VWGg<5w4tNhTm zEF-UHB}QIT&N|D%29z|F>KTyT1wnH%QYxm}0l7-`u(dJR2*ayiOmU$-)BhZ*w#~CF z<9w9sn>w8gzL)X7&KpYE6Hly_ZJ1#6h?|7CMX{dxQ=c=RdQ*et=1k1URr7v|noM6) zg@BgZ2y9Z#<=3Ydu2&W}WFs7bQcg!Av2qp_G&cm9IkaIUB;!~E4cUoVB`O(zQO z8ENa)V!HywNmue>?l{dcPmG6&Nyni+M$??{fG(0RzxwkQtt*UJF?p;TXv{Soi|?60 zPUy7-2wI3QlUwM<=SXWSm+>$oyUB@L7#j9FXF7u|kE^8ZJx1$A7p3nW-%Zxb z3U{`ROcq03nWT1?Km7ELu+B~*2I-RBEt0-_=YDHTYt`Gm$fI2aQHdl~U~{XJ8Y{B1 z-vb&jQM%uf5Mjvo*noUT;IgheEF5_|fWSrz|850F6!m20OtoPQzqnBIVp?sBFuWnh zYjjXI4c~u+-1N$YNsoid>Ul+%zzuZpeJ9`Ca9HL|l#yp~1pS>b314b>?@#V8w(SQ^ zWYT?}zzgnTJl?#ZBmp?wc48O}LSlW-!*Y{*ZthQ{ghTGG z{I}oi9J{k)-@b6=yO5pCpFxX*nm_ittjFu1d=)Ps=$abHmZ~g zD33rC`ZPfe#DKCZ>!mG5b$s_tb+yQaSe43i$74+QqN9EINi2nA(n^NuJI6h&jm*pS zAD;!O!@$DIQim}Vl8k?Ob!Q7B`u+cMsgGs^)cMFUT8Ps zbx{i7d(+kJ%P=61w;Juiq@2`Z7)Bm@8|aCNpUz`bsj+9-_gT6E%=TH87DiWi zUrqc*OxW^eU_z-Sxm9p7=8@-w*NBJ>cHG8V0BL3IF|4f=E$ozciW=g)slo3SwNC$ z_xrVX?t2FDTsPkE1v69AuMvhP)YGwrkQPn7TAPqFJSVxohrx1+DWtawFStz7VA~2T;C?y@ zj!$F2AJ@1An2)sfKkhnN=C;(7DDJdX6SI_ADZco_>Aky{uS$J1~m0o9i`GNBt zQ7Br$LuS018cZlI!rvnC!&IM?zXLtp$`jy{m3}R5-1G_NzZAzLtzzUZ4(yIRl)beU zO9B1?@Oe`VNqD~xSfa@qqfkKqK8U{=*1^0ZZ1Z^m*_YOq&OxQp75nZ_Eemf0bx7Uv zZy^f2&P%bOfMvSTvLwejVFO^#l6 z*3Qub{m52(k}IX<12)Ke|2+T7)^;`Yn>>7|ldxCrCjdV9${)W-Dx}Gt zUdu^*4BJolFL&~EvAaFH1N;m zS++e7Fpjv#T)uA`Wy%?LKK9&~7$S)5oIMC0gYPr1yJ1jXEbdy%@4pePs4~O-yyWaY zFMPAgVp>zxmlpKbEMLyR8_i#Z39jB0*euBYd|-K5nK3o{TaxG!MzWLNT6$z!^SPd~ zk(v9~Ca#Oj7--mF0cN5w2O3U)F5pyER2p6O|9CKxtSNOSi*eI!VL~abwu*$PCHOt&OLxt z?fo-G(S-gYwSMuFyXN{M>AC<-73YPy&GJ7mam$%@ zY=CiPjr7=hKY(0q#2o7w@}xdHy!Rx 
zyJ&eq#Y;bYGs}X?w(Ie@?Fi7(<<3j-S`Np|bKS!O zSbh>Sw;n8fc*TpM;z)ur)ev`QY>yaeycwYYR#CuE{@qP)u4;f|IAMLjJNz=Ra^yNj zxa$l9!7>lm`oB*Ifp)cLR8@%s4k-0p4Q?ehKjl8Tr>lras z!Lphb?vPrnKVaprtHuS;ypLPdS(KK+PHlQ+-PaH9HatCS4$%ads9h4S<5(j zeO$QvEs~y%JBUJ*aKEAB^%b@nH$vE!v8>;lAz%g*{dTOV@p4-NQiPt*&3V%Pnmg43U3% zFit!}wXvk-6)peN93URMVW2#V3YqlxVBn74VpAj3s{F4v^q%isc4iYBP125F)A{Z3 zlPK)cPNyZYKiHF;TWR|As?NhUsu>NUH!^IN{4L4V0@j+JXTQ!29l<4j4(PRO| z{1;qNIkz;+M@EO*Z{wKB_rGe?<&#IpSKni01&2dF4m`I)G=FZB>FgKCd60Azc82ML z%lDz2tB(97qfxqiVt_<)nE6LEUqlqAdFw$Nx?u-|f!4!@@GoK1gchpcx5R{HUNkhJnC6I}xbt z4}*6-QsOlcHZCHjgwch*r!Fg>-X{>?20A~9*8L{k+P=j8*wv;yU$9oanKuRYp`4poxS6KPDVTCB zg^da{Yiv^;IH^wAgA>f#OKbz0eS`*nXT1g>6iVGk8xR=s3LK37X|kGLNAno%u6^?n zeff$hS_s3?1a+$TlC$93T)VXB%ln1N0wf=e@x!11yx#LCaxe|=zfdaH2-OpDU1zh2 zS%_DHD&k zdCv@mFskpq!&{hX9_z3eWKJk<+F65rTVzD8k2nc*eA{YYYX9`6stt9DMjV;AWKU%bl7wKK5! z>uSGW|4drisQGASvKkM24&tqX4QOPvv0@A|fjUn)BE=qUIx;G(LJA3|JKoSd$+P?t zGa=l&55|2K%LV?~a_g%FTt~85<(^_B2%!>O9+$J%w^rF2^gk{DXXjG0 zKjgeki}1EOk;B#sC#s09jDSS}Rl?E>@QPMmR*9Y_ir6)TW6#>poRW0Xv*HL}+3BJS z)MU1`2c0pkUO(M)7F+~W8oHVHSiT!-*D4|PI@h~j*Fd;z$7MS8Zj*&ifP{BPg>ae( zsqEzW7eK4&hGvBfqhsXU4br$XAie)zRpf_n*kbTErZT{q%(uav6(8O5#9x*_lh*f) zTnU-LxFq4+R89Pi?;1@CT8@7I?LIK%GAJq1I3Y%2v=MI!)D2o0#K+jTGH^AHBRfP4 zX_A5#?v-V%EWYDsnS^(km+m)r^6X-be8pQ-=ZRmbI{aw~XZ%XVaRMtZZcIcHguXg$ zp!n%84tKx;PMp+M-vI#5W51Y1{f!eaM&|@aN#@HdpZ*t%Mv81A9bsIE)pRz%d7+pU zZ_;AnFAHp84X<>`Lu*R0coLh(si$UW*Gt427!{#8l-Bn0EZl1L3OUEP)yv{=A;~a> zWf!M(rZ>_r$kIir0&xxO|5ePsAU0{yZfhVKdy=-=MGsGD0Fze>KR8PIiisBnzE%c#w%clC{jV%hYa}0DzvxxcQpp z|6}8wnFj)}Qm)#k`mdOvH!!$}@4?RMQ4>`@zI_|y!}JLkbHIWV9(XPMCG8y4)SyLv(!^p^IOG`pua*@kQPz0V zTONUP<2zxZwagmIEN?l)J*{lRGk;UMYeNcLUyCJX?&u@6c4BX1`+O!Y~i3OvEZLJJul8N8lCx5Qe!85ZKu1bJf}c3x4+Sw>R(>zY4upX zv#hX_d2^}UUXx;?m8fsARSpSjKo0Q{Kp%5%?xs;UE3t?h`Czhl3Dhi?Yl(Yx)wV=2 z+%cEki|%Xx`+OcdOzGGrB9C46d7FOupA44hb=bSx@Ylt z3{HxV?I|EOaoTw|mt5nx?jCPYZmh>FlU$`;Dxq8RyIMN99bN&7lgeqRw7^zr>%p7o zU>Ozsq|EmSUG0D}Eoe-0n&Gx&C(GpPu9#u{t>@{uky;Q2yv*Oq8p;X+!+@r_wddeK zTE&(wcv7a*B++%AM 
zdC4D)&%aU)MS^Z%a%|nPJP()28-8;IkR|x$q0WYBUkUAKGe>_mUY|?QlCvppKfEKl z)~qZ%_g$h}FJ*s)=EV0vI&Po+l{1o#9B2S>48VyRdqiPittIu=iQI;ND=7#C=8UCF z^qTcb%wOh_a|04gc{D&HLIH}Tj6ol=B+<|`aly?*f0Xd-Kq{=CeeyDuvE)H0b4+TR z-8nx%Uy?ud?;asYtb)sBSqksY3*UZU&5*=`|CBxSP&VUyz}bQ};#~|HLAfi;;jX}^ zxtcS@z)YQ8dEvfirZ^*bzgSp!JE4+3Bt(6%wRUZP$t5f}XA?p2DvwDn+LF9^(|W70 zSObXck;V05&GpQiK0*NjTh1zVZXc5*PJjQLXcSP^J;6+-zj^Z~3#~h9LT3EpJ~()# zHNXwSdw7NLP&>*y5lQ?F+IyS@Y--=M{`>5dV+S$FqB~nQ3A$rIZ|S}O zk9By|qmuwgJvD%ST&H0s3Kv~aS*W4^-mwCF`_IFLxB5db)Za3oL^>iAUsTjKQ4G_& z%H0F7q8)B2)0*BJYhFLKNeF!doNw}~`W}yf^+m1>T)GCK6Io~S3kZERL!9K4rER~g zC`GuPqEi?Qs78fwBwQxvCInL3z4{(B&^W13St@{w_B;PqYslkIWW`s43Fa#r(VrGS zPpfs`#55#1!XE{*iA@nCm3mwKgvLvyY8y`EPSuuIn&fJqw;kdXasxSsIBGDr~ zGLhX^Zs6;#H>4f;pf)y+$tY&atL9sbId#(y$|<|sujH~mFz8?7aWnivY*EC9w2iJC#oouf_dCk(b2g&WLd5hJe6Ba}m~mcPDlTq`jnNTlEG}qV91h!&H!~uC zEd-EIen!u&`SE6zlK&-*`5z7wj1Lnn#vnH^{#vP^ndyue3P}$X7|i>$wwM0##Aj@3 zQGstg+NWl2<^tKH6_WjX1TLDb+U6j6gC%oDN!uv?vCuoX77Wq4naol_Y=M6jyCp!k zKZIwxLVxRf3$f^M8f_)8dlOB_{V-KHp*#2TV@8#FLFZAWY*EzJtGYn;{Jbx6F}KgD zQ@Mw2TZd%H_ho{L{|v?@U8qmOni1!2F8&*)Rk*;*xtH;A#@B4~`u^V3yC@iJ{mHME zJ>rhqJBjA@lydPF6JglOCX?r+D`#+;ckmDLtYXk{%)|jYa%a_6r61P&wZM#_P)2H* zq{Fhs^Lf2rD+r!BckS|4nh6VCtL?0Fcr%-gdqr8<$fUWwAGTP#BT&DxDT1qFyAnmK z$uPL$v>{LdawFh7d8xwVTo8aneA@_tYXvfrpMNIu8rP+TCdiEHAv25DREAG(vF88^O@6r&IG;r6~;DP2m>%WUu-V1eRO=25nO z;XLYWGRH5S+n&EpV&VH`#Aft^q5k0?4^yP_Y=-h9@XWc`8&mh?v}_*?y?hqW=)dAt z>SoU;J#O^~W$EosT}{nLp>!a=-bC8T^g#U{eQ)aQ=RJB5`EYVI5*=Nab}pMwX@@I4 zHwqVAm1|2*9Q4&5lf{MlyICVD4+C4F@@bWEy!qokRx1Wb2(2z{*>JHDF25qgzGKMq zTd+0>wEA(feGBg}aQG`Uur_9*?(6K%EA8Cmht?9g^>+!oHWQ}Z(4O zi*!dVh7>C zr=u&oPqqKuwRulhpC>T&?SS}5or%*C z51Eb~d(&T82Q=5Bm4wTzU~ zdH*G-Pt}4-`Ok^dUh--6<4cq?K}v0)yhUvrIS%oH^$!zYVY|U{@lPW8P^p?uZDLP4ep=K5CKDG03gIW8P zG5Yg*+_?@Pbp5$vdnbgk5$z7uZ71OI_bPtXVTia=4~UYuQp6(tIpll$~2i-Vcuim!uAzSf9?iSVSUU7KQwq1`TOUvZnySo+F`+m=d< zR)Zj+a|<~q&}U4m$|si3pf7Bh4@`{7{~1swhCbQ!99{M4J0;#xGc=k)jkOBr8%0o| znv;m+8O8Yz=ATqM6sLS^|M)8XscL$d=8)aB+?h1FP_X$925iq~j<#v;ZI@SM=R0tl 
zZaS*cYav)3TK%lZHd?XT%zcPvUV-^Gg>)$|2GOMV*S5_ya6?S{C5)S;=^OosEoq~# zB8D%C-+#8EfU_fxkRKXV*kW`)L{z>EYV~P_ER21qjKE!(S4m&l_uo?4TmS?%f2aK(b_#(5LZzw*?x5O46Jo7W*cxcInX1GI)g zi!8txez0Y$ZfDISf!|DL(r=sg*Sb?}aF5dF`4@MMXk`y_MP14}>|baoprX(Nud)1$ zhQ!N4{m(-4k<5RKu5ioUE$hf98%r9pv_nghoOwE5WcTjqvpeRdhdN^O(f^D*3T>rS z*PUlDENC=OURrgCzpIFgk=P=t@j=&L9?9?`S|}A5Ju~>EC%_Cy4i6|W2=qANgyawC zt3jgL+VMwINoTaWcm_i*41x*tOsNpmsVKIzLJ!%13oH`xD*>cLt%;BAZd7a18>26p z!w?m%5f?9D&gCniv5q=D-E`Tu(a5FmIP3gj^aA*;ML9>(Iq!F z)}~YgmM-W|emJ31JC9Q^jeDbk|i;? zuqS*zFs9(m3QOu78|hr$0E?j`PKf-F(M<61DdrK^LWOTu^SX`Jdu6oj%nihQ(+bDx+`n2??B4Gs2%Io?5fzej6JJN zb~6v)HeQ^(c=6DP79pK~TX~1wwWeF3j?qcll=vQ&_YJ}$x~Vm2o>fCv3j3ycsv&Ta z{By3NNy3RQhNDb5;zmq>3>rrjAtLe)0DAHcw zkeqd8axk2LlSW4GmpC4({YRY&Gyq;Kj8V zsq*JsrFJeaK)Qn=3I*h!7J$izuNY+WAM0sHZhl9Ks4G5rFnmYmq#}wU8Yb;aW5vXI z7111b_(cA6^+|yyU1tf>`i6lz6u?FqiL9bL>*7+fFAs%d3s&MQ!@Lg5%iRHem#{SKL7 z4V=C1Zb8-@(HSQv$V=9R4UvSUAoiMu9Z>F9f|)9xIfr!5;~Bga z3hQa@@h^iB0WqFGGn7Qu1s-%{xH}6TezQ)#(_C|0~Q^J+40Bh5a|8f_@sXFchy+_(yWWZ{~Bg z<3=@wlu}alX^h@zMP;~uKdE;Js=|Q31oouaB>hD0X7x|lXEPkeJQCrk8C88y2u75d`!*Plsh(T)ksrguaP zYf}OlB-O{HHJpW4 zpS#CGIASTM7*7D8@bdZAxZ6>$d-~xzgGCgwywkp!NpLOA)@*er5$FeLfe!oxvZ+8Y zNc2Sze+KS?Qlm7xdtHh`gSHAfKIHf!^ymROF7P^nRi=|f_b&37T(%v? 
zZ~?rmrO+116~8PJ&J1vy9fgNpjx$ZRx~?7|Lc%a3pt znhRrmnh6%+6P%>l`}so?`D-Qgm+^al#b$pemXW9_=qKxB9~5r#%!q^+MLg41% zSmjwNa7+YXh3`?cBXu5JSB3)K;^RI%2If4jEn3}iA>QVpz-yKHu*s_ijy%aC<|aOc zF2YDy^YCq~>!z=!?&|Di8u;phi*W^0iY)UZH$t7gxmQK4)A))&t zZZkZS@cLa&!htvw6<8@^-S+*x)588~YZBvd+Hxjc_29D-F$vYJ`_CLqS=inQM*HSe zj#n?;g4dptpDuYpIazH7GP_AM*{karLRw4Vk;Nn34l>4@0eWnE8qp}ppV`8Wp)wt{ z-kpMy7f*I6s46@RQ#Wtm(GeAaNpDQ;_V$wdt5!o;sTo#Pyiyx3lb%kVgl!PEC&1(fg4`7z zT`S~=)CU?UYNpLNsf`pnfkc{it~nHs=^v=J`}=CQr^*d+M_B3Q6&$Q zS&}B`5N7`b*6s~JyQLpFjS}B|`uJV#d#rc!MR8Gca1}vLFETh09;Gv~L!U~IN9b{)jf7Fd`haCMl z=3_XRt1N5Ud@}3)h29$w&|KF8LAM$I1W6+EDXB#?TcEYn6B^sQZ{e!eQ)QimSr4+EHuI!bT}mj z_?HV_Q_%~l1w$^NvTazwQ({5F3jk^z(H4V3)YkLW4Rp0%He)`&0q?}4vk5PYJ@<*& z4%UrE`FogfaAo?G6dn1u{V$a=X$0aIPl^9}6bTv7 zhPoX~iRrsPUw7guNAkF*NnkA^V$&K=wei9H)HP# zPs<#@@uj$`Jbpv)BJ|^Pk`yvDbG}l$5=7LdQ!gEOi z?TV*X7e?IJqs|(Q!FZsDTEsQZJglVV)bBa5ZP)*XzF+L*`0fDyCBSPBFwqL?L)EKA zO^eq$7t-P`iwDO@;*NLlzPH6VN;7!TUT-mi=Of7ni&EZ9E(R$;F96?bm1RtaL>}na$ZSe8OOx|4E<0Bz{uC(wXZf&sB4yk% z`+Fd~VVsM8gTlb}nA1)$?n+`FNB7;=9{w}eIS*6#^-LL$iB$nc-Ll?{q8%6gsd-!1 zy7O^E6wSNy4)aTA(H^DnFh}Y0Jl=c(`NjeA`sJFUHJIc>1v$}%a{I#(EtXW8Uic1| zf*aBKP2L)FLcCd-lX|`6le1mU1?2H*5m*7{8EfJl~ywQ z#4)PT2xCq4TyFWD+I!JUq)oP)2CKP9kG$qC6`bY*3U){MraD zuGZ`5zCE-$*s=F->0G?pW+q>CrY>H3uA6m-ZKSzYf|_sXyDKhy8UMSr8}#E{qVVB& zq|)zdc=)WF!4IK|gmLMgfs!E0~SrG|Dn$69za(^@-yYPR9J5|n zgVd9y??t|I-7+UpSSrSa38uQC&qLC5z9x1tN+u%qiDY3VOFLc;n$0F>YG~&-0c4%&0=!BP3sW5lSSoCO71QuNrDk|}>b{|=x2C->J=E@1O- z#3%{dL%^hrTTqlJ9Zwk7OhyX_ul9b+o&tS+I4=wmF0C=qsi#gvC1zDU_K7tO&88I9 z`9RDtS>6eF%%R{j)A{wTB?$VHn2v79I|vX*1iBh^7Nhb1B7W5d{d(_Nh6mQgtptcu zFcYDB0zeFxS2ynZkd@%F`X)okyc{XBL^B81G>iVVMEhN_km~Qvi9!g)v`^A&^MS0T zFJ8}tTDR-*uN?vrG#s~~Tk5r(pFS9?vs}L(pzQxCL{{kcW_zDr3C(`1{mgwYgC0pE zgEw^C{421i%_oarxoXU%nsBm54Ye0}A=*9perl|jBECEs^!JTK3HueHVk9s`iX_>6DVld!Up;|z5Pc=4yxv8+KU5W(+?J&2e-*h50+06 zQ%F!#DAbzL!z>+T`WIN#JQNgGjM1sS1h1zCL~pmZ-Ponfb&{Hi49V-r3r|5#{8jRjU+JV5coF8UpH*fco$lP6t|lr>vkRn5-f)_%0cvdlSF&uo$@{ 
zuh6$LNeUcm>s@;;QRBbb_tx#v15Al7y0WUIq95pZwV5}yMA!Rh)}6AZ8j?tKO>`|2 z1sw^wb<1lD-@QgNG5!CjJvEQ>(Pb8x;OB<1$QN&^#|I>yFo>4O@-0bmD28G#5{2^= zC50?Bg~BT=LwhJBHu*w+laP%sJ;deUL%mYAI=AxVqb|)vYie#zGSor@7TBl?mw6W|o7$rOu5<6d;negT9(&$$fdW@V zXI`YQu#;0@?R5UT7|!1ewK4MP9UXx%@!!SvcBrRyZW}p3q@m$oObe2AedxLov}rg*16Uvk{KKf98%-e@q1C;*_ii)p8yM` zKf79|EF@|Q2rNl#7w5P)pRp%s{;Qg@{f5gVUYx01O0Eef;L_pzmL*Q11n9~tojN}p z#*M0IoGIPJRjdsOn^xCFU!x`oR@IaT5d3Uh{ewktuayi@16QE1Ur%bd{;+wmyhggUAs$Ak8`C*>suBLeJIWLU zVOrzzEylCYt8{YmAS(Pju^%7_H=6$Z53qV2a6Sf1N5}b3uk{l%V4F2~9n_@_c0K zYA!zCwi>iiPiS2a4Cg!t#Qd^}((=mi10(2L)H+q>-FQH)um8Ejk|5YX_$Ms(lQf;< zGl-C#!uu1MF7H7&%;uYc>A~osD4_aSRnas!RlHA~jU~8cN{-xoSsZ7daFyd@42V`m zj~Xxob-lZdJ+v-z2l^Lc)jlH5nizLMG?t~x|J26+e6?0mXETY3Hc01d;(VO5H8%i~ z=lwEoYWER(*zwYlv@aGg#y+OqjWtTkH$AnDqm9h3PFm@1nko+CN+k@@mL-RopZ{8d zE+0H?uVdaOy_p9AhPUc4&nC}lEg-&i|C-e#7EH3FnzuZhuxuo(>xt&2fz8Xg`&I+u z*3(Un)EV&TXPTi8yNPg5p2fcbgW#e=MxE9z-~EZ$H#Z(f$ALt z-3G{3=}jBty5|SatLP*U7c0_FdS+ez?suLX2tVl{l2;46)CoORB`=6#_e!u{5kwxP1-h5 zcd6v#vgX4V;WY)a3EXZ+&X1iVPKG@>bDCi3&aO=>^MX^FmG5Rc>7uhTPUfr^_KdBD z5iY&x;7>RFfGjCS|5NkPwh?dO$ssQdoN2D+?pyY&Qw20#l08)dHg7xdY~x)C zb79o*2jw(X+>BqDp{XrkbniW}7GUSgO=B2w-d^TpY=G?j?;$zW#eU4UfB~uNFha)P zJu=XlapQyhJk0cWw{9JXArz+#Iwu}PM4JD}ojH&meZLs=#!`MH2D%Gqy!9>-PK6FS z%N5!O`_#sCREbb|B=Bbs<2M0f-=k*Nh2L_W<;JVgq)pthkh2L{s-kR|3I5Hp=@&nl z$L=L5Eeybvbt%(G@SV9=e+WL*B0cThln>)VkrX(~=ObWbcF8!1{prgLX_FVh&8 z4`IgR48DI}9=(up1K7YBu^MdPtVvr?d*piIrOJ-8%@?V{!OjWe4B(2^GIj6Zg?vb+rWZ8r+y1ZBgZ2rxM45Knz z)PZZCI?MMl9bxdt(L~i>Q{oxidatcYz?NNCQvs*qOh(W{ql}Z|lz}V#B0oQRZn~4{ zgG6VK9z+;maZi+*_*^?|Qt!yE7zQb{|qt8$+k;r_g*2QPVa>axC$#30DCawStu7cS( Kp02X;P5K`^6%6VC diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index de124c9dde..769669ae6a 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -20,13 +20,36 @@ warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger -import 
platform +from platform import system as platform_system +platform_system = platform_system() __version__ = "2023.12" + +# Get Flash Attention v2 if Ampere (RTX 30xx, A100) +major_version, minor_version = torch.cuda.get_device_capability() +if major_version >= 8: + try: + from flash_attn import flash_attn_func + HAS_FLASH_ATTENTION = True + except: + HAS_FLASH_ATTENTION = False +else: + # Tri Dao's benchmark shows xformers is faster for now. + HAS_FLASH_ATTENTION = False +pass +import xformers.ops.fmha as xformers +xformers_attention = xformers.memory_efficient_attention +from xformers import __version__ as xformers_version + __all__ = [ "prepare_model_for_kbit_training", "patch_tokenizer", - "print_unsloth_message", + "xformers", + "xformers_attention", + "xformers_version", + "__version__", + "HAS_FLASH_ATTENTION", + "platform_system", ] @@ -71,6 +94,7 @@ def make_inputs_require_grad(module, input, output): def patch_tokenizer(model, tokenizer): + model.config.update({"unsloth_version" : __version__}) if not hasattr(tokenizer, "pad_token") or tokenizer.pad_token is None: # Fixes https://github.com/unslothai/unsloth/issues/5 if hasattr(tokenizer, "unk_token"): @@ -88,18 +112,3 @@ def patch_tokenizer(model, tokenizer): pass return model, tokenizer pass - - -def print_unsloth_message(name): - SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - gpu_stats = torch.cuda.get_device_properties(0) - max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) - - statistics = \ - f"==((====))== Unsloth: Fast {name} patching release {__version__}\n"\ - f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ - f"O^O/ \_/ \\ CUDA compute capability = {gpu_stats.major}.{gpu_stats.minor}\n"\ - f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ - f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. 
Platform = {platform.system()}\n' - print(statistics) -pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 1f0046ec06..be39b90afa 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -23,21 +23,9 @@ ) from ..kernels import * from ._utils import * - -# Get Flash Attention v2 if Ampere (RTX 30xx, A100) -major_version, minor_version = torch.cuda.get_device_capability() -if major_version >= 8: - try: - from flash_attn import flash_attn_func - HAS_FLASH_ATTENTION = True - except: - HAS_FLASH_ATTENTION = False -else: - # Tri Dao's benchmark shows xformers is faster for now. - HAS_FLASH_ATTENTION = False -pass -import xformers.ops.fmha as xformers -xformers_attention = xformers.memory_efficient_attention +from ._utils import __version__ +if HAS_FLASH_ATTENTION: + from flash_attn import flash_attn_func # Final patching code from transformers.models.llama.modeling_llama import ( @@ -139,19 +127,20 @@ def LlamaAttention_fast_forward_inference( # V = repeat_kv(V, n_groups) if n_groups != 1: _, _, cached_len, _ = Kn.shape - Kn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Vn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Kn = Kn.reshape(bsz, n_heads, cached_len, head_dim) - Vn = Vn.reshape(bsz, n_heads, cached_len, head_dim) - pass + Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Vnn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) + Knn = Knn.view(bsz, n_heads, cached_len, head_dim) + Vnn = Vnn.view(bsz, n_heads, cached_len, head_dim) + else: + Knn, Vnn = Kn, Vn # Attention - A = torch.matmul(Qn, Kn.transpose(2, 3)) + A = torch.matmul(Qn, Knn.transpose(2, 3)) A *= 1.0 / (self.head_dim**0.5) A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) - A = torch.matmul(A, Vn) + A = torch.matmul(A, Vnn) A = A.transpose(1, 2) - A = A.reshape(bsz, 1, self.hidden_size) + A = A.view(bsz, 
1, self.hidden_size) A = original_apply_o(self, A) return A, (Kn, Vn) pass @@ -359,13 +348,13 @@ def LlamaModel_fast_forward( # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + raise ValueError("Unsloth: You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + raise ValueError("Unsloth: You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 @@ -419,7 +408,7 @@ def LlamaModel_fast_forward( if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + "Unsloth: `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`" ) use_cache = False pass @@ -614,7 +603,16 @@ def from_pretrained( rope_scaling = None, ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - print_unsloth_message("Llama") + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + + statistics = \ + f"==((====))== Unsloth: Fast Llama patching release {__version__}\n"\ + f" \\\ /| GPU: {gpu_stats.name}. Max memory: {max_memory} GB\n"\ + f"O^O/ \_/ \\ CUDA capability = {gpu_stats.major}.{gpu_stats.minor}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ + f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ + f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. 
Platform = {platform_system}\n' + logger.warning_once(statistics) FastLlamaModel.pre_patch() if dtype is None: @@ -632,7 +630,7 @@ def from_pretrained( if (rope_scaling is None) and (max_seq_length > model_max_seq_length): rope_scaling = max_seq_length / model_max_seq_length logger.warning_once( - f"Unsloth: {model_name} can only handle sequence lengths of of most "\ + f"Unsloth: {model_name} can only handle sequence lengths of at most "\ f"{model_max_seq_length}.\nBut with kaiokendev's RoPE scaling of "\ f"{round(rope_scaling, 3)}, it can be magically be extended to "\ f"{max_seq_length}!" @@ -686,6 +684,7 @@ def post_patch(model): # Torch.compile fails on embedding matrix?? # Workaround randomnly fixes it for torch versions < 2.2 model.model.embed_tokens = torch.nn.Embedding.from_pretrained(model.model.embed_tokens.weight) + model.config.update({"unsloth_version" : __version__}) # We also do this for the lm_head lm_head = torch.nn.Linear(1, 1, bias = None) @@ -747,6 +746,7 @@ def get_peft_model( accepted_modules = frozenset(("q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",),) + model.config.update({"unsloth_version" : __version__}) for module in target_modules: assert(module in accepted_modules) pass @@ -771,6 +771,9 @@ def get_peft_model( model = _get_peft_model(model, lora_config) # Do patching + n_mlp = 0 + n_qkv = 0 + n_o = 0 for idx, layer in enumerate(model.model.model.layers): # MLP patching @@ -780,6 +783,7 @@ def get_peft_model( # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) + n_mlp += 1 pass # QKV attention patching @@ -788,15 +792,22 @@ def get_peft_model( hasattr(layer.self_attn.v_proj, "lora_A"): layer.self_attn.apply_qkv = apply_lora_qkv + n_qkv += 1 pass # O attention patching if hasattr(layer.self_attn.o_proj, "lora_A"): layer.self_attn.apply_o = apply_lora_o + n_o += 1 pass pass + logger.warning_once( 
+ f"Unsloth {__version__} patched {len(model.model.model.layers)} layers with "\ + f"{n_qkv} QKV layers, {n_o} O layers and {n_mlp} MLP layers.", + ) + # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index f75ebbcd5d..0ace3f494d 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -45,7 +45,7 @@ def from_pretrained( ) elif model_type == "mistral": if rope_scaling is not None: - logger.warning_once("Mistral models do not support RoPE scaling.") + logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") return FastMistralModel.from_pretrained( model_name = model_name, max_seq_length = max_seq_length, @@ -57,7 +57,8 @@ def from_pretrained( ) else: raise NotImplementedError( - f"{model_name} not supported yet! Make an issue to https://github.com/unslothai/unsloth!", + f"Unsloth: {model_name} not supported yet!\n"\ + "Make an issue to https://github.com/unslothai/unsloth!", ) pass pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 9a91a8fc1f..323ec39f82 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -13,6 +13,7 @@ # limitations under the License. from .llama import * +from ._utils import __version__ from transformers.models.mistral.modeling_mistral import ( MistralAttention, @@ -245,7 +246,16 @@ def from_pretrained( # rope_scaling = None, Mistral does not support RoPE scaling ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() - print_unsloth_message("Mistral") + gpu_stats = torch.cuda.get_device_properties(0) + max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) + + statistics = \ + f"==((====))== Unsloth: Fast Mistral patching release {__version__}\n"\ + f" \\\ /| GPU: {gpu_stats.name}. 
Max memory: {max_memory} GB\n"\ + f"O^O/ \_/ \\ CUDA capability = {gpu_stats.major}.{gpu_stats.minor}. Xformers = {xformers_version}. FA = {HAS_FLASH_ATTENTION}.\n"\ + f"\ / Pytorch version: {torch.__version__}. CUDA Toolkit = {torch.version.cuda}\n"\ + f' "-____-" bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. Platform = {platform_system}\n' + logger.warning_once(statistics) FastMistralModel.pre_patch() if dtype is None: From 627acc4bb37d5a0354a86c0783be20162f0940b2 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Mon, 25 Dec 2023 23:28:05 +1100 Subject: [PATCH 0077/1088] Fix Pytorch 2.1.1 --- README.md | 51 +++++++++++++++++++++++++++------------ pyproject.toml | 3 +++ unsloth/models/llama.py | 16 +++++++++++- unsloth/models/mistral.py | 15 +++++++++++- 4 files changed, 68 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 4e2ebe3c63..3d8a460b2a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
    - - + +
    @@ -36,7 +36,7 @@ Unsloth currently only supports Linux distros and Pytorch == 2.1. ```bash conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ -c pytorch -c nvidia -c xformers -c conda-forge -y -pip install "unsloth[kaggle] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[conda] @ git+https://github.com/unslothai/unsloth.git" ``` # Installation Instructions - Pip @@ -66,23 +66,26 @@ pip install --upgrade pip # Documentation We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! ```python -from unsloth import FastLlamaModel, FastMistralModel +from unsloth import FastLanguageModel import torch -max_seq_length = 2048 # Can change to any number <= 4096 -dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ -load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. +from trl import SFTTrainer +from transformers import TrainingArguments +from datasets import load_dataset +max_seq_length = 2048 # Supports RoPE Scaling interally, so choose any! +# Get LAION dataset +url = "https://huggingface.co/datasets/laion/OIG/resolve/main/unified_chip2.jsonl" +dataset = load_dataset("json", data_files = {"train" : url}, split = "train") # Load Llama model -model, tokenizer = FastLlamaModel.from_pretrained( - model_name = "unsloth/llama-2-7b", # Supports any llama model eg meta-llama/Llama-2-7b-hf +model, tokenizer = FastLanguageModel.from_pretrained( + model_name = "unsloth/llama-2-7b", # Supports Llama, Mistral - replace this! 
max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf + dtype = None, + load_in_4bit = True, ) # Do model patching and add fast LoRA weights -model = FastLlamaModel.get_peft_model( +model = FastLanguageModel.get_peft_model( model, r = 16, target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", @@ -95,7 +98,26 @@ model = FastLlamaModel.get_peft_model( max_seq_length = max_seq_length, ) -trainer = .... Use Huggingface's Trainer and dataset loading (TRL, transformers etc) +trainer = SFTTrainer( + model = model, + train_dataset = dataset, + dataset_text_field = "text", + max_seq_length = max_seq_length, + tokenizer = tokenizer, + args = TrainingArguments( + per_device_train_batch_size = 2, + gradient_accumulation_steps = 4, + warmup_steps = 10, + max_steps = 60, + fp16 = not torch.cuda.is_bf16_supported(), + bf16 = torch.cuda.is_bf16_supported(), + logging_steps = 1, + output_dir = "outputs", + optim = "adamw_8bit", + seed = 3407, + ), +) +trainer.train() ``` # DPO (Direct Preference Optimization) Experimental support @@ -296,7 +318,6 @@ Manual autograd, Triton kernels etc. 
See our [Benchmark Breakdown](https://unslo $$ \begin{align} y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ -y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ \frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) \end{align} diff --git a/pyproject.toml b/pyproject.toml index b93dcb748f..ab710de542 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,9 @@ cu121 = [ kaggle = [ "unsloth[huggingface]", ] +conda = [ + "unsloth[huggingface]", +] colab = [ "unsloth[cu121]", ] diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index be39b90afa..5f9b41d4cc 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -33,7 +33,19 @@ LlamaDecoderLayer, LlamaModel, LlamaForCausalLM, -) +) + +# For Pytorch 2.1.1 +try: + from transformers.models.llama.modeling_llama import ( + LlamaSdpaAttention, + LlamaFlashAttention2, + ) +except: + LlamaSdpaAttention = LlamaAttention + LlamaFlashAttention2 = LlamaAttention +pass + from peft import PeftModelForCausalLM import gc import peft @@ -584,6 +596,8 @@ class FastLlamaModel: @staticmethod def pre_patch(): LlamaAttention .forward = LlamaAttention_fast_forward + LlamaSdpaAttention .forward = LlamaAttention_fast_forward + LlamaFlashAttention2.forward = LlamaAttention_fast_forward LlamaDecoderLayer .forward = LlamaDecoderLayer_fast_forward LlamaModel .forward = LlamaModel_fast_forward LlamaForCausalLM .forward = LlamaForCausalLM_fast_forward diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 323ec39f82..f3973c9dd1 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -20,7 +20,18 @@ MistralDecoderLayer, MistralModel, MistralForCausalLM, -) +) +# For Pytorch 2.1.1 +try: + from transformers.models.mistral.modeling_mistral import ( + MistralSdpaAttention, + MistralFlashAttention2, + ) +except: + MistralSdpaAttention = 
MistralAttention + MistralFlashAttention2 = MistralAttention +pass + def MistralAttention_fast_forward( self, @@ -227,6 +238,8 @@ class FastMistralModel(FastLlamaModel): @staticmethod def pre_patch(): MistralAttention .forward = MistralAttention_fast_forward + MistralSdpaAttention .forward = MistralAttention_fast_forward + MistralFlashAttention2.forward = MistralAttention_fast_forward MistralDecoderLayer .forward = LlamaDecoderLayer_fast_forward MistralModel .forward = LlamaModel_fast_forward MistralForCausalLM .forward = MistralForCausalLM_fast_forward From 51dd120e354cd2223df7ebe2240fb6d1a76108c5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 26 Dec 2023 04:32:04 +1100 Subject: [PATCH 0078/1088] Fix RoPE Scaling issues (#52) * Fix RoPE Scaling * Update llama.py * Update llama.py --- unsloth/models/llama.py | 10 ++++++++-- unsloth/models/mistral.py | 6 +++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 5f9b41d4cc..590465ae88 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -369,6 +369,7 @@ def LlamaModel_fast_forward( raise ValueError("Unsloth: You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length + assert(seq_length <= self.max_seq_length) past_key_values_length = 0 if past_key_values is not None: @@ -661,6 +662,9 @@ def from_pretrained( bnb_4bit_compute_dtype = dtype, ) + # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/12 + # RoPE Scaling's max_position_embeddings must be updated + max_position_embeddings = max(max_seq_length, model_max_seq_length) model = AutoModelForCausalLM.from_pretrained( model_name, device_map = device_map, @@ -668,6 +672,7 @@ def from_pretrained( quantization_config = bnb_config, token = token, rope_scaling = rope_scaling, + max_position_embeddings = max_position_embeddings, ) tokenizer = AutoTokenizer.from_pretrained( model_name, @@ -685,7 +690,7 @@ def 
from_pretrained( layer.self_attn.apply_o = original_apply_o pass - model.max_seq_length = max_seq_length + model.max_seq_length = max_position_embeddings return model, tokenizer pass @@ -746,7 +751,7 @@ def get_peft_model( layers_to_transform = None, use_gradient_checkpointing = True, random_state = 3407, - max_seq_length = 2048, + max_seq_length = 2048, # not used anymore **kwargs, ): assert(max_seq_length <= model.max_seq_length) @@ -824,6 +829,7 @@ def get_peft_model( # Patch cross entropy loss labels # Fixes https://github.com/unslothai/unsloth/issues/10 + max_seq_length = model.max_seq_length extra_ignored_labels = torch.full((max_seq_length, 1), -100, device = "cuda") model.model.extra_ignored_labels = extra_ignored_labels internal_model = model diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index f3973c9dd1..865493edce 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -125,7 +125,7 @@ def MistralAttention_fast_forward( V = V.transpose(1, 2) # Flash Attention v2 auto supports grouped query attention - sliding_window = self.config.sliding_window + sliding_window = getattr(self.config, "sliding_window") sliding_window = q_len if sliding_window is None else sliding_window window = (-1, -1) if (q_len <= sliding_window) else (sliding_window, sliding_window) A = flash_attn_func(Q, K, V, causal = True, window_size = window) @@ -169,7 +169,7 @@ def MistralForCausalLM_fast_forward( if causal_mask is None: bsz, q_len = input_ids.shape - sliding_window = self.config.sliding_window + sliding_window = getattr(self.config, "sliding_window") if sliding_window is None or sliding_window <= 0: causal_mask = xformers.attn_bias.LowerTriangularMask() elif q_len <= sliding_window: @@ -312,7 +312,7 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass - model.max_seq_length = max_seq_length + model.max_seq_length = max(max_seq_length, model.config.max_position_embeddings) return model, tokenizer pass pass From 
1128ecb82afd098a33dc1eefc40408b9d0fd3f07 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 27 Dec 2023 03:01:45 +1100 Subject: [PATCH 0079/1088] Fix FastLanguageModel --- unsloth/models/llama.py | 2 +- unsloth/models/loader.py | 4 ++-- unsloth/models/mistral.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 590465ae88..de9666bca5 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -609,7 +609,7 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "meta-llama/Llama-2-7b-hf", + model_name = "unsloth/llama-2-7b-bnb-4bit", max_seq_length = 4096, dtype = None, load_in_4bit = True, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 0ace3f494d..d458626386 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -17,10 +17,10 @@ from transformers import AutoConfig -class FastLanguageModel: +class FastLanguageModel(FastLlamaModel): @staticmethod def from_pretrained( - model_name = "mistralai/Mistral-7B-v0.1", + model_name = "unsloth/mistral-7b-bnb-4bit", max_seq_length = 4096, dtype = None, load_in_4bit = True, diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 865493edce..826ec47789 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -250,7 +250,7 @@ def pre_patch(): @staticmethod def from_pretrained( - model_name = "mistralai/Mistral-7B-v0.1", + model_name = "unsloth/mistral-7b-bnb-4bit", max_seq_length = 4096, dtype = None, load_in_4bit = True, From a3ed0dc4f4d52b01b5499a22946175dff00dfd5b Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 27 Dec 2023 03:51:31 +1100 Subject: [PATCH 0080/1088] Fix inference --- README.md | 2 +- unsloth/models/llama.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3d8a460b2a..cf1568f124 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ | Llama 7b | Mistral 7b | CodeLlama 34b | 
Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| [Colab Alpaca example + inference](https://colab.research.google.com/drive/1oW55fBmwzCOrBVX66RcpptL3a99qWBxb?usp=sharing) | [Colab T4 example](https://colab.research.google.com/drive/15pyLgRN97B_jA56HS0esx56knA9I5tuv?usp=sharing) | [A100 example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | +| [Colab Alpaca example + inference, saving](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Colab T4 example + inference, saving](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [A100 example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | * Supports Llama (7, 13, 70b), Yi (6, 34b), Mistral (7b), Tinyllama, CodeLlama (7, 13, 34b), and all Llama / Mistral derived architectures! 
diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index de9666bca5..cbbb6b7c9e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -141,8 +141,8 @@ def LlamaAttention_fast_forward_inference( _, _, cached_len, _ = Kn.shape Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) Vnn = Vn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) - Knn = Knn.view(bsz, n_heads, cached_len, head_dim) - Vnn = Vnn.view(bsz, n_heads, cached_len, head_dim) + Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim) + Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim) else: Knn, Vnn = Kn, Vn @@ -152,7 +152,7 @@ def LlamaAttention_fast_forward_inference( A = torch.nn.functional.softmax(A, dim = -1, dtype = torch.float32).to(A.dtype) A = torch.matmul(A, Vnn) A = A.transpose(1, 2) - A = A.view(bsz, 1, self.hidden_size) + A = A.reshape(bsz, 1, self.hidden_size) A = original_apply_o(self, A) return A, (Kn, Vn) pass From 0369e7aa7ad67e1f9e59ecbc98306f4f19afb0b3 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Thu, 28 Dec 2023 04:19:12 +1100 Subject: [PATCH 0081/1088] Nightly (#56) * Pytorch 2.1.1 install path, 4bit loading * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Spelling errors * Update __init__.py --- README.md | 103 +++++++++++++++++++++++--------------- pyproject.toml | 100 +++++++++++++++++++++++------------- unsloth/__init__.py | 2 +- unsloth/models/llama.py | 14 ++++-- unsloth/models/loader.py | 62 ++++++++++++++--------- unsloth/models/mistral.py | 19 +++++-- 6 files changed, 194 insertions(+), 106 deletions(-) diff --git a/README.md b/README.md index cf1568f124..69c8ddc67b 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,27 @@
    - +
    ## 2-5x faster 60% less memory local QLoRA finetuning | Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| [Colab Alpaca example + inference, saving](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Colab T4 example + inference, saving](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [A100 example](https://colab.research.google.com/drive/1gdHyAx8XJsz2yNV-DHvbHjR1iCef5Qmh?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | -| [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | +| [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | +| [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | -* Supports Llama (7, 13, 70b), Yi (6, 
34b), Mistral (7b), Tinyllama, CodeLlama (7, 13, 34b), and all Llama / Mistral derived architectures! -* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. +* Supports Llama, Yi, Mistral, CodeLlama, and their derived models (Open Hermes etc). +* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backpropagation engine**. * **0% loss in accuracy** - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) * **NEW!** Works on **Linux** and **Windows** via WSL. -* **NEW!** Experimental support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290)! +* **NEW!** Support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290), PPO and Reward Modelling via [TRL](https://huggingface.co/docs/trl/dpo_trainer). +* **NEW!** Download 4 bit models 4x faster directly from Huggingface! * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -* Open source version trains 5x faster or you can check out [Unsloth Pro and Max](https://unsloth.ai/) codepaths for **30x faster training**! +* Open source version trains 5x faster - check out [Unsloth Max](https://unsloth.ai/) for **30x faster training**! 
-| 1 A100 40GB | Hugging Face | Flash Attention 2 | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +| 1 A100 40GB | Huggingface | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-------------|-----------------|--------------|---------------|-------------| | Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | | LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | @@ -28,11 +29,11 @@ | Slim Orca | 1x | 1.18x | 2.22x | 2.64x | 5.04x | **14.82x** | Join our [Discord](https://discord.gg/nsS4V5Z6ge)! -If you trained a model with Unsloth, we made a cool sticker!! +If you trained a model with Unsloth, we made a cool sticker if you want to use it! # Installation Instructions - Conda -Unsloth currently only supports Linux distros and Pytorch == 2.1. +Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. ```bash conda install cudatoolkit xformers bitsandbytes pytorch pytorch-cuda=12.1 \ -c pytorch -c nvidia -c xformers -c conda-forge -y @@ -40,25 +41,36 @@ pip install "unsloth[conda] @ git+https://github.com/unslothai/unsloth.git" ``` # Installation Instructions - Pip +Do **NOT** use this if you have Anaconda. You must use the Conda install method, or else stuff will BREAK. + 1. Find your CUDA version via ```python import torch; torch.version.cuda ``` -2. We only support Pytorch 2.1 (2.1.1 bugs out for now): You can update Pytorch via Pip (interchange cu121 / cu118) +2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. ```bash pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ --index-url https://download.pytorch.org/whl/cu121 ``` -2. Select either cu118 for CUDA 11.8 or cu121 for CUDA 12.1. 
If you have a RTX 3060 or higher (A100, H100 etc), use the "ampere" path. ```bash pip install "unsloth[cu118] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu118_ampere] @ git+https://github.com/unslothai/unsloth.git" pip install "unsloth[cu121_ampere] @ git+https://github.com/unslothai/unsloth.git" ``` -Change `cu121` to `cu118` for CUDA version 11.8 or 12.1. Go to https://pytorch.org/ to learn more. - -4. If you get errors, try the below first, then go back to step 1: +3. For Pytorch 2.1.1: Use the `"ampere"` path for newer RTX 30xx GPUs or higher. +```bash +pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.1 triton \ + --index-url https://download.pytorch.org/whl/cu121 +``` +```bash +pip install "unsloth[cu118_torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121_torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu118_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" +pip install "unsloth[cu121_ampere_torch211] @ git+https://github.com/unslothai/unsloth.git" +``` +4. We're working on Pytorch 2.1.2 support. +5. If you get errors, try the below first, then go back to step 1: ```bash pip install --upgrade pip ``` @@ -120,10 +132,8 @@ trainer = SFTTrainer( trainer.train() ``` -# DPO (Direct Preference Optimization) Experimental support -[152334H](https://github.com/152334H) hacked Unsloth to work with DPO via TRL! -1. Hack the model's `config.json` to be llama model. [Example gist](https://gist.github.com/152334H/d8a68b51b83bac008a02e69ecc81d5c1). -2. Use Unsloth for DPO for both base and reference models. [Example gist](https://gist.github.com/152334H/4847f3a8cca12894877e6b30698b0b64). +# DPO (Direct Preference Optimization) Support +DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). 
# Future Milestones and limitations 1. Support Mixtral. @@ -173,6 +183,40 @@ Two Tesla T4s on Kaggle * Slim Orca `bsz=1` for all benchmarks since `bsz=2` OOMs. We can handle `bsz=2`, but we benchmark it with `bsz=1` for consistency. +# Llama-Factory 3rd party benchmarking + +| Method | Bits | TGS | GRAM | Speed | +| --- | --- | --- | --- | --- | +| HF | 16 | 2392 | 18GB | 100% | +| HF+FA2 | 16 | 2954 | 17GB | 123% | +| Unsloth+FA2 | 16 | 4007 | 16GB | **168%** | +| HF | 4 | 2415 | 9GB | 101% | +| Unsloth+FA2 | 4 | 3726 | 7GB | **160%** | + +[Link](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-Comparison) to performance table. TGS: tokens per GPU per second. Model: LLaMA2-7B. GPU: NVIDIA A100 * 1. Batch size: 4. Gradient accumulation: 2. LoRA rank: 8. Max length: 1024. + +# How did we make it faster? +Manual autograd, Triton kernels etc. See our [Benchmark Breakdown](https://unsloth.ai/blog/mistral-benchmark) for more info! + +$$ +\begin{align} +y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ +r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ +\frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) +\end{align} +$$ + + +# Troubleshooting +1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: +```bash +!ldconfig /usr/lib64-nvidia +``` +2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. + +3. If it doesn't install - maybe try updating `pip`. + + # Full benchmarking tables Click "Code" for a fully reproducible example. "Unsloth Equal" is a preview of our PRO version, with code stripped out. All settings and the loss curve remains identical. @@ -312,27 +356,6 @@ Click "Code" for a fully reproducible example. | memory MB| OOM | OOM | 7594 | 8881 | | | | % saved | OOM | OOM | | | | | -# How did we make it faster? 
-Manual autograd, Triton kernels etc. See our [Benchmark Breakdown](https://unsloth.ai/blog/mistral-benchmark) for more info! - -$$ -\begin{align} -y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ -r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ -\frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) -\end{align} -$$ - - -# Troubleshooting -1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: -```bash -!ldconfig /usr/lib64-nvidia -``` -2. Windows is not supported as of yet - we rely on Xformers and Triton support, so until both packages support Windows officially, Unsloth will then support Windows. - -3. If it doesn't install - maybe try updating `pip`. - # Credits 1. [RandomInternetPreson](https://github.com/RandomInternetPreson) for confirming WSL support 2. [152334H](https://github.com/152334H) for experimental DPO support diff --git a/pyproject.toml b/pyproject.toml index ab710de542..2bceca566f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,21 +5,21 @@ build-backend = "setuptools.build_meta" [project] name = "unsloth" dynamic = ["version"] -description = "2X faster LLM finetuning" +description = "2-5X faster LLM finetuning" readme = "README.md" requires-python = ">=3.9" license = {file = "LICENSE"} keywords = ["ai", "llm",] authors = [ - {email = "info@unsloth.ai"}, - {name = "Unsloth AI team"}, + {email = "info@unsloth.ai"}, + {name = "Unsloth AI team"}, ] maintainers = [ - {name = "Daniel Han", email = "danielhanchen@gmail.com"}, - {name = "Michael Han", email = "info@unsloth.ai"}, + {name = "Daniel Han", email = "danielhanchen@gmail.com"}, + {name = "Michael Han", email = "info@unsloth.ai"}, ] classifiers = [ - "Programming Language :: Python", + "Programming Language :: Python", ] [tool.setuptools.dynamic] @@ -40,54 +40,84 @@ huggingface = [ "trl", "peft", "packaging", + "ninja", ] cu118only = [ - "xformers @ 
https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.22.post7%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu121only = [ - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", - "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu118only_torch211 = [ + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl ; 
python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu118/xformers-0.0.23%2Bcu118-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", +] +cu121only_torch211 = [ + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp39-cp39-manylinux2014_x86_64.whl ; python_version=='3.9'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp310-cp310-manylinux2014_x86_64.whl ; python_version=='3.10'", + "xformers @ https://download.pytorch.org/whl/cu121/xformers-0.0.23-cp311-cp311-manylinux2014_x86_64.whl ; python_version=='3.11'", ] cu118 = [ - "unsloth[huggingface]", - "bitsandbytes", - "unsloth[cu118only]", + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only]", ] cu121 = [ - "unsloth[huggingface]", - "bitsandbytes", - "unsloth[cu121only]", + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121only]", +] +cu118_torch211 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only_torch211]", +] +cu121_torch211 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121only_torch211]", ] kaggle = [ - "unsloth[huggingface]", + "unsloth[huggingface]", ] conda = [ - "unsloth[huggingface]", + "unsloth[huggingface]", ] colab = [ - "unsloth[cu121]", + "unsloth[cu121]", +] +colab_ampere = [ + "unsloth[cu121]", + "flash-attn", ] cu118_ampere = [ - "unsloth[huggingface]", - "bitsandbytes", - "unsloth[cu118only]", - "ninja", - "flash-attn", + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only]", + "flash-attn", ] cu121_ampere = [ - "unsloth[huggingface]", - "bitsandbytes", - "unsloth[cu121only]", - "ninja", - "flash-attn", + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu121only]", + "flash-attn", ] -colab_ampere = [ - "unsloth[cu121]", - "ninja", - "flash-attn", +cu118_ampere_torch211 = [ + "unsloth[huggingface]", + "bitsandbytes", + "unsloth[cu118only_torch211]", + "flash-attn", +] +cu121_ampere_torch211 = [ + "unsloth[huggingface]", + "bitsandbytes", + 
"unsloth[cu121only_torch211]", + "flash-attn", ] [project.urls] diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 5b0b8d325d..28c7affe94 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "2023.12" +__version__ = "2024.1" import os import warnings import importlib diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index cbbb6b7c9e..f6cc078356 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -369,7 +369,8 @@ def LlamaModel_fast_forward( raise ValueError("Unsloth: You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length - assert(seq_length <= self.max_seq_length) + if hasattr(self, "max_seq_length"): + assert(seq_length <= self.max_seq_length) past_key_values_length = 0 if past_key_values is not None: @@ -690,7 +691,14 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass + # Save max_seq_length model.max_seq_length = max_position_embeddings + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_position_embeddings + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_position_embeddings return model, tokenizer pass @@ -757,9 +765,9 @@ def get_peft_model( assert(max_seq_length <= model.max_seq_length) if lora_dropout != 0: - raise TypeError("Unsloth: Fast Llama patching only works with dropout = 0.") + raise TypeError("Unsloth: Fast model patching only works with dropout = 0.") if bias != "none": - raise TypeError("Unsloth: Fast Llama patching only works with bias = 'none'.") + raise TypeError("Unsloth: Fast model patching only works with bias = 'none'.") transformers_set_seed(random_state) diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 
d458626386..421b743299 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -15,6 +15,21 @@ from .llama import FastLlamaModel, logger from .mistral import FastMistralModel from transformers import AutoConfig +from transformers import __version__ as transformers_version + +FOURBIT_MAPPER = \ +{ + "unsloth/mistral-7b-bnb-4bit" : "unsloth/mistral-7b", + "unsloth/llama-2-7b-bnb-4bit" : "unsloth/llama-2-7b", + "unsloth/llama-2-13b-bnb-4bit" : "unsloth/llama-13-7b", + "unsloth/codellama-34b-bnb-4bit" : "codellama/CodeLlama-34b-hf", +} + +# https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! +major, minor = transformers_version.split(".")[:2] +major, minor = int(major), int(minor) +SUPPORTS_FOURBIT = (major > 4) or (major == 4 and minor >= 37) +del major, minor class FastLanguageModel(FastLlamaModel): @@ -29,36 +44,37 @@ def from_pretrained( rope_scaling = None, *args, **kwargs, ): + if not SUPPORTS_FOURBIT and model_name in FOURBIT_MAPPER: + model_name = FOURBIT_MAPPER[model_name] + logger.warning_once( + f"Unsloth: Your transformers version of {transformers_version} does not support native "\ + f"4bit loading.\nThe minimum required version is 4.37.\n"\ + f'Try `pip install "git+https://github.com/huggingface/transformers.git"`\n'\ + f"to obtain the latest transformers build, then restart this session.\n"\ + f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." 
+ ) + pass + model_config = AutoConfig.from_pretrained(model_name) model_type = model_config.model_type - if model_type == "llama": - return FastLlamaModel.from_pretrained( - model_name = model_name, - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - token = token, - device_map = device_map, - rope_scaling = rope_scaling, - *args, **kwargs, - ) - elif model_type == "mistral": - if rope_scaling is not None: - logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") - return FastMistralModel.from_pretrained( - model_name = model_name, - max_seq_length = max_seq_length, - dtype = dtype, - load_in_4bit = load_in_4bit, - token = token, - device_map = device_map, - *args, **kwargs, - ) + if model_type == "llama": dispatch_model = FastLlamaModel + elif model_type == "mistral": dispatch_model = FastMistralModel else: raise NotImplementedError( f"Unsloth: {model_name} not supported yet!\n"\ "Make an issue to https://github.com/unslothai/unsloth!", ) + + return dispatch_model.from_pretrained( + model_name = model_name, + max_seq_length = max_seq_length, + dtype = dtype, + load_in_4bit = load_in_4bit, + token = token, + device_map = device_map, + rope_scaling = rope_scaling, + *args, **kwargs, + ) pass pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 826ec47789..e4bac44694 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -243,7 +243,7 @@ def pre_patch(): MistralDecoderLayer .forward = LlamaDecoderLayer_fast_forward MistralModel .forward = LlamaModel_fast_forward MistralForCausalLM .forward = MistralForCausalLM_fast_forward - PeftModelForCausalLM.forward = PeftModelForCausalLM_fast_forward + PeftModelForCausalLM .forward = PeftModelForCausalLM_fast_forward return pass @@ -256,8 +256,11 @@ def from_pretrained( load_in_4bit = True, token = None, device_map = "sequential", - # rope_scaling = None, Mistral does not support RoPE scaling - ): + rope_scaling = None, # Mistral 
does not support RoPE scaling + ): + if rope_scaling is not None: + logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") + SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) @@ -312,7 +315,15 @@ def from_pretrained( layer.self_attn.apply_o = original_apply_o pass - model.max_seq_length = max(max_seq_length, model.config.max_position_embeddings) + # Save max_seq_length + max_position_embeddings = max(max_seq_length, model.config.max_position_embeddings) + model.max_seq_length = max_position_embeddings + internal_model = model + while hasattr(internal_model, "model"): + internal_model.max_seq_length = max_position_embeddings + internal_model = internal_model.model + pass + internal_model.max_seq_length = max_position_embeddings return model, tokenizer pass pass From 24133feda65a6996a1fd242ac8f9fc237a3befe5 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 29 Dec 2023 04:55:01 +1100 Subject: [PATCH 0082/1088] DPO, SWA fixes (#57) * Pytorch 2.1.1 install path, 4bit loading * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Spelling errors * Update __init__.py * DPO loss fix * Update fast_lora.py * Update fast_lora.py * Out of bounds tokenization * Fix Mistral SWA --- unsloth/kernels/fast_lora.py | 19 ++++++++++--- unsloth/models/llama.py | 15 +++++++++- unsloth/models/mistral.py | 53 +++++++++++++++++++++++------------- 3 files changed, 63 insertions(+), 24 deletions(-) diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index dcc887747f..adec68d46b 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -17,10 +17,16 @@ from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel def get_lora_parameters(proj): - active_adapter 
= proj.active_adapters[0] if \ - hasattr(proj, "active_adapters") else proj.active_adapter + # For DPO or disabled adapters base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) W = base_layer.weight + + if proj.disable_adapters or proj.merged: + return W, QUANT_STATE(W), None, None, None + pass + + active_adapter = proj.active_adapters[0] if \ + hasattr(proj, "active_adapters") else proj.active_adapter A = proj.lora_A [active_adapter].weight B = proj.lora_B [active_adapter].weight s = proj.scaling[active_adapter] @@ -31,7 +37,6 @@ def get_lora_parameters(proj): def matmul_lora(X, W, W_quant, A, B, s, out = None): dtype = X.dtype W = fast_dequantize(W.t(), W_quant) - A, B = A.t(), B.t() if X.dim() == 3: batch, seq_len, d = X.shape @@ -43,7 +48,13 @@ def matmul_lora(X, W, W_quant, A, B, s, out = None): out = torch.matmul(X, W, out = out) if W_quant is not None: del W - out += (X @ A.to(dtype)) @ (s * B.to(dtype)) + + if A is not None: + # LoRA is enabled + A, B = A.t(), B.t() + out += (X @ A.to(dtype)) @ (s * B.to(dtype)) + pass + return out.view(batch, seq_len, -1) if reshape else out pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f6cc078356..22179d4410 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -369,8 +369,21 @@ def LlamaModel_fast_forward( raise ValueError("Unsloth: You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length + + # Fix out of bounds tokenization if hasattr(self, "max_seq_length"): - assert(seq_length <= self.max_seq_length) + if seq_length > self.max_seq_length: + logger.warning_once( + f"Unsloth: Input IDs of length {seq_length} > the model's max sequence length of {self.max_seq_length}.\n"\ + "We shall truncate it ourselves. It's imperative if you correct this issue first." 
+ ) + if input_ids is not None: + input_ids = input_ids[:,:self.max_seq_length] + elif inputs_embeds is not None: + inputs_embeds = inputs_embeds[:,:self.max_seq_length,:] + pass + pass + past_key_values_length = 0 if past_key_values is not None: diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e4bac44694..bb6a68f59e 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -97,21 +97,36 @@ def MistralAttention_fast_forward( Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) + M = bsz * q_len + + has_sliding_window = isinstance(causal_mask, xformers.attn_bias.BlockDiagonalCausalMask) # Group query attention - if n_groups != 1: - K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) - V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) - K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) - V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) - if hidden_states.requires_grad: - # Xformers does not support backward, so we have to convert - # GQA to MQA by cloning K and V - K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made - V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made - else: - # Xformers does support the forward pass though - Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + # if n_groups != 1: + K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) + V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) + K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) + if hidden_states.requires_grad: + # Xformers does not support backward, so we have to convert + # GQA to MQA by cloning K and V + K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + + if has_sliding_window: + Q = Q.view(1, M, n_heads, head_dim) + K = K.view(1, M, n_heads, head_dim) + V = V.view(1, M, n_heads, head_dim) + pass + else: + # Xformers does support the forward 
pass though + Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) + + if has_sliding_window: + Q = Q.view(1, M, n_kv_heads, n_groups, head_dim) + K = K.view(1, M, n_kv_heads, n_groups, head_dim) + V = V.view(1, M, n_kv_heads, n_groups, head_dim) + pass pass A = xformers_attention(Q, K, V, attn_bias = causal_mask) @@ -131,12 +146,12 @@ def MistralAttention_fast_forward( A = flash_attn_func(Q, K, V, causal = True, window_size = window) else: # Grouped query attention - if n_groups != 1: - K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) - K = K.reshape(bsz, n_heads, q_len, head_dim) - V = V.reshape(bsz, n_heads, q_len, head_dim) - pass + # if n_groups != 1: + K = K[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + V = V[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, q_len, head_dim) + K = K.reshape(bsz, n_heads, q_len, head_dim) + V = V.reshape(bsz, n_heads, q_len, head_dim) + # pass # Needs (batch_size, n_heads, seq_len, head_dim) # is_casual and attention_mask must not be both set! 
A = scaled_dot_product_attention(Q, K, V, attn_mask = attention_mask, is_causal = False) From ad02ea81b75a2bdb11ad3133ca89226d8a9ac0e2 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 30 Dec 2023 04:35:35 +1100 Subject: [PATCH 0083/1088] Prelim Qwen, Deepseek support (#58) * Pytorch 2.1.1 install path, 4bit loading * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update loader.py * Update loader.py * Update loader.py * Update loader.py * Spelling errors * Update __init__.py * DPO loss fix * Update fast_lora.py * Update fast_lora.py * Out of bounds tokenization * Fix Mistral SWA * Prelim support Qwen, Deepseek etc --- unsloth/kernels/cross_entropy_loss.py | 32 ++++++++++++++----- unsloth/kernels/fast_lora.py | 7 +++-- unsloth/models/_utils.py | 2 +- unsloth/models/llama.py | 44 ++++++++++++++++++++++----- 4 files changed, 67 insertions(+), 18 deletions(-) diff --git a/unsloth/kernels/cross_entropy_loss.py b/unsloth/kernels/cross_entropy_loss.py index 67ad306fcf..0a73a393ec 100644 --- a/unsloth/kernels/cross_entropy_loss.py +++ b/unsloth/kernels/cross_entropy_loss.py @@ -15,7 +15,9 @@ import triton import triton.language as tl import torch -from .utils import calculate_settings +from .utils import calculate_settings, MAX_FUSED_SIZE +from transformers.models.llama.modeling_llama import logger + @triton.jit def _cross_entropy_forward(logits_ptr, logits_row_stride, @@ -145,6 +147,7 @@ def backward(ctx, dlosses): pass +slow_cross_entropy_loss = torch.nn.functional.cross_entropy def fast_cross_entropy_loss(logits, labels): """ Arguments: @@ -156,10 +159,25 @@ def fast_cross_entropy_loss(logits, labels): batch, seq_len, d = logits.shape assert(labels.shape == (batch, seq_len)) - loss = Fast_CrossEntropyLoss.apply( - logits.view(batch*seq_len, d), - labels.view(-1), - ) - n_items = torch.count_nonzero(labels != -100) - return loss.sum() / n_items + # Prelim support Qwen, Deepseek other 
large vocab sizes > 2^16 + if d > MAX_FUSED_SIZE: + logger.warning_once( + f"Unsloth: Vocab size of {d} exceeds the max CUDA blocksize of {MAX_FUSED_SIZE}.\n"\ + "For now, Unsloth will use Pytorch's CrossEntropyLoss, which will entail a\n"\ + "25% increase in memory usage and be slower. Make an issue on \n"\ + "Unsloth's Github page if you want a faster and more memory efficient kernel!" + ) + loss = slow_cross_entropy_loss( + logits.float().view(batch*seq_len, d), # Must cast to float32 for numerical stability + labels.view(-1), + ) + return loss + else: + loss = Fast_CrossEntropyLoss.apply( + logits.view(batch*seq_len, d), + labels.view(-1), + ) + n_items = torch.count_nonzero(labels != -100) + return loss.sum() / n_items + pass pass diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index adec68d46b..26fecf8b59 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -16,6 +16,7 @@ from .utils import fast_dequantize, QUANT_STATE from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel + def get_lora_parameters(proj): # For DPO or disabled adapters base_layer = (proj.base_layer if hasattr(proj, "base_layer") else proj) @@ -104,7 +105,7 @@ def forward(ctx, X : torch.Tensor, dtype = X.dtype e = matmul_lora(X, gateW, gateW_quant, gateA, gateB, gateS) - g = matmul_lora(X, upW, upW_quant, upA, upB, upS) + g = matmul_lora(X, upW, upW_quant, upA, upB, upS) h = swiglu_fg_kernel(e, g) i = matmul_lora(h, downW, downW_quant, downA, downB, downS) @@ -123,10 +124,10 @@ def forward(ctx, X : torch.Tensor, def backward(ctx, dY : torch.Tensor): gateW, gateW_quant, gateS, upW, upW_quant, upS, downW, downW_quant, downS, = \ ctx.custom_saved_tensors - gateA, gateB, upA,upB, downA, downB, \ + gateA, gateB, upA, upB, downA, downB, \ X, e, g = ctx.saved_tensors - gateA, gateB, upA,upB, downA, downB = \ + gateA, gateB, upA, upB, downA, downB = \ gateA.t(), gateB.t(), upA.t(), upB.t(), downA.t(), downB.t() batch, seq_len, hd = X.shape 
diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 769669ae6a..1c75dffd53 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -23,7 +23,7 @@ from platform import system as platform_system platform_system = platform_system() -__version__ = "2023.12" +__version__ = "2024.1" # Get Flash Attention v2 if Ampere (RTX 30xx, A100) major_version, minor_version = torch.cuda.get_device_capability() diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 22179d4410..ccf61ee920 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -817,29 +817,59 @@ def get_peft_model( for idx, layer in enumerate(model.model.model.layers): # MLP patching - if hasattr(layer.mlp.gate_proj, "lora_A") and \ - hasattr(layer.mlp. up_proj, "lora_A") and \ - hasattr(layer.mlp.down_proj, "lora_A"): + gate_proj = layer.mlp.gate_proj + up_proj = layer.mlp. up_proj + down_proj = layer.mlp.down_proj + + if hasattr(gate_proj, "lora_A") and \ + hasattr( up_proj, "lora_A") and \ + hasattr(down_proj, "lora_A") and \ + (gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None and \ + ( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None and \ + (down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None: # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) n_mlp += 1 + else: + logger.warning_once( + "Unsloth cannot patch MLP layers with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." 
+ ) pass # QKV attention patching - if hasattr(layer.self_attn.q_proj, "lora_A") and \ - hasattr(layer.self_attn.k_proj, "lora_A") and \ - hasattr(layer.self_attn.v_proj, "lora_A"): + q_proj = layer.self_attn.q_proj + k_proj = layer.self_attn.k_proj + v_proj = layer.self_attn.v_proj + if hasattr(q_proj, "lora_A") and \ + hasattr(k_proj, "lora_A") and \ + hasattr(v_proj, "lora_A") and \ + (q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None and \ + (k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None and \ + (v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None: layer.self_attn.apply_qkv = apply_lora_qkv n_qkv += 1 + else: + logger.warning_once( + "Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." + ) pass # O attention patching - if hasattr(layer.self_attn.o_proj, "lora_A"): + o_proj = layer.self_attn.o_proj + if hasattr(o_proj, "lora_A") and \ + (o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None: layer.self_attn.apply_o = apply_lora_o n_o += 1 + else: + logger.warning_once( + "Unsloth cannot patch O projection layer with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." 
+ ) pass pass From 74826077dad2cb687ede2587e092dcd008946494 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 01:20:04 +1100 Subject: [PATCH 0084/1088] Fix Kaggle issues --- README.md | 17 ++++++++++++----- unsloth/__init__.py | 4 ++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 69c8ddc67b..47ca0a3187 100644 --- a/README.md +++ b/README.md @@ -11,17 +11,17 @@ | [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | -* Supports Llama, Yi, Mistral, CodeLlama, and their derived models (Open Hermes etc). +* Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backpropagation engine**. * **0% loss in accuracy** - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) * **NEW!** Works on **Linux** and **Windows** via WSL. 
* **NEW!** Support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290), PPO and Reward Modelling via [TRL](https://huggingface.co/docs/trl/dpo_trainer). -* **NEW!** Download 4 bit models 4x faster directly from Huggingface! +* **NEW!** Download 4 bit models 4x faster from Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit`. * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). * Open source version trains 5x faster - check out [Unsloth Max](https://unsloth.ai/) for **30x faster training**! -| 1 A100 40GB | Huggingface | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | +| 1 A100 40GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | |--------------|-------------|-------------|-----------------|--------------|---------------|-------------| | Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | | LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | @@ -88,9 +88,16 @@ max_seq_length = 2048 # Supports RoPE Scaling interally, so choose any! url = "https://huggingface.co/datasets/laion/OIG/resolve/main/unified_chip2.jsonl" dataset = load_dataset("json", data_files = {"train" : url}, split = "train") +# 4bit pre quantized models we support - 4x faster downloading! +fourbit_models = [ + "unsloth/mistral-7b-bnb-4bit", + "unsloth/llama-2-7b-bnb-4bit", + "unsloth/llama-2-13b-bnb-4bit", + "unsloth/codellama-34b-bnb-4bit", +] # Load Llama model model, tokenizer = FastLanguageModel.from_pretrained( - model_name = "unsloth/llama-2-7b", # Supports Llama, Mistral - replace this! + model_name = "unsloth/mistral-7b", # Supports Llama, Mistral - replace this! 
max_seq_length = max_seq_length, dtype = None, load_in_4bit = True, @@ -133,7 +140,7 @@ trainer.train() ``` # DPO (Direct Preference Optimization) Support -DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). +DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on 1x A100 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). # Future Milestones and limitations 1. Support Mixtral. diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 28c7affe94..879b092330 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -46,12 +46,12 @@ raise ImportError("Pytorch is not installed. Go to https://pytorch.org/.\n"\ "We have some installation instructions on our Github page.") -# We only support torch 2.1 +# We support torch 2.1 and 2.1.1 # Fixes https://github.com/unslothai/unsloth/issues/38 torch_version = torch.__version__.split(".") major_torch, minor_torch = torch_version[0], torch_version[1] major_torch, minor_torch = int(major_torch), int(minor_torch) -if (major_torch != 2) or (major_torch == 2 and minor_torch < 1): +if (major_torch != 2):# or (major_torch == 2 and minor_torch < 1): raise ImportError("Unsloth only supports Pytorch 2.1 for now. 
Please update your Pytorch to 2.1.\n"\ "We have some installation instructions on our Github page.") From 0a3477993a2eccb1dc7d2a0a7c3c61114791f9af Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 17:57:45 +1100 Subject: [PATCH 0085/1088] DPO --- README.md | 22 ++++++------- unsloth/models/dpo.py | 68 ++++++++++++++++++++++++++++++++++++++++ unsloth/models/loader.py | 2 ++ 3 files changed, 81 insertions(+), 11 deletions(-) create mode 100644 unsloth/models/dpo.py diff --git a/README.md b/README.md index 47ca0a3187..b2bcab865e 100644 --- a/README.md +++ b/README.md @@ -8,25 +8,25 @@ | Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Alpaca dataset example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | +| [Free Colab Llama + Alpaca example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Mistral + Alpaca example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 
example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). -* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backpropagation engine**. +* All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) * **NEW!** Works on **Linux** and **Windows** via WSL. -* **NEW!** Support for [DPO (Direct Preference Optimization)](https://arxiv.org/abs/2305.18290), PPO and Reward Modelling via [TRL](https://huggingface.co/docs/trl/dpo_trainer). +* **NEW!** [DPO](https://arxiv.org/abs/2305.18290), PPO and Reward Modelling support via [TRL](https://huggingface.co/docs/trl/dpo_trainer). Example [DPO Colab notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). * **NEW!** Download 4 bit models 4x faster from Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit`. * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). -* Open source version trains 5x faster - check out [Unsloth Max](https://unsloth.ai/) for **30x faster training**! 
- -| 1 A100 40GB | Hugging Face | Flash Attention | Unsloth Open | Unsloth Equal | Unsloth Pro | Unsloth Max | -|--------------|-------------|-------------|-----------------|--------------|---------------|-------------| -| Alpaca | 1x | 1.04x | 1.98x | 2.48x | 5.32x | **15.64x** | -| LAION Chip2 | 1x | 0.92x | 1.61x | 1.84x | 7.05x | **20.73x** | -| OASST | 1x | 1.19x | 2.17x | 2.66x | 5.04x | **14.83x** | -| Slim Orca | 1x | 1.18x | 2.22x | 2.64x | 5.04x | **14.82x** | +* Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! + +| 1 A100 40GB | Hugging Face | Flash Attention | Unsloth Open Source | [Unsloth Pro](https://unsloth.ai/pricing) | +|--------------|--------------|-----------------|---------------------|-----------------| +| Alpaca | 1x | 1.04x | 1.98x | **15.64x** | +| LAION Chip2 | 1x | 0.92x | 1.61x | **20.73x** | +| OASST | 1x | 1.19x | 2.17x | **14.83x** | +| Slim Orca | 1x | 1.18x | 2.22x | **14.82x** | Join our [Discord](https://discord.gg/nsS4V5Z6ge)! If you trained a model with Unsloth, we made a cool sticker if you want to use it! diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py new file mode 100644 index 0000000000..59e614983f --- /dev/null +++ b/unsloth/models/dpo.py @@ -0,0 +1,68 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from transformers.utils.notebook import ( + IntervalStrategy, + NotebookTrainingTracker, + NotebookProgressCallback, +) + +DPOTrainer_metrics = [ + "rewards/chosen", + "rewards/rejected", + "rewards/accuracies", + "rewards/margins", + "logps/rejected", + "logps/chosen", + "logits/rejected", + "logits/chosen", +] + +def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): + self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" + self.training_loss = 0 + self.last_log = 0 + column_names = [self.first_column] + ["Training Loss"] + if args.evaluation_strategy != IntervalStrategy.NO: + column_names.append("Validation Loss") + column_names += [x.replace("/", " / ") for x in DPOTrainer_metrics] + self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) +pass + + +def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwargs): + # Only for when there is no evaluation + if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: + values = {"Training Loss": logs["loss"]} + for metric in DPOTrainer_metrics: + values[metric.replace("/", " / ")] = logs[metric] + # First column is necessarily Step since we're not in epoch eval strategy + values["Step"] = state.global_step + self.training_tracker.write_line(values) +pass + + +def patch_dpo_trainer(): + # We patch Jupyter Notebook's printing to include all columns for DPO. 
+ NotebookProgressCallback.on_train_begin = NotebookProgressCallback_on_train_begin + NotebookProgressCallback.on_log = NotebookProgressCallback_on_log +pass +# Patch DPO notebook printing +patch_dpo_trainer() + + +from trl import DPOTrainer +class FastDPOTrainer(DPOTrainer): + pass +pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 421b743299..baa55f9f00 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -14,6 +14,7 @@ from .llama import FastLlamaModel, logger from .mistral import FastMistralModel +from .dpo import FastDPOTrainer from transformers import AutoConfig from transformers import __version__ as transformers_version @@ -23,6 +24,7 @@ "unsloth/llama-2-7b-bnb-4bit" : "unsloth/llama-2-7b", "unsloth/llama-2-13b-bnb-4bit" : "unsloth/llama-13-7b", "unsloth/codellama-34b-bnb-4bit" : "codellama/CodeLlama-34b-hf", + "unsloth/zephyr-sft-bnb-4bit" : "unsloth/zephyr-sft", } # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! From c79d2c31cd60241a4b59c5df3ebc5f5bb02e92b7 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 18:26:17 +1100 Subject: [PATCH 0086/1088] DPO --- unsloth/models/__init__.py | 2 +- unsloth/models/dpo.py | 37 ++++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index 9322049dc8..b174a2cec1 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .loader import FastLanguageModel +from .loader import FastLanguageModel, FastDPOTrainer from .llama import FastLlamaModel from .mistral import FastMistralModel diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 59e614983f..55ee247bf6 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -17,6 +17,9 @@ NotebookTrainingTracker, NotebookProgressCallback, ) +from transformers.trainer import DEFAULT_PROGRESS_CALLBACK +from trl import DPOTrainer +import types DPOTrainer_metrics = [ "rewards/chosen", @@ -46,23 +49,35 @@ def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwa if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: values = {"Training Loss": logs["loss"]} for metric in DPOTrainer_metrics: - values[metric.replace("/", " / ")] = logs[metric] + if metric in logs: + values[metric.replace("/", " / ")] = logs[metric] + else: + # Maybe not a DPO Trainer anymore? Redo the tracker + column_names = [self.first_column] + ["Training Loss"] + if args.evaluation_strategy != IntervalStrategy.NO: + column_names.append("Validation Loss") + self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names) + break + pass + pass # First column is necessarily Step since we're not in epoch eval strategy values["Step"] = state.global_step self.training_tracker.write_line(values) + pass pass -def patch_dpo_trainer(): - # We patch Jupyter Notebook's printing to include all columns for DPO. 
- NotebookProgressCallback.on_train_begin = NotebookProgressCallback_on_train_begin - NotebookProgressCallback.on_log = NotebookProgressCallback_on_log -pass -# Patch DPO notebook printing -patch_dpo_trainer() - - -from trl import DPOTrainer class FastDPOTrainer(DPOTrainer): + # Patch DPO notebook printing + if (DEFAULT_PROGRESS_CALLBACK is NotebookProgressCallback): + + DEFAULT_PROGRESS_CALLBACK.on_train_begin = types.MethodType( + NotebookProgressCallback_on_train_begin, + DEFAULT_PROGRESS_CALLBACK, + ) + DEFAULT_PROGRESS_CALLBACK.on_log = types.MethodType( + NotebookProgressCallback_on_log, + DEFAULT_PROGRESS_CALLBACK, + ) pass pass From 230ac5652b74c774565e3888627c1fbaf553621c Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 18:36:43 +1100 Subject: [PATCH 0087/1088] PatchDPOTrainer --- unsloth/models/__init__.py | 3 ++- unsloth/models/dpo.py | 21 ++++++--------------- unsloth/models/loader.py | 1 - 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/unsloth/models/__init__.py b/unsloth/models/__init__.py index b174a2cec1..891947d69e 100644 --- a/unsloth/models/__init__.py +++ b/unsloth/models/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .loader import FastLanguageModel, FastDPOTrainer +from .loader import FastLanguageModel from .llama import FastLlamaModel from .mistral import FastMistralModel +from .dpo import PatchDPOTrainer diff --git a/unsloth/models/dpo.py b/unsloth/models/dpo.py index 55ee247bf6..e7724c2d0a 100644 --- a/unsloth/models/dpo.py +++ b/unsloth/models/dpo.py @@ -17,9 +17,6 @@ NotebookTrainingTracker, NotebookProgressCallback, ) -from transformers.trainer import DEFAULT_PROGRESS_CALLBACK -from trl import DPOTrainer -import types DPOTrainer_metrics = [ "rewards/chosen", @@ -32,6 +29,7 @@ "logits/chosen", ] + def NotebookProgressCallback_on_train_begin(self, args, state, control, **kwargs): self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" self.training_loss = 0 @@ -67,17 +65,10 @@ def NotebookProgressCallback_on_log(self, args, state, control, logs=None, **kwa pass -class FastDPOTrainer(DPOTrainer): +def PatchDPOTrainer(): # Patch DPO notebook printing - if (DEFAULT_PROGRESS_CALLBACK is NotebookProgressCallback): - - DEFAULT_PROGRESS_CALLBACK.on_train_begin = types.MethodType( - NotebookProgressCallback_on_train_begin, - DEFAULT_PROGRESS_CALLBACK, - ) - DEFAULT_PROGRESS_CALLBACK.on_log = types.MethodType( - NotebookProgressCallback_on_log, - DEFAULT_PROGRESS_CALLBACK, - ) - pass + from transformers.trainer import DEFAULT_PROGRESS_CALLBACK + DEFAULT_PROGRESS_CALLBACK.on_train_begin = NotebookProgressCallback_on_train_begin + DEFAULT_PROGRESS_CALLBACK.on_log = NotebookProgressCallback_on_log pass + diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index baa55f9f00..eb8d4960b9 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -14,7 +14,6 @@ from .llama import FastLlamaModel, logger from .mistral import FastMistralModel -from .dpo import FastDPOTrainer from transformers import AutoConfig from transformers import __version__ as transformers_version From c6a0e070296677b768d712c53e6ef02dd9053ad1 Mon Sep 
17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 19:24:30 +1100 Subject: [PATCH 0088/1088] Update DPO readme --- README.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b2bcab865e..112f294051 100644 --- a/README.md +++ b/README.md @@ -11,14 +11,15 @@ | [Free Colab Llama + Alpaca example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Mistral + Alpaca example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | +* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support via [TRL](https://huggingface.co/docs/trl/dpo_trainer). [Free DPO Colab notebook example](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). [Scroll](#DPO) to DPO. * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. * No change of hardware necessary. Supports NVIDIA GPUs since 2018+. 
Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) * **NEW!** Works on **Linux** and **Windows** via WSL. -* **NEW!** [DPO](https://arxiv.org/abs/2305.18290), PPO and Reward Modelling support via [TRL](https://huggingface.co/docs/trl/dpo_trainer). Example [DPO Colab notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). -* **NEW!** Download 4 bit models 4x faster from Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit`. +* **NEW!** Download 4 bit models 4x faster from Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit` * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). +* **NEW!** Want a UI for finetuning? Try [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) and use `--use_unsloth`! * Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! | 1 A100 40GB | Hugging Face | Flash Attention | Unsloth Open Source | [Unsloth Pro](https://unsloth.ai/pricing) | @@ -139,8 +140,62 @@ trainer = SFTTrainer( trainer.train() ``` + # DPO (Direct Preference Optimization) Support DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on 1x A100 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). 
+```python +from unsloth import FastLanguageModel, PatchDPOTrainer +PatchDPOTrainer() +import torch +from transformers import TrainingArguments +from trl import DPOTrainer + +model, tokenizer = FastLanguageModel.from_pretrained( + model_name = "unsloth/zephyr-sft-bnb-4bit", + max_seq_length = max_seq_length, + dtype = None, + load_in_4bit = True, +) + +# Do model patching and add fast LoRA weights +model = FastLanguageModel.get_peft_model( + model, + r = 64, + target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", + "gate_proj", "up_proj", "down_proj",], + lora_alpha = 64, + lora_dropout = 0, # Currently only supports dropout = 0 + bias = "none", # Currently only supports bias = "none" + use_gradient_checkpointing = True, + random_state = 3407, + max_seq_length = max_seq_length, +) + +dpo_trainer = DPOTrainer( + model = model, + ref_model = None, + args = TrainingArguments( + per_device_train_batch_size = 4, + gradient_accumulation_steps = 8, + warmup_ratio = 0.1, + num_train_epochs = 3, + fp16 = not torch.cuda.is_bf16_supported(), + bf16 = torch.cuda.is_bf16_supported(), + logging_steps = 1, + optim = "adamw_8bit", + seed = 42, + output_dir = "outputs", + ), + beta = 0.1, + train_dataset = YOUR_DATASET_HERE, + # eval_dataset = YOUR_DATASET_HERE, + tokenizer = tokenizer, + max_length = 1024, + max_prompt_length = 512, +) +dpo_trainer.train() +``` + # Future Milestones and limitations 1. Support Mixtral. 
From 033a897af3b63b0b4fe0f9ba3c3309f075126f92 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Sun, 31 Dec 2023 19:26:34 +1100 Subject: [PATCH 0089/1088] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 112f294051..b2f5dbe574 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ | [Free Colab Llama + Alpaca example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Mistral + Alpaca example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | -* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support via [TRL](https://huggingface.co/docs/trl/dpo_trainer). [Free DPO Colab notebook example](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). [Scroll](#DPO) to DPO. +* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. [Free DPO Colab example](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). [More info](#DPO). * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. 
@@ -142,7 +142,7 @@ trainer.train() # DPO (Direct Preference Optimization) Support -DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on 1x A100 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). +DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). ```python from unsloth import FastLanguageModel, PatchDPOTrainer PatchDPOTrainer() From a3e4f47ebf2816e0df1421c20659436a80cc53d5 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 2 Jan 2024 03:20:11 +1100 Subject: [PATCH 0090/1088] Add tokenizer checking + TinyLlama --- README.md | 4 +++- unsloth/models/_utils.py | 24 ++++++++++++++++++++++++ unsloth/models/llama.py | 3 +++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b2f5dbe574..041975b04f 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ | [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | * **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. [Free DPO Colab example](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). [More info](#DPO). +* **NEW!** [TinyLlama](https://github.com/jzhang38/TinyLlama) on 3T tokens. 
[Free Colab example](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing). We also show automatic RoPE Scaling extending TinyLlama from 2048 to 4096 tokens! * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. @@ -95,10 +96,11 @@ fourbit_models = [ "unsloth/llama-2-7b-bnb-4bit", "unsloth/llama-2-13b-bnb-4bit", "unsloth/codellama-34b-bnb-4bit", + "unsloth/tinyllama-bnb-4bit", ] # Load Llama model model, tokenizer = FastLanguageModel.from_pretrained( - model_name = "unsloth/mistral-7b", # Supports Llama, Mistral - replace this! + model_name = "unsloth/mistral-7b-bnb-4bit", # Supports Llama, Mistral - replace this! max_seq_length = max_seq_length, dtype = None, load_in_4bit = True, diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 1c75dffd53..95926a14b8 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -44,6 +44,7 @@ __all__ = [ "prepare_model_for_kbit_training", "patch_tokenizer", + "check_tokenizer", "xformers", "xformers_attention", "xformers_version", @@ -112,3 +113,26 @@ def patch_tokenizer(model, tokenizer): pass return model, tokenizer pass + + +def check_tokenizer(model, tokenizer): + # Checks tokenizer for out of bounds ids. + # Mainly a fix for https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha + # where had token id=32002. 
+ # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25 + special_tokens_map = tokenizer.special_tokens_map + max_embedding_size = model.model.embed_tokens.weight.shape[0] + + for token_name, token_content in special_tokens_map.items(): + if type(token_content) is not str: continue + token_ids = tokenizer([token_content], add_special_tokens = False, return_attention_mask = False) + token_ids = token_ids.input_ids[0][0] + if token_ids < 0 or token_ids >= max_embedding_size: + raise RuntimeError( + f"Unsloth: Extra special token `{token_content}` with id={token_ids} exceeds "\ + f"the maximum vocabulary size of {max_embedding_size}. You must fix the tokenizer "\ + "or else out of bounds memory accesses will occur." + ) + pass + pass +pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index ccf61ee920..b021ac2b3b 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -791,6 +791,9 @@ def get_peft_model( assert(module in accepted_modules) pass + # We check the tokenizer first for errors + check_tokenizer(model, tokenizer) + # Get LoRA lora_config = LoraConfig( r = r, From 6e23d507509132f7f1beb0a76753f3b80efff73c Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 2 Jan 2024 03:37:04 +1100 Subject: [PATCH 0091/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index b021ac2b3b..7d0acab52e 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -756,6 +756,9 @@ def post_patch(model): # Clear deleted GPU items gc.collect() torch.cuda.empty_cache() + + # We check the tokenizer first for errors + check_tokenizer(model, tokenizer) return model pass @@ -791,9 +794,6 @@ def get_peft_model( assert(module in accepted_modules) pass - # We check the tokenizer first for errors - check_tokenizer(model, tokenizer) - # Get LoRA lora_config = LoraConfig( r = r, From 
b2689ecc64670e0a7da136502f8683368cd87a27 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 2 Jan 2024 03:41:05 +1100 Subject: [PATCH 0092/1088] Update llama.py --- unsloth/models/llama.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 7d0acab52e..3e6aa36099 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -712,6 +712,9 @@ def from_pretrained( internal_model = internal_model.model pass internal_model.max_seq_length = max_position_embeddings + + # We check the tokenizer first for errors + check_tokenizer(model, tokenizer) return model, tokenizer pass @@ -756,9 +759,6 @@ def post_patch(model): # Clear deleted GPU items gc.collect() torch.cuda.empty_cache() - - # We check the tokenizer first for errors - check_tokenizer(model, tokenizer) return model pass From 40f384c723fbebf0bc979920621ab6704ad48186 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 5 Jan 2024 04:08:53 +1100 Subject: [PATCH 0093/1088] Fix tokenizer + docs (#62) * Patch tokenizer * Update _utils.py * Update _utils.py * Update _utils.py * Cleanup * Add comments to functions * Update rope_embedding.py * Update rope_embedding.py * Update llama.py * New logos! 
* Update README.md --- README.md | 39 ++++++++++---------- images/Colab.png | Bin 0 -> 11637 bytes images/Kaggle.png | Bin 0 -> 9733 bytes images/try live demo green.png | Bin 15262 -> 14424 bytes unsloth/kernels/fast_lora.py | 53 ++++++++++----------------- unsloth/kernels/rms_layernorm.py | 24 +++++++------ unsloth/kernels/rope_embedding.py | 55 ++++++++++++---------------- unsloth/kernels/swiglu.py | 18 ++++------ unsloth/kernels/utils.py | 3 +- unsloth/models/_utils.py | 57 +++++++++++++++++++++++------- unsloth/models/llama.py | 24 ++++++------- unsloth/models/mistral.py | 37 +++++++++---------- 12 files changed, 155 insertions(+), 155 deletions(-) create mode 100644 images/Colab.png create mode 100644 images/Kaggle.png diff --git a/README.md b/README.md index 041975b04f..f24487f79c 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,25 @@
    - - + +
    -## 2-5x faster 60% less memory local QLoRA finetuning +## Finetune Mistral, Llama 2-5x faster with 50% less memory! | Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| [Free Colab Llama + Alpaca example](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | [Free Colab Mistral + Alpaca example](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [Kaggle Alpaca example](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) | -| [Colab A100 example](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Colab A100 example](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | (59 more examples if you scroll down) | [Kaggle Slim Orca example](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | +| **Free** Llama | **Free** Mistral | A100 Colab | **Free** Kaggle A | +| A100 Colab | A100 Colab | (59 more examples below) | **Free** Kaggle B | -* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. [Free DPO Colab example](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). [More info](#DPO). -* **NEW!** [TinyLlama](https://github.com/jzhang38/TinyLlama) on 3T tokens. [Free Colab example](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing). We also show automatic RoPE Scaling extending TinyLlama from 2048 to 4096 tokens! +* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. 
**Free** DPO example [More info](#DPO) on DPO +* **NEW!** [TinyLlama 1.1b](https://github.com/jzhang38/TinyLlama) on 3T tokens! **Free** example +* **NEW!** We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. -* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU](https://developer.nvidia.com/cuda-gpus) -* **NEW!** Works on **Linux** and **Windows** via WSL. -* **NEW!** Download 4 bit models 4x faster from Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit` +* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070 and 1080 works, but is a bit slow! +* Works on **Linux** and **Windows** via WSL. +* **NEW!** Download 4 bit models 4x faster from 🤗 Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit` * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). * **NEW!** Want a UI for finetuning? Try [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) and use `--use_unsloth`! * Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! @@ -31,8 +32,9 @@ | Slim Orca | 1x | 1.18x | 2.22x | **14.82x** | Join our [Discord](https://discord.gg/nsS4V5Z6ge)! 
-If you trained a model with Unsloth, we made a cool sticker if you want to use it! + +If you trained a model with Unsloth, we made a cool sticker if you want to use it! # Installation Instructions - Conda Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. @@ -79,6 +81,9 @@ pip install --upgrade pip # Documentation We support Huggingface's TRL, Trainer, Seq2SeqTrainer or even Pytorch code! + +We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! + ```python from unsloth import FastLanguageModel import torch @@ -145,6 +150,9 @@ trainer.train() # DPO (Direct Preference Optimization) Support DPO, PPO, Reward Modelling all seem to work as per 3rd party independent testing from [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory). We have a preliminary Google Colab notebook for reproducing Zephyr on Tesla T4 here: [notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing). + +We're in 🤗 Huggingface's official docs! We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! + ```python from unsloth import FastLanguageModel, PatchDPOTrainer PatchDPOTrainer() @@ -262,15 +270,6 @@ Two Tesla T4s on Kaggle # How did we make it faster? Manual autograd, Triton kernels etc. See our [Benchmark Breakdown](https://unsloth.ai/blog/mistral-benchmark) for more info! 
-$$ -\begin{align} -y &= \frac{x_i}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \cdot w \\ -r &= \frac{1}{\sqrt{\frac{1}{n}\sum{x_i^2}+\epsilon}} \\ -\frac{dC}{dX} &= \frac{1}{n} r \bigg( n (dY \cdot w) - \bigg( x_i \cdot r \cdot \sum{dY \cdot y_i } \bigg) \bigg) -\end{align} -$$ - - # Troubleshooting 1. Sometimes `bitsandbytes` or `xformers` does not link properly. Try running: ```bash diff --git a/images/Colab.png b/images/Colab.png new file mode 100644 index 0000000000000000000000000000000000000000..7a21b9429c5fe6113794fe0e9bbf812febfda2ee GIT binary patch literal 11637 zcmXY%b99{F)4-#qv2ELIY};yVHrQB=Z5z9>)!1p=SWVK{X=C%y0~8G!%+0s;bAPF7MK0s@K+{Okk=3;yIgq3?%)AcK&T6w~y|{g(&tMYNc9 z{@&dRO4qOX^MgmB(ZQU_++r6mB#s@(gDQn>Y~tQ4CDcKY z#QWO-p6o#bwI5OpITgDVrwO3~&+rci=Jb83r8zt1?y6`1vIXQu&BZ^9hFXTqd7JmQ z5lQ-5txkV@&)N_3f7ZL7YVTnXQ`dNCZ~;oFF}enb;F)=4g1OVjMBDi=##<$WwKb?L zILHv(s4P@>8wR8ii`fv~k+c88WnXPIec9s19k8b#%B+Fe#i1>RHCMi69Arb{w8jVS zBlgjK7lVL?@PWL6ID&Hip1!=|=?dGlLAa_mr*9SqBW#bTvaX6o;ArdGicJ7a(X&tB z!@zj6kAg|WCBc1F9x@3Y(GKr_3BwyVfb%q>ra|sFw+lDLvvN|$^JT#O&L2gK1hIl> zBJ=ke7NQ{ES0V!^Q+c`a*WJaCCsBWrdzhG~3lBC9HCfoOW8ILb7`HMX!Bz{_{>?QT z(jc1~iO6?e$S$}oc>V0#yS@%Jllb(nE19GOovn0iUP`7jd37N)6|j2N%2?dH7TXx1 zia+?CQo(|gL_5R~YY;Dxef1*pp@g&1P{ommYRwlX!z*_A=c|v(%_d!vwRc1oXAM4*Hoy^An z3G3g6Dybcd)c8x<7v58*hIZqO$_@^8d*7%+H}H@5dMLZ!1oTal7Sz zuBT23FRSUh!76xfA(&1g#Rp#3g(qzkdd~PfQKvpB|GKX=0XxtYn(;d=q=Ry6i!D8y zVQo5Lm5jhy&bLM2EmC((OW9lOz;~9)+z*`i8kD(4VK_$EnHh!f}PB-G}=lqMYl9g$!!um<(*n z?|mt`=hXK_K#*m~x9NMQ{~Z09|M9UPN}sW!Pc`hA<9v+zrSTAU5hx=^o_GGI@55^- zt&qBui~3CqtY3TH3h>eP?*(NFJ=+0ZtQ$sN?o&cD?k!<9Obx&#OWargy0wJXr{M`sKgcL2gdl3MJA04H z|GBoT&uwiO_t!Z%sj<(-6!)Qlt5ru}t*>x1g#BsEtoo^Bkdpeea6}UjviUyVf5 z#bc;O&j8=Q@yllHv&ava(+oehjrRZzp`Oh8 z+}xi&0maGuYPOxBbRZ+Pt`{SF^<}q_7Gno3LOtDrF3A+0=LTe?!GHVGvRMU zbR!B^Pn=O~Kc%@NI?u04(K9m7SI{+Xk6$x}P5%Kx3O;F*o_Sw4XDe9&COvrrmPCdCkDKijYEMq5u+7w~tgmJxLoz zw=hf-4a_wLEbvFD84E_5b)qVFqI$T=e#wuLjJpV9!n-^~{CDJlqn>$|w>UEv=skqM!NX}o@8f`oDBJW|vfD;NdXq=(VWsxQY_m~AU1B*A%X9m3 
zd@q+oH$gg*oZQsbR;uuTPmgwC>e~+@%{aU2_*zvI5VKFV|=j6-V8z`+bihJP1ua*;# z;D(h6q{ijpX}jmOy5>&Yx!MjNTA11m@r&Q!Mnvq)c%&~bEgfwPXJR7m>m*t` z|K@fYwgP%hYSePHwEVyH?qIz+Z&7r)3JO$sRLTf6miB=tOr}eZLKP6z{Mhw#H<=}M zXMT4rOy$5ZXD#Z@O!@E3a_j-*cY{*5X*~*Z=aUF4CTpN+1t0DW{pI|1O5gJjv^hTe zW*giawcxc1{?xj_Uu;v-1fTIW9mTVM^6T7t>&p4VSX+CLs^s*l6e#R+2ZJ%4iPRl> zn(r)|HS-s$k!j3fT5EhTxs`d}>fb4Toe_@U<4KCyU}2_HD4i<^v)(({%F~(5BcoD< zg<|DL+@Zpu)2S=xQrV21Th0G~ZanYb!vPIVW~{SvwI0c8+c_mc-8YGNmM)TVwc04Z zJ=M%NiO-iw$g~(wT&mhjFE>Nk6wAwG=11|e3ceB77<6~M!1TEPG&a?G7^TDicE`OX z`o6GZ#Tjv#km0nf>Fr%W=PY%NzTOg0c6T5DIADWJkE5;}MrHCXuv}=Dm;>IxmEmPk zA#Z^DCs~=r5=bnks+)}xDJKyirJVZx3wD*Of}>}ZC(W9pk&us~ zug?kvGKH%}+3#dvW8p;$Gaw+@dZeq`68t2tT}CHjGk>Uo3Wbu!<5oAz_p8I+NYr|` zUtWIy#4V*aYKrzL7Sxp)l;k1c_eZ;VuV5%H-angXaQ`&Il0(g=t#RGoJ!S7URoiH1 zhCA>+I<=4+ajYAUW;vTDuc0*#V`kF8ADF{KRGyXNZ$Bfu}CskS5w z-)U=!pFZdR&uV*P&t4Lp_R_b^U{Y0?bjbdn>XBpGe7t;_;TQEnVZ8eY7f>LP3K8A{ zRjlDz=fvV+my=f7(e&}3s(*?K)2)g{jYya5UF9n*H}qn!tLK-9@4~Ah@xraf&U4E# z$z1+Dlm`a51#Sx)o+=43J<$ASjM(&rS+?4{S-a{A_#k#dh!?e^{@{xv4(78z7WjP( z2Al07vGnXkkeJeUE8AaFbfbzS0eOS_U--0{Qc=n1ifz=k;RqC9LnXb;h=_C}2SYS4 zU(fZ8m7ejB@>56lXSSen^XRZR{;AVLm;H*kdU4^(xCNFpsyy6J9%A_!F?nrXacFo) zNyu+N=f~N$s-aN71uUM61d+S;cLG9GHXU9G55Rb$D9{w$$*hqtno!&A&#dg1dOFLujE55O; zbKc?j`~1zogWR~)R+c(k{|!eGAEjF!dghQu7Oz3lvcTB38=w|Jau zlU}*{p-6gmEbeBYHT?!*d2%(gzlR<%++41871Uvf%&i%*5(!&ljD zv5>uNvwm)9#^GJZaA-AqD!0uIMG$>B<=7|bG}Jr6xU3crsr-QZLGLA z8#!vDxmE$L%Ijz*j$pReu`b|}+!3!C1&`nfhr2Bmg9M~%Z@U9=1H0U^~ zwTWb-;D4rO+6+MeiSbZZF4lwyFDF`AM&90};1Mb3Tu|uwZ~lC~bMV{LyALyGgqc9L z-0L>|T^+qHM?UdO(^hOcU9@P5c;%9Ndof7gCk5sK7h%pA!oP0vLAxc1Zn^L*SS?h{Htoa^8gfeJ`U}6j$QlRZXp4REwQ*sx27l=#_Zso8Ury1 ztH%9jGn9m1RiLj2mJBm z^Gg|HCQ(OH#Mgk#ae42l-Qxq3^$D$CauB9x&b5ZfHDGCdrY0o58 z>=?5D8_7RB9M*s5B<2;k-{>P31Ghi++uN+*Xh-@D3NXbi-8hGMiGg@iWRBEe5!_tg z)BAqCtfsbnc zLcw3beM&#D>Z>p_{~7svTJu^g=D>Q1pN71yGiXK}_Bhc8NR5GZ52XRe@r9{rJOI+So;ldbv? 
zor?~`Bdd+I#6TJa-B95lbmjmvzw^P{VnhnQh;ev{G7LmBd`(Wd6PeEG9MW6{NOJ1Y z-4+0brF3cZNb~4%+T&HOomdG9hzEfyzCu!r<5Ttl+9F%ZIFuq{Q!=&qsG1RyPiw`G ztN;}R#x%L2zE34`GO%*{Kjo=H0J3?rSnw74eJ&>tbnYpsymmj0yT@hRdDB27rs74j zb$9aCaY`8)rnK?l3>L5MAyThR4yPdFTi-n>GjDthv1mAWjrLA#3@df0Oph~8T;5gg zsFJ8Mdyt!z5NC*8E+F}7)?JpY)ULhATta{SXW`Gd_>P-VKX2uhM1sK~UJ?;HUm{ut zOER8_xu8`hcd7>!71E?XRs@&^Z|A~d=-SZ$%ftm~ScJO**i-`Mq(ZG<&rDnieRP!F zXHJ263}aqZmZ?Z9?!{tZIEt60#zs`)X>zYX90kcvS17kn7~-D2bol}_*q~H6{dKFe z?>};R*JR}d+7&S%l7AZ@Rn&iFFGu!#Eqei?VeF|7G_6`@_zSgUu})Xen3?0TQwtgv z#7Mfy!hgCfUEG0(;rdsk66tG@miYpr{<6-MLf%wi~Ves{c; ze9CKxW}>%DRr&06qHZybcNUE(8AbGWd!o-A&M zr17O4qiczqfVysKg89@j z)|Go^IrG#JX^k$x)g{`o-pEwe;z5`n?>b0^NbT5rG34uF##QF$A2+p2q$>d!u!K%i$chpav-bP z0;IaKm&5KPr`n4V|i$yV&iYL1ad*w7R3`$}NSAK>nd`YXJjB;O7kf14eW_C$2FatjD3%G7uIf=T( zxOzuEuBeXtGh=0X9T?1V-9g0PB}@|xs-x^37fLtBt~xGZj#(te<8vqzFdK2NdHT%J zPuSj4bb5XDDZLr1FK1#E2pNak_bf`C`BpP|c#2wOOe1Wg9%%L!P}i0$DnLuE5vY(v z7w65xcrVX~t)a#(3ztL$?_$BV6Xn0-7K3BL08vSE83(zD=(AtTU8rxsi92*Gg}*z~ zm63C+hftAt=*2z=Q0@HXLy?6v$1X9PI9pnIBTVdpP@w5T;J=c%T;V0aAF;2r_r7|S zzT>!}eo?t>rF=es{kfu3r-R422E9$=ufxwyzv938Aak0PSWHa6&JE1T=a}0s(az3 ze;pxIiI{d--PQ;W{h5}(FVhcB_j3SMj;Wtz05n`Qyq^@4DqA>&x_Rk!%uu4$#Uru` zUY=PlVeCPg>rEwc*fEC1as7Y^az~?~;d%^iPg$$T7BMEN9wM7doC)NTGJY|A@Y~b8 zgz3V0#h;$L?_CV7Si>)RgM`sPO#4nn>tw>eDPgs_y|Qq1PFeY#n*nhPJbM_P&rv>C zp~aUPd5{@)rBf`P2d$VfX%U9Q{QKr)SX53Oo{G!`fbbu*fhuinE0A*%8ffm; z?C%+$=Lo|AP(qw`9>{Aaa*LkwJQK{QWttgJRIuKmKD&!b;2XsQ%Zb?&6=#}#eQ!iv z$FmNwK}m9@8J^@zMfoPX08de+L6aD(L6MM>4n7+I1QODx#*0pCQT2$>DfAxAkeNih z#kGELn@0D^iBi0V<$isEdXIO{%U=6l(a@|2c-5j@P=4*MpYab z&o*M{F}a0#CyHfOuJNg>$fb;ZI|BtzHiy=!hU=MsFLv|h++tEQN6vNAE@6X)f4Q}? 
zWM~>i7QD8*Ee(s>A*Gm${8|L8HvBu-FSZmLI#A_*+0k-oSHUR&tj4aC237x(M{BJu z;q=@eC(Co~`(j()g~eW8U~dso$M={a>z&2cr-~;wy9zN?B@#mF;De$dzi%LR^sT4$ z()(-A16-|LHU~%d##&^b1xAyBqIVGfMf2}%0jHnZhXxOAKxFUm1&yZS@u_bw}ubu(mGq16kQ>30@`U746uYWQhX| zo|7}{#g}4kjB47ZSM<+PYkvXH>gIU!shOJKc5+u|Jo>Uv*%C?&bOJ*j$3tn=;&XJT z6`ejIE5oiN1AC0dpYoTD7xnx$;kP^u0`y>|w%Xdix{<}l&m}Pe2;(v6;RAHh%djRc zl5BjCn*7q2dC05?<0DhMzm8mKg`dlaSO`z!cam38Uzmk|G!qN>X0sF2@BWTYdlTc@>G7OT zU>vs=dVIcA3ylAZ!}kPIj#CAGu^8JQ~8q*kasHku!N9 z7x#GGwNo1&{LuQg^VZ5bh&ZCXRXKcbbr>4O$ZFwj+X|i@;G~uxJJPYi*3+MtU*JdW z?P{*jV_8v2g$L-^5=gYuyKSp13*jvc^O~-(I98~pv=w3#Az^ABl#Iy*QzS`IYk}v3 zr4lhv{-c6|O|=;N39>X@Hp?$rBdF9bC@py&ed+_L!Z)#+mOegsbbQY>wam&Z(=d;; z4Ohv}pyDAilK48++jGssCBr~LCS9bVRcxK&V~G*G*L+l-&Wgz*3j|I6nd_JNd6`h9 zV7&P?#rpz1C(((4Y!`gV@yW?@t`hKJ=tZpYnOd|jw-x&P3BqJ&f0(%B9L7X5Wl`o8 z)aR|Qgx_17e_V0o@gLS-h;?`Q8mqcP#nnOrIwmf+dXOJZI(q#tCg@DC;^UAmn%7|V ze+o)?XTkFw;212*6<5svDvafspRx?4vY(5uu6?Kl*=qxAY`x4;P={gYS;bSJi z`4reL9s~1P50ni5uF0fx%p!jym`n8SOw`T(qTi#)u6;G8JhxpcJJ%jV$~WDQrvG}B7elJ6iMHL;ti&)1+| zQTn2o2fzf90n`G%VtFCVjstMhQeJ)~o=bFzh$)=Js=|=S20R7XJrSG&gUG(J^) zWkPMK8M#bS$72!?lZpkG&+~@9WU2jqQD<}`<)6Rx$VZe6b_STXmdV8mq|J~L` zJHgH24_dH$mA)!!=PH#=-!mVkQQ!WGnQpZ}xyuqg=K+u66dV#Zmp&+W@FW)wb5yo} z03HjQ(eQD}E0$2+({)uIg-g^%O>zv;;_$L`k$e}kQ)-j0Vrri?bEK+k>|`FOPRfj0 zjk%&@vNlzXl7}&TwGYzq#)dEEiJksD+9t-@=zZxVLcBHPnY&CuTGiB`TP@|ou#}ba z3T4~9$Hp_8Jjtd*>bP!k7<=kqS#LjaXZ}X-{Jxem>tV$p;T%N%T_Acxw8m2R+C#N@ zRswixiQT0Ml~99Jx3p1yEaZmXj_^S!0112{tD}rxa+!NawC{qKUVBcxpaX8kxxWaN z8n$JvWuOK=K+hjbE`%HV^4O<8@=Pgg?62_nG#rd0&gy{j>X1L^0=mAzc~+NrsNE*4 zH(ut(tEtdj+rWz}cdrV!_LKY7*>0@&hvkIb3zNm_%~xAL?jLcrBU_-3#Vp@J>W4UM z%(uTnDvQsu?6z#cI<_VslK(U#2gE-P;Xl)nD)Fnh2MVyNQi;EzI#rjJiT;V!i-Wo+Yq$wrL>(?@HmS)u+d*g3O>I;tfx_))e-GAy}yup}g=Rwa> zL6G=9@+$b3kuFLk%+1af^)qazD3$Yb`ET5?PI+Wl49zAl@~!)Pz-IYkBVr;rz814X zl*nhV3GM&VtbYcK?6FuQ+J&9We?%*VQc%UInx2tuD6)IU;5Xd4X1qZm+#QO6bQ$5xV<3Tau(9y6LfEY5>=$5Mv6tv2OF!XgRBONYS8g} z{?P<{%sChG`C8y`gD&n=m*Nu1c2 z%_-8M)^;1G0Dd6;2W*3E1brdeD56l=$Ax+xhjgH0Ovt0(nDRSbj*y?ApTq 
zxMnjb1lf{o2&@{bw(^-w73`rGsk|IO{WZEkmqa>V{{F!M;UOPGJ42_DrMfBTAt24} zIj#9O)*m0#&yFj1^M~YmZ0fg3ss6W+=5rz4p}m=WERzKzTV`e2O<7Md^=2*?)nv$s z4P1B3A4HL0b^~}%fsgPadYPwZG&hEomh_?BQhp8$C@z0|MweL4Fu@Am=->E6I;^@O zey0B6J6t%ZHu%1Hz*=_Fo!YtgxlFgz#F?pr;$30SX44j>c!}+QCPDhKPfT2AadDep z%XJ@i2MTY0y>nLvlnI#vKYe5E$WHsPoJI!^`eIfw4VSK{PxOnj&pmGU)IP5pAlV}k zct*HLx@%7P-zqYhKMI_DJlXMPC`{lCRmrDwFdFeTTZp*o>e9Y;iCvXHd;)$Xq8s%1 zbt8#rr@9KT{IgR^r{lS;j0Cs!UoF(}`e&b8aKo7W;cx5!+@`Kklr-hg9dgNEgT6B3 z-L&cXbox($E3DFdr3jox;jd|gd~zRbRSKf2>F^q)U%Vh`s#BdE`wW;q>~UWA-mprj znFKBg9Uu;RYk8I2N#9^|Pyat_7NP@L4SP|qU!%nxr2y>ow+<9D)H7DQ2IjWw2wS-G zsk*>>Xc>R5E1_G?o(5;yq{!&W6*WW#Z_YrlpY(E3jL>5X$zmwAq%0_5>T?w!~KFa(qqU<@Fx> zdD?Z#7ad$);iD%ayTv^n%|DU|{S9iX6Jlej(GAI<)#=yx#1V_|`vlATZ_-8z&QJ6# zU{*G`7xAO???HgtUf2%Ki~004Om+b6`cCu+NUtmVr{B1w2Kg?BQ-h16l(Bh=!5ruz z;6oo*$Y>fLqP}U!X3M5-Jyx@Yt&MlQ?<4<#r?>>qxYi~YCj*7P$^rpi4wk_Q3dSGU zDGzaT9X2@hlx{$qW=@DFO=%|UJCX~9cG1M(IE2e#vwjz?#<)TI{-&${PQrf! m;SAA-E-#YKDTJ`~4qcC4Eb`<_6b;6yA>^c#C2Pe^g8vUJ|2w(> literal 0 HcmV?d00001 diff --git a/images/Kaggle.png b/images/Kaggle.png new file mode 100644 index 0000000000000000000000000000000000000000..287e50b848df62f7e743ab09155aee0e1ebce3a2 GIT binary patch literal 9733 zcmX|HRahHLlnqc^iv}qk+_k~ot-+zVTY*4}TX88Kthl=tiWYZwhf*9$@nQuw{NL`r z`eQt6fPlrw4MrSdV zIe$Z1mD`e?MnAP-etTdKS%ok$TcySW&}m*lK}`gMktcnw=7uRXt@EQkLebLkYZ)UwHmiQ+{>^RCp0io;%ZEIv#oOLLE{7s6dYz|X7 zHkkKNHp$NZ5WGssPUe})z~wN!v7YA#Hk@0wmb6+T(=~$#S3?@P-$lSiB05VR8!Zzq ztX?h^#F!}vnki61Ja87!3ZOuCdDSImVBDKC46O7f`PT8Rh>!8$@v|t`C%+#7&2|Y6 z9(iCs=3}yjT$9@X!>Q=CuF?aW&kUGrf1y|Vt3@KoF-_^o_upCzH9mI;BGk3$}$wy(P_(=8RV z8pW3yWCjc~n+ZYf@&Op5o>8!md$+xAeP@aT%hpZZr}AnZS`GM^Hwi02N+_0GNdJ?M zLcv)XzG=RgUrHZ0|`R=Rf2wz`YLRbbs*>Y!@5AS+-C zEoRsHqGRGRF4wvlTTPux?nKSAs#W;T9KILWsE76w2S9@G7Lcnun!TVB1b*LYepfi$ zYV~Jm>p7JiI9d)9+Z2y6@L;__EC52aOlKTtgS7rbjs6kv0~qVFf-WJlrfa69Yr3Lr zI7ZOa^IS?Pc6BM0Q?ZuZ<-zmXUYGj~JNEY;=i#7LZdPT(k(V&>=32QR2etvQLZ$1q zWwnuuds}=}`#o>6qt5?aj^_p+gblHgFeX#`^((0nOS~#u{sg 
zv&JhBLO%VQF;Y<|3{1!uGB1l!KQKlqiXm^Rq7X4ygz-WOlB7JIAfObTu}_nawJM*L z1$;C?c_PB?(C$;0xH#@^A8zPsHhF4>8$;f~`m$l$l`-%;!kA9|ZWJ@un~A-T^Svj+ zn0~=`N=Ce2na(o@WJNjYHunX*bzgiRu&`?W0SSh%>&m?3M{A>Xvr`fs*T6ikg@l)d zLL`k9+$21%1HORM5LkM$qJ9wA06O8<_$OF>U&VW=n+^fWAlm%%wt|UDyxK>f6uw-} zjA?M_D}Y1&x}KT&jlA;2-G{1&pX%_!%**$XI?4X@alz>w7I-{;Tia5u^}U>wEWA6Xro?nkp$*I47dtC*zuDio-t4x$6QSmNe9Vlb;d|bauk{L>1!b7C z314%(k7bEj#o+zH{sNRPDFBDDO_=P|!Ihm0-V66G&a-u|cCCIqtD zXN1Ych9a`mB^$ebUmJ2k|Ly!r3s6UjsBr7kHoZWk>ty}=m*jYhQtux@VgRqYwYq}G zVV~ldTR(_XRR`HGhyOJ8sWmg`~1 zm30yNsEJSB8P6cS0rCIsbnYwWeK|4f-#@=f<9IF8(jND_+jH+;4>PhdN--IwLbKJ$ z7bY^EF@O4&;lNj968JsaM&aC;@rB+8od4wb;|{k!5D_1Hk*n1v+glp<_N&kR-`ni{EeZdtRHh14)Gp+Qb&W|h!!kuaGu1E14+F77ZzY>lf#pr z8m<$}fbX$Q!GY`3Ml|IwXrGa8()~-R{-L#52N-<65cTYKtjsj|T)bkwvr3@E9-`T6 z{QHosr#SB6-14jPM^aM12I|SE!J4YUqkDD2Yfcj(s!iWAq8j%vmgDQI&<7s2igeOB z{rg@INGr$n<|TQGnPjME{uSdUIIPC`kJ_Oi@MDW#9YNfNFV1FLdOX!155OAXV=8Hd zYIwulQD}8XeKZUA(L4T?NW@J%IX5Nu*-<{Tr`_`9X1m?B5Lil?7U8mMe+#VV_{oej zdwDt-b@NzT(^78GcYh|sZUnPua!9?}xsUbvTavR8)-PsG=ijHi_lQH5VuAqMS5rcb zAYNb7sE)_Dy3^<%xes+i{Jq{O0z<#*ud^>*A2PfYnK`GYq^@;!=kl&MU_Bfm+?0Zu z;<^{hKW%6Q&XZFoJNkAN(_+HiVGjl5+^RufEpOv2xBQye)5fPXQRu6tT)`^nv|60X z&q7D0yk;5cXrgqDoQ@yHx9nkN=5#ScI>tZ?7v6pnRi{HfWQIlYiwID%D5+BedMhWP zP8P>d3v`x5HzIqJ75fbyvv*W<`r{Vf(*dm-~H>6(F{222+- zKmD=e>FUJfG#_Kz-3SoUSp~cNc^7}Q4PP?xFGuJv?I^tw<^3{l9pf~|(2E`sV!PF4 zqQc%~U#~0&b;`|8kC5Ld@;?HA7EQF{mMSt{U|22z2fIu{zpbP(;|GU~(8-!|;@H?e zYS7*}Ir%wB=X&^r>qe*zZ$i9Xlt})5Q1aIPCrX=r&DT#KA3vv!V16M98-^GF*z%R+ z)5^PcP~@J&PMe=-OwxLbM)?zxVqGZTcQO@UBsxY=G1C{Gi8S7P!waI_?Nd}5OA_Sl zE>gcJ%sh$5?`<(Lm&Un*Sb*}|YwIU1!Gf|%See&B!#6Fu=SGX-MB7Fg!wONM$-&eo z@y>LbFI8IGqbbCH7^2YA)YD0k~3txXI?n`UV$;mIV>T#`5PmF-*uTSp4Y$jD*& zaZ&9^D_P21irbk7!A_7zTY3y5*ver-zUvJyD&e&%wF8!srcz{{FK@-D8_|Pk*f700 zGpBpV=m@3-wT|W=d70RFC{1_ z!&-^?&6ekK*~9=%1FuMlMaN}t75DsNi^>|K_g_waXAS{l^hPk5|0P$Y!AJ%q^Z=E} zXPXNAK$<)#g%0t&2sY~%So{PbtQC4Z3 zrLljc8SI~TzH!mQw*U@BK20d^MGEL*D>L<4qLF05`y*2C7r8bE#+2&lvUWV;Q#76S 
z3qSULr3i?6PWhtAsvgI^SXP6>Yod#(ln?J)Dj{=Tw-Ye!r5|OQS3W!LzILixF*mCy zBKb;_iz$;mEe)6&Ci;PhZA3)u4PR03lYcxdpXfQ(zX{kJY^_{{-&H zPL4n2Jf2uala3rB-LuJley8W7U55ng$JtqBm!gXCMzlR9?{|B+=YI#>-Dp#BDxol! z1(cVytGppWd&rf|9+&njSlSe3xpz!#w`$?FQY~6YH@2MC%%P9UxQxM}Mn);tzIhs} z8*23Fp~#r)=v)2ZhnOVbDjsq9bi6v`aZT@@^<&_pZQGennmH5ar%CDz<0mnS?R!=} zAfJA1*&j@_bZf283Z&i#&u1{==}}+5ZG>?QPNnp5yI<8_JEGI4^bjo757;F#(tQL$ zEAFaputE!Wz66b5Ij0Azw-MCSVxG53hZ<|PwmlEwf^p@ov#Ewd9Ga`?w%qbnuvE)| zeg_|QwKhQ|qH-0Tl(KCZbp3BB(q-nc24lQJffSOMV=3{^Q7Rh7$@wLJp%jL070si{ zY41I#blR^(jaWEet=AxS8p0fs1~^c#)z~ZKG(8*Bq-%a+Hu&m577I^@tF{QsX1^UP zvxdlmNDq7x@UbG*SQ45NE0YoQ%e*BLgmfY~j?hj8a$2kOZ$A{r51+)O`W#X{&*ha~ zB>ryBoz#4+w;T)w4$tXF4@qsyFza8@hErVeP1*wP-twh6Np9|@)4G;Zh@NU5wEh(E z3{G=(C&UQ1+{L{y?`Pc%9xpg`+m$-aWmz<$5J-@d7RF@H%K}(k69ucZK=aTec{&&j zdm|Y_EQtm0McC!al1(G#B9uOwebH-7JVCnW0KJ6c1#4p9^~q$fU2K@3k1PCxg}n5(kc2?lFu_L2?9iFY#msP8P4)Ue41V~Yz$pO zQ^9-UB52B8B@cgh?!vWOK4@0m0;mJ^Da>o4j^bDA*(k% z;vLbKFm}}hUVD(gFj>)kU}lBSdrH~ZxjE{jRmBnI%8{AEb|z-^O6V(SZfG{~B)9WL zif@r6&5ycj4!i#+OC@Vw;BfRF%gpa%>ekYQFdh~nP4ulzlsZ-S%%cCROH1oaggxZs z?|9R%SQ?KNAR7S{ z)Z$V8lsC#wleho&5CwDp4HIPywWvSf-H+%k(~1nWWF#q*&9Vgh+7k|ji%6|1f@4Ym z8d@=FzrCq7SIZhNq#|jy+2`hL3|AhwMPB1&-`uI8MnS9cm1xJwhYi@wVR8|*k5hqM zIyc!s(Fe*ey7j=CYEDfV;*KF2)Q)jpGTu#lOz-|I%KDHW_sUa0_F~n^2(0{Qf>ox6 z1v1+XPy1APg+Pnq<;|w%@7|yZ3l<}hxfj~Xs3VrCU~s=#pv4$2R@-8`<>dUz*^;Y~ zoyopV70K~o2Tiq&llvEVM^=e#Dw;v7&IM|qXCf|=>^`Sbh+9bO{0q!E+Y%De;dh#2n*tsd#) zfDin#rvtsLm8WtVktx*YjY9tNd-VTPd3TDy=j6>)kW7L$j4CP=FXcyTHa<=7wx<@g z1d0ow#`^8qW?x_ZP2@a516vX`26t2shq|56<2v{lY}YxIrPRoW|z&ktFXV zkyFusm4bHHN<8D~)}ljDme5e+5sK(7$M#){&6Xzjb38N-$u^Ogd<`vCFv57vmXX9s zNzPL~c}$9)t&4VIObQ~NUPN>tRG18EEy%Z!HdyP=9sZ&+<=w}0qJrWrRunt&;|BWm zw@vc>hsPRmCHw9ar8H7>f>iQrg&RYrGHT8L5<+y6mkeUvgz|`QOhX!WnACMJi^(D3 zgS8vVBz%vM@qDW=ARs6o?5(hjs7im+$&vk!<8>j5Ap3`DowRFZo9v+(9DdlVK+6dd zX_g%$ti-MC%~5fTJY*%Mpn{lTZ#vCYW~;*j|*5f6}9>+#gio%#QflnaJ8|d^^B$xFmet{N(`+|uuprt9#B?b(=kGW z`a+Eb4W>$E=7;nk^dWMgpagwPI22uaS%%$yuLCDG|6>x;O-qNVnp=KUhAR?n+FkP8 
zMhnu%>?Mx$ooEU=MX;nYdkplp)WI6q2FcqxkIz`zkqbV|g5Loa>X6v=2*Jr1i~JFc z%mpg52Jm+d<+jFg!6FjyPJGp+3O?b%BBnY@vU_#Kosc_wr`TR93vS03+0%apHZ#Yl z%x-qAl%{13x{YJ?=kX)T(gaF2UzO2H2)DN;m305A5teV#p?D@=V+!FFNaJhJdGajrzmiHy!|Jwwz+b*r^Y!R9DvJ4yqoeGl^#~Ax- zs(jJjyDcWyG zh2mvMeTffM>dIVdbwwmMGEkg|fETPS8s?0dvGpk4vT^>(Dn~L>ZAHmIC7zVn&p|QV z(}yD4_4ngoG1P(jQ!e2s4Lo3m2Tkn#3Rl3Owb5`2mVbi~23A&hbWw4!1!(2k4rY+i zlhm(r;NAy=+Z@JcuB#zVpCgG8IoN1&6lD;iq`MA38$<9%0TxW~A}uI3IAtmwA4mYT z(bI=FUS)o`MU29^&UM$=w42)AYACgkE409mYI^OaQ`Aaoz{F+gmq+hvZfiv3!vaz3 z@7-EQf8!*ZBOT|3$Q)kEGXUb*NY*gNx&AWYDOQvzFfZp=Sx8r|$_X6`+D~u5N_({S zIjYn3?s>I?`?pslk{4D|B9X4?tCM@3;oO~*v`mvrAaD)U{P!WXIgvWqP@OxO_cdi_ zlQGYQbfiYNWdf9?Sjr^BVfvRmViiO_~ioM0ggy+vR7ZENy;;;v8%+}629(W@-f zA{^{B_#Q}}9LTCes|QH#Q#d~X<#VeAEzN+@As z#d_rBL&pL;xSZi17J7Kwtmzb%hun0sWRYdEF@v9#=rr8$BFX_0BM3+;zg z*tsP}V0vc4%>ChJI}*#@@I+Y&8q_pbm?n0^Ag(wp5`ZWo9Uf#17?jg{^A*GEx$AOF zUuewVZQ}VrCM_9`gIVi)hu2^HiKOs%Eti{SZwTVv;vfPuI?1v~Do0~bNb=ci{XN!&Qh75jgB55-7Q za#Bm$Kw4x|^Xi&L< zz6IN{VZRAFw@5vJ6x+*v)-xMnz4nt^?@x|NJvCQv$XAvKLG`un`tvU)G@Wgz#7Pk# z2DDqZKxx4>%p5v5C=1r=C>~Eb|6P?Qk@n$G3H%wMzVVY;?RzeA!&9*MoK4=${Ue6yiNe^mxWO-Mqj|LzG||S>j^`+FVR|UvK6g_CL65Kd14VGcwy(G*awGm!@D*T@4l!;ji% z#SbdH6teZ<^?%lZ{?FpB_QULFL}4ACCHRP8#MJ|4r|4|n&sOVA!mkMK-3X{8lq7M_ zpEmyxOf1fGe7b?(cWqQ!%On`w>fSIu8ve0VA;8x!{Kc#jsaGAchOJ0Hp8wYcDuSXZ zKmwU0Wp5JYn8b3Rk)FBqdkq;#oM{9VR`zvibE@N2^~HqagCS=lFAQL|p>H<4;w!=09xed-K0da4duoMQG%rI33W z&3)p;pnr>{9C$F2VQcV?X}p)U#g4cNxq#!_hd&CjZk1E}GfrlMPT$3X6u){p-g`Y? 
zJMFafxlu&8+HbJ98$W2@hLx=3iFI2dD&d~?%~Xl&wvsAtH>GZ$tdP6Y#R2)2bhSxx z7nrky5@*uCV|!|m9`GSVgBfP3WgO)trwJGN-^yBPuU?Ajh<|Jm$$$4AtAL0vg{hu_ zjI7gMUlA5^ktXwyJyjf8*)Eqzhq<3_1el2XXR-2W*@^SnB5$(jln6q@5_{4$wHNBb zZh~7w#3c*94*ML~<_%gA|42u+;VVhPb3@2~AQ80`Z=U$aNFhIdS(_!AfdHQpekReeHKq35kic1 zw$3-E(X2r_sA*OEk;}qmSD#VV8zIi6%8oM!buV6Q z%%LL`bC%c-uB#LXC=o`0Do{a71i|=IpwGrTrq*Zg>F=3(uo^;FjXyar-$v?Qrx959 zR|Hht^0O!Tvpvto5q4H5YWzvC%^_I$a6(Ee=vcz)3l!(*V7fTXq zl(_Pp;9kGRiN+h;;1=y+)NbA=w|Y|qIk!FsQEseWOP(EDGG-LT<$&RH?GNl!^`2 zR8`CaB(3X9Yb$Y!xVVZ!`2*#<1T;G9^<@XLIMyic6j$*}R5Cu^c@C!y9P+ysYU(D) zH&)(YmJd}%d2kBYc)u=3Pa3o>d+nZSbM?U#$Pk@pL1geViQR@8#VGm+ zX;^Uvem_gI`$yb`3NbRb`|u&v#CGvWY-=F=)+(kJ@yidt&*R%dW}QgHb{C z2X^!b6j}Ybtfnccz_TLUjgu?J*v{kOZ0C>W!7+JvCao|_bU&}xVDP|$a}#CnNzEtI zA*$p@zmB19G>zZp%nukx&fy$n_$Zq%@Zp)HkQfF!A7JGl;v3h*6v7-JR;&3nm(T9aFT_0`pyXLt+bx zCV>nyUSvtG z$Qc5h^uHsA-1-<;FR#P<=RLB5sAJ&Fd|iYgJCDK4>(BTvN+}mgFb}Hn8y#~?8pELO zbOS}szl8Gv7Blp32qyc&@16HEKLYDgq0aD{;F7Ztar9bFRcT9tm`2=5qx!EH&7Db1 zC0~KCu&-CEmvaW-;}ZX;>iPmcAl8DHQ%VZ`#ykT4^;YK(pf2On;U3lCTGtS;sJ>{5 zG2LlshQ`LqMa#@z)%V)OpZq;sw@pyT1pY=U2EqqEZuX7)M=;?qEo4gTcWofr#h*h>_vMLj zp3eOP!!<^4!nhapdoJXny%MEXzHC!t4#^|I;ih`ybLf0#orh4e+0FWq16V5qzKol; z6W!3*GF)j;>KrdWtlLPsVW5q~gM|Hwq~M`nBQ|v%mTKm_3(72veOXPWm3A%w@JJvD zopI7-M`^WgK2aWwO0E95I=XKE7?w>sR@JkxLO!KAXWF;-;{L#%6Mz*UQl-OLn$cG@ z(P*O#59md9ewa;6VoPcz#{LV?3A3#nURM&}2WGBrG}SyC z(t3V(+$SVG-$Kh}r=sj1^2#D5^$Y?2@Zy(rg*TD|39|o=OMXTbGSVE2{iph9p7ZnJYM*O>*!IVxIIrC8A$@j3_Y)HX7}OU02j=7f5es- z_{UjPmLQ9%vNs{oDswjUKduN>zrF7IZzyGT*0|61rTqc2Md31}QFXvhg1PgvcBokD zpG*b#98ULOQAsVOf3y)Q{!13dSo@;=WCnB=NEnK`w|yw*TEO*RLij0ne~M#sxf|QH zB#7xNU4h|Ojqj%LfPrjj=L%+V73w7GK*r>jC$V!`^mEU z0!H!-VF~%?xJy;O(^mO(E9gXw3u?#dV0(``N}BxOKWNbZ z48IT%@~K{z(8z|cv1p;M*tmAt4EKkyQxPmk#Vkj0nRE7+Rsm^}%I#cVxMg?~gLD?g wDv1j?z%02IR|=;D{6(-W(6T0V^8ozgB>sGXd!}UfGUNqNlvR_dmNNbPKO%E=DgXcg literal 0 HcmV?d00001 diff --git a/images/try live demo green.png b/images/try live demo green.png index 
540ff5595feae0169b7af326cde9758ec347d2d7..2df56933bd319710f9c234229d1349c3612e853a 100644 GIT binary patch literal 14424 zcmZ{rbyU>Rx9CSu5D^tnkX9O{kuDV}DUoiZYv@i91f{!6>4t$BIs_Dkl-f6mu;iqZsk$nQWP5CU15H}4@3EE@>q#_iiUpybcv zf+Gmz2}JhID^>Tj?O8l;)s?2}{opP9N&eTbiLe5F@P)CS)h@jMhAP!e45`)MgwI`V z!{2YhNuZn1rZLY@4Z_X$l`c0VB?(Di-kf}O!WMW_hFezV^~v`2ITgfHw33(aEP&UHw(o#Z&?a~jWQ0OaYk?0qR8<1W)nPUt5dv~nxHwkAR zQd7=6AbU9UfQrafh>`xn*+xdwMOUVyFsGs*B&VY5AZ~AcsJdc(=;%w*5*Z;^=gPcg z_fA0TYVfm^P2$r`_JP>U0Qd@a#7KHKOM!5_Q+4yR{ARe|YnFJ*b@XirRvH7hK=o5$ z?y@IL&ziIBRJ~taujgQUh{m4uC>dFpNM`Lm@slv*srwUz>V>K?NpBahlkMv%Z2Z<6 z&27)Y4S$CNv5%GXTnPG==gZH^=raCgqM9oEk0-R}%NI7I;1p8-J`?&RXaQ@=8Ps&Y zRza<^lVnuGoTwiH(Kkbeg|U%SjC&2A1%`za6LF8rMH%O(;`*0{QWceMU8{NgsD?lK ztkP1(oTfXdnkn-V8{*6*XQXNJ^VV0b%{+Ru(mGBSbC;I&zrbF?1v$L-UCWo*t61_-N?G#T_ybtm@q4qxoB5ay|O|?S1_#*^M8` z@qGmQfiEB*Ghy_BQfc)f@avG|!b|bGXKZ$Y1RFj#`8YtkXECNsr+wP`uxl%p+*dMS zI6NdQT#hcYIP9s>F=#S^EvlP*0o!FP6bC2!dgCo#MguOrt2ZGxA3#qw;2Qz+ zb8h_NuNIV2&x!>`XQKpL%cTkHUlkr5%%Xu=X zClwwGGJ2*v)}4s?3#tA7)TCKFNUKNnWFEiiUB9ISF*<`?ig^1Zy=7{wREjY*%GAbM z`^}kLrqh1LoEwmZhdo>R0SWzm7B5L|2`_A2vPTwiVA&lJi~dWQ{ZeUQMl-&8Y`F1q@eChxv)qMj9v ze_sSxmVV!>u3u<`dirBYOugr<*-Jw;v*T_vhgF^g+THAk@K&?-ZkR-(sF!;?HpR2h zU&JF2$R1u#U|_EuO;`HfRxZn>m}z2Wc+G*c}YBEDq-S&(YtY znIIvtsb6!$9lL@d;4D2iQ?Vfsp|AJuJ@tn2^%V9ca9a*6?+hsH`9UKJJC^0}|P_R@--uAjJP5tyOghnK^<#??@>@OYI|BoL{{x1FQ@$8q>>K9E6?fro``i4|80yQUJ8r*M*N(*0eOiF<@&o8WZ>X8Ul7=S)vR(MR#D0- z>^!OD=6j?osp6A?$A=4nB*_?QE?>3WZ8^A-Q#toR4Q&4z_uu}Rox1C3GEj8us0j2T z%&K%RgX_9~i|cx`To`GGJzak4d(~lp&!-FnTPD!%XxBgSxrOY-EQg%mnW(r{Yc z#c>3xGmoa&^v8lgvb%(G%EPV}m851TK-MjnGWw8 zA+AAR%SpmlyyVTki$Zy-3H3ypbl1ZKxA_7)X#a~Js}RUXTV`N~*N-o(?NSornx;xJ z`>Xx8dk#K@`ZoSe=TDv1_DVK%f&Ih+pScLI6%^!)Gzd^u^A{d@h!T=;OJ8OifS6 zT^kZdAPk+#j7<+oCIo?@mDE0@qqJDFR|S0#66tqdbdk^C)( zIG8;?BkY;))O8!in2O_LMs#EFKVig(8s)$5$tFn77*p@2NwNDjr@I$tbs>4@A_grS zV1W7b?J^1b#}P+hEI)VRmFx;w$#;8BZvvwcV?u^}(L7TqhHq0&J?*=Ru_pV>4?%x* 
zCdd~tQ14?dZ|)64X&2-dvdt?*NCjAhdcvL}q!L_CbJvQ*SS+Nq{&~EYlp(Knhvz0bsB!s+SAD&<7(+2|;ycNyr`9`n)hfu+t8 zjt-;OLJmhRI~aPc?(yH}9t5*&&0CHrStg9cn#P$>*pH{91saj!wAl zXKCxn+*i?Y+fpH}5KN=?5hhH?cRO3`HCWU5(){xZyX0V1Z>Xc;U~`ai(8f}IAN<^lUQRa8afPGd z;RFe(ZMxjQHl;zE@xy8NY@|`ooDx0Xi39nB%*VvGbG~tu zMmUBH`)`N;N#`HLNZ2oX1|7ji6A2If`18LPibZ3F&KYF#9I4sgm=x;|T?-evcMBA$ zI!ga??D}9ht*64bY)zV<0XC&Q~Nh+29~B&Rvm>?U#H^r`{5ZY>HD>}Ht*yq^6Bo%v6U^_jtS7pS%y0cWR-dJyh%saPSoTy)o?B~; zU;KFm#>?$G1B&#K9gG9nFYvs=M@LH}0yB#NLQ%vM ziYop$&yQsc$?~vd7rOqN2#l}I<%^NrhhRC9SLF?b-T1&RLFyIPY+M;Dd^A*q;`l(r zoWcE*Tk?j}YI;Ip|F#?apY_dE{`Weu%Gz&v{6Nt6;2BRppb~ncQAO1^o^cDPZkMnt zUyC&B6g3T#+_s46qt9jtl2W(uS5OZ!Z}azP;@QFBWn*dRLDwW`eXJCgvWuj(j7GMFDn$hnGc1 zly^C%sx3wBjs2JU+=1J?;&$W&pFW!PS!f#yYwCzS`R#Ta(pqENPx;MLx?{cB-2L5` zdOn{Da*Ba3R5#hJrG~Mlqw7@!De=M4Mou1Em%auKTS=I5fj$_>6kysi(wDq%J-4&>p#y;$b$07k9GaVoFG82P7xSfGAeyT@;*P zFL=j{egdvf>;ylMmsNT#wt~h&_W#%dCZyFXH8z6%fA&XERvo{{)-og)qffoEFzt8X zYO3^4L(=GrKaU_@5Kb)ch)T4LuqD%t-uGN7^i}@)jFnl_Ycmg_t~T?o6rqt=z#rmj zp>jFPMaQ6_mn`&BM^#AldwvK3p7^F(kq$Y$dtdm3x0&oi(ZOuSV zA*66VXB?vs@`PMq`_<=pFc}|J$CuDjp+VDW^4k|16P4WOX!`O~j;7$E!mnCI2F~A6 zkLC`0t+kQ96)#3Dk7+a#X^wjiC?clnn@hFgUyKw60L%12K? 
z!-`Q<{NmZRiFs&RSNZhWSn_<=XTz(HmZLQUz4BXz|E=sH;#D8!5e6 zp)l-lG_Hg75h=NY`o4nKl(3>rW*oJuAKS!wZ0wYpCHBZni8n8wn4ToAu>RT9TLiqI zSsPePjV5CRm4(klm^*Tzc9wQ3xYo}{fIYQxSX57pRPb8k$Ll)X zzW5zwzh$cv`U^nY1aR`V;9hN*s_^+?XMI zPbmggVkoDRs*=jX*IeW2-m1#W^7OD#A7b8xpbwU`Wa0N`b~y3w?)|dkzj`fOba)CdHbhR4v?b}AZ0ywP z*$`(=cC)mG7#{n z2f{+^df_2*DNs)V{(q*0i^>aOYgpwv}AKATEvfoYcs8J4R zzKU1zVa?x}URzFLKWsBIJ z@3wCe_{=|lk6WNG)TH#sq}`H@S>TUSX9Sy+Z|165xO&UFpa0^#f=AA?8xpU>kHlPa z!R1u#5OzplZ7sO!lub0^N6;kS;E>$1##4UUOL0s_uW67JR~qyyr-13iZ)Hg3uCpl6 ze-LZhnN`1Wuhz*ST3P@WltoR}urGDx5|j~Gx@?k0{HXkjbNT)8yK8(AUr~>nhhr0d z%2mU-6eMFMCOXm`d4-8mdfD6N2ItjBQNW=7_L1JOjNbQUhLPCZsD#|thl&FW9*l6 zOV#f0Y?Rzc%L$eFx_K1hZ(r*M3L%!=h?wQfFo9?BjVc-jEG`GSx}9hya@>Y)zlB>R zE*#22JUvEiO3jhS!6E&z_`TS5#3L7tmbNWHX&s~yjoSDeb2DRQ0)LyGjB2L5roN-S z$Y`9kti;>rl-dl`P11ID8ARTGsqXx*AtT-4e!Kj#KR=Vxm>HKmGkP zST%YSr&|+5Q~UbU;-?=x)exH(WaI8mtGCcCubn@SQ5Z2`BXOme3Xi@#4qmcdUPc!;;=}!XS0?xp z5u(I9hz+P<(G+AT4J?=z|~g7mjlnw!4+pr&OPoyA!d%EQa+jV@2;%--GhIW z!<+QtUu39vqKZcb&!26#AjQ@x9+gwY?knD%O8l|2n=!F8y#Ib^%#YePm(Q%HL3#Tc z1jq`m*Tp4yYvAN3m$AEz(e2DO(R^{KAN%qnctS^%0&f;lL!m;VIv%0-+AuOq*k~GUP2~GaVm$$ISDhivsnc*NSCy#=|3VIjq~??QQ?wsq12W?ERIG zr<=O2wObzELOE%8dXv7~&1=lU#d=jH{?VgyFQdfR+#_A8!eVF7#$a7e-*mTcu4|?F zm*~&l@UPerX5QVKU9;%4un(>rFrq2nF}oNcRgH~UM`D_|Ee(E?yDFF2&~h85>C1p> zk;qP%V2aHXe~q?mYCj>@EiM%L1;=v(uedVlh&~YLMDe3r-WzB}tX~NV_UDrRjfvCc zeIkThRrqW(UWF5BTHQG{)*r-+m&-(MJ#tb+1TRH(B=3GuwJ#tip4TCWkR6M#GdFYU zIo{y|sXL^IhKOYi|I@ z@J-xXaPSXSe3_&>vfL@dJ#8yt8vVhobDPSP)Z}EMjufjdt1EJDcH`44p6cXaPo@uo z`(>rf&YqA8Z$7dImyV0-+wj~MpXTrs+2hNUxsAG_RdaVST-n7_U6(f-<;P z!_)m0RJc(UP(2B3Ks=v*=5Jbht?apU=46DAHWoe(*ajA|> zyFVvf*|`&*QN{1^y1C&!{`=-)shbM!k-#ccQ2)46+kfCItr%7Q&{nOg*=Kl1uYrx5 zcr&E>M*UMstf-@#`Yroxi4DgqMhC5KG}hnR=02Og8+hnq?%Lcw?)Aj&ySmWJ-A_+< zpYFO0oUE`LbS*E7m>*70nL~%_e#D%{pC)ea-P(wLW>+@s!$ikO)4ToB6_UnQaHv=^qxWWEF`QamRh{& zD@zgGo{Vsh>sw|QGgtUls5sTRRG;Q4KO6Ig&uK~H;N17`h5DDS*BVwMiGHKD3tPwC zI(7#psSJTgg|DKr-;}M)to+(*$bJFu59A0+#AosMqC~WPuDYa-Lua4sH9zQ@Egf-r 
z``d$(p6UCk;+EQdPt6x+8vzEziFax&NMGwLfAZ6M>(|hfbnrz6XAgC(-(}t7)#c^! zTCCXHtSPpl@w!AG+lQM+i5+LUND*~4H;lw3I@u!7Osru3Hb?^0M0XK+D%++VmhNDG za5ff8?SdC6es;2JwO?6^`kb9Q<4ff>lM!!M!;hTuI?GnLw*Q-_>=KyOfbd)3%#!mm z7`D!G&HfO062X;HW2cjMyj5S{=Vs$)fS+>mI`s&qu%S7LwaTEy#>~we*ospg6*|$D zojRg|J63yqAdV7MB?uq&YpgB%{!X;fJojlTdq8`x#>>bNNUXNu`LvL{4(%_J4TrOZ zW#4w3EODnZ3iy0KYv0WJP3ZjJw$5B!H8rQ~J~vnbwZrM_C!@RD3Vcx&1*eZO2GZQ) z+$RJdPlM1Er;UA?Why+QnNV~LkcMSxLE@ANdC=GgO`jOvziZr@A<-xj(iKTm$P z5}G^>C&?A1p1LeNIh&y-yXNa(_`u7V07P^>s$zIcKgA!I?va9E_3LIV4GJL#a-c=|UeQ2}Z4y7fIR?D%3Z+w}hKQgSL2BbW!*) z`GJgK)B0Wd=d@kR*c!536q#A#lP}Fie9f31p7m5NvC=gwMI41H?oC;H#Wo?-r;Oi) z@<|OAoFsf~KI#yg)qgH%X8JuWzp03b-Si+hpGRQD3B;muyu6=nrx6x`c;>;n54f6- zelmL9hG)Z>rkde5_G1a#YdQK;6LD9b%1>>Lmgt-p9sb&vvDrsi$fXD;O%Z7vGIk;Q?$wH7_VIxtO)KRci&JlBT?j8XCL9v-=b4m8 zhYHBIj_x91mU9J6s|xRnZ8XEKN;Hx$_E44EL5ZdIBR$)eWG#4D8+ zwhtrU^vQ!O3nn3!wR`)q61#pzuX^0h-`WS$k?oYnW$s4OE?-GtHqi~gpYoY~j0oG# z{qDT8w;f(}5O@TeY`Fb1SjuV%<^8HHyLW74sec*UE0%zsvf0lu)7E`I-?&pN!#6XK z^|$`85qckvVm5!;102h<7HW1P58i9{z1#?d9Ez|QfisTC&>y=bPOERDkaQWZ%)mTAHl%uq9 zwCzJZKDK(uG8i0{1MYt#>1l+^tADQd2#{7!Wj`qYA`QDC5(hPBp{?8V;oDFz-!j6Z zTomFFRg(_S?q_5)nMzOR6Jb=Ks#`Xnn^1*HY2QzedE`hacf3edbwvBt@^f6p;e6F0 z>kD&~3Nk6DFwB!&ttFU{kA4M$^!$Rr=-PQtR-5|WsQ zB*>J9oR9EcotWVY6=CftM6c;aF+e7~ymK^G+ey<3f7?E^{u&ALu8I2i_@Tc@z#ZIPrF2DAx z3kX5Q67jfrF{CGRE+};>YD9Bciq9W^9YZf8327@esT<@5rl*&DbJXP-? 
zd?wU!^7>+1Oz283)k+O2SJgLvyk7eaJM0NC)1tJ$COolU@~e;Kv^6h%`S%FKfFh%2cAaH7FRF*(8-~i zc0RvtD*R_V%nth^OcJL3MT9i`?mZ~yH1eju+_S-+IB(M>%V6ENIakqI-AQr4w`LQ% z@oMMbTDwB|a;H^0(@sMpIwLhaKSa&Uq@=I;eiIUxo7z%dw|>V2n}u?-_vbt~f~ukx z7)Q}7(l`e_DYZl;RkTYpzB3$45B&I-iJ;zzHQiI1G}6DoAGHWNuO54{pLZio2QOu% zVoUO<1}CE(Zh0x`a9g@1`QKfJS`4@Nuh0HGE)l9ykK8Tnr{RB85D<}jGJ3S!%;zOw za^N3#Pk_H-Qits|y9W_gx)xg-7y(a=19U7op6GkIV$U$OvD}~mp1tSKFsN+g7vdM4?P-!oMTaqR|p-s76DyIXFN_c{V=V)E=jkK9k>gWX>~6t&c#F^yM+Iyj%1S*jTOh&LfR8M+y170uk^U6lTAo`=BomSpfmwV>jRz3=^&CqTE zcyhW~s1UdVEDlVn5yjC^317)fEdn35&_7c=-?!oVdBdWn%LrCA|6qs8i5WxBU3D_0 zH8yS%6?MTumC+dsH#Efjc$;$KT$;(l&V+d(D1DTnKA<|66J=V6z^``wyleH=rL)qb z{&sbXXQ6%QRC!g7@){D?$niA|bX!G-{-{m}ryDH0-^p4##mJW3ys8dm9B#BD`{foZ z??Fm6rCTIHzvr(NjpOKK(kw6yGPCLPLB3M=4* z3STK&)nESI^Hj6V&u3$#pV^tJ$T-V8NSbwIIp{h7iP@l*-PR0QRQ;TUbLSDV*B&qS zBVh?ysgvO_jWgC+l89B@&yeGtncOuLlaG|AX}0g+HCAa%LzNhdXhFE?@NkQHj0Qrj ztsfID3XMgRUrk9xX{B>F43pIT4p-``5T0yJc9DRB^EY2OiVJ%3i?!RPPk@3)F#HLz z@=3;+0sQGR0oB>)^zvJ*bhGnr-^m}8UZ=nUr`~e(MM$<*rahVv6rT zKP!AnP36+Cx~9P6oV{YJOVV_82?7k)5f}c$_^vJBurC_+xXaWeE#Ck5o9!k~_it#t zJtv2-LDLHq`SHktoIHQhoc13H4gq8>FK`e-D6B?we$Lk&&tXp8lt9w#n$lt`f)KZe z-NEfe^{KPWo7?CQBjyQ9X0GQiXFXlEshW1?o|%7YrJa=LPJ!X@alPOhZQ`L@U~pVx z50%^0yOWb>W}7(}Bzh3_5)*W(ZgBK~vOv)L9jWOkE{`hr{PQwxzxbNk-}c9bEwVx3 z=IE7=g;VBDBhrOQGC z6Y*;%(kq|MM}oXMD*R%z8m^?ru+6m1?6R`;nU^K*d1vViCq!x;%p#2qgsrv>ciU{n z;%L-B=0mRLO(5zvm-r^XY-!2`aiUj}?Erb{n0%eyY&?(pX&b{gBhR;G?a~&098-lr zbWLjNJsdUTJRSG$CXVaGLD8q+9zVr$I0E00aDM)~=jWSWpONn+%s!NN1XF9sc}!s9 zb{sNUi5cRXmB!o2vz*6@Q1lF*EjPZ(>#RTn(V>JfqsBSf8ym;psfr2}TB6EO%z5bf zb~CH_7JQ;AO55<8ym)W}`DO)li6{SKb?%Iv0ck$><0{$;=YBJD+eaAa;8)6bPC6}a zbNc}k7cDEl^xTY=mVgAZ4|WlC`l*l##Ge9JqQp>F^Yr4H9-AL=dls|fTU#=ExqKA; z(ZQ!BBkW-(s1U7jowT^9Lf7%QLXck@k!HZ{Zzw?i{L@{1Lp$m*P3E`w9D6gJ8ntxQ z0inv$X4)zdyROHpNAj=S-g>NK+~nyTa}5+$26{4Ae{#T)sYtl$5RN40rdIh>)-;N@ zJaJJogQsm5Uz+=^#7ZZ1#9WNLhoQ6aVRw2$9JEG{zM}h7=_*a6c}7hoF9byBcf-RZ zsDAUvroM|B=NZf$rOM$y{d!rfmqCv`@nNIOEQLqfNa~G^vYCmEva%1aKSAChxbVXF 
zU#U>0g_%C7xXJi8IWgXE3~S5ryQ?|$4a>*B<8%~oBAkdW$(=+mF%>^@&pL9-Sff*= z9)wIqEKQx^+krIJVtOj)gWBQ2*+w?U!L#8-yGGc?Ca_imR)znlf=v=psS^OOk3s)>@G6HCD(_v7N!y(y>G_turN}xK z6C@rpX%RQ57|iFtPZV|yU6a z+xq?s-^r2ExM^`5aZt~09X#*4`*XU6Z8Y!H3*9z2$ECP#;O1R0D#k;H7k$u$B*b$e z+6h|)dcogy)nbiCqbO)7Ba7%dWHh%!QgJOeH~E@#)f1==Jau}OcKoh)h@ltH46B`Z zCbj&buV1fv-dS(!UAFa^Lztopv8^e~jMv!(k$!5vs+ShS;?mB^^Za{&(R-UR zs%IbiHLyp(^?V=4@?T&I9qsrC;}OS#WZKc*yiqjr?h28_^6yHdS|zAs4aUyp3g#M zTl=xV=*1}$8duSjz3mu)LnW>|{eC@io?pr0Judcdd#oMC{Q||3f zJ(*_#{m^tW!s7RB*{UY*3J@#lEnW>)Z{fSMj@agG*!^d~F<6_;;5}dI_d9BAbuB=^ zt~+=+d?iHrKz=l{swC5MEeGIW=6$U~yLEvVH#WQXz=u65q~Tf+Z>qn7yi)&V8sfj8 z1HdKp$!s))98iNgVx}8ihHI5%6YOuh|8hTl@#i5FU>yq-F-`OVlb$@Y$9CX0-|`8Q zzuzP9nv~UQFxaS5w;Qq0F#NBj*6~3Q@26clbo84Vt)72Fnges21-wKFe_`PaE(78FiiY&tML=x0BvK?Fhc?84E zxnn50Fm3g{VQSih`Rqte3H)DOfYB(pR+^;9`78AHbQ`?k0O-L-9abu>ZU%N-MDfr; zLgd%orq399!am%D8Tr2KN;DBWwN}`x7ynT^x~uAj=nv-$Zql8(cG=-ztZ8(U zvfOdP4Lzl;j39o!47MQxz^v`9@4optnA5L@x)Wv3e`?rRSa?MsAOZCOSj~jY(l9kHChU%Dt22fsh3WRDfSfcV zci1<7sP_94l!w+|4qVUYHy|Hd8MI`$qoakhq`kZvz6n>C!3C`jQWBe40Dtyj9UtIF zNxD*RdLvv^c9R}HqSQ8DP9UFZXEed4!7#4>hH>K;Em_xkMP4J}gleZyHow_jgFoTp z0O#VQLyBdTkXl79M!qQpjDT@|sgDOiq!eID-IyX@Ybe$ESlP(9!}-OEG(0B)ZT3K> zZS~LsutCmFBuJ6DJJ#Jm+jQd-YA!O;9Psg=0L@8-$^1j;BG=&3-_!eaug{rMm8e>J zH|;QrCSm-?%dg((PhX-JnyP84-RwB{X`$5A6E}Gr-C6d=4pTz@AZ@pZGc4RKQ#JDD+WrXP41A-oKXcvRK831+Vcv|{~702 z=6~2&$kRy)?k^^URX#?;xA?dKjeU%PKwlyhHiNgu z{1v=4Vb`d?Ug~UyRN$FUJ&o3V=N}7Xhvjd3bmr1iYXBXu3}{(2hKqHta$lpwOlh5j z6=P`*rhg2tbp{1$xScW*d}%dZtYap_i7wKHjnU z4onoqNPlLFL?n6a@+nN%AXp`bRPJ!>ldm!zUHVx(*U9tB9i5Z2B$?M(V8LvcYMVi3JhK>RY8G zPd%TlNE*>2>cxZ@5(o)(Iuv<1uu^G?EXtJPD(w!ODiGeS7$ozSOTJ^on8wOT4WIOb zRmhVCEnw3rV024&9KZj;KbQJRdK9TB^knJxG85Me80~Mjs21=+4`I9gf8WQI z#62#ATB5Y($A|_erpO0J=E%pU#z;o;rUI~hu5Wxy#=s>%UcG{T5dDWd1q{N_;(-Pg+L(qa%4u7y#iowD)Y~Yp zub3SiwdNQdJ@&}2ytd+apRYC5h?o85!H0Jm6-D;%@aXBMOjv|*uU9?09k99uOOxcx z+#8cwD#OXY3myowIEk&EPn{(RyDB&PLd$tGVk`K}qRaWrpk@3Soxa+Ym5iE|{4Ap> 
zwxRX{Q?yfV75TnH6USE^2IdTtLc$D_eXuVu7Z^4y$4NpTXjOVE$H$;qLc-wP<)^7u ze}6p&|5v(ck}bWz0s>OL;p19=#ru}6a5oq!cQ-gs88^H@5ihtvG5o7w5+#w7?o}em zg7_VTSaxO_Q}M1Mtl@&%7D?;zvzoIqyPEUFM8Vrm!CWRlomH@Z>JDUnh&{47HD56D zgH*NJo|OO3KfzyKv(TV+QlL-7zokaa=)Q2*`jH6J#KGn+x6(J zqb!!VvKJ;qoHroPOi)1~9P!j~#oku0Rw^uUb&O>1XQ1kE{7S=VtdTVPIM8zc}&cuB{2UoITSSA^fRzlII&D4tR6N$yt~msjwl?w!)|fVFtGh)`oi z8gFt0F63oO5o3V#utB1bNzcVFJ)M3^>@{sx^T8biZCL^ObR@TvaPH7hY5~9G0TDfD zz3_b`Q-Ymrm+wyIjaIE5CCUq;tahF|2xj5_H@y3yL+AFMxtsI(-OMnrV%g1k{ch>t3z`mq8I2QA&t3pu)}FNLxO zy>BV=BPpNMk z1pP@0){3Rmec5pf^61eUvU?N_j%vpNq&y@zb}P}sflceT;w>BMs^0E>LI2UYr34ld z+|)(=xb61@NgPk>%!gabICTatyRoY(+f}YIGwa_lbsjZ4se4H`*9%4^Hwt?H(tGjs z`OPdp&E-I{i@jeJ=dPo1p)3|RArLBdl0Sp4GvXGVboLLQc`@OeQ27mgDc5W-a9=r% z#i<;QRb;}{!+Dd*WMb>>uv=!nlCk71>u>P2X+l!MVB%L+xbIbeLTB0mZUO|-gdO`g zM{-o;gI>6MH3g1K#aPktx}E(&D6y%P}Y^?#SO?uT=w7+mbycS+3qa$mNYi3 zdI>Bs2Qlrk%LbNkn~U(w-eVgs0cr?jUgoENYY-i!(8k!wva%0nM4xrj?cSpimQ|4t zCavmb$yg8w;gDI`we#AhsT16B6z9D$j{dyO_U6~K<%K?414DfWLJ433hmX7dY<}aq zdOyA#E`=S)hojz#1D%gH4Wku3L>Xt~hZ@()X0J34bHCJPDA0XKMLavd!}4nfE6YZv`f$F+6v_+KtBkZ- zEg^D>J3Hr#_n{H+^;wH*Php#Mq5Jp!Z+4)I`}W>~_SLkJ3f$;+uP8pLv9H(LEsgG% zI1q@~t3=e*ROA6IEmdMg``6+#r9JB8Q|k=~AcxU2W#l4VYuCauIl6oTda-lr9lLAa`!X?tn?#j%zL4M`c8LaXWBM z9QER{brEtrvbhQQ2zjsb?CohMdD~pouQ9O_73^O0@ygQgS}l$_sFM}*JD}*TM_s1h zu=u&?e24y|7d;6s^zur#liNMPYTz+3quj!8lv$KqbbOMdp9PvKd&3eAMTNX5b zumgcP&GQn!VhC#x*^3pu9oA~)FUp|Nim9T*X8!Xp6F*<}6sHg?>#_h5ddQDpRa4nJ+0_HE@H z8_wyyHt{=PQXpZG{eREWry3SQV|pKP34Fop_^z}r%>u>ic4wTg*F0cUQlyOg7KWonMs(fQ9wZ zFovB{vE+??th6dd&%w@2_i{aFvX=H-1 z@J}x0KlsQ?!R&Q+a!+X^4+E<}*+lmkZ+*+(uGSP$`Ef9tyQ1&~#;$np`ON>E zBs&*pReSds_~qqJBs)udZ#Vx7UI(#%t}_D&epnG!@mz&6Wgj*5=nU6XeWZ;igYs?p9TuK2Hi zI`|gjV*H-pGywt|W=f(BCSuArfgCjZiaXgv)Rn4rtLg9IULW22pTJptG?L%$^|1TE z=1`C?-HRv_QIyxVmq6vsRi5@r5m^|bU}<}El{oI^>Xa#t{e+GYU-AYX*b|@09!K3<>kNcFbrAD) zY_I<}pf}|mU2i)l4EZ_1z=6T$9hkKHZj4y+WX{L_!ZZSLP{I%8ZU}Uh~wZ8^bR8=;U47|Czk#SBoT2{$8 zh$*`d$7f;?8l)h&-CJKdlmr&~M_PC-&rbsvtX01@WMm=&aRv$@*DRE3B|oS94lt*h 
zG>qU7Dj@hz`dYt({1X4le~lIWj?A;55E#>>X{;$Lf9^ku4VvSClQQu<$|h51hlmu%=UvbvUoliSO^20QkyD3d8Vm4 zbAPSeISb=JZlGo+2AF4>{z@zl{4L`IwW!}k)7akbRmlx5$lkID&4Pgr=I@}w;Pkea znu&f+1|#39|N9mfHAA^i%!jqw9=oVLfKlJ!i{Y0#Mh9AO6spL$*m;oV#Xo@G5`4lJ6FVCZ<3-N|qD=9gz^SQvojs}}z#x=YMY z8~S>1|I^)7AMq6Ipo61d^5C&w*mW_sz8#_XbYk#~copm47W7|RxL_JGR=nox-1xxW z2$@GFPwacmPmjG33?jpfKc$CR*MXmtujI1Dzedc~;yEOju7{n=7%_L1{+f^1HBR#a zM?Bi&lxolw$Z8spa^b_jbX74RbLdgX1`k<>k?>aoaM2(}*dOF;Z zzOvs{sCROiJck>%^cJyY{`g*b9(uuKVz(AX%zGfqMUb%ZTF03X~r=%uRJl75l zSJ1w!x%N#n))+am{ENN}Vn8>0Uk@7M6}5+K5WRdU7aJldO22`ZuTU@SaC4W(ec&6~ zHLXC6#3Mi{H^N6kcg@T3mx!`3q=_MqQUebmlZXr)?EZND*KCC%X3nVb;Bl3aI84bZ zxkk|YmN^~dt&nxw@J+BE)aWNr?20cg-p=m#k@%9r#>}XA?g@c>?@4%GF04l6N3yHH zEzv={t{aYry9x~QdqKAOyvT&Q%KWVhRd>Cp8N!j-ksMZ>~Q_Yw$f4$ONfegLb1c>-yM#?>#Yn% zK0L&S%;Tcqypo-7c4szh3Wf78t2QE2ONS2w*iCsXpJa6E_GO=+e{`9N{|5ima!$q#N9rb^!#WmwY~gb|3ze$ZrxHojNcgg zf-E!*(IWYCJ9~WlvaWVmFn%C2?8Qj7YZqazZ}My(?kCGHM=I)->EY0*KRi&a*CFz_ z;aU=+WVtc2!?w`MH&uK1&(w zu8wT=OqSQ>tVDa{ZhjQ<%ADNlTR~O>abP);BiY}fF|-5M7bo>GWpYy1Q9`5oi{y>2 zRS_mV5wQiOHrBY6^@7V(lXiR+7I@t|2Kju@6p`yJSCddpaS9P7{2=+FYR-Bu%4I1Q zWKEbBhr^Y~wc}B}ga$Obw?NL5igh?t&*-3oe0Yv!1_|y}>sNm#k;Z6nV0v-s-LpTJ zwrag)8(cP~LXcO_I6P&&qrriXuC1MWZtDLjSX6vF7rk;wx=#ud!@; z7DGgo`{{hW{w$8T_3=o@_Xn_{j|=EZhSi*WSk_3;pPS`1f4)+s>_&IIORm{Wn__y%JD6ZBK&X4!!u8NSjAqv^8ETe zMd-CC-==(dW3l>WzDfAmRL+bYU2y4~wPVsHyKh(xAFPAP51Dnvs+bWVl`FTmj^6eo zfxgoy_sDU}@+fx8l595>t<+fLftomEU!4a8%6A!o-D4fsvYSIq8>S&Ukk_^Lc^e^QeScykEYrlOm!NzrDQ07`{!N@@#l_$as3W%oM(8+}&h zm1Q5Wk6o8v08JYD>y$Gu8<#r4NWtQmFj(~L8in^)m%Hyb58yv)_cj8VY)W`EYV^i51zQ&)$8-t zPGe+4n&!!l0@GmkeMW;b_1}1d>prlG$rbC(^2+J{3}UlPHno{*FsJl9hju zMMgh==FJQ~7kN~{;Leib;1^vt?hXOTFv+l09RDmTmt~F*ja+cHYhqS6D6VJ!u_k+~Ey}bu*GmZSU21kkCUkm=-&^`TPADKrvoY6}pu<(JK&qjX} zl4bwIK-9{HJ6?jBZ%yt_=rE1_a!_IB@`lS(Hd|`r`m@z_Zy(RSacg_ZD3}uRYFx`) z&aEdbWav_x*oWyBRpdOArt>;?G!-&S_GD?~sXH26qfgw|qEF%$BNmJK`9?HxQ;E>2 zb(E}aOONR3(JYc&JQ+9hWpqv6>(`teR8c7rCb{BMXBEiP+IZBOOSM@U96H+3(K^PT 
zy_>xk{<$h~C@?qUuhX;#jrGom=F^qR-@o%(OH35`yaG*>`n(yiX8uUFj*KvSKP}7^ zD(JDO9LZobN-iZSE$6c2L569NaKmZ~YI>72HgZ*t*Yo;j-ww|@@-6qJ6MN<5v~8tD z?mjfXFT!{+nJ96&^wnMWq_Ez4OaJ7aq2P8D$I`1M=xO`H#MDI`zemo?M5$N3l_hPHTeMRV7;wG*j!;WQig&iLHbd zEcq^HGV`!zF_~C(nd^HQyH);F-DrSM6C;f%q+1Iw8+J2)CkS&C<1(W9JRMCwT!G-*%Fpg4 z60;M&&_a@CxlANwu$QK0{DHwbAGMBA6Gi=`r4!Tk_C6P%5%NjpKTOLgmE6kX8e@*# z%H|p}E7ofqGh-%zIz{1FlPY?yD%Pwu+GTCLnYH#cjye0WzlOcTG!{@dhDg^BoK>JL zSn{gV@EOm+VZm+blGfh>&e=(KbTnJ3VM^1;#fMuL>X2QbIPvwn$yROKEb^}W#yi|G zsj~%(ud$a7Wa0>jT3PZCdcwJWRCWH%GDB`-L9e~_o4!fMCZC6rhTSJEBa7Ut$f+` zExwvqc3Gxo)mLPRdj^-D*uivkH@1d2j`ei`ypO&0K6TCt8UL7NBZ+j^!W))GxvvpS z&go}sy^Iv1rBRN)zX&Z7LgPHd1*M}5+3Ei2Xm6>N>0kPz4F)RC_z~Z|lLSSJDB}o+wY%<+bM4=1N43U@obFHJPT)?KqA6hw zaRK!^CMwQoR~H&nDJB5>|_j-_~IrmXg#*&dv*sQ*x^luaVe?F2dNw3zr(h zaKtwPI{vdSVDH?We|u?}{qfsU*Q$m>q;AzBe&wirbmvbf#nG=oX=FSumVuAAXVm8l z*i%N)cH`W$0plF9{?&c#DBu>)WBlft-4a$LaV!9u7(Gz87hI_3*C?uSvbNP zP>-rxrY_#cFwQYkra3?2hoQ749*(QcaOz?}J@rlq<#4CI5w~<7X&E#-K&8 z$Oof%#D-%zDtmHeM}dC*ulBNVTUArRbR-7e$QMD?=jTYxPs2d&EMR#=n({`E+ z9&ZDLOX*?=vqt>bpyX>|I+Aq^p|(maB%-JHl0|&(|WQd*C1{(m>S>8 z_?8pg=$_Lr+){Rnh6XGJ_jXe?dPT}*6sLJ^bOl$}97GJ5(3_Ldp2#>}9o`Jg9T9a6 z*Ty*O_w$%HI;kDbG%pS zdWqa#P7dja_&V_1vc^|(@_bpNR>_Eadwnq__mzL3=tOGwknsCv7`UN=^VvfSNjG(v zXYeG{sq@e^LOu;jebf>6$A*RMQzgGnN95=B%lK~MxQRa=`tc$r4=9+XA94&mUgcvM zJnm&n_7WIZbJgWVQ%C6wOQwld9wj?xb$P%&NZ+l$@~ORuX#e7cKo^_k@NCjlGFa!; zyo$YxDM_)*!A<@ok#`d%?s8TJp`$Rpg!YPC?F$L+ATN7U&uhbQAU zN8o)lr_AB9eg!X)-DNCq6HmIGV}vjM-XgB>Mpqe1!(-_zo_g$Lrbs5y3Yyl;OG-kV znaLt>RcrdX0?+Ql+wN@Ib;q%(HVrMQ-6hHD`oLKUa9mFKu`32b2k@4kZ|d%fDKtzk znVDI;Q*12JuLpfd|6^P@+4iT!h%=J0D|H}Z04*M0dfJPf^5zK4CW#uJtj`T31y1Kz zVBg&!mgDA`CIf1D2FpxQHOzM?D`PP%I@em5<~)#bVcQGn|M6%heta8lYH{cyYPF8u zootGyrA?#yeg@llF+{#}cJPaK`AVKP{m_Fnz=mXFy@+M3Dx|#fsGpqZ!Ot(puJ+3} z+%P)>CNMrp-q3Bq9h?^RuXo}!rX1=d5ynz~)7TU$moU5s8L~z*ix46^63* zYZm;QcAE;01$BEia&DFUa<{J)z61pv4i%Pl{ya=(j=C|`H3>tnLP~N7$L!tl-bdEUjq&aeVLxXW%7^4$l+O$IQ>h z-2CQ>_~U=iI#Pn|9lNt(J*))u1PuyBcBT|l`E`;3(ozUZv 
ztULZ&oagWX-S3*VJ?N|YTv6^^x|I?a^Qi<2(qV>{ z&U#E|CY6DBh3Bzx-G0?ZU&VS@O!RI<=WD{0M{}ReVa%If5l^P5o_~MJ@L2lux}yG1 zy*XhWo=h?Ur?fw~h+X$%=2FF=zN)wT5%s87el_+rX(n<=$=iB8p7OyHc69WHuu7 zf4V(W_~6GXV(hu|H90EqS9gFPtD$+mhPI32q?W6$j&n^Pr;ojFaZctNoqKJPy6vo< zRFo+?gGeKHI1Z7_sJ+x&)aS7yW_Q6Fg2LR*{3q&@lTGsYh8-9!gnea#MJTmzq3DBJI@sEG}N3Ba+iJ85OArQWZD(_s>x=;RVQinZN)D@kAnp3TA)E(w zm=^htywWUWn1k5uV6?!mH{rSGZRN}6Hy;U~Cv+2&QfC4%BM0mvHF}f9_BBo+$WBM6 z?+Fo>0y-T@Nrqo70#{6Ot+wS~Ou&&Y(BVMCPHeAc@j- zz31D0XWkdO*Sx0wVdY--o(x^semu`#lrTqO&o8iqFqd{5K8mmR(%gSEgL<`@xJ~V; zX}ZqIo4^j8oXoybtLe))iT(Eeoa)ft$Nwksa0!ui;=_B((q}u8X2`&OvusTbdu?rI zLhh|{nmt3Q)OP}Ltot-N#v@q%neoTTu7uQu4N*LDl{HIu+`mrgJIS92MSB176)}m; zk&x;=C&cMfo$E^anZ#yEUbWLRgo7W)6g5|C6kau17SZ^^H%vD|Own}_t2%RO@~7&& zH=~|>8+5eksr2v6n&VJHH+rym_nE%umyXb$XV$i5BiUy%JW{)7P&(@CdOAL`uJ1ln zi=y82W=R_w%dbU0JG`hWwBjH3nVAXXQmN7r6&##awSFyD8lar^doaV}+TMLg2{&m_ z^la!tkhM8Ls(y}B*rJMg_3QTsL!W{W;pFXHsv5-Hm}|+Z7WnUH;o7D8w8ylE1(Ytg|4{d&s~T zsu?Tq__cRjiHF&|-oY_&Qa#t`T@3Q8bN;pIbf_IR{<^d;>`%TGR1u9GJkv$YabltZ z-7xc+a=_E%;o#yd5;jzNwaOrAp0nCEGp|k&TLD5jg-Dq|13UTg2jJn!VpY%ngPpai zyJgEKyx3<>qX79rC*@S;g5VgJfVLyeY(#9~Xm6DQM`!ZePV>7^E;4co<+r8B%&492 zG5zly%wuyKQFcSfA?bG-1x%QE_;j)!&(SxB-PV@5wV>Z@G(KfyKnO;^HyfGPo`=%u zOPMa{)TIJ{CbmyC5Cp*!mXV`xqr4*s!bHC(j$Unh?Y4`xke9r`8< zrPm{(VMnotkyc%sOz1#&9_qWvju+~t7a(A%tLmCXH4}TSSEfnB_oh*EQa!fptbM7_ zvVB4EO{`VItxav_*`IB{GS@fHE|-_G=$3odju!4;y}mii#^kwaN6*&Cb1mx8;9=a! 
zUpB|&>0#)*FPwW)WW}^7ckS?A;YSKV;Rugj@-;Ih9sgT$Ph@Kf1#lmDKqnbw=UfMM z{uhC&2a)knrAsiAsX^bq#GKkEWoTNK)6$Esa^yGm=Zvd*HCbHA0SvW_sB#8=*}o#k zb{&=*%$7%;Rk_yq|tyz-vg z)VLib$QC_Qobs`Njby>A&!bn|@rqp2x!X8&Iw=}g$2>NRA0Oqjdv9W@@?!!BXUA*IC?M4bP7VWyg zSW{*L@{+rg5lsBz6Yth)U6?BmHwcpX@hJ)}MTA#`Y(>)9C(^cLXsNB-4$7ozj%GQy zhHFkdhe}$GSVcvoXJVhLyS|cA0kEVMhKPk%_6M^+tBZa3{_gHMHpWo z%ImQrX>(#x)j^KWZS3;Ia(s$TynV)$9+O&Is!SXYmTJ#m!;90d-A_R@;KuC6zXxeV zLPT|_6OQkDdtVsau@Zk^InpMag~eRdah~BQksYB%g zOYFGZ#4RSwl`Kfk{R9wCLI<9Ut6qQDWgx8ccUls<7WdS$CY?=inF`LWeI>R!`{gcd zZu?~T`k>!|#C--ZVL5$B6%Y12St-Ry>Dj=}Omo@V+2TbdOwj9|d10-50si0KH$_LV zAgqSw!K9~swT~P+ZGQeLl9iroDeTa2HBHM+HP+LOVX1r|}h4 zb(GBq`X{fBRA?uD6ii>h+h2Xpy@Hl0&8Y2WjyCiXuvLABevM0ESLL_!R!V%kEVp0e zI#uDD?K^j!TH)xYLx@YM|a;wuK~hdWRB zPvEh)%aIomd~GF__S=QAn*%;3pFc`Xkc=#yI?vD`4`usfbxtR%StpdFN(nn)45If+ zehsurat@73#-MD&6m3f3bJbfgQ!A+tH>_>W9--jmbg)Jza{L#*#cr#j;(|pk?ik72 zYlj+gJ}c`jtsK^-C6=4&-ls5W`Nn?5Az}08+QfF$rqBCGuSN)}saMKsX{(C!Ft<#! zi65GBxLy%?bgxi>2FamINB6J*P@tL-=&YJyLCulTbeP|^UAC${LhOgq)h@GRfl0lh zpo_bM;@w~&UuSN;`k(c38_f~rqT=NbEJYvgij_#CFNRaO3sg*{ej%fGpO8{MK+#hrVb_N$MqfXCYis?(VT~z3iSDAzJ?Hp;vzl#7q z2!aZjndqk{!0DAx!)+%A=r)`T0f#Wh!2~FN1z)>!K;@NT+2~r;*H+lw>aA{Vc z^N4srjc=xJhWgERf^t9kk{>3o%*7fs{505mldBM{EEs~U#MxXTOMph8P^HV)9+fy~BV5fQ&}scwIfZdUZpCS}wVG7mx1_(hh*AuO(BsmnDNWV`>>q~x zCNouW`(}L)6gSGYMdht-2RaHn1O*(^7>EhK zmvm%ZGYY`4i%(J|NhTsZVj)j~@NvR_1uN<>@_5778K>fpBoy%wf3hUaxzaiT;sz2V zX5xEa0O(OrPH=qW-(>#5Sk`i*tI>A!>YXI;p&{zAjuLkw3t_LZ0a~=@`cDj@%WzUd z+oCcV(JVU*&pSf30D}W=#M17s| zd`$G^81Qy43rl2gN3oAi*-vlyX>==~&UbOiJ_816je`tj2yZ_7`TLyFAqazwU(`>d z*zXd8DbyiD>BH;KkV&52gZ;l_H$7HA$AEN&CmHbb!Ft5Hv^ z{;z5OLFQs|CD9m&de-s6kVN1&CWXc@*{v%H95|W&r-l@Fw8>Kxbk=zq;M%_r1M;{G zx> zAD!Qx*#JMDF|x^-!8e!kdzMHy93KAtSz&T!fNg)7Uy%m;dP2(t27V_B_Z7F<5LNAf z=l)sKQ9!*(4>}oo!+3uBejM7MyHMuw z|7R5lf#iw*E2|(2E9;FOY%^hJ$NPE$@Zetm51{1y$5v!YgA{WLZ~ zmKEDbF9r`M76Ba*7J-=ClL+L>_;{J&9MSi@WZBm3WzbyTdHaFo?X zMx)P$i+c!56#)18>|2Fif2%uB3g7|@?j1@qIUK91YV+-Z#~<-BKo5I;0t4mXXX=BF 
z;3dRQ-aGttdU*5UuEH&4Dsb(nU5Q`Rqs3&Z#moa8sow%8ResdmJd_*{jDQ~Bh5lyN z4ISzAaF&Xh#?l5Gk!Iin$KS@^k89YtlF+!KknN%t3~pNg2%h+7_b*U1C){bk$vY33 zCC?@Dtp80MlBttQqhN3llBhuG8|oLiNy7HnF(TbSaSK27Tm(5pU6GfZict>1fa!&xmWr^BkOPcyK|6!|99vvS7NY z4<=i>2MV27XJqDIOklWgd&I2o6u6w*gC*Ym$O|Ojk+8=TJ&UGvh@!zUAMY8E~}2z>ng84&82p1)`g~@vDH*4R5M zqJ3~<0dTwtj;$OEKX(mpDbqoR2H;PO#g|ttG>-a7dSKPy%395$hjzk{#COtw$1wkP zkHEI)TL@ccXb{)K2c$+X7=_lqhO|bf{OSj1$Rc}shqHKOMJ6=oW9w;HrKmC8qll>F` diff --git a/unsloth/kernels/fast_lora.py b/unsloth/kernels/fast_lora.py index 26fecf8b59..f8f5967545 100644 --- a/unsloth/kernels/fast_lora.py +++ b/unsloth/kernels/fast_lora.py @@ -75,12 +75,12 @@ class LoRA_MLP(torch.autograd.Function): i = h @ W ### Backpropagation chain rule + See our blog post for more details + df = sigmoid(e) * (1 - f) + f dC/dW = h.T @ dY dC/dU = X.T @ (D @ W.T * f) dC/dG = X.T @ (D @ W.T * df * g) - dC/dX = (D @ W.T * f) @ U.T - + (D @ W.T * df * g) @ G.T ### Down projection LoRA weights dC/dAw = dC/dW @ B.T @@ -95,6 +95,8 @@ class LoRA_MLP(torch.autograd.Function): ### Gate projection LoRA weights dC/dAg = X.T @ (D @ W.T * df * g) @ B.T dC/dBg = A.T @ X.T @ (D @ W.T * df * g) + + Don't forget to see our blog post for more details! """ @staticmethod @torch.cuda.amp.custom_fwd @@ -141,13 +143,7 @@ def backward(ctx, dY : torch.Tensor): # DW_dfg = (D @ W.T * df * g) DW = matmul_lora(dY, downW.t(), downW_quant, downB, downA, downS) DW, e, g = swiglu_DWf_DW_dfg_kernel(DW, e, g) - h, DW_f, DW_dfg = DW, e, g # Inplace replacements - # se = torch.nn.functional.sigmoid(e) - # f = e * se - # h = f * g - # df = se * (1 - f) + f - # DW_f = DW * f - # DW_dfg = DW * df * g + h, DW_f, DW_dfg = DW, e, g # Down projection LoRA weights d_downA = h.t() @ (dY @ downB.t()) @@ -167,8 +163,8 @@ def backward(ctx, dY : torch.Tensor): d_gateA *= gateS d_gateB *= gateS - # dC/dX = (D @ W.T * f) @ (U.T + B.T @ A.T) - # + (D @ W.T * df * g) @ (G.T + B.T @ A.T) + # Final derivatives to backpropagate backwards. 
+ # See our blogpost for more details. # (D @ W.T * f) @ U.T upW = fast_dequantize(upW.t(), upW_quant) # (D @ W.T * f) @ (U.T + B.T @ A.T) @@ -176,9 +172,8 @@ def backward(ctx, dY : torch.Tensor): del upW dX += DW_f @ upB.to(dtype).t() @ (upS * upA.to(dtype).t()) - # (D @ W.T * f) @ (U.T + B.T @ A.T) + (D @ W.T * df * g) @ G.T + # And add the derivative for the gate projection gateW = fast_dequantize(gateW.t(), gateW_quant) - # (D @ W.T * f) @ (U.T + B.T @ A.T) + (D @ W.T * df * g) @ (G.T + B.T @ A.T) dX += DW_dfg @ gateW.t() del gateW dX += DW_dfg @ gateB.to(dtype).t() @ (gateS * gateA.to(dtype).t()) @@ -217,12 +212,12 @@ class LoRA_QKV(torch.autograd.Function): V = X @ Wv = X @ Wv + X @ Av @ Bv ### Backpropagation chain rule + See our blogpost for more details. + dC/dWq = X.T @ D(Wq) dC/dWk = X.T @ D(Wk) dC/dWv = X.T @ D(Wv) - dC/dX = D(Wq) @ Wq.T - + D(Wk) @ Wk.T - + D(Wv) @ Wv.T + We then sum them all find dC/dX ### Q projection LoRA weights dC/dAq = X.T @ D(Wq) @ B.T @@ -275,8 +270,7 @@ def backward(ctx, dQ, dK, dV): dtype = X.dtype ### Weight projection LoRA weights - # dC/dAq = X.T @ D(Wq) @ B.T - # dC/dBq = A.T @ X.T @ D(Wq) + # See our blogpost for more details. 
# Q Projection d_QA = X.t() @ (dQ @ QB.t()) @@ -296,24 +290,21 @@ def backward(ctx, dQ, dK, dV): d_VA *= VS d_VB *= VS - # d/dX - # dC/dX = D(Wq) @ Wq.T + # Combine derivatives to find dX + # dQ QW = fast_dequantize(QW.t(), QW_quant) - # D(Wq) @ (Wq.T + B.T @ A.T) dX = torch.matmul(dQ, QW.t(), out = X) del QW dX += (dQ @ QB.to(dtype).t() @ (QS * QA.to(dtype).t())) - # D(Wq) @ Wq.T + D(Wk) @ Wk.T + # dK KW = fast_dequantize(KW.t(), KW_quant) - # D(Wq) @ Wq.T + D(Wk) @ (Wk.T + B.T @ A.T) dX += dK @ KW.t() del KW dX += dK @ KB.to(dtype).t() @ (KS * KA.to(dtype).t()) - # D(Wq) @ Wq.T + D(Wk) @ Wk.T + D(Wv) @ Wv.T + # dV VW = fast_dequantize(VW.t(), VW_quant) - # D(Wq) @ Wq.T + D(Wk) @ Wk.T + D(Wv) @ (Wv.T + B.T @ A.T) dX += dV @ VW.t() del VW dX += dV @ VB.to(dtype).t() @ (VS * VA.to(dtype).t()) @@ -356,9 +347,6 @@ class LoRA_W(torch.autograd.Function): dC/dWq = X.T @ D(Wq) dC/dWk = X.T @ D(Wk) dC/dWv = X.T @ D(Wv) - dC/dX = D(Wq) @ Wq.T - + D(Wk) @ Wk.T - + D(Wv) @ Wv.T ### Q projection LoRA weights dC/dAq = X.T @ D(Wq) @ B.T @@ -392,21 +380,18 @@ def backward(ctx, dY : torch.Tensor): A, B = A.t(), B.t() batch, seq_len, hd = X.shape - dY = dY.reshape(-1, dY.shape[-1]) # .view doesn't work on non contiguous - X = X .reshape(-1, X .shape[-1]) # .view doesn't work on non contiguous + dY = dY.reshape(-1, dY.shape[-1]) # Must be reshape + X = X .reshape(-1, X .shape[-1]) # Must be reshape dtype = X.dtype ### Weight projection LoRA weights - # dC/dAq = X.T @ D(Wq) @ B.T - # dC/dBq = A.T @ X.T @ D(Wq) - # Weight projection d_A = X.t() @ (dY @ B.t()) d_B = (A.t() @ X.t()) @ dY d_A *= S d_B *= S - # dC/dX = D(Wq) @ Wq.T + # Get derivative for dX W = fast_dequantize(W.t(), W_quant) dX = dY @ W.t() del W diff --git a/unsloth/kernels/rms_layernorm.py b/unsloth/kernels/rms_layernorm.py index e8d6b36f15..2cf3acb928 100644 --- a/unsloth/kernels/rms_layernorm.py +++ b/unsloth/kernels/rms_layernorm.py @@ -27,6 +27,11 @@ def _rms_layernorm_forward( n_cols, eps, BLOCK_SIZE : 
tl.constexpr ): + """ + Fast RMS Layernorm kernel + Inspiration from a Triton tutorial: + https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html + """ row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols @@ -49,7 +54,6 @@ def _rms_layernorm_forward( @triton.jit def _rms_layernorm_backward( - #dX, dX_row_stride, dY, dY_row_stride, X, X_row_stride, W, W_row_stride, @@ -58,11 +62,15 @@ def _rms_layernorm_backward( n_cols, eps, BLOCK_SIZE : tl.constexpr, ): + """ + Fast RMS Layernorm kernel for the backward pass + Inspiration from a Triton tutorial: + https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html + """ row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols - #dX += row_idx * dX_row_stride + col_offsets dY += row_idx * dY_row_stride X += row_idx * X_row_stride r += row_idx * r_row_stride @@ -71,15 +79,13 @@ def _rms_layernorm_backward( X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) - # row_var = tl.sum(X_row * X_row, axis = 0) / n_cols - # inv_var = 1 / tl.sqrt(row_var + eps) + # Get saved row variance inv_var = tl.load(r).to(tl.float32) normed = X_row * inv_var dY_W = dY_row * W_row rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) - #tl.store(dX, output, mask = mask) tl.store(dY + col_offsets, output, mask = mask) pass @@ -92,9 +98,10 @@ def forward(ctx, X, W, eps): X = X.view(-1, dim) n_rows, n_cols = X.shape BLOCK_SIZE, num_warps = calculate_settings(n_cols) - Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") + Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") + _rms_layernorm_forward[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), @@ -120,10 +127,7 @@ def backward(ctx, dY): n_rows, 
n_cols = dY.shape dW = X - # dX = torch.empty_like(dY) - # dX = dY _rms_layernorm_backward[(n_rows,)]( - #dX, dX.stride(0), dY, dY.stride(0), X, X .stride(0), W, W .stride(0), @@ -133,9 +137,7 @@ def backward(ctx, dY): BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) - #dX = dX.view(*shape) dX = dY.view(*shape) - # X, W, eps return dX, None, None pass pass diff --git a/unsloth/kernels/rope_embedding.py b/unsloth/kernels/rope_embedding.py index 99a7a50f47..2bf7c1b272 100644 --- a/unsloth/kernels/rope_embedding.py +++ b/unsloth/kernels/rope_embedding.py @@ -28,42 +28,35 @@ def _rope_embedding( BACKWARD_PASS: tl.constexpr, BLOCK_SIZE : tl.constexpr, ): + """ + Calculates the RoPE Embedding quickly + RoPE is Q * cos + rotate_half(Q) * sin + See our blog post for more info + """ row_position = tl.program_id(0) head_position = tl.program_id(1) col_offsets = tl.arange(0, BLOCK_SIZE) half_head_dim = head_dim // 2 mask = col_offsets < half_head_dim - # TODO: Fixup int32 locations to int64 - rot_position = row_position % seqlen - - Q += row_position* Q_row_stride + head_position*head_dim - cos += rot_position*cos_row_stride - sin += rot_position*sin_row_stride - - Q1 = tl.load(Q + half_head_dim*0 + col_offsets, mask = mask, other = 0) - sin1 = tl.load(sin + half_head_dim*0 + col_offsets, mask = mask, other = 0) - cos1 = tl.load(cos + half_head_dim*0 + col_offsets, mask = mask, other = 0) - - Q2 = tl.load(Q + half_head_dim*1 + col_offsets, mask = mask, other = 0) - # RoPE repeats sin and cos so 128 = [64, 64]. 
+ Q1 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*0 + col_offsets, mask = mask, other = 0) + Q2 = tl.load(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*1 + col_offsets, mask = mask, other = 0) + sin1 = tl.load(sin + (row_position % seqlen)*sin_row_stride + \ + half_head_dim*0 + col_offsets, mask = mask, other = 0) + cos1 = tl.load(cos + (row_position % seqlen)*cos_row_stride + \ + half_head_dim*0 + col_offsets, mask = mask, other = 0) if BACKWARD_PASS: - """ - Q * cos + rotate_half(Q) * sin - is equivalent to - Q * cos + Q @ R * sin - where R is a rotation matrix [ 0, I] - [-I, 0] - dC/dY = dY * cos + dY @ R.T * sin - where R.T is again the same [ 0, -I] - but the minus is transposed. [ I, 0] - """ + # See our blog post for more info. sin1 = -sin1 - - # RoPE repeats sin and cos so 128 = [64, 64]. - tl.store(Q + half_head_dim*0 + col_offsets, Q1*cos1 - Q2*sin1, mask = mask) - tl.store(Q + half_head_dim*1 + col_offsets, Q2*cos1 + Q1*sin1, mask = mask) + pass + + tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*0 + col_offsets, Q1*cos1 - Q2*sin1, mask = mask) + tl.store(Q + row_position*Q_row_stride + head_position*head_dim + \ + half_head_dim*1 + col_offsets, Q2*cos1 + Q1*sin1, mask = mask) pass @@ -90,7 +83,7 @@ def forward(ctx, Q, cos, sin): ) ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps - ctx.cos = cos # Don't need save_for_backward since a view + ctx.cos = cos ctx.sin = sin return Q.view(batch, seq_len, n_heads, head_dim) pass @@ -99,8 +92,7 @@ def forward(ctx, Q, cos, sin): def backward(ctx, dY): batch, seq_len, n_heads, head_dim = dY.shape dY = dY.reshape(batch*seq_len, n_heads*head_dim) - # Cannot be .view since the problem lies with dK since - # K.T's strides are incorrect. 
+ # Must be reshape not view n_rows, n_cols = dY.shape cos = ctx.cos @@ -122,10 +114,8 @@ def backward(ctx, dY): def fast_rope_embedding(Q, K, cos, sin): - # We need (batch, [seqlen, n_heads], head_dim) Q = Fast_RoPE_Embedding.apply(Q.transpose(1, 2), cos, sin).transpose(1, 2) K = Fast_RoPE_Embedding.apply(K.transpose(1, 2), cos, sin).transpose(1, 2) - # We need (batch, [n_heads, seqlen], head_dim) return Q, K pass @@ -155,7 +145,6 @@ def backward(ctx, dY): cos, sin = ctx.saved_tensors # Q * cos + rotate_half.T(Q) * sin half = dY.shape[-1]//2 - # We reverse the minus sign for R.T RH_dY = torch.cat((dY[..., half:], -dY[..., :half]), dim = -1) dY *= cos RH_dY *= sin diff --git a/unsloth/kernels/swiglu.py b/unsloth/kernels/swiglu.py index 63418a82b8..037dcda84f 100644 --- a/unsloth/kernels/swiglu.py +++ b/unsloth/kernels/swiglu.py @@ -28,12 +28,11 @@ def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,): g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) # f = e * sigmoid(e) - # https://github.com/openai/triton/issues/241 exp MUST be done in f32 - # or else Triton crashes f_row = e_row / (1 + tl.exp(-e_row)) # h = f * g h_row = f_row * g_row + # Store h tl.store(h + offsets, h_row, mask = mask) pass @@ -59,23 +58,20 @@ def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,): g_row = tl.load(g + offsets, mask = mask, other = 0).to(tl.float32) # f = e * sigmoid(e) - # https://github.com/openai/triton/issues/241 exp MUST be done in f32 - # or else Triton crashes se_row = 1 / (1 + tl.exp(-e_row)) + # f = e * se f_row = e_row * se_row # h = f * g h_row = f_row * g_row - # df = se * (1 - f) + f # DW_f = DW * f DWf_row = DW_row * f_row - # DW_dfg = DW * df * g - # DW_dfg = DW * (se * (1 - f) + f) * g # DW_dfg = DW * (se*(g - h) + h) DW_dfg_row = DW_row * (se_row*(g_row - h_row) + h_row) - tl.store(DW + offsets, h_row, mask = mask) # h - tl.store(e + offsets, DWf_row, mask = mask) # DW * f - tl.store(g + offsets, DW_dfg_row, mask = 
mask) # DW * df * g + # Store derivatives in buffers + tl.store(DW + offsets, h_row, mask = mask) + tl.store(e + offsets, DWf_row, mask = mask) + tl.store(g + offsets, DW_dfg_row, mask = mask) pass @@ -84,5 +80,5 @@ def swiglu_DWf_DW_dfg_kernel(DW, e, g): n_elements = e.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) _DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,) - return DW, e, g # h, DW * f, DW * df * g + return DW, e, g pass diff --git a/unsloth/kernels/utils.py b/unsloth/kernels/utils.py index 34906a5a1a..8a7722fabd 100644 --- a/unsloth/kernels/utils.py +++ b/unsloth/kernels/utils.py @@ -13,12 +13,11 @@ # limitations under the License. import triton -MAX_FUSED_SIZE = 65536 # 2**16 Solves https://github.com/unslothai/unsloth/issues/7 +MAX_FUSED_SIZE = 65536 next_power_of_2 = triton.next_power_of_2 def calculate_settings(n): BLOCK_SIZE = next_power_of_2(n) - # CUDA only supports 65536 - 2^16 threads per block if BLOCK_SIZE > MAX_FUSED_SIZE: raise RuntimeError(f"Cannot launch Triton kernel since n = {n} exceeds "\ f"the maximum CUDA blocksize = {MAX_FUSED_SIZE}.") diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 95926a14b8..d9e5686be5 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -20,6 +20,7 @@ warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch") import bitsandbytes as bnb from transformers.models.llama.modeling_llama import logger +from transformers import AutoTokenizer from platform import system as platform_system platform_system = platform_system() @@ -115,24 +116,56 @@ def patch_tokenizer(model, tokenizer): pass -def check_tokenizer(model, tokenizer): +def check_tokenizer( + model, + tokenizer, + model_name = "unsloth/llama-2-7b-bnb-4bit", + model_max_length = 4096, + padding_side = "right", + token = None, + _reload = True, +): # Checks tokenizer for out of bounds ids. 
# Mainly a fix for https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha # where had token id=32002. # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25 - special_tokens_map = tokenizer.special_tokens_map - max_embedding_size = model.model.embed_tokens.weight.shape[0] + # Seems like the Fast tokenizer in Rust breaks things! - for token_name, token_content in special_tokens_map.items(): - if type(token_content) is not str: continue - token_ids = tokenizer([token_content], add_special_tokens = False, return_attention_mask = False) - token_ids = token_ids.input_ids[0][0] - if token_ids < 0 or token_ids >= max_embedding_size: - raise RuntimeError( - f"Unsloth: Extra special token `{token_content}` with id={token_ids} exceeds "\ - f"the maximum vocabulary size of {max_embedding_size}. You must fix the tokenizer "\ - "or else out of bounds memory accesses will occur." + max_embedding_size = model.model.embed_tokens.weight.shape[0] + added_tokens_fast = tokenizer.added_tokens_decoder + added_tokens_fast = {index : str(value) for index, value in added_tokens_fast.items()} + sorted_keys = sorted(added_tokens_fast) + added_tokens_fast = {key : added_tokens_fast[key] for key in sorted_keys} + + for j, index in enumerate(added_tokens_fast.keys()): + if index >= max_embedding_size: + bad_indices = list(added_tokens_fast.keys ())[j:] + bad_tokens = list(added_tokens_fast.values())[j:] + if not _reload: + raise RuntimeError( + f"Unsloth tried to load `{model_name}`, but cannot succeed.\n"\ + f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ + f"Fix your tokenizer since it'll perform out of bounds memory accesses." + ) + # Try slow tokenizer which can fix things! 
+ tokenizer = AutoTokenizer.from_pretrained( + model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + use_fast = False, + ) + return check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = model_max_length, + padding_side = padding_side, + token = token, + _reload = False, ) + break pass pass + return tokenizer pass diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 3e6aa36099..88c095265c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -135,8 +135,6 @@ def LlamaAttention_fast_forward_inference( Vn = torch.cat([V1, Vn], dim = 2) # Grouped query attention - # K = repeat_kv(K, n_groups) - # V = repeat_kv(V, n_groups) if n_groups != 1: _, _, cached_len, _ = Kn.shape Knn = Kn[:, :, None, :, :].expand(bsz, n_kv_heads, n_groups, cached_len, head_dim) @@ -210,7 +208,6 @@ def LlamaAttention_fast_forward( pass if past_key_value is not None: - # reuse k, v, self_attention K = torch.cat([past_key_value[0], K], dim = 2) V = torch.cat([past_key_value[1], V], dim = 2) past_key_value = (K, V) if use_cache else None @@ -219,7 +216,6 @@ def LlamaAttention_fast_forward( if (not HAS_FLASH_ATTENTION): # Xformers memory efficient attention # Also has Flash Attention v2 dispatching - # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) @@ -231,25 +227,18 @@ def LlamaAttention_fast_forward( K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) if hidden_states.requires_grad: - # Xformers does not support backward, so we have to convert - # GQA to MQA by cloning K and V - K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made - V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + K = K.reshape(bsz, q_len, n_heads, head_dim) + V = V.reshape(bsz, q_len, n_heads, head_dim) 
else: - # Xformers does support the forward pass though Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) pass A = xformers_attention(Q, K, V, attn_bias = causal_mask) A = A.view(bsz, q_len, n_heads, head_dim) elif HAS_FLASH_ATTENTION: - # Flash Attention - # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) - - # Flash Attention v2 auto supports grouped query attention A = flash_attn_func(Q, K, V, causal = True) else: # Grouped query attention @@ -714,7 +703,14 @@ def from_pretrained( internal_model.max_seq_length = max_position_embeddings # We check the tokenizer first for errors - check_tokenizer(model, tokenizer) + tokenizer = check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = max_seq_length, + padding_side = "right", + token = token, + ) return model, tokenizer pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index bb6a68f59e..3e3d5956cc 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -92,28 +92,23 @@ def MistralAttention_fast_forward( # Attention module if (not HAS_FLASH_ATTENTION): # Xformers memory efficient attention - # Also has Flash Attention v2 dispatching - # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) M = bsz * q_len - has_sliding_window = isinstance(causal_mask, xformers.attn_bias.BlockDiagonalCausalMask) + has_swa = isinstance(causal_mask, xformers.attn_bias.BlockDiagonalCausalMask) # Group query attention - # if n_groups != 1: K = K .view(bsz, q_len, n_kv_heads, 1, head_dim) V = V .view(bsz, q_len, n_kv_heads, 1, head_dim) K = K.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) V = V.expand(bsz, q_len, n_kv_heads, n_groups, head_dim) if hidden_states.requires_grad: - # Xformers does not support backward, so we have to convert - # GQA 
to MQA by cloning K and V - K = K.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made - V = V.reshape(bsz, q_len, n_heads, head_dim) # A copy will be made + K = K.reshape(bsz, q_len, n_heads, head_dim) + V = V.reshape(bsz, q_len, n_heads, head_dim) - if has_sliding_window: + if has_swa: Q = Q.view(1, M, n_heads, head_dim) K = K.view(1, M, n_heads, head_dim) V = V.view(1, M, n_heads, head_dim) @@ -122,7 +117,7 @@ def MistralAttention_fast_forward( # Xformers does support the forward pass though Q = Q.view(bsz, q_len, n_kv_heads, n_groups, head_dim) - if has_sliding_window: + if has_swa: Q = Q.view(1, M, n_kv_heads, n_groups, head_dim) K = K.view(1, M, n_kv_heads, n_groups, head_dim) V = V.view(1, M, n_kv_heads, n_groups, head_dim) @@ -133,16 +128,12 @@ def MistralAttention_fast_forward( A = A.view(bsz, q_len, n_heads, head_dim) elif HAS_FLASH_ATTENTION: - # Flash Attention - # (batch_size, n_heads, seq_len, head_dim) -> (batch_size, seq_len, n_heads, head_dim) Q = Q.transpose(1, 2) K = K.transpose(1, 2) V = V.transpose(1, 2) - - # Flash Attention v2 auto supports grouped query attention - sliding_window = getattr(self.config, "sliding_window") - sliding_window = q_len if sliding_window is None else sliding_window - window = (-1, -1) if (q_len <= sliding_window) else (sliding_window, sliding_window) + sw = getattr(self.config, "sliding_window") + sw = q_len if sw is None else sw + window = (-1, -1) if (q_len <= sw) else (sw, sw) A = flash_attn_func(Q, K, V, causal = True, window_size = window) else: # Grouped query attention @@ -317,7 +308,7 @@ def from_pretrained( tokenizer = AutoTokenizer.from_pretrained( model_name, model_max_length = max_seq_length, - padding_side = "right", # MUST be right or else attention fails! 
+ padding_side = "right", token = token, ) @@ -339,6 +330,16 @@ def from_pretrained( internal_model = internal_model.model pass internal_model.max_seq_length = max_position_embeddings + + # We check the tokenizer first for errors + tokenizer = check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = max_seq_length, + padding_side = "right", + token = token, + ) return model, tokenizer pass pass From b7cc0f119fdde14ada0817fc28ae3f7bcad4ca5f Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Fri, 5 Jan 2024 17:48:21 +1100 Subject: [PATCH 0094/1088] Update README.md (#63) --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index f24487f79c..5d236119d2 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,30 @@
    - - + +
    ## Finetune Mistral, Llama 2-5x faster with 50% less memory! | Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| **Free** Llama | **Free** Mistral | A100 Colab | **Free** Kaggle A | -| A100 Colab | A100 Colab | (59 more examples below) | **Free** Kaggle B | +| ⭐**Free!** Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | ⭐**Free!** Mistral + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 🦥A100 Code Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | ⭐**Free!** Alpaca [Run free Kaggle notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) +| 🦥A100 Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | 🦥A100 Mistral + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 50+ more examples below! | ⭐**Free!** Slim Orca [Run free Kaggle notebook](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | -* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. **Free** DPO example [More info](#DPO) on DPO -* **NEW!** [TinyLlama 1.1b](https://github.com/jzhang38/TinyLlama) on 3T tokens! **Free** example +* **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. ⭐**Free!** DPO Zephyr, Mistral example! [More info](#DPO) on DPO +* **NEW!** [TinyLlama 1.1b](https://github.com/jzhang38/TinyLlama) on 3T tokens! ⭐**Free!** example * **NEW!** We're in 🤗 Huggingface's official docs! 
We're on the [SFT docs](https://huggingface.co/docs/trl/main/en/sft_trainer#accelerate-fine-tuning-2x-using-unsloth) and the [DPO docs](https://huggingface.co/docs/trl/main/en/dpo_trainer#accelerate-dpo-fine-tuning-using-unsloth)! * Supports Llama, Yi, Mistral, CodeLlama, Qwen (llamafied), Deepseek and their derived models (Open Hermes etc). * All kernels written in [OpenAI's Triton](https://openai.com/research/triton) language. **Manual backprop engine**. * **0% loss in accuracy** - no approximation methods - all exact. -* No change of hardware necessary. Supports NVIDIA GPUs since 2018+. Minimum CUDA Compute Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070 and 1080 works, but is a bit slow! +* No change of hardware. Supports NVIDIA GPUs since 2018+. Minimum CUDA Capability 7.0 (V100, T4, Titan V, RTX 20, 30, 40x, A100, H100, L40 etc) [Check your GPU!](https://developer.nvidia.com/cuda-gpus) GTX 1070, 1080 works, but is slow. * Works on **Linux** and **Windows** via WSL. * **NEW!** Download 4 bit models 4x faster from 🤗 Huggingface! Eg: `unsloth/mistral-7b-bnb-4bit` * Supports 4bit and 16bit QLoRA / LoRA finetuning via [bitsandbytes](https://github.com/TimDettmers/bitsandbytes). * **NEW!** Want a UI for finetuning? Try [Llama-Factory](https://github.com/hiyouga/LLaMA-Factory) and use `--use_unsloth`! * Open source trains 5x faster - see [Unsloth Pro](https://unsloth.ai/) for **30x faster training**! 
-| 1 A100 40GB | Hugging Face | Flash Attention | Unsloth Open Source | [Unsloth Pro](https://unsloth.ai/pricing) | +| 1 A100 40GB | 🤗 Hugging Face | Flash Attention | 🦥 Unsloth Open Source | [🦥 Unsloth Pro](https://unsloth.ai/pricing) | |--------------|--------------|-----------------|---------------------|-----------------| | Alpaca | 1x | 1.04x | 1.98x | **15.64x** | | LAION Chip2 | 1x | 0.92x | 1.61x | **20.73x** | @@ -34,7 +34,7 @@ Join our [Discord](https://discord.gg/nsS4V5Z6ge)! -If you trained a model with Unsloth, we made a cool sticker if you want to use it! +If you trained a model with 🦥 Unsloth, we made a cool sticker if you want to use it! # Installation Instructions - Conda Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. From f1a4849bb753ffe91eface397ec7cfc7f3f6b1ff Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 6 Jan 2024 04:13:15 +1100 Subject: [PATCH 0095/1088] Update README.md (#64) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5d236119d2..7279dbebc3 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,9 @@ ## Finetune Mistral, Llama 2-5x faster with 50% less memory! 
| Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| -| **2.2x faster, -43% VRAM** | **2.2x faster, -62% VRAM** | **1.9x faster, -27% VRAM** | **5.5x faster, -44% VRAM** | -| ⭐**Free!** Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | ⭐**Free!** Mistral + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 🦥A100 Code Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | ⭐**Free!** Alpaca [Run free Kaggle notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) -| 🦥A100 Llama + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | 🦥A100 Mistral + Alpaca [Run Colab notebook](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 50+ more examples below! 
| ⭐**Free!** Slim Orca [Run free Kaggle notebook](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | +| **2.2x faster 43% less VRAM** | **2.2x faster 62% less VRAM** | **1.9x faster 27% less VRAM** | **5.5x faster 44% less VRAM** | +| [⭐Llama **free** Colab 2x faster](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing") | [⭐Mistral **free** Colab 2x faster](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | [CodeLlama A100 Colab notebook](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | [⭐Kaggle **free** Alpaca notebook](https://www.kaggle.com/danielhanchen/unsloth-alpaca-t4-ddp) +| [Llama A100 Colab notebook](https://colab.research.google.com/drive/1YIPY_18xm-K0iJDgvNkRoJsgkPMPAO3G?usp=sharing) | [Mistral A100 Colab notebook](https://colab.research.google.com/drive/1SKrKGV-BZoU4kv5q3g0jtE_OhRgPtrrQ?usp=sharing) | 50+ more examples below! | [⭐Kaggle **free** Slim Orca notebook](https://www.kaggle.com/danielhanchen/unsloth-slimorca-t4-ddp) | * **NEW!** [DPO](https://arxiv.org/abs/2305.18290) support. ⭐**Free!** DPO Zephyr, Mistral example! [More info](#DPO) on DPO * **NEW!** [TinyLlama 1.1b](https://github.com/jzhang38/TinyLlama) on 3T tokens! 
⭐**Free!** example From 3fc1305d757464485736c6aa7881c319de8734fa Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 6 Jan 2024 04:14:38 +1100 Subject: [PATCH 0096/1088] Update README.md (#65) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7279dbebc3..b1c08d47de 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ From d17202869d25e6119fa4df9cb7caa4cb6eb92e76 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sat, 6 Jan 2024 19:13:39 +1100 Subject: [PATCH 0097/1088] Fix tokenizer, bias, dropout supported for LoRA (#69) * Fix tokenizer, dropout, bias for LoRA * Update loader.py --- README.md | 13 +++-- unsloth/models/_utils.py | 37 ++++++++++++ unsloth/models/llama.py | 122 +++++++++++++++++++++------------------ unsloth/models/loader.py | 8 +++ 4 files changed, 117 insertions(+), 63 deletions(-) diff --git a/README.md b/README.md index b1c08d47de..d93bd0090c 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Do **NOT** use this if you have Anaconda. You must use the Conda install method, ```python import torch; torch.version.cuda ``` -2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. +2. For Pytorch 2.1.0: You can update Pytorch via Pip (interchange `cu121` / `cu118`). Go to https://pytorch.org/ to learn more. Select either `cu118` for CUDA 11.8 or `cu121` for CUDA 12.1. If you have a RTX 3060 or higher (A100, H100 etc), use the `"ampere"` path. For Pytorch 2.1.1: got to step 3. 
```bash pip install --upgrade --force-reinstall --no-cache-dir torch==2.1.0 triton \ --index-url https://download.pytorch.org/whl/cu121 @@ -118,8 +118,8 @@ model = FastLanguageModel.get_peft_model( target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 16, - lora_dropout = 0, # Currently only supports dropout = 0 - bias = "none", # Currently only supports bias = "none" + lora_dropout = 0, # Supports any, but = 0 is optimized + bias = "none", # Supports any, but = "none" is optimized use_gradient_checkpointing = True, random_state = 3407, max_seq_length = max_seq_length, @@ -174,8 +174,8 @@ model = FastLanguageModel.get_peft_model( target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 64, - lora_dropout = 0, # Currently only supports dropout = 0 - bias = "none", # Currently only supports bias = "none" + lora_dropout = 0, # Supports any, but = 0 is optimized + bias = "none", # Supports any, but = "none" is optimized use_gradient_checkpointing = True, random_state = 3407, max_seq_length = max_seq_length, @@ -209,7 +209,8 @@ dpo_trainer.train() # Future Milestones and limitations 1. Support Mixtral. -2. Does not support non Llama models - we do so in the future. +2. Supports all Mistral, Llama type models, but some are unoptimized (Qwen with biases) +3. Dropout, bias in LoRA matrices are supported, just not optimized. 
# Performance comparisons on 1 Tesla T4 GPU: **Time taken for 1 epoch** diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index d9e5686be5..2f882cc7c3 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -141,12 +141,49 @@ def check_tokenizer( if index >= max_embedding_size: bad_indices = list(added_tokens_fast.keys ())[j:] bad_tokens = list(added_tokens_fast.values())[j:] + if not _reload: + # Try removing the token + added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()] + special_tokens = tokenizer.special_tokens_map + import itertools + special_tokens = frozenset( + itertools.chain.from_iterable( + [x] if type(x) is str else x for x in special_tokens.values() + ) + ) + can_be_removed1 = [x for x in bad_tokens if x not in special_tokens] + can_be_removed2 = [x for x in can_be_removed1 if x in tokenizer._added_tokens_encoder.keys()] + + # Check of extra tokens can in fact we removed! + + if (len(can_be_removed1) == len(bad_tokens)) and \ + (len(can_be_removed2) == len(bad_tokens)): + # Yes it can be fixed! + for bad_token in can_be_removed1: + remove_id = tokenizer._added_tokens_encoder[bad_token] + del tokenizer._added_tokens_decoder[remove_id] + del tokenizer._added_tokens_encoder[bad_token] + pass + # Confirm 1 more time! + if max(tokenizer.added_tokens_decoder.keys()) < max_embedding_size: + logger.warning_once( + f"Unsloth loaded a broken tokenizer `{model_name}`, but managed to repair it!\n"\ + f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ + "We removed these bad tokens. If you think this is incorrect, fix your tokenizer first." + ) + return tokenizer + pass + pass + + # :( Failure raise RuntimeError( f"Unsloth tried to load `{model_name}`, but cannot succeed.\n"\ f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"\ f"Fix your tokenizer since it'll perform out of bounds memory accesses." 
) + pass + # Try slow tokenizer which can fix things! tokenizer = AutoTokenizer.from_pretrained( model_name, diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index 88c095265c..e2361d389c 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -777,9 +777,15 @@ def get_peft_model( assert(max_seq_length <= model.max_seq_length) if lora_dropout != 0: - raise TypeError("Unsloth: Fast model patching only works with dropout = 0.") + logger.warning_once( + f"Unsloth: Dropout = 0 is supported for fast patching. You are using dropout = {lora_dropout}.\n"\ + f"Unsloth will patch all other layers, except LoRA matrices, causing a performance hit." + ) if bias != "none": - raise TypeError("Unsloth: Fast model patching only works with bias = 'none'.") + logger.warning_once( + f"Unsloth: bias = `none` is supported for fast patching. You are using bias = {bias}.\n"\ + f"Unsloth will patch all other layers, except LoRA matrices, causing a performance hit." + ) transformers_set_seed(random_state) @@ -795,8 +801,8 @@ def get_peft_model( r = r, lora_alpha = lora_alpha, target_modules = target_modules, - lora_dropout = 0, - bias = "none", + lora_dropout = lora_dropout, + bias = bias, task_type = TaskType.CAUSAL_LM, layers_to_transform = layers_to_transform, **kwargs, @@ -813,62 +819,64 @@ def get_peft_model( n_mlp = 0 n_qkv = 0 n_o = 0 - for idx, layer in enumerate(model.model.model.layers): - - # MLP patching - gate_proj = layer.mlp.gate_proj - up_proj = layer.mlp. 
up_proj - down_proj = layer.mlp.down_proj - - if hasattr(gate_proj, "lora_A") and \ - hasattr( up_proj, "lora_A") and \ - hasattr(down_proj, "lora_A") and \ - (gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None and \ - ( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None and \ - (down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None: - - # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module - layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) - n_mlp += 1 - else: - logger.warning_once( - "Unsloth cannot patch MLP layers with our manual autograd engine since either LoRA adapters\n"\ - "are not enabled or a bias term (like in Qwen) is used." - ) - pass + if lora_dropout == 0 and bias == "none": + for idx, layer in enumerate(model.model.model.layers): + + # MLP patching + gate_proj = layer.mlp.gate_proj + up_proj = layer.mlp. up_proj + down_proj = layer.mlp.down_proj + + if hasattr(gate_proj, "lora_A") and \ + hasattr( up_proj, "lora_A") and \ + hasattr(down_proj, "lora_A") and \ + (gate_proj.base_layer if hasattr(gate_proj, "base_layer") else gate_proj).bias is None and \ + ( up_proj.base_layer if hasattr( up_proj, "base_layer") else up_proj).bias is None and \ + (down_proj.base_layer if hasattr(down_proj, "base_layer") else down_proj).bias is None: + + # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module + layer.mlp.forward = types.MethodType(apply_lora_mlp, layer.mlp) + n_mlp += 1 + else: + logger.warning_once( + "Unsloth cannot patch MLP layers with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." 
+ ) + pass - # QKV attention patching - q_proj = layer.self_attn.q_proj - k_proj = layer.self_attn.k_proj - v_proj = layer.self_attn.v_proj - if hasattr(q_proj, "lora_A") and \ - hasattr(k_proj, "lora_A") and \ - hasattr(v_proj, "lora_A") and \ - (q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None and \ - (k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None and \ - (v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None: - - layer.self_attn.apply_qkv = apply_lora_qkv - n_qkv += 1 - else: - logger.warning_once( - "Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ - "are not enabled or a bias term (like in Qwen) is used." - ) - pass + # QKV attention patching + q_proj = layer.self_attn.q_proj + k_proj = layer.self_attn.k_proj + v_proj = layer.self_attn.v_proj + if hasattr(q_proj, "lora_A") and \ + hasattr(k_proj, "lora_A") and \ + hasattr(v_proj, "lora_A") and \ + (q_proj.base_layer if hasattr(q_proj, "base_layer") else q_proj).bias is None and \ + (k_proj.base_layer if hasattr(k_proj, "base_layer") else k_proj).bias is None and \ + (v_proj.base_layer if hasattr(v_proj, "base_layer") else v_proj).bias is None: + + layer.self_attn.apply_qkv = apply_lora_qkv + n_qkv += 1 + else: + logger.warning_once( + "Unsloth cannot patch Attention layers with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." 
+ ) + pass - # O attention patching - o_proj = layer.self_attn.o_proj - if hasattr(o_proj, "lora_A") and \ - (o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None: + # O attention patching + o_proj = layer.self_attn.o_proj + if hasattr(o_proj, "lora_A") and \ + (o_proj.base_layer if hasattr(o_proj, "base_layer") else o_proj).bias is None: - layer.self_attn.apply_o = apply_lora_o - n_o += 1 - else: - logger.warning_once( - "Unsloth cannot patch O projection layer with our manual autograd engine since either LoRA adapters\n"\ - "are not enabled or a bias term (like in Qwen) is used." - ) + layer.self_attn.apply_o = apply_lora_o + n_o += 1 + else: + logger.warning_once( + "Unsloth cannot patch O projection layer with our manual autograd engine since either LoRA adapters\n"\ + "are not enabled or a bias term (like in Qwen) is used." + ) + pass pass pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index eb8d4960b9..61585d77a5 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -24,6 +24,7 @@ "unsloth/llama-2-13b-bnb-4bit" : "unsloth/llama-13-7b", "unsloth/codellama-34b-bnb-4bit" : "codellama/CodeLlama-34b-hf", "unsloth/zephyr-sft-bnb-4bit" : "unsloth/zephyr-sft", + "unsloth/tinyllama-bnb-4bit" : "unsloth/tinyllama", } # https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading! @@ -54,6 +55,13 @@ def from_pretrained( f"to obtain the latest transformers build, then restart this session.\n"\ f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)." ) + elif not load_in_4bit and model_name in FOURBIT_MAPPER: + new_model_name = FOURBIT_MAPPER[model_name] + logger.warning_once( + f"Unsloth: You passed in `{model_name}` which is a 4bit model, yet you set\n"\ + f"`load_in_4bit = False`. We shall load `{new_model_name}` instead." 
+ ) + model_name = new_model_name pass model_config = AutoConfig.from_pretrained(model_name) From 97d952e834098b849a7026baa40e9e698024586c Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Sun, 7 Jan 2024 17:57:03 +1100 Subject: [PATCH 0098/1088] Fix PEFT downcasting (#70) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py --- unsloth/models/_utils.py | 52 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/unsloth/models/_utils.py b/unsloth/models/_utils.py index 2f882cc7c3..36ad64526b 100644 --- a/unsloth/models/_utils.py +++ b/unsloth/models/_utils.py @@ -206,3 +206,55 @@ def check_tokenizer( pass return tokenizer pass + + +# Weirdly LoraLayer.update_layer downcasts PEFT layers to float16?? +# For mixed precision, we need it to be in float32 not float16. +def LoraLayer_update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, + use_rslora = False): + # This code works for linear layers, override for other layer types + if r <= 0: + raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") + + self.r[adapter_name] = r + self.lora_alpha[adapter_name] = lora_alpha + if lora_dropout > 0.0: + lora_dropout_layer = torch.nn.Dropout(p=lora_dropout) + else: + lora_dropout_layer = torch.nn.Identity() + + self.lora_dropout.update(torch.nn.ModuleDict({adapter_name: lora_dropout_layer})) + # Actual trainable parameters + self.lora_A[adapter_name] = torch.nn.Linear(self.in_features, r, bias=False) + self.lora_B[adapter_name] = torch.nn.Linear(r, self.out_features, bias=False) + if use_rslora: + self.scaling[adapter_name] = lora_alpha / math.sqrt(r) + else: + self.scaling[adapter_name] = lora_alpha / r + + if init_lora_weights == "loftq": + self.loftq_init(adapter_name) + elif init_lora_weights: + self.reset_lora_parameters(adapter_name, init_lora_weights) + + # check weight and qweight (for GPTQ) + for weight_name in ("weight", "qweight"): + 
weight = getattr(self.get_base_layer(), weight_name, None) + if weight is not None: + # [INCORRECT code] + # + # the layer is already completely initialized, this is an update + # if weight.dtype.is_floating_point or weight.dtype.is_complex: + # self.to(weight.device, dtype=weight.dtype) + # else: + # self.to(weight.device) + self.to(weight.device, non_blocking = True) + break + self.set_adapter(self.active_adapters) +pass + +# Fix up incorrect downcasting of LoRA weights +from peft.tuners.lora.layer import LoraLayer +LoraLayer.update_layer = LoraLayer_update_layer +from peft.tuners.lora import LoraLayer +LoraLayer.update_layer = LoraLayer_update_layer From 89b5ecef36b7fc373985392e61418c24010bacfd Mon Sep 17 00:00:00 2001 From: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 9 Jan 2024 19:23:59 +1100 Subject: [PATCH 0099/1088] Add files via upload (#74) Updated buttons --- images/Discord button.png | Bin 0 -> 14224 bytes images/Free version button.png | Bin 0 -> 9530 bytes images/Kofi button.png | Bin 0 -> 18533 bytes 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Discord button.png create mode 100644 images/Free version button.png create mode 100644 images/Kofi button.png diff --git a/images/Discord button.png b/images/Discord button.png new file mode 100644 index 0000000000000000000000000000000000000000..3fd45d4600e44d784c6dfc6a6c83efcd2e77fd0d GIT binary patch literal 14224 zcmX|ocOaYJ_rDHZRwq?8TBEkwo0b-#Mr{ePiUy%ZsI9d+s5V4uO9g3(y=RS5yV8nH zRYQr`f)IX*_vicj_pE!*x#!$-pZmP-YZD_~mJ2s7FfcH%0Q4T1GBBJ%Ffg1reeMj6 zQp1K`1Z^XD0)sk61V7EL?rGs?0B1d}UTh9}RX@148#g5f;(`3Grjc@It_ z;MOK4YuD=97_Tu|wY~*6o-@6AUtH=s*Ka{8_dQoM_ywW2A+c9bs9BN)y~|0QXflnTw)xy}&DOfHWb<7bf_e zh%8MW{YpN5`L6~QW)M8Y*jFH+erxf>$b~l_Z{>7!DRJE135ZzvU5`0QlN|2!?IExD z6;14bN8pgz4e-u_=|KFyOQN~Xb6jjGa$VHAhgCf1Xp&z~c|02%3Y%{@Ixqx8ApFzO zEdb!R7tFj@wxd1Lm_GQ~dI+F+Xo73HiRgX1uZ{h%)Q9KJAzCMFa6ZLj;kM&}>@@DH z*DmnzISpe|;nrK`XWG;LF4XfWYb2=BhY)yWNZAAd6!n8+Uv6@saGLFc+)|6O@Bl!s 
z(Fqz~Wb-51LvZTr^+yqR8=ex#$^MyWMFDE`z4J6?$GBwiwAc2IeH}$j^d3f>7JcIe zdB2q;|Ci`kffc%alf0tCLx5FwM9=UlQJGU;7bBh^amu%vYb%7r_qLKA5525RTWWY} z;i~ro&R|{4mMa%P6TbBYy=pKnbDoQZCXpims;4v$?@Pgy86I8~jZ@Ia5#U+KEf)Z> zE6ABX&E2*oYC*%zXLY@d!jUG*wf$4+uLa>61~cw-bMujF}^#*egy_y#$a^|Gb# zf8jxyrp@JFY>roCrK_rqb`Z|?Uh?}wjkNH?Od8>`1*s2ltkR?ORbl9MQf}MmkdU-D z&vR!+;CU&bfI&SK@iP;&)MVfrWA)klbKstV%p2DtXcWJuoD`a+`|>;Y3{p1Bw=UP+ z+yG;1j58LP5`;K0SD&Tv?x`61*Uh!qu>zXh&mltegMm1X)Q|K)-#aHBm@)7BeHxTsji|LJ`-G4g5O?nKWD=iM?7?~vyo{6B*n4fQuSKcIQB=s10@HHsC0-?>zR zK9tFu50ZnS65YBIpHbyTI14ZL=|}&0%+T|}r*2*p>38|gYm~1xphizc`OE}8Zy3Ur z#t0cKNda<6?4n(@C$JU&Gxob+43P2l4glp_54!8G&s@z&mwxFGv~a7Y*yXl>p!tf} zq9g#+=>VcL`1C-v2NhPA-}zqFBFw@MZ$qN4dos~E(XE}9(?W}1Yk5hvaXq<$RIsVp z|E#tB5?>rFMoCovxha~6^v^|8{-^T*H6&lj%`uY8J7S`2g_E+0x=c6a%K{zglDF=Z zHU}a{>@Nw^L+^l%XG+oSBE9dBFy~oez53!<5|3a@(d}5Q?GQ0v>AkK`SH2D>NBWr%}zZcy?&lLuUL)}t2hRpxPWX}Sy z$xngOiF)Z~b^{meuA(;h;mw00%ye3Fc?Qg3G)`{WM0p(h8Ge2*knR>AN!)FU5Ax{a zKBp_j^Vumq-Pql9y10pf=pR(#M^_Z@HF15W>@=B?MxMlD7Qnn~npx{KOVq zxarq^z>2U(R3;71(6>c~wKzgm$zG5+l^HQ7NR3OJp_eYh+Dx5f%U;`{NziMw&IMZ2 zWMFW@4iOaXcdv=E!Tql<(6>4Ufi_3UE{ncIlhkcl!f~2wt@9%nME~ew4jR$j$;!t< zJ*@C%lf^SuH$-*yRI)eejhNx`qNZ)J7$^_5K}2}b3A7%@Pp1f?eEl@j^yz_R_@H8l zV<&js6GUyy2(siZA1~NYnn0%63TrZ#s4gIFFz7gbpB?tR{FPNry)bH4rl6X z!8^HvpAg*x70B%SpP-Ynx94Y7b0dtJnJM@G1m6%EOOorg^J04v1Ts7RC+H9FrFGF8 zqMA|=^`jat`gAUyA|mSjpNOE!!8oL_3!S7_B~^d001!d>dI>T)9Y^4DtMAO6Mz5zL zsExv8O}dbIOhw}gdc>rz0WFeoyosA!%dfp@olGmb#Q(zar6*KMl_+RlEVZaKaGWU^ z(XM(mB>^ea#uah?W5X6*zQ5PM1*i~bF&RB|W?|Jlc7<4ll3^i!clmHzogIx|dAdBS z@0OK3%WSYlO7-`9XZ{VRAN?B!9W4*PH3<1N0cvugXHoRmjo*W@lvF1a?u^?&4jPBe z*SVo8lnXJFxl!e!LjW)tCY_@`%}h}rXX0jXm+$cXxa3$Ag^TW zAFo*OcHbW!iQr<8-4FpxGt8F|0IPfEhzb*97k#S$yPaA@7zFaUJ?eE&m#!Lrvin0F zrD)xv!#h@BbQEH2^>1I*{q4AI&dsO0o$@!D*V7`x|9LJp?dA)W__prc~G1LkDY>O3r+YX*`VjSE2(xmB^*8q2J( z;G}@*Ap>I)4EwynicMo2`%P-=U$@)SHhKhlw%(x2;FL4_MD5{UZkL6V27#O8nF=FS zuk3!t9{r*b+g$3-m!Eb!F(bgbvG3#MC-XR!KUwPbeIB{whaQ=!lq{dBUua#iMcUA; 
zF=W|DW(Y1GwjQ>bRBh)fcXt0WvCsXi>M9q~5)9S6l^Z`Ytf#It|0=c$C6i=xMJ_#V z7*mGQF*NizJc{M|^|Z-Rz6U?1X}_tF+uW>UHx;%-Oj2n;7Z+<_y*^AIO63MrXop_17e*iA=1v`1pcHbnmEUyV&ncQr<}O!Rh|fo_ z(l>*P==*+7D3c(ihaZ@))bw5y4s)7{OH}K~md>DY?_CHBEw#HLZtgYDUi+YL7_aXZ zo`sj!I_9i&;L`bo7e?RBu+0ZHFI8w}?!LN# z(g_~t`QZxmk73b9P89=~n2wmS9Ptih`pE0(3o z&@ow3Vm-6NV!LfrLa$|n_m2Cwi6$V$*Bf)|-Ts+1DRA+6Cp@nH^NQE< zJosspbhbI2Yw|OP=2EG2h)IUkoO-XuE-6)4YIAmlq|QuVs6q$RnjOV29U*DvD2%bOko!$ZY$v#-A z3z<^6;-R+n)T%h|#hY6fi!^8OC@!y!=`U_`x9>oaL$y%%s_lRA3xXPy-m|?-lI{}3 z8w<@R_yomuHBwsMuH5_FIS3AUGW_HBjNJTM*CoHDs@)iccS3l_YruVL7Z`m<`xgXO zWANEt*$)f;x9+9BwVTFLNhH5NMbx;u_NL4sh;pD~_uL1xqH6Gso=$&#pBxS-XI)On z)^__jEb}g4S~{JJc!9Z7G{v0x#gj53zHLge`DdEPws=fJncDoTo~#jCtOZ5`mN-Of z@&x8wCdyJLJMO-^L(^i=Lx$EiuKmHoQ#O?xvV%#|nXe}q%=2Jc1f_u&&z0jXd#ux& zc%AJ|@L`LFe*h)yBYuVuXNP#_>R56_3BKrh$d7Ld+d*&8PPVSC8(mhF z3n{y6e{!f0>}eB(j*UO3;aFEMPXk;y$zeRX=*E)AuOS;se8gH}f^&})lSA(st@d0= z%LxiX?!&6IUP1gR)*Di}MVOw9PlPc#3(Zy8zXOQIjbwli?3tUQIalB#89KmjG^zBs_OPF&64n0_UwP{BXSkl zM||O{%J;!7F#{zpTy}na;t~3!-y$8r9$!Sxy?BSq+dQ{B=^O&^L#;^1Ea3ao)+BT5 zc2~CYslaacrR3%D3pj^mlxFCh{L$?e)~N%shI&;gC~)#8p;vpLnh-z8VmG!`9t*CU zagOjlY9hwkx_LMFq{0r>4nN9w@c}$$^UC_#51M?N`CIxrU+fRF1^4Y|TR~V6omzk5 z7pJoD6&cb^#ZNKJ8#ixMSYY3&p_^&93ic^}iNGuHEZb4+SsoIrz`0Q}cVk4R>x!k~ zU-DQ>uea4EE9FvhK+Tj%w6M`JWE-J7^^0MbkTV569S zSy~$AYso1TW*EPy>T(`&tGs61FVWL_zf?3%eVP36F>-%2eO#=>!}#y*IZMagEgzOT zs<6e8^KZA^^Cw++SX5W+mFmjS@?1YxC^Lc2FCfS>quVwE^$0}iOl6*-r_i{*b?QXm zXjd_k)>O6xzY^NLly`~_H%2y48eX6XNbSODAF5?=xDC_dWUfq|HKfOzh%N8D6xOmZ9QIls`OzvXDtUUi$`UHNZB5f)t!-BJIQCv_BJ;3eKWXACRZyjPLBlogr}YN0yE z8EF^a!}vcS`dtkC z3OeJ%%_j(#*i^lE!xidF1%FCHt+DuaSB%!#z3EUdyky$?BZHH;oN!2Zxya1`Hk{i8z z9{lJ+Z56KAtR$&>G``qaYH1*WSdqy1fJpuaqZ&F_JPdIh8Cu+IXKJ91QRj-JuG&)` ziZm|l22p`T(t6hqj}wApcMUM{XB#*@6=Wv4(yLAaFNz^x+UlzzSjs>Po0;LgL1vFj z)L!m9wx=x%t&*@EJ$n`T#&kyoF_MgY;aMKXG|!gNqI^Q5@=D7J5PY=V(O5oHZL?B* zU{Ncv>ErY7{x+(lLMOE=o$(@QZ~*^Xwpt+*R+FxzF{k__@8RL~s}*3sQdLib^VghF z>+>Mh*bKSvz-v3>>gk}UvLDOUd6lN)u)iZ!lMz$;CE`y(HT_Ok1TB857yKfQ)Sp6L 
zF@=u3ud&+3QjPXm(gQyoQ80SgZPLmt3Bh{OrJ~tY%M(-i+NwZd)fl4r(eSUfI-_NX-%!FpqP znbii^)WIa({940P;g6SHf|A6Fp;Eybps{&3L5a$0)QF{iLyE!Tz&`?Tz&okGdPTmL zr0)06#71H#!?)Nz{X|W2DTy%p9ac~U8x^xNJySOPG*CcAwky&w`bvKGK_m8$eZ`vM zLL`sjR@vLcezpCdg<><+#PA7Jm@6LAH2-Wm9;yIWd={>}uC(e*S=mxTgf`>+iQIOA zSklvp%L|c@J&&L*Bxi(JNDe4as06_cnp7#WRP9j-wwfp!G4*#uKs*m2^U6cH#gHsg{a07x_f=3mKZ!Q)5__gK_BTx%!JdDG>dF(F za!Sj-B8pDEv34)D(rA=<^}1@`d^cD?M+2GnZup5{E-S1*7Q zM}W~yc$XdtZKj!puoij6YL3tFszUecE;q3Cp0yH)h=z%l(rsLM7R>c6_7^meQUl!? z-|cz7^g!XU@hG+*^29Esu{6#`km>Ej%r@ogPQ9UHx7GEYpbDrNiAHIw*Nl<&VR7Nn zB#8G$?v9PeRmqSv=Bg05`mvDNohnZ|wZTKzw+oR;e zey+6G8llnmmuHvk+Hid$-<0Ek)im!~_6)nT^C-7OQB$P?ppipz)%X=GoIT7=Ce%<$|z0ZO4%{4tZ z#5Ke&7=4rM+fCekntpfw(`Gu1_RUn6(#vHHSv$54q6!Rqy?tmQ!hKJFfFSG7;nS0gM9kmcf1hjM28}Ex-Y?eOr z{?RXe-6QD%SSUJ(l<=F!wbx$kaO%D>@Jcmb!+g;FRQ}gR8zh%c$h-AO5aUeAm@G8g zld9|S?SMQI7X&;jJJ=gRTo4E){Q3VQk}Thrgysb$LusFy$>-O3oKU`-hEx{s@C3n0 zA5$H94Y%^UpcnnI#4DmvwA+T?q7bp^d6LJ?$Mdh!!`yUP+>2>&*qKDvc;XP2W={PaNDtTFFT$98wQo-VvT z(+`CK=|><>ogFI%y}~H+j>5F_Y{jg_S12mXqijpXC(&x}8N^v?@d9hktKsOLSMqF3 zM>}lfU6U;*JZp0#C1B{@tu3VW*hk$eDTJhi)BfPepm?$M-+0N=`;it8=pb zi`m$Ngnt~yP;;S|p*4rW<^sL51Z;D*U2gB*%0kNBtPW#{l%JGG^DSjhS92+B@_b3) zL)FjCF}-3D!>FGFUPCb)HS_hI8cmpnKPzQ;C*{%Wy_gc0lIH_gQ<7yK6g5HJzKiw< zt8f+b_CIvLays)UqGvTkHLRdW!zZ}~n;bNeIQZ|^l^(Vx-gL7PyTEs@t$O&+;`U`1 z5-t9{w62_JuCR6)s!m``qax>Jyg$`AxLdz3Pkr2ET`mXil^iV>!poCGkkyNSe_Aa7 zrNz#t?ysuvmclW`1k?2lPRdT)WhAY%u%GrlIzD-hrO9dJ`VBl4RX*3B_F?#F>C zKXKWtAW73Exb$H6R`rVIUB{8P6v`6x^JKV|%WDdZ&8%*9o{(|h(?hpL&9`24>eGdlHor-{G9~rJShUYaRl@<+U~otsm~~q7Ac*zMA7xst zbxMsC{RlKve8s!g= z@GD6wz3H2ysTl1u@k8YzBU7?lCf?k?^+1iu&C&RW*=N-i5?Gb>CviM23Jal~PO|Di znX3|ZXIIppf0u|}ctqQg9&m1KNXSp;=gBxFungHhN{`3%S6S1)LAYzZGljg6-4d&g zY^F>eviUBsHffgnt%m-zCi~jY-PMGU1qGs5wg+xH=3PE zcYvL@MNVQ|WQT+NDpwrAqF|_Y(VLK&_HVpT$y}Dx;bp5E7<7jc==t{@o2g^Wa2wJrW$5+K>uQCKqh0>~Tc2x;vv@K0 zC!n+o)yFM8wzk#SmPd$zejiL%o`WT8*=~h}`?6sjjO*r`sxKhMw)$rlZLTs~I4$pq zs|C`g5cpr2nqsHwcQYoQ%s6S?UkgHpB5@XQW0mGxDLeI*<<4F%u1UD$KeS_~j&F9U 
z8=2@$rpT}lHqU*InHtZHuPT7fK-C~+9&N=fvh(uuKwU9MtVvQXXU3z|-q;pR%3u3)pCD)E}ix3IHPWT&wn zzk_N{^%^umyDMtpkz+OYS<)(WFT=O@M_V&?ZwdCAN8lylJ7TY)%M@THtjG{mrr1tY zED3t$T@6X7=FA-WB{JRlc;2Sw1`%x5x9Yv}i!*yufiqdXl7y{<1p&8g1HEvsTdfHm z8)(n&Yp@CS(^9WpycWulUj9LKfFasd08FZq1fD8)ZOaa=EhF~4ykpneleBtxT}Ui~ zEAINR%tc)uatXCR@I=*Dmswor0%AN92YlCPzJ(MQJ7`ioMDZ(XMsb`fAut+mGY5;4 z_GDqcLW(+d$$U;-D3h_Z43R#d80q0h4sc?^)B-sZv3+y?gHo?&)TVI>@*HA_)_&}8 zz%2Z?9wKN&Yb9cIBHg<1sc~IF1zPec`C`qW$erTzC56*Ib6)A@LWX z0>(dCG2)cd7v!Gp8J8q6n;P*FPp72UbFwb#7}BhK=v+_r2+tdWQSr+SfwDe>MV+{< zS1Kkqh@Y(EGm+%(z@I$UV@S}2FUdT2z+hJ3N3j}%GTxDLb?@Rj)NWOuUw-M^i18Gn z&EtWCYl!AZo!b&+2?H%E&r@XLb*uufv9uMQoSIJA`=X)^(-!Rzg@Q*6HL!<( z1ybseJsx?D;+v7rtb1`*gWd^pChYEc;+>-Vr!@0e9oJNDeMpc^_ybf>oEe{mU}9)} z^U1=_H)IgV>CE?q?+12}Te<=_xS7ZF5z|@z(~)99n@^o3dQv%uc-$Q|*XDkleB2MW zp0^BYTF?J2 zZIHVxh2-C_F~@aYeo=iPe^uw;hiRQ;$n(QLiUiHO8*HKJ<(f~~!_OxV891ea1o6#c z=kQDATP~Dg-EX45EEm;D%PFJrVHKPGCyN=U?v6^=&IXdQKt~(P_2rgRImHQwo?KFm zwN*tn=0O8j+5Jn!ck*PN^4uH@T)5^$bIa;uE_kN-mr*)^;p%Fn)reJGwe2^_S7$-Z zxTj3YSA^$o(Q09au)q~Kp~J)~4!Od@|<{#uJzyJcBLceqBFZIrjiuFYCiih0`;H9>42Gz6O8pY+Zf^K^r!P{96Z z{~a6|ea~fi8ezYY2fuiYT{UiVZs4WbR3y6?6O#e*+6g3^ zXURrYgbW4EcyBiv{WEzftoXp4bg}0Ug*%+}Nj9jR4wITp?|XTtVVm}EjU+2p_<x}D}BL8&~YIJ8jyZYw+|;2DMO@s zSL$ot?t>^rEC_kOg{JJI8D50HM-^{sDT#KJXny{s=Ao$Gi!sa#WNd z=D_bE?Vr^+mrdcDm}weVz@5i3DZMk?`e!|1avWb^m9q1oIdjbz-ZNR8ZCBu0x%eFj z)tqhL8UH5bzffKAqiywG-LgvR-C~8nmF*Uz4Jnn4DbCH2w+T}$4Q(?=lv0kVbmoon ziQ-F4hXyGuO**dcpARq>ANg=Tk03@os%^;|zXd=1bOPmDi@rwfw7Gqn%gZF!Z}4XR zW6+t9QPl=CFBqR6GZ?=6qHLJGsxGo_f6H?v;!eU8rl~L0fee?{sufy7uoPBte6|F0x z;@FuF6?_}YJn9D?jVk0qyE+3;Odtw_x0g57>m&^l)us`|6_xi@COqNKnp__-H4Bm# z-5Oxuq$ak>qSGC;AL4x5;)+JytAD?m1l6GjeOVtkFK}0=5Tavkdx1a1-%r8pkt-p! 
zYfnT$iC_9*NoA1`yC>Lmee>X8^a670-|LvEU{V5@c9a5zF>XwBkYrSz9|iYkTr#Se zwx1j=3pfy>xnjZ?F0|;kH+A#6sHO|-$-v8hzeWIeZcbpv_9?PNYiWfu>*R*|t*^Ou z6bIO>Wwoc}xKqWrt)d-K(fh?ut@&6kJO(vAQwmAqXxa(SJ}hhNH1=QU3@Qq32>gDy ztDxgQZe$a-#lJo7=%gf3#Kf`lZ`{6S;oU9z*bl`O!N{4Z7c}%9S402U#U}1MF21w4 zHqdoXwck@a6Q>oY>e%m>(0(X)C$vxcZpxop9^tag?bP{9gGIn#>^dIivV4Bztt|rH z&(nLg*O2dtBOXt*Jq-4i%xR(;c^A9jnD(LRsnBfoC9Tv~U8B(_XdPBNaI&A`Lz@6b zKZOf99~kO@kt;D6hx_sKXaqYny;E+>md|V~^?8ApP!OTRXI8UEZjR#*w+D=-{0;69 z-_npKIlv}Is48_wy<$#sfmw}R)gY}$9x*vlL7WgWU z4ky1QKj6vc@?QE5%VlZ9Sp{$9_D*a~@m9Hy6^dV@%_Q>64ysYfdS|5HF3Hv3EX+wy zmGpkt@$8645BacaKz&UQ8NRXkBv!xT2n8^zt!JhCXSZ(@C77tNMwmz`c) zaJl;30yfZpX*%v4-I=cp#>~vgq8CL8la-(MdZ!nZD1(P&r^0yCAj5A9 zi@mKtAsctH_DZs=7SD1dD?$>nA$u)5{)Fq1N)Y#OTMyg8heitv%-o&gKD}vmNd;yNrLG@CWP3& zm~UDHEdbLO>Nz@A>K+34+t=~_H?6!qWw&E3^Sr`0q)Ma-#4xi^IfAa0^TnFAv3Zm= zuZw7q`Y(eLv}i zW2m5cj6vfH4HbuDZwyV^JSLfv`kg$?$X5E8<>alI|1fXw-8Er*gGr`Zd6USBMtb}i z&_9<fCl7qffwgj(n)+4 z#XiR*{Y`EFcg8Wy?)`<7RY`F--5wT1ub|!k9Qgb!uQy0gOwl}T72rO1K-Z0YP+_|> zL{wUqF<9!Wrf{?hth$Z>oSWf=;C1cjMhZqfc+c=#hn`n2v99X!CbhkKh`MBy^- zKy2>K978sR69^T4QA)1&V|v&D*(k2#M0-#?La0dLQxL$IKvq8Z10rXRXDqyO898|J z(i2Rw2RQ7d79Fbqr+En4UHd=0Y{NJmFMCb29|C7L59vP6*{|%i2iM-*)y~_VqbdT# zl%nW%`7VmtuDG)i*J5dbD1>WtQ)q8FXocIrgRB9wZ2pL)IXoi}rv7q<4tkB0Q|%&P zO&(qn?Pr_%dJMz8NANVw;(g$AGxq{weiObDzjhpTJ{EDSFoiGFS2E@S`YQUkwsSSd zfrNXadMz>^leCjjxW+q+7Xc*?8;{YL@8wGB)V9IN6WlONvg*Y+oiIN7MELHDYjX3% zovBxf*59l|^9xq}A541yXcrzV!#Z)DR&*=yt=Gc)Cyy7U4^}_vZ)K|>YlOlbbW7mJ zu0%nPlTvBXCuq^wXB<--qrVvfxN}*YPIkcZAd|v*YqT$SnLxrN7R^o@(8o1FqOaH* zy~*^9?_D?(xK@lb=ZGR5n<<6qsp#Oc=Ys#vjhm+Kr~;0JKCAPjALl8m%m&Q%)-T8W zB&`Nd9AMBp|HGhPJOhf`Z~z2$P@mqpWuDezFm8`ammZ}*$6z>z1HoC}``gnH=d{ud zvc>Vhv;GI2zjsl5b$MmO__lzEd770V2@x&iKvxkS;+9-(OhcbSR+@;6)a+gajCA~m zUjNEAQk$^}#=UdBterP$%74dzqT*pgUw^$)9;Am+fbtz5_zlrbNw}OYuIIVuaTYzV z#t~BA!b88qd1(EId-XQt{{L|8jn=PU$i~!nk zAB`4V9~GdRJ@2IaM-M5MRf2z6I8QUH3rG?Sv%od{k1b6uroRk1v?eh=#MtJV;l9B; z4Jeo9B|G_uVeR}UDQGBS`N_c@3 
zUTr&vm=;g+uSZM6n{O>*1w7?w0+H}W!qUF8Ej5BOu0%=81!4)H6r*X-ZGpu_0jL^X z;=V+(|C_>vh>_v_*-NN$HEuX<^(=7HaIa@a^uxtH8UNbg#^HrPxW7D%vT;#VEHE2w ztUqpwLkg&;Y;n^Q&2(AX%6kSkJhz%#DrP1dIK2;;KCDfBnd)N_LsjtpueK{3T%RBA+{p-M^CFt`8@xmi-}J}PA+hGy)6>-%SUHUUF}k> z14R6jp&?fzD~UcyL^YCzv3~ITl<&`Pn|C7Z?$*>AgLBc{UI8}yeyV`E$?SI8vme?z z@%aD+B|cRCqSoo*w07K7zojxq_dJ0F5|@~(+2@7eI)EGe5WgMH#lt-vs>b$8#E9N_ zWQs9uPP8vTxZ&2=C?h{&}B!?m3@x&U4Pa_j8`tW+wWl+4%j2AXEiJQF*AikF^MC!(M>G6Nkb3d{Reru(`a2VvLRzu8 zmG*SjYd5e^Tl&WpyM}n<_;Je0J>rFo7z$-^t5>(*+(=u5Yfb)uNk=X}oFOJWe0&yZ zmvKfbbattxmx|1(=?!hlo>j*lgzlA+h7Pi}Zh2OahI~*icX?>qzR>^1N}7;cDFgZx zM<{KQM8^!=qpV=+jTZDam>^urMl`W`4d|jcN!mRNxeulAK}*ET{3{-ed&+h{cy8a} zz`)XPNpXWZiN#?8(8$xN0|iSdOdyuwvIQgmRpuhRIi(Zh;9ub?t|30p3$?A4iiv=5 z%kj6Q3%i%%MD0%S-_BqFsaDGWj&68Rt}WX)&@Gk)mfj-9K4YXAPmXFMP%tU}j4yuQ7g zkL({78<(7SfNmRwB+Zb8l6E(ghFB^U#-%PrTmo@nmN5rEnyI7}Wl4XCaOXrKM()Yi zmanvZX9kcryclOYwsFt?S0e>zyZkV7q6`<1M_NJGZK*^tP4$s0Sx1SF4Nw7%B>eB@BR_jDHh0?X9faQ=dF}ipnA4Q zg_C+_%=j3E@q&6S;W8!=IzzL3wMO%A7ybbvytz_y�U;XJn;&sE46N zuwb7D%4x*Mcl?n59eYPjQdSXvji)fb-27SSq%-O~2*lpI-Z704^d2tOU%o`v6m`$T zO)*Mw0H}Jcp&R0;^6`c0EJm0LR4Z}wo-zX)DA$`RcF0@ayE-H%NtKK6s>DqirJsYq z09^EUwe!&bM)S%?MbIenX-C2H$IGGd2;LuebSpOU*}xe;f^u+;49P4Y(C>hjZ<;^>LyBC{IqHtCK90S)$ojcMv_R>H2BjR5&Y@d*Ho;q6nWYQSjf{^%$*;~cOz zM=qs8ZIly8DG^H{&?3U0eFhNd3Kkmr{R7R-QnYZ3u;l%}suC4Ah4L}_RU+SOHDOX2U zKvQ+<3hKmwhZ!zs&59jW+Am#nT0xAg) z>tj+b1XG!WMbBs3YKl8UlOv=ZK{#8uS+jRNvIa~W*bPdVXiPceQ4;AC01 zPmN5+=n3zsK?~~q0g01wn9c3(Y(!N=DG}%Ew*>${xA(47=d`CX+P=>Vh$e`=oOpCC zf!;E8dUXasc=|b%5)qfJrx3BkkD<@71Nh8>ppL%Pe0-;!nZ!|v^+1qDzTo?E^*$8z zL0n4u5rqeUCPbyzrfJ-LebET7SGe{84G`$FoQ&w{yEd|M%y^^ziJzB`+_%=-^`P8? 
z0yUQ!aO1&I;_vb_bp4if7p}70+0Hvs#|#zz1K)js4q`5JV~N#()-3YQ!6IcUsm@J3HpC0Q$~t(5Pfd z^s@9kA5>xchjp`~DUq)d$M02;79+%?Y4>xCbn(VKgBmAXJLo;K7$RY{fj} zo`NIe78`3eY#%O?y$qI}j<7*IdddPv@m9k8((Wg=I6)wEI%n;hnpm5NCDEgpK^9cI zt!sb|aV)%WK&WCuaFciISh@h?Xt@SBby5i;G_+=f*_zmjgD7ChhVvus!{~R!usj0%HUp^JrOb z>3rYefi%?fvGQg8R;!%g;Lei26c*yfmlML4+^s$WVG&U)qr{mK9V9EY_rA zx5cPjYpQ`K9NUh%60A0~3l{kGZ5m?k>R7W*n)QsTrt0JZ2yr2m0VM3rjgMD*SUYUq z1YUZUki$r8jbE}9Ah&*R1cg_l@(-m}MFsmjg|`yG)I`z;ChAW+Mo_qwYVKa>fL4cd zDrCr>^5tl$x@?j_-FxNhMT6`Y7@tA#dg%*St^L-hWd?!V_m9T$9p+1Z%C7$R-Ild}9-%?Fyp{#s17vo+s4Jt`Gum_j#-zWh7<_nexa|dZesetv zqiSFo(d7$#9f8sxI!Y|VrMHEK3Q|`b+A?n>Pnbk_S$lRef~1n*wv&DMz}4?F;hhf~ z8c@k0W+Fg}>#9x-wJ}m3E&%-^7tU%U%&&8_W2?I!ayV}hCy#sCF2e6AIox<8=|0ct zOJ!>$wi3tfeZ_#u?TxT%!M+zY70NX)+jLQ!l(TyR@9%^N=A!BY%G3>oG)$T{%LZw+ zfw2yw)wg@SpONr3&q@SKKe=^XazKds(z<6e#rVT8n*K;aQC32 zs98!8vruE31M6>cGQDyy@R2oYX8Pka87TnXyWZe4R`itCD5YQNu)(CXJHg+y=k!JS`EtwH^MhfcvlFL~O;x)I zUUTZ+iL_f4O{HlSnu#6w%W>Ha`x86=#eer>`&(grF(oT-oPE}YUu4meZUd?BnFz~$ zC*(&r;m~^bMHSomNcZUM{Hvl8bFDnH&gY)RSoGZFY&>|pbjzCGwkUDt zRWmP>e%ryDIQGAYWL=*G#Y3#j2nzL=xuViGA*Ch$k-TR#P<|X9I_(Qfo@_Y-_mQ^Y zYWo%XmJW#c2kK89%YU}g78VkVtR}RI7w}cDp5<-QvTU&i{V75>^%UYWl0z<44U$%~ zZx$AFQa$K-aDmb|Mp$$fHb1#PI`YgOL7+`fgaY>de5s=E6i2LP%$T72fh^3R&b8S! 
z{fv_Uj6@?R3`R%3vZ|sJRE;CVB5DZ1e1eDAo4`%#**4FKlRRvF%C819p!)`K zamm)MDLN$5^6$p0G`1L2rSlg}FW*}S{V8~r0;y@^1r?(ym6wEg(K60|{d+=}_Y}C# zP3gUlnK-t~7WTDI$aLSxLa8Jn+Vntsnq(ugNXK%hz}R|aXdQY%z5-zkc{0yYvS|4B zgBZ$4P;Ytt!m9*aNPVA5i`%-tmDj-6#_7>lI%=s^XPX?hN{4-^FQ;6nuabK0(rVGx zn{-}Z$8)xUZ(G2-ymk0Fcix+aT6J$*SY!-zz?KV8?xtQ$s`=VfTu*x@H^Oc{AC z9&LnD{t3TU#mF=b204qKgEsx-Da?$+2P-6y*8G=za}@qmzMEV;zrXL(H^A|F`TLP8 z)Ui7C=C@<4KZ8xL5Fa{=NF-e6Zuj`s?jz|MJZ@9B9Kp-%>6kGvnMJ?KjnvtGuy>RRQ4 z3N&9Lr=*N)LlsPMS<^StnjDHI<(3nNCNbmm%`79hsfk>$XCR!)rtokzyFsRct1Ww> zGpUBiQlS1dNQ@mm+uI-LUo-EVs9a8nqwhUN-tq&&lpqFWSeXV!U#-V z**1=jt zEU7A>XNo9(P|)=F{=ds?{Nk0iEYJjRq4ayRlQT8RU&TdD++zLTO}k3xyp~M}d>m)| zglN)uZsxJIRB568XhC0-cutX)Y^KENbISQ{i#Dh~L0Uu0ofBcT`4Ptw_f;JJG2cydPq#X%b!x?`E7Rhjj^DjauT@Uss^Iy z04=d+R@qaMXwJCUHRZ!UBGul)vFi40B5K^CYQ5#7|A?D@VrQC!8mH~PT0F|uv?Kfb z8~((~#SZO9uCyAB^c~yvwLQzYgavC2T3Njj&sg@}+*?zmek1VePw|&<589&|o(NeH zsmLMXaXYxOnubA7jQ^<8uf4)AZvt)MA~MWiT!-cbzAXLU8@LIjUWvUY%WqP8XZfF$ zDSZUmW%W+Lr@m#h*z<+{=7(jwUc+>^8Q=Ek?vtL_xXNh_)tNn=3YHHpi2Ie3Tc_df z>hRJJToYtjvq|~W{u`6lsWR$-fmXp&Zq-a5B{zRb%+X|vs%Jb#1Oxq4Gyc1m?CDpk4C*pQrY`)P-yD+(~?%Ba6mM-j<8d7eL3$aq=f zaBKRO1CbHcVU@trq^MoHD(j5Y=DAJvF@^aQ-u+$*lUB`{obt?U%Z$?U9#8i8^JfQL z!&$b_mjdi{Y30XHexm&XXaDGIynjBUx#K0~Aa7RVgjC!kit$`7%hWXG(Bn5BP0HLD zk{k9NmN`j3sh&e86kYANTYLusNPLsJn=y?5x0Dj!J=8sb|Qd zr<-;zr9XaDO10$^IT`HlzD1k>-1F4TN(x-^^Aw0%)woH(ZRy3jCG0K@Sz;ho-FmmY z<7Deb)0KZcHiZw_wn=*qO=vP`#;)(gtHOBioTzY*BZxd0^*#gp_wt1B9o;m$4zZDK z*z9iwzuJk33Ryxx^LD;`%UkaO)+8An)yZiTUJJ80;}ek@YA^GX4u+eY&aMKde})LR3d% zWM!o%VM1hB{#_D0LZ+eO0^|#6W=+Tb#JXJ3U28`^1-?f_l1Jc(CjaZbmyKqT0plZA zij*y|&f-Br=GXW7$o~0et9L`>DECXiAzAa@^v-Z;2@%w2#^!J-ogHMUxDD88! 
zKqpZ+kNWP$fPUw1`qNK=|5%EJc%;&5l#37V+6a{Wu5z(ZEWYVG`1A%)YoA8?gYM}W zzgO#{Nu+SX{{1i^3Zj(HZJ$I|>x27T=G)|gtMLaJWjwPDEasOpoLofVgmF{p!jF3p zS(qSyrOZ&IE!Isbr-e(!XPmMwX%q|Sv?ka4wkW7&x0b*ce)y>^eDm>c(b#!FH5+wG z38WR=;O?5Sd1CVq0uKzf{XWp#`qqJoG$dQnQvM&+GZn6kcW)!f&A3LvYX|=8{U`(@ zf~&UmN2jpj&0NIYy6y)jA7vYwcci6eIM`p>Gm+dn>{JJj8mRZ6PIBuNYNR{m4KXg> zN|<~n!uj0PRmoTT*@4_J;p%HGYkYJ-reO-9OQM zB|+mHN_C_3Vqs?R8g&vMlB1P#_bp1>;U(Q5wZ=6N(y-7Ddp(mywUB_Z{wNeQGX1Om zB_p}_2C9Q~z?@gaD5yOsmjU>O)}W`FEUV;CTiPc3&$PQ%kfHy@Z_+N&^jt79OHib9 zCjwd-?5ga~y?x(Xth1)}IYR1|@0@vYl(rgHK*5C8P?Q_sAdVI}p?q6CemoSfan80F zsIa6!nji6uTw7Bw=I*K&6zVU&nh zsMbd*UrV#%Ei3m9>#~X=T#s=gnp6mAew@eH@b4{2J%Y0b-a%Y=0k2eQXu!zjb$|CD zY&%?D9P#cha8cvk+h0uh=QjO}9x$SDUUQ1H@C?XSQHbs3ofwWlpuB2O%RiE?&1Mbh zY!lV7eC_GI7^zl52p%%{KFz5o9`)kpFI$P+s6u*00c7Zx_Y;OgjdR|Dm_Tr!&Kjzd)M4aJQ?Nxo2(hKoa87>4S$ayeeV z=J+tzX~J1iX(!Cn3Kvm5O^$XsyPW;}+To@jO@z!yi_r_?9gY>Kr6y|cNH1;G*_=B! zs^b^Rek{yM`O1rj1p+FKH}(6^Xtb2v9BLow#5zcPVk)-!!1qG?(T~w*-5;od_pY*`NzuOBa_P|M^2J z4CJBKKvaY0o>ib9Z-7wTOWs)7bI4!#Hj?kvj%7aJ&qGw&byL_dL8C!ApgVmuRl{LU zwvp?NA!{rnY*DM5p)Kl=bG#yFTb8NL_Y1Ufu=QilweowiKq#D_x<9{T{!QDQFYD&= z8V}DIULN2xUwaAu)3{s!v6HsHD`7)w>I)8!tMmq5tbzk(Z}FA1C4#w?W7)(tcJG4q zytCu@>HtC(0iaHo3u;;Ps~!^@8~=UFa?l5UMmHYkNzJpbayDj9yg5lLpwC%nN@ZF# zVP4+g_v=7rT(gb&8l(wJU(%3wc&5Ly8cV3w;1h{uq)N;riYU?}Tsb0V+EoXOH++pY zPauKal8UnxD%223r;~HC4z=X!)wZ{5f}?|@!ANqOP9Vo=Uw}1If^N$w? 
zPexH9+g@&siPHRX6%cKCC_%I17xx3|=vT&-LJpq<`%)EI?$&Z7{R=iL>_W%-iJx>u z`A4U&j^1d?Q$^oMICUEz%m)j)J`_C4=BRNtuzFP}Ct96Y( zQUwpKt#EU$9Nd`6=gJ=3q1{*>K^SFTk4j$tJ2h_+IQG2JgzBR`E5hT_J7=p9dtGDk`mJk^YJfsiVv%Mc%NC{F}2oB*c+WQ z<`(b(fQ$3r_!wBZz!DBC;hwd?;)(hJ2XwryU+=sY9{<@>^k5BQpKxx}`2B4v0S(euhVm908m(7a*J7zM*<{nj_GT5wM_S`iSr=6OW^vWf2sM|exH-PUBH{Lg( zguR}~omy_cbxh999GLC13NTTH-iHCVy9Q4?(@S>^X#f`^tbnb--WU2*C3)>IX&0bg z$X#-JX=`7PuT_m z%jo(BRkep@5CrEjMOd$Q~?;hfx^>WYL7`{g~^oa zlp|ClEmo;1=iJ56vMB-QkAhYi%PS`>PB9QxIJBp59|ip(Tc_Tn4^9=Ju`BQ)02lk& zBykZyitKs?gPkgP>OLtV8P&bW8o^6-cZPqGxx#fUpeMf&Xc>8FhdjoeeSo3 zfVB6Ix>>;-PKR;QTmUma(xj*c8T1lPvLzkfD%S=GbM4+bnSo?QMyfLO@bg7rRAM++ zBFCX59tUv=m7W(ombrpOtr$%O^s_trpY(eg(*_tjks`~8bhm*UDWFLsa|_h~A{M8# z*~sdcOc%friTU>Agp&}Z#^MH^-Qj1+T`3r>OMn+3XjSR8(F^JJeu6R-nJu9-?q#< zUnF;}irc6DTmw?K?w{4EoJIzt+fOgb{h>H(4z{zA4n8Bc2eU^*=e5oa8Hb68t}*%f z817HzD!C9~RsUt-*lhnE08q|$iltV6h!g$` zoCRRsIywuWY{N_T>Kl0~t})mQ1|-y{!R-xYA6{Dl79A*)1~z@afbmJVzs*lPdDki% zRl<~Jr~4c?coB$zVH>{fmdL47kzC}iB#frSd={t!ApU8SYH~>Rv(0Hy=g3`md^UQp z<4u6eg@4LBFLOrv<3(7&PbE3p04n;FE=SV@uZ4ItH@VAK%w9(dNc`2&6>zJscF71A zQ`$};u9vF=@XnB6xLe(M>C61NC(^wEDcc-SB&!*~^_m&milMYA_IX&%fwOpW#m0oU z0Fc3p$n^Ck^w+oST;$)^{>VW2j|ZtzXKY=UPC+?eI{doA4Bn{3-4i2mAJ6#yT0vDU zDN9!_5Tqiy)aB-HLQVfS>6GhaZ>wK2JE6<&>{5q2AA5Zh*uXZw7*a1{d5U*qxykoy zM0VaBd)<6P^krN#zxg+a1o3jKGxR~F6))g4aUA{v7?)l!F=v7~*W(n#%T5;D1e&C) zryciOhu<;sA;L)w57bR4|Bkh+D@?+}n|oz_qahM1e!lrQ{^JoSdE##Qs{Fw3eH)rM zy1i`XFc`{X6e1nI4y0z2G`+ifb-6mpRE-5s?#8qlrt|Ch9R+QsPV-;}QSu#e%6SG( zWNjw~v!4fs)1xc9o9FF>CJ{6xbecD&JUIZ5D?A>6ixCI5rXSQdl4NV13djDtO}PX; z{I3t#7VJ|R!$CqTyXyq2D2R));qC|ANlZ3-t~$^Jlov}9f>7I|@b^V{W;|J)=+K_B zyy*<|2k*yKiv?|UnhlB_+~nVc`<1UF^bP7&j!|##{T8(svL`Pmq?@-wxPxy~U`IQs z@Olh02r_Ri^zX*7q?Y5*j648jOvJq!82XoS6c_->B?TVO_T%v?DiKSlRm0sNsHhk! 
zbq5a2X|(m-;X^5G2jZ5A{QbIQ#qVW^|kgTFIAyGllXA z&v#s zzb@^VXa&ZSoS+jE2a;lv3de@rrJyh?DFIrh`UJgHaCU(3(u<}RQr8oxq+$9G zhk|Ewj36|ATTDaRJs#QZ@@42c7rA*2kK7+fA?tmQPYu8!lm9;14hIo$F@Q7)4X^*2 z)(@Y@2*R0VjG6IIs6tⅅMV_+^dl>{RhEl+@qBoWGbl{l#824+{EC-S8{r5b#0b& zGU|?w?^wxXjML<^$^wVB5U4Pq9b>`vL`_KHXymx5%&-c(= zR-aXW9BOELFO;@2QdRW~&FtlKkNq#POm*G9b&EM`UFY+y+*^|43%h>if=Wtr;}z{S zH8s^Is5K6j+mbMS-8s-`ke=NRwI(0i6Yc3~^0k$)cbQU8&>EUIJCfNny9J~*IBr!X zjx$kFMaJUOn)Qt8!KP_48mqsR=QEnE(f2hIkjd)v{d9VMJJOTnGqE{ln(dl-{Lbc@ z>B!^_0T`D2=?oQ>>fFmAmv`5JU3&GYQnqMC!lI?7uYl4`XQga?sV^*?HW(`e3XHs^ zyZd^Lu_KUz1yv|k46iQnMOg^%t;rMu>7c!?<`(F-5{02gss!vJ4$o4`(Av~vja;7- z&k)A4<(ObkVy2e7y}&dW&1d0ny$z zUUPg&3bYhu39kqXC>^|@6`a@oQ$npa;)&)#D>z+X<4Shn>F5knPc-rb@aOx!9K2x$ zv;}A$)Pn7iLDm#JsCctS9)%=;Z3FF{4R5Qt1zFwVlE8~&98Y9S2K z=k*>r(?ZLj&fdy5@d;`0cj#zpik`L*D{!QPUP_afQ}UpNdcZUN%E89B7!teApQ>Zu zO{UeQ`lW)S3gEA#-v%wJJ$ntd)jFsLOI&_-+P%pqgZh@02;||tS8uGZ@yVdP)5No+ z#@3W)S^5&sQ3^1Nh}j(HtSqH^7t9~iLctV*{NvBGE$8&(n%B{H}Y-QB+iKzg&~3 zxA1sSJFjn`6oVT`I~K6`YYy_SU#iD4#-YwBwroQ) zfg?XXnLbfM0j0rf8*5ADq{m0=kCl4Db+It!_)} zsqDXwcL@M|pF{h#Mbwd>H zLkG2b6mpax zRCdQDK6}x{Y^XAekI15V0qb>iDvd;2Cbid^^+U|~h;Et{lP~A~N(T#8mk=KXo~Wc; z3N9q1`d#q-^w$pXu7FvTP zdLerqI;S0~N?9r&<`n}rodSMv-BJ>$8mXuG3kHAv5Pg%x0rX}#yl3>*U*xEKnuCJA zvWG|ENSg9#l7Er-yr>}|Yc`zBJoD43`ZT0d^`O;l!fRuB_=vt~{)-%^@sjG&Gjq-2 z`W-4$E1YvP_#`;uZx?q{SkwHsjzzW0#Gnhj|8;RAk?8YcO4vD$OVGI!{HZ|fw0h(R zP#QdY=gET$wP{&6&;MYh65zOn&tKwh_5n~_EKjg1v+3`M?tBf_RM4<8S(VM2VIgy! 
z{p;x^I$44!;Al(MI7>SO%zjJS_wVwja+v;QDr{pg*pfZY62kU3#!wmX4Q1MjgD#iB zdy;-1h%$Y5`|GE|6GG0enp|}WLNEcM@43G#AW~n{HoP-KJrm6`!|z-Q{_}6V`t!eC zB;36$t%by({r`rm+*A2Fr7IwBeupq1B^`xg2~fz0G{7H z^hY>IMFFrne@9wovfBe|=2<`Sr6f8PQ2BglW8ybF&gGhn76U-X;C)!263=x?PbFqH zSyGxt^9}L_TGPo?oCP3Rn@SL>Q*~g{6Eh?pV{V_GcYC9}e2!$i$QV-PF#X*HXSWut zB78bZ1JRgG34Cej8t0uJ?rtGE)%#VGD_r#q0Vm?bc8=>xbE`$97pvX1oJuyj3WOTW zBRzw0dvL)jYNrjr!oUWxViScp1PkhxGeA!l1S&eY2tjSjde8S-K-s3?AM{Lz z$a*G=Su>O5@02Q3)n(7Jf2i^KdupD1_cOsphj@Nn^-W6L7VpbSEOjUUUYR7ON@iS> zsh(##jQ|D=WQ}0afVQaWE2v^)1BU@H!eigEHsIc9@Pc(U%sfJ$OHhCE08n2|>ORHQ zYu{s-nGjKAw&wbSnqTG4r&uOV4xrc8)sh zu4t$@p)2{p^0lLNr{*sh^ug|$bi{G!(f(KjrkhEnR)NrA%qin`%_~7>_T(vOEKFm= z=~zdMux~NTbeW3kv(HZh=L1@2i}dEjl+F{PTE&h_w%^=+=jSadPsM)io{?_O279Nm zIeDseGLvoU8?IE}Y4ZooM-kd32uv;z7|gVN6XCkrtw_Y#BNmes{JyHQmsz>dL&f`Q zPyq$!QCl77!&YYzO5v0b$OC6^a}2u+kt;PqQog-x1v&aeAEbr7*KTVGcTelQYa?%0 z(5&wIU0{7vSK*|0qwF5j6Q?j$jawa|(-kZpoy;M3oJ6T~M}n}=fq={6Ap~T=>Y`-v z{yXu`Jv*RxFKg9iUlh(_YIEOLdgnq*&z6q3N=figcyQ*Wu% z4YiRFtzNEp93=i^)~U(BRt zgu=8=^CCrPGgW?a7)q=^%W3FnGf>Z1nTQ??mz{#r{a8_^Ob#E&YtwNl+9H3?M|;#} zLrQkVvs67$r$EjSYZC$q-_BonnePr4Y6Ws~@1@I+k>#A~&SH%2`nK#)XC|JUc?Xk^ z8t}d%Cq|nTg>_me>XbF(u%?GDO%FW=jOE%tXuHmK$Fm6C!m6ORMy=*1B2nRy#%Z_E zJ4LU-f6}lxKRc;!%`;_6)H;0YwPw%Oz0giLtsbk1T!9aC(iGOLu&BR5uUI4__vT|v9RV(AAVbVY%N z^l~Iw1u~x*mGJOWVPf2^&-|Bwfe?TbM2%)OW{%Kxmm7_-t(Rbd00=@?7TcA1`46Pv zjteZ1&_VHn-bgWYHqf_$5qEr+TeI^JbtGtIcpbRE74 zu40d)v(5bNB3u!eZ+afSZ?&+R1$Rlm&E&mzTmQ=%HGkJ~XER-ERo2IKo>XU=YSs@N zEH_qpJAQZ3AN*iP<>alva-MfbJ8%6Q$dhr;o8K-X{W>BJbrs4M;$eY$}k!Ymk;5oPBbcvgo@_#9Cby zw63BkxSEBA&U?wl#RVp+uZd&-f_doG7^Ze39d2@o_Ea!;9&a;#NSrG+-xiXPuu1)*2j#3?7xv~vYMn$zP;6uV7j7;&l}Pj*wD z8WL~U>F0?EMMbOy?YEiq{=kq5udTVdV}{C&vYvN^e5gbQwS5HJ4yp057CF)Ng2uU&8-BNt?j!Zu~1Fm@v}7eUId?(NdR05DITu z-0>iWj3ujHr?`CVREBvIxq0)G<>LYi?OL8%0n$fmrinz^`ClE)l%QJoZ9)MUaUg)T zOp2OA_YR?f<9gvnRt8hmX@N8{B9$l28 zD9lN1c|PQ41R7|p4chz}Xd4+K_f8DPP8$x>xPVmVwL6Hi$Uky@I?De8Tw2OEiNrFk zVIbrED#w4)Ua!-zTc?n==X`mLNaIYt5Y}g&JIRG>+%HH@EPDu26jjAh!^a!tr#<_- 
z7sA4v+a*Jy{Jz|Xu=*)_XqBV1mg}eC;{dc>3u7c)$nFmbQ;l8!wS>`~i-SyGd9$-R z(8qwzuPHMvL~{^e?tlcT&`K$v*oo!t+GP=WFm^sAt2P56tA$iTmwZCYESBB-0KVMq z;`8xJ_tTmst{5%I8+1Qks)5f4U^UGC6~Z=H9$83Zkuv8qBnJBO@B%AvY(9hlxtS!V z)-vZ0Kt(i(_v*G2ib>=Yk6b0sA}LpwKX0Bdwyl5nELCA3#FpIbqrnP4f7OODF61o= zm#b=at(kbvS?3nmca)rMW-&;2dD0h)agtx80QWerWA>nUcq8pk!){3cQfeg#U$u|c zuGf-~$i%qoM9B%bpQIZZe`p@L=1hiw=K@mZp*0iJ%i&&WO6cxnO(sQQB~pAd!gzH< zH$pkrg*!8kv&^u&3>lC~TrkgTG%mS-^3E@m{@IRQnIeJ_#kr6=m1FBQ!lCXb!by*y za%r3Iip$>bA8)esRE~B!`1}{`NnmO}Uv0dCfncpV1_5t-H{N*fG5vLRK#LoK%k?7N z559v~mg?xUb#87ELiR>Az%TtabXLjNvUy!0@CeAZ^OuE7djc+$u%hOuuG> zKIvU~OY(TM~fH8Am-Xv1HY_eqF@4QDK=s z)Tqv84?=S9K^43XyGyZFVn@v{Q+Eor%TCA;sen|eRv?Fp6^F8gi))q@M;Ycx)rfK) z_~8h;LB{!z)_K^=`IsM%<^(r|iz;oa9WwxaH~Ni6q4n!Ci-HX~ly#=<-cHP?+4!ep z4_tV}81TPml{Kb)5_V(`H;U2b$L0v~t7oIHqWst96(g5hc;0_Ug<9yQZ$9CEgPPrZI$<_43W0uq~?6_kcYle zB@avTWwVit0#gz)F1)6C;^Qm;JE1cs+9B=9lKCpf89UvsWdg2cU%SCsx9cZi=30S} z)B)WSLyTZYU2j?+W425%q-o(G0xhV*%N2%OmWcSZfF5(y9Fi~ldJ|oWvywMnGYnRW z8}_*}A1t}1r|||b{;ADAb0^(aqp%Qtfr+ViH}jhElSdMJALNyf`~}P*zeog?gRjYc zpZQ|00+V2UHoha$N6HII$;@lnWP3#<^=z1*^=>(!G17XVqj2)Yj3cXV05?B_a~k~R z%&dIb0Jv06YNx?kni(Y_2m zK7ZU+(xaKQJKuQ4a%kWj_Jx`1=wa^u?5|RE^(49s3r^7D;z;sRuEt`csH z6BahxR=7{6RsLHf^?B<)g@t`RX<1p<#lB7mN-4ocgAl=Q_&a264$jyO-FQ`QVJJvm zlJRYMh3XsQ7h0+#%nS7bUaV*Tnc=Aq8HYG-siM!zEZ z?tg6A$hgO3*7}{X_D-Q|)NYMU9q>$hdU4G$Vx!a9xSX}UTrB5Bm*#ROD7pDzvK;X zKR+|>nyE`OQi~Go@XGw%Ri9|jrTmyf$>?Bw4jmwo79LhJffG%<0pAbFWVMhL!vf47 zZOXiG&)y(4@S(ps;@Qo$x7nkYXIpOUT$U!xp|rj3=T1=eVC44;Dvra~5_uLPIy}d^ zRwl^HMhkGsB!BN53Bo|q-uj*jSGjJ3ZQoOQy=X!g_t|(F3tvDYeM_x(Xk?LLlsL-& zT|Cm-Yff1O-fQDOi_kxKUR$6w;Li}|9fDtS{$9k^-@aD>X0BzX!~W{B<#2}iP04*_7Lcy3!%${HeCCdR{acLPs)wV)$w3#%*EhzN=BM%ZzE4y=(;!D@%3aDz$I@I zvf1W@8WU7dm}q}(9vOv`5Xye9hwZNvE>{90-j*`G`{P;IFDJf+=CG{}5cO7&!C19G z#*DueSb|wG@F95pwD_$2(U)cU@WyK)znfxO?<(&A8lbP$o|5kh*2AU?o6`O4X=@@{ zJ1UK7ZTL+SC`(WA2vNQSgHMgciE(>g2di-u>Ry1hdON<}tfO-?>iR798~65OP5-QNi1W~jSa 
zBCf2^XNv&>FBOuX(>K-u<6Mte20SM_%M-OGs&Fa%OkrIkn)kkmgNQ#vU748LVX#&0Z&*)-sR6{yX-X6X8DSUClx&0ohU)I`N zstu))Rzav(vTJ9TFlS*l6Kp|bd6g9(kaJZhBEDa6oZoWey^+7JWx%LaKuyzb1T?0RfC8zbXC@>t)JVXe!}o#n56^^44`Bp%Nk!s?OY zpqLfefbO>gK`N*~?>~aL@7>#^5Ik@x#bWiX(icnFQZf-lR(CK~?13UmHpbmz*!tgu z{W1!T?Y>{Tr*2`@zJXIjR_|dg?~)ezdbmTA#n6S5R#i11uIaHJhjtnTSqP1WO22h> zmrb>aQmL#SWSxq1Sdp@A`mPh2EI$(iD?|f4qXc^x2Z{%+)!Znj+W9d-Y=`L||0qM^ zRod`vU*Up=#s^F&Xy@L&X047U_KcP*;)_op&kS)kY^~g!^!qaKsr&19V9@laD(@HI=`@P9(gSV>#htA)8~tYFdCS{H$C0O3eA?4B*p({b(%|DnSytL zh3xDw4y}AfCLjs3&NiV2skMZ_r+g8&eX?^uTRrhyAefHMHpQ(+3|(x^f8BsRJBu`2 zy*MV)@Yq*-eyko~-XT<5Q6YGuB85f`WYR>_v-j1f81tdRV1d{{lUEh@c|KeBC7w6* zZn+`e--jzmUy9aQOR`?)n{vNgs>!)9pnJ{JnPThbXtxzE^(Yp>U zZp#c+gD#pQ&Z!&5nR4x{GO|66or>cwGcy6#!>s&$t(?;Lt>T(cow8iNIl8WFrLAp| z@WC@P@e2<0y4}W=3HN0H#@}xd4MB7F@fmT{Cut*B1BGF*32sJft9#X!4V5h7{ZDKz z7dzKyv7b#6X4$HF;JG5Z#e^bs+pHEGTH}Fp>)*qcB=L1Vy-5pBaL93Y{!r_pTSTBP z;#ulU_zZj|21u=vw@#1}zmqs5UHCmX$@tX^L&WZg)w=bgM)x65GVy3u6&WIK^2fD5 znl)pqOZ3;S!@nIyhJY!~`C7lYB8QP!73)hLQE3O$8>8Imd7p*1CY)+r?^l4NZJ}lB z&R)S}wwP%t2YvrV7fdjRPx&6+_?h3u$C8vwzQ1n)s%2g0ENRMQJW`?=vY$j(w1kXx zIcfKf@@}Qpx>qq4r+pveTh9~6RnFFkDD91Kl66}fAF*lETW80UM@`MrA zW_w^xJAaiW(`m3kr(_*Z-pxOrA1AH-3=_|l)OWl6#nGI9^GaaTu}Xk&u`UEwrHnx5 z+`|lCPr5QOQ^Sxvn@TG7pY`s4zi@o8R=;u-+MOdNGT$6*htjF#tQui8XUe1omS+rH z4fKJzrvnmtb@|+*M!a(4KU;{{R+(x#>0H}efCQ*gY!t6yTjr zhspFRWE?oFRVKE8IZ#hwC>oL$;~9m9)w`yE=E~*b(8#j+%{O<34CtW(ipM3>m^vU} ziUU4v#nBSg>Vk$m1$X@-wr|6v zTTQ|@W)?Pv`rUIUms;U6%Da)l>=NrO43wmI6MW8p=KTz@i(cX<#lBdvF6f)f_K+9Z zlb@_T>Wl|7(9^$+z6{KJnb$l^wOz-xDu!B!ENBXJsJF16?L}-&S$uafscmML?6rzK z61}~2|D;}FnsbY{o_~D;^a+va0ba|h9 ze9Mm~sZqRS0przv^x>nJ?M5#*!sS)3MSJ#;ieA~)5ZbXdt$A9ui z)_MmH{A_ghzFZcfjhwq+2sc}MF**DvAvcVe;nBWOeNhuKj~^-8yxqPlmHXL6eWZ#* z(ZH}$7t(BbQv8{*EY}P>TRu^HGFE3*Fkr?il zW7+w@M`J1{p{Oxgot)Zeik|?V#zvL+RD7DkxISq5_8qNd@2fpZi1+*|=xTY#U-Z7c ze>`ikS5P-_q^>c%N%oJV8LQ_5kdiWn4p3VIgIzOqt!)^Z9rW}klka8*<0Q6x;W5aH z8nvOrBx=-&HWWG*-Vz++FyGHEKNqiSdY{J+(q=s#Uc5M*v!5==cnGR;PuTZ!|6iPS 
zaBXB($XR5U3?T??bR_QVgjTutELZtucv=8ad&-NzG1=O^+pLqT%=OWt07b>y^ z`=(pbSY^kjWB6Ttb>^p0Pz9OwGF9k>`T`=Uy=-i-d<- zBOKb{$64JDDvq0G$lVJg4iUh9<)>c(Nh(f+8!U6oYv{~Yjul*%<$95&{$%xs|27PR z7@Ow{rf<_|J}_~K@A?OA*DjlJj!Hg}&YgqEwYPhlWiQS;yom5Ho_tP-yp>VGDiafh zG4ay%I3~xlqQM(rQfJHZQ~#qv>>8w=+t90S+IRFpzw-kWd5*9Z-swEpE*Zgd$RuwECmN}vF4^O z9*b8sSVZ8I(jr#>gj@GPV^W76gb)0=mSJDi-MIFBMO=$);YoUE63xtM9(Nw!RGbCB zAp)_UYI*N_llWhZ$V@EAYRti&C~3qt_imdBqtNpvJq%3zxoIrPBtZhcZhe| z95d6_rk2!hB6_|ws5zsL~a2K;eG{@T3-PzkPF4y@g5wWjbVS03cMPXJa zkBlEKj*_dre~hwjJW<+D>^6~<*}OM8c^im;GMaoFa2R~;65Z_L!`1By9H&R(CCqLN zS-m@~PNA0_Gr#xk?JF1?Vu!z>=ysXT2UdYe#s2!>m6RKSi2SfMG?y{C3vZ!aQ_h%?eJ4gLty(bub- z?2l0rX=|T&ATN1V6B8Y3NiOM_M3_Z_U#aF4kX~M08r%jqsrh*Ik@%Ihr)74&(j5=4 zRptpQ%w|~+N?ECzP<9+$VR!w4MSDDINgtS9+x1gB{SX)#y*d-gR_y<^Go$2Z7xBpC-jLSBQl`X2@~u$ z2weES*XTCoM#i=tiHYb!^!&c(qO=Yk^ncIT<|mXy&dEd}hMlrkuC?ko_%wCT#y|Im zK5|74$Zo)<=`@UFcm0w-QmR2H%^dNq1WvsYfTfFTS0R`(MoD+yP;43llmA_GLR8>MX z*g&gTV%fWFe4A0xUvkpH|KS2%cMMzA@7il7uV8h#`U*NN_Wh$)Hj z2NM-q2c7Bq%9d-`mFw#hv+v;_PzFTevuEPlgKR(UJ2^lsQyNu4eyO8WQ%vm(pgR^) zA?k|NAE(dliH}QKTyD^bE&Oi4yuM0>U}?WP(a84QD(V`_lC+nrp4A2{e9N3&>>LDv zhM)gzRPcZ=j0fuMy55Z-00=R!;ANNX z!nR&9X%)+>TQyl(wT#lI@2gd4XMeY&DcowtII1;40`~7a?`hy?x2sv6jeJhM3F^D< zlyMlw7Fwa3Up^#(vZiEEgdThhC(kTk-Y_ZWkQUBItXfO!-2?K)nsXx-Uyj`F4llSn27F7cLy7kmg3+u@V+W-C#d(Pf6))CFL(0S zv{~Ap9#x)4DZA-}ZHCSfPg*u_&M62SHJNBYMxkO&IBne-_??kBVBG)qyC9j%c5B1; z`n52iE3G%yeK$K9E{Q4@Sfxw^E5!C?3-fQKfl zm5Yam+5KD{7<#%th3}D(dS2hV>mR;;c2gH~1fQr;;fH(~;gcCl-(>BT>jJrePutASUtWOyUvkc{ z?l%7_x(#-bt#nQ{pD7bf@2WJnxw8qtV#z+uCiZhMb=Y&2abhDF*OpD9idbaCSM|Ss zM(XjsKsyRol8n(_u#h-$@1H}nAh-n1#q!_rsi?UIob-~W!XgYap!_4`S-fwj$e zuj-8+eXzOLQT=#nBLw34sT@oyJ_CqKcn8`LG*Dy1AWRLC_HP3jME=>0I^_c{G zlL*6WS252_Ok~hOArMHFmuo3Tg0jOXb{x(z^m5*?JuywSk5{us^bF>&NbzKQxTi(f zj}j|=WHquqnqrbu8NFzoQ6SOStE}RS(Id@Ydjme#Rq#?D`xf@{%-1?7i8FbMFV4Cv zlA3#u;n6Wr)_%k?;;lsxq=em{X1r`GCVke1cRHadH7KcuVxLzI8wR(ksR3OlN`5?El8E^FJ%-2T_aVc<&wvTVUDXAQx&HdApK>wY3BpEa 
z7aZcb*P;P<>N*h8Hz>0Dt#*rL2<;uSF9S&fpnD;Sxz}aaV@g&=Ko8j2Xt#7fTvaE@ zzm0YAD@FRn{N@s+$4A^LRclSuMndDs&q)sfovsaV^4H#=I@|2m-u_8Pih^*_?Cc^# z2}kqz|2Z+FftgcvoX1+)0!Kes;i$nQCqIXjRroAVGi_&Y*CZ9VJwe>juvG>G_bb?e zl!gl)81o!{y8!b59+$t)3g9aHR8hpU=ieKpl+w!P7VHtZ`br$w@20dcjX0#v#s{{(?8X(zb%cZ>XZaCdxL2My$GXB4poYAc5uRTkf`NhTUb0mQ; zkDd*AJ4e+=(X+L2C10Y?k-0rd*mjio;ZgL1`YZ5tnca$PsLGN2{9E=BR_#Q>@RKnlF_vmZYD3rn0&|($&XPuYF&YX4c z^ysPb-0k}aXGo(Q)TE9|s@=lHW<$3}rae(dQm$s-Bd70%r$yWNWwUh8E@!Fd9c?vS zxE^xXH&v5wz22;m1DZs<~N+5ABq`a1t@x_f+h?lr_~{Q!<&<|JHS$%@qy76-_> zs@Sk(+%*1KagF{uk;)0;xb&HX?hWS0f!2dJ$e+5BUDxc(m!K0aaCklO8OJhJ{&V{V zC#3Z>=|y?xK=ZB4U|k$`MrpY20zj{7RD`8t<3>B1VI#BQ>H~N^vgK{oqFt#%-M_rq zVB_m9J?XTUBM~u-%?jS|D|#9yV<4=?0C!c`%OFT^u8^P6_%BgiiIoE1vblhIOxA_f zs){1?gO!4G4r+yzXehuLH>_YR2Hm|Qnf)v!9@z$~OCbLKG2Qh{>?4UmKcKB}jt@Ci z82+)@)`h?=>^^fFY5&;0{k9FN_3*-WNq@-sk9YEdkOR3wb13G|1tSDx~N%o;3xn`ni-33=#JYv1z=^b;-K5 z$e#1c1NX=H{sDNIjXLF~kh1yCaLWa9=v=p^mr<5VHm#w{FlTs8GxhKd*fl(p@x!AS zhY=@*k9%018uxN_8A+zvu#qE~gfzAJam)*bo!Yn>pcnY{ePtYBY_f^A-ei@S6B z)}(oUUG6M??qK{&Q679q}i;+0ONL1pBPrnX1(!Wh)oAw7SEx3~K=f&LY+q37_9iEe(~W zegM};h@j69UtQq-ax+;mjjk?P`Ji%~)XeRl79G0nQdJ6For#g6_pW0yA~xLU&B}e1 zab?0qdS3FSe-7wKgydP7H_v;Qv>YWaflRNB~*OLzZ< zhfKZmNZ4;`w3D(R2%@k5Dg6$*_kko*ubRfcP0F!$XKtH+JSuQw8fMk_BF=HF2y(h5 zswnV@FV`=w3!t1}4_3`lt5=_0>F%#Q5F6Qtp2>~ zh^X{ON8HggD73cNt&sTsob#y#lpDg)b@4{U?a^%kO=CI#PnwswvjTu|S}U%*LxxW^ zAH`^8+lWGTtjy>KOr&fOCeuIM@%I{LvcJaEYaQ?PtE_Ie6U+hvTBc`aCdbFqSK;oE z`JpVa@vbO(V?KD3Z{)t^`7b`-Vx;zV=qFoE1Bq?iFV4DE*|S6FH^-Kq>}Sfm*&sZXE44LX5`s;zI0MDv_e zazwEZnNQ|M(IKpRBXtQ{{THT>rZ;EI-U(K6-&;Dai)}ysnBbLZ1*?=T3@MBoH)J$kFc;4g0T| zg=2rythr`#xQoJaf=k$#x2dHvT@x>Hgl0)4BV$~BAT#m1<+b0HR1^a^?7An=*WT)| zI%WHE_1T%{V&rP%0fT{EK2y}FT{ABsj4{I`<<`}^z>p5QwP_jPudy%%mzhg&AFSpP z-C231eC5R(T7gW;=$rn13f~)rJ-mPPcxe6F>&D$>iS=fmr9m&Qaiwx_`*{HS_g+u1 z2%D2t!iGNmoM#80I}EmS#Pe48qc{*@vQijOL>O|PUgF|7KrXoKIWRFlqjv078a?qu zIJ`&4umGM^8CzcJnV&A&)p4i0-8P!wg34KgRu1ek+DCTVtPl2WP3)&996~ zuu^HAI#3~kPk^)=E67=ohUd>zR*1Zw*oOvlBJN6XWk7wgm{G2Q-3pik_}M)-cUw1- 
z%i?s2`|CO(yissbfTQ(j0Q0}QWjd`T z%&@(FT$!OkRW#pCv}t6!EYZ@)*Vk7rc>@E`RylfR+OB>9}PxX}wv?#47=YLp#Zp&dJo1cZJVNF7>~S8WaHHA21R&DmOU| z0Ypf$_5zpfffNr>dknFHL4d}WJ4E1f#Gzw^d|ks{e{WQBDp$Vt&HGC!O4d`|ZSI}| z+a=RuxUiwQ71P>GE#l;BoNmJ64EZBbL^5EPy-!<#sj4=_!_g(Jr2b-iIi%Nbx(0pE z9(=Pe(OMkS>GFEFcA&E{P@QgwN^$g4@r(HJ*U4bB!v=lAPo$q?Oz6t_O# z+|5QE3MkSP`MA&CIr<$@7BMTp72LW5s|Rc`Vr7E4Lw`T~n|4Hv3e(GaKtV1x*uDYd z$-5r!Ia;PPN80t$^8jb3t=>i{F^7ABn?d=_gxF)w%-o30G0*6g=lDLZ_~iX&aU&gx z%lcE;+L1SMosbic&M!j9Nin5Kp*fU1g6UxVye5H@kHK~IWP6J8_5Z^PFCHtI(uTOt zp!+|+cJH*|@buHH2bI-d-cz~qX`vS*9M1Eh&Mm;Qa*ix5(I2A<46PB{K{zGW- zczsF=VfN0C{wVWenHxcPVDa3_Lxd>faTSfYStF_vh94GtsIFj|*SE-2i z)$$|?H{ni~&+7Vc_KO%?rj57YIo_}G8gZYF+F_?r2?zuhyrxq+`1K)AB_bRZCFKD_ zatP`q;pVB1rZQB;#|<7ktX`v#an4v6R_MYeIDI-dA0}C==H(fWxBhJ0>|y(8kh@lo zQYMgOH+~xQvBpy6kHfF!_h$)F7>}x#6f=s#o5~$~dJCNe+7yF(x+3|Ad=zx@NO?s8K-9?*VYlasevfYj z^>0{xm@cQR$9l{AzBq*%=dpl(uVh0{#(3{7p?OR;)Lo^>F4g5swp&(^S znX>e42j|mgnO!%RUtFlzrQYwZPE1f*d?)#%ZQRsnTvryKVXQaM{yW=*jYxN6VNIwfB2gO?wDNWrwc=cj)r7uVqesMJ9 zE}}Ttphj&`0U_M`{}OiG%q? 
z>7TQl{0&b(ejL3a5{4@kAaZVlr`F_hD0pp&)_Mq4jj2|J@?j32qUQ^?zttjFts(il zM`y6I4ue-uAN^1JjwwmIOx$7MR|*Xkc(L%bOUp4its@LUndG2Zmp&YcPmadW!xSN7o719v`b((?|K)xB&nFVP-^w z>dS|cwv+R%F0&p14DEZxF4yj5C9qQJ#ujb6{d~4*#!d$f34uUqad`=2CRj0*#Rl`r zpaqi4uYFmOdoLb?NvCABN_%o31R8dA)tJII&Tg}MjSXM!by2Y|pO`iBY~Rd(*k25J z&OY}atM2`pgG4sb_)d@2_BxN;ci30rR|(gi%;f4N;M2*)&FqR&Lr6@7QFnc)$s3iE z^gElU1fj^0<8Mpoa5_q)A*Ai_UVy@7W=SFLf#1fafB%MP)rB_|F{#_=rdL!4o!p$f zzu_gV8)Qz~!c3{EdYv(4$Dq@b!j`gf#?hqR_UUo0CneIX27!u#*%Tq0LHa_Jl>Y?o z1#5ma%H2$?wM;jTuX}t=o2|pqr?dX4%XM&c=}n`B^x-4rnwPU*N*-BcMe79J zW|+wHg?!@<;oNo8_f?h*05?cqT)ROKWzkc`vae|=RvBootEMMS%g(+uX7qVDVbi^* zS(QwA!fHf_Ii;(qcFbAa`REdP4|-kJ%bOygOq9&|_j`ad4W8Nj$-#U4s2oUPvtB*@FF;vy&-7V|CDGG)q6*#d#ZuK!Ueb=x zY9{1({H23_qs}*T`pW|pE)y`De_j7w=fAREx zQ-a|%i-;$Uxo_5K!&I!TP2x8*^kv- zSr(GhH71IA|Hffg2Idlg+##u&qAAVvF0A}#;f(@w&B#n9F=)9a+BnVs`@6rk&;{zD zxzi9L4^%TLP0vJ1c>gzQ>OG&bGiJ(B7QPi=)PD0{W~FL}g-u6yveK4F&!F}sP+6Sv zD+5Z%3|kW@T*BJyUAME%*rDdh{)=6#@=%<_9*jNws{bHH)HzAkg8j6x!NRiE^i%W` zRMP8WG3b`ex{0Qevcz8%vsf0r%7#;_VY3rZ)Ex5^JAm*LE-{<$Cm<$_)a7k&3@UDD6C|ZL>uW3ybsoEv_ty zRJ357T-7W1%I^#ZBU}IS*?EaYDN-Z(oN(dlDHc~54f#&1knBg=(Q`NQqTz$<&f@BIb;KLM%J2SGgoR;hvydgoL z;s4C+dh}j}-}yRLLhf0DBxAKdaN{iFm=S=Df?=#UJ!1S0I4&Ll`c--8RMw?YoLMrkauy_q<4)& z>>MxtrKYcI>tBL3Re&$@MoXz#e{B{YrZ&y*E!f<(gX^^I$XCeKr500|q8^M9y2aQ- zb`9e_l~d7J+9(KLVn}&T3tbk{_ok&Aq)?BI&8`USsD=<}#Tie`_GPvV=r#YSc}xa3>7_0DO3`@T<_T~!@ZulHBWdloxw0Plkqa4X(( zN95-(r$Ua6pV*Y-HD0cQx~=8p)wsB>5XR+BkA#wZr!NUst@XO}N+1WE8LwQ*(wCq6 z=;_jkj^K{7GgLG6J^Nddi$QKzDER-a=;E3kvcD|aB8>0Pe*c0!L?2W<2Dn%4k@=#h zFMiZ7#(B-lX(fG2npJc;pr=Hb>VEii-ZD9xO+Db zpSIr^#t%$c6ThT-{`Q_F-SifmBJ3mf-8_HVdhXW(m)ELC(*E3@E&XziMd))-b_poo zwI^nC|McQlZhKoJHeZx}VH0}kJ~W~OxK0~ZSM5wo-YdJaif5ywzK2!JrFPY`+{e{G z20J)Ti?)07bjJT{uM<~27nuD~9e7HLRdkTMg&8MMn!)VlhfiHkr`ccqov3(ZS?+`{ z!N4uzyE;CClbzFo)2praG*1uL*?z^=DJTB4>Ri=KbY>wPbbdZv+D3^ z%TVB99G|OQFRh$ZHm!s=5fqaS47>LJ*|>SSd)&hX-`zW8Z`61itGv~CDU`o25@hv< z%ToIJz&%g>KV|M75?UoKOJ9XCli@T(+VteAE 
zro7@c`)gN!Jf7q?ZHs?Q8R!rk2iJefnbVd?zc6~k@03&OGx3X|r~KDvCEx(JkDF4m zdC6=Q+mn^w#TB>x`u6S6np&T(-{(NGrCE*QQEwj`>NUMmJi?Y8@7Z20|9--i2!Cr( znmXdqzog!K*WKSU4xi*@Q`}aT!}C@Vn21YOEcy7gN&%>%p`*eXc$~&Ry_b5&c3fjq zG_%?ET}$@;``R~JD;M752RSZ4dGDloQ@?EZo+#S-yx1rUcz8_QwV>Vem)P#zneqKY zm&-*~(Ejod;r-z~FE!sk=Jfo(XsPKOj*S;Dsb!k4|F!Y2W#px@d-I!L|9t>d$KbkV zs)_$4?|bvld(3+M_vX=q-E2-dH9ZqefTul7I#wy4y!U+niC-BX{{c()o@u};pTT89 z>DO4*lZSlTm+;=3KePYoq-mZ}Z)*1_y$pT-xx>iwzdXm>H_wB#8;wEcd%F6$tOFkP F0suB4T&Vy6 literal 0 HcmV?d00001 From b569f4b282a33fe51367a8a679b0abd64db3e9bf Mon Sep 17 00:00:00 2001 From: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Tue, 9 Jan 2024 20:17:18 +1100 Subject: [PATCH 0100/1088] Replacing buttons (#75) --- images/Discord button.png | Bin 14224 -> 13767 bytes images/Kofi button.png | Bin 18533 -> 18109 bytes 2 files changed, 0 insertions(+), 0 deletions(-) diff --git a/images/Discord button.png b/images/Discord button.png index 3fd45d4600e44d784c6dfc6a6c83efcd2e77fd0d..5e3b56d6dcb41d1969aaed75bb9efc76d462ab2d 100644 GIT binary patch literal 13767 zcmX|IcOaYJ*Vk%wsL@4BjkZ?RXlqr~td@vbJ3&f~+OL7*$aay2hK2#4s{E3MhBknP=G2+< zbQI)sZ%hsi4L1!ySy9hBZ54N+=9Yf?(K=}o>!r~G59JN{ct%M{=?b@^5;r%q=fEk& zJHNPJXM`vM`4ezy)0Txb^A4Prx=ICjma`ktsDX@2~l37pT8~Dk9kz5XY!B6oq1 zmczUp#2q_24Wbi1Be!y7^5Q}w*Ho#W0~vP&XM7vrWR`#KG(|Pig5d_Xz8~P!G5N_o zHm9B!$G=D$)v)!Mm|NxPZoWo?sZ-gdEQCVtV&c|!`XI0RlCJ`%S|GcgAKl~a>9~@% z($7-NY*d+`opJLw8O^-U2TYam5o#$;YyD-UPI0@e^R?5bEp|Y^OJMNxUHU$4fC_Lw zDrXTec8?;?QqO?92j`^TxO?N0b)3+o6Z}X8jfIzR@1CMa-+d-5VGR0@5QSdZ=R;5} zLx3lJ#a0^;IimGvN2v;Dx|hZ}YJ5o+hD@c?H|ArBNnGHJDIqC=?JyWi?HP&&b4Gp% zXVb)Rx71&w1C`xPoA)`Hs-WA87J$3cWrGw21*I7GZP&s~2w`dY*E^zAnGU=JzEzRq ztj2a_HijCW;QQfzEsyQ(LX$%dW9FlMWHL0kOb?A&(7i;FDudDsk)j^kyS)M>+n0|< zB-GIXs-U?^?gx?d)cLu#OE^(I!^`2ip2rHi;Jdtw>~J%<`?s8S0f%c8g}@;3I^VmShDl7{x4st75RoxC@__xy9KhvKPKuOfs%*b$ z_+ZUlI_r+U#cn0+$2`DS6Mg$XX|Bu1?o;#nf61H)EJr(UHJawdlBo`9e_Ar-G+NnE zZ2KNhlrlnJhjYe@-#(3{rO2`x_-g!!Vo9u5O0Trwo!}V>TcYZ3sS@ts*U 
zv~e>JEPvO+AZQ3rOw5lRijXGaTe*mo34CVC9Ka9&2KpMINk+e^!vdKSW155)*K-)J z11)wBYyeuol$-AyslhRe%BX<^7ViV@BY8qmbO2QR)_uUUJ7uy|gMT~u0&6E{qr-CI z&0n_Q-vF2ZPadAeQVWXvrlxyq=L>#wXE^iGj!SQajLIu8bqr7Ur6j4H$OE%BS@Kw5 z50ob+;zu=Ap!)5yh?k#CACwhCG`znElDclMai;nlMWRqKTtjv#?!H@owS$QKk|V>L z&iv0iZDY;1K9;RDs~!`S>81X-;?Fx!vlrM^<$Q}jC{lM9+bLZ0nD?JgSrx`2J}t%c zH|Sk11VGh%wb8%+sZ&lcQ_V41MwPpaQ)}9QfW#`rGu70ZqIonnimK=BjojCX^xc(F zLa*^iy!aPKmV7YJVW))2bA$L+{2&~5H(%u6EHt#XZy5|#o^K00mmv2hY~hQhs6lYQ zD4JSo|4#QqAWVLFY0o~0i|Xf%0+&<7i0_WRLoN6(1cl4g(~9P&{u3IQh#HnFmx$zl zeFE`>%%-IB`b!GdUriv|(DO{*&cFBHbc$tuZuQ+VR;-~!DK)gIlRvfGX;k41rjs}I z3I2j!yU{)Gs8HZX<*&_!X;c0CKPFic__V3m`N3;R0bP)K^=d+Yhxho<(hAPD?ZVwn4ZOj z8me}_5EG8a*8Ls{6+cxv$?*1;P79v8%L4@}Otzp&acW=Vr~2t6`~Fl>XQSzV|5B4i zw}9xCmU9)jby6kFto(Hi*Xmw|pf12+?fF!zG=~)pmx4%#)qg^C^ zY0z9<+C`PM;+c3?Z)`YIl@%sd7dCX?v|I|Pjp~WHQCRQpY*qkq7cvf(MTAWGlq?34p zhHFgp*=KHFdO}XWXKVKZe}Sr*(C#{n44#L{NBpSCnc9a<&?K{e*7HA_I;J$5Q| zzKsWf7ubo8W(a8INmtpU?l*<+P4gRu%~1>iHmvnjOEiILVYOuaGK&_xwW7EzRfon; z`Z;p@4Q@-&3(H9yVV8ya6sjadHgY{mLwL`_S-ONjr2a!O8}RHurlmx+gi& zewoi!6Jt8bqL~bn9CCE<-@QYKqd$lWNUTckvPCM@>0%{`3BmJ3KRHXWE{E~)N+?RKQ4Kute47^#yp8z*8u^dVeeEqKFwuc=5xRa&jE;cjJfu*|=BnQg^f zHPFfT$+HemsH;Vz+!DCwm_;;9=nr~F{T*1b5ASw4WD_8-3fkW9IZ_)iJ#+M@9L>2VFO)<1A*04yN*zp(qaCm@n^7R^6c5)yGLTUkK{*U>2M# zdE;fSMwp>}YPkfP)HhHJgMP~K<_MfK`f9B0z@KJ)(7H9!%r_St02#|>l3Oa?`bsrI zu6T<4z7~)wOy=k^FTSLqp+D}c9`jA3WN<;aK*GgO4S`rD>^C2bEcP8dQFDD-jjMa% zwqsIxN-VEbS{CY?Gh6CqKejCEJ2flPxB-$fXQmkaMabkiEL%erQyQ(freg*?@YC)?UQ|C&!GNn|(P7jG2aV&J10r|jE zk+s2YVEy*Wrs)@F>RL13HE`j&w}|V}Ae7EJGd7F*RC9u}Z~lInQvO;kTe2TfeOza! zi|E3PwJT`2^XWL{fkHq!6Up_<0K%j(ApDV6Z zEcSW8lyd;1!J|ud1?Zle_j)so3ol@Yt|7F-S13P%@9@Ys82Iu$r-9)>H3OGHu{6{GihGXt%TtLJUKpCDuiqj- zWzYIk&+v~l2ipt*haLxh-4d7Yd@DNZSc%M4q?^=*k+f}$ql zncLg?X3JSzrl$5m5I48d6fhLw>rqNdZ)`-H)nn)?zw}j#x-Sl%HTAVr%wtuc#&yx6 z)=6)Uu)`2A&C}R^^Rkv?;@Yn0u>Fcb8)Abu#)H0bJC?(DVZBdHp4zH>7Xkx8iIdY! 
zv_+oNmJxX=c8e2YDd%B_WlmrCx$u94&8g+0_t7}HUTqf`arJ|(k#i(n(J~m~JQMM% zI9VpQq5ln6+@({2HO@2y>RnLOmD#|dO;8P}U&vM6C zay&~-1y-U7T`MQdC;^i^miwSGTR-nuN1rVdA+Xa}IHk8Y_B;$=#}dE8K(v=#k|2*G zL}Gj%FK!L*yL-%vTnX5k@_4cpxSw{6OVZr0;=$?)8oZO4fB(X<+-Ij`5LH{O;Wg1a zs-49w1(D?;PVY2_%kKYaHFI9s=^{a^R-9#d8Nd@#fdNFf82)0yM`p>@sRsbAe64Q0 zdJu$s%jJ!1v#1uH49bQ*M)4Y8-~Sz}>3=8q)|xQI;Oh%O8Z@+Rl_^}igsTBPlS-_< zNs6^6uw}D-0V<E-J)jFbW1PDAXf07!V9I;1 z{Q^S*?EFQ~iyb9jSDK=a%TTM5J)h}+hMrYiZijXH1UE3h!HEaA_|qx`74`ZgPdN+n z-r~YKnPziT=WQ2gvtPm_vBSZ;tM#a(8Xp>Ft}wrR88K@cn|F)4F_ftj2>=+!;l@19ic$`KtD>;8V5EG^!sRO(((Z zM|@*XrsCf|U3ny%t8>?<+o)*CZm}lYwcj^#x3cpF+1D!n_?SgOe9TBTHP>w;q@_@D zLW+h*wQIH!F%`QGd+0i$Q?-BAsuX9BFb{mK%&Z|vkEdmtHN%%9iVXv5qCL3Z@st@q z>3^I{r1?dOu*KU)v~rf|Vc9_s$g$zFIj?HREyZOBZ_ONLb@FS^n9*6Mql9903?om3 z2dyw44Rt}d%_f1dQJh+x?X831SM`DS<4H#{0&P+!pCYFBe6qoCO+FaBCO22w{mFz>?KuQbB}SOm}bTb+$B3| z-~i^pQ*b9>2w z5^V6?Pzmc#*K!)~B>+R+D_9{ZeeZA6DehAZ#H1#-582AuYk&NWQ@Dko{UzIcfr_(b z7rC!L<~CcYLon~)xDejO5qo7111Y*bHWFQA-zcFz3)l8-O-w2$TF**n`6p$B3LNBTx=UVF;zmKh2cH~JpN zI0+wsW2IUlSH>LPLHpEfdlDts z`7~5*M+XE;LxKK%g?@>~5$s^ssc&Gc``_{yY2-v-t8}IAr`mQjrppssax?a`4@4Dm z-_>e#0$rGw&DtGCso=bHiX2hh$?%?2s#d9FAaRKh-2o`}JSg-0$^-Z41}8 z^?QkZ(b-_o7~EiW%zj}2jcd#jcWCd&c>xxWIhST4&i&wgT(7QWJ03qY9;F_zGWFDb zTH^Rl;|C1Du@Tge10=~0-Tc-1t9rJol*#1YRwbSB7yRo0=h86*ZHaDmZ%vBF(bN53 z5*^%{_L02hJ-|I^sGo}OUa)j#@iOVNO@<+WF`&I=qEE(S55)9#KHbhO4E3P@%OF^9 zxh!~Tqs{&pDu8AAb`4fks@=SCDwh(VQtaSq9_F1>N-NJ+5kVeP{(OER{b(u9d}_ih50AFRKoyg`<@S#>r^R zM)N5r@Hd_bx0V`Mv@EKq&4n-}VTM_dCp$w|pjoeEpDpg(bP=<8!0VA8!)2JwAoU&H z!CS)2(>556Ssb8UqQ$X8Yap(ZS-;L@H3j*VVISgbK?Q%F7qluTCxvT(9+8M)Y7^V1 zjAwD6baDvmvbA9s%ZIX-I#bb+zm|&jNB#6Dl@JeMY_*2Z_h-nX^1B}o46hKR%}r2> z8IT>0kx<`_pPHsH4KO!0T=hnKi0EZWYj*?&#hEMRMkiddSAmR4f}!pa&$;p)5V$gM zGlvF4W6yTq>$1@n&y2B;x%Ity-*copH~Sjby;1!5TL+_l&^%-V12TW@4CH+eRf4d* z5~+Xnkp$ioXhS#%>v$fsnf}yH%sI)4uzH5u3*A5SNsY?Vj+~;&S1d!)Z?o&|>qHc! 
zdkOi|EG~YU1P^CRO)jm=+JkP?)#V`ZtLt*3mE&1%QUL{j;W*Uh3-*LK)5OLmUQ z)%JTTct^obgagXu$XLTCT$gtt<7)dhyQe4(ec)!(Eweu_0FIyUke_2d=&G?Qu*m(+ z4bNQIniTRU4yJ^M$tpOsB;z~*wmTvSJG+a=U#w^`T)suR0nluyBeI(zOwYFa)9|>z zbun_%{9a;X*;uuMf19NBVNWOLhg?p#ct!=CmDBe~VKVB~$2;6jlt+ctn0!}5gR91m zcM@j)#WM4w@-pGThXm)z@~MVPOR*k4+cHkY@n6JyVci6->c=SjbVd$Ta{yD?3j#+G z@aFOu?MKg61O<=pTNc@5GZcz@y6@iWNEELj%7Yspo(?b_LP+j*YT(IB6;8OfZuOc6 zPL&x(H_Wb1q^@l(tZ<1hN7;iAyL%-b?O6W}+fpfdOu%7*hK*mTaW{kswG2Povx>~# z7G;HUV_VB_#c3!P(_7|`1&`J-Ca_tV!9opgIX)q&S?FR89zL8Ik+0W0)Ks$&^OigC zOOhh=$g#|<7RFDa%pFiVs2(n)oR6*0z_}G@E z(CWwq@oAk$y|iJ9s|*y)`N6A}e8f^ZX_^JnEc%;C z)Nyc@mGPp7QrZRVsNBoup7u%Wd9mv?`c8ynU^u7oKmx)QUNT_`Ui&UTa=LmZlwc&p z>~vi$HR5lAmF)5@TCDT(ob2oH@rG~0$m5YCr~Gexc|FT*BJN_01K3oCy<*{dG+DkWTQ2LQ&nOD7&9)F02_eg0< zC6Ls0@l5TN`O#kT1o6uB`+SQxfLP;uuE!s#jOK5un|b3Uk9U~{4TB2EKYUCv@CZw3M#C2Go>Ru*69 z!@G@@6mEIMnf(^dG(bHlncb2ckRmI+cSiI+jyU?QE7|$i_N70F3C6E4E}BD?%x?Kk*M$Ab=;;u07~idU){@8K)a8I5v{x0V4IrsW;o zs0-TEq`Lj=yZ#evaH>e+4e2JT7&g*45t~(NwLI=ULQ3^N_lc6hs9IRDeKA)XCwnco z7L7Vf3*<~_BVj-c7*#6dx8+hgxd)w^elPU6iKw>jTnGVkI5fsXDlA^dO>Bh^7_lU!Pf~7gYZB8x;l-1{_zrFf%TS> z$L+4{({F&P6FZ|toVPi&<6!;w=JpYI((gNLdN;eyD$uW~h75b|RB0y;RxC!m#s5W_ zD!=Kby!D0#^dE5Tc#COV?`Ek$JYQ^mwJjd*YbaYfo%3{_OBI&Sa&o za#>Vh1fAr}6v0EuO(V=^)nxyXII3n6vMg|?6iay2+`yvns(MtYp-WvWcIfgz%Pbd6 zZw*wzk`Pc5V(ye%}9RSTIr6 z!WvKL9y!(3_6^RMlDVe-x5!cy1v(kOaDCrDO)=%qDZp7j@&C42)|LMlVGRRP-?VW8F=#yifQC% z^!+!*GMc)*Z&RLeH*Z=19VU!s?_w^!9?9i>?=erV4oa}thQ2B*wS z?H7X&E#K}+#TR$bUA3+ju#mop)dH)O8dd+Q$1i@MN94vzeN1TS|9Qu`iDUBFJOh`# zZkX&IAU|}Bx0k8wmWySArTo>f_^)7u{?FhP;lZ8Gk^V}9W@>+A&(SO{f9%NodOf=B z|0gxLdUVIS@*}u>#;xqFh$L38T+ySw>aUGds+=%+U9NgSh73K7Aqwi=$Im-jU(=b3 zNRhSKgG$C1fDszRDX$$fRM7$67UJ9^bB2+p$6i`>!mT12c=M}+93rP2KF!4;*{nM1 z@U)qDC+Wd%Uhs1CWY-}{M2^hS;QWjBh=<|BkM}M#G{-yGZhWPACw*?A05<6F$ms>I z(>GA)+XL-%=(B7JjQ534HT3I5eaLE`p4~u9vt2x@zuJL$a}e3))B`SY-?sqoJnh5>S7!x_>mWG*V=rlj%$`y>u8MW&swkw6~;RH<5 zy3>+mI^8G_feeP&W5iRS_2>-+`?F7%4|*q4vLt-~+79Yj2pSea5t31c*w}iSG9%Z( 
zs+dJfMg!09q}ZbV`!tMPpyKF0+lziLQv!|+;yly-1~jzfT#$HjjMaGO)T*$woPX=v zo)1t?@aF|rvL6Wuyt&4llao}NDN)JHC3*E*rh&X$fAFlS=%7-Ta3HM9xR>%89F5`K z8k?_=BCyyA2Oh?p5+Wg1So@P#{=N*0s7kn3=l=ru(0=}U%jrMAdJ8-@9allJKR_y# zHX}>B*~`!9^YBNQ7dTIwa6(O?DF^M-Sf+_Q&Eu8LVkhk-H4{D^o9l7!`e+Ni$HZq8 zwIm$z$%QX%u=DKt>zp%8IY{WDC+l%;r)=vW)s!^^)if%i{}$3%(2mQxxs z8=<`=t5O2-%b-4rTkito8Lc9I`N}`oWRlHz~y>2}nQ_vW?=4h=&Uyz%q##=b+ zT<;j2dYw>=gu046vQJ=P$6nz%(3Ho9{o$?cDNaq(fHwn!y(@2i*V>OdmfQE5WM8Re z6ECc>G1GFh&J*{sP!sdoAxWb7JeSuRcHD&5P=>{rJI0z%ZRm~;AIIOm(`~;84c>_S z5O&4$pvUrR?@DF3tX>zk$BOB`JuVuq*Ww%MIF$|jrC<+f+*LE|JJ+AqKe9;a6le>z zu|DSb3UWKk@f4ti7#WaCC94ZoCYHhtNI^ZH~ z$W^~0=krVOy-7ErTnhzJxd6MM)`5|b<@!um$t>jsblD@4p^(rhgFyfeVnxEfWvDs0ty)h?BhRAKT zJ%{fz#~qW#zuuQ*Q6THxx-3M0eeaCC4405XTY~+zji zn=f^(vkQFgRJ`#}yZcp&hfKve|8j@9CkG`a4yiY$nLj+H6ubyX^2OY*{_9ORgJcFb zb@6@qInNBE@4x12@W)9XDkk_>W)-xB`A5(QZ-Du!pF6mAnK=s#-gYypbOk$FnH_7g zDQlo9$+yXx#JJOhJ;EtqJKeqoNJ-OYp$6=P-lcRx-|d;Pp>(MQ)@6L({v{T5=!|eZc)%;>`d|MU1Er z-&3PM2$6!rP`3jDu#Y`wEp23UttaQNv{)9DYs-t16&&W-IEE+5j9mMKoRECr-`2=y zd(p}OM%(Kixe)>F&cro1s>k7ti8I;YP7W_GyPXn5lVHP+HezwBbT3m9@2p-NWNB(Z z4%3IS+haqBKM_5vCURRe5J-M)xYQeTNIcBNAKd4nT-_MXp{J57VCi{WQEopHI%wS_ zRJGmQ$9f;*TILkHxVcZROlcVDu;?gQ{qx!r#C}18pi<$n5U1-9i=&h^ph3tCNLUyG70hp@J2qBdyb*o~6Py7i*&I*e$zM$8 z)25soCz>#>)sf(pJuyeu7^yJH%pb2hZiB~&X`Owb&nrll+9_L)~>O|L$Z-+kL# zjzElD^dERi)K*!sCPWw3B-VNir$B3t@f8<+ed}W~Z$7)MG0O-WNvJ__$1}d$DR(rf zs_C}Re>Mf@DCag-q6H!~H`3Wuog98RZNkE|4ih-7r_0NFy1Sf>$ z%^}YC>KhFF_vbV+V$KR!@;!y_#_u=Z3YFSV5c*){l+c5pV3`czGT5s~;ujF5mrH^S zgUV%3uRJC1c^vg`x|(#im$douX-!GTrQ@(_muZSqA6TqfC6VIy)a)Kwr8tN-ju-## zcN__ow;e?G*^U%@RV{Bz{Y^S8zdu^X=ef`T1Z@Qa!R{NV;*n#hP(xF4LK(41b_TYM zZE;>%JXfia%PC2pe+IP?{W=+J^}Ff*rID!e73U%`V-bUOMAzay8wnL_hs1Pe7UjdD zPL?w=bW*tSxq1oV-je(FybR0Zzbvud&E&)&>)sjZIMdNnWdljA5%Gv; zu7ah<(C@ms{Fpbp1AorpK=flBgcm!L1)S6-fRy~?UaoN_z;?eXo{lMs@YSs8|<&Oo?hzSeUT|^A>ws11g4RS z{9vUmra3NY=Km~}^>Ib9VHnYYRiKgbOE zi}Q-XWf{Wv{U0L=^nzJ;Zn@ld!gTO{*tuv zj~|szYkDDcX0HC4l8B<&eb1+?B2J9rMdKm&sQrgS@>83A8`4)dQKt($x59Ot1PT$- 
zqGDv>vakkeJlt8i~p-p>tEXV{HWEGW86PUPcb2VYinM| zKRec=t+;1ZxYom?5+J>iE`k$`^gPaac}-4+9|X?5?(7MTPMilZ-tM9_@^aODlSS#e zlNPI?XPVtEk3mPByk$59zZOn|vix0S#E)_R;Q_d<8l+wsbWfF)Nij+zWw4Y&g-^9LY z6^vom&A$Asurc%ll1+a)wDeoMpz~pxiSuV6y|Ftxg{jc(#U-=G1GCskzxs~lAgcfn zak+P6lX9`oCQ}EZ+#79jel{=vbAU%y*$*GAL*tyIE60PMq>>DF7AXH(S=r~7y{(&$ zXJfD{*!aeZ+~m^*+LyBKcQ>Y=*G%i0D9OC}+pZzW5$jlgBsqKZINdu-ugIi!embgA1-)MHsC2{CiHI-)kJ4auec zW&g2RzhEs+x4v#xmf4}Z$^$bRjN;EI!0vtX&*wdIl2HW>~bbM?F$b0!`5b?K{6GwWLhY59Eekf6( zaaL?@b&=O}|1k5bOnYx#l&1#(>&$AB)PHb<&d)R%E}+m4`o?k8OZSL!F<$@AljJ6x zTrXHMkB;}2f3;9>uX`=!+N4w-_E1yK%|mG42ja4x^)<1%Y@|`;csySo_j@Wvj7l!K zvg8N=TM|I{MET0#9NV;0gEpotnb2}D={WZ{yRMX-rH10~{YgJ?y;LyhR*`qM3bdyy z0A#~%<4vOF*dG(cHi+aL_THgVhIoFh!jC@I${cQgsW(KI$NTz?qM56iYeU)X&raCH zf}CyX7H# zJGI|ls`-_bQq|48DSqOGZAp+1hPO`T)sSOOpR%>qsWekBAM}jiK`XBkNSm)x2xL zzji-7A#sg*<7R7M4A-a(AM380RMQqya}K6*6rx;ms=z;TQRt~na0wBqj8^)mamVpX zx3lr=#elh_e{3k_aYU_TpVn^Wx7Oi!F{%+|_$-3m&GB<4k(8eU{?ASh*u;tbV{_csPE-RQ6A7uuORO{L-=^{z9xgUv06ST*F%&xZWe~=A~CqLcwB(v~}Tb_!y<&EC#LZ6|B z|I-_i5Q&{FrjX1;;6rNvJOdaV)s<1$Xnf?cpdiD4Y&`8NhIO|LK3cS7)*7J?{$(d* zi8cv!(ZFH1@(+pj-%2TMA6l?aQW$V^^Ki4AG-@`uuZh-8=*Ls&bV)hR|BNw5IoF;* znYD)K^nb?Abh(bmD*BmgPicw7RG;1TKMAbUL6P)3c>I_`<0}j;5oe-;MzOPnhCxq+ z6vN~p zzples&iF61oDY~Y0dzmxccP-Ig55gvA?42Ip(tz0rP~2N72F+}cGYvc5iV3jwBdxi z8%N+zya2c+ck6sPFGlW^FqG?EJRvvI`tlVKGHlJmDQgP3SU7ftI{r%5*zC8R2VT4@ zL>sfjEm7WG4k{dRw;L!L+K9`gct8ihBrYJSD7Q$7o6kM>TuM=11JFyK@b7+94$S7j z1*~=}j|w3ltKcV#sXb*-=Qm_f(5l@}Opr9%lnR_HXNL|3l> z&Z&Z~$MjOO`%$6LgiyoNSI}^g#|6%Iq@I-H#=CmY*5^OExVW%a?UYH< z-7EJxDW2;9ba@W>ivua6XdZ3Vq&*m(4O1=*A=;$y`UBBI0N{UQ_O5Q%<`NP9qLPpb z3gtd&#}N6VqZW{Yp!yN$>?p#d?zNX?p$FO(OLyX)7YBH$yn<3~#)l0bd%g9^sziVA z;SEDo8S-!lEIt4Lv&#WgvC^yU!&Z~S<5@M?9|^aP&`wF>#^_|YTvR(BHJ9gqsGNLL zfW3bhc;#qh;iEVN?G2DOf67e}GTS>kvGlM1@1p}SMi7t(OyOP}=P6yM4(8ELD(ujD zTj^c$_2cn7A_jFG-Xv)IHD~$4JBm~x$D`KwK-n-BqUcf_OZj91!;{W!3kDe0Y@eo% zR(@7lnCHB_v|6eoGIsfhY%H><2gv62H$>OFjJ}~LaDSQf*xnc{$!JOo>Wd*JWis)Y z3fR8SndX&c&x@hv#lk5v=c=hcS;mq!(5$%e 
zI}{BXSExR^81D3`>httu9Rl|bzD;=MW&FESYKw7`TSM`k@-l?J}ie8H!lXa z6SSSD8VBIuM}mv}gvyV>FWEv+8#e`QA(TQMhoI*Ye7PwKAHCO9>r~JY(V zi>IZ1qs?FA8h&{|I{6hwVTJDYk@RW)oyFFKPmsblUU! z4%Y+$>;rJhfv>Go1f1)D`||qpl}hMTLgw8*p2L9A?AXFYmMW@G OG=S%t$|Xv#g8mN=wg21z literal 14224 zcmX|ocOaYJ_rDHZRwq?8TBEkwo0b-#Mr{ePiUy%ZsI9d+s5V4uO9g3(y=RS5yV8nH zRYQr`f)IX*_vicj_pE!*x#!$-pZmP-YZD_~mJ2s7FfcH%0Q4T1GBBJ%Ffg1reeMj6 zQp1K`1Z^XD0)sk61V7EL?rGs?0B1d}UTh9}RX@148#g5f;(`3Grjc@It_ z;MOK4YuD=97_Tu|wY~*6o-@6AUtH=s*Ka{8_dQoM_ywW2A+c9bs9BN)y~|0QXflnTw)xy}&DOfHWb<7bf_e zh%8MW{YpN5`L6~QW)M8Y*jFH+erxf>$b~l_Z{>7!DRJE135ZzvU5`0QlN|2!?IExD z6;14bN8pgz4e-u_=|KFyOQN~Xb6jjGa$VHAhgCf1Xp&z~c|02%3Y%{@Ixqx8ApFzO zEdb!R7tFj@wxd1Lm_GQ~dI+F+Xo73HiRgX1uZ{h%)Q9KJAzCMFa6ZLj;kM&}>@@DH z*DmnzISpe|;nrK`XWG;LF4XfWYb2=BhY)yWNZAAd6!n8+Uv6@saGLFc+)|6O@Bl!s z(Fqz~Wb-51LvZTr^+yqR8=ex#$^MyWMFDE`z4J6?$GBwiwAc2IeH}$j^d3f>7JcIe zdB2q;|Ci`kffc%alf0tCLx5FwM9=UlQJGU;7bBh^amu%vYb%7r_qLKA5525RTWWY} z;i~ro&R|{4mMa%P6TbBYy=pKnbDoQZCXpims;4v$?@Pgy86I8~jZ@Ia5#U+KEf)Z> zE6ABX&E2*oYC*%zXLY@d!jUG*wf$4+uLa>61~cw-bMujF}^#*egy_y#$a^|Gb# zf8jxyrp@JFY>roCrK_rqb`Z|?Uh?}wjkNH?Od8>`1*s2ltkR?ORbl9MQf}MmkdU-D z&vR!+;CU&bfI&SK@iP;&)MVfrWA)klbKstV%p2DtXcWJuoD`a+`|>;Y3{p1Bw=UP+ z+yG;1j58LP5`;K0SD&Tv?x`61*Uh!qu>zXh&mltegMm1X)Q|K)-#aHBm@)7BeHxTsji|LJ`-G4g5O?nKWD=iM?7?~vyo{6B*n4fQuSKcIQB=s10@HHsC0-?>zR zK9tFu50ZnS65YBIpHbyTI14ZL=|}&0%+T|}r*2*p>38|gYm~1xphizc`OE}8Zy3Ur z#t0cKNda<6?4n(@C$JU&Gxob+43P2l4glp_54!8G&s@z&mwxFGv~a7Y*yXl>p!tf} zq9g#+=>VcL`1C-v2NhPA-}zqFBFw@MZ$qN4dos~E(XE}9(?W}1Yk5hvaXq<$RIsVp z|E#tB5?>rFMoCovxha~6^v^|8{-^T*H6&lj%`uY8J7S`2g_E+0x=c6a%K{zglDF=Z zHU}a{>@Nw^L+^l%XG+oSBE9dBFy~oez53!<5|3a@(d}5Q?GQ0v>AkK`SH2D>NBWr%}zZcy?&lLuUL)}t2hRpxPWX}Sy z$xngOiF)Z~b^{meuA(;h;mw00%ye3Fc?Qg3G)`{WM0p(h8Ge2*knR>AN!)FU5Ax{a zKBp_j^Vumq-Pql9y10pf=pR(#M^_Z@HF15W>@=B?MxMlD7Qnn~npx{KOVq zxarq^z>2U(R3;71(6>c~wKzgm$zG5+l^HQ7NR3OJp_eYh+Dx5f%U;`{NziMw&IMZ2 zWMFW@4iOaXcdv=E!Tql<(6>4Ufi_3UE{ncIlhkcl!f~2wt@9%nME~ew4jR$j$;!t< 
zJ*@C%lf^SuH$-*yRI)eejhNx`qNZ)J7$^_5K}2}b3A7%@Pp1f?eEl@j^yz_R_@H8l zV<&js6GUyy2(siZA1~NYnn0%63TrZ#s4gIFFz7gbpB?tR{FPNry)bH4rl6X z!8^HvpAg*x70B%SpP-Ynx94Y7b0dtJnJM@G1m6%EOOorg^J04v1Ts7RC+H9FrFGF8 zqMA|=^`jat`gAUyA|mSjpNOE!!8oL_3!S7_B~^d001!d>dI>T)9Y^4DtMAO6Mz5zL zsExv8O}dbIOhw}gdc>rz0WFeoyosA!%dfp@olGmb#Q(zar6*KMl_+RlEVZaKaGWU^ z(XM(mB>^ea#uah?W5X6*zQ5PM1*i~bF&RB|W?|Jlc7<4ll3^i!clmHzogIx|dAdBS z@0OK3%WSYlO7-`9XZ{VRAN?B!9W4*PH3<1N0cvugXHoRmjo*W@lvF1a?u^?&4jPBe z*SVo8lnXJFxl!e!LjW)tCY_@`%}h}rXX0jXm+$cXxa3$Ag^TW zAFo*OcHbW!iQr<8-4FpxGt8F|0IPfEhzb*97k#S$yPaA@7zFaUJ?eE&m#!Lrvin0F zrD)xv!#h@BbQEH2^>1I*{q4AI&dsO0o$@!D*V7`x|9LJp?dA)W__prc~G1LkDY>O3r+YX*`VjSE2(xmB^*8q2J( z;G}@*Ap>I)4EwynicMo2`%P-=U$@)SHhKhlw%(x2;FL4_MD5{UZkL6V27#O8nF=FS zuk3!t9{r*b+g$3-m!Eb!F(bgbvG3#MC-XR!KUwPbeIB{whaQ=!lq{dBUua#iMcUA; zF=W|DW(Y1GwjQ>bRBh)fcXt0WvCsXi>M9q~5)9S6l^Z`Ytf#It|0=c$C6i=xMJ_#V z7*mGQF*NizJc{M|^|Z-Rz6U?1X}_tF+uW>UHx;%-Oj2n;7Z+<_y*^AIO63MrXop_17e*iA=1v`1pcHbnmEUyV&ncQr<}O!Rh|fo_ z(l>*P==*+7D3c(ihaZ@))bw5y4s)7{OH}K~md>DY?_CHBEw#HLZtgYDUi+YL7_aXZ zo`sj!I_9i&;L`bo7e?RBu+0ZHFI8w}?!LN# z(g_~t`QZxmk73b9P89=~n2wmS9Ptih`pE0(3o z&@ow3Vm-6NV!LfrLa$|n_m2Cwi6$V$*Bf)|-Ts+1DRA+6Cp@nH^NQE< zJosspbhbI2Yw|OP=2EG2h)IUkoO-XuE-6)4YIAmlq|QuVs6q$RnjOV29U*DvD2%bOko!$ZY$v#-A z3z<^6;-R+n)T%h|#hY6fi!^8OC@!y!=`U_`x9>oaL$y%%s_lRA3xXPy-m|?-lI{}3 z8w<@R_yomuHBwsMuH5_FIS3AUGW_HBjNJTM*CoHDs@)iccS3l_YruVL7Z`m<`xgXO zWANEt*$)f;x9+9BwVTFLNhH5NMbx;u_NL4sh;pD~_uL1xqH6Gso=$&#pBxS-XI)On z)^__jEb}g4S~{JJc!9Z7G{v0x#gj53zHLge`DdEPws=fJncDoTo~#jCtOZ5`mN-Of z@&x8wCdyJLJMO-^L(^i=Lx$EiuKmHoQ#O?xvV%#|nXe}q%=2Jc1f_u&&z0jXd#ux& zc%AJ|@L`LFe*h)yBYuVuXNP#_>R56_3BKrh$d7Ld+d*&8PPVSC8(mhF z3n{y6e{!f0>}eB(j*UO3;aFEMPXk;y$zeRX=*E)AuOS;se8gH}f^&})lSA(st@d0= z%LxiX?!&6IUP1gR)*Di}MVOw9PlPc#3(Zy8zXOQIjbwli?3tUQIalB#89KmjG^zBs_OPF&64n0_UwP{BXSkl zM||O{%J;!7F#{zpTy}na;t~3!-y$8r9$!Sxy?BSq+dQ{B=^O&^L#;^1Ea3ao)+BT5 zc2~CYslaacrR3%D3pj^mlxFCh{L$?e)~N%shI&;gC~)#8p;vpLnh-z8VmG!`9t*CU zagOjlY9hwkx_LMFq{0r>4nN9w@c}$$^UC_#51M?N`CIxrU+fRF1^4Y|TR~V6omzk5 
z7pJoD6&cb^#ZNKJ8#ixMSYY3&p_^&93ic^}iNGuHEZb4+SsoIrz`0Q}cVk4R>x!k~ zU-DQ>uea4EE9FvhK+Tj%w6M`JWE-J7^^0MbkTV569S zSy~$AYso1TW*EPy>T(`&tGs61FVWL_zf?3%eVP36F>-%2eO#=>!}#y*IZMagEgzOT zs<6e8^KZA^^Cw++SX5W+mFmjS@?1YxC^Lc2FCfS>quVwE^$0}iOl6*-r_i{*b?QXm zXjd_k)>O6xzY^NLly`~_H%2y48eX6XNbSODAF5?=xDC_dWUfq|HKfOzh%N8D6xOmZ9QIls`OzvXDtUUi$`UHNZB5f)t!-BJIQCv_BJ;3eKWXACRZyjPLBlogr}YN0yE z8EF^a!}vcS`dtkC z3OeJ%%_j(#*i^lE!xidF1%FCHt+DuaSB%!#z3EUdyky$?BZHH;oN!2Zxya1`Hk{i8z z9{lJ+Z56KAtR$&>G``qaYH1*WSdqy1fJpuaqZ&F_JPdIh8Cu+IXKJ91QRj-JuG&)` ziZm|l22p`T(t6hqj}wApcMUM{XB#*@6=Wv4(yLAaFNz^x+UlzzSjs>Po0;LgL1vFj z)L!m9wx=x%t&*@EJ$n`T#&kyoF_MgY;aMKXG|!gNqI^Q5@=D7J5PY=V(O5oHZL?B* zU{Ncv>ErY7{x+(lLMOE=o$(@QZ~*^Xwpt+*R+FxzF{k__@8RL~s}*3sQdLib^VghF z>+>Mh*bKSvz-v3>>gk}UvLDOUd6lN)u)iZ!lMz$;CE`y(HT_Ok1TB857yKfQ)Sp6L zF@=u3ud&+3QjPXm(gQyoQ80SgZPLmt3Bh{OrJ~tY%M(-i+NwZd)fl4r(eSUfI-_NX-%!FpqP znbii^)WIa({940P;g6SHf|A6Fp;Eybps{&3L5a$0)QF{iLyE!Tz&`?Tz&okGdPTmL zr0)06#71H#!?)Nz{X|W2DTy%p9ac~U8x^xNJySOPG*CcAwky&w`bvKGK_m8$eZ`vM zLL`sjR@vLcezpCdg<><+#PA7Jm@6LAH2-Wm9;yIWd={>}uC(e*S=mxTgf`>+iQIOA zSklvp%L|c@J&&L*Bxi(JNDe4as06_cnp7#WRP9j-wwfp!G4*#uKs*m2^U6cH#gHsg{a07x_f=3mKZ!Q)5__gK_BTx%!JdDG>dF(F za!Sj-B8pDEv34)D(rA=<^}1@`d^cD?M+2GnZup5{E-S1*7Q zM}W~yc$XdtZKj!puoij6YL3tFszUecE;q3Cp0yH)h=z%l(rsLM7R>c6_7^meQUl!? 
z-|cz7^g!XU@hG+*^29Esu{6#`km>Ej%r@ogPQ9UHx7GEYpbDrNiAHIw*Nl<&VR7Nn zB#8G$?v9PeRmqSv=Bg05`mvDNohnZ|wZTKzw+oR;e zey+6G8llnmmuHvk+Hid$-<0Ek)im!~_6)nT^C-7OQB$P?ppipz)%X=GoIT7=Ce%<$|z0ZO4%{4tZ z#5Ke&7=4rM+fCekntpfw(`Gu1_RUn6(#vHHSv$54q6!Rqy?tmQ!hKJFfFSG7;nS0gM9kmcf1hjM28}Ex-Y?eOr z{?RXe-6QD%SSUJ(l<=F!wbx$kaO%D>@Jcmb!+g;FRQ}gR8zh%c$h-AO5aUeAm@G8g zld9|S?SMQI7X&;jJJ=gRTo4E){Q3VQk}Thrgysb$LusFy$>-O3oKU`-hEx{s@C3n0 zA5$H94Y%^UpcnnI#4DmvwA+T?q7bp^d6LJ?$Mdh!!`yUP+>2>&*qKDvc;XP2W={PaNDtTFFT$98wQo-VvT z(+`CK=|><>ogFI%y}~H+j>5F_Y{jg_S12mXqijpXC(&x}8N^v?@d9hktKsOLSMqF3 zM>}lfU6U;*JZp0#C1B{@tu3VW*hk$eDTJhi)BfPepm?$M-+0N=`;it8=pb zi`m$Ngnt~yP;;S|p*4rW<^sL51Z;D*U2gB*%0kNBtPW#{l%JGG^DSjhS92+B@_b3) zL)FjCF}-3D!>FGFUPCb)HS_hI8cmpnKPzQ;C*{%Wy_gc0lIH_gQ<7yK6g5HJzKiw< zt8f+b_CIvLays)UqGvTkHLRdW!zZ}~n;bNeIQZ|^l^(Vx-gL7PyTEs@t$O&+;`U`1 z5-t9{w62_JuCR6)s!m``qax>Jyg$`AxLdz3Pkr2ET`mXil^iV>!poCGkkyNSe_Aa7 zrNz#t?ysuvmclW`1k?2lPRdT)WhAY%u%GrlIzD-hrO9dJ`VBl4RX*3B_F?#F>C zKXKWtAW73Exb$H6R`rVIUB{8P6v`6x^JKV|%WDdZ&8%*9o{(|h(?hpL&9`24>eGdlHor-{G9~rJShUYaRl@<+U~otsm~~q7Ac*zMA7xst zbxMsC{RlKve8s!g= z@GD6wz3H2ysTl1u@k8YzBU7?lCf?k?^+1iu&C&RW*=N-i5?Gb>CviM23Jal~PO|Di znX3|ZXIIppf0u|}ctqQg9&m1KNXSp;=gBxFungHhN{`3%S6S1)LAYzZGljg6-4d&g zY^F>eviUBsHffgnt%m-zCi~jY-PMGU1qGs5wg+xH=3PE zcYvL@MNVQ|WQT+NDpwrAqF|_Y(VLK&_HVpT$y}Dx;bp5E7<7jc==t{@o2g^Wa2wJrW$5+K>uQCKqh0>~Tc2x;vv@K0 zC!n+o)yFM8wzk#SmPd$zejiL%o`WT8*=~h}`?6sjjO*r`sxKhMw)$rlZLTs~I4$pq zs|C`g5cpr2nqsHwcQYoQ%s6S?UkgHpB5@XQW0mGxDLeI*<<4F%u1UD$KeS_~j&F9U z8=2@$rpT}lHqU*InHtZHuPT7fK-C~+9&N=fvh(uuKwU9MtVvQXXU3z|-q;pR%3u3)pCD)E}ix3IHPWT&wn zzk_N{^%^umyDMtpkz+OYS<)(WFT=O@M_V&?ZwdCAN8lylJ7TY)%M@THtjG{mrr1tY zED3t$T@6X7=FA-WB{JRlc;2Sw1`%x5x9Yv}i!*yufiqdXl7y{<1p&8g1HEvsTdfHm z8)(n&Yp@CS(^9WpycWulUj9LKfFasd08FZq1fD8)ZOaa=EhF~4ykpneleBtxT}Ui~ zEAINR%tc)uatXCR@I=*Dmswor0%AN92YlCPzJ(MQJ7`ioMDZ(XMsb`fAut+mGY5;4 z_GDqcLW(+d$$U;-D3h_Z43R#d80q0h4sc?^)B-sZv3+y?gHo?&)TVI>@*HA_)_&}8 zz%2Z?9wKN&Yb9cIBHg<1sc~IF1zPec`C`qW$erTzC56*Ib6)A@LWX 
z0>(dCG2)cd7v!Gp8J8q6n;P*FPp72UbFwb#7}BhK=v+_r2+tdWQSr+SfwDe>MV+{< zS1Kkqh@Y(EGm+%(z@I$UV@S}2FUdT2z+hJ3N3j}%GTxDLb?@Rj)NWOuUw-M^i18Gn z&EtWCYl!AZo!b&+2?H%E&r@XLb*uufv9uMQoSIJA`=X)^(-!Rzg@Q*6HL!<( z1ybseJsx?D;+v7rtb1`*gWd^pChYEc;+>-Vr!@0e9oJNDeMpc^_ybf>oEe{mU}9)} z^U1=_H)IgV>CE?q?+12}Te<=_xS7ZF5z|@z(~)99n@^o3dQv%uc-$Q|*XDkleB2MW zp0^BYTF?J2 zZIHVxh2-C_F~@aYeo=iPe^uw;hiRQ;$n(QLiUiHO8*HKJ<(f~~!_OxV891ea1o6#c z=kQDATP~Dg-EX45EEm;D%PFJrVHKPGCyN=U?v6^=&IXdQKt~(P_2rgRImHQwo?KFm zwN*tn=0O8j+5Jn!ck*PN^4uH@T)5^$bIa;uE_kN-mr*)^;p%Fn)reJGwe2^_S7$-Z zxTj3YSA^$o(Q09au)q~Kp~J)~4!Od@|<{#uJzyJcBLceqBFZIrjiuFYCiih0`;H9>42Gz6O8pY+Zf^K^r!P{96Z z{~a6|ea~fi8ezYY2fuiYT{UiVZs4WbR3y6?6O#e*+6g3^ zXURrYgbW4EcyBiv{WEzftoXp4bg}0Ug*%+}Nj9jR4wITp?|XTtVVm}EjU+2p_<x}D}BL8&~YIJ8jyZYw+|;2DMO@s zSL$ot?t>^rEC_kOg{JJI8D50HM-^{sDT#KJXny{s=Ao$Gi!sa#WNd z=D_bE?Vr^+mrdcDm}weVz@5i3DZMk?`e!|1avWb^m9q1oIdjbz-ZNR8ZCBu0x%eFj z)tqhL8UH5bzffKAqiywG-LgvR-C~8nmF*Uz4Jnn4DbCH2w+T}$4Q(?=lv0kVbmoon ziQ-F4hXyGuO**dcpARq>ANg=Tk03@os%^;|zXd=1bOPmDi@rwfw7Gqn%gZF!Z}4XR zW6+t9QPl=CFBqR6GZ?=6qHLJGsxGo_f6H?v;!eU8rl~L0fee?{sufy7uoPBte6|F0x z;@FuF6?_}YJn9D?jVk0qyE+3;Odtw_x0g57>m&^l)us`|6_xi@COqNKnp__-H4Bm# z-5Oxuq$ak>qSGC;AL4x5;)+JytAD?m1l6GjeOVtkFK}0=5Tavkdx1a1-%r8pkt-p! 
zYfnT$iC_9*NoA1`yC>Lmee>X8^a670-|LvEU{V5@c9a5zF>XwBkYrSz9|iYkTr#Se zwx1j=3pfy>xnjZ?F0|;kH+A#6sHO|-$-v8hzeWIeZcbpv_9?PNYiWfu>*R*|t*^Ou z6bIO>Wwoc}xKqWrt)d-K(fh?ut@&6kJO(vAQwmAqXxa(SJ}hhNH1=QU3@Qq32>gDy ztDxgQZe$a-#lJo7=%gf3#Kf`lZ`{6S;oU9z*bl`O!N{4Z7c}%9S402U#U}1MF21w4 zHqdoXwck@a6Q>oY>e%m>(0(X)C$vxcZpxop9^tag?bP{9gGIn#>^dIivV4Bztt|rH z&(nLg*O2dtBOXt*Jq-4i%xR(;c^A9jnD(LRsnBfoC9Tv~U8B(_XdPBNaI&A`Lz@6b zKZOf99~kO@kt;D6hx_sKXaqYny;E+>md|V~^?8ApP!OTRXI8UEZjR#*w+D=-{0;69 z-_npKIlv}Is48_wy<$#sfmw}R)gY}$9x*vlL7WgWU z4ky1QKj6vc@?QE5%VlZ9Sp{$9_D*a~@m9Hy6^dV@%_Q>64ysYfdS|5HF3Hv3EX+wy zmGpkt@$8645BacaKz&UQ8NRXkBv!xT2n8^zt!JhCXSZ(@C77tNMwmz`c) zaJl;30yfZpX*%v4-I=cp#>~vgq8CL8la-(MdZ!nZD1(P&r^0yCAj5A9 zi@mKtAsctH_DZs=7SD1dD?$>nA$u)5{)Fq1N)Y#OTMyg8heitv%-o&gKD}vmNd;yNrLG@CWP3& zm~UDHEdbLO>Nz@A>K+34+t=~_H?6!qWw&E3^Sr`0q)Ma-#4xi^IfAa0^TnFAv3Zm= zuZw7q`Y(eLv}i zW2m5cj6vfH4HbuDZwyV^JSLfv`kg$?$X5E8<>alI|1fXw-8Er*gGr`Zd6USBMtb}i z&_9<fCl7qffwgj(n)+4 z#XiR*{Y`EFcg8Wy?)`<7RY`F--5wT1ub|!k9Qgb!uQy0gOwl}T72rO1K-Z0YP+_|> zL{wUqF<9!Wrf{?hth$Z>oSWf=;C1cjMhZqfc+c=#hn`n2v99X!CbhkKh`MBy^- zKy2>K978sR69^T4QA)1&V|v&D*(k2#M0-#?La0dLQxL$IKvq8Z10rXRXDqyO898|J z(i2Rw2RQ7d79Fbqr+En4UHd=0Y{NJmFMCb29|C7L59vP6*{|%i2iM-*)y~_VqbdT# zl%nW%`7VmtuDG)i*J5dbD1>WtQ)q8FXocIrgRB9wZ2pL)IXoi}rv7q<4tkB0Q|%&P zO&(qn?Pr_%dJMz8NANVw;(g$AGxq{weiObDzjhpTJ{EDSFoiGFS2E@S`YQUkwsSSd zfrNXadMz>^leCjjxW+q+7Xc*?8;{YL@8wGB)V9IN6WlONvg*Y+oiIN7MELHDYjX3% zovBxf*59l|^9xq}A541yXcrzV!#Z)DR&*=yt=Gc)Cyy7U4^}_vZ)K|>YlOlbbW7mJ zu0%nPlTvBXCuq^wXB<--qrVvfxN}*YPIkcZAd|v*YqT$SnLxrN7R^o@(8o1FqOaH* zy~*^9?_D?(xK@lb=ZGR5n<<6qsp#Oc=Ys#vjhm+Kr~;0JKCAPjALl8m%m&Q%)-T8W zB&`Nd9AMBp|HGhPJOhf`Z~z2$P@mqpWuDezFm8`ammZ}*$6z>z1HoC}``gnH=d{ud zvc>Vhv;GI2zjsl5b$MmO__lzEd770V2@x&iKvxkS;+9-(OhcbSR+@;6)a+gajCA~m zUjNEAQk$^}#=UdBterP$%74dzqT*pgUw^$)9;Am+fbtz5_zlrbNw}OYuIIVuaTYzV z#t~BA!b88qd1(EId-XQt{{L|8jn=PU$i~!nk zAB`4V9~GdRJ@2IaM-M5MRf2z6I8QUH3rG?Sv%od{k1b6uroRk1v?eh=#MtJV;l9B; z4Jeo9B|G_uVeR}UDQGBS`N_c@3 
zUTr&vm=;g+uSZM6n{O>*1w7?w0+H}W!qUF8Ej5BOu0%=81!4)H6r*X-ZGpu_0jL^X z;=V+(|C_>vh>_v_*-NN$HEuX<^(=7HaIa@a^uxtH8UNbg#^HrPxW7D%vT;#VEHE2w ztUqpwLkg&;Y;n^Q&2(AX%6kSkJhz%#DrP1dIK2;;KCDfBnd)N_LsjtpueK{3T%RBA+{p-M^CFt`8@xmi-}J}PA+hGy)6>-%SUHUUF}k> z14R6jp&?fzD~UcyL^YCzv3~ITl<&`Pn|C7Z?$*>AgLBc{UI8}yeyV`E$?SI8vme?z z@%aD+B|cRCqSoo*w07K7zojxq_dJ0F5|@~(+2@7eI)EGe5WgMH#lt-vs>b$8#E9N_ zWQs9uPPaYdBBBRGD$f-3{c|@_B)N-WJQ%2_P=_uZCqE5MA zz;s2ig*Ecpmx!=C$r19Noe@^LDyw|s5lOf1lTwaQ+qz1939|5ohFm5l`r0+{REi`! zfayopkNsC`YtU&g=CYVZf;#4~D~yba-BYyjxSfE`w9HDS&w<*a;u%vi}p(T(=)^4P$uk?NIT6x-%Q zCvg>NsYE7L(HGFyc24}3qAcG^ChRN2Eqf{xiR)C)i0e_l1NFyojgpcmoJhb zY6$p^Lh0Hue1|uo6%h_)doL&1oZjEnwM^Y_2N#3T#@EP|0wQed?mlyrw)FFgM*fFq z-F}e>FLb_gP(3{>7|oAa8J*Q%e82Q^SMM=~)lmjq%o>uVP3Ze9 z!TybaWY(|e9FNC6?ky%*Wiy)nUV7wsXa7tj?ua1wJ4mVK3;&@3@@`4WSzFF(>AP&_ zZQy|9=t%1byupZ4$6u(3tDU?DH_y;y{j|-V@l6hW=u`{#x>1C0B{Hwu@JL8u3!S$k zBM-OAMjNYK@?kVoE_)>iHX{lsd;O#*N3-{yniAwuCQe)xM#aYGwdJP$gB0KThRzxE zp>}xSexcJu;%)U)eHAGs*m^R5l0HjhJV7{Z-!;(iPIig{Z`?EHE`6zCuFYT>KR&|9 z)B~?LB8+JzdDr5Zm#kU~!KbV;oKDR|u2h79S8I7~vKFs!6h70Xjwp@ayvPGTPrhIy zLS7~GRhFP%3eu_Yx!Y)U;#6FI_T(lbK~>UBQ;zcXRlAJ-cdCNGWN#3dgH_|kR~(_s zS1X9b%#x#KI#-YmDI`C6KN(!li6eN%16JO)T#Q}T%u)w@P_o2iW(liyQV&+SGR;ir z*gwI_0XMf8h2I_$&D?r`sZQp%w|GEkdFa@(si;CW=;Lf7c^zJ@8VG=eUYVvPG;I%h ztBk2gwal6}I9DM@}z^nn(zZ?G8OjZoYL|7A*~KE!W^Tk}n=P0f?FienxaA zHkQ>PQE=b1^?o?U5wx`t@?XuL<oscH6~jwsb23N?lq{+o-lsWMmH+d9)s zwxX-S6sDm$usHa46^T++i&EmRsLfjtVm}|7O3XpPqHGRT0E+)A|O)i&f^KQx1_U8A0G{Ck+NP=$mzBq)MeJu%I@4Se#iPoxIk+PRdSUvGK}o__|L zu}xgFG9wK3pkfVAz8l_pu0R2?Oxov}4Tg{dHDU4*)TnlMCr25Ipn1GtGoR}j>li;u5(V}R_d=G0EqwX>x-yT)nXK$Ufs z{w?<)$iBJ_#B1oOKng*soTSwEQWAQ6l8BZxac$FW_A8BWvSJ{3GSMT>TTD?!#B%=TN>JSP&p+9X%;5(y%l4;LFEeK?J-}H%44ktw+WLaic!(^6r zo55|%yM&e`#vxZf3HmC|SbBBfqfK&3!dFN{+D_lY8?9eq6_^YTcpmu2U*zYPB~3>C zdPpI9tmhvh{-~hX7pUR46I6Ik2@%z-Eo9HP{Nsbov4t693dSyWbzw%ZA`H!!Kbf#AGdsZ2w)){hVW(6rOJLBmFyOF4T|nScjQ zWqo5e2&|-(L&0=`Euyuz539Dh<;o;xcrAYWa(;ILAW+}D 
zL-IP2sIV--$)ol&FG&7M^Pm&}V>=dm*3soHtzubx@Mm($p?3>=P9K6JFn+8PuoFUW zq8R({?kn(n$JgNRlQoN${_tHjHq1?RDO|j(K9-`dvHmCEU9YJ)8Zl>No3fHJhjBGL zCG@qrRV*>{v(Y>PBhD)N^Urv16pS67KF;VJV9T6*DV3A{$7pu1(^DQ=;7OqhFbPu@ zLf51y`@XvGr`7zveO@b zWqdl)Z>TPACOCpv`r|(GwDk4%5G#fudj);M#6;I$e@+pC4^pc;sv`0X|JZ}Nzi#Ow zLgqQdDv+0tut5-Q_pguyWp{;_`qVQ2TcoF+!&cn^<<|o)Fv;w&lcI_G-4Gw>$%jC* zNi~BB9klx`U4yCJI;P#fmzBr+LS=au^ju=Ex&#S|9M@~N z9)6wEq*HHB-`9cYc5EwSPbLr^TMCh|JLQOgR%c2H&BhoIs{y5037$W-u;p=?&sW0< z22+ayQ1n43rEy({8n)SYJ)H`1V5pqPf$f;CCb=eG4FFa?-5AKios1k9aQaQRkmc`GbrA{**tbL88n#Cbow}Eixl6hVm>(Mos-d{u+H(> zqGKNEX@h`FeZ7Kuj!DeB8wU(}mYB60FVM|TMDSDnFuLwA>Wxx}sauJyE)GbO%KO5% zxxQ6DCRrLdd8bRFOr~f=<^PMXWhfSs!Yv5;M2%0A?}b{8jj1;2);RJucRzp)z%LnOM};z z&wMtB(R+5Ww<7Ftpd!9|H&9-l={I|3Xgbs=7!^kcp_VGEs+w$^0+hqGg`5-WR0MkSG&#&koE?sJ)bT-JEH$FQj>cF}d zio96)jL}s{^vp0S*!A`HQQ(Li1JN)27UY)IyrnN{)$DVth|p1b?o7Jc!(sF`IZL>VdzD&l?X#cxjg0oCK%Uh11MN}8irLvp%k~ZxxsXm! z$Vwy(=}0*RE;yb@?mxJ%y)kuF(S}52=uVEQkX4Rc4}kZhh=CRGi2uyM(z5y-ht!fg zo2P2@m{+t2Ihe>6wEp&~dS+)_2I1c|zZNRz^`__72LAeL%UiQ^4)htUn5jZp%lctN8f5b_k{JUXQ^!Qt+SF}M-B%Ga9 z_X=oyG&D3?&vu6nraCVSgdK)oA{k`L<;Hul!Lopco5%H%(UE$=)-ln^&2K46c@4DX zy{C^ymqXG6nd_mU`tFp7Bt(E-_8{)yO%i5`EBn5>@=HV-)Z`IOsSHF$rozZTwccR_ zCOr1E&^>s8(W&{{H>aVnJYFBa=S3>P*7M_mw{BU8eg6?|&1EuA|5boilXjGzUT%(f z^Ng|4BU|gOaL!!7O5Nm9u75fE#pLzF{L`LLnFn^hDI!m62L>WghidSMSR2geTk5wA z^z~Px8Qp$twPCC3N2Pe|$=U#6nw6Jc?-@pqtjBZ89!y4jJqh~h&{d;qs2j0IU4lNBY08O#qSfh-D#KGoi8Z5DSm|U z_8n=0u3(%ho%*4bd%Ctjw=dKKp2dIp$w$K@FW_s!Fv;Tc+ zpuTU`58*wdf5^5=H3vzzVzBlVlEF-*9|`I6_5sz{07u=5zlBn~*5lnV8xtDDk*VKX z{Faj^kK*&pua<$Tu|b|dKsD2KJ&SA4+qixijJ>Y4q#`Zl>k|1{Wan4~n#mz8$1mBS z4`-U5@mdre;GF=OZJJ+Y@kHhE-ipg`rVy7L>Y_MjqW+92p@7wQ<$RiWq>4;@LoS^y z`SaGs`yT1*6@GFr*GoPJPuKcfa6~Wfhtl%8PZhIRE~t!U9n>Lmf9~ZuC6KT*Z?M~p zPi2O#72@jf#(m@KTeq5Sb&0e0`d9IjDn}ca;;cJhj zT&sflm0ne@#^)Z*+({%7L_ z?E{48QyJ7w%O@1JvotRaTXYWUJl}NR$%9{>c3uvhoY7f$>jVQ_>hL+w#Brjs!5*d% z8Y*4TY~@4p+~HU3!Ss;@>GyBdQC44FS}NX~ReTnI?{V_0SQ#v{#g~L`ZAkm?DUm8$ 
z-t)U%-W5WkOxdC59~+z_s97Y(U5Z?a%2b_@w_%hEMNWEP6L^I*%mT@4 z%qdeVAGic`Qi>Z#p1*FiE>A)lb8LQr?w3Nr5wdIsr)_RluG6nX^TMF|GhwGe(uj%U zag=l0DcxG==|t*W(AiF(52h$%jYRH9p?9U3NZNjonh((aW$sX#?t-ODgktMvH($=m zvR~A436)W#+;T#RuSL|aEVT;iB5zlE1We3o`x^N62Wxn)U5zrgijErL=sj|PcoNWeEATy z^KO6lPHDXv!zAm3p$g6W?x7n{CZ>@2g8^b#eqK_Cf2s(4Y{@HV;h~7(oLpwFv$KKM zuN!4qULQ=|Th%uY=Q4M5GAu&_p89X$y^7wltLy|2u#|S*F?Dd3KY^V5Y|QlBV0Xu^ z{r1;Kk`SZ1_-*-DBaxbpC^wQKafgk$$?B!HGFAdMb zWHwo~e{KvK>If}ui5uLIy#a4wQ(aMTy-fun9MWp4#kdm6SMNBtotpwL3@+bzZFiBZ ze^h4X3N8c$o3w6GVj8K63gUJ3K>8;0`wqQe<3~i&^ZKJ}q96-cbiA%vn@CXhKGyPM z?}v+{0Hm0j_?cqa!NIZ z@Umg42?}Z#>AreO4j-<~%8-8zA3gT;Qg1nE>6<$8?3#C&v)kLvrW^@qok`iOZPW)k zQI)HLT!h%Aqg^$vj04*@prbj))k=L+EBgRpsFrx&Z2zg@a(iV<-8Ud1pDjPM-&0j< z9CMXE2P6-S<}{rd$@Vow;fgbwG2+;UB_E}4LtfhK@ApD$Ed}VTrJA2%p1x}^Wfe8q zDb$ICS(Q~ET9sr)!PG+ep)I-y-v*HibZO@-obIKqjOM*fm79>!OCzI~g)u6&$|_;! z#onVdu%YTh8Eg!TDxC!zYR7o4eZ2N@QMM|_hSXS^|AjGZ57}wd3$9e&2M%3csGj6o zrZt?eYh6^89ZEe|I@OXp_YS7dR_ATk%#{#Y;%J=k;h3lbMnCu1P9rm<@YG?pe||NK2zQ6f7}UOjdc)Z{(9IFO>(F}HVdN2^(2V8(ib7q$eYmzb+9uiSn{I^mRfW&mYb<#vhagK!cM|AIH8oty^c%fXQBu zj8g27)hY(L-QBPmO|2%B_1$@=b zGWXXq#c<|0KbtWMzi-}d?_mMAs$3oOj~1NDpZFX;hMARvD`O6@1NV<=h)h0`rm5ac zADxAN$0jhciMl6MwLL$vN^2M~43V2c=Il#DBM!g1(X(%yzVN?5~@hav!I^ z>?wI&CUd05CEx~yFp9bMf6-~%YIPYrY?U*oJt%Z1Bcqke5#n7RoB7dSGyK?Vy~&=> zBy3lkiif7n zV4h^O*4u{m-&FELE~;@qvYs@0v}$+mJ2 ziy9rN{A6m=9UI!mx7 zPK5EZZIt|M@R9MuB+rU6N9;g)WdHJ;*HKg}GXCZ`bhiK7cW?Gp&5-Tw?5DXlWJrLy z@pR9|>lEP(y&AM#XM)mc&KK*s`R5kW6N&{HUYTCj;;LulMK=KCRJd;)k7q6H1b7Pr z_Hp6|-Ow1?#&3;K+WCe~L6F&JU*>0{FARU$nnKVQpQ^LBJPuf#%fYHG=iDn&ejc_1 zdmYJJYq-bP`z-qC5( z6Z@f4+|$l^Vh)`izXmiBi09O7l$A3{G_a8*5x=7*`^Db+Q!Wn%g&SS2AbwxL@m02F zkK(!S$7RUs&yXuQNQWT<^938XkHm+HiMJW=#vJQTAsXZ07l|CF8!Wj~mTK-4TF250Hnlh1)l9Vb zVQvT=J0`{Nq|Pe}i}oKjCx%^|9^_8Cu9AUiR9sTsZp7DiUCOyxz0tVHyXyca{;~L# ze*O696nZrrSS_3(x`V~dO_Cu|T+mCvKzM1chm>}cBm|$8#`f5pPk}aFGo*$VM!#72 zANlOeFV*oY*t_gnw6{pRFq#r0h^t)(*Dbze#W+0=3HQDF6^JzUXl_xAs&LN)zO1nz 
zI&KX8ZO|s#+WM|8#BMJcNNEEhEBRh`)9q#~UN!s?O~wq-8$Sf094N1Y9IPS{kxh9; zxnw^;(FRMw_2tw}KJ@Bz83X;dn)~+U-}|VguvP0|(xJF!Kj$A#0eQkh&j~?Un(C3j z4zphLZhB$*NXSt0K4Ww_c)m&X{PJ0q`qV-esfF1$Bi*r;^!2Dbh5xFe;NW}>bl!KY z=?)tigi$y~zO!ddZ-qzHkse^K^HB<1(p)cdAs-PYn!r|moGwQg~c-52w&vqpy_ zY`SXba1edfx(gjWd9fdIIi?~ODVOmz`3q^|@pKlt4R|^Ha|=;N-P=kISrWAG+7tr) za$qs*uaW#EHjCcWAZ6@5A1@ncRQ2ul5)?b(Fe5f*?AcRr!JVhBp7Z0@*&_eOt6j0! zg7&4dR^7EN)yj8_8Tap}MA&V&fGaYm>7(AN1OCL<&cCtZaAPa9iYH)ae4_yAHlBMn8=V`IHA8%y?U=pLJzm#1 zhiMZz4pmroV0N#Q&S+d4WUH(~Hv`*bHtv0K9($O_uM$4{MG|s%c5A`E-@i~4t>z%( z;ucX296IYeneyrvxw*o?@yX6c>BOt$hT+DP8o}xD=_55O;EdDsu7VQta`!%RqiaviH z&k{IUsU9%-vrySFxM*~A{0&XHOOmsRAMn#+6dYJ^R-!*tPh~1TRJf>j4dX$#{>k(T zb27~vuIEx9ksnukt*y(eS9m6-c*fV*{cZm2@PbB7 zxT6?3KK%gp)l&2gjjIt~;#DS_>BH3%>X8?1ZvF=M`CJLxEh69+G5rfcikdvdKq_U5 zv@&imx@vsM#zv-8!AI=?bwzD}HB?~7Yd~#aT329=>6SbRuTl!G@%mYtMW45ynt{Qa z!)HAIdkX;bEj4%@SLBGg=_3&EaA|^FJu~Sw*<__z#xB%&!NY*u{=}CZcg0#s0?1d- zsXy#h|5WueX|ma*(h|h6>!u)aR*t>ThlAcF)A+qqCgHgUb%0>Js>73dE>(d}wq3-M z8=p6H=#&~!`fmJ@$f6KlZ)$`R5UL*u|Lx_aJ`@q++PN;4+lqpC-hp_jjgLlf_TM^< zbbOot2GQQ~hH7iGPwhaIg=!_9VndQ?ItU^2;46D9^}#-NMxC8|3>zo-E62i$x@;|MNLBx9O z&e!w@+k4D6YRg6H6xLSn+r4wkwr&}CTPN=HWix%`-Yr+2O-8O7TQ14vOeAGB#GPbfwL)udG1Ml=SiEd>PIwfo2_h_|cw?1^)w=`W~<5%drb_3Vp-s+ZS64$hI#bbV|!}tT* zFUL9c1tXynu%0B(bx;Sar~j>a?!o6afGG9oh!F7f=(iDyV#KdIfi7s&b=K9a<=@sq zT`}oqt6jkM>1MXAlygG^auoJBEG2tQ7VrczX9~HRo?JWFQoHy=;?uUy?iAk`92v%> zU&Qliyt-BQ@{tOTxw3iuzy0C!)5}-48&Lt!RlUczy4hXCH)3ygZy=DUuJO4IrFzqU6mDD3P9@bRTzvb^ zQbn0ef+SE)i3jNpjLWVv(xD?zpL(3TmvVJ6-OC-aBi=(%2xWD0@Tx+yy+vJZ%Xh$` zCW*bpH@5XQa~eBtJLqp+ypsk&6YqNc(8^0T6_PUvMh*sJY9s$86kV>m`6WMet0=P` zpD%HhLN&_DPy9=D*bK%sZhUr1kHyfjrgX2pQElUT0v>+0+Em?ie<+m;mOGYer|f-p zzzod0j7Y_bi;6m-+XLvs4vR7Xu+PuqbUQ2?;oe%!LKPdIzkSOz0Ql%R2?4Tv*KVN> z7ZG*9yJROb{B^E)eSaM+ZPp*An|2im@^QM!47%}eCFW`8H-^lTzxPF|ztS1aS#F#Ot zuIbYB5qe97!50bdToGXwX#*vUZe6VBvkUCi6`5AUxlH?$7O3P>Wkt=T7PYZ{&+Hp* zoVXWoP40#(UWv{N&54uCIi2}9qeFy?fCc`F#>DdXq?x{fwPbfal?xw-8yf4G(ZyGh zXOeN=&YrRNEuw!!7SM{eU|d`nIhYx<*Ewn 
zcA-zE=zY>mj)*`*v)@Oz-{%>S)rq-#_1tv&&59YW58-CNUX;~EtZFr8q zBuU>v%{*0Hzs)n%0uhT8LDo3%go{ki0>qaN&&dIyb<9hy-M9~pb6_L*nB$P4{9vGO zdl@;IA8i{ct+fL*(AV5PN6r1Q;(_7) z#;=3KhJkS;E9DDg&5quMpu z{%3TO!@4B1>qC#`=ZW7K+GlZ`E=fzCv%Jg9>8d@UoQHWsx`D47+Y07ecyAC#5>m=s zlB?y&aHoK|Uh+8Se7O?MvvBXy z--6Ytjs6LVa%1(=|#&NgmMr$K-*hi%1 z)b>-WECqmWDIGo^MO@NJ+{?6VZI=}}vP48gR>R5oe;jt^hjw7(%MB}IFN0%^qt=!~ zN9qstM`^1@J9zsiiK%bUn=+ln@t5mAbywBvZArpJmosPElhzD=Iw_^_3ZChK3GHx? zrmQvZQzDbE&IG2ggu2&b(_*K-+`DZhv#b@WfA;1~w?1r-NI2_B#hDSL{^x_0u+QS_ zXAY-+>ziYK^i_&8+P~#Sndv*<+OOQKj;1+nNi6g@`*IuNMTS4%6pYArWxcwqOZ)83-W0P#REf-w^X;fTBJq4k3AO4Kr)MYqhD~@ zA<}iRw&G*wcGteW_!rRbqHq} zSCCDf$7X}<>MQ9bAQGJ#jcafsWQ}!1U|?XKVa!NYNR|02Z`5vocgXzsf2NOe9heX~ zx)48)TGdi}y}4UxuhF#S6Qg^tb{7XumIT5|o0^*Vfa#6|J@!eLE}op&CXFJiFu3nkR%Azpm(E3ulhW^@S2Pizy8<~< zM{_B_G0{S2@112givw71s@MCR5Z5RWt^e%RWr%TVPAl=!qyH%z?|aY@>q`!);?IvM zGL`_zD4+Za397vxKJjMoB_`0!cC;NfQVGizg&Aq>h^DfamzT$p=n5!d*fPwYthPdY zIC6{2P`HDQ#z9cde2M4eym}vSTh*vmltnKD5Vy%s_dJPm4k8w+lOIpq;%T$zfztQL zlWY=O?@46R>LK5(ZZU@c)S&V==JnP&>RVPe!)9cJ8-MQu?(j=O)224ebA58E5tTyg z_gy8$yQ8PTd4;GA+iEiD8LuK9PqCyNAkUwgGQP|dURhV)$AAzb3e=71@JH)qk4Dj{ zA_q{$;wXT&lXaUX#lngG5 z;uMe3s$Kj~Ts1#2W(~Hq-QW?s*7Spnl@YhR{JG?onB7F6lyGg_lhpfw|X`CE?&xJLvg9Y+gL7vV7NO6|Lr@QYU8aWVrb(3e)-AEIy z6i;{dbk+T__Qs=bpiZH>pDBtQSvPfz(>y+W#u^Q6Y2oBf&G8Yf2ll~-kb|Ci33NTN z8;WhyNx-@H-Wf+I=~w@5-h9Wgh4)x4KLS1^j`&s)J)@I9pxP5Ott z@Ai$H40#5=<-4~ygsaYZq}$@ECrP-4!q?8IBMaIUej;-a@Y;aA<)gw^qjHu~C(|90 z1wf#ROmXeFqVm<1LR0N{Nwsec4f|2B!i1X>vNc5x9r&A27cBH}5`XTLSRFIa02M)o zF0^wS9hF99Z*Bbk&fhf4d;W3zReB>kikwls=C>r(hVbwNNgm3nk?gYzjtP*O2fNnn zmquc{$1&8>_bExWTkQKx+^_t^XZUEtD>3~%>%eX4F)E&#QOK&V7HK<2M*C_OcZWTY z)C=V0jGf>Yi@2|P*Hv@xtfz%0NR@XXd39&tAhRaF*(;NY3iq_7l~*mZDP#jOr9T?q zWqw$DUU8m)JkQ~KnNj}lgrhL2|I?P?meWmA6CV>Y$icz$Ga}j3jgCeQ_cjBBbA$fe zgM%;kn2k-iD&5l&QedwRE~eMdgqoVHCLf<{)Q_zlc3wi6gEoX3jY~#9;k@?7x7{!4 zV-J_pzMJ~>q;67{Q!qA=-D#G&GO2f(p(DC8UqxoA7N_BMt!TkOOLq@|u7p z!^iH@KNoDN$WGO!{G4`9Iv70#*ea*8I7c1SJWLw*6x~W@YF8XK2i5^d>dtAu?oHVy 
z$!KBasl{2%CO#dSztNx{$t_(7RX!z7p;+KUXrt|l^Z5-7R68##MLZ+NMeMdc5bNY~ zKmdo2kJ^gG1E$4usiLU5n|&iABc}BZ@o4`A3YooO;hpLxc1)SU40c5S!@a_j>QFP$ z{By-$XtS1DDpqlp7h=Ifo5VG_WYF({A?!$-ot|`;X-&?avjN?HpD+%8A?3zfzPxh{ z{weD~ty$7KfCppUg`11t*$dTh;@f0WK?zl_Wh{{i=W4}NDGlhuxF^!Im4goak6G`8 zWn^e-RrW%ClyIWF5u@o5CK44=SZyFyuH!{XqWCDUMBa*L7G0Z;h>J=2dO;IIP2y&5 z<2NohM;FZ%B)_3mS#EmYpuyjK<&it~w))jasG1+i#Cl$cDKLZ`lb%zw#q2a)vp+xH zmg~Swh`^f-DaGLXlUG2<8o_G;gF>`^xfCf>t z1hNS$Rl!}1en95i64~au;a+Y`XUiei{7@FmpO2OnnDa`y-}XroaK`tpCwl|SI@PGJ z6=a_RNvs-JKg%ISMAdxtG|j4hzy2=~!xxyXS`m=>>5*%Q8gH1v(&om-2RYScG7iRr47FEBA)t}9?pB}e5qxMt-s9JD2j$M1g5T#+!uUR`#D%2d+ap9eC%T|Ch;ZH zovm);YBxXH(|WJKbviNMHNmEKIcNE9$L9w1Z@?i_PX@z_Uol-yjBVRtbp>^-T|)`; z^?7EN4O8_6aL1?c3dY!{M$-Ki8nTCp2XA>0cF=_5>rbUE`^YA#kC#7%7*3B+?f<)8 zUmUJmU3=y?`vg(_y;V2v#xz4Vjl7FG@!a(|pktMVvyS&HsQbSI#VvswDQrH1MWnNm;cP%B6QbDKBa zJocVRqUw+688=H2Cv9hqs`jRHO+Q;rRoqxjk)8rGRJG~d9E#W9tQnU z(Fadxb$^Q8&AFRm{tA50GwUurn7PZQzbNqaEiy&6z>$iKGS2Ge0-jKzWyq&^mNUxY zmf<+^JJOC!ms5?;J0-MgrW&UXZC=_w%B5X^QlFUUjQ*?9R`KcR3R)<(r5RSp`uJy$ zL=F4Mg54n+DHrS9PRLDg6j{-0BrXbu@}v5`?Bo3fOJA$_qKVRJ#4*93J4+6h#3EPm z&pJ=bE9i+ugd2T9%w z=mU%ariStgpZ%x_`Q4{M8Z`R-f<4uN79Yw|Mjm)TQPuwHW0-Y}+TC6ftHKj78H8{>hWoluG|1iKyiUU?rAqU-ugoS#rR<() zl?Y?(QA`pa*XtpY%{+je@!0!H_rRI)hr}ZRb=v+t~r4C&kT2X zv%dreW#PDM%d$HHa#Yr{Obtwet__0U!=^f!zbv0_cT?$g7&WfTmPyaIC%aO~I>-bK znYAsrY@2xO?~NA89ENCBP1vWbc>(Ia$@aUxaxE3`-~3M$>1#&yjqf&hpvVsktQ&Jf zd)?T#CG%V zCfwJnyg`w+L%d3&T9N2GHaPm#oT#M~Ve*nqu6l-*sHfSR+((;LIWs3;uQZ5~g3lNo_bgsxE2 zb&k5A&BiI+P>9X=WP|zl8Ip#phlaI1GTq06X;Ot@g3tY@cg5V&^+|`GaNVlQdhSC{eedGt%=e)*bTIZt~Kf`bFi-?P_T7)rxnl6hioyQ5tuBN z;~O*e2V-Fn&A6(2(D=0k*~ju`WOfeg)cE)IKJgQ8na`41@CdC>6?Wv1Fk8zE!-=YV z{mm{*^)`x?yr8NCsWncS(~bI{PD!kBbOMvl=`6rm!j8^AKRm31_lK!B&yrgX5xzu4Nh>#(JnXc^0=&|L^~*`4S_(^$!aMp$B4O3e;ORT z@LJuFbOjkAnI(~qiIbCtjSUU1$Cs_GQiM`d;P3YwPpZ~U!*+|xq?d<3VF6ZgS@CIs zzrO02FfJmVGy2ezjb9~tv%6CO2Byquv|63;s90JT;|sGVqKNIv$K&;9o1EeiE(eya zuYOqnkk>|i2}()GnoWAWZYmWddQo7op@jEZ%}_o}`2g1Omo{LJw5@&Lru&6ce;BH= 
z3ZPLRv%$QW2$*@(#wa*)WGD=HUTu+P=C^91XJljxv3wXV~OUI~%xP z#KV&=v7x|@CDlgM`5veEbJGd9Tw$w#MMW8?n8aiK%`hQ=*fUDiU(9N_-&MatByWQ8 zdEzLCiQdhQO2*k-k|q=dEnDb0&0eo#>~_f z`<5}cs&9N&Ly#i9%)+4r1Dh*3tuz`x9_yRH$2FhMHe(qV*}5`?5Gpxig(Bl9n*$A9 zpZM_w8>XH-w;pF_nsS!UX!I31mavXY8=v@CgDKU~PP2K4e{ErOK{Abs>IoE`$Bf^m z3?$u>%j5z|xO?3!^Q7tV0&wClV}fEL4l=iv_Ol=Lw@OXam(G;NuJ84)XPcIOt`jhJ znBXzc@9B1m{f<4Q0Tb|5*JjX~)lPEwPO<_RCKYcDViSW#?r3+KDj<6jK`WhugcMOd zaQPsQqv9IqK0lsBM3gv-u^XX|z~{vzmH+TZpXi141Va!Q=RXLgPq(lV^ujAv&K4D& z#h5=pF5;7Ot8V`^w#@9gfAFmT?1$55!ZAb1dA5HBA==%~P2nR+@RLVGj>QJk2>zu>F~%3VK;FgrXBl~P5n@Gnx$ zWcHWcOCuZnF7;6@f-+-256q4utPIfu3bRKJ?O=!I_!PCo(p-*?tiKR5^54tmilw`x zkY$c!shK}Wus2tvK*O0S!QmLQ<6%yzl6`#-BdITuD$MDF2Rp=nYlSjsx^2#5375CuZ!A0;CXM zm6d=-d%$%#TXNO*_A!B#sLO5!OHtia@ngy6dRh8@F1QpR`v*n%KoF4L&dQV9$^?* z^8c)u_Wdad`Z2mtiY1MbkNbWqa7kmrR1Z=CNe%h+zNG?Z8$zN?$(sA+ZGS}jRZhzR zKc-bRh_rI1Rg)?wt^Q-9v7a-$#(P=A05I;qOG;W{py}(vGkKj>I5XRiE#&=y27&uQQY&Z}=O!dxHH|j_ zH|F=9_CH_YgL<{xVMF~Qyq_tLJ~JhVEH7wWP{@*QqlO3Dj)=3$0KQHAG0T3fE6%;h zIZpRF-mm*!2DU^Z3G75Vhjl3vze>&PK@Ul>!gx^e(}$Svx(P0XkL(TU zsz=?_U4?*hg`1FNfTPgV)<4*DYK|L+7m5*lNYh~D?3S@l^X-pI_?ICEOq)q6P4MXE z5iDCsn#UhlWZr7(zCDKvu_pd0$PPBQ_p}nc_7Bz_snZ5tU`5f51F4)7oxTB+Wd%z4 z?&bWkNrBCT-z8grf&N2Mh$q9ghYI8WAl$S~nwWxQX0j7p^ux?-IW;MHnDYNX->pl@ zl&Smr_i*~g;ibpL;B!{Kn_rs<>msjh;((YM&@Q+Iv2sSIiSUqH{3Ot||4Wq2!k`32 zL#OfY&R)ZmU$#mTA^$`Ph1c~dX_GO)L_Fwx(B#Fkz6xFaBtg|T@78_mewJz)-UQWL zhewvPF|j-O2#WnNSgdnn%KIuxulg97%t3l3Xft@_|B&xvk|vz{qW*&qGY7M#rOjX( z|HR%#3V_I|%N{95aAjCIBUnM~7-|*9t2HtN0e>~dYc6N4*nO(g*6b3o?f_$w^KSGm z6Pn&|HO3=#_E@&}fM8(g21A=-Wb-hVEs7fOa?5>Kigbn@sgTTEH z_UT!g(KFOhhOw%G8vc_eD=TeF`dtJw%bUFNsvAUW`o8~ALaYbJsNYBH!Ib`WtZ{uZ zGQbVeaf#=|FPAtg6F=Vn{@?Q1k|%?#bm>W*xOi^PCW}?p_NNY-Jk0al?Y+w(2)M?K z6J+kp>@uT{)QP(n@7v6|tkz29Z^R|PNp*+*1cUV2Wfre``|e4Kz-gt4?gnr3w=Vgt z;ybY<*mAuvs6XZ+gkLB^v|t zc|b1EoMzqlMSRk=OE&KWd_D(jzjR5@`EkWyAGolXS9Mb}>r1rfTa(;8iBq%XtuH+D zz7>2a$8-PV2cSf*wEXT#rpx1`{^Z=hvOH(TdvVpdI+^c_D#88>(kZc8Rd>7~ 
zHebN!@5XlUK^|-T!G}HEndY&}_maHo-V^)2h#kCAUv0^FRc`XBzh^;7H}#3GddIED z^WQ}L7x3xMZd-DA?s0Y1P`8(7LDn)}*L@l3$#2#F{6^wY;7J~`vnN+=SaMiJRx2|m zL>i=DV`0CFtFVdQ&MBb@0JtHTmjD0& literal 18533 zcmX_IbwHEf*GCaWkPrnFBo&cXNoi4(?j8dL0qGoF1})tUI+_6kHefKOq_hs$Mym)o zYD#RthB5U4Pq9b>`vL`_KHXymx5%&-c(= zR-aXW9BOELFO;@2QdRW~&FtlKkNq#POm*G9b&EM`UFY+y+*^|43%h>if=Wtr;}z{S zH8s^Is5K6j+mbMS-8s-`ke=NRwI(0i6Yc3~^0k$)cbQU8&>EUIJCfNny9J~*IBr!X zjx$kFMaJUOn)Qt8!KP_48mqsR=QEnE(f2hIkjd)v{d9VMJJOTnGqE{ln(dl-{Lbc@ z>B!^_0T`D2=?oQ>>fFmAmv`5JU3&GYQnqMC!lI?7uYl4`XQga?sV^*?HW(`e3XHs^ zyZd^Lu_KUz1yv|k46iQnMOg^%t;rMu>7c!?<`(F-5{02gss!vJ4$o4`(Av~vja;7- z&k)A4<(ObkVy2e7y}&dW&1d0ny$z zUUPg&3bYhu39kqXC>^|@6`a@oQ$npa;)&)#D>z+X<4Shn>F5knPc-rb@aOx!9K2x$ zv;}A$)Pn7iLDm#JsCctS9)%=;Z3FF{4R5Qt1zFwVlE8~&98Y9S2K z=k*>r(?ZLj&fdy5@d;`0cj#zpik`L*D{!QPUP_afQ}UpNdcZUN%E89B7!teApQ>Zu zO{UeQ`lW)S3gEA#-v%wJJ$ntd)jFsLOI&_-+P%pqgZh@02;||tS8uGZ@yVdP)5No+ z#@3W)S^5&sQ3^1Nh}j(HtSqH^7t9~iLctV*{NvBGE$8&(n%B{H}Y-QB+iKzg&~3 zxA1sSJFjn`6oVT`I~K6`YYy_SU#iD4#-YwBwroQ) zfg?XXnLbfM0j0rf8*5ADq{m0=kCl4Db+It!_)} zsqDXwcL@M|pF{h#Mbwd>H zLkG2b6mpax zRCdQDK6}x{Y^XAekI15V0qb>iDvd;2Cbid^^+U|~h;Et{lP~A~N(T#8mk=KXo~Wc; z3N9q1`d#q-^w$pXu7FvTP zdLerqI;S0~N?9r&<`n}rodSMv-BJ>$8mXuG3kHAv5Pg%x0rX}#yl3>*U*xEKnuCJA zvWG|ENSg9#l7Er-yr>}|Yc`zBJoD43`ZT0d^`O;l!fRuB_=vt~{)-%^@sjG&Gjq-2 z`W-4$E1YvP_#`;uZx?q{SkwHsjzzW0#Gnhj|8;RAk?8YcO4vD$OVGI!{HZ|fw0h(R zP#QdY=gET$wP{&6&;MYh65zOn&tKwh_5n~_EKjg1v+3`M?tBf_RM4<8S(VM2VIgy! 
z{p;x^I$44!;Al(MI7>SO%zjJS_wVwja+v;QDr{pg*pfZY62kU3#!wmX4Q1MjgD#iB zdy;-1h%$Y5`|GE|6GG0enp|}WLNEcM@43G#AW~n{HoP-KJrm6`!|z-Q{_}6V`t!eC zB;36$t%by({r`rm+*A2Fr7IwBeupq1B^`xg2~fz0G{7H z^hY>IMFFrne@9wovfBe|=2<`Sr6f8PQ2BglW8ybF&gGhn76U-X;C)!263=x?PbFqH zSyGxt^9}L_TGPo?oCP3Rn@SL>Q*~g{6Eh?pV{V_GcYC9}e2!$i$QV-PF#X*HXSWut zB78bZ1JRgG34Cej8t0uJ?rtGE)%#VGD_r#q0Vm?bc8=>xbE`$97pvX1oJuyj3WOTW zBRzw0dvL)jYNrjr!oUWxViScp1PkhxGeA!l1S&eY2tjSjde8S-K-s3?AM{Lz z$a*G=Su>O5@02Q3)n(7Jf2i^KdupD1_cOsphj@Nn^-W6L7VpbSEOjUUUYR7ON@iS> zsh(##jQ|D=WQ}0afVQaWE2v^)1BU@H!eigEHsIc9@Pc(U%sfJ$OHhCE08n2|>ORHQ zYu{s-nGjKAw&wbSnqTG4r&uOV4xrc8)sh zu4t$@p)2{p^0lLNr{*sh^ug|$bi{G!(f(KjrkhEnR)NrA%qin`%_~7>_T(vOEKFm= z=~zdMux~NTbeW3kv(HZh=L1@2i}dEjl+F{PTE&h_w%^=+=jSadPsM)io{?_O279Nm zIeDseGLvoU8?IE}Y4ZooM-kd32uv;z7|gVN6XCkrtw_Y#BNmes{JyHQmsz>dL&f`Q zPyq$!QCl77!&YYzO5v0b$OC6^a}2u+kt;PqQog-x1v&aeAEbr7*KTVGcTelQYa?%0 z(5&wIU0{7vSK*|0qwF5j6Q?j$jawa|(-kZpoy;M3oJ6T~M}n}=fq={6Ap~T=>Y`-v z{yXu`Jv*RxFKg9iUlh(_YIEOLdgnq*&z6q3N=figcyQ*Wu% z4YiRFtzNEp93=i^)~U(BRt zgu=8=^CCrPGgW?a7)q=^%W3FnGf>Z1nTQ??mz{#r{a8_^Ob#E&YtwNl+9H3?M|;#} zLrQkVvs67$r$EjSYZC$q-_BonnePr4Y6Ws~@1@I+k>#A~&SH%2`nK#)XC|JUc?Xk^ z8t}d%Cq|nTg>_me>XbF(u%?GDO%FW=jOE%tXuHmK$Fm6C!m6ORMy=*1B2nRy#%Z_E zJ4LU-f6}lxKRc;!%`;_6)H;0YwPw%Oz0giLtsbk1T!9aC(iGOLu&BR5uUI4__vT|v9RV(AAVbVY%N z^l~Iw1u~x*mGJOWVPf2^&-|Bwfe?TbM2%)OW{%Kxmm7_-t(Rbd00=@?7TcA1`46Pv zjteZ1&_VHn-bgWYHqf_$5qEr+TeI^JbtGtIcpbRE74 zu40d)v(5bNB3u!eZ+afSZ?&+R1$Rlm&E&mzTmQ=%HGkJ~XER-ERo2IKo>XU=YSs@N zEH_qpJAQZ3AN*iP<>alva-MfbJ8%6Q$dhr;o8K-X{W>BJbrs4M;$eY$}k!Ymk;5oPBbcvgo@_#9Cby zw63BkxSEBA&U?wl#RVp+uZd&-f_doG7^Ze39d2@o_Ea!;9&a;#NSrG+-xiXPuu1)*2j#3?7xv~vYMn$zP;6uV7j7;&l}Pj*wD z8WL~U>F0?EMMbOy?YEiq{=kq5udTVdV}{C&vYvN^e5gbQwS5HJ4yp057CF)Ng2uU&8-BNt?j!Zu~1Fm@v}7eUId?(NdR05DITu z-0>iWj3ujHr?`CVREBvIxq0)G<>LYi?OL8%0n$fmrinz^`ClE)l%QJoZ9)MUaUg)T zOp2OA_YR?f<9gvnRt8hmX@N8{B9$l28 zD9lN1c|PQ41R7|p4chz}Xd4+K_f8DPP8$x>xPVmVwL6Hi$Uky@I?De8Tw2OEiNrFk zVIbrED#w4)Ua!-zTc?n==X`mLNaIYt5Y}g&JIRG>+%HH@EPDu26jjAh!^a!tr#<_- 
z7sA4v+a*Jy{Jz|Xu=*)_XqBV1mg}eC;{dc>3u7c)$nFmbQ;l8!wS>`~i-SyGd9$-R z(8qwzuPHMvL~{^e?tlcT&`K$v*oo!t+GP=WFm^sAt2P56tA$iTmwZCYESBB-0KVMq z;`8xJ_tTmst{5%I8+1Qks)5f4U^UGC6~Z=H9$83Zkuv8qBnJBO@B%AvY(9hlxtS!V z)-vZ0Kt(i(_v*G2ib>=Yk6b0sA}LpwKX0Bdwyl5nELCA3#FpIbqrnP4f7OODF61o= zm#b=at(kbvS?3nmca)rMW-&;2dD0h)agtx80QWerWA>nUcq8pk!){3cQfeg#U$u|c zuGf-~$i%qoM9B%bpQIZZe`p@L=1hiw=K@mZp*0iJ%i&&WO6cxnO(sQQB~pAd!gzH< zH$pkrg*!8kv&^u&3>lC~TrkgTG%mS-^3E@m{@IRQnIeJ_#kr6=m1FBQ!lCXb!by*y za%r3Iip$>bA8)esRE~B!`1}{`NnmO}Uv0dCfncpV1_5t-H{N*fG5vLRK#LoK%k?7N z559v~mg?xUb#87ELiR>Az%TtabXLjNvUy!0@CeAZ^OuE7djc+$u%hOuuG> zKIvU~OY(TM~fH8Am-Xv1HY_eqF@4QDK=s z)Tqv84?=S9K^43XyGyZFVn@v{Q+Eor%TCA;sen|eRv?Fp6^F8gi))q@M;Ycx)rfK) z_~8h;LB{!z)_K^=`IsM%<^(r|iz;oa9WwxaH~Ni6q4n!Ci-HX~ly#=<-cHP?+4!ep z4_tV}81TPml{Kb)5_V(`H;U2b$L0v~t7oIHqWst96(g5hc;0_Ug<9yQZ$9CEgPPrZI$<_43W0uq~?6_kcYle zB@avTWwVit0#gz)F1)6C;^Qm;JE1cs+9B=9lKCpf89UvsWdg2cU%SCsx9cZi=30S} z)B)WSLyTZYU2j?+W425%q-o(G0xhV*%N2%OmWcSZfF5(y9Fi~ldJ|oWvywMnGYnRW z8}_*}A1t}1r|||b{;ADAb0^(aqp%Qtfr+ViH}jhElSdMJALNyf`~}P*zeog?gRjYc zpZQ|00+V2UHoha$N6HII$;@lnWP3#<^=z1*^=>(!G17XVqj2)Yj3cXV05?B_a~k~R z%&dIb0Jv06YNx?kni(Y_2m zK7ZU+(xaKQJKuQ4a%kWj_Jx`1=wa^u?5|RE^(49s3r^7D;z;sRuEt`csH z6BahxR=7{6RsLHf^?B<)g@t`RX<1p<#lB7mN-4ocgAl=Q_&a264$jyO-FQ`QVJJvm zlJRYMh3XsQ7h0+#%nS7bUaV*Tnc=Aq8HYG-siM!zEZ z?tg6A$hgO3*7}{X_D-Q|)NYMU9q>$hdU4G$Vx!a9xSX}UTrB5Bm*#ROD7pDzvK;X zKR+|>nyE`OQi~Go@XGw%Ri9|jrTmyf$>?Bw4jmwo79LhJffG%<0pAbFWVMhL!vf47 zZOXiG&)y(4@S(ps;@Qo$x7nkYXIpOUT$U!xp|rj3=T1=eVC44;Dvra~5_uLPIy}d^ zRwl^HMhkGsB!BN53Bo|q-uj*jSGjJ3ZQoOQy=X!g_t|(F3tvDYeM_x(Xk?LLlsL-& zT|Cm-Yff1O-fQDOi_kxKUR$6w;Li}|9fDtS{$9k^-@aD>X0BzX!~W{B<#2}iP04*_7Lcy3!%${HeCCdR{acLPs)wV)$w3#%*EhzN=BM%ZzE4y=(;!D@%3aDz$I@I zvf1W@8WU7dm}q}(9vOv`5Xye9hwZNvE>{90-j*`G`{P;IFDJf+=CG{}5cO7&!C19G z#*DueSb|wG@F95pwD_$2(U)cU@WyK)znfxO?<(&A8lbP$o|5kh*2AU?o6`O4X=@@{ zJ1UK7ZTL+SC`(WA2vNQSgHMgciE(>g2di-u>Ry1hdON<}tfO-?>iR798~65OP5-QNi1W~jSa 
zBCf2^XNv&>FBOuX(>K-u<6Mte20SM_%M-OGs&Fa%OkrIkn)kkmgNQ#vU748LVX#&0Z&*)-sR6{yX-X6X8DSUClx&0ohU)I`N zstu))Rzav(vTJ9TFlS*l6Kp|bd6g9(kaJZhBEDa6oZoWey^+7JWx%LaKuyzb1T?0RfC8zbXC@>t)JVXe!}o#n56^^44`Bp%Nk!s?OY zpqLfefbO>gK`N*~?>~aL@7>#^5Ik@x#bWiX(icnFQZf-lR(CK~?13UmHpbmz*!tgu z{W1!T?Y>{Tr*2`@zJXIjR_|dg?~)ezdbmTA#n6S5R#i11uIaHJhjtnTSqP1WO22h> zmrb>aQmL#SWSxq1Sdp@A`mPh2EI$(iD?|f4qXc^x2Z{%+)!Znj+W9d-Y=`L||0qM^ zRod`vU*Up=#s^F&Xy@L&X047U_KcP*;)_op&kS)kY^~g!^!qaKsr&19V9@laD(@HI=`@P9(gSV>#htA)8~tYFdCS{H$C0O3eA?4B*p({b(%|DnSytL zh3xDw4y}AfCLjs3&NiV2skMZ_r+g8&eX?^uTRrhyAefHMHpQ(+3|(x^f8BsRJBu`2 zy*MV)@Yq*-eyko~-XT<5Q6YGuB85f`WYR>_v-j1f81tdRV1d{{lUEh@c|KeBC7w6* zZn+`e--jzmUy9aQOR`?)n{vNgs>!)9pnJ{JnPThbXtxzE^(Yp>U zZp#c+gD#pQ&Z!&5nR4x{GO|66or>cwGcy6#!>s&$t(?;Lt>T(cow8iNIl8WFrLAp| z@WC@P@e2<0y4}W=3HN0H#@}xd4MB7F@fmT{Cut*B1BGF*32sJft9#X!4V5h7{ZDKz z7dzKyv7b#6X4$HF;JG5Z#e^bs+pHEGTH}Fp>)*qcB=L1Vy-5pBaL93Y{!r_pTSTBP z;#ulU_zZj|21u=vw@#1}zmqs5UHCmX$@tX^L&WZg)w=bgM)x65GVy3u6&WIK^2fD5 znl)pqOZ3;S!@nIyhJY!~`C7lYB8QP!73)hLQE3O$8>8Imd7p*1CY)+r?^l4NZJ}lB z&R)S}wwP%t2YvrV7fdjRPx&6+_?h3u$C8vwzQ1n)s%2g0ENRMQJW`?=vY$j(w1kXx zIcfKf@@}Qpx>qq4r+pveTh9~6RnFFkDD91Kl66}fAF*lETW80UM@`MrA zW_w^xJAaiW(`m3kr(_*Z-pxOrA1AH-3=_|l)OWl6#nGI9^GaaTu}Xk&u`UEwrHnx5 z+`|lCPr5QOQ^Sxvn@TG7pY`s4zi@o8R=;u-+MOdNGT$6*htjF#tQui8XUe1omS+rH z4fKJzrvnmtb@|+*M!a(4KU;{{R+(x#>0H}efCQ*gY!t6yTjr zhspFRWE?oFRVKE8IZ#hwC>oL$;~9m9)w`yE=E~*b(8#j+%{O<34CtW(ipM3>m^vU} ziUU4v#nBSg>Vk$m1$X@-wr|6v zTTQ|@W)?Pv`rUIUms;U6%Da)l>=NrO43wmI6MW8p=KTz@i(cX<#lBdvF6f)f_K+9Z zlb@_T>Wl|7(9^$+z6{KJnb$l^wOz-xDu!B!ENBXJsJF16?L}-&S$uafscmML?6rzK z61}~2|D;}FnsbY{o_~D;^a+va0ba|h9 ze9Mm~sZqRS0przv^x>nJ?M5#*!sS)3MSJ#;ieA~)5ZbXdt$A9ui z)_MmH{A_ghzFZcfjhwq+2sc}MF**DvAvcVe;nBWOeNhuKj~^-8yxqPlmHXL6eWZ#* z(ZH}$7t(BbQv8{*EY}P>TRu^HGFE3*Fkr?il zW7+w@M`J1{p{Oxgot)Zeik|?V#zvL+RD7DkxISq5_8qNd@2fpZi1+*|=xTY#U-Z7c ze>`ikS5P-_q^>c%N%oJV8LQ_5kdiWn4p3VIgIzOqt!)^Z9rW}klka8*<0Q6x;W5aH z8nvOrBx=-&HWWG*-Vz++FyGHEKNqiSdY{J+(q=s#Uc5M*v!5==cnGR;PuTZ!|6iPS 
zaBXB($XR5U3?T??bR_QVgjTutELZtucv=8ad&-NzG1=O^+pLqT%=OWt07b>y^ z`=(pbSY^kjWB6Ttb>^p0Pz9OwGF9k>`T`=Uy=-i-d<- zBOKb{$64JDDvq0G$lVJg4iUh9<)>c(Nh(f+8!U6oYv{~Yjul*%<$95&{$%xs|27PR z7@Ow{rf<_|J}_~K@A?OA*DjlJj!Hg}&YgqEwYPhlWiQS;yom5Ho_tP-yp>VGDiafh zG4ay%I3~xlqQM(rQfJHZQ~#qv>>8w=+t90S+IRFpzw-kWd5*9Z-swEpE*Zgd$RuwECmN}vF4^O z9*b8sSVZ8I(jr#>gj@GPV^W76gb)0=mSJDi-MIFBMO=$);YoUE63xtM9(Nw!RGbCB zAp)_UYI*N_llWhZ$V@EAYRti&C~3qt_imdBqtNpvJq%3zxoIrPBtZhcZhe| z95d6_rk2!hB6_|ws5zsL~a2K;eG{@T3-PzkPF4y@g5wWjbVS03cMPXJa zkBlEKj*_dre~hwjJW<+D>^6~<*}OM8c^im;GMaoFa2R~;65Z_L!`1By9H&R(CCqLN zS-m@~PNA0_Gr#xk?JF1?Vu!z>=ysXT2UdYe#s2!>m6RKSi2SfMG?y{C3vZ!aQ_h%?eJ4gLty(bub- z?2l0rX=|T&ATN1V6B8Y3NiOM_M3_Z_U#aF4kX~M08r%jqsrh*Ik@%Ihr)74&(j5=4 zRptpQ%w|~+N?ECzP<9+$VR!w4MSDDINgtS9+x1gB{SX)#y*d-gR_y<^Go$2Z7xBpC-jLSBQl`X2@~u$ z2weES*XTCoM#i=tiHYb!^!&c(qO=Yk^ncIT<|mXy&dEd}hMlrkuC?ko_%wCT#y|Im zK5|74$Zo)<=`@UFcm0w-QmR2H%^dNq1WvsYfTfFTS0R`(MoD+yP;43llmA_GLR8>MX z*g&gTV%fWFe4A0xUvkpH|KS2%cMMzA@7il7uV8h#`U*NN_Wh$)Hj z2NM-q2c7Bq%9d-`mFw#hv+v;_PzFTevuEPlgKR(UJ2^lsQyNu4eyO8WQ%vm(pgR^) zA?k|NAE(dliH}QKTyD^bE&Oi4yuM0>U}?WP(a84QD(V`_lC+nrp4A2{e9N3&>>LDv zhM)gzRPcZ=j0fuMy55Z-00=R!;ANNX z!nR&9X%)+>TQyl(wT#lI@2gd4XMeY&DcowtII1;40`~7a?`hy?x2sv6jeJhM3F^D< zlyMlw7Fwa3Up^#(vZiEEgdThhC(kTk-Y_ZWkQUBItXfO!-2?K)nsXx-Uyj`F4llSn27F7cLy7kmg3+u@V+W-C#d(Pf6))CFL(0S zv{~Ap9#x)4DZA-}ZHCSfPg*u_&M62SHJNBYMxkO&IBne-_??kBVBG)qyC9j%c5B1; z`n52iE3G%yeK$K9E{Q4@Sfxw^E5!C?3-fQKfl zm5Yam+5KD{7<#%th3}D(dS2hV>mR;;c2gH~1fQr;;fH(~;gcCl-(>BT>jJrePutASUtWOyUvkc{ z?l%7_x(#-bt#nQ{pD7bf@2WJnxw8qtV#z+uCiZhMb=Y&2abhDF*OpD9idbaCSM|Ss zM(XjsKsyRol8n(_u#h-$@1H}nAh-n1#q!_rsi?UIob-~W!XgYap!_4`S-fwj$e zuj-8+eXzOLQT=#nBLw34sT@oyJ_CqKcn8`LG*Dy1AWRLC_HP3jME=>0I^_c{G zlL*6WS252_Ok~hOArMHFmuo3Tg0jOXb{x(z^m5*?JuywSk5{us^bF>&NbzKQxTi(f zj}j|=WHquqnqrbu8NFzoQ6SOStE}RS(Id@Ydjme#Rq#?D`xf@{%-1?7i8FbMFV4Cv zlA3#u;n6Wr)_%k?;;lsxq=em{X1r`GCVke1cRHadH7KcuVxLzI8wR(ksR3OlN`5?El8E^FJ%-2T_aVc<&wvTVUDXAQx&HdApK>wY3BpEa 
z7aZcb*P;P<>N*h8Hz>0Dt#*rL2<;uSF9S&fpnD;Sxz}aaV@g&=Ko8j2Xt#7fTvaE@ zzm0YAD@FRn{N@s+$4A^LRclSuMndDs&q)sfovsaV^4H#=I@|2m-u_8Pih^*_?Cc^# z2}kqz|2Z+FftgcvoX1+)0!Kes;i$nQCqIXjRroAVGi_&Y*CZ9VJwe>juvG>G_bb?e zl!gl)81o!{y8!b59+$t)3g9aHR8hpU=ieKpl+w!P7VHtZ`br$w@20dcjX0#v#s{{(?8X(zb%cZ>XZaCdxL2My$GXB4poYAc5uRTkf`NhTUb0mQ; zkDd*AJ4e+=(X+L2C10Y?k-0rd*mjio;ZgL1`YZ5tnca$PsLGN2{9E=BR_#Q>@RKnlF_vmZYD3rn0&|($&XPuYF&YX4c z^ysPb-0k}aXGo(Q)TE9|s@=lHW<$3}rae(dQm$s-Bd70%r$yWNWwUh8E@!Fd9c?vS zxE^xXH&v5wz22;m1DZs<~N+5ABq`a1t@x_f+h?lr_~{Q!<&<|JHS$%@qy76-_> zs@Sk(+%*1KagF{uk;)0;xb&HX?hWS0f!2dJ$e+5BUDxc(m!K0aaCklO8OJhJ{&V{V zC#3Z>=|y?xK=ZB4U|k$`MrpY20zj{7RD`8t<3>B1VI#BQ>H~N^vgK{oqFt#%-M_rq zVB_m9J?XTUBM~u-%?jS|D|#9yV<4=?0C!c`%OFT^u8^P6_%BgiiIoE1vblhIOxA_f zs){1?gO!4G4r+yzXehuLH>_YR2Hm|Qnf)v!9@z$~OCbLKG2Qh{>?4UmKcKB}jt@Ci z82+)@)`h?=>^^fFY5&;0{k9FN_3*-WNq@-sk9YEdkOR3wb13G|1tSDx~N%o;3xn`ni-33=#JYv1z=^b;-K5 z$e#1c1NX=H{sDNIjXLF~kh1yCaLWa9=v=p^mr<5VHm#w{FlTs8GxhKd*fl(p@x!AS zhY=@*k9%018uxN_8A+zvu#qE~gfzAJam)*bo!Yn>pcnY{ePtYBY_f^A-ei@S6B z)}(oUUG6M??qK{&Q679q}i;+0ONL1pBPrnX1(!Wh)oAw7SEx3~K=f&LY+q37_9iEe(~W zegM};h@j69UtQq-ax+;mjjk?P`Ji%~)XeRl79G0nQdJ6For#g6_pW0yA~xLU&B}e1 zab?0qdS3FSe-7wKgydP7H_v;Qv>YWaflRNB~*OLzZ< zhfKZmNZ4;`w3D(R2%@k5Dg6$*_kko*ubRfcP0F!$XKtH+JSuQw8fMk_BF=HF2y(h5 zswnV@FV`=w3!t1}4_3`lt5=_0>F%#Q5F6Qtp2>~ zh^X{ON8HggD73cNt&sTsob#y#lpDg)b@4{U?a^%kO=CI#PnwswvjTu|S}U%*LxxW^ zAH`^8+lWGTtjy>KOr&fOCeuIM@%I{LvcJaEYaQ?PtE_Ie6U+hvTBc`aCdbFqSK;oE z`JpVa@vbO(V?KD3Z{)t^`7b`-Vx;zV=qFoE1Bq?iFV4DE*|S6FH^-Kq>}Sfm*&sZXE44LX5`s;zI0MDv_e zazwEZnNQ|M(IKpRBXtQ{{THT>rZ;EI-U(K6-&;Dai)}ysnBbLZ1*?=T3@MBoH)J$kFc;4g0T| zg=2rythr`#xQoJaf=k$#x2dHvT@x>Hgl0)4BV$~BAT#m1<+b0HR1^a^?7An=*WT)| zI%WHE_1T%{V&rP%0fT{EK2y}FT{ABsj4{I`<<`}^z>p5QwP_jPudy%%mzhg&AFSpP z-C231eC5R(T7gW;=$rn13f~)rJ-mPPcxe6F>&D$>iS=fmr9m&Qaiwx_`*{HS_g+u1 z2%D2t!iGNmoM#80I}EmS#Pe48qc{*@vQijOL>O|PUgF|7KrXoKIWRFlqjv078a?qu zIJ`&4umGM^8CzcJnV&A&)p4i0-8P!wg34KgRu1ek+DCTVtPl2WP3)&996~ zuu^HAI#3~kPk^)=E67=ohUd>zR*1Zw*oOvlBJN6XWk7wgm{G2Q-3pik_}M)-cUw1- 
z%i?s2`|CO(yissbfTQ(j0Q0}QWjd`T z%&@(FT$!OkRW#pCv}t6!EYZ@)*Vk7rc>@E`RylfR+OB>9}PxX}wv?#47=YLp#Zp&dJo1cZJVNF7>~S8WaHHA21R&DmOU| z0Ypf$_5zpfffNr>dknFHL4d}WJ4E1f#Gzw^d|ks{e{WQBDp$Vt&HGC!O4d`|ZSI}| z+a=RuxUiwQ71P>GE#l;BoNmJ64EZBbL^5EPy-!<#sj4=_!_g(Jr2b-iIi%Nbx(0pE z9(=Pe(OMkS>GFEFcA&E{P@QgwN^$g4@r(HJ*U4bB!v=lAPo$q?Oz6t_O# z+|5QE3MkSP`MA&CIr<$@7BMTp72LW5s|Rc`Vr7E4Lw`T~n|4Hv3e(GaKtV1x*uDYd z$-5r!Ia;PPN80t$^8jb3t=>i{F^7ABn?d=_gxF)w%-o30G0*6g=lDLZ_~iX&aU&gx z%lcE;+L1SMosbic&M!j9Nin5Kp*fU1g6UxVye5H@kHK~IWP6J8_5Z^PFCHtI(uTOt zp!+|+cJH*|@buHH2bI-d-cz~qX`vS*9M1Eh&Mm;Qa*ix5(I2A<46PB{K{zGW- zczsF=VfN0C{wVWenHxcPVDa3_Lxd>faTSfYStF_vh94GtsIFj|*SE-2i z)$$|?H{ni~&+7Vc_KO%?rj57YIo_}G8gZYF+F_?r2?zuhyrxq+`1K)AB_bRZCFKD_ zatP`q;pVB1rZQB;#|<7ktX`v#an4v6R_MYeIDI-dA0}C==H(fWxBhJ0>|y(8kh@lo zQYMgOH+~xQvBpy6kHfF!_h$)F7>}x#6f=s#o5~$~dJCNe+7yF(x+3|Ad=zx@NO?s8K-9?*VYlasevfYj z^>0{xm@cQR$9l{AzBq*%=dpl(uVh0{#(3{7p?OR;)Lo^>F4g5swp&(^S znX>e42j|mgnO!%RUtFlzrQYwZPE1f*d?)#%ZQRsnTvryKVXQaM{yW=*jYxN6VNIwfB2gO?wDNWrwc=cj)r7uVqesMJ9 zE}}Ttphj&`0U_M`{}OiG%q? 
z>7TQl{0&b(ejL3a5{4@kAaZVlr`F_hD0pp&)_Mq4jj2|J@?j32qUQ^?zttjFts(il zM`y6I4ue-uAN^1JjwwmIOx$7MR|*Xkc(L%bOUp4its@LUndG2Zmp&YcPmadW!xSN7o719v`b((?|K)xB&nFVP-^w z>dS|cwv+R%F0&p14DEZxF4yj5C9qQJ#ujb6{d~4*#!d$f34uUqad`=2CRj0*#Rl`r zpaqi4uYFmOdoLb?NvCABN_%o31R8dA)tJII&Tg}MjSXM!by2Y|pO`iBY~Rd(*k25J z&OY}atM2`pgG4sb_)d@2_BxN;ci30rR|(gi%;f4N;M2*)&FqR&Lr6@7QFnc)$s3iE z^gElU1fj^0<8Mpoa5_q)A*Ai_UVy@7W=SFLf#1fafB%MP)rB_|F{#_=rdL!4o!p$f zzu_gV8)Qz~!c3{EdYv(4$Dq@b!j`gf#?hqR_UUo0CneIX27!u#*%Tq0LHa_Jl>Y?o z1#5ma%H2$?wM;jTuX}t=o2|pqr?dX4%XM&c=}n`B^x-4rnwPU*N*-BcMe79J zW|+wHg?!@<;oNo8_f?h*05?cqT)ROKWzkc`vae|=RvBootEMMS%g(+uX7qVDVbi^* zS(QwA!fHf_Ii;(qcFbAa`REdP4|-kJ%bOygOq9&|_j`ad4W8Nj$-#U4s2oUPvtB*@FF;vy&-7V|CDGG)q6*#d#ZuK!Ueb=x zY9{1({H23_qs}*T`pW|pE)y`De_j7w=fAREx zQ-a|%i-;$Uxo_5K!&I!TP2x8*^kv- zSr(GhH71IA|Hffg2Idlg+##u&qAAVvF0A}#;f(@w&B#n9F=)9a+BnVs`@6rk&;{zD zxzi9L4^%TLP0vJ1c>gzQ>OG&bGiJ(B7QPi=)PD0{W~FL}g-u6yveK4F&!F}sP+6Sv zD+5Z%3|kW@T*BJyUAME%*rDdh{)=6#@=%<_9*jNws{bHH)HzAkg8j6x!NRiE^i%W` zRMP8WG3b`ex{0Qevcz8%vsf0r%7#;_VY3rZ)Ex5^JAm*LE-{<$Cm<$_)a7k&3@UDD6C|ZL>uW3ybsoEv_ty zRJ357T-7W1%I^#ZBU}IS*?EaYDN-Z(oN(dlDHc~54f#&1knBg=(Q`NQqTz$<&f@BIb;KLM%J2SGgoR;hvydgoL z;s4C+dh}j}-}yRLLhf0DBxAKdaN{iFm=S=Df?=#UJ!1S0I4&Ll`c--8RMw?YoLMrkauy_q<4)& z>>MxtrKYcI>tBL3Re&$@MoXz#e{B{YrZ&y*E!f<(gX^^I$XCeKr500|q8^M9y2aQ- zb`9e_l~d7J+9(KLVn}&T3tbk{_ok&Aq)?BI&8`USsD=<}#Tie`_GPvV=r#YSc}xa3>7_0DO3`@T<_T~!@ZulHBWdloxw0Plkqa4X(( zN95-(r$Ua6pV*Y-HD0cQx~=8p)wsB>5XR+BkA#wZr!NUst@XO}N+1WE8LwQ*(wCq6 z=;_jkj^K{7GgLG6J^Nddi$QKzDER-a=;E3kvcD|aB8>0Pe*c0!L?2W<2Dn%4k@=#h zFMiZ7#(B-lX(fG2npJc;pr=Hb>VEii-ZD9xO+Db zpSIr^#t%$c6ThT-{`Q_F-SifmBJ3mf-8_HVdhXW(m)ELC(*E3@E&XziMd))-b_poo zwI^nC|McQlZhKoJHeZx}VH0}kJ~W~OxK0~ZSM5wo-YdJaif5ywzK2!JrFPY`+{e{G z20J)Ti?)07bjJT{uM<~27nuD~9e7HLRdkTMg&8MMn!)VlhfiHkr`ccqov3(ZS?+`{ z!N4uzyE;CClbzFo)2praG*1uL*?z^=DJTB4>Ri=KbY>wPbbdZv+D3^ z%TVB99G|OQFRh$ZHm!s=5fqaS47>LJ*|>SSd)&hX-`zW8Z`61itGv~CDU`o25@hv< z%ToIJz&%g>KV|M75?UoKOJ9XCli@T(+VteAE 
zro7@c`)gN!Jf7q?ZHs?Q8R!rk2iJefnbVd?zc6~k@03&OGx3X|r~KDvCEx(JkDF4m zdC6=Q+mn^w#TB>x`u6S6np&T(-{(NGrCE*QQEwj`>NUMmJi?Y8@7Z20|9--i2!Cr( znmXdqzog!K*WKSU4xi*@Q`}aT!}C@Vn21YOEcy7gN&%>%p`*eXc$~&Ry_b5&c3fjq zG_%?ET}$@;``R~JD;M752RSZ4dGDloQ@?EZo+#S-yx1rUcz8_QwV>Vem)P#zneqKY zm&-*~(Ejod;r-z~FE!sk=Jfo(XsPKOj*S;Dsb!k4|F!Y2W#px@d-I!L|9t>d$KbkV zs)_$4?|bvld(3+M_vX=q-E2-dH9ZqefTul7I#wy4y!U+niC-BX{{c()o@u};pTT89 z>DO4*lZSlTm+;=3KePYoq-mZ}Z)*1_y$pT-xx>iwzdXm>H_wB#8;wEcd%F6$tOFkP F0suB4T&Vy6 From 1e3f77208bdebaa66630664d833974972cfbaa9e Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Tue, 9 Jan 2024 20:20:53 +1100 Subject: [PATCH 0101/1088] GGUF conv + TRL downgrade (#76) * Fix tokenizer, dropout, bias for LoRA * Update loader.py * Fix LoRA downcasting * Update _utils.py * Saving to GGUF * fix * colab_quantize_to_gguf * move save modules * save module * Update __init__.py * Update save.py * Temp downgrade due to TRL issue --- pyproject.toml | 2 +- unsloth/__init__.py | 1 + unsloth/kernels/__init__.py | 1 + unsloth/save.py | 207 ++++++++++++++++++++++++++++++++++++ 4 files changed, 210 insertions(+), 1 deletion(-) create mode 100644 unsloth/save.py diff --git a/pyproject.toml b/pyproject.toml index 2bceca566f..70b0322788 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ huggingface = [ "datasets", "sentencepiece", "accelerate", - "trl", + "trl==0.7.7", "peft", "packaging", "ninja", diff --git a/unsloth/__init__.py b/unsloth/__init__.py index 879b092330..19902a3055 100644 --- a/unsloth/__init__.py +++ b/unsloth/__init__.py @@ -83,3 +83,4 @@ pass from .models import * +from .save import * diff --git a/unsloth/kernels/__init__.py b/unsloth/kernels/__init__.py index 711169e06d..5de19c86c5 100644 --- a/unsloth/kernels/__init__.py +++ b/unsloth/kernels/__init__.py @@ -17,6 +17,7 @@ from .rope_embedding import fast_rope_embedding, inplace_rope_embedding from .swiglu import swiglu_fg_kernel, swiglu_DWf_DW_dfg_kernel from .fast_lora import ( + get_lora_parameters, 
apply_lora_mlp, apply_lora_qkv, apply_lora_o, diff --git a/unsloth/save.py b/unsloth/save.py new file mode 100644 index 0000000000..f94a1b8566 --- /dev/null +++ b/unsloth/save.py @@ -0,0 +1,207 @@ +# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from peft import PeftModelForCausalLM +from collections import OrderedDict +import bitsandbytes as bnb +import peft +import gc +import os +from tqdm import tqdm as ProgressBar +import shutil +from typing import Optional, Callable, Union +import torch +from transformers.models.llama.modeling_llama import logger +from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters + +__all__ = [ + "unsloth_save_model", + #"colab_quantize_to_gguf", +] + +LLAMA_WEIGHTS = ( + "self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj", "self_attn.o_proj", + "mlp.gate_proj", "mlp.up_proj", "mlp.down_proj", +) +LLAMA_LAYERNORMS = ( + "input_layernorm", "post_attention_layernorm", +) + +# From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html +ALLOWED_QUANTS = \ +{ + "q2_k" : "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.", + "q3_k_l" : "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_m" : "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K", + "q3_k_s" : "Uses Q3_K for all tensors", + "q4_0" : 
"Original quant method, 4-bit.", + "q4_1" : "Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.", + "q4_k_m" : "Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K", + "q4_k_s" : "Uses Q4_K for all tensors", + "q5_0" : "Higher accuracy, higher resource usage and slower inference.", + "q5_1" : "Even higher accuracy, resource usage and slower inference.", + "q5_k_m" : "Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K", + "q5_k_s" : "Uses Q5_K for all tensors", + "q6_k" : "Uses Q8_K for all tensors", + "q8_0" : "Almost indistinguishable from float16. High resource use and slow. Not recommended for most users.", +} + + +def _merge_lora(layer, name): + if isinstance(layer, (bnb.nn.Linear4bit, peft.tuners.lora.Linear4bit)): + # Is LoRA so we need to merge! + W, quant_state, A, B, s = get_lora_parameters(layer) + dtype = quant_state.dtype if type(quant_state) is not list else quant_state[2] + W = fast_dequantize(W, quant_state).to(torch.float32).t() + sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32))) + W += sAB + if not torch.isfinite(W).all(): + raise ValueError(f"Unsloth: Merge failed.\n{name} has some elements = infinity.") + W = W.t().to(dtype) + else: + W = layer.weight + return W +pass + + +@torch.inference_mode +def unsloth_save_model( + model, + tokenizer, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + state_dict: Optional[dict] = None, + save_function: Callable = torch.save, + push_to_hub: bool = False, + max_shard_size: Union[int, str] = "7GB", + safe_serialization: bool = True, + variant: Optional[str] = None, + token: Optional[Union[str, bool]] = None, + save_peft_format: bool = True, + temporary_location = "_unsloth_temporary_saved_buffers", + **kwargs, +): + logger.warning_once( + "Unsloth: `unsloth_save_model` is still in development mode.\n"\ + "If anything errors or breaks, please file a ticket on Github.\n"\ + 
"Also, if you used this successfully, please tell us on Discord!" + ) + + if not os.path.exists(temporary_location): + os.makedirs(temporary_location) + pass + + assert(hasattr(model, "model")) + assert(hasattr(model.model, "model")) + assert(hasattr(model.model.model, "layers")) + + # HF also uses a OrderedDict + state_dict = OrderedDict() + state_dict["model.embed_tokens.weight"] = model.model.model.embed_tokens.weight + + print("Unsloth: Merging 4bit and LoRA weights to 16bit...") + for j, layer in enumerate(ProgressBar(model.model.model.layers)): + for item in LLAMA_WEIGHTS: + proj = eval(f"layer.{item}") + name = f"model.layers.{j}.{item}.weight" + W = _merge_lora(proj, name) + filename = os.path.join(temporary_location, f"{name}.pt") + torch.save(W, filename) + state_dict[name] = torch.load(filename, map_location = "cpu", mmap = True) + pass + for item in LLAMA_LAYERNORMS: + state_dict[f"model.layers.{j}.{item}.weight"] = eval(f"layer.{item}.weight") + pass + pass + + state_dict["model.norm.weight"] = model.model.model.norm.weight + state_dict["lm_head.weight"] = model.model.lm_head.weight + + print("Unsloth: Saving tokenizer...") + tokenizer.save_pretrained( + save_directory = save_directory, + is_main_process = is_main_process, + state_dict = state_dict, + save_function = save_function, + push_to_hub = push_to_hub, + max_shard_size = max_shard_size, + safe_serialization = safe_serialization, + variant = variant, + token = token, + save_peft_format = save_peft_format, + ) + + print("Unsloth: Saving model. 
This will take 5 minutes for Llama-7b...") + model.model.save_pretrained( + save_directory = save_directory, + is_main_process = is_main_process, + state_dict = state_dict, + save_function = save_function, + push_to_hub = push_to_hub, + max_shard_size = max_shard_size, + safe_serialization = safe_serialization, + variant = variant, + token = token, + save_peft_format = save_peft_format, + ) + + # Remove temporary location + shutil.rmtree(temporary_location) +pass + + +""" +def _colab_quantize_to_gguf(save_directory, quantization_method = "q4_k_m"): + + logger.warning_once( + "Unsloth: `colab_quantize_to_gguf` is still in development mode.\n"\ + "If anything errors or breaks, please file a ticket on Github.\n"\ + "Also, if you used this successfully, please tell us on Discord!" + ) + + if quantization_method not in ALLOWED_QUANTS.keys(): + error = f"Unsloth: Quant method = [{quantization_method}] not supported. Choose from below:\n" + for key, value in ALLOWED_QUANTS.items(): + error += f"[{key}] => {value}\n" + raise RuntimeError(error) + pass + + print_info = \ + f"==((====))== Unsloth: Conversion from QLoRA to GGUF information\n"\ + f" \\\ /| [0] Installing llama.cpp will take 3 minutes.\n"\ + f"O^O/ \_/ \\ [1] Converting HF to GUUF 16bits will take 3 minutes.\n"\ + f"\ / [2] Converting GGUF 16bits to q4_k_m will take 20 minutes.\n"\ + f' "-____-" In total, you will have to wait around 26 minutes.\n' + print(print_info) + + if not os.path.exists("llama.cpp"): + print("Unsloth: [0] Installing llama.cpp. This will take 3 minutes...") + !git clone https://github.com/ggerganov/llama.cpp + !cd llama.cpp && make clean && LLAMA_CUBLAS=1 make -j + !pip install gguf protobuf + pass + + print("Unsloth: [1] Converting HF into GGUF 16bit. This will take 3 minutes...") + !python llama.cpp/convert.py {save_directory} \ + --outfile {save_directory}-unsloth.gguf \ + --outtype f16 + + print("Unsloth: [2] Converting GGUF 16bit into q4_k_m. 
This will take 20 minutes...") + final_location = f"./{save_directory}-{quantization_method}-unsloth.gguf" + !./llama.cpp/quantize ./{save_directory}-unsloth.gguf \ + {final_location} {quantization_method} + + print(f"Unsloth: Output location: {final_location}") +pass +""" From b52278199b7ae2764f242622275bb8a85ba7b721 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Tue, 9 Jan 2024 23:40:43 +1100 Subject: [PATCH 0102/1088] check_tokenizer --- unsloth/models/llama.py | 19 +++++++++++-------- unsloth/models/loader.py | 2 ++ unsloth/models/mistral.py | 19 +++++++++++-------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index e2361d389c..f310d102f4 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -619,6 +619,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, + check_tokenizer = True, ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) @@ -703,14 +704,16 @@ def from_pretrained( internal_model.max_seq_length = max_position_embeddings # We check the tokenizer first for errors - tokenizer = check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = max_seq_length, - padding_side = "right", - token = token, - ) + if check_tokenizer: + tokenizer = check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = max_seq_length, + padding_side = "right", + token = token, + ) + pass return model, tokenizer pass diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index 61585d77a5..eadf026cc9 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -44,6 +44,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, + check_tokenizer = True, *args, **kwargs, ): if not SUPPORTS_FOURBIT and model_name in FOURBIT_MAPPER: @@ -83,6 +84,7 @@ def from_pretrained( 
token = token, device_map = device_map, rope_scaling = rope_scaling, + check_tokenizer = check_tokenizer, *args, **kwargs, ) pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index 3e3d5956cc..e15a3ae9ff 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -263,6 +263,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, # Mistral does not support RoPE scaling + check_tokenizer = True, ): if rope_scaling is not None: logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") @@ -332,14 +333,16 @@ def from_pretrained( internal_model.max_seq_length = max_position_embeddings # We check the tokenizer first for errors - tokenizer = check_tokenizer( - model = model, - tokenizer = tokenizer, - model_name = model_name, - model_max_length = max_seq_length, - padding_side = "right", - token = token, - ) + if check_tokenizer: + tokenizer = check_tokenizer( + model = model, + tokenizer = tokenizer, + model_name = model_name, + model_max_length = max_seq_length, + padding_side = "right", + token = token, + ) + pass return model, tokenizer pass pass From 82e6fece0b78011707090639823d2d7acf5a3864 Mon Sep 17 00:00:00 2001 From: Daniel Han-Chen Date: Wed, 10 Jan 2024 01:02:44 +1100 Subject: [PATCH 0103/1088] fix_tokenizer --- unsloth/models/llama.py | 4 ++-- unsloth/models/loader.py | 4 ++-- unsloth/models/mistral.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/unsloth/models/llama.py b/unsloth/models/llama.py index f310d102f4..73c78ba0bf 100644 --- a/unsloth/models/llama.py +++ b/unsloth/models/llama.py @@ -619,7 +619,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, - check_tokenizer = True, + fix_tokenizer = True, ): SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported() gpu_stats = torch.cuda.get_device_properties(0) @@ -704,7 +704,7 @@ def from_pretrained( internal_model.max_seq_length = max_position_embeddings # We check the 
tokenizer first for errors - if check_tokenizer: + if fix_tokenizer: tokenizer = check_tokenizer( model = model, tokenizer = tokenizer, diff --git a/unsloth/models/loader.py b/unsloth/models/loader.py index eadf026cc9..48200c3878 100644 --- a/unsloth/models/loader.py +++ b/unsloth/models/loader.py @@ -44,7 +44,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, - check_tokenizer = True, + fix_tokenizer = True, *args, **kwargs, ): if not SUPPORTS_FOURBIT and model_name in FOURBIT_MAPPER: @@ -84,7 +84,7 @@ def from_pretrained( token = token, device_map = device_map, rope_scaling = rope_scaling, - check_tokenizer = check_tokenizer, + fix_tokenizer = fix_tokenizer, *args, **kwargs, ) pass diff --git a/unsloth/models/mistral.py b/unsloth/models/mistral.py index e15a3ae9ff..e48a982b8c 100644 --- a/unsloth/models/mistral.py +++ b/unsloth/models/mistral.py @@ -263,7 +263,7 @@ def from_pretrained( token = None, device_map = "sequential", rope_scaling = None, # Mistral does not support RoPE scaling - check_tokenizer = True, + fix_tokenizer = True, ): if rope_scaling is not None: logger.warning_once("Unsloth: Mistral models do not support RoPE scaling.") @@ -333,7 +333,7 @@ def from_pretrained( internal_model.max_seq_length = max_position_embeddings # We check the tokenizer first for errors - if check_tokenizer: + if fix_tokenizer: tokenizer = check_tokenizer( model = model, tokenizer = tokenizer, From 9faaf5b388e025f8ffc302450a12ffb84e7e1750 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 10 Jan 2024 20:03:01 +1100 Subject: [PATCH 0104/1088] Create FUNDING.yml (#78) --- .github/FUNDING.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000..a6cda7d034 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub 
Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: unsloth +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] From 01d7f58e11373ab07b9282a42bc14f542dbdabf0 Mon Sep 17 00:00:00 2001 From: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 10 Jan 2024 23:02:20 +1100 Subject: [PATCH 0105/1088] Update logos (#79) * HF Perf Button * Update README.md Adding new buttons cleanup * Update README.md * Delete images/Discord.png * Delete images/try live demo green.png * new transparent logos * Revamping page * Revamp mainpage * Update README.md * Update README.md --- README.md | 24 ++++++++++++++++++------ images/Discord.png | Bin 18382 -> 0 bytes images/peft x trl button.png | Bin 0 -> 36919 bytes images/try live demo green.png | Bin 14424 -> 0 bytes images/unsloth logo black text.png | Bin 0 -> 58002 bytes images/unsloth logo white text.png | Bin 0 -> 58964 bytes 6 files changed, 18 insertions(+), 6 deletions(-) delete mode 100644 images/Discord.png create mode 100644 images/peft x trl button.png delete mode 100644 images/try live demo green.png create mode 100644 images/unsloth logo black text.png create mode 100644 images/unsloth logo white text.png diff --git a/README.md b/README.md index d93bd0090c..3d3bd82cce 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,22 @@ -
    - - - -
    -## Finetune Mistral, Llama 2-5x faster with 50% less memory! +

    + + + + unsloth logo + +

    +

    + + + +

    + +

    + Finetune Mistral, Llama 2-5x faster with 50% less memory! +

    +
    + | Llama 7b | Mistral 7b | CodeLlama 34b | Llama 7b Kaggle 2x T4 | |-----------------------------|-----------------------------|-------------------------|------------------------| | **2.2x faster 43% less VRAM** | **2.2x faster 62% less VRAM** | **1.9x faster 27% less VRAM** | **5.5x faster 44% less VRAM** | diff --git a/images/Discord.png b/images/Discord.png deleted file mode 100644 index e1a0e14a567462d744824f3957aafc15e4fe4bc4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18382 zcmZs@byU>P7dMWgA}H`tK%|v!l$2T&knWV0mTr((P$Z-qq(Qoo&ZR`US(aXqC6-!x z*`=Oc{Ql1Qo%1~WxpUulW^T=$JNHGHnu;tT0VM$r4i2HboU{fG4lWP}=gz%{4=}%E zyfI3}!FhorFa1vIbH?5hej=%M7UD2H&W`NGh4JnMJ^MrUK+f=$@TTOYN!!L^KBu}? z>pg!y;Iin{aUqmbTmpsiO&bUB!{A#GVLRV~OV?rdD!)Ni_h_EHdJU{V&8CSH8ZXXM*%C< zIuQi-?+&PW)ayWNf_FZbJ=@+AuBgj%&~GIgQf(Eil%kop!-I{Te@X;Mn&9KhYUMb3 z{Va6!YO@+!mQE3eGPeL*CFWe9*U~9yHQt=W(ux!Fdzh()?v>rGRNIW6UsjMxmN+-s4RmQf>A*B-?`j`L4Z&Z7v4G7od#*BQ$yc{9eQiUdZd60NmnyOW(7tGb+^GOA{Y;B{$3lN1927a@ut-hUW@BfzvQf2$>$x8e z`R@Spp_*i%cuVWfgA)%Y+F8{F{?A{0^CF+R-i?SE*JZ33b=(V?{eDVC(j(+YMpc6} zkU7MDqZ4Q(V&fZ1bmS*&U)|vNTWw5#=HWBE+^R1Ph)CqMz0u|6*i0|?Y?Ck`HWm*D z=L^A8YO&6C_~mMr>KNVrb-6AvQc%U;^gpzHgNMP-n^J*^N4f#DblQ!fZ0VQ@zTE3f ztK4r<-)z>6p@m=Nn>aZ$kivi>l@efP!p%j;|0XO$lf9@_nfKB}{^^ING& z`V|&%`GOblOw%jzB>fJCmb!aQeazT1%n23rE2{waqFKnr*j09GXrd-*B1}<7ngv)N ztS*ir?3C$xceRwx9Cgf{`f8>%5jRs@(&U*%&xu#EOqHLP-pP>^lxhCS8HV&rAbx{g z8BL2>AhMl<#RqE2a6bG5&Fj4K@n=t9?}W{x6zM|VVC+g+xl_@?9`R1Abpy-yu}1ZV zX5%4{`D{(w z)9vEa8UoE_d~}vXmTt_DmHI|ZSsUSxfmI-i=EDL=Pvd{L!V7=Ww(fJIC}h0jTR*Ad z{XQ7vVe%(b0_nPSvEa$hB*VAst`a;IYlet{qm z^PgZH3oW>eRuDNE@fCHII;cNz_%~s-e6?rDO)y*+wUNAihkI^Z$XDfjxp#;m;h`Uv zhyAk*vE@dK8KoPNg3mdM+$WiDQteUwyW`*NAbq1ux9eezUHNXB#Y3Ax_BsE3{my4L znv-|}VCxy9Eo~}|&qrCKODCFfYAiRhLhGmt6FLlGoiAjP?LdP!7g5G_*hMU1e6!Xg z8;=yt5ysN7Yo*Ur_W8S-fCGrO5z8M4WOEoLp=tHM;)um{Zl*pcEv^3BVzD!AjL%sS zQdgYcHOM+@Dkf-63OcAg@hfJONW2q`H3J+X62Ff+D-*x++Bq`)%*2R3eap4v$Rn{N 
z2HYwS2d@<9?kV>^Z&1jz+^a-WHP&1T`gc8UpmsCJ`U%$HaNc~_Ggl4O63d!qjzMyH zn(@(4p#O&OF5$tW) zWRZ_gJNw`KzueLIz#1-GZ4spuH7BJ3=Na-g^j2JfO-r;_4a#P0!%Az7H{Hh$-7ikMWDqWX?o_{GAm~>o;^~4lr+WY_OR{{ybNvq< z@cg~@Pl=m7lAyi75Kzlf7^R>IRxoP07RP65$V#w>z*1d{Bu!wT3c~L)6@4MO3Ns4! zQ^vAzCK!;4k06-K2n+^60aPICA)&DAV4qu6I@1rlkB=b$*98WFdWZ57pjYNF*FulG zn9shHCiyW6m;4vneKy_*v~EKtjD=YJQ{PIW{(yXZ$}WJhAryr5+)SmXw|dQC`}aF* zLkMW4Y<0e>;WyqqiW0jDwNgeNdB>VsR6{VRp6V|HHhNm?vtWmwmnJPTq6R9ZEEKmC znUe%JgAbJcSyH|>2TI;GktTF7*+gWscbvw<_iunh9|D^ueQ{;D9djBw8gJYmIzMQH zMi^V3BCsO;;>s+naFhsknm8IqE)L})_$sgm1Wt|4!jB?OOPej<(~E5*aoQjCeA-K^M9gaa$raZQ48lo91x<|2B%R}JuQ{W!L9{*Rv} zzM>?w&(6#oU`#?a(tJDhr93D;!?G7q!+6q;3GB==C@hyw-uftE4kzzW5DFnvxvWl( z7`P?J*^wKKZ%ab(40D_WOR^u#8^lpW9${_m%epO-ll=bRCix~AdSjFr>Wn=}M!B%u zKByig!z%s!T}E>eD_#Rc$Iqk--HfqcbpqmB83WeQcZEYiLeGM#EdPmkwzA=Mz&c~m zc%xIm&!KLniINNhETigd7|(!dbDNqrE+1LGiMs`!qVSLc*ag zYk1vA!Al6FtBQ`J%+Snye!mrA=Rnp#Yr#sN~CTG zpVV?HW6;$RgyoSdXFiRx_>FYg)OAOld5`YlpvXJyN<$d?lsg86ki#87oh?y+`nh<* zxgp(_Sg%5*8)Fd6W3pXmO8srG+zSk`Gv$Cw{3a&Xp>aY(#|VISLce8R)z`LJP@Rlw zeV+jF$)b4UDDe{~IwYfHusbHwY4w!ay**IU1rN)|8~x4A-dO(<%V?J2)`fSQZI6XM zwm*mF;!**0s04dYZmwK47v1%(nDhML#u8*Wsdq8v^F@K#+i~}kB`sPIlW=zz<-4zo zmRiGxPd!pur>DmYS?^(N@{7Vu?50ZgdoTU1g+_@9OGINBx1WN7CU;l)(`dX^?4gn* zd6WTb8KtRjFKP6f2LP#r`tWMAs`!`OGpi+Smn_W-`eY zsxSK)odv70-|xc)rzF_0A$E>S4?mpjkjVta^&tiG_wTSe$RW*0-BAe!k+39id13An z&d(Ghl#DoKx)L?=$eT>lupyEC3ru9dtHjHP{lsA@d27fAG}&D@y6aqy8B=?SG5pMZ zt*r|y-!`7?FO%`}EJu>pyap&RzTBR6(ah-6T%J+Cple#`VJ4S|_*&m+t@ z<7)EVVe+E-G}cttD{VkiLe|8xS|7&%f|fjFyo0isL6=>22<*_f&4vMJuEU^7DJaQU&{o?W@8^QE71x)c+NDNRDVCwHXu2wl$=tn-51r?zLSFPa_|?}r$^X8MlCc-FbVW_$)*(g>K?1t8qZm2 zYsV&}gJeXnhQ2iG9Qb|gEAG|Qjs5E!%#A4m&|d2(WFU}8AcHFJdPbC#XJ@T^wH5tk z8**{d$+DQHVa|O}-0EoE!oR|wR;bm9(Ab-Ana;U(^Z>5|PLUE7I;wz0jjKx*ZQGOk z4^*o;8NVWG6x*!DITP&Um4%d-yba!h-NCyK_i$h@la5a~z*+4|dz@tD>$RifU(dXr ztND@eoThmW=9>$<2FXUzjBMU<7Qr{*DmAkGtrmlZUukbonAEHBRUT4A17K^HtF#_n z&9qmPrqY4X#4xe9_f`x`G9dq5Zw3_WFofvw+44Z5-H9WmJ<>it#OX~O^!C9x 
zTQ)pXrH?gRe73Rb66nPkcP9SFOo0ZV@RB?yQT}{qYO2ED&*|-wkJL5f3_--A#rchr zOSZ`q$GKW-bSgSquA!Wm2{(h&nu<5J)0({SbFxrLIeAoCM=!>YY^R>FAKY1gE3;== zx*3Bg8?>vyTuWR$7)PY?J{y))K%Mt+Rt)2h84=z@n5u@F0vNNIs3&X}{l9?gQF6_o z_Hg8Ad@-fqY+eX4pw*`+_AP5cp@c!7Kk$nY%=Zk1{-9%!*YDFjN+STSj4ITxcV%;W zRx4;Zc=nz_ytIWqht{97kxNf7fJT=C5ME+ec%BIHJ1VSIV z^(b6}r{$NYX08TlvrX*3m(01%u918>mR~-m2E^OvM4NvA9i{udO~hhkNkT%aD!C$Zo9y#Qe_9&{^Jnnv#EktW4o-peQI_X!5T!r zllK@20Si8mNV^ywLInyh(_MbNbF+}Knf*!oiG!NbVO=k(M;tAP-|oY2Ut{R;XE9Jt zx{AHvY*(>Zwc~q5_cc*ewwE7TsvZW1G3+|q!9Iddjj_-Nu>ZB#8F}g zTXrJZ9vV3ebP`}Nu#ls|BX%xR!T3u5M*3zWk^d{|CW~P@HeK#hJ zccSRQ30SMs$(U1W)VH<45nNPcAW@m?tl@AilKYG+YO4!M5O9+MwmrvG6AeO%Sik{S zmBtm&Pez+s07Srb4?DP0`50j&@+085Lj`s!I^z&oc5_S@&|*Ajz1cwJXS>*I+!H%h zTA=6w-_^h*ze9kNumXfE*t-W47Sw5b>|&^6T*!T=e->;&e;tzhf|sDjRL=t9qR&Y> z#{6$2nq`cIz(sf^N?5Jsw-wPXZtLQ8C>D=_zL$9=OWWgfANdz4oopjI?8uLIQet=f zAftqkYD`7rWTo0uN(o^;OF_>VtCGGug`nA6uq_In*y#7lFSL^qb!yLDedB*FuY|cA z{L0T%L0u2J#9dF`XAmp(;5hbm?rENy?ten-F?8x*?_h1%{%gEt=T*kLqbwFm?O|)T z9Le&{gd9u$nwdptJ%hXDQxQ!VVn8)l<|ecoSMw>`C!Uc9eA{J4J3R2EH{%0yob8(fw8l5_2CDrIf;aeRtUpdh3I{I1a5wU72&4tXp` zhc0K%JVZdL)$+!WTaXsRelI1f$4aTiC1EK(V06^$(`|z%ij^c!B^m1mzWs31U)NdJ z0z~XBG@n3Un%bXQzdpURn+fSq>edh`Sz|>~c)xA@+f(dg>bNQsuy=l47#*=F$~bJP z2uN_HyBPo)h7Lu+#4(k1#uMS#CA%R;drQi54UN^|(v>1r_^_$8^1S2b_iXA5T~Y?% zTzpZXCd;~Ko=x=Py;k?h>6rb8#fmb%V8plF_#bDVz%H>YJ5K|Bz*;4nCWJeAa0k?f9{yS(C&|kM zd5CW%h39?Z+ek)yu=}a#ULOYw>ckv%MD;`*XMP|WQI*uk;k~Fv6@bZ7W?JpB@5VR zvuj`|``T`)d^&Dzhefi!Q(lI(?*(q!{{| zT4(!UVvt~L2Nap=xcWkq8Sji^*jiBp<_fIlJrH1mWZ0x!Oy6AE)cY%sCX1D$kLd*K z#!6p+6YJoW3%J{r^Ov-<^K&XF;L{&gBf}k;R=eldgG!vD(Gh-?mDwAb0iHq}T?&~n zajpSet-fEa-xBO*72=9KP=l8Fze?QgaMYjIIA8N1swN?0Fp&x#%}JZ4W5EuPe@~bN zSE!P84Fit0ZYwsuTGn&E%QlKpTxy7|EZ*z){$7|w65%XYL_tXWuCiq@<@Dh@dbVIr z_MMQ9_4FnY&+c>a3qNj5nto*<)bF7o^U)RGd@dHDn4OhIxhT;J2rAbFRpO-2T(l_| zy?%|vQQz{sj-h+N8Nek9>S$bA(VZ%ys1hAXsYAs-YDbF`6(}Cjda2{p_==kVOcni_ z*={^=QP~oH+fYNUL=%E4d}3cF2uCs=GeT1dF0VtZ>BdzQnNYt((^JlV3sAn2L;b}zhi9&W#xpkD)(03oX 
z7XL{luK{QR5?GCB;pMOAb%o0xDypDqlrGqbynmj5n;*u_cK?V;;EJ6ZSWYySOYUF! zcW7K)G-;3Rry0NGHX;)Rzu6X8v+WRZeg*%5^Md9{OkdO9VUUJwG$Z)>8-BYc%Y5C8 z)+P}*xS^u3jl$EyLhEmf4M0)!qI&s03`9_!G>GSWekN9g*rA!xZoI|{s&@AtWi z))CzNEm=MEy^K7p9`iDn*qi*UnU`JvWuK3%ks+{7q;LjE0qL;jeaf*j6GC`Y{KW*m znKQY&l%fUnq3p0han8#N2Brk^T0sUd$^7(Uq$ zTRfscwewZVn+)hiw_=(MyLko(s zct(p;DBzz!C>3d_BRx$s{>^GqHSObqFJJ5_9cJ|6Kfc*+>kb77&yZ)>Ex(ID2DvC2 zir*+~EGYF~q_{QxWoW#XHYseRY7g+<>%tZDnaJ^`pk8ogA=XZ=jNI-^Cny>b_2(_Y zqtCMo3u4wQvsm|!QXVMInDE;8J&^Ao)$a+@2l2&7kZ44t+`vT{I@a1Htez>r?xZW> zD!HMlHZM3J=FNC%1M5EJC(eu2vb%}+#vW@P{@@MjcoX5+wj%HLa2Eg` zTT-OU_vQX8Hx{et4Pp|9K@C4L z?dfEx%-o(TdXpyX%$iHur6o>L_hfu54$0-mc^cP;LLz=ru4B!C%se z91nnOwm+#Noz!c3W2^6KB%-y((9Ds0eSC<{D0($CVsciH00&sjH?SF&c!NKf z8P4oIm*@9<<$v6IiTmB1bn3ndTli|CLSgR#!=I6BBw7p1`)*2A) zy>s{bW#y>}g%@u=*CcbRVxy?K?VwHr!$6V(fJ4P7Wg1jbw!$!@ah_+Oy9Rk2N;N~Y zL6xszAwG{Odx)6CO@{J7R5+^oTvdIq1&-AoZ(=uhS6-5iK!xBd?3;R1cY+6xhQq{n_h%N$Vc;aP z&C!@G(@D7nLH!A&a{~N(&4cbzqVGzmcy%cT*PaxO{oogw8KaqzCOccN^^hdL+{1G( zE4`gUPvK57u&WriaaO0?{@1A~R@;2n`Q~^PpDZ^D5~#y|gnl35;n@9NFE)Ha(UyNN z1ar&X*@>v&-zIe zzWIaG^TtZH`)D*U)`R_NU}T~9V8SKHW(GLRMX8wfV9L;bmQ<*{tHHtO$CJP=+{KW* zRa}rH}OR>o%qryB*4TgEFVE!MolaSSe)SnXf8_2ApyU^9rB;9ti84> zkop9xCXnQ_f+{^bD8pMeAueAv3i#qgl4n;yNnGzSZ}7nPvbVL}`Nx(z!13tLlp6H$ z57Q#O`ROuQOLDh&J`M)0N|4}S212MnYk%@%E4$o$hZ^Yo&DHxy9?;A?236)YETwbb zMCW|R@O`sq+&#*kY5xh}_8p!BtLXJK*pwYkyUx+){vb|=rCGa+CznppRhY68jbJRR z-ZnSd)h{DY?cExRMc?wushr55`I?(UhB8jP79NDj`ZyN?ehDA_HWa)kNY7NyxAdw_ zYj-aF6(O)h_4a|;bwknBNe<_iy(Ze2XMu`QZ;{DwrLqY@Cd+fn(xTl^w6z!B^lqp@scZmP^6Gq< z{iTdM}YaHpTIjf_O6|vz)4k9a+WPh)a%N=HH}OW=jDmE_m!c(eBjgypV>e%usqfG*R(QG0CA`0BB~a@*Xmw#? 
zU+u-ONc%CZ;+S)${SQ9GJdu0AHwchH>0n3MrorGFOsSnvl2roFD)A(3A$&EnL9KlM z@dF=AQs$|mS=1Xp{#;4Z)S1s`JaN&9b29b)QTN#Kbw;wh=7>viF0uz$Yl47Ie0#As zKgmjX88V#u-^Kt=7vz9I4WQ$kSrBzG#NnN4~lai-e^*BrZ+DY(x}N#DoR6W%}Krf)&CD=R$n*_R=!>L$>BD zhpWN^yBrJ6{f_fFm(#@|Q|(64+`ry}@48L0vm8%5#qS6>)Of$QuT`&kMx-yU&c4UV zU%T^Kb1653F=aMn-`B!s}7FU1%YK!>UsY~CKz)MS9Qa+z5ceJ{@QtWpnN0ZLCtovk# z_a>l?-9}!g*K9dhRk;6plbfmX!_249U2|#a0Yv_-YZgo#Xyb?N@^cWK+a-H@Y@zOJ zh%gii(1dG!c{;;OGgy2)Myly6ohN@aD=2$Lz7tzW4I&%0DPW5+Jq%nd;zwN(f)CXE zD^vd}wx*;-*3308$zdv^j9Lfa>T8BSktbVs)YzH5wzo9yB&_%pZri4$@pcsnI)?9b zAeC0D553-7*=2}MoBPHkDv`Eky6zTyPK`5LvYRxDSs?%TEg+G3z#F|O4W|)=Tsw#g zC%o14+!xig%}<*SXn;MOm~(l~f`*zEaJf5&N@utQlGyx|TYZ`Qw;AgQ)-w*p;Ek zpFHw{0SlUs$jUUzfG}Ng=yUpXk{+j0vwDh7IhZ*i1?glm zoW*5q;xkwXArdc*L~UAyW8Qivz}aQiv-o~_WxH(Z7aC(uS14tFWu^eQEcB3Y4<=u5 z=C8O_n-+AAAfn1fs?hLzz@J}ioGFAt>=|I@eQrF^{lgY5*_qBnveX&xQ_^hA+red< zBQ#Zw1Hc)jTyB}&0lnnTufoj`XURX?6IyQ65UBi>OprUWSJmuWby&D|^wy#Ot#!FM zkEbZ!8Q(HBNNvH7zi1W{5Gu7s?H&S+C_}Dr5_#BKOwSVE7@Z2%j!dkVw78G&(Q9T6 z3$6k%DJ;V1{!XexG&h4KgX}EiPQv7g)kXOLsW_iiUPsN1UKK5=9Xt6`*ZmHToDf3Z zs#&&l-+!w4AR(~v#RbtlQhIyi} z0YfAmj!mWSy8No_V>K$tC_?_UZE4+Xh>BzPk4k5DT>yuJ>+=9)_` z($xRCC3z8t=|?9mrb>Gnm7dN(lea1*hIta4@#@UiWXT;XK4v0*0ecrjk&33^qZo%z zDA(2>(%HP*w+wfkyf9l zHQhrBHeO#fdg2fH+a)y2#@pJBO30nB{F!DA$Aj|5Kn5T~@Zv_Eu!&PlS#=j{aVEq( zNndviRLsPdbWACgt<%>)_!bq_;7>noY!7(KYa7Dx+ zLd=J=1aS3}ztA@6r+}TgZHN3XQur77)yR#)gmrcSGu~AJEN5$q}Ha#*y{A@Vfq<=L|KOgTfEq zhlA#y>Z*Sm=Ez+QbX!W6XiM-7G-PmW`%`qi7tp8{;G+6s5BT%>UYl810{9OX6 zI?7DSg>HDL4u*^O$XEPX&x6)e`gg&_(}Y9!soadEwvI2fitVfmKZ|sQD7zbH{>&H+ zRzkhv(0`ftGL`iA=bOAA#@mbE%R5?j&fdzXy79Rz5c+!z1pmnt_IABlFKhxV;h)9E z1rzJa5{iB-bx}x+tos0-Xxj1Yfb)yP{p81?wPTm>f+L^F-JxeVy5JET%kw%ric8bq zN)%8}l9V~~$Mj)rx0!A*_?5coZ&E#CK=+hMCT#!F(S0LW`uE4EsNzpsUA`o7P1tIt z{R@eGGdmyT(bD-lhqpYh@+ga53`+F}pf7Asy@+)v_CwRxEJ4&i#3L9tOSHB~+HLFhGD45e zj;KsMFnuB_SBdn^v)J|z)poqXoiYQLJT&(CF_e(LNsx(<0&LNMxXKq_7!i%zJrr7I4X$qq-@u^ILJ# z4jz88X;c`tSIkEoGC7bA7)d$iADfobY$*w@j&`@b+5gIZx>C*#E=X|kfRK8BJRF@Y 
zUcL74Y(3WLK(CLLf)mB)xcMy`lBDR_7Iyh>s-+}*w#Up1iCyyl8$~*vY|>l~&*%;M z2OVWz%*B+QhWd`wyN!^)o`TOEEnTc4?|L_Cf?8{>(n2Zm=Uf-l76|gjb^J)dkY7}B zJF#sn9D&(>kZUuYr0qHJE9B$RX?|#C%h2gQe0d{C-0owx?(!R;z9ReiK!QeIk%LMc zfv>gC`bagUNb!8K&fr6hl7vS$dqOBeMX@5eCn;kP>ob3--cxn)R=meWzXqaA4{#dM z=^e!yJnc`^IRL|zc}zma&3A^3l^hdP>crrQC81@2^%hGZ3iSjJHOw+x4(0`2^8H^?)EZMo5E$Z?)Vgi{s9meC{I|-Z7&Si$O_=f0B>qH zJZM^FH+gqfQtm$KP2&|OtK2kQ<^ygz1rG4OgoI(5z)Q(Dvf6{+y;Pz_fU81}oZrbP zJ|I3R-ZUmEk%F!p%gF%7zCCyjtt~hzm~#itks6nB5pO$PiZY&G9r*f#I~og(e^3py z97KtIN?gG-D~oYbi>GPrJQf@Xa5I%Aj!dnqby8Kny6)W_JFE~o$38^YEj0F@ZnR5? ztli0r)ENS*7B*69!$y%loEE=zY1?=$+w0^(itS%DQ%0~I_`2w?E_d;*j*c%$Dma~# zx4p822O<1Qm>wL}CBLOP)ur%v+!{}}(^@~s%eBMxJGe~2tRIt~QdqWnma2Aa6}6@o zgi~4gXr`nqcPy)4S$&*2jE_D5N<1tc;yx^GK2Dd2QI?-=^)FS8qFK|fP&$)Sh&v6g z;$giy6)rIA`ulwwQk2Wsq|Typ-MU+bY$Vny5yi{C=vp%X0NIw8sgi zrCTpl0tH$G^i<8KReA{od}o26n~xc`m+9^)H8zezN>q+Y9*v$BBa>ypKMUYxS%czw zg+@eFF!|PNMcNh4n^&bXPWGURxjMJ`b%00L!h&weQH4h;%@;JA|8?i(bzOMe0V2-+1dLX!w}(Xcm;2#NNr)dufa{ZJ|P>|xEHxY#XaPmVxs}{vGK7s7OCD_u%igj@!D4? 
z7Ur>ld`*d7J0dp2Jn@Y8^&a3lsA#TJ+3k(JM4x}_vuZ8PDRIH zkd~KGjo8mDqGv1zVHnUsL665wKni=+(F;l1oZUqlT{{1#Q$?fc_d7#Au7PE}hGmRl z7xkV}oDOdiJjRD~WTsSdQ+8U#I$N(~I?`OCRQKl6)mkx^cf`*+UE1-1(;OoM<-$1l zjYL9Dj)eW0rRB&p~ublbqE+#uPxM~#K)$!G6gD=1XIhQd{hz8Yw?MkBZOD;Jt0 z1T<7kH;c1vGiFLA?`87I-e>wA?$`ZkyMFS1Tj4H|Z-H0~`9oH5{(8mQmo9D=+X-`JWuE_yP&@to@e1rc zY`UzEy<<<0c1-JwQ#QWUP|a57M&CLwZ$0 z71{l-t~`lxCbf6@*)a9QKM`Swe2}mm;?$n7(407mG*`8zy)1u`DGWas-7B$U zn&|OVv!urS^4=l~e}nwQ#G9d9VqH~8A(exo`TR2C$M(`t<`nL8n5qcel`Fu;xQSI; zBt3q@j;)wJJ3^^4YVu{u$0;;*tp@PI)6{eR%qqa$N<#BBMt@F%%HPq~18j4-l-D{IeXquxl^7}ikB#1Gk-e8w> zYkh^jfU}JMzK-2t^t(a}o%-~Nr22}7*TKAo`15bjMy!Z9BUCv6WiLl(TAi1VoDugsO-ML17!1>?B@9;BzRBUIXMeyOg z;u}b6^M2hX(>J^(p_}sUbQ{}WamfTPgIX$zP->+4jYh1NbKp_Ny*lV8 zIo)0s#lzvfZ9v`>d8H&w^&NHhrT3Z%ap0q#ULuGVtyI{gMq=X z%YTfr_eNNZ)N5_+E%l&Nsh&WgjxAYg+z(-X< z(yXA@Y>9{0eXl)++&d_FUmRTl8tV4 zmtGfX_>}6Bq|FjH7J4bC{hN)1vOAm1S41Buo`Oif?8g-=S!UgFIVL1tmD2!%xGR_a z`dXq6(;JRUIsAN{)cP&3WwWJ@qKUNuf<+%e8tOS#<184rKBXx!=;_PMyQw-XzI*a; z_mb^28!`#-Y+OnlPAY)WE`ImAvaq{UDL7$|KA#a-xT-3XLgb$^E_(QAS<$4MwmITY z{&A{SUo;aKgezZvn%)Gs%b@A67^n3$k*=hP6D(TrR&P@!Rkuo6-iU!oZtn1-H0{bj z%G}mcb{piQdaLnYZ!l|G{L};vQ?uVt?%c>K= z#QS$2;#W;o#dF1ql>!%r5nZk{#z;%k%AV>JSTmnVXOfutyFdQ}=(LYDvntQo*BOUT zQP<)(Ch-~q&NIN<)YYaIV@zbM9F4us^ea?p4K31 zwiY33_jC}{)WT`gka&A&>bNL=%?IiNRgngbpB*N2jka!>W8CJdsMP6c21qnsi$#Us ze(go}m{}Eh+}-;%M&A-WoG|^u+S-IS;-?fzZ0;tG?0e&PHsk(Mk!S4ae8c;ELN9f6 zkPG+f#fQk*_bd(DZW$6hWIM1$_jYM~LsANto1k)qUD$vt@>5;B4 zkss%78UmoTRlTlC(bS5?MwpkjS`QcC&VPxacvknhX+-B+YvAd^949mqarVMHMSa$_ zl~r|?Pp>Ut!vl7xv;=vwDGfgpOw;q1`+>D|w!&qW5fToNoV4_1wg60ijx3By_m0;a zOpA6F5E9uq`O75}8Qzk3L8^lJ$vlNjlzu*E^AjD#xQ@6=#jHe=MD z4$i@XaW2DPO&dggdQ+mkXPJk~5*O0KcD*MN)nc755izeo8<$2eijAR{|T&Ir`$ijD5Y+Kk~ ze;kz;xA+WY3_c@FeyvUM`Q9y7k+EgI~er# zZLpQ$ns1a%7Iu!0%{efx0VCAq-!{qYCNu%`GD~2e=6?*MVVna7sO#=Fsq7}WK9u8s zO0f*Y0*;b{K(86Z>0&70djAaL%l{9b6lPF#VPbKM9KbnxWs*Zydj9VSVgC2>iRhaHZO8_&-%@#wRfH%mLNxiGf&; zTym5BC^#jCRsKq14vcX?(Ub1!0%nO~%dO{F_~?^MEWfw+ndszIFtBn~xh)cjmYy4n z8^Cisz457Xs2mF~;WN 
zSf}=!TvbEj)com({}|y|@eFHx%wvJeOhUHKlh=o(edxZnv zM$n>v=UnecNwjxtza(hIjvETQ92ccSElq@vdvKZIJph;~okWs5Ke2!b& z{POfkhQO4B!BON?+D_9r3l>Y7e8Y5(-5Tp1pB2UR=S40STbfa~*wG)`_?X{2yH40L zRj5}LUFRg zGj@Ae7I@#3C=@->8frHR~ZH_Xuz+qbYum}b`bNL!xqi6RmCC; z$?VyB7WHEc$QM_1`)|O`|Dd+7#O)Su&@1Jgc}uOf9tB6EitHz0SXdAp2IcF#hn{UN zKfaaJi>@anWntvF_5iG zx2GP9eI&W{&*u+SvNzIoee)zfRN@tvOJ#p(eF-odeYBd2g^lk0le+VFY|P2_P7fIU zp1WVa?x!+{>7TaHuyHefymWZxJ5l!hc1CvNgVLdPr?EDHgYuz*r|bT&*y&FS$}e~U zm(3|sP-p|+MEU`Cms%MgF%hu=lVg8&JpT{bqTqc@MuyhFTwz_;6stm=Uf$Xx8N=tH z)9aMm09ufFgaPWCL9(PrB96No{o9xY({5QMR;>Jp%hGOCL3Yfx+rgdcvLeC>foybD zVNu@eBmK%4;}ORXz|8q}?%`Rv3*t81WBvyKpb|Evf0)Z-dEFXUhUw?xT{{nhiI0AI zf+784SZBZLt* zqQ2ywTaCKEYDdl;rowcIb*O0a43UA>ga5N#ng7ghZ!eX7P%^jgLV6PDP>$_luIu8q zm{_%f{b1o&aLVQVBH)V3XXWo!ynXt`r2oS7M&PV~te8%pU-S+YBU4a*+IJA-4av{I z#szR`gcxu!dF+LSdyk!nlWN(gGY7OK{@kS(Q{U@#$JK#$g1Y?&c?Inxh|IvZR_7L; z=UJ%l4jgh(*NYUK`vr7_z#(~{;)K}`fQ6ASa58nekHqWPQ;Fx~7V7(h_KNBOcZL2q z&ad=%<}Xl*5aGP60QJa{QRF=^ZN^hJLREctWnY|lSXAXswn$vwDd0^C#f z3An;O{6}rh?gUn+`Mj^n)U)hfL!~G`_C_?(<)_ z26!q-;R?_p8#k}zcK%y^%|$=vZ)J@LqX1Ctg_n-`S1R9KUF=?E|MGhD?L&zh`Oyyr zN#N1|s?_SSEB%ae9Ecr5tK$ppNuWbpuIF|1M(j5KLxEp1;5h7^{kYjY*O%uGaKqw(bFn_Gz^0*tC+9cd z@R9U|>w7B`>~k+aV0_mLYWi5V9(8|FtZ|d04)ahIJ2z%vI6dGvxNh#&y`X7^z16$6 ze|`D%->&%lx7lVsx{byuF=z)xbS!zsu@30nbjk);@`mNfGEr z9g#I$JDiN7uCH92U4Py0*1L7FTlSjXy;B;LIQgxJ7x1_gpA%9CL5T{qoyxjK<)&L* zpPcuc;x{c;?*HCAIq4Fr@r0}8&`Af2Njxr#PYHLc+kV{f=>sT3XvFJH4+5?*t-P~t z#`V=nb@R1FKJ0dCbTA5uU3>Mk)$TCeD;I$aKHP6_DU+QxzbrSV9W>#-fo(mH#?pWG Y3(Vp|a^6fd1RBKP>FVdQ&MBb@09cfSG5`Po diff --git a/images/peft x trl button.png b/images/peft x trl button.png new file mode 100644 index 0000000000000000000000000000000000000000..55be99b7f9d1ea7b391ac76f39283e950a0d537e GIT binary patch literal 36919 zcmYIvV_;re({>v)Nn`7dZCh<@+qUgAw(Z8Y?WD2oJGQMa=Q%y^_jj+o2iIWDnl(FA zR$2u1JJ$CvU%tSKi3-Yp`2q^@<;zzv$Zwxdyc9(LeEEX^MNIIgqHD%!2SftNfz#$w zS!uGu0X-8B2Z0F!J|WbvFW^B$!}+2TnyHKp*N8?E7GfgyitM}=5*8ISYS*-jXo=Lr 
z3Dn|1Jv4B6!jK;bV307qyWF$0vmEWOrRI}wrP{%%S*eRWINC0ocxs1Pi&bTv&!w#! zZ$4~l*{{1~atq)%-3zuSuuZhTkxfC>`)Iz^e_@RQLvnvY6_P^{N)>`42f?L>Q~Xt9 ziwz$1>*p4Kb0$=iUS@xTjA5QkHx?1x8qC`FHLz2tru850c`{MCR4s%rrQ4;zk_yXcAw?oFO@AyesYmZAQeKRw1I9~p8G|V zBkKz5it5UHD^4yIgq1elS1Aj_4e;)ufO-4pRTLq$(U1-|gXT9`S6uBwHH6qp%g4KB z?Y()Cfu3W6s}Gs47qDH{E5*n;|41?tB!KqA2C69=Ql=B{k@6CC%c47E1C1&=%s+8& zLKdc-?H2CvmFn}&uw1Y|{6|B5|2bNTupTF0%u9(0@hV!I`z zrb)YOb*u;dKyl?58~j+Lg!!K~a!DB-IhjuQ^=_x!6Pc{0KMs!Z_xS`q(? zbPEV2+SL%<*3;)Mc$dE<(*_Wk~=lo1i;ikD5W^Hz`zTtpaXpGRMu%7;>7 z-v5*t3Cd%t2OYrmyb$hs#a08He&vPt#B45s8qE72aJl8U`tV^|j{{TEYcZCBcKCM2 zT`=um+q?en1!j1eU}qqmP5U#^PyJ+8O!c->BxRg>%$ zyS~5TRM-6<=W;FQAm_cRs)NhhJ4!fb&`FT4d=`s;fk`0Yw>;4KqODq$b9^EPcqPB2 zwEblKPXPQA13qg|?J%C69huASNDxq4F1~zVwNmuB|1KN75kif0;jLtc-q|Oe2mvfX zJE1&tp$7l8ey$~e7cr>LMg!iY9k;({Cr9I8gA^nDuWMX2lhuph>71B!E)4h*2Y8|$ zn)}uKizGRAa^)25TVMk>sD%GUB=`VU%4jUCj2F__m(WN(+l&5B9C8&Pj|GFA8%?NxTW=6@6Kjv^zlKNIQx92x z)?Y%`ld4wk@5$?C`!)XmNtz`{<>0(bsi)L5;KT8L@x=VY@_!#1<%pj5sQe;J^V(0- z0(e3`LgO!k8T<>KT*5%yH(oqiPd1uyYB+CD{tnv6DyR0YGtrv+Q5^j1pT|>Z4U?AY51m zdTcx9WAvOAciT^VOv3e zrR^^1FCE;||0b=GxU0HuqEVL2Q4+ZRv#a2h$sd1dC85+*ec;zOC1>m*K>*K}N*;Xw z`ZhHOS29(*DMx=#N4E@JpZ8y}grFXerp~qHlhi-MVRPP=@IQknXH1<((eC~HTzKWS zT}8T3_)pwX#I_goBzx^7dB7>Sw;9w!(O;}a=AYPda5{;p3)mlJe(=*}{bgN_5jq8@ zgOnr=;D&0~x%m1Y3%d=xm7T?L`4Im<@}>XDBZoHT19x_PqEFG^BLl*Aa;NYYSEU`& zt@uTc%M(-xoNl@kp5K4DmQBzF-aqBGXxm-VRl0onH* z6P{$J=$BOZ`tn_fzYlo+E$3fE#b;x{-uLt&>QwEfAYKDDK>w1NaSX}qbYk#VFK3UD zwzTVj|LZdaM+jVRI&{Xe;U$5sppQfD@Z@zhR_Uh9VFzx;@fQX)7@Zdi70Py6i|r!C zij@0K|LSbCjNtLM=!*=^(XHR#GYKNs5rvJ+@HlWeX1$#v)P$hq0xke zi2qU9C4*px@Ap$1Svreb%*Tf>$;j?-ilDHtFeM`+Uz0Zd;UD==5LSC#2)AAy8zYs< z^xU$v&4ko=`w?CaJ`KGKxj@F(^4m|Kq8k%G58d7X|64nx@BO{=^Yimkmk(b$s}#RpKDQSk9(de>mPlUwL?{ALK0JF=Nys<1p&Asazsnful8H$oFn zeI@Y>8wHHX*i|O1(B<$!wlBB)0PMEEdU}3#gnyb{`D>ARY#H&N-Msn*dqpDF>3dr9 zHjkEOvY$>5Xs?->bGfv8puxo*UKl2(7$3bt6lD|N?f{T)dK0z&*Woi=4DPr=GMoHy+D;cqZbbupU6DJV#%-`{{60gjwtlHosK3EZ 
z94&pM!cc;NlWf*8xlU1E&ODtfb!l<1n3!tRRGeAgOOz>G@xrM0!yb%<-DhbWqxpVCQ8rtO$G0ykh* zXOQo~2*V9}khG#!v_z(lo@`AwLddJe@Ox zdMIRc@(gWh(L|g|%8GHiCWUkH3ZMNEdu@sWb~#~a^zvzq6Ue3|!_vaReQ9F39uywv z@#+p=)n^c@N!EA`XH2g==|EuIvS5eqpDGBz;}UMnys))V%DG-hF1V$^D|*A~qt zlf8j@<|0wsxa=WJ3?)pJ<(RC^>Z# z;!`&;|JQc3u)sCra-0kQ4m^LUXiZkHxaiB%y2g%l(tmavs8MM9ul>j|6{J@q zw3&M_XVy`Y6oXmfbYgVAdmi~~7_qhCI!-0RG=my6w5Q@TJ`O?9Y{nQ%$@-oyaq7fj z0x0Q(?=3$mA);lu98t=AzIZsUB@ij4Ve=s|?CB?BDgrJ&BA0F=L;R_2waZW{yojr| zrbX~rdUc)m2z*}Y+poB6UmfVZIq4S+a%yFhl`JQVJl3(GAR$fuOe*N>+n6H_s8M5`k%m5VDNUtOaGTz#LbH4NDSrkY9(xm=bi z5u#3(&!WVmp*NATdw4|oYRJQ!OEJ{4h_&$e$s|Cc!706NLw%>~jBwiL)&%t9;r5gs z0Y}xllW_%H)+C)g-{|dxk1cGi>RG59ZZJB|TM`8-niZ!|`pU zRKKbBV%x?51n;HqQPjX7Bi{?m`)C))+hj0mfHBY834s6*{154Gn(+BFzkU)gxYtX* zaMW$5Y2CohaMfa0dbNJMi9Xk_r421^-<_rjp;fKgYR=zDmTnNMO|D7vSFe-1UX75= zcg_59gyhm-Mj^kX7a!=l#ZU`}st`J&+n<(MvQG9yi6>=b6CSS`-|4{c!NZGy{X*q- z97kX5bM8T7*m_blV0}u0?DMjlSkKtL<>3gjmW;oM01jBibLpmcJQ))V;-u%l4r(+* zME71PumhLp5BAdtk2&ij`-jh3;%B?AQ9@PIuBzT>7Ib{v@?^%1iVwOkuNzLEE!^O& zmGr8ehxhEculkXN0g}OpiNV_qo83NVA3^FYsDQztx{*G9 z&VT&yGG2p|!{%gC#la+3yo4arbNwEch>57MIT*FgcN4v}wperihWh7}zAYwdg6~#< zPmnQnH`OF7N4=u*V6>cE9GNI6U7=1?;x0Q)M1nv}rvbDGQiCef&;rVS9z~4G7}u_$ z^-Fgj+XvC>ak|NGYFBopY|-UD`y8Y|Pm6@B4GeVWB6hdv0BEC>C9I;fsFY2#$shf7 zehq-dJokG*1JglCryIh{El2s4SW@FaK`jz{)_78l^7+x`U}OQ#>wwJWFYNGb4^(ry zcJ6~6EjeR58PiR?=9zrW&+5KtVBz!&Xve8QB&IaZ2< z;_w6PrwbevGPKH3@=p{^SU%f8>=}k~NiuI4I6-2~i~xC~!BIrmq4z<&-T{?u-;XaE z*;yMct`hmPb&z1=0*$ljOrvPGZUhhZA=C#o;;{SdsR~)5|LA-OMaAwyumg z6#HtYI!SVMNwa+JA7(`x4%&mYuE-5>zPXUJRI?4FuGYe=C9skRLu)Z`TT0&lxWL_0ohUD z+NBKz`EC-*2=Cvz+GnsUN#V-rH-D%Z{0V!Cep`wjNBuVR0rK&b9XzcN(@Y#7syU{m zwloLQB6!(4SE_9_si8`@DlV+&0pV!$+Xuu)cWsw4rA=jm2wV0GH!pG)MX1HP9~q%yr5;Vbz74aqfYK`w4ha?Uixnw4zFc?=L8!qp zDS9^nf8L+6uv`Iz+WkqbwEi;w&?g6X_-cvKjw zIe+OY zNu~xJWwy-Pc_4>_lXnYrxq`#%OZqMlNZ6QEr+H&YYOuyuoz6wt7=I(bpH&6~&Uwb$ zdY`Ua_1ltX^7=az`sB$Z+gtT_`h0P02;;^Z;?Rua-bN-+u=k_Ks?*EH9J3z3Q{l1O*R zB-!*7^1coQyn-FI`=bBO!j`={fnOl;juJOIES=ol$1qq$65Wz5*9Qhx10cFI^BI}> 
zpCX&KcQIoCex6@N;%OJTk$4lR1zm>**4H>%`vMADnwj)W&;&<$MJ~n}YG{6fqz`w! zpvB7D<;i5uV0`H20DnkU<4p}~arNsn#2e6`F~q1YT)Z-VZW7-RZ3t&wHPOycgPNRCit%L~O!S!$apo$NmX=*P{@PLn-+Qi~?#g5}Aes|FD5ZLA>d3VObi{ zxv*K4q3HwT>~OcfIRs)-rJJoNx$Jlqvyzk#g%c&zsKBa%qB)p`Y>&`CyEB{cBdtarrWalIM^M^gfOeUSzy{xP+)(<*EeJk# z9_^32%>Z;CR6=jtz>_EN5{!i`>zFKX7MHUs+7w;wMP}nV>(c=~PRP8c7Le^TK7T>7 z1<)>VW!ZEAu->EDBDb@KMdde?&%PsjsCYldi}bX{e zBlq{PX&a3m%1}t((qZt+k}sIix|l!#?81H?ZTigs{lxQH@CDvB|SmJyX|XSG$+tHTkd z=<#qF;;`I!+)H(Pgi!Jv`sr$WXocbc*#8}zlCFc_lB5ip^2GOnMiAingYN0m2d+Fn z9(?#>MO`VPKQKLC(0jg1cuPb3gDq^F|HhJY!fEJEzKOpH6{)$u-@?QERr4y@qA*Y~?7gl`&zEXay8i%}6H^ zH$qUb>9vBr2!COB^^O6>hbBWpP-Lst^s#dPUeKaOqQ_LrQ%p*b^;w~M6iph?{B+G% z)H%-jQ|-|ltWnfBqctk^pj=#~iftEYRLbPktY%GZlf$_!UtgAxL1~gFkRb)jlrX`R zMah^-tx&f(eK7VN-^};osB1v6opL&4f{<;ZRy8G=X8AFN^G<8jD@6A!*K;-ez_YZIe`(faR^=!PnE#_2l}7-J#= zd4^R;d?Ey4XeJKGYrW^|s_g@3y)!sr@bA_ z86PyYa+&Rq!0TpE@@+o~(8Xhu{Og`R7JH*vfbq7<=V@+2 zS|oxNWtiko0`NA%*v-}v&ub4Z-cgILj&rm5_=d;Jt6J29L0&te(maXlJJ)3px480r z0B%Pj_vrE%d3*`ufNNmdj+~Psy=G`sEHhieb0rN7B(1;JHM_G9G*O#8DZJ4!_9kI2 zEMHxz78BDzkUrTxQHp z*uDz4&UyEM#c-3uRP!90+{lV2=f)(ikjZ0mO&Lt6^E9B$kw{8GFvBX>kFoGJ`T$b(^r@3;Zcw-|7s2$lej-1sEC|&pfsU(bTG$% z+(-%c+u!^T{3wN`F|?WO0UrZ8-R{?PMaT)8H(3w1yMGX)@c1@h@>Ecaa7p7x>Tr&* z@U_Bf+)5Kq4gGkYaPN~?V~Np}9qh1KTUEXzf;u-3>Qs*ar0*Pyyy5^**hu8n88sR+lHBgj>s6JJ|RFCi&;hd&w> zSC_dr2Dt_wWD7?`9pj)w83utu&wCcNx{dml29=&J5k*TXYlec>MC_>1Ol&3`Z&3g_ z%WIzxkSOIX1+>Zd&m;AalV5teAYl*u`xg8}0CMJ|h3n2NQ)sjIRP^S`!Z#fGL{tA3 zdm;Q?V09gK6kJNFu(5Xj(@khqZ~6z_+PlMk#f;SR+l3Eb_VZj(v0}Yub}lp5lVBiG zX!sNq#3;Hc4Q`GD_jqJ{`m)N3U_=Uodk1*=1RcvlJwD`!46bz6j%{Ujc#D{@yxYh$ z*7Odm^K@&g8!h-AAUtP%7++YPJ3}sgrH82S>k9uN?xg+0F6}J$yLo7(gqYX6$pLN1 zLw?%|zf1$F4ddlg1cw`GE;w!fAf~u9LrRt*Ig1cM&3pE65_}W9717ZEHj+hL2sIM$ zP=H3#sX!E^&{pStBUfMy1>E&?Myy#8a0qsHtIv=KjV`7#Clm9PAAAA^9Efk3u-|okQQg=M&3VCUv;Ns1eYro|iDcVQ zz0;!#n_&-h9k6_(F#y;@m{I`pU6@DD;<=XOt;x=Vp~+`yMw1ToOr=Z^!9HlzP^E{0 z*_Z6IW-Ev!N$B8nj;__QRVnUEgsp^|$oxD(pSn(fnY>N92GvC|OkGSiYGbIoGY)8z zVZ?i&}3+dt7 
zM`^*p-Z6Fl)n|pCBCcY4M#bk-uv*~~$sj84oHK9SPKN+7v)PS`y_!Yj|J zzU3&WgF|}$V9rYIo6RJmQ#gvYM|g;HpK@h3gd(6^ju=X<{oDHbdJj}YXQa=PnW!Z36(57I+C1h z-e7V3fv0DLLF!lOCOD=v3T-8XG15JuDhgS^qKT(M#_&e*llJu=N-}A^5*mb)`F5Fd?u4tr zD_h4$0uP9wpTFb+R7?o z%FZ#M9GLKAYa{bGysX_FN}*qpz}~=2w%y`Q-vpF z8T5+xDBfbkN@ZNnm>e2R8BxXvm1amAQ;1+Q$*oI>TUR?HkO9^EsVrMy`kSt2SY$Os zp5!A_B~+0GQB4hLpB=(D+@EzzICE!TNPownMU%cWoUXT~D5Xuurw zsa^beke<(npjWV`kY{IlU5zQygB~K6I2e zQfp8ek4?%Jdov^aZBC`k%7C~J#VBg;n=6k)*tF&JE`nlZ9n3+#PJhl$n4ci-PNxw= zHJ=ggD3biH5|yjT#18ePDv(384GPOGMwup=KcBx5i_MF~a}*wXltyP_P(KAH$}kY5 zvMwplr8IH1pNLuBV1$+lC0&=8cUYHka(NCCd6@8N_n3^6ODQ~9#F{>uH^w5DjEZjRAxzxmwnXc7Hny0^z z2Q0DL;{D?@wAuS26J}IqxNO;Se=aWDS^0IJjI{klg;4RzqFMPQ|9NxDWSPz26Q0iS z)xt3OUTq}u)$)qgp1{S#%?88`dmLZQ(xm)_LR;({OGTj^4s6wEKvMY9Djts6AV1j_#g?n~Vuq1?gsq z#s1N`=i91do@&o*@ik*h4MGHnD5YYDPCtt*X*Ke zwMcA|52K?QQ_>jqxB1;O)W-_;AB7<+P*F>jiRiO*8&M^|;7H!A<_~`k1+j6au1bt`h;?}N;Nqz=5@(!Hi9vYuFS9Q+E zXm;b3ht8kvC$yRnJZ0eqjyL&)t`F_F?6*U5JdaIc#iVY3U2r`?pZ9s{t}8McNi-ZF zgAZrvVQ);IgbkV7n`lJ#%Tmg%gTZtlt6{MGf|D-D=lOEO-(j-5+oAf-?4}Xhy1C~z zhTZe=(kUzBv$yoc&J(b$mrpY}~Qv74r zG1^}U;m?YtXkAxD6_)44S4*JaJCDwFIx;%a*9^$$3+^97PK`LP!Hv~SIM%; z)1~-p!G`KWl>jReX71l<>e1pzn{r022;{HHmNdGK-|A zrJ1jI@XIAMgeRb(+S!r_EDQ1~^U$74$%`%bdtq0}2Zakj^By>INUM-|46Frl-Zfes z5ca{IsgyFJBI6z)8Dv<%@@M8MrCZXN2?|&+2+1&rc#sHaFpQHnjk_8U$1XkiE4|EH zDyoP?>BLT^nn&IvyX9)rP2+G$Mu{AGKvY8y6!7A^=&Xs&M5hoRTj+}^5k=p$#btDM zfwxo6A85~97mV23rn1M}tGzgurpIfwIme0@_|?F6f}nSCu_3mQK&84?k9pHj)51f9 zd3ZjE#@+ZwA%0PD-~TfjT0D7GCl{3wWxqOZRD#)j$-y){YkTb)AW)P?oSO{of7BF| zy9*2nz#w_-PAd5U&rLfHCQUV*>~N&dci~CM#qAr#Bwr z%6e6nz}Uc(oo2<*OcrT`%?DZ_AW~C=wh>IC6%k*Ll<-;>Oq5zp*d|QS_-MFpK4(Cj z#)dW!m;?p)1lq3|?8<@#8O-mOWe>eRP;Mr_s5XpLmp7{o-iwPu6;;lr7IRXyg;M5f zCZSw-3A`dfhe!>@Xp*r?N4|DsAug3FRmN%S3M`SLFb{^?&R7&QG*VkaKlatv0<58Y zQ~R8<3_w};ycypw%lWwYEuU)UPKpPpFuTETGxyBLA?}vD^4m8>B)cVoC=h@MvWhpc z9Tx-{Ez)dgWriiXo+KOzW?BCK{-umh5T&wik&VwWd*22r??tWc*6mn*cR 
z8%*&^){@F8#av*qmx<~y>j<_^QM5CNxrk{!C!RiI)L{gV0r;mPA7reTd;KyA}?8>m}G_S&d6`x-y*=QYT!2qA&SaGQ({qz;62Y99T<@OzH0WkUIOsT=sk|Yo$H_uP&lY8$?fV7jPBX4N zG1WGZqd6^HE@Cef>0Oi;1%g+Z^efKM58}rd>G4x%R{6(b#m2GHFkSHdn)Evs;Y080fFlWw4 z3$I+w@~-@ zzt4CSOfG|UVC%Iy`W-m+V1QJW3E@h7+Wi-J@L&N(=?~9+gec~!>FQFZQx#lPg-yRu zdLi%p7?!ja^>uU-J%{(#V)_I(_6xO??(dcS-a*3)%xl`J#8uKq%S3>N3b-1an(-dC z`uOHSSWsSS_U42b)|+ZjX}O5rBZ(Ha-heJeSZ*>#bHtx}SPKivqMAA7Wyr1?uc1?y z$vQMq5K7_vcGX@l&dBP-8~%yHak+a-K$`1h_DaQjwX*p_A(;ksmQ{Hik?UNRM^X!ckW*j3|Q)5 zo4#!;(Ak9+kWVvOn*g&Q;IsV*D$QpCk(=zgCQsM(sv!CUtevS<@| zxB-9y4o8q>`3e2p>Heu z=R|fZ$I$l`>;HWI+#4T%euP5j%s4i~b;P+mO*I0fGKf}pa{eC}Ba6X;EFp}kl zie8V4(Hjw+@tIgy<-<|ba6F$6Fg))MVug5Cx~{CGG zAsJ6wKe~<>;CuRL`%V;LOZm`2XVTus(+L`2l}P~Y=QU{bZi}9a+kT2Zdo9x^EmZ9CU~fHv~Z4*+MDfwK5*<7FNa$ zUteDi;O~Zp`3Si|HXQAx{6CLI*m!sOW%wDl3`d7{ONtypY8e67&wIGH#ql4VK` zmJ~9hYD-SOxywDnRyZEWFUzuyX^?tL-d(eOjVyVkFbXU zz^5F05MukXCJ-G*E+MccSPiJ#B&~fzphv^yat<@N*JgXQ>YxD9uNrfiQZ^7jGSae3 zIA)^wr*p)*StdODd5V2#QpD~2M=!Sz@1<^5_VXcz+>!R$*Ow-h$0-kL1w_Y<{H;J; z@E6yQKDR7ad>O)XnFIRQc=%|$efZmIw*h-wO}Eu4s;&8MxOZw+n~ky0!Jk?n)s{WC+F6n&@=;jF+zXl= zzSKtl7aVMC16$iE&x&yWaCADXnre)GdX%24375|J&dyE}oEQCn#!xVDa5|1X|7c2J zEaSMW)sz|}w)osHo6xmw8cG*@o)6)91UWOc8?IP-hvUiIp7xT{doAYX{^Sj3e_&FM zPijI0HIZFr{18@*&gjn4boC9G;15a(fO^_1bEkw<@Gan~ZSsrkM)OZl%8W=JGd@It z50x5;M0CPgD;=c))o`_ZP3nGG`AtBZ+;^4prZ^nJq{e+6N73FtLrn4zwYQ{o%#d3k z*Y9Bax`8I_Y*OO+4lNkAlA)mHydUDP#kn~ii~%+~_)Db+&)bUBYOTI50xX9~MPm+O zN45NzDv+36FEuF4ru}Qz)vUnleRdN)GuAdbAFD7kuusvnVReS`hxruEPtf{j4p%g& zWfQl;liJM7#{)c^JXt@*wF47gMq!x;lC1BTb*?b`(D`@%;>aq}Dfd!ODpZ(#*=Zzj zg%iDVM@LhH6nHNMbY5mky&V0`NYSwYi8K#(hJh-|?$LOB6D0~oV?1Qv8bSgF%HKzL z9a`3EqdO11`u=r>*WoS=NtD%BVAL#7nDqv?VC<`uh=d50oU#;ShZd`! 
zy}zRleylDd#g+!!v=KX78xUf(RIXt2=NsQ#Uc%~aFASEUh}rlPMZ5Ux6g+2M(6Ao} z%Axr({MvP!C5S!Y^#4RxmCN}Qa!?82_d1N}zX-3=m8ViNMg)(<@Lp$!kpUypL>9kx}HowQ_V@%Gib8`Lq zZ5paYFoq5PK>i^j<$^?(3o$I0ik!@?i`l%|_+Eqp9$BPl!1r?sZA6)B!GlD9YY$Wr z=CHDowr<`1q~f0`NPh@;EIUFFJ$?P{apyD=-cC7z2Fz<%;`S4F?+qB{KJH5J+)5m0 z)MfyM2FEn2Q7#)Uib48VUtnNzCG*p_y>p9uHpuD-Ma*!TaxD3}7}}vF-kD*oKCUH= zV-`H+hM)Ey;Da-8FPSz1opPA(KJe!?_7CsCzKK; z81NU_xI~<4o7F1oER9TvC$pB$aGsb_({kIP@)& zIAbG)a1K+LzmSnDmuG@)X+&B>pH+y4tr#P3qOx1#@ zK@9?*qp*lX*0Q)=cn^+PJeYN_lYtyz<{3S+&xE`Igzsu_qtrnP^X($dPU#cC(*>d+ zU+QiilwDm}LyhBE5MVhEWg|_DXqWbism7TxJG+9^_pUzarN)uC238_ICl956m1#TA zjh4zcU8=^Nya))k8L3e(ahB z@tMb2R0Rg$B5v6yZVUxR1QGUwveqb!*vggGK@$d_+5N8}6g^h;SZYf6qeO z@NUm!dT88gSW;zyCrcX~HL}~Mv7E+OzQVZ3STSiOU?YSPtnR1mNr~c__I23?w0jxK z4JjuG)k;Z@(`I(;8*X;0NtKaJjL##?K`Ui%8``WP#m*xT7RC6j=y(b|G-EicFSd`r872xUKo){Y5GyJp?p0 zykU;Tod$CSSOTQX0;RB9J!KYnT50&Lt*&tZ+(lG$e^Tz|Rfdc(OC+S(ly2p5hxYoiGH5yg9+&ply@@#EU5Mo$EZ(1pgAz{s><>`Z^Z+#BxrshJf? znVF7O(s2M|6H{a*TS+>rH%lu1)8D<~{|OwR!2I^i4uW?shODxdiZO8ku6}%({I6m|q;i zs#Zzt#n@A$Zv>Vj1>*8ac#fpB%X+=njDVp4u_pw0zOnBMl5D$s{QHwF`P5|^Q0_SF zhz7f&CM(9my;+@Ckv154ck04j<;NpWqQlZ=rSq$+tC!~W#KxnfZ}9nKz4FY>ae$yI zpSKg^tmM((C|W$57~x7$FU zeFU0rFqL|V+s(x{12C#F7+=r-@p4+@>STw|K6eWI0VA#^SII_;+;=74C#j8$D0=}y zJ(!+Ak(j^3b3kifXoJhn!)w~gtBW1SObp3;4>}X~BqmMR zok@+;(75=Z_%3&eCDlfKj+4)Q7P=ODzZYY?=ud?rx{>U19|xtk>rf(!L58>%;d);7 zqJxZXe&qL}L(-kfX;vjw_jKgY4-t(#* zexeZukVi5cp~mtTIP^*pLoEdPu0v51Hlt7Ubpy}q&h=T>j11sS)Ka5`lNYcTAmb7` zjM@X+f}Cp|YUAP9u%n6?5%1d%{DVoU}_BVx`rH| z*vRvG0xT>69;wKlnPCGsL|h?T%+yo6nxE5T%NmR@8!^2vbK&ga#qtH@62HVmCypdp z$;v3<@_S7N^i9VIaPLwnS6LH;xhPKWV$WLwIe2wefV*QUP?%(53v(41hP?orl%{t+y{3#x@B zCxx8plzNPMSvj|BK@p=A5hBBRw6oNS2w78XQ=0w0UM80HrtQb|pz`P&e7)u+5xOCr zT)a!BsyAd{5FA9Jcvc<#3wp;boykQ1bzFj?^Z5+LgjRUW8`&<}ox%(evc%BUdNlJl zx5wRT{hx%15hf*r1#zp7K&&NuYoJw3RjvH%aaq;%<4&}W17F;48Uit--1^wjcFK{v zm}{iuisSGeu4xF@ zB6YcTr*n*g&JX)AP(MGO4y$uV1tULibHlh!24L5XvtROtcy&k9$ok9ZT4&o>R0RvC 
zh9)D$WP6hjy;SsnvjFOK7@75$07rI5&Or%?9RNF;f#VE=*HW$7MePP-Y>14^D&tUR z7~*M5Z2+5!ry!GjZJ#IA;@0ca%_vutlCb_8$xn8L=BVGIFWG z5ihR!ztr0DwQ~F{Arj74_TB!Rp(Lh~Q(qh8TiCl&sj=8a9z_>kNRp|ss?S^%X{>9S z!FHv@LZEwhtYbtk+Ew_&4(MVV%^_=Jzxu(|$F&{c-gJ_a7>TEXSp%9ov@j_k zZ0~oa3*-50(%t@PS=6b_eVdB|c9d@P|Fx zZpYn4a$b2+Zn+DRwxM;Z9F^)N=2~ihbU!&BF9=b%gu4v|spwN^P`Y)hu)Z=XTkUN& zk=72THSR|5`>>Q{$v3@lSQ1QZxh-U>M^&Zh>2r)=zNMa6GA<4ei?m@uL~Spe3yCTN zSQx$NsBO2%su?;m7tJ+edss?nQNc!@KPH3#cN?<3gDwd}$9AX*!R79LA;A9wyg)<0 zEUf6cvSLVV_cWPpa}8zbX;!Xsd0K)s>kZdAQV(w1T_za|!ig@8IPt@1p>)D09rvku z7KO6KWFcyd{CZ<%2}_cGZQ0P9FsC2eCBqj_!?vC(+k0w3F2hnVJukCDm4uoXY}QQp zl9lS6bHHY=RvTw4NHj`xBSUJ*qf^oFs zQ*|uP>`Qa-sZNf)tDS+(P`hvj@AI!=9Xf%P&*Lm2Cu2EDO+|Uf)LyC!$Tcv{`pYhd zA8cn}r-k>$SFfuhv!W~krdYSe2Q(>jN)M*CKr{|+dB*-Pg%4XhtZOP3eEoN%e!UZZ zLimdm3N_fjqr$GfDhE$S$)x;wjr#g)5fK4)Py%)K+pKxsJ1-VgY8Ln3T*8qurUiTP zo&#%ElQ_3NFV;*~WcgX88iZ#>>RqSfkl8-H3o(3WdvDe&622xLfO^T#tK}WY&+3(L zw3fZ1ES!jEll4<`#epQFCTvD_r-CT_^ZnS1@XAH14qM6{4Oc0`lz*_8bcz(>*Jo(Q zst%pw86FwBft~p-;(oC1=rxdJY!ZwszDZBZw?;X9e~ts+*T%_vQcO+_;Jx;@_xKjq-Nz3%VV-gFzm!tPF`&F){Mtf?Sn+r;CtVpR%`GEHBmvo-(=SQifUA`UT~CNdppiQG zgaSc@fubyqh;NhfV{;66y9QG|Q6{%0u}Vd(!NomyqE*{?4i+K~B)rLl6gi6vQB9ZQ z^p53taBv?xOWkH4W}jCJtC^Kx94&iP;|>>hr#W$7D+eCSa;9wz_spAEe|iW`p2exH zTqUK&k}O2IV`AHa=H7HfE_o&q`P6+ms-OD;(Iyr@%Em#(K@%g%qG!&W3B*CX4Kll6 z>`ZXgj5S_`p*)-$!5=RMuhkoGPF3(nrr<&ptEUAvwFIl?Zur0!LtA2eB&M=_>hp-O<0i66Zhr$%rhB=#vRhhxe3H|<6rtWOCfG+gL2Obm_EI=j`L~SKoRdG>eGzz6&Wz#@P0WN1g}d`7 z%P>3vqZzD-Fw?Ij~HRUTgaKCt)RGBoA*(P;QMuK2vQmPTrm2%|}~!W7j0(`J?ci6GTp($IcfQ zG@jL_Z={7ywT^~!J%R}Vr{Hnqu?|Kf<2Z*;HtRtb=`-5 z>hFVpn*jYP{;>-nM((AVBXy&<1trOeAx&COa$YL@v~EBC}&K}6~1&6`ag%Y65hcu?z&_huv4LK5gDL4I~+ zw|c|BdbjER{{E(#3CTHde0$cv1CQ;68&bH@2(@FUu@{kVAb+`A3mwMTUsza?&4g8G z_%}99w6J%$hrJWM?5uW~pdq)x{si%YhvmY^<|Kz7%<|?v4yWwXlux~Y^OcK4N7iN{ zZnSK3_t>uRd0H?z?94 z2nfH*2kC4B*qas4V>9#q!KuJnzo7@;t-+B&m?&J51W2k`AMqBu^Un84o@%>n8fJlsUEFe@f*BAAZsY>DbWc!_B}0z<;nnqWQC7 z5^}X_@$v7Q;KqIB8DStEjH-u!VmJ^6vnvY;`3j5{@O!dYySnfTRXEZQh00v9G`owl 
zU2k@F$t9Pccic56Lqd#|wZio@u9DKEA`lvBf+ok21ZL^3TwO#B_WAZF#~0(SzRJ6l zNRoAJ3ElMaHz^6(^|@LqFM`a2cnX(ttZN0ik3?{ap`Xj;%vKi&GQV;SMZC)V%oYw( zPFK9DaxFyI)n%6LDd^3YY&$owFmAX1gkC5JON=6Lmy zI0v_lFm~Yu{O3;-DU^t=@lL>fV{P0}+R{+c^)wPzRq{Cca64yrS@6FOEcY-KrBU37 z=~S*X9rD3<-+lM-*kg~)46W;4NOwcw;*21eZBL1zA}jv#sUYDqRd>c1miLcO;U5!& zy9N@5WMA_8bvzgUROr_3Yca-pYrD(Oer%MV`^_!}Mj|*#&x8m4vJX#%zc$RH}n|PeEj2sFgQ8C7l5#&>xFTA z2`!S!6(S6rSp$+S9)(O?vYlb7ba^g=ALiz7d2-InPe1)M?d|P6^UO004*vh`y$N(2 z_kHL2t?CQijr$@l-Zv?c6sgmeb^4TSuN}vB93P1td*^L3vpdO~-ErQG_f772X5#E5 zlYO%@>m!>N=ic==PqAc4mSpQ7C5obW9{@>!xbLIy?yCR3@4u?h4G;hU5Jv;}ef-Lx z8ineA{kyvA_x=B_^_e6_gPeU1*w}1J)4f@s@j5;}u2*H3qLVIgn;`|H8dj9H6i%K!7vKdZcY%bmel(6%;K<-s9E>+3r~+=wGW)iHOJ0(3qTG+m_XlzE zt{83x&mnj72K-lIYa)+eJ{URR@bG2?;71DcU-{?!u$f!i`!`|i)NusXSQwSCu1Wjd z$Pl53;6v|IpZXMz962&|P=P3n+Iyj{|C6&aUTB-}KHFFcV@Iv?-?21Q#}F_a&nGbR z=g`E|m2*|l!QU*{m~wD)5Jr>fUUk}^22m|lmtoie2 z_RPN{M-AW`Sp)wfY2uLw;`sVkh7k(6;Y5Zj4tmG(!oYjlSp}XEUxBz`e!=wf;tQK9 zVKh|$k)Re!>LNpq;!`~b+_$U|sNU~XgYr2Z1FlxS?|tvXwr$(+z3+WbuR^Xjxavv| z$*x_y=2ft81Npv9v*q*5pBRPmB)Za zFt4k(-g;|8=Q40kY0T3DaPi_r+rok{!>ihkmcaPIMV*Tw2t`85??#6RMFKqrpa1;lv2WkL{D69Y26X-i7`g(CUd)%? z^zdr*SGZj(AtA@K)BVqd>$Dj3k|yeorJ=3aWD$MtPGPM`YsR z`xE%xfQ>i)yc(al8UmQ09GccYpum2=1^2n75+>dU;P>A@fn%Q^L3O!ZAPh_(nRRj^ zjA_E4ECf_UfUy)1OXkmK`gyT&+m;#_TdJMVDXvm6-=aL^h=alb@@Fmq{N$@ZNfl6Y z7})-Cpz?tJ_2$i+@#Qan8Q=NNchJ|@S9D*Mxe_vG!&|YZ5M{h~p)ocWEbZ!r72PQe zWuoD?9Po2m>TnXybA5gNl8=^#F+abh6f$cO8vbm^M{m#8ty{H+2Cjg*>#n@#&5Hfsb4wqp8EEdQy6M4L8?3e>-aSonZ-Htt5Q)! 
zR0!#X1_Kxhok1s`qb3o>&f$9O9cjd#bfdmaF4XAtLW$C0A36`0q2q8AVRZ-wYhS$b zI?U@G3mab%W7q~B8rY2hd<&a*1`_VgBQQg}yy4{<6c`sd3kGBM8H=B>*{1l`)pGt#O*xD`^-iUt@g&rh zULaK<5GImPV+kkeH|O*oc>HJzk8T=(`NJ}(S4)9$Rt{3mHtj8@!1qfVx)PDGJp(*7 zfu|meIpazhQ;l_2T4D}?mpj0}tq8-|-3UYiI^RHD>H(6|!h}3eNK*xj&DBoWk*)HJ zXCNMkLr`PSx&Qswz=dzaIQG8*`@W`MV#o~z!k_%fpA=o!WEzz;_Idrx)~CRG zo#8{=xY|mOL34AnUJa)xy&a7A?&#>yC0>iY`5DK$R#xJdB6nlGK=H|I@!fad#lsIj zJoy>Y!$l!L0l{mRo`Zu259<664_=d}PoLIB9E!OFalrD!Z~Jt)Ps4V4(Ddz%A{{lM zF1IbZwab=R&15R_LXKf68_{GCyRX;ded7mEm5u7x=hex02IvjRg-ohJBrA`eaO6i0@e9S{9W!CFgYmD7z<4wc^;#4- z69xu+K*n%Z?I+*FaJszcR>~HzD+}D01m2SXYI8u=m?{k5dMp?YRpORjOzn}>N|75Hc>Xm;E!wdj>ep$av4+0J8 zXPJ<$>2SLxQed zyM`S*c4+Ym*K4zP?_PZk()g#hi@z^~9tV1i`1@k6fEBQP2r6f##2=s|o`R8@<(|9-INZM(A>~JA0i|OJL&Skw4kbfC3>Jgt4_2sJB>I125KpECD3aPTanC zTpINJ{jM~O-DzOl4|MsVZU%utpOcN8a~m3zSuh&1z?KZK$pS*QBOsFIt?@_fQ^3kf zg&{-QYY=kIi{U}a4V_|=vFRpAc>4wXFt*k?g2yT_5Lo)OZ`7&KG(8F1+f5ZzGvc^& z8K^h^4UA0>P4+){{PD+e{P=NQL8Tbr_2N})q2WK1=YX%#t3Xk}Rau6{G&D5KE5~4R zifkK~E?v?qx7-9_xu9{+1miPj&S=5wRSm1Gti;ZpJ9TK$p+kqJBrVfZ!0VGgmm(i# zISDGiE+49N60t(8%!l#jFzlXQn2TIN4rY9)PnRv&kf%844~}B2Jc&^=iH_P~yqvy- zEhAOfH`a_D$$BjmFj*kgUeG(_f+nnhiL!AEPky@-dw)8NH-5Pptskz1{n?MfJbE{* z=U#%kat+2BD-j$@`0&8kZsg`__BI5<$kqMV7vlg z9KZKWrNL|#b{uvELsbN-+kHs!*QuQDGhl3+wldAbTkgTaHn2Yp;~*1yRbI$=@5QEO z+@GC~)3cPUq)}@tsNPZM?+cTQYsx2oKZ)t|HqgiQyc#UQS{9rt3@A88le#h5#w zDHVF|+&P^v%2nO9)R#b5doU!70)aUO*RNlnl7GeT9uvbiZOZo};r^#iozjB6Sd)+o z2`H|~o7&W7zQIII0256SSZ}`y|01hZg%f^65nR+Nf6||W*LJ}+6cQyFTx#sY<%T}g z#>%i`q!!yJ>d=y^MTH9McgX3!2J?F_9cC0Xbk0{xTLPc_-&=9v#UZ@)#YS}Bw+p`A z+mZgq=YiAbfZpQ&{5OPrhHdn8jTFgX{plr z1G?O&>4*vad(T^;_cMUnGRF(S-+3$P{o_Dwsk2&d&2Tfck?&97S>Mx(X57x-tylNQ z^f^ol0~-FNAt&cyx>3sUd%X|%Osy@2v8nu4j{)-&TKk-^D(|>Cw~swV}=MqGAyT9M5vc)>9NP2Am;9YsG8e@KnxZVS9g-}3rc6RFPv{;jn3t=*{PdVRNR;U?m2_s%-LS48zt?$hI*`BC} zU-=hosB-VDf94f?4soDtLT~S6JJ2{$iRSSN?3-xDmUOM|r(nB*NAo2e@#%Q`d!8S{ zj+5hf=VNs^^$QIc{n$rgy!T$D|LH{-=P$z;UtGnel_He%J{0gZYbq-TV84yUbS3FzAU4;l!iR7qqShrt~9(h=%u2kjsoF5?|ILRBVY&QvuDpv 
z=EAH6C=M7_WV+>+M|PDVo*IEMu=w%UBuf{VhU~zR4ES!}3nCM+&|W>Hw=>BuRK_DX zFxrGe^0Q&(n)xPL&lei;Kl#Z|v_ZO5-JH1~ zz7T!XdZeaTr)-jrzrxqyM;Z4qQ|cEh*G zkfK1=hs~*)1!e#Z8ciS#vW8ZE=ok!{$Xs@M4x?B8Hd)=irI z&9h+>R?tL4dkRng#~axH!Vq5nN((w4*@?*Z&B&g32ia#{fN#9Ga!AaKfg@wv+#-SV zpL2R*f^i!ihbqxl+5_K}8_POQLAV2k#B)$2usq}>(I%j52QYNOF=}at(4gl^ep9() z&`xd^P~wNNs}8Cx4BQ-WauR%i>YIRJ`he}V`FMSL0h+5FgP9$JSu&J)2raIwUK`6y z_6-8{6~N7*d}xuuz6V1vXs|Ozf2Q!F#lSMEgK;OMsL)&DGhj4VIsFN8p3!eOPqp?T zM@aDB+iJOz*gSjUsCzHMJXa^E1;ml_(y3?HGfz0{>qJ>%V4!X!r;pG=0OF-oI#sf>B=I7>O! z^#F@BFu#Ba^s^<;GxvZYN8F0B8}dw~thDY&VAwX|<$etA2*bYEiohZ}2A3y7D9?o# z-1yA!atDHo6S_H{g|SFdZ*2{OsGW%7z(_N;j@O_eTZOO+>iw+QDjxCiW#&VTcApx@ z=JS_v_5;;8@zu>3dH5lihxQ`(%u7&jzY8N-9OaYiMx(eV!KP;m`@9U@G(w^Eu^Nn> z`4LLw`z{nO+H-JIKA_cyW%y_u=xRlQG8ntR2sQAV`El7c)Qtgc&@*>HC(nAut!tzk z4P`(y0Cjx`m`FGRp?4Gpy#iZn+>jd^Mrja=p*kId`VOr+2Avxh!)y#U_rO+qHC~Ox zoJvJr--A>Z>iQszy#*eLJHgEfK>MKM*(l64pofB9gUK)A(6#6gnrJeFs0uH*Gcw0O16TLMxkD_|MqkAQ`YJ9V zkTtPov<5rJ>d_LfMoYFvHwm+Jt)rP2lx&}YOxOp%rSQNH2C(D!I8J<_0T({rfY|3g z1+c2o^FM`ZyT0fqaZ3R|vwKFi>2R!t_BLRy!9-&e=XWL$c)n}Z&Os37!A%hdMFJm! 
zlPi&YNMuH))%kY>f?mmwr2&SQRnG?YWp zYYX=ri4n%RSC&lj-gg32Nb>jc6XL%DIR+ z6#xJr07*naR7u=b9mDzj2YMuW#DQi){{0t@}`=^bc z$IZr|Ui)i&G<;3)`bU~%=EMiy6Exv>fn z|C>KqCw@JLO$o&fuVV@Vig0=b3SozCmMvwoNH5fg_fLK&DRS%T>U83KF%z);x=Imm zFvD9*kRI%VZ;{5LX&WfZmgod$Yk^@dG|n?-4l1~OVs|2%#`$PBu2_AjNVcJNtQ5ya zwqs|i-Yw^fTf7Ne$V!Jz-2-CRsd2Pih~wh(LpbrJCiFh`D9rnhAobiUz{z)EjILZL znV}3E8`_4j3Lv#Wap21; z&U_P41xc}Xfw$PJ9>i7t&LO?}Z!}dqeGFW$fHtTp1U5V2N2-0mNoMXE)&?`0DxB5+ z>IjV5C{$m}IVPVi$xOcLg>sF4#{Dz&XQ-fp4@+%OOfV#e@1w2vc}|}@PIbK>U?=3l zhymNli7=QrJw2x(naS_N3a0c1FgL>WTSydYNUK6z2!F_1_Qy@@WP!uqR zo}xi|7znEktAVkx9~+&qX`ctLSLVX7Wa;_y=kef!4^IA$<%QWXncoA7+hPv=V@F-x zPRwVZZ&w7?^_%c7GTA?z4xlO%U1SeqAZ})EZ3?*zWPKI}OJjPwUO$M6L*c?T+AruN9#?@Dd5a3ZM-ePaL#~I9NUC7WYs+uK;5`QyYMV zgD~pL9E&xcf|^L`#$@_;v9#0qc(@b>lZwkq^2y4)!!}mH2>PL}4*-d*lV3oCnn}ep zei<4R3OWhOk&x3TpwK|nNz5jU9d%GlVD^%AU4fvwhMa`x`WcM*nb5k!gz6Y@DkT+q z40zA2FLy)@$1tTwf}u;5D!192QB&sRByc~U3Dq^`By5xN5RC2hKt)Nu5|Q7jOr(Yr zfDR!_0JgISmDsnpDqgFea`6mCk3y+Us1oEQW5Z3~^$vz(Nu- z>1hz}fKY5O37L5WH1@Y`+csr%ZEbB@7|>I2<;oQ;1`0!tjvP6H?(S|aB5vHcQMA>I zN>mVrt&k}5qkoGZ#%qgj{1r_5(3q`S)ZhByL%)9vma(!60_i{w{S`62z14IBjiVJf zG_o0+lQpQ$mLdSZ4m$EY3b}v@%WopW(x-pkt-CBA|57uqJXH_-SAPk(bQPH&JO{I@ z=hj|0OURyq21M=9g2jOj>Gk<=>D~~=&YwhB5|M@C<|+!r!BzRNRu9I{PBMNKu+sUO z{K|l_rPis&!)iZ`X@AF?el)1_DfIqcE&|-@MJ)9i3OMJ=F3BM`@tFJh4IrL%N~N;gtF7|g zo694PAgYQuy%e~=>ED z_xbX1mHeKuXalQfvBDL_`RUWA@#K?FPVQG;UakcKMKXnZu{P|}{*>bF!o zJqrdWp!&w0CSR=4&x$+jg4|LKg9fkd_C+WS!r0sBgwc5Q{k`|F+%Cg&8TOV_-qhIb z3WQW9Z`@O?bPhXRndd5)!TmEiT@FThGMs?$fn6`3ub$`T0)*58n^#r*FVIaSBFi z>B=wf>Z?OTu3~W^MJ!*sH-zC2?MC=mIndsV%(WXZ2Zv#I^}sBW60L&ZqV<4ho}HCh zSv{t#_(q$Z258DPzAYy{Ua!ngHZ!vwgPN=36ah7*PJ%G|1kflRO#<;=Cs%;wNxiPY ze2Oq^W_rjD=2-!c$1V%O*s%%fa<^kFdpQtH0_OYD*q*IkH!J_JRA_8kqq8WFSwFQr z>s-t38!dhtnB?vP?aog0?gy!aE?KLQbWo|EBx}-zrynY+>96vnP=GHx%yl$a>Ydiix}}(dMn3qJl%%c@lqTa*^1rqMnvr(awS|8vQgie#)rSvj(sl< zl}U zlg^ZB>(pyg88FLB5xM&S6rY{%eE?P>fy}jbsE#gJH`-zJ55b5hmNkz;5T;>G_Z%eM zyn}r*Q!5W$k`Ek%lWljMN)`lwWDL}}~=LmQrp*#eXHozE+ 
zLG_FP6KSWS5R-+Y!8>nQHi^=Pwk`phEa*=_j{x%-C}5_C2GRItencWOwF@#spC~Bq zL>Nqm^TMh{{S!dI04l;x$QQFjJ=-&f|ZkR#O}dN zjuh@3@p^1*=!IiqF$DoDfVH-^PMHrc;eL7oJ;R0=UPPfl&q5&-87zrPfxtxOViEFO zr1x6}(vcv#YZEY!FW$2eO@**2S21g2u{p<~tpEipv^ES+CQZjm(t2yJA4YlnBAP}k zaBOrtn$p#XM1nAFVE4OmH2<$xaOOipIQdIW82G^B!29k)`fs0sdHymGTX+Z;^Bu0# z_o1_N7>{-C(-n&-4(8KW!ZHnnV>UkWd)Lu-Ad0TLO3`<)6!F>stgug~T{9xoXZqmt zmm+%4A>hbeS}^3|F=WnPLhiyV)tx+=z*^g+_0EnO?zH{cgfCSIw8w0JGf&X;l%Z zq#M_1xaCzDk`tcZFu)N9Dwc6WehOZ$EzgHNEfqXh<{r3-#(J)NhMQYqeTGYlX-HOYR_T*mryk*^q&e`VhJfmZJNv zDEjwBFt{^{WPJcRzYmlPK6gDZKu_9x9su6^Agpv6nf6XtSFgdk(E)R03~F=?MtXU= z*9#k#$rOXaI|E<*tXvZ-&y#rdtwJ&75O!C+PTnpW}`2ycc>Y%5v*XM{Yg1;Z_v#u-!a?lHk%egrU%2 zIn#QE17p|cfRGcS z#cD;uokN42!hp5ymqToE^_b#-E6XAdiW4RcFQg1_A^c1i`?sqBJqIOeABH!FknCzl z$zsRATW$^rBZLVoyh<6Hrp3)P66V@AF4;ffdi9{*&Lz9CDOQf!u_%s)cA#aX3?KW` zE*$*H2;TT&Bd$KZ6Zp05$i4U~tXJQJuYYjC?`QdJyxrW6_UIs<>^X?L;#-hKZq6_x z3WQ9=d7#s~QP-PBLwgcOel&nYg&%`kO30_$2k&^G{MT>2*ncg&39;FQek>cH}Q9%}dukdEc!_PN;2 z@}yhcPQ*C{gW*N2Qq&&<1{1nEkt!L};WOA&0i(BW;nJ|vm|aAQ>y)F zC@hvM%SE~Wms9!jq{9iPvJ(w|_H3Z781}gZu4Uw#$F@l!As5o$D88OIMJ@1Ub=5Kj)>1+Mwe8Q$g9wxCc5n?TqE+e?Jg zHnyFMVcXdl(mxzPye5c|rXX(KU5fUj-wNS9) zN#y$ak?ZP#F*t?-kx3p%? 
zjfH3Id_@Ti#{GAXItj>(C~kRC%WY=RsMZ$4nfh=V!2+FkPm1obIO_O1m%2LR9!lDy6{o<6bC+&Cs`q-Sj0gw zg-*;DCXKS$)pEj^bP4|wiUprC;R}_Zy*dV8=bf$eGR)=0T^G^SJBp9?9YMqnyAt8{ z*E{#K1MBl?uS|uBNE(+TeYjRPh@Wf@VOM`0j=tW4pFcf>^B-x%+h1-0KKW7D_Z>m< zzdr}xl{Od)%zI<9^oecP(Oo)oEta8 z<9kP8u%mHV$T7(6f&}EU1{fo8orj`S%i z8`kZTWqG$qm}%t9^_|?xu%qG91l}xd$2(g((J)$uyPn;ICysB$<P|8M*B~ve5 zg}UAW)yE2cL#&_fKSZBRhnxF}Sur~19CbCtf7@KH?v4jgZeU%gV|01Vww0j6296Y3(hUMny zn(T)kJ-!iSd>h9=%=#_dtR2-`S>iex{`ze+z1)JK5AB8TgLeb>9Y*{gpNIA4DVW0x zEAh(Y=x2AG#m(L!Jk@^$Av-YV{NE0G6)bwMxw(M_L0h4vHIA(pVzB<9ALG>lbnlCz z_h1yg`=aRE8%3-+h;-2J2nB{A8xP5-UB4kP?vjw+ES;9 z7Y$s)HE?szeg@;_Y5c-VLSDYrWL@Eh9fy6$cP*}tIMtJMRiFZYwc-As4u^^Y!?AoU zKl3F<5{}Wnl;un(2kKNVGIrIu`4=kRXpKUJTTW?_qMOYzQQ0VPw=-F}q6ygdwaNSq zu10rtb;)YIAP5V>Z~M?+Ya!R)gTN9`T{@V<@ojAwjKuI*_kL{7)#_x~nO0p4D5Qf~ zZG>(_CL$S3MA%r}T3H8?RIiHZ});6QT-#6~8*jGhiu*)yQjkK4*b`y}@;V+ln z%9RY3BxM+rS(sj6=)0F1jKg4jK3C!=ld2Vzt@56ZLTF%j4neWH5Ir8tQIDMO?R5cF zt|&0v@~Ocx`JrU-G%0E_F7B$O~;fScRZ}3~G1+C=ENS?kr_m;##)ejcln`wskm@g6bG@s{IsJ8lv&t$MUM9 zP~NJ0VW^NQ7*FBf=}JH;;N}Rhz19g?T1tW7c`Xd&1wkSUj3l6j#+|$gFLBxXHrQ^T zz;F$WhyFdV`wLSJ`O-@-X@RiZ%qPj16?ux0Vo8vm?>N@-T+f&Joa0(dY+p+xf)>V- zSKv=CZ#d6HG>sqaJd4(zUVNhOUX-Yyn<}fEn0WSx@D+D=wE>>?+7q0g>WK=&6ji7d?|r@o*6`}(kYqRd3Px~?k&gAo(K|b#1=H& zH9iYw$dAyL7JZJvy%Y;6WCwz$A(@Y&uUIg&P zjXqN=&CQ!P@ys*N;OVEIb{^2|BXI1ufXe;ATfYqqU33g-Kk}hG{DO|fp_pe73c#pv zyA3nZmWjc>0%OKj`OK$@XgjnzY`XDe8o23M)`yR91H)gXtI81hQrt#HMiWa3{BvZiIi@>={ zP!;7cw{1o?5P-jb5XL+WW7||{Z5qV?Q~oM8$7_I$jnYgQHMvrh+Ywk+PUj{>?Gl7k z06sRzvjgxMCj1C!8DY6zhXAF7VIrrjY2u-vHj-B^)o17p?wTE6FB{LI;@vEEoti-A zulg`r7ew#wFxu}cN6(Q`j5mgmtO~&Ln>zGJNBtT>gjyPbmPV8uyBk(IgWQdquseH@ zJ$KP*#1>=M=D8ce32TBErw?)_aqwdi2P+Q^kDvYQXSyNUv17+3-{08z3s4R30nYsw zpzZH~kygi$^*jmQogtHT!ejc!oEUrNB$S1nGM(%e%vA-J8J&FYdq(^$#*SxT^Yhx5jXJCRg~Gsl#u?ML>3Ch9Rl&bKvE%Nqx`tpR(m-PcP#(^gK2>v+Wc5UV=N+)! 
zW^D9qq_R3!17j(uc*=PV1z}n!5CvKH-*rcUeP4vJ|9=GhQ=7T9wzlFg{^BomrK07A zWj@>5+AugcIJKHp0bI$Z*NNM5bQfL>G%Q(~mr3@_rCDupzy?`5muC(**UTZIN9cCq zAe~O%_ISkxuMOtOyz|aGC@CqK^LiQ|AJ_dmL?A2=xuAlT8lUmOyIk~EjpJ`3C-C6S z9e8x~fEEZ@(}Hh}UB|0}Qt*$C0jI~{3k38k{X)MPqoqlVmUiCqx?utSoQY^Igs4@b z%aB&2BdD;#$mVj0ri0jIRiRXsXi?y|43y_0h^UZmey7*Hkkg-M34+KO7Bb5DZ~6nY zKf-Xf92=>6&srO(?MtEIW)g>fGK5$~07IKg&~-41&Lia**c-t_V+g5ozxHSN-FyX} zzn@Lm_U%UaiT5HqG>Y`u3s8N7F#7rd&Jz>4B^Pfk^5f}aHh%S2e|7q64X%qpo(`)3 zyTv~C*kgGA``>?S7{kap;MzYxUHv{Vbj5jXdade0;~NECh*?F5{SsKYC*+6)gT}Y# z6`0IbaFU#PqtXe-EH%oeVH5@Yov+}1XjGRrIs-@#09Ga+k~1AIB#HthPzj@KJ5;g@ zi1p+ps&`C!4k!+|MME%x9Ixw5Da@%*ZZdYxhRV8PAjO{<=OK_7jeHPg9DInT%%kbG@m%@Nmik2H1*W0#j)9=ES-q{Q_8nV1b!r?GFJ3CioY`25` zADWt)v@yzUHH}Qp59g9UJ32bFVLu%j!tAv`gP$VncH`y#%n{;zuN7)*Yf)EMr@c&b zeJ0*~@tUHrlLmiFk8SAWF_^1hK$*(G=H4p&e9!%`OohMP{W=Ds(p9%;Q9vl?M^O1; zDH~?iK)F?d5<7@kE{XDV7+Z36@GBFETndfpN;KuFwYMN>2M|y`{kRMwpsQD9wGYD9 z;Y?aEz=B~b__7NAjDnF>R}f+G$>&F-Z*`3sFa)BtgL(^H7^d*6Gep~Lxm{U+rgjF3ZUhq zFenP@?{j(?OoOYe&p!JsUU}seZ4|Bqym@i8cOmie6cMwn2(KhGEQ`g9;lT$1jpCUe z2n(HSxYxv5>Yu>#TS&gdT=ON|8OktVtn$3pjz+P2r~zl1ZXzBQ0l1dYk2$(CY#?O$ z5wSwrbCAqs5X}ZrW0fJtdsSMYAyUUz|-Gd;`F5ulNv!Db&=|;1i$t zgcb@6eYk~O0vNdnwEuUgjvoTUSAlH8*{d*?e!5`5v}mHje;d>OUHIRgSHKJa`+f<= z1AhRN)ZW>LHgX>7wSNiRe9;MzEL`EAYpm04Ib(*`)ih5E4Zu(tP)G2zXjmmO@DDVad6c29tf)p zSNyhY*@8zNc?A3S??-ug`OE{d(m?+SsLp4BzMlc3H-JPxkjvz~2Iee+VK!8OAW%+O zPt@KG9Qt2i?E1ny4lqyH4f_0lg1Yb>VDw5pAHkgFv6u@RQ}Y_&{#_sngzJFnJwWr5 zFq%IE)Eu7mKwN=uZ*SM}*>Ak@#){+@2!bF8LNVd9O0SJ~+s1HnM|>1B z%Rg4>R!5;D6HceRjVGKj{3jcgCEdL5l;}w((3x0x1$~Aq<`TWqQI^L7u_uE}-}WX> z)-W-N_{>8_2$o;5?QdCl>jU%QYv?6UX6U#b#xTV9^X*56{yt@S0uu&% z>5lFDvTU0Id+&gi4;2#jvZ4{5FlJKAwF!%M?Mz0$5x1Zx+whUkY~29bnWc9=^s_S1HzDhKYV$HC7X+k93ZUO~V2@_2`Wh}xCN z=ISlwS5=n(MCoZMMoaJYmAJ5ZBvuc=b-jt?HYXv)ZV!pMT zu9A7=%+1N%ZrfX1M->eFgys6YOXHv>OCP_!D^b|CZFkac({-Q@4wY`!hxBY7(L3Y1 z%_WZ13N=zy=6XfDef_Ph&n4kAn_J!(ZNJjJ1Fc?p4rMk4>$3bS4cwb4Vdi}-lrgG% z%Fj9dkZ)`5n8Mv5p^LD>gJVv=Zbi1~BdizdF`^s;zokz+){9{YoJVzm 
z7*6Cc1v*MlsgVEi&tUZg#B1hf|L619X5s-dMepy&t2$zI+uMZvwk=&4 zGrz-)F8kfQo9tjtd2-a%E%7BQ85J%AeG?0LMXQZ^-aQPwdNtQIcmpvjN4=79=G=OY zK%oLp#P?1+G*Vtbr1*_pT!SH6t+hu0#O zXBH&_Rfo%tnjWuu(Juw!z3 zoe#&F>gN89bEW@`9_%hQjc>jzsVmGRNFD71C4x z1&4m1g%RC&Zt^7NxWaPvf3|*I?#s~`8ue)&U`NV#GAeh{E6%IhkT3={H7|o-6>S6L>tYqW4$RkC@rwox);6bvPUaZo`%Qz z0Z}r9hoWt2e0?~J<|>-Ke9KMwWIR?EwlL)1<|5!p{4SX{l96=guVgFCZ58V@eLhJD z&OXB>uRS^bWl$BG2O4+6`3PN#To})n=365!x9&uDc)%9!it?k+E9Z{zv@rXF_JL&! zWA;d-=9Df>D9O{94>4HtF)V5uXeu|MC08;Ia;JbOaY8XZVZE|LbCTmu-7q0`Hv<6# z444C^0t&iuWrDJD$V-JL38NJB`vq!4j^0achkciQ;Udu7_#IAxgCzl8ZGB02Y!P5A z${|VLwBY+K5XtL9l8hpbG1FPFFTnIPG5954u;K%GQhk+=_OWK-W@NBWbuN5*SWO|B z9}%|W^^E5Ss{78K)lNdx?yzfub0+>0RH>aZT5OY*q<~U}VP7$RdO?FKiBQl=`6!Tl z&y20J@|kF|Ql*3Dw1p}mg3EtVa>3Z{+1%d`dXn8FM}feH?MpyZ{eOd)M(#jC!0s8! z{=Q_WhFC0?0F4r!5TSfm(LFRs_1~OHMqzHuTvlxRJTQddn3E-rZrJPM*YIT^Yvgp0 zX0>uF?0o1%=PKjmZ|mEOnN+%HSC#I_#6Z?W%8%V>OEhQz7>jc*OWzbMt|vwEI)iE> zK0ot4n2bne14Agznv|{}&IlLSDaK<1_a`3Q!lErPU@3wkNv*KZQ{D)Vf`x+Nh1&r~ z+x9n)S2;k~vI0>9LG&Ve2tTf@tm@`Q_pq-H>0Qt#12y;eZMH%qDAm7gla;KLOs$>s zj(8NUqv(L4kYLl&K%P)95p$Stfs@z(Eia7M(xv$0aDT@r$VCEZ_<}QVB-zJu17)-L zp05M3mB<+8YfU#+>|qg51=f$^r0!7RgBVT=DUsqu@CXAmMP+HE<5r<*MPLrTFn5{Z zgecu#`iu?PjKT4NOG>kMJ|rd5>by?> zQwsDWIQGL=1<5s_IypeTLaJvRGpTcBk1TU7w4kcIz~bqBkwG2v@|mERS94snJ)%50n>9tQ`HSw`7`dGXjcXQ2=-y79~pH9(f?Tf$#ixfPw}qfZUAThf8RQw0AW5<&j#&_hK9` zV2WIWxT~l0)86I-ts=oNaAI<CJ;UE=ljS$b3OS>tl~K7C3B|u;txp~8UX#QEbPn~hdkr{4?ya?$N&HU literal 0 HcmV?d00001 diff --git a/images/try live demo green.png b/images/try live demo green.png deleted file mode 100644 index 2df56933bd319710f9c234229d1349c3612e853a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14424 zcmZ{rbyU>Rx9CSu5D^tnkX9O{kuDV}DUoiZYv@i91f{!6>4t$BIs_Dkl-f6mu;iqZsk$nQWP5CU15H}4@3EE@>q#_iiUpybcv zf+Gmz2}JhID^>Tj?O8l;)s?2}{opP9N&eTbiLe5F@P)CS)h@jMhAP!e45`)MgwI`V z!{2YhNuZn1rZLY@4Z_X$l`c0VB?(Di-kf}O!WMW_hFezV^~v`2ITgfHw33(aEP&UHw(o#Z&?a~jWQ0OaYk?0qR8<1W)nPUt5dv~nxHwkAR 
zQd7=6AbU9UfQrafh>`xn*+xdwMOUVyFsGs*B&VY5AZ~AcsJdc(=;%w*5*Z;^=gPcg z_fA0TYVfm^P2$r`_JP>U0Qd@a#7KHKOM!5_Q+4yR{ARe|YnFJ*b@XirRvH7hK=o5$ z?y@IL&ziIBRJ~taujgQUh{m4uC>dFpNM`Lm@slv*srwUz>V>K?NpBahlkMv%Z2Z<6 z&27)Y4S$CNv5%GXTnPG==gZH^=raCgqM9oEk0-R}%NI7I;1p8-J`?&RXaQ@=8Ps&Y zRza<^lVnuGoTwiH(Kkbeg|U%SjC&2A1%`za6LF8rMH%O(;`*0{QWceMU8{NgsD?lK ztkP1(oTfXdnkn-V8{*6*XQXNJ^VV0b%{+Ru(mGBSbC;I&zrbF?1v$L-UCWo*t61_-N?G#T_ybtm@q4qxoB5ay|O|?S1_#*^M8` z@qGmQfiEB*Ghy_BQfc)f@avG|!b|bGXKZ$Y1RFj#`8YtkXECNsr+wP`uxl%p+*dMS zI6NdQT#hcYIP9s>F=#S^EvlP*0o!FP6bC2!dgCo#MguOrt2ZGxA3#qw;2Qz+ zb8h_NuNIV2&x!>`XQKpL%cTkHUlkr5%%Xu=X zClwwGGJ2*v)}4s?3#tA7)TCKFNUKNnWFEiiUB9ISF*<`?ig^1Zy=7{wREjY*%GAbM z`^}kLrqh1LoEwmZhdo>R0SWzm7B5L|2`_A2vPTwiVA&lJi~dWQ{ZeUQMl-&8Y`F1q@eChxv)qMj9v ze_sSxmVV!>u3u<`dirBYOugr<*-Jw;v*T_vhgF^g+THAk@K&?-ZkR-(sF!;?HpR2h zU&JF2$R1u#U|_EuO;`HfRxZn>m}z2Wc+G*c}YBEDq-S&(YtY znIIvtsb6!$9lL@d;4D2iQ?Vfsp|AJuJ@tn2^%V9ca9a*6?+hsH`9UKJJC^0}|P_R@--uAjJP5tyOghnK^<#??@>@OYI|BoL{{x1FQ@$8q>>K9E6?fro``i4|80yQUJ8r*M*N(*0eOiF<@&o8WZ>X8Ul7=S)vR(MR#D0- z>^!OD=6j?osp6A?$A=4nB*_?QE?>3WZ8^A-Q#toR4Q&4z_uu}Rox1C3GEj8us0j2T z%&K%RgX_9~i|cx`To`GGJzak4d(~lp&!-FnTPD!%XxBgSxrOY-EQg%mnW(r{Yc z#c>3xGmoa&^v8lgvb%(G%EPV}m851TK-MjnGWw8 zA+AAR%SpmlyyVTki$Zy-3H3ypbl1ZKxA_7)X#a~Js}RUXTV`N~*N-o(?NSornx;xJ z`>Xx8dk#K@`ZoSe=TDv1_DVK%f&Ih+pScLI6%^!)Gzd^u^A{d@h!T=;OJ8OifS6 zT^kZdAPk+#j7<+oCIo?@mDE0@qqJDFR|S0#66tqdbdk^C)( zIG8;?BkY;))O8!in2O_LMs#EFKVig(8s)$5$tFn77*p@2NwNDjr@I$tbs>4@A_grS zV1W7b?J^1b#}P+hEI)VRmFx;w$#;8BZvvwcV?u^}(L7TqhHq0&J?*=Ru_pV>4?%x* zCdd~tQ14?dZ|)64X&2-dvdt?*NCjAhdcvL}q!L_CbJvQ*SS+Nq{&~EYlp(Knhvz0bsB!s+SAD&<7(+2|;ycNyr`9`n)hfu+t8 zjt-;OLJmhRI~aPc?(yH}9t5*&&0CHrStg9cn#P$>*pH{91saj!wAl zXKCxn+*i?Y+fpH}5KN=?5hhH?cRO3`HCWU5(){xZyX0V1Z>Xc;U~`ai(8f}IAN<^lUQRa8afPGd z;RFe(ZMxjQHl;zE@xy8NY@|`ooDx0Xi39nB%*VvGbG~tu zMmUBH`)`N;N#`HLNZ2oX1|7ji6A2If`18LPibZ3F&KYF#9I4sgm=x;|T?-evcMBA$ zI!ga??D}9ht*64bY)zV<0XC&Q~Nh+29~B&Rvm>?U#H^r`{5ZY>HD>}Ht*yq^6Bo%v6U^_jtS7pS%y0cWR-dJyh%saPSoTy)o?B~; 
zU;KFm#>?$G1B&#K9gG9nFYvs=M@LH}0yB#NLQ%vM ziYop$&yQsc$?~vd7rOqN2#l}I<%^NrhhRC9SLF?b-T1&RLFyIPY+M;Dd^A*q;`l(r zoWcE*Tk?j}YI;Ip|F#?apY_dE{`Weu%Gz&v{6Nt6;2BRppb~ncQAO1^o^cDPZkMnt zUyC&B6g3T#+_s46qt9jtl2W(uS5OZ!Z}azP;@QFBWn*dRLDwW`eXJCgvWuj(j7GMFDn$hnGc1 zly^C%sx3wBjs2JU+=1J?;&$W&pFW!PS!f#yYwCzS`R#Ta(pqENPx;MLx?{cB-2L5` zdOn{Da*Ba3R5#hJrG~Mlqw7@!De=M4Mou1Em%auKTS=I5fj$_>6kysi(wDq%J-4&>p#y;$b$07k9GaVoFG82P7xSfGAeyT@;*P zFL=j{egdvf>;ylMmsNT#wt~h&_W#%dCZyFXH8z6%fA&XERvo{{)-og)qffoEFzt8X zYO3^4L(=GrKaU_@5Kb)ch)T4LuqD%t-uGN7^i}@)jFnl_Ycmg_t~T?o6rqt=z#rmj zp>jFPMaQ6_mn`&BM^#AldwvK3p7^F(kq$Y$dtdm3x0&oi(ZOuSV zA*66VXB?vs@`PMq`_<=pFc}|J$CuDjp+VDW^4k|16P4WOX!`O~j;7$E!mnCI2F~A6 zkLC`0t+kQ96)#3Dk7+a#X^wjiC?clnn@hFgUyKw60L%12K? z!-`Q<{NmZRiFs&RSNZhWSn_<=XTz(HmZLQUz4BXz|E=sH;#D8!5e6 zp)l-lG_Hg75h=NY`o4nKl(3>rW*oJuAKS!wZ0wYpCHBZni8n8wn4ToAu>RT9TLiqI zSsPePjV5CRm4(klm^*Tzc9wQ3xYo}{fIYQxSX57pRPb8k$Ll)X zzW5zwzh$cv`U^nY1aR`V;9hN*s_^+?XMI zPbmggVkoDRs*=jX*IeW2-m1#W^7OD#A7b8xpbwU`Wa0N`b~y3w?)|dkzj`fOba)CdHbhR4v?b}AZ0ywP z*$`(=cC)mG7#{n z2f{+^df_2*DNs)V{(q*0i^>aOYgpwv}AKATEvfoYcs8J4R zzKU1zVa?x}URzFLKWsBIJ z@3wCe_{=|lk6WNG)TH#sq}`H@S>TUSX9Sy+Z|165xO&UFpa0^#f=AA?8xpU>kHlPa z!R1u#5OzplZ7sO!lub0^N6;kS;E>$1##4UUOL0s_uW67JR~qyyr-13iZ)Hg3uCpl6 ze-LZhnN`1Wuhz*ST3P@WltoR}urGDx5|j~Gx@?k0{HXkjbNT)8yK8(AUr~>nhhr0d z%2mU-6eMFMCOXm`d4-8mdfD6N2ItjBQNW=7_L1JOjNbQUhLPCZsD#|thl&FW9*l6 zOV#f0Y?Rzc%L$eFx_K1hZ(r*M3L%!=h?wQfFo9?BjVc-jEG`GSx}9hya@>Y)zlB>R zE*#22JUvEiO3jhS!6E&z_`TS5#3L7tmbNWHX&s~yjoSDeb2DRQ0)LyGjB2L5roN-S z$Y`9kti;>rl-dl`P11ID8ARTGsqXx*AtT-4e!Kj#KR=Vxm>HKmGkP zST%YSr&|+5Q~UbU;-?=x)exH(WaI8mtGCcCubn@SQ5Z2`BXOme3Xi@#4qmcdUPc!;;=}!XS0?xp z5u(I9hz+P<(G+AT4J?=z|~g7mjlnw!4+pr&OPoyA!d%EQa+jV@2;%--GhIW z!<+QtUu39vqKZcb&!26#AjQ@x9+gwY?knD%O8l|2n=!F8y#Ib^%#YePm(Q%HL3#Tc z1jq`m*Tp4yYvAN3m$AEz(e2DO(R^{KAN%qnctS^%0&f;lL!m;VIv%0-+AuOq*k~GUP2~GaVm$$ISDhivsnc*NSCy#=|3VIjq~??QQ?wsq12W?ERIG zr<=O2wObzELOE%8dXv7~&1=lU#d=jH{?VgyFQdfR+#_A8!eVF7#$a7e-*mTcu4|?F 
zm*~&l@UPerX5QVKU9;%4un(>rFrq2nF}oNcRgH~UM`D_|Ee(E?yDFF2&~h85>C1p> zk;qP%V2aHXe~q?mYCj>@EiM%L1;=v(uedVlh&~YLMDe3r-WzB}tX~NV_UDrRjfvCc zeIkThRrqW(UWF5BTHQG{)*r-+m&-(MJ#tb+1TRH(B=3GuwJ#tip4TCWkR6M#GdFYU zIo{y|sXL^IhKOYi|I@ z@J-xXaPSXSe3_&>vfL@dJ#8yt8vVhobDPSP)Z}EMjufjdt1EJDcH`44p6cXaPo@uo z`(>rf&YqA8Z$7dImyV0-+wj~MpXTrs+2hNUxsAG_RdaVST-n7_U6(f-<;P z!_)m0RJc(UP(2B3Ks=v*=5Jbht?apU=46DAHWoe(*ajA|> zyFVvf*|`&*QN{1^y1C&!{`=-)shbM!k-#ccQ2)46+kfCItr%7Q&{nOg*=Kl1uYrx5 zcr&E>M*UMstf-@#`Yroxi4DgqMhC5KG}hnR=02Og8+hnq?%Lcw?)Aj&ySmWJ-A_+< zpYFO0oUE`LbS*E7m>*70nL~%_e#D%{pC)ea-P(wLW>+@s!$ikO)4ToB6_UnQaHv=^qxWWEF`QamRh{& zD@zgGo{Vsh>sw|QGgtUls5sTRRG;Q4KO6Ig&uK~H;N17`h5DDS*BVwMiGHKD3tPwC zI(7#psSJTgg|DKr-;}M)to+(*$bJFu59A0+#AosMqC~WPuDYa-Lua4sH9zQ@Egf-r z``d$(p6UCk;+EQdPt6x+8vzEziFax&NMGwLfAZ6M>(|hfbnrz6XAgC(-(}t7)#c^! zTCCXHtSPpl@w!AG+lQM+i5+LUND*~4H;lw3I@u!7Osru3Hb?^0M0XK+D%++VmhNDG za5ff8?SdC6es;2JwO?6^`kb9Q<4ff>lM!!M!;hTuI?GnLw*Q-_>=KyOfbd)3%#!mm z7`D!G&HfO062X;HW2cjMyj5S{=Vs$)fS+>mI`s&qu%S7LwaTEy#>~we*ospg6*|$D zojRg|J63yqAdV7MB?uq&YpgB%{!X;fJojlTdq8`x#>>bNNUXNu`LvL{4(%_J4TrOZ zW#4w3EODnZ3iy0KYv0WJP3ZjJw$5B!H8rQ~J~vnbwZrM_C!@RD3Vcx&1*eZO2GZQ) z+$RJdPlM1Er;UA?Why+QnNV~LkcMSxLE@ANdC=GgO`jOvziZr@A<-xj(iKTm$P z5}G^>C&?A1p1LeNIh&y-yXNa(_`u7V07P^>s$zIcKgA!I?va9E_3LIV4GJL#a-c=|UeQ2}Z4y7fIR?D%3Z+w}hKQgSL2BbW!*) z`GJgK)B0Wd=d@kR*c!536q#A#lP}Fie9f31p7m5NvC=gwMI41H?oC;H#Wo?-r;Oi) z@<|OAoFsf~KI#yg)qgH%X8JuWzp03b-Si+hpGRQD3B;muyu6=nrx6x`c;>;n54f6- zelmL9hG)Z>rkde5_G1a#YdQK;6LD9b%1>>Lmgt-p9sb&vvDrsi$fXD;O%Z7vGIk;Q?$wH7_VIxtO)KRci&JlBT?j8XCL9v-=b4m8 zhYHBIj_x91mU9J6s|xRnZ8XEKN;Hx$_E44EL5ZdIBR$)eWG#4D8+ zwhtrU^vQ!O3nn3!wR`)q61#pzuX^0h-`WS$k?oYnW$s4OE?-GtHqi~gpYoY~j0oG# z{qDT8w;f(}5O@TeY`Fb1SjuV%<^8HHyLW74sec*UE0%zsvf0lu)7E`I-?&pN!#6XK z^|$`85qckvVm5!;102h<7HW1P58i9{z1#?d9Ez|QfisTC&>y=bPOERDkaQWZ%)mTAHl%uq9 zwCzJZKDK(uG8i0{1MYt#>1l+^tADQd2#{7!Wj`qYA`QDC5(hPBp{?8V;oDFz-!j6Z zTomFFRg(_S?q_5)nMzOR6Jb=Ks#`Xnn^1*HY2QzedE`hacf3edbwvBt@^f6p;e6F0 z>kD&~3Nk6DFwB!&ttFU{kA4M$^!$Rr=-PQtR-5|WsQ 
zB*>J9oR9EcotWVY6=CftM6c;aF+e7~ymK^G+ey<3f7?E^{u&ALu8I2i_@Tc@z#ZIPrF2DAx z3kX5Q67jfrF{CGRE+};>YD9Bciq9W^9YZf8327@esT<@5rl*&DbJXP-? zd?wU!^7>+1Oz283)k+O2SJgLvyk7eaJM0NC)1tJ$COolU@~e;Kv^6h%`S%FKfFh%2cAaH7FRF*(8-~i zc0RvtD*R_V%nth^OcJL3MT9i`?mZ~yH1eju+_S-+IB(M>%V6ENIakqI-AQr4w`LQ% z@oMMbTDwB|a;H^0(@sMpIwLhaKSa&Uq@=I;eiIUxo7z%dw|>V2n}u?-_vbt~f~ukx z7)Q}7(l`e_DYZl;RkTYpzB3$45B&I-iJ;zzHQiI1G}6DoAGHWNuO54{pLZio2QOu% zVoUO<1}CE(Zh0x`a9g@1`QKfJS`4@Nuh0HGE)l9ykK8Tnr{RB85D<}jGJ3S!%;zOw za^N3#Pk_H-Qits|y9W_gx)xg-7y(a=19U7op6GkIV$U$OvD}~mp1tSKFsN+g7vdM4?P-!oMTaqR|p-s76DyIXFN_c{V=V)E=jkK9k>gWX>~6t&c#F^yM+Iyj%1S*jTOh&LfR8M+y170uk^U6lTAo`=BomSpfmwV>jRz3=^&CqTE zcyhW~s1UdVEDlVn5yjC^317)fEdn35&_7c=-?!oVdBdWn%LrCA|6qs8i5WxBU3D_0 zH8yS%6?MTumC+dsH#Efjc$;$KT$;(l&V+d(D1DTnKA<|66J=V6z^``wyleH=rL)qb z{&sbXXQ6%QRC!g7@){D?$niA|bX!G-{-{m}ryDH0-^p4##mJW3ys8dm9B#BD`{foZ z??Fm6rCTIHzvr(NjpOKK(kw6yGPCLPLB3M=4* z3STK&)nESI^Hj6V&u3$#pV^tJ$T-V8NSbwIIp{h7iP@l*-PR0QRQ;TUbLSDV*B&qS zBVh?ysgvO_jWgC+l89B@&yeGtncOuLlaG|AX}0g+HCAa%LzNhdXhFE?@NkQHj0Qrj ztsfID3XMgRUrk9xX{B>F43pIT4p-``5T0yJc9DRB^EY2OiVJ%3i?!RPPk@3)F#HLz z@=3;+0sQGR0oB>)^zvJ*bhGnr-^m}8UZ=nUr`~e(MM$<*rahVv6rT zKP!AnP36+Cx~9P6oV{YJOVV_82?7k)5f}c$_^vJBurC_+xXaWeE#Ck5o9!k~_it#t zJtv2-LDLHq`SHktoIHQhoc13H4gq8>FK`e-D6B?we$Lk&&tXp8lt9w#n$lt`f)KZe z-NEfe^{KPWo7?CQBjyQ9X0GQiXFXlEshW1?o|%7YrJa=LPJ!X@alPOhZQ`L@U~pVx z50%^0yOWb>W}7(}Bzh3_5)*W(ZgBK~vOv)L9jWOkE{`hr{PQwxzxbNk-}c9bEwVx3 z=IE7=g;VBDBhrOQGC z6Y*;%(kq|MM}oXMD*R%z8m^?ru+6m1?6R`;nU^K*d1vViCq!x;%p#2qgsrv>ciU{n z;%L-B=0mRLO(5zvm-r^XY-!2`aiUj}?Erb{n0%eyY&?(pX&b{gBhR;G?a~&098-lr zbWLjNJsdUTJRSG$CXVaGLD8q+9zVr$I0E00aDM)~=jWSWpONn+%s!NN1XF9sc}!s9 zb{sNUi5cRXmB!o2vz*6@Q1lF*EjPZ(>#RTn(V>JfqsBSf8ym;psfr2}TB6EO%z5bf zb~CH_7JQ;AO55<8ym)W}`DO)li6{SKb?%Iv0ck$><0{$;=YBJD+eaAa;8)6bPC6}a zbNc}k7cDEl^xTY=mVgAZ4|WlC`l*l##Ge9JqQp>F^Yr4H9-AL=dls|fTU#=ExqKA; z(ZQ!BBkW-(s1U7jowT^9Lf7%QLXck@k!HZ{Zzw?i{L@{1Lp$m*P3E`w9D6gJ8ntxQ z0inv$X4)zdyROHpNAj=S-g>NK+~nyTa}5+$26{4Ae{#T)sYtl$5RN40rdIh>)-;N@ 
zJaJJogQsm5Uz+=^#7ZZ1#9WNLhoQ6aVRw2$9JEG{zM}h7=_*a6c}7hoF9byBcf-RZ zsDAUvroM|B=NZf$rOM$y{d!rfmqCv`@nNIOEQLqfNa~G^vYCmEva%1aKSAChxbVXF zU#U>0g_%C7xXJi8IWgXE3~S5ryQ?|$4a>*B<8%~oBAkdW$(=+mF%>^@&pL9-Sff*= z9)wIqEKQx^+krIJVtOj)gWBQ2*+w?U!L#8-yGGc?Ca_imR)znlf=v=psS^OOk3s)>@G6HCD(_v7N!y(y>G_turN}xK z6C@rpX%RQ57|iFtPZV|yU6a z+xq?s-^r2ExM^`5aZt~09X#*4`*XU6Z8Y!H3*9z2$ECP#;O1R0D#k;H7k$u$B*b$e z+6h|)dcogy)nbiCqbO)7Ba7%dWHh%!QgJOeH~E@#)f1==Jau}OcKoh)h@ltH46B`Z zCbj&buV1fv-dS(!UAFa^Lztopv8^e~jMv!(k$!5vs+ShS;?mB^^Za{&(R-UR zs%IbiHLyp(^?V=4@?T&I9qsrC;}OS#WZKc*yiqjr?h28_^6yHdS|zAs4aUyp3g#M zTl=xV=*1}$8duSjz3mu)LnW>|{eC@io?pr0Judcdd#oMC{Q||3f zJ(*_#{m^tW!s7RB*{UY*3J@#lEnW>)Z{fSMj@agG*!^d~F<6_;;5}dI_d9BAbuB=^ zt~+=+d?iHrKz=l{swC5MEeGIW=6$U~yLEvVH#WQXz=u65q~Tf+Z>qn7yi)&V8sfj8 z1HdKp$!s))98iNgVx}8ihHI5%6YOuh|8hTl@#i5FU>yq-F-`OVlb$@Y$9CX0-|`8Q zzuzP9nv~UQFxaS5w;Qq0F#NBj*6~3Q@26clbo84Vt)72Fnges21-wKFe_`PaE(78FiiY&tML=x0BvK?Fhc?84E zxnn50Fm3g{VQSih`Rqte3H)DOfYB(pR+^;9`78AHbQ`?k0O-L-9abu>ZU%N-MDfr; zLgd%orq399!am%D8Tr2KN;DBWwN}`x7ynT^x~uAj=nv-$Zql8(cG=-ztZ8(U zvfOdP4Lzl;j39o!47MQxz^v`9@4optnA5L@x)Wv3e`?rRSa?MsAOZCOSj~jY(l9kHChU%Dt22fsh3WRDfSfcV zci1<7sP_94l!w+|4qVUYHy|Hd8MI`$qoakhq`kZvz6n>C!3C`jQWBe40Dtyj9UtIF zNxD*RdLvv^c9R}HqSQ8DP9UFZXEed4!7#4>hH>K;Em_xkMP4J}gleZyHow_jgFoTp z0O#VQLyBdTkXl79M!qQpjDT@|sgDOiq!eID-IyX@Ybe$ESlP(9!}-OEG(0B)ZT3K> zZS~LsutCmFBuJ6DJJ#Jm+jQd-YA!O;9Psg=0L@8-$^1j;BG=&3-_!eaug{rMm8e>J zH|;QrCSm-?%dg((PhX-JnyP84-RwB{X`$5A6E}Gr-C6d=4pTz@AZ@pZGc4RKQ#JDD+WrXP41A-oKXcvRK831+Vcv|{~702 z=6~2&$kRy)?k^^URX#?;xA?dKjeU%PKwlyhHiNgu z{1v=4Vb`d?Ug~UyRN$FUJ&o3V=N}7Xhvjd3bmr1iYXBXu3}{(2hKqHta$lpwOlh5j z6=P`*rhg2tbp{1$xScW*d}%dZtYap_i7wKHjnU z4onoqNPlLFL?n6a@+nN%AXp`bRPJ!>ldm!zUHVx(*U9tB9i5Z2B$?M(V8LvcYMVi3JhK>RY8G zPd%TlNE*>2>cxZ@5(o)(Iuv<1uu^G?EXtJPD(w!ODiGeS7$ozSOTJ^on8wOT4WIOb zRmhVCEnw3rV024&9KZj;KbQJRdK9TB^knJxG85Me80~Mjs21=+4`I9gf8WQI z#62#ATB5Y($A|_erpO0J=E%pU#z;o;rUI~hu5Wxy#=s>%UcG{T5dDWd1Cwt(w)*sNjD-5(hWnWAksagfKp1gbc28(Eg;?9aQ_}X 
z=e_qn&wS#;%x3Ss*80_sP*Ik_#vsE0fk4=DvQlaw5F`);0t=#{0)Git7d-`ms6ld4 z;u;jt8B^oY(YEH)IsmMu$~0zFrpxZ@Z97K;!DuL`VGw!Idt18 z&DEbX34W^JH9?g}dWHA;jM9Db{+RLo?bi2-R|6um=RSJQH7D|3k1pBnF3+*R5pE*} z=-pHj{%E$Btv`+g?&f@0VVr(r_q3)1M`+FXYa^WvvOW)KZn?TQ&8T;K8ipH0JCpmI zBSxk({BQQs6;>u78>onH9+du;VuBmErKn;L;7&^8yf%5+WGeaK@7*z)5KE-06x2`o zC}b6sZKz^;1yHfgBI?~g^~MFVn2IcNeUJIMH1SB6SRHVYxEsR~ipq@5Zb z9F!}8Afc`y7IOy)gHPI$)=!Ew*(2l0RYVd&9+Kwcv~YaG3IY6PFTo7 zL&I+bS(6-J>oT1Y5=*DDiSeyII<8-w#Mxc^#nL9!nM&KvB^}&1kXeJvMs0pr6yr<+ ze}kXSgeEq?eZ~Z`n}z?gvQQ>2ZM3V-1XT$ zoRHXrF=DJ!XclZ*0YXJ0Wkf~=n?3|5T=O3jq9ACwezJjBk%k(omsoHnxa<%u)TVPG zvV_%=*X2`OgOjF}Biuiom7;tpXR9v9r_Kce$e&019hcQ%ITL9rOT0*v)n{y(K${nl&dWzM-fz?6C#)9l?9Ld_YS0&Ao zJFqv(AL@C90U=8A{b*8vq`71jF4vG96ao{(FqQ#VV#0@=NQ+p}CQv@2$toc?L=jnK zd;xLAm{VkGm)$c?A+RN-ux3AV1CvB)qI2_F z!5I>c9%o+0Hz~M7&M< zzce^NVC|MaTr!Mf^eIh66#Rz{iJwF?zE~nYs*UXAW})p?GULl4*PkdlH{%i*`L>O7-cs^yD?#Yls!xTA7yI>R!~_#gZEl+wzb zO7R1~R2+kvf0B#;bKz2UzQ!OR_DC?4V3zF||t3cZ$V#1_4v`PnLQg&*u^kd06RjCam7+YX_L#D4nqu!&pCwQn^Yh+hU zzjF6_%=J-%f9M6(xK}k{br?0CK4c|M#|t_eEQxK+;9ClUe`FfTnD^!OA@0s}jZ;a6WCc%50Fexnso7U21)~DHIp? 
zkNJ+2NE`&a)MLF6{hY2U3)67n8k2=sb|2rtg6%;ooD6~S5NNst-9VT%_(zM*4+ZmaoP8Nu+xw8C308*9He)6womhCV|OrL|NnkJaooCdT8zAvah( z8%RkT?a%dp6|4CKS#kiP*h(Q)j`rpgY#A3RH@=x7m|6KN@DrZbFt7I%#r!~-JGp}| zg9^L$Yz(WOtwqyMk0~HMtv4B zif- z8*bTkW5Gq(G0-xp?~VW4Dhyq;ZV22wXn6nFQhdtCJ^+Vwht$5wmv^%AN+saD%(D!-6l)_>hs@kB6j{g=;_z8wJ zgF`znBqm3wi5_5v()lYKHgD@0_Wl5?ks^5Qh&g?i`g0Z(EsR6uLuCFPu(C3QCu@dO zx)~*+kaZ%h`XP^R?Zp|@i>#WNxgkw<{k-C-s_Ip+*Vq63Mg~mr5iUo*8*Eh~Jf5~q zM%lN zt04LvspNqhY!h9q4h>`5?TmHY4!X2P7Mcp(;2KtK`#N2%_!xK8 z#y;ZM71kNq_`#~OOIQ7PakBYhY5qyMze|u%)8XYu?n|MBbj?wu(cq9jjYIJ){MT`H zOqi8!J8!}Id(!cFAsQG_9IL2F3-O^V9$rLiOt+B?etFJ4As!7*1O}%dZRZdkO+)YX zvvaeCu(i*19ML4voCe(o`eF`vMNI93+J*cq&m@@(Miqu=vwrPzMS;e-wm+vdiJFV1 zRZ&qRSTUfnFOI2W`Fj7F1^vtTfIDEe=nf4KN`{`9SEK^nA z#jI0%@{bN@m0{cg&H__x%JqRYbcuzU zsCK*<@pjQux2)0i(@RAGeNkyp|GC2AQ{u+*&1<6(1GhQusMXCH|Gj?AM^^tk@g}^$ zzGKiM85PHkF-xc_VE~^rnm#r4jFdzl) z14~AoM<5B#C-}oz-0KbsF~$=f6x>cA)&ZD*i-AV?Ijw|Z6xufiX3Xc0a8*b$D1izw z{46$H_E^pLtqAa3WDLC3XRvV_u9T~Mp4ONKTfQzZSe5Zmr5tt^za#4 z(qDOZ&M=;|Qp<=P9&yJB4XBE19fHrYCUh@y!^vsM6sO0<8ES^3{7Ui5aNpLR&Pc}^ zNWyLJQ(5LvX#&c}*AzB}TH0l?FKxR~KGGtUy7#dm#W%pqT3Jk=s2UvF-o_^zs9*YD zYg7pS!3rJul2QDD#Wg`_G`R21rr41V5irxnBr9l+tWbLT2YZXfr#AnfDF7I_FHZ35 zp&(ZLf_{MocIX^il-qUdhuI9XV%=aoU{bQQGo9m+fmmI?lSCt;_kz7K|F+=X20BvQ zaja3C_B1FXi+y&hkl2750iA>o={ISFvml!DqJ%DcSn+eIhF$7tirvwL)0NwC08kY(e z|JGgOFeU1DC?zmb>8rF zJ*dQ7yw~(kRfftv@0OHQw}YC5y69ft^EV|OUD^sm2trJ+(8B#?;54k$IJ5+}AcEx% z%I=LHIJB0cV&jHQX-q-@W)KNm423{RRi(I@4{5 zTloTc`IfO0Hd~qC2Ezl6`74E(b~Xwuz}`Ml`o<$Tz#2zd`_{vc4Y>Ns3^M0L3geD=WN(5v;z@E$>v+0itn3L_K~6Ey=f{-e&Do%D`;^GnjFRzB z*G}6+vYs@{2j&bVsx=Xu|2>qHkj7d{sVndFh5pr)Wmp3(4w3mE&fkRg5)Bn7hc$4& z>98gURdao$-XpreTuO&9E{s+}pBRs}eV3XLI^smGBQ)Ldq;!|M2_?l_5RD3Hq7tUa z>fV1}*}hD+7Ko+@!z<__nV5H6^CgmrDNe``Cl2}prK3YsoRHOr zAXt?Y!A-4hNXxAf;S_qbxUdRLv1>OR`)s?1o2}xEY~MrZbX!)8h{ykb?OJ)yU|Dzc*0N^Dd!SMv;bpEA;ig{cLK9wk z_3M+OPaRu@Lsz|Os`>2Kt}D8qtY?b$J7O^z7oVhqs^%+&q{fqa{0uxdK8qUOh5o`C zqg&4qcl&RzR7dIR(Z$62s|g}uSil^5Xtr4IEG=2^?aiSvwHg*CbDBtDEWS1~b5 
z#zqQMv6qh4>Wx>a8r9Z#67ck-Rn~YuYcXgf|Dt&VcTe_@4cXSpm5aAfL6{&6(NsyS zihLXHmM~+CynaaI%F%J-8(vNr2L6$R&|}4b!~F~+aSx9w4wr`O9G}fOT?MXzTWZF? z)$qUdUSE)t2;Zh{s8JXMhvuPmp(L#DLwQ9RJtz^(xe0M1MAH&8*<=jlQaciHW3zca zeWi$cy;SH)wb4H2m%M{CdiAMdl*j&Vb__Do+zAUDs;5WQ`IoWdzM$FA0`*_|D4Qx{ z9}>z_BGgHX#(CIxFQC4LD~rvsIM0;C6xs5H+AdNm+QleJ919bnDH;GJ#bb;mUTTZE zt<<_U{E<@@wS79wm0nHy`c44%vHx8arF_XkCz2bO;u{b5eAUf;nGju#%gcCu)vAIn z>i@v)k~TM&s%_T-t_s_R>E@7yCMaZraA9mi*wp66kpAU;bZp#{V6vf=A+xWr0KD*r zbg+23L)f^_E|d|SOU|dyBq?btrTaeP3Oq9@m&lb_;%Tk+NO>A1I(1gy6p|o?SX1dE zD9jz@BA@Yms9$rMl`>QpE687|Dtgkir&FnatEmWC5;ZTODt&1s=_yq^(s-lGi_-4J^syOU)$mr|x1C+Tt2 zn?gzd9gS8Z=DY$+BeejYt^(!cMrlu*X$kVPrM#<4zQys$pqk>%te5Najz?=FEwc%j z!~X+l9k~f)S&hsecc~|GIgjo%;)+EH>XgLDI{=Yn?dJ?myD;u(lMJE2bY3p;eE%%M zTRPLQw?Hn+MCthXr-deTNgKgje{(@XX1wq>Y4t@7{$9zb*0%hS zY`gEYqf)aIF8fCj$=RfOEroJSwDG5rxh(%SlCafh-eK$IICD4>_7hpkzx6zVp$*o+ z_&ZSp&|?6en`f3v2dyy*4DBl9Nn2{^2hpga|>&}*dpL25JBWt5()Mktdj zQf@jhIyVOosdnZx~KG&TD6@Z#!BYs5I$7)zH+;Ei8PV{p~}xUnIZt z+Ry9#RvA@QJRZB5{CgHw*6-3Zw>wkiRH#^_@T;qsWL3m?pl7j$tZ8=?FaAwd;BSLYZIrB;gIeIc=^6y!HKu;mGTJ18DKF2}qZ%n#XE zTD5YaFyQFArqrsfvosX(xd=J&n`yQd)3xnf9bT-+6{FwP=HtP1-WMdktv5%Q#O#LA zmRymMkr?yfo6{*H*CmU3)U?7V3H-OdS0`UnIL+Q%ZGOmWXqY(_k|p1tt9kN#()HWJ zk!Ns5R*{@L(PS1QuTjrr`DD6orS(5E6@%a-9V3Y!IzR3xlgk7RuF5WyuWyGSJkSan z7a=5OtTD?*s`8zZLjDQory2r*qazH0JA5de*KBjF;A$oQg~(T2(HnRadkg8?w{OcVW@_@uXQ$juX&7nx z<%i4sPjZPA{N2Bb7Id+o{#_UB5!VFMiTcpBvxlAI&CVV|n!0^vOq@|UMXCMbN=BS z>c&5UjZ1c^C3+#zBjC0eq-ACGYxCTHRM$5!mpxDTS_9BSe~(RHxkr(7c+qx?(8lWR z&`5IJYkCv6<00N{lk6nQkjSFrCpQ_$h^vek_ck!pcoZRC=4=z$b>bt={Jx_TFODa1 ze0xt|dwo^L=rZy|^VPo#5YrB(_GMIN#~mdTNXw@PCPTGm+sFQi4&LxZ$0(o(HUg6x zeIQK8$!M*_ZssVkADpQv4JO(ZWb?~)uFS$lJHJW_W&EO_`8 zO7$}@R%8r?#s^KLb^CpD;Z1wJ^ST#?_?B-k#RLUYgj-Q#>lsyicPZa$=!*jIf2V`2 z7h^2ty7LsEn{uEr!Wqn;{I?b{ApxiZkc7#wQ5;i=4|be&N_mEJ`CO8v6pTX8@FO-x zfDMn*`SN-x(|tiaoySf^ODk+;h(QS1NWcVPza8FzBZx)!Jo-Rr{c>A#Lm2M?G`NUq>-%QqmUGOU-KqI` zUqr%xD+NUjZ`~h>Hbu4Pr$ZTei)+szpFjtZFw)dnfQh-N)uNQg(OF&-`{ 
zfZbNZP4QIiG@jW}Er8hRxJZ6|{DtV-DMN215=Z)-wUKP9>|~bVB6liu8Rju0>AFiI zpT15*K*&n`mM3F&b+YLth|a#CTLT-p5h5vP&5Y%@NeXp&i5b22Bg|&s%YN}Rwz*|y zgY$=NOJWCc(AE~9w{XKTunBbd}5#G?1*n8-eves zWuyYxxZJRr!*xqzr+z(a<6B->xsAkS0D#oO1&N2-uLnq0!rp$wGovvPpPf7DJfgcl z3;R)0on8=n+>l6CNc!qO5t;?V?C0RaXY)7`L~uPnj0|x2$75g;J;9oIAsBA=QP9|F zM0LYxet0qD037m0&2K@0y*W7DzCq5RTX`=#x$c;5=RJD62H+fJecXOkQc`lzj>&F( zy<0owOr*YaT7dax;0E__^0Tz|Y!g$Jw+fo2&6H6Epy4Rf?Pf4>_Wq|QGx7(Ac_2~w zUF5KV;-n@*Xh63&E@XKkFKUK^)B4VLXSLWBpE>>Ntty)W^pRU( ztc4^bu!u#pbuZ?bsE*&@OLo15W#v|2w8iVoGAMKsr+pboKE${N=>U%? z?65&a83`7N=5ocA76FI=WhSFSVpr@$e)UE7dp{aVD7F-so7lwFdQTg7Yi1AD(jAt2 zsQf36uNlMG6FFYAZqJ{^+16hP5rHk$baW!1lpdgtP;ANTv)$8oQxzT#m**)@uThAD z8PaNAjmtPcFBjM!zEo?Wt`E%lh54E8Uz>$R;?0Fk?Ugk&&XkfGzu5jtEppL29%tLN zCKW(Av>?D>$<>xRsUEMPh^66v%d`$s6DPrk(_gVQ?mQjL64ojG;5IGojyWC>)8aBNFnF>0aIX4*z^7sBL)DoUjH^e) z(04Uxg{S3kT9;0di#X~3xl*qtwrkCw;)*y|zyCK2pkfDgdld#|5`J@g)PL9W|qNki)4OBL6 zwba#vhBF1XQOr;v1JMtjoiHQT9LLZvv6io@0h-xxvcE{R(i7WL=eXh`{-eyG(IdM9 zaZ_Q!U6yjqV0d*Zc++5I_k1lKw==0!#Bq9`y_tP*ne%^WOgek_s5=v2t^#hzJLo?N zK;Mrfq$%mM9^nO~_CmO(ipxxxcc~R~F4ALe9j!;=7~djeYH>YmR{xs(vAl)Md42f% zN58Zl^p0ChI$V|c1)J2_6x#}58mtUH8|iGh-h1eP$i|4hy?Dfyek{;p)LP=Isqn3e zJ6-hdb#mhRoQReaO8U4X%KwN)l>EcnHG?Gbudog=C0Ueaan+)ub{7M&C?DrH;Fsbb zFlrS6X>7X)W8-Tks_{L}?a4BSxHbS1*lz5b4xbOEaVt37#n1<`5j`HY%A6IMn`|sS z_9`nIU2G0lkYPDkjdqZ*zC6TkAC)8ozAwYs!r zwVlNuxQ|^PCaFn}@-BEl)-#-O)`rrPlU9;#1>HK0NPJDW{ z>pf-#U6RIzzu-oAn<`qfAy{Y$a+Mw=e|3WyBE8FU&>M$y?s_k^b^9^mkYacZu-0x6 zi7onF+xgtzo*%BfVg}4CpJk^@ckrw{pBhDNBa%E`{Oy8q&Jh}6%hR===; z^_GB_N)MfPcAkb8vg)6=4G5Mt?su(5l4AYB_ze4(GAF&LUsf>;c(edY39Q{rNbJ__f!A$z=2gSxX(!FgBGbBtK~x;;kpyV zv9gMU6eL3YKO>++jVG?1l9eF8zsfX*WvFz(fDhjNHHz=n}n7yLLt8>{@ z6TUf~P=C>t?$Ba|7jS}z%uybR7tZLunl==v$#y;Ze39wQBo^=Rj*;rw>*%Zxa!S{? 
z63sO34pS6bSU)gN9~Gan4$4UB^ewr%XYgl9w7zY9f_Lnl_sBatQNB^}57A%<(hi~i zbPF+Oe-j0gf`MgYt;HI@51~|HiUEELF$v)oPa6H`C)$zEnbIKn*&p4Hkr7Jd@<<%7 z2_nf*Ypps}4N$ks5Jp9$g9%LPCmd4N*x%;n$VqJbr1W5_8(KTX1K=M?jjYaO>S$$H z2J3zCkGGXJH$7oz_8UP~v(X)GX*jIDWIA*+mAloy^+9L+zv`f(y$N1m`x|8Q2c&pF zXrXn)<-Jf{h5%z*7m7yh=e2d3J*1hW2PWy7;uh}UC3+C|UuV+MgnnPWZvB0?}x0qE+f(O-uUWU*`#z z*ZzBn@haXpta+KNY<_Atu zd$@p{_HW3fkufKSX%#GA9&JIUjKcAwg`twZSx8J8S>9VI4O$fJu6qRH25fwRvZuPy z&ewa9w*n`p-zfu;XEXSn-}KUiZQO#3g@dW^k|RftF|b{A?OQ^F>eQ3@-&(OEsl48u zsRpPksxtp7p<)^ry6D|S-oD+7{c1Wl``PLPB2`rvv7v&Vj9!}DKTxY)p;O>X!V!qz zvt5+24f2~7S0&R=dZb0Jt-tZSC!CYY1aa1%eRl*hCpkPfKsE8Od%Vb-Ac1jzvo30O zwlmEn=n9C^w-{ogrYZ(yWj3WZ2OYR|n}z8voR*s2ryX{ZIAX40>~Prkf<~N{cLsm0<2m~JT+TQ6aCj+fP!{_l$W;&sh3SFT zBm~1#sxnZknbnp=Qn(}>k#(^CWPyHxr2PsJ zsP;g1ubb@3n3vUr1@N;f6`i$Tvy6UHz&Eo6Oltn#m6Z`93)~7pctvm~a}V!i>UVMX zqV45vdsWdk|J!Y&6dpS=9@`)9G%X9qy~+BEgKtH}c!5wuFL|f+=1xC&i;Pb^s$qEz6 zOuBaW&J4GdUGyhDaa>K*TJC=6(3!?<;}hmxh(ELWh&~ALb@u7j!G%MQvwZ^N=wz02 z!YGfbpOlDrZ~weKB_=>oP<8Rxefxp(*SjaZWq_Zk_?i!??FdFSeksDqkKEcS*{O<# zN(ldvJ6t{B%!UgqVFXyv?o!U08gtrsX)dvh1Gf}OuwUJN%k>+85B(NT0#JAHpanx$IdH8?Vv8zjyCl3K-xs1ta zS6Rm8aSVjG!IA`fOb|LNfRfb)s3+0vtv63MX`bYET_~CP>o*Uj1oHc1O1P8=Tp}VO zVZSi`b}ZIlZZ0l3h-*>{G-H&Bi~#7eFxJG{!lOQj3l=0Xro{6XxWokUnUpz-&}>S5s zZJ{og$**y8+T5FcS-h0T?>VJV}I(T!+oChGmVIY@Y zhzovPAt*Z4$XTzD zO>2sCIxIunTHtfz_GQtP4{ zd2|Rj3gW?*XA^3bJ75wM!-3&o2skgE@ORx^7stHn&R5go)3sqio>`djb%fHvK&Bw_3 z4IE}2TJ7*$(SDv32vWXrgJgDBfChybGcJHDcEZh$9|UzL|JfL8-U72y_cT4M5$^v% z^ao5^@!P(XR6;KT#?e5`7+hI=M%v^j@--X86{ zGjS8d1kO)j*qjofl65`^rrrCMz4g>puntXvfp@yjF>k@_p!h>pJ3vO_hY#H|YsBpw zbxh`!XaqE}`Av{&-y8qqi(2uMKkOF_4(R9imw?bBtcx%G08y&b?Y_^1SGvx`QVh z`8^mK8p?7eH=ISkcH7WFtIm5n|( zb0)LR{%oEaOL7PV^Ivz9KK)k@9b}GS_ z>Dmr(oPq>!4qU$$Titva5&4KIbOC$4^`cf^^wQ$9tDm7oVDs6GfBhQ6QBY9Jf z!zbw%eZOm7(Ygw9a1JY<#|K#)G&5qcNQ_E?(gyDGzpe8rsf!fLnIOh&bm#@j#&=9r zg$tR1kQ)TEmsJ7!oyWANq4fPi+iyEz{KJdb#(;yR1(eH8E=}JoTLW#|>hGFGrKQsM zcek8wJKE8toJic<+#3tcjjL!Axa>NH4_B!AN#^Wo=MRPj_Yo~E3$Bf0vT=LHqE#1| 
zEc&fa9m*?A&m)#g8!51VvdE!zN1L$x6I5jIWsKgAr7`S5VL`@(b^#}wDP5#0`6Vx- zlcY&sjMuT~5!DAdf;dUV%naF9q|XYT*f`cZ*;Qg#;=+r>th%o<9nd63cRd+UjOpa{0U$L*T_V)20+w|jW5-r4m~DJRzu z3t$0ejo4k6_vxwbTHja~)HM?B%53Kj6nF+Nj0=KDkufFy1S$uOU6BEpS*d)@3is*_ zWDz5^gn`k_4NU#_a&rC1rLJ&K&5u`5vz&+d8|XG<=1|a271vN3-pYlHjj0L$dY920 z8j9NdV*pT{RMgaR&!zI0d86{0}_jpjja! zOXK+Qt3pLdcaQC7M*Fj&QN??c3?PiJ8*46%leoejBPCqEEP8+WV$NgbZOeS?UoAMp za_zpzhDN0ag8#AIW=pR>JnvZ0bQpr>Ep;a9$W(LH?z9lYh>?^eqg2}(@vA>Aq(wKv}K~PTzh)A zZFK*c<*~R206Mp66Vtwo-wwFTVgU`Rj8xRG>4T+c`Mzbe=XzF#z$`Tz;;O`ByDSXC zXJqvl@5}DL`3;oq$4X(ZcgSGhxfBzwFRfl@w|a!XHmzqsr(@L-6=PgM=+r3LI_yG8 z8a;DHufE_!M!uWzPR>U$EefPtMHQ$Ius-(aT~DRQ(x8Bn#BCzi`~KEy4ZU}{ zn~Qp-Neo^Q1WX&>-CSM({ss~i(@UsYDEYAH?dFS^ZAPxM04QY}XAgPM=g*&64C4kJ zJFs~-Oj-=z(Roa0205Fy^rheH$o%NW6w^qez~5ET`gB6@YvJ$aK4_m&Sy=1eY`5k@ zaO(ye>Sq)q!QO|*hmaXBF0d_|rakRevXt*hKE{3tmMxb<#wpzmZ=fHl>r2o# z`4Yif+ay-Y*@v7vWkZ0k0zt)U3n4KqT94WNJixZ`{iBx9Ir}V&ejUdFp7+^K1cmz? z-ju#G_SV*xS{7$@)eo~+Vf`ufJG$yizSRWvr(2P)F`WuFF+uFixm|qKJ+eEaJf2H= zig=-*Xm(hxZ~F0lMLa!72l-Fgy@E-j$uB>lX~U`J_i%K_(IgqAZU4lUwS&{&Wo%jq z(#YsdF~_N}Wv6bHGPlTCkDl1)Q0_8d2sAJUX_4{ z^n~NrzRajTq|q0o0DdyNa+2oiO0LJ0)gkS+#mRpOPg5x7Q{;*W&y`Qn@IAUcUp!wu z0lT@6+NF+@mg~0xvkVg$AiQPGXSy!CGu##nM+sr;B+RcRFMWRf`nnM>BkiFUKsK*F zoMB|&u*uLY?kys@$Mv$b`DY-e&`99I_b%E%S>jfDAX(%x`?<*_JJ&E%Awfv|Uu^fF zjTw2@6Ygy}I>R3p&|mM<*UpzHB<6w1$Bhfy@-t```@T{OFz*G5EBa<@4cGU&_!mU* zJ_%`psc|Dp1bPV1g4cHG#qGX-EO6!-UajgMQAJIX3ETgXTUnnN*Xb?#9Ag7kb@rHJ zao=stto=jFhs@;8M}3dCeF=|=N7IwY`qO2w5@%3=Rep!>^WPK-euak|q_w!VJ6EN=nkIVErXfa@QxgbA3+WxBw27K(epD>8RKDfh1<+6{yoyMp|Ed zcmXZX7mJZ@02O@VtxC?B6Z=9Tqf+!@xkxonE$dl*1OWBx*eaT(Y|A8sY2xcE@}qHV zcVhQe?Ig71?)1fywV3*OFmPEH8uXYj#r_UrQ+H&7y}2w-1e*#a^(Rj_x@s|oFp!{} zx)lfwnCZ~|&M^UKC|G5iKRpkBQyG2#+f(gI)2^grcbMm)4FNUQL@bED_cpHvHi1Js z71IBE-acQ4KyfDK#ho@S?=*-KBy5Hl_Lb>vLVlteB!`BECaf!>xLJBLYf9#{COD)x zWO!{AH}sz6(&KjlLZr-*d&bCi(pRCj{2t;s9`1OYg~SfYKjP-osOp@i!Y)xo>$bah z`h4|DlFNw(Al)Pdn*4X>=q=2z6w!`3h{v-?G^=Y0=)Dspn1xY1OYaDI7#rKDS&dBGM9qU2_!bLWlKr@ 
zu4fmwa^GxC6nku!)*daV5fFa%8Iyaw$@-{?xyN2_9MQPgYO7evh&xt_yyXC;^j5Yd z-&NryZi~VhS{)KMu`>L#-I`G^(bBRjYQjJJf~Tz%qScPj?3RaV6MWERmH1m&PV|Fw zFV*qjYsU~l`?p)P=cY6o(LqN?b4NGt(E+2f2>`b2Y^^s=%@UR(;uDR$2oT@sy0fnLkpDIslp1nzI9KiqYl|CsxT4^c^FZ^ZGW;28!! zlC|FZNml%tA!(gH(&WOUKl^&5lpEiH-%)j)4@y3C%7u1B>`$SC{9JXn9bhj$}`jkp>sXHXVZ^Y7lI6J zn@=a}uC_ipP8qmw?lExZjks>yN(&oM-bv?v zfOB=l8-OH;Q`vT@HAu#2LD;I3^`FQDYj?LUjo4Gz8ly4dy%ZJ-k|Q?hcX$W6SgC*X^5I

    }=V- zxb5mTHxwED#{X$)X`eFjst*uCLM7A+_OI9^N(DtOenLi4i6YQqW~?i01`?_wzve&c zxWN=VR*75Q=p9pq^w)d+)&5_rM0RcDT3cX`DsY#CXg6WjrzkXG_7!!RWRyg-n^A?6 zWT|5ISZ-+OzHOfI)>^W)wjN7&Ngqx#f^twWuG`JkIg$!_yzyZ_ULTo~p1(TXK5Y`m zSI{ts*JV|~Bf$5j$Jwm(I<;<|%E4N#;pXAV1#}266C(T<8u}r+0#!CsY;>2d#S%1? zt(QX%2fyN9Fw8@U@~t8m4qRf31RfK;;s;O)hmEGSwARx;U<8zoIHudxI12awOnZ)1 zWF1?OKAAJ^vA8DBJu3DEmRq&eTPbMkb3Tx4kRi@S_E(}rRemWcGk`Rx_U)Mh@G2IC zT7$-VPft<`&DLlN{Xk_9<=-p-#XyO0IqIrzTqCiYAR0U4&O5U1`*qR#uK*4xj}qxW zr?pTUy3S=zrC*k<@DlfoIk`9*U_TsFVBdVn-{`(yc783mfe1PwZ`AMe&ASo9FFuJ6 zE+v_W3B+iqLY^3K45M&s12|hCl*$K&D|7(&^L0kSuqtHd>@{%vSV%^MWcJW&>C_za zNF5|EH9NHj`bh-Uf&n61H`1-h2@1!=h1_Den)8%fck8i+@LRGqdk)G05DO{?Zks!3 z4RnzfDO5W)Ab+nt##=psTl~!2iByz{Ip^)+d04xKCejZnakB29JFw9!{eiCTe<8YwOoiiT3pe@oDwPp?PQEts=wESHSCh%t>61 zmSF~Z6TJA}0l$V8nK1&tt>O;{yS0nDF+_`c;cN)3dWB!8V`1(yK~(-X zm0fG}&Rs-W#z5H1mFV)MY`&&}rtg}!5qN|k{ocb9AXf75GWc_(Re66 z6=g3s8DEie;q0`O4LMK+-)DV;a#^cON*6yTGo?{ngIol+c=MEPJ`A;d)&3#Ok!sT& z{{Hql*dy4y`__BlLhVQxgJs3Lm+^%6j(B{Lus_IY-(Vg0O7F1du=YX+^uU##g+Fjq zr4URvcLXw1qeQUu3(bnt_d$XSQlhmejdNX5SbAnZ@AfvZk?PDcZKAhcEJta7d9C#P z2sDqlVZ-;QKlq$Ddk%n%pFe|eC+ELvJZuO z?~Lc)kMbUqkS^B6uRNn@`}zPDAmwgtVN1SmC8_c*r4Fr)E%aGi{%{S5jeAbZN!30N zY4w3ec@1}qWzJ#*FZc%Ir{T)N6?;gQ$mU&XD#?*gLitjJT3GX@K$^fMo)!op= zQ(6N&w2pd8+@IE0KP>9TfoB_p5nNieSl{#&Ft;(~8u&)%f-x0Pdt= z44ZO&ZV=&KE`WhqFe(tQh}otOKRN=KY5YXQ0N3tp?Nla9f0wfvKu3GrX6;C<2`)MK zPDUk9Z<_Qiuf=KF`i@AvH7Aw;Z;wgAR|BFHC-z+oLM|^QC3hNU7FtIby_CzTXp@W4 z_MjWu5C+~!3}Ju1k=|Gw`o=4?Vei-GDWy#vKOu%o4xT`|U2drQ8xi=_BfQop_pHa* zzDyqE0P^v{`AOap(Z_JIbT13K2OmJ-HsF)~TZNurw2-$i^1Na|_8^4ckACD^kpAoc zL(^GB#npAqx^dUw?!i5{G!DU?;4Uxj?(QxjxCIEoLU4C?clY3~XY-x$-{d06Mfd1E z)|#{GsoLTM0(f_9b75DMHbA^4(fm_@Hao=OyaI&`Km5RO{-vjATI|2C^sJ4|Y)1w& z{x3VEi;a#Z%deno1Vnsl2sxDHqZgednPi$RV8Wk8>gI6-hJK5`+Lq`91OykrNp{lb zsjX6)gf6EcSjVUx0{E*E^2%1@b;00-9(bpYdVyH3S=4Jw$}ZBp4AF4tUWBko6 z#_3WHKWEy!*s0k6{O4q-Ah%!2)r-eF`BS+t2&ZvAWt_VUp5Gq?USM@A=Sm?(kTLEf zTH)n^2mkO$&K7m`K5#=XNeBV5ea!(teo76mRuEU2mBTBkn5`{SRzgb7j 
z=kxMO;UikK_M`J@;z0&4UA^(GIEXN_c5^yo7=>6&uxE9rIb?kwt?j@lcw8WQY>_%| z>WNL5qB{!9{Drb7#S3^v;SOIt1GGg!!z(L#;4$`jI<${HRUmA!A)lo@>Z{FXTwxaZ z8R1S`7gnVX6DUM*cvnG@Whup*wrdXA41EM1`S9jFME$bZN(H$h7gyQZwevE}9&a{+|6Lmthe z5tzK}CJ%?`dVlPiQ9yib&s)7tLPq?lfL7Kh&j`MgCsBIfIC5u&p~YM?fljR)h5N+m zZ@D|_^o9c?+C*SkqSiyhW&Upgxcy|*x&GJO>|pZW-%Hj91AV7y8vmwF_$_yhPN6q;zW&zaB0IW955Y#L{Tc6$z4FpPnGQHwN zMcY=v#^YJM0B<1K71^owqrU?X#WI9^y;{rsWJRujt_M9ztE;D;nX9P2%b6g8zkjHV zxWGKFt{-fSG3JF=dR`+0myUWaoNx{OT>}ia6FlJO=~8YcSvma3!~=&2~`IaVK@Uha!*A>g+PSShHtgYzTB!cpgbM3H)I2J&2w zayXvW3>;%~zlthM17LOJgwS&ZS*47uh{ArVrheqr6|yhHKQfR!V4=Pr1+D)&*xlrc zUCi_3zPRV{nC}6v-4WU9a71K-OFEBCBRX}F{@#p8NV5H~;h2Pv9(KDOv~h}>Qe{l( zM(2fp6%@wpyq1*c7d5H9BpK_Q6zx~=-?4X;)c#_M=TvwO>0q4D(<`QE+)0LlaF3dV z&e0=igkd-Dm>}_EhHp%0epPY%uNRS3QBPy@HhWBktX~zaFssh%GrLTS+dl_DuQO~| za|6B}@wSw9ho4{?vtD_0u+g3yfj(;J<} zk3Lz25aIZfj38R}p}ums79z8UEPudpj)cS#*R-e|;{+CgZH&FsbsMta-v&#rtCd<2 z`yMuHls@$^0@OZz$wA9}uwcg-+Y>JidrvH4Jsd$n&X7DGfr0ey^G3>1nE$~qK2r)# ztYxnH7a<)C+=f!%M>x~lCaL6IJQ=C@8mk;Uy{Vf^tzxC}9(?JF?P;xA*nAVRqYi{`Y6 z`$*KmOS$2nzAxerLy42Yq1OvW7g!pMqnRRNMV`@|C;J;aj^IcNvdIf}{FZYsBYuZ|l;P$xv7L}U+_NQH);X3P#`<&C32 zb!QBoBxGt3@EmH`cuH%AzoQO2flN|CLfnGn{jVMig|uZz#H7az>M#8vpZ)AVE%ZiHQq0d0-9fyAW~lAQ)!K0 zYL_l+*pPb|h^Gp4PT^bGC>+eI>@wyyHrei+iXzDnDw@{?AJ2$|D?pp527I}g+BS@3 zeM>b0W{cDXPkd4I*VSba06cTGz^vUKcK$fbK<}hgk zRs`YVJbe7V!zWG-sxSMmf5`|LXI!g8f~HD`O}etD|Ss@r z$bNi~<}Dgqaom1Xi`R#6!RfwOn59oBJ8>=$mQi z0s7VwGLC4daAp~;!QJ?zP@8Y@3I1;Op(yi_gk@)XsH2p`hv&CzAh+QS)j@bHj0<}Z zKh&n5z8p>dHrs5Jb+Q-o>nD3;ZTo>VwEbUgCQYgKO-%7LV$<-6zkdlEQeSs4YP}%|L@aYXWwJ-B@iZ=jeF^jYh&~oon zFdVlqtBwQ2H3KBxd74%FUM;9VESJ@67^{tdfj6FA^2Ti7e#5^iT8n$QurC6! 
zGafC|8j`iWy-150xeLJdl1Y^o$3w|^gMXSm#AxCd@07{+giF%yBkm}h>@KMhNc`u8 z|Bs;*y$7Chm+jMJL!GASL+hWeBM=Q%r>7}oxyM6D;?J~>ahT+j8z={X0RPhKRbyYv z!!tGbetDwIR4_Rb?-EiWJbb7d-2V>w09Hn9y@H2Kjw!cFnJ z<-hgYwEdM}<^bYG!$3jQ0T87=XjI7CZ&YkD5+;ML;;=Az2yFvaXZm$4Wt}BjqtA_j zH28IxmQMrN5{b;yGzxvjjWo zEcM96HhN<@pI($p4dBvWp+m0*(?Y!Jmn_mDG5&lHq0m3$7=FF(Fq13cA?+kS-e@Wi zV$owUp1eW(2B0k>FnKQoLNIx@gy{?-vAfxbhJ%dXYN2z1ID&r1kB^{);Q1)iX?zyT zF`;xEP9zzD6dFCcSg1H)816M@Bj1u%RNdpuHK8I1(PugVQou1J53)2?j?*w*Bf!L5 zE_A$MC)3DZwJrP^=R;y?zfl3ZGkr0M?iL+_aeeOYw4ptp(-P9u1D`lbd{)*bn&PtK z!v4(^oA3@{mXQ_Gy)h2$pi7X<&o0(SM8bJxk@RkIG^V@bn0wg}fe7&rY7<^~Z$&S! zI_cMRmZt1Xe^maHQs()j1Ig7*X*g$_U;VIrWQe+MrwpM0|l;L;U3cIWWgA_K_+GRx4} zx7X*w`gt9-13uuVF|Agy7$eM(B9T&x=Yc6b0t7V)?RMNid&%fFi~`X@e$EBJP?+K@IsiG?Kmx&7s0 zRUS(-e*@tjkQly8B9`;*&GJU>g}SfsG&^uf&pL!L>=h=-d^K4yhG>X9ETI@NF&5%Z zjC4LNRG`5|>|pZC$bb#Is824$1^r(+)M{J)oqJQ3!b@+h@k)a?)J+(%M)t=ZG)dW% z2|)F65>g@DX`7uHoLMP@@neyrA(Uv?HZrDyZW!(p0JBI#Yv6hww2$Qq8@R(<<2${a zwcYCzd7swQ>;44-Qr#4gs-cL}jl6M9sMw}W0DH|r11T@9tIE7(@OFjU3WR99enf2b zgtx@~H0A(qPk$Bi428F!O}}%i^>0}{sX^3VVQ&s?u(Fk4$XWvBthe|JrAYU%e6zZr zbA)_|>^SSra{H_$FK-7xjRK*zzc?m6Ua$-8wYc8wG72uLK&WhS{8U3+40HDXt`pY& ztn&CKVg$*dnwA$&rsH+!-0)y`;_U=qHzph$OO{R1o1*QtEm1Do*(l9!5R#8O*N|uE za2Vj7e<=@nR!3Y2e0w87++0My({(NQfnolh(o`uu!}&@`|3#|07$}pPC}>5AO6E-Z zjFHOdPESg8Aic-e<*Vat``EHh3P*#)?w zL_nTr!COE>Lqoe?Igq5B>{yZyQppva4w>iM+`=7oDxUrd25BX{lE;TD{S3A_I0T?9W5%B}prvth%*3$Lh{=k=eEL z=^OXnW>#&WIj3c=xwuIY6z?1^p)wSfsGk)M%oY$7k>t!HH$Q^QKo87hd`PKQ{%h5G zgvHks;3N$GjLUkVnYm}14yojf5dh%+ ziRb~DdmUv{zt}yC&CBzS7Dt%SJnz@sZ{J*q`CKN6yMYuT`M=~m$*V%C_;G&jg!Onu z!CNdEg{;K_-aM+|eV^CA`f_1j=<9%nm3E88_cc{2wkwW;d9Y>`no1ANiN4K}@t0`q ztlIi3kBF#9GE!$A1?y2#Ev6gv_xbMsD9ik%DDSCMCmV`!@p+Y(9s+)R-J7U}=^IVB zPp7|uxbP~>7(JO5s&qpSe-v+0&Sp)XyyYE*=N>mF*?X1Ls`7>icQ~{ODPdq8xWly01nHr{(i|dGb%?;%So~c^dYsp)(P*kCMkPl z(vXiJ;Q0u!HwbS7Htmdm$u(vfYc2lSN`ysRv$8QQZz> zh}cdLGFblrj*c#uJXA{M5)0FRwj>C#C;^ZBQq&GeXc4oSkK(mWjwO(bLEdg(Z-2MQ znOgaeDmYGRP8@h@419&==H{Bg#Xik2NI08q_v<}#ng+l52WqGU?>1Fa1c3~K$J^t< 
z?Tfq<KX083Vrrlhgj{Lhw)ZYH1?K%2PC`$+6iwW9?)Ihd@^ty3 zP34X+>xI$U4+7kTLGDdc1k|5q3F8iKnY_z?r;WsJ45dBjUgjjCUVMcW3mN#KNGnx6 z4-Eb~c(elD;28j1E|eBm`nJnNKZwE@BFgqXNED}xTfaPZH=RkR1XnhHmDA~P5W5KW z3@b{OH_3K3SrvxRma}FCPVQu2Vu-v6S{%njlD>e ze2v$G0)-L;v|uhwNl;Dq7yFa-U_ttGxBJ@3wLAX9b05k34D zLJ^>04YcS{fVK`HoA)99+uDU_u1Sp-Wg8O0YA_!xD`f2lE=ZQWU*JCLQgD=k0?!uo z%NCkHJkfZW=r}HKf_}oaRH`_gJDNN$F3DPW!u-l zRY2O-B`_09pge zHTmVk0seh@2Bo6Byc>Aj%QQcsbOV`)D90U4+n}Hb_J&Ir-$7n^GoARZI1?-UNYjR_ zMfn<g5Mp;f4aJaRnn=S zWMvoL%W`D>i07z#x4AR6`L}9a=Qv1x2-V__r+bVXib0^AQvr0*f`GbHjF339p&6J( zj5@K`GuqE*046WzNt)s=<9nmB2bhHeKz1YP>W&JUj&v#(u(|YE<}tB2EZ8w)OPD~x z=nq=VC(@v4-bPKF&@H{rhz+6^3^7UoTPCLKN0> z)l(q-=J7wu;pri$QB%CquW~a=nppz&PLEf6_&G9fB6h2GEim$9(qK=2wNh$gAwVim ziGENz+rl;Q*=lYtD1Zb;HL2l0E3ONAQDoGb&yc z*cFk72`Crc&oL+Hkq`D^gRGBTq_IJtng%~fzSmXfmut8}-0`wNIhS&Y8piLiB#Z7e z;4dBoA>-yEB=bsUST3dvg$icG_20@bd{JB4gqx9YM7 z!CuWSC-d@EN-`3%z`Fc18jHKsP?j_A(;nQiL5JV#P5*(K;a+tQs%)7@Bm%bgShL8&cW96DN!PpoMsw1X6!Q<| z5^DDL_PpZqPVk&uh&QB|7Err6pl6>*J$-uf&7i}>*&n}h5gPp*7Lzn+sTP}&hW9&j zwxJQy8acJ&$yLNJelpn7;0%-zBpM*5&wrPe&5|SF#NWPcO1K|^F=3Y#3c*FQh5V9G4Z7O_ zvn_-G!J;GK!EY-R*9AgiPPFWO9gwE}F*eUQfD(gBWzB4ACzXsJ&A-ux{eZ)BIr+#( zB^w1ypcp^2150`w3vj3 zNR|7-$xDM~1XKfc)sG>h$ICvch0uJ*E3yTi1FzTUARt}`#tm^%uv4($Lh35wNLw~6 zJ^zxLB#niEaXE@@ql`PL10rC~FLluB17cyGLQtaBVgWU5J;nSyviD6bQ>1Yuva#3P zq#^`c4K$KrK9wni4<0%C!cS6n1R(eD)(vGwXdS^D?!(;#7E!Sx>J^b9$j~rJ0U;h$ zOX@GE_A-Y(PXIxhR)e4LEVY%3g~;o1VW`Jyt^%E@^y%?zt>tH{U{g9Ujd_1vQ8}~# znppG}l4ZYkkE3Av!x1@v>^ffEMl;yN{Bsim+%N-=)S$-HfO}L6b1_H$65@2S9_g!Y zf1MUkc}hZ-2Y_xPL9qV?46@33wEhUR#=jx#g5zL?6P|^7Xj9gB!|ln=f(;QVfV=S#l;L7rZ*{``z%J>zg<(H-Vj6H>Lpwn={65-z%ugk%O~VOFS-Bsi zjIXow_>TsU%SKBZ#sXju12^8dZo7>E_IoZ{iJL|~vnYJNT#51-n8pZ!LW5fu6z5`M z@yVK9GX#`Wuw%Kji1mEC2hzW%mZoKeHHv=AMpm zEa5Oxj<;2O{Z9n)Tod0(We z=nXLO%@7WcDB&@E>3j#gn16n=MAmQs(7hLzmYUjBX+LfJVAb>ANxSb@r>zeU(D4=3 zMZ1DnMI=8TikwW$1@XYVO9|r&%}pp_?S%03QYRSD0qHS^9;Qf;uXDD=5IQhVo;#(8 z4^V2h(?FTVPyd{3;ZH)&?9~%j@O4a9Voi)7`C$r)2iG0G2{x+sR#E6beE=~V{vi|h 
z9bj+)%0~5BB}^cFdXC3-B+pI^$8_dcj`!J7CbyMnes06M*Dn%8KI?f#U9Y3!Mu3{t z9XK%NkH{OR^1a+lrIhZNQ18=OuG|HBae?4hIT9`B*mHWWQHC6b8{L_jSCGDaM2K$j z@L=)F&%~WO*Z@_?eX)JYvV&-zYmGqN^y-O1*{BX~xvgq)D9_xQqFqSv(aj2kEaA8$np zH}jhtHS^YA90%*pr|1Lm29RW1y>cRIW>ez*D1G!0w;X(;AU(wy2-))IhLh7@gqAsu z=m}TlvIwPQ;fUaoudmReD}7;@JkBp+xE-NBZP0PlghXBG2aU!NzF}IK)64CX^mR{1 zQi}h{8rGoMHLN5W$^C#!8@y-UmFVQZ)@pL8CC50$lT$3e*cZ<2bP^8m4FW1zh(7%1 zvE$ziRA(LUet=fZVtBUNG|C7#9&SeabU@dP&b+tC?FY;mDtNaDRicoMh=jvvc!^`# z8Wi^%Pbj`|H%WFAfDghc<9G_lzJ)D1^I9ep%B_c5ZaKz~3a|Snu}1_C-g+oGj@_8` zqFHvkJUK@X&?S>h@YPOYZ`t=iDhz71;fAJ@++Nx%{41u`IZuJoq7D;2F-~MJ8y) zLX`G$+wgUlggqF`%_YpYHiK4L$B8tgQIp_EmpxE{%(N2Ma75ou`~%Io*Rg+c#!Ctf z*=1kn(-9hK-6S&-SL_7l&Ep5HLW8&+#8L)oq<`APtrNE)3K&?E4@RWI&;-y23mR6O_7&)ZRB(#JhaN!*U#1Z$oIvd?t{$sRC>OT_|%RJw>(np*zR{Bfh_} zDke2YbimT|KD6>$x%MZ=?ovOtHnTvfj^-$PXxSjN&WH7iu) zS3eVE)Nv+cg>tdVyqc)D)4IsGTHWAbXk#b9&!>$bxT30yHYD7i6RAPXl!<%v#85|f z(-z}_A;Vrh0LqbN_^ma7dlqkSz0KQw0ps=g-hOIl1p7n51q58mv%QK?;Au6hP_)Zi zvRi4e--t=1>yZ52@w*8VJXo#4_RWxsJ})T^M8EkJgkq^}-7G?e4nNj|Nm46Vi>*8G z71S7fpOUQxIgpIx6bmT>clv52Ut$>g_Cr0+01N#qk(mVavzlAborwK`3?qX4Pt1Rf zJ8>5)8*gd6(ND!%O%UE!)tsCwDv})m{jB;^XdW?kW`+8lPMW6};-X>H#U}k&eEeTH zE;99fF8k{5YqcEFi@_k6`X%!pG1n2(lRm>>LG@1aUXeyjFx$`HvPyhkAc9H`$&Gn=!yb^ywp7WA4A$y@CY|y zT&jpz4MEXt;6KU6kR1QcfTeT4d`Sg-YcrTYURk~gF!GyWgm#1U!5Ff7i2(qPtix*D z3qSwI9q@mP&}{D!fYFC9%XR+SC~Kc$Ej0>r9KIatvq(^G^9w5ic~viHdm#&-dK_~Ms#>+{P&%}l^&fT^)7oG;mxl`WrYcCE7-qdPEGAL zCs0uQNLlm$YykkC}2Zz$KvYPK#YU=82a8r9D&L{2zIT9{jeXrZk*Fdy?w@xt0Yt)*E z%LZ?RaTL4#4uUH@W1kf#)sBVCp`~YVB!1n_8%|N*BU^GGKkG{NBKvs;gG`2Tm_PATJz#nSA8fN&z4oSbj zu4Glsi<9lp7O>!`UId$T>V?ye%;~Bl3sYC+%mAg{Is&%EO87J>?(}O1EuWbwWL2ae zDr6r1rT!ML-RIl>fjcvIBApqQcJc7)iIUJ_Wv7((OjIRGzIcGRgGo~>eg-8^hmq!*jtnkMEtd0V3+tseF4yHod>!8>jkC`0OMthL7)%r zK9xMIw!w;I%)DxZ;D7>@v5ks&6Ce*;VSzDZmXTgW@vVI)j$dn5qO#N1WQpGH*&m1$ zRUpqJbPR)|B|*ediDwsrLTXXp@TJUSBJrC2P#yEmJDLCG+g`&W1i(9 zt4ga+9+3HsVD$?u&>|d2pYiAMXE?jPkZLvM!yjeYvJbk>eatX9@eySCo8W`gu|216 
zno`Gx5{aG^$jBvN>_*bmMDVJ{a^GJpaD?Jb2{<6VZw?`Vro$ir${D?mNQB>hun9Qt5fI;_jA+?_&Ngo{KiEk!m)I6YnZvjqg8jfB}|Fz&;PORe;{oH?`TG!*hzv`*1_ zC+cxICyi`p*Tl$e5lUZ$>2_>ansr&egN9j3R6(j=C5fxbwk(%kD(OFAE#GB>X(bq; z13DZ;T?tr71((9#n1nCcL2|Up>@E47*Z-6^AGI;PaXdE6C|A*k;m9X%Q1+%Xqf2lN z?6hGA?$+_WO4wpW@-Q2L=8~VSIYZ_hS`WZ+28*lfATo7@oV^H$4_knSX(q{F2Zoyh z2;W2|Qr{b84d;UC%*qa>O`!J3sGnVWay7s=N1FaeFwk3C;M*8(x9g8vp^339Awwv+6G0Z87?l~}>oBbFlvOFGiD}oc zYtH9oE+tD7cxOp=k_#73d=ZS@j z{;*vN3=_@haPLO=-1*@EmiQ=S^I_8{=A8cXS&oSj6&{waLB^34o5`nNhgYvDVZ;JU z%Yw$FzS1X-{zRTFxs!QzKVdO_U%2g^N9!MXcW@17hqnb2GZxA7Lpqy2sWxE*f*e5^ zR6cg!m&nP-9*1dgYD(tTWu*Ti#OVsxeV(JWpGvDhqSj zAIdrc==v@B@&qhX#I8 zl6{&?MdaX^FV^#&fGmgxEI+K#$6%=3kBlr6g1Aa>ajjNZX7rYvbASONAa0?+q7d{x zeKqjdg87K2WEe+AQB`ut%V!d*<&G>eVvD&1xH=D}*0Ad$-O9MX&|nFkE~5K=L`{eXZ%d{sQ+%NQM3tH3*)W zVW7c$k)?2}HdiQgN?q1z85O%~zg#~Qy4j_G;)~UE5sq*g5HI*d;{W290hEWxmBkZbGeD`-hMh=jCw)jTYQKtHKC8F_{B@mgfC^EFvwnjR`1w~HyAlkMGaU%Q z)httXT0^Z%wB;JYT^Se+njkpjHxjX$RXrZO^G6!i9Ly2gH93$3HsYvBz9*tS{5-)S z-DkS_Vuz}&dg%Wne2c&8ghA!>eHdExmu630<`)O0d)sdtoA|GH0jMJOxDuOj-w)}d z0{4p{89%iF^-)^>M3@O_uxawf?-b<$`bGP|$YYLVj$|BpvfbB|T8#$e<(ZpyG2PaS zk&$f%^oIc>0gXm31GLj08w^A=5IM(`iFch(L!4hA;D|a?bXjJpDXQxnfAMg#Ar%Q- z{Y0BwL*bMs%{y_#pQzIswUVC1>Dhf{9qF2${5KRE2OcD3Bhh`QKDTfA1bRCyEO5o@ zaJTkc;78hk>`aBpx#p^DOj|jBnDO+tBCm-Y$lFCaPpWtcm|{q6ZAeqvXT=iHW093m z0uhT_%vXf+cbwhR}~`9azSnA|FXy2JF(?r7hSk5RkRCRcw)84uMxv1 zq6`cuf0YM0RL7@k5PzK%bO}OpCh3(GA3-P;71-;!^qU#&@D8Q1LDaNG_zeo9C_RcE zp_ZLQdSEFc#}9`=`9WS%Jb~Ej(-jtGoOWvwUo4wm%X$M`hxkCpSxSa(Vi%ExF7~ z>AE#1cC!k^g%V8Zg@H0)XSp(Pls>|i;O16Zz*PG2^{9Y?{_e6yoiM%W2D)BCrIAG9 z7;?gs7$l%kR9o-kfLR}!eprJ6Z}$*+j~NFS&}~kR5AL&4K+t=fTiyFJsbDW%E_o@` z>(n_W6fy|a0Xrs=jW*yywqEgT8m zdyFF_iVjeX(My5%5xZOjGR3kdpcxPXIkSEzPoxtJ!8!oTAp>_&i+Q>$m%G3noOx87 z)_mPurecnc->ixwI`hExH_7soA$j|FJ#zUuoLFP%eJJpMCU(~Wabo_E0SDTfKI{-b z3^!=Ib|}!<^E2-^3sy`fAPs)2w9-9{!cjzY{9tu>yobqIK-+8*KI~~N-H)}}Cwkzn zO_PGfMjW_k*Ao36d=&7--v{=avRxblb;1qp3y&is+ zV}kmdp12&5U(OU?xlGpHk807WH>E7N+l7i;BfgPw6*QqiU?#p9$ 
zZt%tg*C7p|!KkYibihzZ3G&$tR;4wz3B+m>y${S~!dJi?#@m*k6ZIZW2>&8PRCAUZwVr5u3F4s2`H?`rwH?LGEASzhk7cTJ4XkPh_{ISTaZZbDDclP)Vc%P)oM77Z zF?Uz9R`41{7=zQkA2IYKm8I1%SandJ%QK+fPgq(Vqj(|96oa%Zana|9)g}F{v;GDp z(jba#8x!;p9Di*QP1bSQ(g+=O_*vBK9@lbuOJg}(wbOZuRhVk{~>#v6#zTjK=7$| zVj06Tn@%Xh9%tbaK~J_IbN-D{TSt8Vv8uv9;pk;s&#AU>JGPqA#j+_shQQ(dS_Vlr6?U=RtkGtH5~a z0mv=11zq1SNRtyc-{^24uvUotUoNYG-iBi0__L$5!a2xkuXz=6V_8slvO+5*m>!0h;nj-)AnRO%*c+)v$1= z6H)*EJsOdc#V7T-fFN?6Rd!f&U8tFs5H-34ZuK5O?CUFFo~F0MA;;w3HfD~ zq9Y7s8!x^b$_*3hMRdfG1Z%Zt9KSCP;~uSw3y z0vZkd4V!C~1fu)ob@G|5QiOi~gHkhPhpQj3^9@)^3k-6u2nLrhK>cM1dx-z8PS2y> z$wf#%!3G05_Fb2PdFOf(v_ss)9pB}?iceSmPMU8PfVg?b?0wcP{7cmn-m`4$Khn6L zDY8NTc|KO=mhPG*Qwj)u@&lcxR~_Id`n8~Jr{_*!d5lxNuWZ4&b=nc+vST3#iO~c# z$4@&dC(59R8(Qhrqb3wbnX)4Q1x^-~1;A>@e?m#M8wWr_=CHvK{}$0Q6quMb@Jko2 z87dng0s45qiOCNl=ru6Gf;7(?5H)wmR_hzaprhp3>gtsVDGN;3WFS{;zL!>>JRqIT zQ2awh(Ye4Ku?0;wft|Ox9dJRN$m?j0?t%N%b^4nY=V!-G{BhU zntt#9XsWOcommI$!7Ydn&qF8!R~wov*mTW1`nm0=&#U3QFmC%_k6?t(f;XW~qw?nY zutbnk>~U{2o|RrjsDJNsywzB{au&S4ITT=DJ`T zy#~C^El!xf&K^xu-;){|avfXgZzSG}N(bAy5cr;5$+JS(H=t4+3uJu4HA16d2VMP} zKG+_|(Ev~Mh030|(|#*{R}!HKU~Oz6Ky9k{d!NwO`J6t3TZ zrBwFP3l~1&fWIqbA~S3dRmalW9)IBx0*AE7u^h-hOVRXQsrLc#g?*<(XiRYWrzcGF z^~S$k)$jg~o&9NbnPJ}p*Visc9a@}(%2KxoEJ-hDf;_}pf!cYT+l(mSRHLccD~M(` zwo3=L-p^`kYIV)mjL=;6>!U)N-^N}AKsnW`|{ZF_4tl1Wact+$40lG?|#r=>h__(p+BM#`WPi8@8qcg zc*_iJ?bTH5{odv@2sGpI?cl3jyuk@x3kt}P_}Z{?KBat5Fu!i<6Uc7p68>^w2;b-X zz>f6#7PWg{Cs(VDqd)20Eea?lcxBB}0g;(;`pwIM`k($=ixm}%X7BpWsF^XW+x}AV zfb-v)MbB6SlDu3!4fQjvv8wQmqC2@ado=H{4cGvdKZow8z3&P(2A!wkqBzwx#m6J0 zv3DfTy=Wi@ab5#~7o^FQ7_sx=xEzy71WfyKfN;{7HDQ1aLn9r>p=$XchgW^B$M6xv zb-DrL36WANF2wVCpY5{-%R@F?1`~w1%jqIU9PfTc-G5}p`aLwIvDbbVoSp%jBB1P$ z(RsMTMWIg+E^%Mpe{7p(daY$B%2DvP_|11)X4Xz}$-_cyek1MR9fxie;frK}4$5uJ z_OD4`fpw&QqO)TUGMmeV06$4yyj=2wJaPtz9|CL4Zme2zG|`c1ul&$dIA!Fd|A7Fs>6)HG^EmCX}iG`xVh+2Qq}wC>E!nb++dqUonP;2 z$=|SI=K!qc=2`A$ja-xvqdA?P7+|Zq@bZJo&n0&dd`7XV;P~E>BnNoL|6>6x4pD4Q zJdi+V3t(_}+A>iA(+eoRnq-q=if(S^ 
z`$*1YARSH!6ZR*2$hRT~eJw+teI(+5N^ER6Pk8gQlpNF6!Y!Nlg! zpNsUw%Vy1AO3od*czRN;wF^7BtJuvDT9hMD`~*#+IpX1$g+Y4?yFTp<+MHMxXRa#b zfXaWJEcoNh>|F!(L4-bo4UEvz7>w*HMNh}Fq;Ub2icI&Pe#wX%IhuIg%c=G7nKm{t zbDN%M;75vY0z_CCF#P}~@%eQuY?=M$jccJvW!mSq3Bs2DQmtuW zKVUT)Yt4j#IY#~&PYGCFv@%$YAWs3Zuo@r?*;CPn;DpZCTDU1k9}~!g1F3VY=PLpM z8nZfAHjUBfTh!uxq&e76uga&v5mSiMuWKTATso7wk9Q#3u^+!Wy^wMCv1tv{KFHHm z{j{y_mus`g;Hb*@%P^R!7!|n|cwWnOcKXDq|1$NeL%3APa)o?0V~r%=HZdZY_}AQ| zh8h93q?tsycWREHxgc!sNFA)%MdGyN&A#tOp`TEt<(VXRQA7IoEDUl;oe=!}SRjSN zTo?jL9tv-qn@1AsUshM;)0)LnTYjRH9#ca62Z$$m+(nxq#aQ40z82EYjb>K{@xr6~ zZw^?j>ts05UBJr&@V*mpm?QJEs1+vQ0qLxaKIOb21-23C#Kl2{@0T7#K%xr?=RM%n zZ$jWYMkcE|1al+2jy918zBSDfbPbUH)y|G_sgYZepL5`|`P~)B{kM7j=KV|c;Z6!} zeO6}KmHeMT*&NrdeE!)o-p#(*b*d%o!v_q4h%CCiB3U%IaOR;M*DU8>+v=DMMh7&O zt^`~}Nj$1y#w5OCxMR`ft=Vsqwy1(Ob3Z-t=hJgULFb!RfR%t-I%9MYaiWTL>T@Mt#7iog}@tsHD=(O_$%=1e1xkUd3&Q)I4xpm!Pt-SJf*W{kgO+zqMP4U zZ($&y0GZ(+`T2{=$$tvuJ3#g}>?5vt&}858kL#nF4UPZQ(9tZ*8Zh7FtaRIbJe^Sm znWB{EGwgImH^kV(#4BrSliI?DiOtuHHmH-pRLs(=)kRnaV%@Rg7^IZT4fYc$eZV0? 
zQlgkUzAxqk)Fq)XXjYoG`LaKDy>8aPV}7?bFmRsWU9g?*6WMGc^4v{SQdRBoV&BUF zI}`4}G#;1#TKs#Z0);_Xc+2|Byp6n_4z_99b^VtG!w!}sdUGk>-*ZKD@ExldNuHg& zp@e$GH!5r;bQKADbr=bX-_vP670Y;!lKWZPDP_W}l&qgmAVGM^ckbSvM$Ft_JnB-j z(8&39hxPc+W$8pMb-FKnI-Ck=wN4@)YjwNj*eN?IKqMkAra( zv03W$Iq0MiMF}6?t8&1p1(4bxga8gF%L}Ha0&R`U43jnh5qJO2jQxHPi8qaot08y7 z+PhVwbasnkR*h(7aS?C2J#~S+Wblj}0pi)c%83Y7L8`PYb1h=$|&N z;=QSP`XP(LYkpNkWPsKL_$nY=qo*8W;L`bbSm%ufmbpIO=4J&cW>HRe)kal zIBLkiZf_2u@FR3+(DKWgV}#k_c5&QlQ(X1YbT(tF2+(3KXn~|!s7ZV6oR{90&1WoB zUFF(&n?8W_K!6jVeJ@Ek#hIB|N9gbA*mt%P6hBsVUFO*;_ZdiK5i0Qo?v2p;e@+w74`dSa3-g+UFK{5O&%E=;NTm(JBrZPGW*%lUvjI@=(ICd8<-&+zM z>H(}aYSzDJ;!H^yw@NU1?oBZWzU)WjU*^27pAsgB(v7E5`x3o8BL2>0RenBe>DkxS z?<1co$gOcMBTsZ}-eU__56~b(K$4?iFC8%0kKfO&)J$$TbgSm$YA}yOh}pg|4D|ft zek|EQ=ap7WGIAmK(Fg7V$_Kj87qDU1xUD~@Ep+SW(a+?h2Pgwvx@v2!Du+p8o(TN) zX_=01qTS_S>Q|~tcwgTe1NtQ_XY<3EAj!Xg6<6~l zv-Z&4v*T-&1x!*k_c>EgY=cXCueIKqKm6_YLX#l*O$?=0*l9@pdK_G&oy!t2<#C;@ z#^JR*EaHEM;I^4qH{qH0M9#i<14oiJ>b6!P7nRH|%SD?4Xgi1Let-+Fu;y3j_JQne(8G*l3mW3ni%a46r0<>B*Vr&? 
z$6}z{26(ScS7R4M+1>d5^F3%pi3R#d=Ns#5wO>Xa(&rlXWxXZ()f$2`9q%V&E;#%5 z7MxVsY1mf=koyl+Qj6b_9;_6d@71e`aS*V20XV929}F9FQYclC$~*Pk_8|7$9E(Xp zh|Q=?=TRZVqU(YFSvNZWVDE5KoV@3@GMi<26&+NkH1shL^_{+xoc^#IALPat z#QCrse;v|9AeY$~P6-o{Tdtx!*}Yvm5j}aD0bf0I{e;f!^&eLs{LE$sGdTGqa;`vn z(>>R2#c#>)bNlZJIK-H5U-(k_#%NDlYTP4sD%#N4{!4~=>*bLodRmwnb5MqC0TE@d z{LKSNp|~}xe|^7cjD&+Oedr`zZW{yRQRzQJ%+=#L1E~2_HD(wb6Q!^&glW4uUjG(G zPfQTu`cr1BUnhlnubvFB)tz!E$u(AkUw7I(K6-4ZU-y-dXk7fQa|(YsUPHOX7wD5X z5uI^1I~DFSWjg+@5x*fOtLgXp-j3;h*@OkfyVHHz20wXt#c!cBP^lUU^!o4ox>9xm z1e5OCX@+M|s#=dQ`p5#Uv1G9yhB6Na!|Lk_4_Ux-Ru6`PydjvVCl4m*=3W~UgC;-i zAzox4k9`*lqBKHE0D=eZH|uH<0FPl zlRDwwK)hfzyK^bD(8*SUPQ)7am(em}b`p3^ymk#ZM`dQgAh|Na&}+cmKkDz2Ok>~~ zax+#1binuO>CoQF{_MM1DvgM$3PbX{`tkTA45{B4eE_)BEjGS%GJf|p~ z#CmBSfpL9tIDI?3;;8o_VVuc!tb1ZIN}>#b#f>~*j%d_hV$9U;4j>g^DuVf`b#IG zU?MFpW;nQs7W8Q?pYg`I;i{)6M=SuDFHg1XV>~dJY5x$>lS**2?h+h{08>uNkJ7evcHk#r%E$LwO0h`_}hS$$qRU*iU^3Bn9G{?|(DvI0~2 zv7J}7quOE3LKVR!xtTA1&tD%)K77B;hOtD|O$<-+hj)3sLK@gC*-?m*+hi)V>G9)| zpZetTD~NagHh4o9>xEY~I;i^~WxXtW{DQMvjcsIFu=kjyx6~+|=ZHLXkF(bO>t@wb zazkolXzkE%VHVk4hA&W11Fw$lBiha_7P34=|G{LA1G%a5)Zb@ zMH3qgPB?Kd;mQa=5X=HB&piXVss-;ns8Ck8k_Z9sR$Y68>bv5htd+rauyr72>Er9% z0fJlP$)Qv3?Y(KT${)M8)W)@M`_2~_e#pF-)M_tzL8&|L<4lP(&E|=7TA4!`FFkR( z&$0EYmY730YrrxZZku2O+Px2I{k7iW=IuK<9@DvmUReYK7Y!f$v5(#O~}G z?{q(Ta=o+l5t8v|^K!oXcZ9rWD6O-kHo$Boi8%LjKj&(e3v_#4tk|f$8c=k!nOL(h zaP1FZE5HunuN>Tby4q%A&F{!yq9cu8!ezEMN9FC?w+9F=RfPKmur(ff1V%Uv(g2aW z99yhPidn>W`_!`fX$Seg9r_0d40l zk^A4C6dy^9=Oz?mUmp0uK_JsP#-{X;>hKIcjTfA#sttzF|~-^^$F5M&RH zTx7ZexABbv$aOBbf0;h0!~bCBqB5V2lY9A0q`7x>>gh(!ObS&^5fEvX2nl+a;m*0d zbGB1GQE|RXbg7YG9#NFMI*C+>fq1lS)EfDjEw7pHj7Op&^$< zI1$Znb`7bm7CT?W*JQkBAd-I=mku*rvrMfl#FkW7C_WtO)41zyq7>ip#*j*1nZexZ zGmS(~HuiBmQoOT3t~8XXQeRg>{$iF;p)|9TVzO+nb9Q>|XAni^A~gqCbKh))09YYO z2tdIRhUmkkW|q4LjQ0Bb4ZDnMe=e8b9h^823Enm7`gtAXVl|&i)klT8rth(lyq4|} zRI1D`BD=|X?opPfzRTF-IM?{;WybK}G6y43pIAqYIJ`I5jzxMpgpu%QG0;x-HPB;6 
z6(&96bY9(Ab#yWRnE8QqB}(1&&ChswnloFHSyeltVHkTYd1te(%EOq|f+yPbIQ7^K`SVR%|OT^v_MKPu*!+T#RM0I<5O%i2Eck5FzN|&D>S3s#Z>OV5Qp4?v1x&5|a zcJcW10%WFD{ayQxzR98RHurV)@ITNs++1BR)Jv{EqzZ?TfJDw@irHzo{@;Nm2`0fr3rDkQ}#m0_l92bi6+R$-IGI&*eeq065e?{gg=0PQV;jo%_*XJy~T|Ps)}@-S1hZ{@6u>M@&Oz%821b#Qed<1TPnv>Lw$sHqr(2?_)V_G7ISwNBNG(s_Xds968G%&X= zwA{n^lRcJidg5u^!KIpx<$IYFh7)~ui73yZu9Np=WfXvQxePw{PUM@iNJ3wz-}_H!kZ zygvBbwY-`0baljPhgOydyA7UTdx-i1fK@?&4Nyyj#otX3<=QWDfuFy4qR%A5Wj-l; z+=*-W46KP5p=I#)i#B5eJyf+d-V=-IK)8wut10nF8Hma5U2=&j7mm=q6!Jzy=fLgT zu-|%mR87&sUvo51m9k1K(?eo?A?Y)mgS->3inHL&FLiS)DUfOSt)h_LJVq#bwHTZI zw0Q_-E_EC555R*-MzGv=vE1Qo*ogi9@aMz*a(8(;*1L)HokBHe5_&VstzUqx*u~LQ z-NO%$jd+jny6aP{+aJ*I_&Lw^DQc;Vy>V0|fI>om^VwGtP(p7UeY*-v9l8_5Z8w@Z zdhr`9NFqlfgU1{DWd~TgZv|qF9Dk9&VHNvp^0oL^ruz_e0?dm&-`V4{bTtN7mY2s4 zo^{chS?7E}^Oz5tHXF*#8QJCH{tfBzXeN%}qhJU-15{LwD!LxIck`FsaYJjkL>qKPBkG$!eEH^x;mNG!v|YmH2e z!4}_es6h6_t8(E144eVm!3~#Fh6!Ja#XpYcYtc*oY3a}4o8ddkl8!OgMTC}jD35J$ z5iSW+p#F16u<(K2ufDchxF^FqvcLA+v0R*KTOY3q9SJ(^-tI0fjAHLcFxy(pwVq-! 
zO#KLk?%o8bb~a!1CV{Sj{q^2hsDR5X;fIQIu)vHh{Pu*0|FFBge)89+wTU)ZfPm$@ z{PVoUsyGQELZAf5Az*(cK%X!2{J)zfC{Q6ktRdJ;B>cd%3S5q4IBoFls!6%Z;V+8j z3tz$tTwA~#NmPOUBHu*&CNyD~Y1%<0>O z6f%_9U!GhL6zhzChLl$IL<$pQvc;kiq90;Zuw_+A6m4(ZSe4o-qPvz=^r3F-iEl&T zajsKG&=m6W?DrF6;{n1)uiS5*H25sqYX6}wqeXiNhYJg~^gHeU_`CS=_fqwOjd(-h z`fW)G#0ESHR()p({9Cv~+by)0b}gcW5^En8AKTLsqi(!&MPGJRKIePUnMtVscqLDS z_)`loQmvk`GeO`b2;@Rt-=E=X|7qmC^xxeWVTu2K20)oMKs}wI%k+3%4jH%5TKWP| zGl(xABdV1Gg;R&)lsYELb%6ElT(E`I z`UWY<>h|GnFTRh^m17n;R6r4gbT5^VT5Kdsr6n@?GdT~EK${H@O&%SLpHOwTeDVC& z9pGqr)?c| zeXkQX(6DGvPjfTg$$cfu962suXDQR~+US%}g!L#D`F#q=_mY$q%(H$0lcOx%NWz)F z%(`ME@mI%upOTA>$cWvqc&rn)wu3U6y3Kgn+sjYY0Ts?#E1=#a0*0)f?DEoWMN#C# z%YPbohGL-*$;D}aCol&zE)vTxEI)@Rydg#||Wz5M7_MDCBhMH z3b&__QoLGz%}-a^r=WEtXx}Xt{iPK{jcZXKfH$CvJ~s3Qk#K#wG77|W?-dc+l?Iio z?Gh`LR(;npNK;4EGnT$_n3#Iqr{CJm5e6z`h;9zChp}o}56sD%)Qrjhe26GFn0%;{ zBVp0bs6oSTD3~PamMEFET}Jw>L{;8P7xYi%A`HukF?Zg--Q54X#h*r@?S^;l1pbD7 zaR_&Laj`ieEalw#{^K0-=f$q?vq*~G(kLp7E>!0eimZIy!Av4ub33!(i|@tE-D zvG5&+6atAt4vGk05(Bq@o!r4DDM|CZ#r1DRgLGj53o4&0w+{g)H!Evzz}8-R@VrF+ ziU3e?I}fGt?z3PPk}mA-mdph%jC3SKhLOqtm1$Tc1N#NpvVoevYxEniuaCd-6dZh{ zhR{_q-Bl}|QaQZA)f^v2M3>W_wfbL3fvJ0uHP zSt4A!u$SqBZRs*xW0wr$HyH)$=`llmoBeRBXXItg73AIEC3>^=xur{;*wn?1 zm?Bt=`Ctk^tsaA*hmKy2dJ?3iqV$gnleofJEfjH`Mf8!LxLO>P>4aPZ=C92I27TVU zzI&S|chI;q_qf*l&ttX46t(!PZz0 zm3hcIXLOhL9N1=a@}6au!5z(r{uLXYjd2)v%A{Op3juzQw}tTzF>u>f3o=BytL+QV za7Q4hDFS#Wseq|3UjQC!r%Cran}IS7xfhVD!J><#TgU^t;9 zhQghsbW^z6E*bT9Mei9Q?8m=afR|B1y#@DF#Fr4BB*_G6_Z8^~;goH#^w8}0*jW&s zZzgFIwLTS|@cBeTwvi}j;K^~}na8A?B`2?6HW9sNGE!?s+v5S=Ph@z)e~KD}MQ5b~ z26=?r*y5>IHzM$ZuWRt}XxGleZ=~8Ajdw3UtsRfBj}3j^=uWBFhB*~_RkF;$K76>& ziWt4CC$?fYxf4KsX|tw?wX?BZ8+OJ>&fS2aacWC=hZx63A*3Nmq3#sc_jf(+zfY$xcWCsC z{LR}hC#Cmq2nrUg-fe^yQLW)`{c79wI+*hCT-sr$h2>jrS9BzLzfae!cpmlVaX%N1;T z1S)^J;24>e+@4~x*iS zD}=jrI_2$kIp9bw#ow1kdT*LUgNL@$1p0DcZLWQ?2et|9HaL%9l>a$7_-S}kv^g-~ zUH=OpDK#dV^s7!@2IDGk{CGSE=e$B4kz~BvtK5Qdf>ZZqvh%Sfa9D@89QpY}Z*`}o 
zWYT5H515_|t&mNn&I2i2iLO@*ZS3>yqBv20LsLMW-c;bpPm>sP30wXttJ49|VWFjT zlI$_5iUhHC?>h}X-A}L#bvFI!aXl%3R#M1A8kpuR#!Jqb2L{I-c3M_i9?2Mf@ssVO zB2ej0q~_KA%F%z-pA5=&%vS=1f&I_ykV7!)kX_py>??@d9!GE@m z`tS7R)*Oz$qZL|Qr}xym7AXtL>Sdw0|4%#1msp30E^S;*dt=JV>}y;Glhyk}dsp;I zO%XJzx!dY-`NFqmZH%l;W7G`mMjICa z-%llF!e{(AX&A>EqW;F+q-bee+`|RN)pAmWNXqMBI>5-)ANck7a*q4TodeGUzQc^|0B+BSiFA+=n z1Vt!`1s#FdIoPU-tJBgw>@Z|qw)VDyQ#M2Tl}O-BqesxWd)5oz7q2Md7%4n?Fj=Wc zNkux<1BU-T2@n3Ybrd8s=6<`k#$7eID z8)0oTEPE0T$OiEnynknpyH~`I)jXScJ~3p`oGebjR^yG}+VP*NwBS1o0utEsvO+h` zK@pVE&5`zzVdeAtW|Qs7x18X4nj#PyLfpaANUTz~=01ldJT$pIBLp;~TAcZaFoI0g zG5?(zI%y0={0aXj+(-9QzUIFEgNd>yeORsoeXsX~<+{kG_gUb51>w#w=wHwo>PP?w zTp&P2ER{L{hVoGp&%=c=j7f-BP0D4C_EV^v!97HCUHNVcJM8o%}l$Z$@&*;oBNWy2`QZB)#|M6v-!t z`Ej(#Y2Y?c<)(-GkV1kGm@Z}eD2`6#i@KkZ%b)%(m;s)wY`{E-yef*!L^eTcDZ1k2 zaz;b^?#h0dkj>Ty>?VkX9dvW&8D0*TjYOZd&b=VOCwT#Yua?$a`BE_Am7d?@NL)VC zDhA8F&w~S9DwiwpQ}e`#FGM2i%Al}V{Xg8lV6U^=qrIH(*4&Vtdh|o_)*gxXUv{nI zthnUeJ8pwNFE4{-Cy_PSdJx^R9_}NcE-xq`L@W{&LFgPTmB_6SiFAc2T%$x0K%1)= z_}Tqvmf7yh%>kJx4Nz3=j`Q^hJj*dd`9sONb^9aEX$o%SsQp-$f2u!Z@FA1}7+RRR z${#K~K|b&nhv@4FV^s)2YIWU&p`YLd-_JH^Q&w%P<$0w!@*Mzw7MlM(6^}UYoQ{hQubS@&%@P<&8B66M1 z`m+~FWYGs=O4$e@_VAzu+j{J{;l+HWAa^zPXTvO#5CvavbTF6b zK!KVKJn;9O$LKtGxC2)25&+tz-RwyJYed*&s zF>!4oR0z;h+`Bar)03_p>Eu1d%<~=kgLUzhuoYg@@r;v@-=Y2bNC6HpAkwOj%>xqH zrIV1`1OY*-#B@~_NxTF~(;JQN-U2WPuMvvn+&)_FBPnc(6jD1lgVZnc`sL{oezcNC#*1!)rDVI*OfEn29ccT5M&4 zXK?1VigiS9%NvYRtCWV!(&1fzeZ#}~@0pGR-H{s=Yj7!4rD%$b&g#eJEIG&R1bzN& z*^8pAd7bNNRNsnRt0#0s3+%ZC*32Nu1Qd=A#EP2#G?WEd_MiT96TV$Vap&IEbP@34 zVaqOtq;rF*TQ57*qReo9)b1pYpK|wT&y%%a!toe8B%Sz<=fsB;1Z7#F8o#cK$c&4 zjugnZ6{BbZ#dg4wsnr^RLAmTx5+F)?W%(m*s>14e{EQL+9j zuZCIwgdtw`6XJsQ_V4*dawK_7KDrN|{8dKqpDkRo&(El0eM_tN@mnE#8DdXEnq`i> z*+V~L?{a5%&jM@;`q85dT|61{#@~yx^CsXQE-hj7ljh`NUE8?SwnA;_5&ezO*-mz@ zBZ4I0^kf#4s6RB0GO^DL z(5vhPr}AuJrKbf1#L4nWRMBkYI7@pok;~sUZ4Q0~iCJyFW)|1#j|9oYn70d0s9k@j zK$V=h5pmnkrQG<$x#$S3Ft?z4R=55-PC~Kjd3B6B4e1jcBA?zkR$#p{C%7M 
z)-REg?@`Ax5+(H2U9oQ%PCYvmGL34qXGEhEv)a789H~{F8WQ{enR(Iti!B5(a5?KS zY-6zlq?Uo4rz%4z`1tf>UDoN{R`xhWaEG~W#9&6Mq~2tN2)e{Exw~NMvmiYB+CBw9 zI7QzN^*~_utOxYhliSbq1n>bj9P3aZgf}~ts?^A)fJf9HtPQgfDb-lqwfx3X#?HbN zFTB*_2LW_`Bo-q;&jwG9o)9qmyt}-S!Z9==HW>eTRUMQoi#a>Ixz)!PsfzdqkiO(n zbtnZ1Qji=n9j)_|6H2&N-y5NHC%0Tq7r+@5JJ|Vj2dcB<>Kma3#WmxQC)mydX9 z1TL-5S6oZVK96q0u$q@S7Q4XH$a&^L+2X8@%w5GM#z1xd2gz-zYsFF($;k3g19~)G zwop^sxCzgNrElTz<%*1GI>|qlXO2AnNur(n>~bGhCQPp0nWyQCp9E3ouqp@z&O;CF z>ix#c{A_b8XUv}#;N;WIg`l@`{gG>O!i-rp_-*)_l=jK>*s+TeIwgvp8l-Q%AUd-f zR=w^9)+zx#h6-6ww!U6pO*sb}a5D}m)b7RbjV z;6BVlAwnj64cUOME<444hwx-Oi2AT>dqEBQR>C#HL4G*Q{+@u%L5wDnmjTh+ucQ@) z{=?wtLA=^6#Dan;*K<9m6kl6oe_c&fDhbw<@w?`O*dOCOgV?8siaaZQwVIFEN4<d}jNJt%f!!S7yXS5k;+4vqtG~J#*@492Qv?Ur%LsOPE#RE2;#C? z!DA+QjTA^XmLObq%0>J$E%)d|#;rI{ai8+=L`KUg|8yzeNGtqTkO zpa;iY4;e2sNVN^9cSJm;)}Vq4-jW-h<$%Gyy+ypMfIttGnN z1W#6C6f*?sgVuGrV;T$}B}YC3{f=XQB@L+9XlX-^frcoB3GZE4jMzUNKn zoID?HypUBU46iNfk$|b#vzfZ(miT3mE2LDd+)M&0^aLl9h7&V!KPmd(28M$=2NwF8 z&^z#S#w87ZTMnPv=7&Nv)l+Fw#H*~DTtX7-zk}x}&v_cW=Ue!HskvOmG$CxD-Fl}7 zeiBH#%ydw7A>rdz^O~e6=nktCftV;n*|&5kPe{Q>C0}-7vH7L=SG0*DYrxH@ap_xE z&EG#n^PgA`1_;zg;eAHXi4f#%BN#;b6jmumF-5QStKuxJsptf8N8&-2;&0Y>4iK1X z6fy4=bsz@8#w3`O#b}=zjsVAr=w)(D&>GH1Mf+Z0bs$f^NPfhPPCRTG=OHK%36!c%J#u6n6A}d)f9<$+ zM5T`Hi!o1b49HdyQX5x+88Ktavq%d?_Gb=HlVS`}_{Yqj+Wc3;YGU9zJZp>2)q?Gk%Uy*7WPcAQoy_(df zM8!MS>R8J}#kf4!Gx3BZMSV!bMM-p=n^Xu*D5%=YmIkW~5_f3&EzZO-T!`>7E1)Q%X0oXgJR1}+6RX7DSUl81BD!*sE_9q{qtxy_R9v!vt9^2qR*b(n>D&vV8f4>MA%%vJO z__RHu>WMoquPXTRl@pTrjuBYiIeZL#_A>OJffD=_XFeJWZN8qHbo3)_+1W%+YA>T% z25P}ws8b`n8BK`&FK|N&vY%dmu{_^>CxH#UxH1e6k=ahP$1hPnSFkG%O_J&FRVJ@X z41*F%q7{#J7sC9DejF0)=EknT`-DN(wStS8QOMfe1t85pkpsy@UEdIDcrn6b@_CPx!M? 
zP@xlNy0xtaXj%s!ES8JxvU&o+k4)9wZrtfvCTG`{4e2Mk_cx7!)h9oV{F~swG+^io!8p zlK~J>Mo{T2Ku6Th$JmDVN8_SlqIjVh&uw);@3@Gy`tDPAju6JyjiK0jlkAmDs+{-g z%o0wm4}WPfZrz5Wb@Z+`h$EhtM+`o=sb&dD!~uKyYyWdWxDWx4plyqYndRWR-!!vt zCdsf|xh!O{3hw0far{X7(p3EQ+pbB-h}9oeUT-MkhHz0o_3?i=f$Ss?!+v9nsDGFz z1d(qtv--tNS-_f_=Kkz|)wDP@#(O}Vy_#1@Ox=^IBBb8R<_Ub8$miU&Bk(}HwwiNr zO3v=8HYh#_DU^oNlB7mufjGv)8=R+;znTRee=99N+H(9(j-19FdjaZmw0On(|2>8( z52E2x8cOnkyRpb9?;DL4+_Vr0*H4ht5W;8}%=ZZeet~MYP$4w+ZkUK7y|$-cIg3u(0X89WF=}MiA?|4=^$}T5q^>Bm>Vptn?Kxo*2yxZ{l#LOAlPdyMpfDKGt znxTR{{QcGl!owZT!6MejRHvceZnStk!i{cQW}&XB9pzdj&RorD2jK-rDu|$J!)A|4nJV>VIGL(m^)%YCr5_l$}YN+*JrY!_Uf=wx7m4v?ocfDd;N)y0C+yM`KB&!^!Lh3^Q zp#VgRxRlUyF`9V(O>WED1txV_csxRFXK*pA!XY;i38B>JiL46k(rPV`B- z1d}SOK}iGbm5PQcE%$7vOmBVH4u$<8+`!*GPdc7JpOu6Ej z69EK9n*8V)A^iWu5xZ$HKMHbfyl)M{BU{b>I>d+TB9#94WyE5W(|^rl;=&Tm3|d8k z!FLH#{ULi`hK_~hFjkX=ULV)dT4nEjvFN;t75di#Y^qE|v1IfsBYYI%kR@IzY2s+< zI}{Xjq^?=@hA;Egw6`@`Ghf$glSa{=g_KKz^~JxEb5McD3=CT=u7;^vPA^lG4}Mh) zhk_c8AnEtO_paLekG?T~$vr+W8 zp$K|nb344>=Pf6?YRCu?LIbYtLwT9db3T|;Nb!9-t6_rl-$ZLgEHOO z0ONBs{8)lJx9!;P>QYo69THg4g>BPs!Wlof6$^2xE1b!6#bzPD+uQEDLweO^4BplW za#P3}_-XyQqyIDz6Ip04OS1*=gaJ+N2i<2WUoMx5>gBI0IpV=_Psp2S!Ks7h)hxkZ zr~q5B&ziLP6nlaR)c*=+_EBxrZN}W{^Hn;Vue9s$FgrRL#4|8SA2qzR1AZ7$XSv3} zx?>S^{yAv1XZx3tF1X7{rsYea@fcf(IcrEMG>`lb!3BGxkq!3QdZRUs)$!%m;(}`W zxO&+g)|JKmI4QgeIYmAk0|VY=oy-=tdO$mm8YNcI5)V>i3I78fK@$`Q0VOz<3Tgc4 zqs?gHSF`vtw_~!=;HP+CPo7|Q6_DWgZ!->v?pA{0%v0OiF>s&W+2Bb;by`JEMp>K7_Z=84{R3W%PK3#WEA*GM*XHMdWjZeyR z%kUnn2u~bf9-KR*$1^06M@kd#KKq8vVy0WzWRZKQ#DRT-LL}7^o3Y`J0JM2}R6}Zr zNMR`heef|KvQe}Yj%nbcNP{iB+2q$K2@pT-$hXxrSww-v$@U*{LDtzqBRDZ@W{yND z`l-g^KL1`6ZkC-IMVwlSsc?Z3=gJlraS6I_uG8vKThOI>6$s&Ek@>_74C z87ro$A%jQvaNCWe`tn^;Ektk=Q;=_k&#Rj zTPvAib4chC^^r8B!*@_gBP`t1AAsrp;2ctoTP)uE^;7E(+O7I-5_3LCDWd?kviOr< z*U9o1204$-=l##%m}i1mM)1HJvPygCZt8)6xCU}{I5i%|(_QqSV_y9ti=U=cEcG8` zA-azUt=FYyzjDkX7UBFhw=?e_fW4nV0YY!&#GHeO(0Dvd+%6$|v1tUouW(3~@E`g# 
zy~*+o=?V954{?0X%Z%p2fO(mPsKu(mNn8(AlgJAal}UMd6z-X!d01|}ejuEqEvpDt*Vda${-`kEKx^|P~OE|Ra z(thX2>$etUxf(N0ZKY(xWUPNO{U*kSu3-6VF8g;TiI(e)fzhbrdWMA&<6Q(?wz8|Q zk$!kwhtjE2A#dI?ugwe~~j(HxEU#um*YGM+fYzUdwh|vO6S8?irSb*_y#Q{qTIn{0Nj)utwKk z)e5GGdbR9kVdh?ezh~&FTF%TjJKbNfeSg!)h0$IeRs)ICN{$$x-vn@Emv`p=s|CP- z>XVW1|NR3Gd&}>S0Hn#tNk8Y2mLkak-K^!o*^SSKVLP%TQcwFE>rb?)=ODcvf52uy z6$_7)d5+UXsVOQ5T%%({(e6u*8C+705Cd8a6fHH^c)cvV|%8}>x#M&M0lJr zkgUdPp_T$7EFM-nub7h$qXWQ?8kc~~4$(@2!3$9K6-Lz!Wwj;FO;*|P#pP#03P;fC z&(Bo{qU6XAOh`dUX)#L(zY>T=kW$rF_#d>k8d0lMNe3OlUE-jX$oJSG3cP>syPkuY zubs>NWD93rejKltX_HdJM()?NYVcQj!Z`}c^jw_5cm!f&ZOzNM3sXvFHN>ghK8P{d z(n?kQFlb~&kbHnCh1kJOj%?Vo+3;^futf9lA-Id`<%$n}&5h17%U-!MB&B@i+JPuW zhaHpccz8iC9rBE>3GXAx5u=?}*5P8f;Qhh9Ua&JM_z)3dN{V%PiN)o>BuWynG5mXD zOn2Er2N5B@bGHEQltPKTYGb>RdHwEwm5aK~y3X0160uL#`mKl)u*~F!x!|YOZxo^A zGHP&tEm8G9pLA-Y3aaEjslvi`arvxhQ&zZhZtug-kmzUB9~pa~gRN)S@%a+<)f@za zLToe=lA%hNCiK-n{);vu-jWGGachB?Y<`aR*idxA`!e@Y9X-xKDU0n=GqF_G-%zwi zVv{CxgdaAi2J%W+bLkw8hbg>=x~@JJKSSR+XH#_Z zZj!GhdYH*JtiOMoO^8c6M5Mh(&72FqN~FH}S^-y+oP{)kg2T__IG*m z`ui)TFmCmB;!s)_Y`G&(ME3h9-9kAq2{Cb(vyBUG=7bx|&I&#PY{p-RnYy-*03I!* z+m&R}T>|Qr1l51*|5=*~)T4kaf&(KWMNj>~erDo)CH*b)g$D4qn^Ke#o8)zLN;9RO ztkbY#j&EMa)loH$iOANFp(VbNOP(UcOb?0oTYXF(4P?Uj7=BmaF3k3V*6CpGlH^BV1bjv$) zONZ>Q^(o`BiTv+&^|teI;1oMdT;)G^O;W;}ju%XBoAJMX9+(jIL4zl);H~J8W+0|4 z5Cf+`R^APdUBik7Cbo2Mq6!pnrFg%SHQ@6UXGUXlN8EL}ZYu%(qkVcJL)$J`kA-;u z3o54vy{@my7XFHpQjSFb=V_Kc?zfs~7QJ(~+;r=e;n*nE?7-kR<)!YSrSu+_P{d2u zSc&nWGmCCcE4eE-KT;@~BSNc!3UXjrLqqYkOMT}Cr$n&bSu<|#3oY4gqbXL#Ltyc@n z$JaQGmBh6qBCGDB#Jbv#JZy(ft+%5S-mow2S%<*PC8ROaf5({EN77u@8Xy)k%FF_0 zbP@+#;D3j{3lHt$#nFWU0XI=%%6(p(5krQeq)1%(D=Evdo*f;52N0M* zpV<&pXf^QoBelM>#=jt%Q!3irfgHK^9#}Yd;C1}-#)x`&x&^?fm<3=j+X1zuB|p z0r0*dr2OU6CItL@eaN{INN^(ertk6@Zkn1oQke5!W+;`S`bEMn&;Tz7v42i(^VkV& znC~GI<2I3zOFQGb(gzd$06eDBJ?KjVYKExgHoR0IHp_HY%|on+IJ7UYT!7>DxKN(; zS?M-C+Yr}|7(u}pzCGSoB60G3-q->&K1STWXqw=fGB|MMv5@rP#WYq1NJJ=Vsy zOY4h|D-L=>J{qI=?~l0g(U?lDttA!71)Tg)mD*MY<(8WnQMQhq$RCZSCJ=H3ryCoz 
zVv-Ls?)~#jg{R;DbsCLG53wcqse(6r)5Ciu*VG~d3<7vTBLb6IPrd+-E>N_e$uojm z0HC9f^UK(X!_dVZUoE>a;4cD2;AQWPVDtn1_Db_(i| zH;sb29RLC}3ZxKXfLyMxfbe_|`1dc~4|yJ-v_XMfzZus%?@oD2A4Y6IJ15^Xy!E_- zh-Cc;%%z^-Z0q6+v1d->2iH#Ua_?jysBjjcJS1pMiMDtvDcJ{Ny!V(07Wb;;D&xPS zbgwJEqhIXO6BbzN!EoL@>s2)u>OD?ZWsCoe$L7;ejF;^jT)xRgC9bZRdlgnKQmR&r zWtf+sOz27<$iL`{UUW@xt@tpU29ogy0a6rGwn|(W_LAEz=*{bsA1a`Cli5Fs*3sbj zNt_{^|J@lOT@6JIz@*-bkNMuh+mb^P0c*E|aUIP+N89-cZ)g2`bWjA_#+6sUs=Nev zuy?=WT*Nm$&DCpm2N-5w5Z@}9k|~xH!LIPl_Z5~@Naz~@o|B1@LkT<(%t)YbxKUC# zLbErb}#tP)NoYG-S_X` z$mE288p?vE$&qK`oM*sD)Q=zVxq3R>41*)RZLlD~vh)eJ`-}*h_Ew_a{o@GGKMJLS z0CUbz-}(MC9gq#2zO#f;`_mEe=98QA?@NE`j6IE`mPt>^723kGYv#xHBG@rQc4uiJ zeq6wWwAU!Jm`xR}5_mT{&?b{~vgLjV~i9e|mJ68qO zxZ?~)fkOK|+&D3b`#Tj=wn7dB@IB-|gk!Y=P00Sn;v;%{3EBI`cc4b}6<2G!<)`gt zI=}s{rf-1#0lT#Zvy;6+ew@Js(Bm!9)DW3p&(eQ`mjh%EZ7Y<^$O-% zvhaZbBHenI<^`Rw|xnai~Ccp@&?lQond1!~(9b%NOY2OqFt&xDF>dM2R`u^~pVeDiZM3xas2xTdf z!5~}qY+=YQqU_s@wXtT&8X82%l8CZLvLyRb)-S@?vkeC0N6YiOf1c;u=e+m5=f0o! zbDqyVC*(%gczaUh&#jY38=B~!g*~#whV1^bdv&$Bhj5CAKR(#-GOaSJ#)yD;*<>Lw zq?it}{R{XK8b&D)`Bk|MNu#Jj@dJV8i@}6l)-|5E5=O~G!tgXaL1g82Unk6?^j_Wd zW4*~^i@g=;^76^_$))*|O8eL4zJryrIveeK4I5^43*pnkroXz^vt;qgCF9Xi1Veq` zqgB%a_>Q|Hwk!43I79~}51d^(dmf6Ub<(sF-#RjAYMC7Ur(umNN8H-nw>MNDv8vMg zqS-hyh+K7|v{gbYz_5_q(i|+oZlafpRs#k}P*4S{rm`=++S1|YG$`;VYqJWVtg2tm z>0wteWd3R`$CE0EtsdyrhAOIVFhrWKCr7i7Sg!gwE#h6c zoxvRs2IO}Oe)(0s73y!2V7%i9i7$Rj_x8#fPwoKxJaw5pB4tbYffW?7A9%$gVJY`A2`R)}k!8zrQ-~OfB#1q4&ArY0@X7#}Vx;c!8Nnj*vi?S3X#4hYo56FwSXQfKv z>dPO3dzE*3VrjZu`Y%jDmE21V2Hrd^UI7{;LpbN(HMqADpc>r1K1(UFEArhu=z>Kj zN@OCbo*V_z;erTll^vLZf%FV(2WJIun&;zDCdR^-x;A3ZGe&D4&gYES3YBwx#9e9< zqH+3|#pbN$KWdVTwgZ*K48%(vYpavc<|jpzmEKrx8uNswb6*)dJ^d*2OKJwr$Dh7J z@$E?O!L4_4VZ!XK&mD4}+@}g38<%uWc-@kLQkBny6cvIKmhVPK1S4NV3Fb z)cO6l7ii1nD|}|=I7Z5R9@|PvD6jc9w5ZcW!=%E^+J3sh?CccDPo#1l!b7(#CR2-Y?w(T+ z1>?Bpr6k8vvw={AMOy8t`~Cw|#`TIVqPBlPn8En~Z|)X#i?TjTE3)QK&Cl!ssbS49^)* zcy=NEr}|gH0y!Pn%s!2Ma_Mg-qrX0G>OY;>YIsqy&?rCjCTc_-2;CN-?1(2T<{IYL 
zsU>G+#Id+r@v`Hf9`3opmg{UPBqpJ<2Q`Yr@|`XMyCz zBo&u>?J`vhX@8>|H73#W{Lq3~wM)=SXB&0u(bA;1S3W&l|B8c2^wDqya4#yT6cS$# z#!5orc7$A#Goa{N0f|74%~j5#b;&n_h8iN3J(Bag<8rX)=QJ=c4{rtZG-VIiPhbu) z6?xVWv7@}%AeoEkoJil##=;&h^K3EOzx=O!n{6n@X1~3uh~uif8N2N8DtfG4uiug3 z6IHHZ7%QzX3$;a_&ub&3?hXxeDPM?$&eh!DpUU7YhnMtO zH7q6JIaN?z z8RzyyR=}Z&bccBpCg!wOf1Xo_?7lm`=(tyQO=@}EtzxNkb_BF+Upz+F(v-53r%0&U zGvL$7-qTZTh^%00t9+PyT_!^JlgWyzf+f63B<#>vq~?l)nACRkN4GKr*hQPFVdNaI zoWVUh=u&RACX*+7=eX@y<9360c96$rV$HPAv0Q$wvo{6LGx|^3RY38a$2}mlUjGay z5YMWoO1jO1P9A-_SN+m6RA#<<2lpT5km#cRvZ8gJ_p3|>4Y+WlO&JsmF4Uf)eb81D z+SfjxGv~_pS&QVo93`_O)&h6BTb|EsLhw%VfY_-{^NcCn=zXVHcw<2-UetK~d)#V5 z6^wtjy{%GY9O3rys!k+LloX?^my6}`PNFJz;M{xYxE>X@kwu3U{+^A7^sNy1&O2@7 zBB_pRZ-adV5#~>dAO)SFO;n4aSkON`3Ikyif1a;7L80-`zee;?HX%QW%sg-%Wqh1b zP}KCY3VbJuOl%EV;6)!Heq?lgD=)xFEZ&_(A~|(YIlk}aRQbdH&iR~N@p|LEb{iqg zFL6~W7hT9utk9T+hi-wl%;8bjjRwldl51{>_p9z*LCN{8BCz?k@dxpqD8Ot>#e!vW#K?ZPvatG4&m}; zhL#pg`e=5aearW{QtKI?xeiGea)yiOtu)46w=%ChuQH%ELV)VCj|*qon%qlIn;TkH zY zyh#C;c_~0Nofg%+VktganZT6%%H3n7Jqip3_zWT6gHtA9RB495uF#j;-j`6lX3`zQ z%^3l!ja~~cXqmp;Js-4^JXiwyCVlPw1~NmU EeKwzJk%}r}FTabL2=Um*ip5t8c zyskjUT)DnSBru3lR0yFb`)6cA`0B@#%4D$k{RBlL(gw_bW#{k@?-PFxC^wnoy9pRx zF#(XlC^&;u?3+xpD+~O11Ci5|L>MiL1QLztBi4T8aqTdQBCSiy4olGKBL6xt9BLpTMhdN7J zMGI_xK6YX3hpPr>E=G@i>0@)+8oYg4nchv6(f+j%JNhL4NHW5ey|8Xg?&}>MZ!<;A zw$y7m?)4u>NBY{rro`?fXMGCnl_1Wso8vk(0vZJSawvTIN1Xy-TkFOSdfxlQuHa-( zRYU!;W=VL)W))mKG0`xO&cET{TKbr(fd<%F1g&P2Hl`3`CKdW<*QSq;aXt4Vr%|uZ zx8>2j67juBpCET!Gs>QYLzKLukxqw6Y4_5gW%>QDK0fya(H0cF<{X81dXK7&Av`Qd zoNdoA<^DyxgN;S}(e|K1_>TDFQDTA+O5Lx}Wsv+3?~3yMQ5l_W{BHkT23naC+ek(a z{;mevi8IS$gxilo?}%iKLP30Mbu?#u0?RKAc#X@~c;3am9HN`W*>I2XwZ5dk~6-J zu!k9o+0JgL4T2U_#s0l93INRaG}s}9pEP~JlK%W3_svD4f0*=)oiMACI9QFG{Y(I z2Fzk_00DC=Q*wtO7u)L(?`7{de>A4mTm|Nu&9Nz5src5nN9} z@I3^Xua654t23Y`5x|-wo1-cP?aC~>aVyT$n0pWEn!_fP$?kL*3vMXnj#R7KzTx&^ zx3%V8{k#8We4khdC979Ie&ep&d;??-UZq6tcw1VYuo7j11|T}%zc%$; zPSHl;;y?_cSS39ozrXS+x1S-w_LgZHtyNx9%n@78b;?>^RJb?@jrb&g~}{na~FI^i5v8 
zMr+XYh-QR)nNRsKOK$S|(&4m@Chsw6nlp7AG53e6(xK`JrP3(5 zjVTjRPDf`Up+}|atkSQ1zfdAGRDL;LK`(>ZlIhgl!9MtdWI=55=~fNTW)*?qxyLB{ zj#(0_VG{T!nKWw`b0teue;YZ%+wX=fAEi}5Ok)a~K1A1=!j7@)WiVxxwCXkLnF2?7 zR076=-Fu~~7yrU%bv`_$fcktT8Ds-mucQogHJ!6?=iYKE%aUzukl&q$XcIb|VOG>L zLgUsh*fil9uc|e(N~ht_C-%Mfr^fs`=xDiF#2X5uuZFy5gkM7ti-pn2{if!?E?6w5 zssGY_;J5RD6YnrElvwp+@aH znl(xzZodwD4?8ORbbIebXI()Nf^@Pl8$)55iQ77UQ24_=V4;+D$5hS78GL>_7wy8i zMm|y+ic(Zeio7tKLqwKblROk!{C*CItW!3foppLOlknLZQw zZ7BthWY#WSSs(a_$}fx_6%@NnH8-n{9bLAXO|Z+DkQz#5(*`$G>9li=tFMvQoGwA5 zdb(v_?)z-0@@oO}m5p5SswI2+y{G2gX&Rb0zmm(s(%h#}%#U977wj!=&S!+5H2A3i z{PYTE(9lOn`cHnWzcWj*8!ufsu`((0ZPFB2nX>H@d98!jmtir{jPD|Jm7?ZOoX3nf zRA`S6t@Y=A*i;aSJQGPbw*vc^6q2~&8WH6} zq4vt?)*90U^!;pgKotj!uRWJqC(2N=1;hM2`uHN&D}rC&bz(n0 ze)3v^hT~{g@xh#dwL~-LCnFe_0mJT#u5k|o|BHcheQpWFr7Y*g`BvE)mn_-6?DS`@ z)7A4Bf&Ar$bUlULLS#nQ%SjkI=eYbh)C5q%;YDi_2jq+VAv$tgLTJr^-W517|D|Lc z<#aUcE&Z288$D?Bk}31s+W$Lp9PNX2fjW~zP!KWv$Gc5#l=s#-gMMqp0O9$Yd=ZED zN@PAfYAe1Ji7Q%&Y?q3wTQ_?x8FL{(CC75Q^YeFc;;U?OTkFh=NpU`n4&#~X6RzV8 z&g~NimG-ymeOn1qpyrh+drF%&yiSi14sJ#8 z=?UpXb(@;NxBh#f(F;B4HH56`8g@N+I)2kdIX`hsnos$t7>2g_{`xIo(RJ~za?qQY zEmf!A@c@Dt)H_mX!@P0r2PmieU9ys($E}(IH1<(S&j&o*#d$zc6 z8Oc>UZ5a}*Zlbgekt(-MZ=V!3K4{#IP2z~4ky%&tKeB|LyYOQN7vj4;O=udW!{DKW z4_9<~S@8B+<3}n4|*Bc=&-hwFtT}a6XXU) zMeAdpY9q9FVrNwuwp8svG+(hvje=6nFtp~BQV>MP;_+TD?2kbBf_RPP5z3V5yeLhk zPay))5Rua7j;IGoVY*>y2?DOQzlQGv7x5qr1;M}_W%GGqJ~f~n8yhA zY9}Wf|KmN=<21}t|8eELV@3@1)xI?CtYDO@AfFt&wK2*t7{I{(*5+| z&RaotHt2cHM0m)*eyG!a9Oh9&LH?RaR}1o=Wh~DY_UbC88Ir>OOc>xN4F(ycqpTK7 z+I1Yl>%Bat7|F#S!>dMGNyX#1M+f@7UIbvD22(7s8a4~}ubP+3IC1D9@@&M);;Bi0 z8#4fHL5dK2c_&hjx-)G|8@H9}70BagZSUO9^KTS346?C<_wjOtgkz3?Z*&w@MvrnV zgZHDeNqeeZT-g1w8Lj<>QP3qxP9|?A0nkuh2%(xNvzt-4*H@E7)dc?&C4deXhGK!+ zl^%ZW4pL8BVQ?ic>>T)a4gw-YqtO+ z#4K@JtcH{l(XZOfU#L+0j$io%0Gs87 tpK)>FaMEq9`+Pcjr4qV-n&6Mn7$UbOq>IL>-`47^O-=bOqD&DP@;?T@Y-s=h literal 0 HcmV?d00001 diff --git a/images/unsloth logo white text.png b/images/unsloth logo white text.png new file mode 100644 index 
0000000000000000000000000000000000000000..2e37c7b19d01b31ffc9a2c40c0395d25d18b5b9c GIT binary patch literal 58964 zcmYgYWmHsa+nymuknR{#5RsG=1O^;HBqWvYZlpVfp`}Y;0O^+Q6hyj_7Nkq0``aEp z=l#~=hl?M~Gkf20)nlllf;0{mB^C$-!jX9aR|bJl13)0KFa!hm4MIii1O%c3$-u=` z-9GH5V}2lByXf^j*gx^1hJFl0D8unfX7Ed5$VnoZ&!^QH4T(R`Xc>l@M;K1SE14`K zT8s{6Mj=vRhWW1ba~pT=!6^${!c}`NXBl>H=l2JMc@75V(~suzuZiWVeYde79g4F_ zo$GhSg9oeSk!M&0H^O(E+5DW*KZQ-aUX_G**D2*4TngIUm%FMgtJ=pqG+A%x-mo4J z^Er#mbw~vrqm5|zLhl%mBGsHlsES}@?2EY)U12_Cke{ZvbTNn*m6%ZURPv zeb71VcUtp+G$$8keGe>)pL0ME$+5f^G${Pg$P|7!jW%xel8vXQS1-;zvNdYpipe{r zU+^}``l`ytj)rI{Y00Ork#jHI;#Rk}Zg|jxJ;J-UJigycPPVTx-T9z`dc4u(^xQqO z>d7a&BR+gI@!vo5k79NRqmU?yL|z;LgZPeLIj#pJMj0^nRz@1-SyAv|s*jG*CthQ1 zq9gJA7H6(g+bs9UKFj0IWY(oeFR5A)vdZ^7X1L<7){yum)=Y=>Jh9@vHmn^5e=z#9 zxhM?VkiByFjn|U{Uz{#X{w8cLS_e}i17jkC^)=hM5VH5Chvu{!?4a+={3`fud4+qJWoDuq&8dyXR8EG{Jneyo$0voUrhEUKIBE7ceq0x;7oKlc&anWcK#M9 z4F->a{*^lnTc6km@1Cd=mn9OPJ}3~m2#9w!R~Noq)6!+`R7smE^E5p@?>mZck8-;( zWvG{CxY>aWu8jKV7EXbMhNmge<+yR;wg0WHkwkoY^R92!=!MhZ%T~PiLXGYkDFL(3 zAJyogk(VPcDUO~$%SJ?C!`vWPJ1#@)>7%UUUqBW}U8OF%NbTLLL-IH1xu^6aqN882 zBD^jVIUkXFGGllcu3_p8qzao{V$ihg$qEp5h*(m{`89^Cw>2;g320CjOn|X|mo^<_ zi@q>RXy4iITW#Ra64Vk^tX1NPj+ccuBqREyNf0~C?=p=_umkf1ix1JDfnX?{(%G;7 zk<_bhd{6V6>w}D&m1Nmz%5myvR#Mp)5jW>46@ickAf2*Id9>66HPL+kM&R%N`J{%N zLF^8>Z{i!1*?Lcj$ojSB~{V|y$X z>Y}0DSPYkVe0B46$0>EX>Ru-;gFPwtJq|rqiLH+YUI`<-$ow!E(EgPETj?~C0E%K% zxh33eJ2b)9k4~u2Cnqg14aoA}3$>p&qe4Vf1Y@<)kR}ave%FaC{8c7vsdIP22D3>$ z#2BqV1)kW^x@9EpWaJ9L=DhTuO8GEIDE>2{=v}L2=*{8Z@X1^e;f2;S;`MBg-@Lqe zfI6U0GKhhM*;{SMeG@U{SZ#gp%%kMrR5QY#0{QJP+&u-q`jR8qA-hSsH&qejqn*?f@k(>R58;xfRX%VX+}yZbYWm8#G=KIHU8(xiMf?pi{QQ1>gNK$bd*b$|TC$ z^byX4?Wlgrt1%W`ER-=en!QzOXTRp|pK$btry;&O3} zXfwv7)CN`z#BF+$20hO7I_k(3c-GvfzHszy%Q*UHC^U%Zf6v7HoxZv-!f6L2pIssK zh`Jb=2an(0DiG$qH!L?N{=T1%^_5yP{RhO8D#+_fpyfvx8t2yo#<{>gNdj ze`k#(BU+Ue=65a3FHl%?G|236uFzu^S+tQRT*Y0w>uh^qG*HUMVZS(fLPnn!3v$XR zKNXb{Q*`VnvC_NPOf-43*fiAK#4)UGLcQqFHY-6q67k=gR3erb!Ph{93wM831>R^v 
zPGA9RI1E}!JQN9YcCGROdN?Kn6cmE2u-%w{q9Y6`FEC)<0WI`2U3vNuW-Ysn3=`)Xdzd;01fPGe4|g}C}$vOdWS*~=rT0rBF~v;4CW-wQLm^D14n zNKHJJAGeAlAJ2AwNngZ}*4E`#zOuu>O6W@)*%tj^@%dF5!%HZknxz7~Fpc4*v%tad zOf-QAZ{p{72F(T$1SSL8#3OoWvGU1V{a;K{Vw5djwmb-TdtxU|3YId~A>}z3uB9D2 zGkO|qp*=tdtv(oTE^YlVkUcQ7xO#3pL6{o#>Yjmlh8derv+)(zV1m9*&z(vU)k|61 zf?2bxXkoF-{H~W*uF;g4s9s_|MNdp^TSM>7g{_eK|LHIZ+ecpz`n9`sC==kQ*DrO5 zz*1={(%~(*T%yeHct_+W!J65g6rTd^rS>~N`g=q>x8$w+My`G~Jm{XX6}-vUOTWw0 z2zT|`emVE88VAfm@Nau5DN2kC<1*X`!SsX?94`yS9xc}q)LcM5+hE4Df{wM5n1ms& zV2f~!NDHWRiBwi2T;Ca&0KW2d!_{^Bt3DTu%&-oP?h_OQyy0{%^*0 zKstUn`{f~!>=H@9OqN00CeF3@4=9Q@{)o#}kMjvx|$V*;Ynf><3kp=^D+=y*w$^fd1jA1+6z{s3G1^)_ga&rxO@G{`WdglTSV zM`0w=+JK9Hdeb-sZlqnR0@`?kSd25;%CZr4O0&OP=gAWOP4Y<*uZPJbfoJQt{lz1x z&9<~zv-@HPj;}|SEB_!iIy445#(-}{sH(tOk~ZnCz`Dd>1(lZxEG%jG@}`)hjm~~t z*hJE-*)rMh9ktAk2di5XcInvqb+X@u`|JK4F7(ocdhEdg2a$ z^W~vraapr(X`ad)%t?{q-$~7Q5QNl&>L>2eonO(s{GNzV_CCtO#kZFN?P`aWRR`*^{r)9iaH% zK>AWVG2#|K%rr(ifZJ($hW`auLaV1R(-j`?AzH-XU9*pYqEZ7dqpA$RHpWETMXmgP&V&M;GmLVhg1KE|CIT-@P z$6XS}I6(xy4E268qERY%=-6Y)KMmJmTM>Y(9xVGEI&X zJ5*g=Ht_jXl6Q8IUhuTMg5#7LWwR>NsBLc-C=W}ke#8_A4UK+9h!f#@kB={aJO4d% z7p2S-=^R2Tp&ZE{4Ax&ptGlUf0@$-nqd~Kb+7*%`)WQ4wqidw^!W*c^9}AH?hQfQk z2tBlN8up-xg3t}b2wP1I5tQ;`zy1`(SN51~x|fxIHDHn%rPQ_W5lWmCSj1Ojg0!f1 zyDv|gv#Ig&?zA*^^UtvZ-s6^%639*BGN1yxvyGim@$6ADj@Bd8}}VAC{2Q`ro1!N&zBb0b7;FATmZ^+&a?92th; zzJ&jaOMX0KAb&M}-vHB~6je3RT(3e1*6$L>@Un$$6pn0y236XbXxb^LL>2&@fBeRO z8w9Ou=d0$T;sd`29t6xAubBxXsl9=`0g4JuW75q;w^0q$Ho|GrDtybWr}@guFY ziUy@_gBA{~Qd#&O%r-1g$>Zrgy9ZOv4*WYv)!#Csl|se#3KI#^e#%04nv%n@`NxH9 zQs@a|exg?MMHWV|IGe?{;TLk}A}+Do5i+3J8<1lRh9#L_#-cO7So?YM1E3s@Vk8G1 zbQ4jdaIHTXlJ)yc?f8@Ey_9P2BhV@jKa|n*o9@ySyX1red-Dxw@ZbhbUmL9lz0kYA zM5&au-_GK0Ceya~;wDJ5ijJ83&#t0-z_?SGRX8h7{wV$RomkRYo`-0NnSQpCoG<#x zFodgwZ;$TNcg$pb+96)4H^PVtAHUigH-I(3O9JB9dg=O|?Fo=B1SDLM6V3oIw1rx= zmL={Owe>_<*|o{;qj2TpdyoAZd?n2$yuZzf^;At&`T4(C^fhCEL-@OTeOX?$(9|5S zy!cmEK^!X)yB#7p6E;)}G=@u%31OuIgD9hKyL1eqF8JRW{VH0q=uuj-)kqIl<1k)w 
z$g2-1J(orsB2CA(!88bK{~Q5HXMZ=*kK5UxH?vE(8zWK9lFg@iPn|I|aVIGo8pMQB zP!2J{jA4(bq~2Tnev*gDF{7s}`R7PMbjFg9S*ljFLey$kbiQeno;IphUx&LkIHmcu zv!Nbq(F84{VzgwuWyLg@GW}%>FrY1v;uTo1Dsun+)NaCZUd1E+M28=rT#~G7u zxn~P2;r|{9P3#FPTGx#D%noSKaA*{SG4&??g65>OiFk>N$vQ?Ih$Iz$8ef%l<0MY2JJ@ zkfJ6XRmv}kA$k8VIs9laX zP-YMpWymD&ra`I_*o&w$;z4Doy!R^TT<|Nb51zHU5|6atj_}~{b~*cn7H8Inx+`Il zCz>0pZoL;R=bv|*O}sp4n85k_9UYXTAU!7W;h<`lHQrne${69cFPTrb&8${_^>FmM z4Vmqxx$aT>d%zz}8b>`#-N=?xF0X0yZFPgH|+b!L3cXE!F5SWIa1 z?G@FOzR3}W4Xt>#-@4}-zF(B^Yd?JU@SEzDk=(YMqvE_TbiQjk#{b)-IS+PYn$hU= zi)vK(>67V&A)Kc^NBf~l1#ORWzow&9DDKhib`Cx=pYPGi)}(lWdP3;eGxh^zj1=rl z@M(e7alsgvk-PrB;~|XK31!>&@8T1 zmuJGr|5>vl&xGi<&Xi(N@0NX0^Av})RLkG=$uRQ8IP6BTh1ELB#@i|p2P)a$734eR@@PeY->hTVAwv$c(3ZmG!;bPXS>>rrcDm+yZc!ay=C#C zhG~s_E(_1&HNU>TjislZ)Q1I>LQJt+=R)vNhtgv0-KA;mh zm4!`*QN~MVZ))oX?ol=x)SZeb(L9n=v0$R?XVotAT0i%>v7Em>xvF=#ZAz_hT4c%x zRkyNon0Mugu|M6Mmvy3_iG5}GKe+RgBtRS;*^n_TKk!ZESwbo92s7eMBl&-y z>_?Z}8%33{!rv%e#RAE(AUIxxD#uDUxI$16k+_WHK&^Ze(mIn>*|c#o#Gl3ztRXho z+n4#QEH`%6&X7PFtdkWvD8O;m_IL=vuJO%!pJjIT|9!mu9%ZU|?aDDKuYgmf+Ytp6 zX7J=)9eIm1b%+!`clEVd9|cXDs}ti08}zJt>6zwo$DYV;AOD-q6kHL zejKKGs+#CMH~vx{$2=QrdmYiqeOQqBiqiaC2)5JYl;al#@0ObacRm^zxH^2bdi(;F zPm)0(eD}rmO^7qV20X>rXyMF)CZr2W_J;vQ42a3%UP7k(YH=4#S-2Y z4RdR@YPx!jkdT%$q>JeOS@j-_D(yhieR1!h-Q{_4#W<0E<*GbDf$ma9>ekgB}z@nx!F`L@({w9PKufg{)Kl!>Z&PEfBa-?UyDM|kFD74a~ zIL$`h`0b*S>VLqw;#rle$7Ruo3%+U2sna$HXEd2Brw*cUdv_oG129-?opHl4-jbyE zoUV-$R&$n0H2r$XoA_EkI&X2N)Z5M)!DqEn-ujU`H^#iZct)tYx*FNgpmBAeB(w+;xEy{>6&ctKO{M9+jpw*q?7WUr)1BR8|g%q!O@P>hJI8 zQ~I)0H~&`lJ|!HajhF*mAl0u12Ehg72eER@0sKzu{VnRD(%rs4vX9Z7(r zGn(6}1*u^!ep4>RMmpZwb~2@PKHYYkd$0Ma8celTo2(KeK}9UWO!{eeF$qP>7@|v^xpL0f- zw*#=m1ihO{#OnAjN>4Jzg$H!AV-SJFnY!WV4w1!U&KNz z%a8UvGjjcdi&1$?hy9@4-DPw7=H6zd4KnVDB~U>T9>P%b^63(dz_TS^TD#T@=Ix!G z(U}=Ua4?3`_QX9s?-LTyyK6`JO@}=i0h8|>nc7FaXTQQZTiZL|Ve^T9u`gowLemoh zRQsLeE?nXJb9QYl2TwWGo3xMM#M+8QWgqRPLoA1%zO7SB8ssHY4*R!~P}qd5ma%=0 z?6Bf&p~!C^7q>9g(U&E2xmeqFKrQ327PyhRFofTdGnD0i;ZDT!M%+$*=k!lwCvn0K 
zZv#oA)*X_DdTD*BIoqy3F|Vl4u&Ys68Xbs|M$Laj=l@Qs`jd&s(_jp?7eT>; z!DBiwvisoL&3lXC*bT4S$%_+icm3vL$C ze_?J{b{$5GTZ`R4y6LVGZ1cv<6h)9B&e?Cs;YBLO%TtG^WvIKlO8FX8)C-PUs#5%z z){OKx<1NcCmVoCGxsl>Y%VmpBCR@4{0}(qaO*fdXb;%5BJ2c8B;5UbfVh^Qfq(1e#@flg`=HrN1st#wusiK%|j1 zhfF8=MDnpy@wniHfi`sFd3m`5QOGG!tvj%ckx~VwuM*E3qbUFbC_a8CRKNmd@EKu-e;5y7``tJ{H zjdsi9N_q5oF2JZ5=8+ask(3Ej33D}t`KKqhVFz4marr+lwll|D$vy`Zl|5@aJmnG7 zb{R*?T^m_m$ObMpKn9L3uSYPMzA^R#1>SRV)$dIZs z!2C|acOTHP@7}H^g=6eTm0Hf`6qbBFAbjvH$ER-5VY#;Xfk?7)P?6W3sRDsWi9jQpu$sl|Up=voi&TGK5Xn++EqY zCb8eDaxtC+5dP=akaeZJkF_-eN-xR9<$yt1{slz$ydU9eSDC>EMVK3CR zyR2zHOvmM$WH@OaDy?^4PmSApFQ>ysHs0ar*Msp{J*5%CEB#55v&gxh+r3uDyrb%Q z-N=76_-#{}!hf>>^9D+EIeHCzY0iEw!U!!e^^SAiH+;X)wQsik(8&eT`%=$8M@bBh>`;8JZsd4k*yem1^~Dwv~hJ`smG@HzBqQjTSgY z7-;wJB0;fVa%EFZUUYAwNv{2jhsdIzzm-qC^ z&)lCxJzY$5)SDd5{`@6DXr`#-;kw6ykW#~05W zlRy(hsg1VmwJP6;f^RxXA_3~@HSWOe#1Bu1QT{MyMl2=ZA|3LnXavVV^yXM)AekFA zTI4j@2lqRp&xI;xvbBztqONWf)MW+?j4A$8p!woWD{4)MIL;5)=1&HtZN?B8QzdP0 z`^q`$d5KN}{s&tn(|1pL#GYtinwvcYaiSs`@4;r^`a&pNw{}r`bP*2Dxod*RqeQ{1 z4Me<9KuLs{zS^TX<}a>CC{)NlS@;a-2vRzCGvEpq~_vI5kf z1mG4zBjs?ER7qor>Tf+Jqqv=~K)-f=15xXcR>6X!L>8PBERUl>0V^nM$!!(K3;5^Z zs&O&yh@IEUTl8oYQov!$sjO5AzyDT+q87P1o*#YcNJ~uWKYVjca{KH+4G;_7o0%m^ zB1lVH&fcX@4SCt7$lnhR!wUP)(M!yqKeSs+@8ebEaV6xkIJz3R*WUeW0!Sx}!4|eb z233J{H^~X3%DW4dcu{+m(A2N2fjT5NStmQ?ZBt`J4Y>A`MQVp@J!f(d@ivoFXWgBa zsS9AEJSGmtqb=jxFc9nHVm_fVVh48dbn>q)70r;*T9ncyww6?%6%y)c`JENz~%Fg#T<#yb|%?G^R3MM3=h`dV?7-5 zgL9vJ`&8&#(%cN)Tv{~LINe?2ddQs@`G{9NJ{l_gx_I1p{1#au@MC|^^ZPGx = zzNJnHxLQ1P(d{Z__34S$T@xXC56(TV)5zt+kXFCy{NWsa4N<0{!p%-G18Fj<$ zz2iWxJzZGyiqQ<-RNa)f{iQO}*6@$GF8+m+O+s;tK)mXH1dAi3d4-u!+_Q5YVU~7A zEc2zWnwcSP#3Pk*ujZ<;gv^Km{`S74-*Elr+bI#5)cVa#Px^=z+7K-lMxdoIT5Dx_ z^DnF@;eEkpSG!f}b++lxuR9o7ljVph%fB<6Mwp!5eCuTh{doj{94xG9C?S7y#G@5uL;m*9}3YNhxsd7AWa_$V6}(- zhIA++v-f)_MytYmffT}!(U!dK7Y@+iDE!k$f6CQ*f&ocX*WT{3K!6*qL>~0-eyjQo zu-G)KS>D$_hbWmxoKRuXI*>iUnRvSq_5g<$f=$XcBDcxea;(fNAb`1O`$Fg8{F7Z6 z^@5R>>N^BE8Ea9twQ?6uzO5ahe=NfvHu*k8oiP9ix6B&3Gr{XqSSs%ZrogjfLOSSl 
zPZ|7do&EN=J~9(*GB7lp?hN<{8=U3ovP5HqAD3u15CTxYJ`vZF)V~j~A>cYfTUmt= z80YAlU6hdl0E$QVbGB($CA*z}4?)H5>>0z-7P7~a+vh1_hhs1@i3sUvjkRetd_?@s zByiSFjKpt)U?X0@qU$Tcg=95O0;~Amy{80Up7% zcoi=9U&g;Nkbj*>%R$skgHH6y8#vRbxN{PXWA^{fL$EWT3}r2sHxz1*)^l?fuu7=1~l{jaT@;S&mT)y{`7^>JNLU__)oTJaIZpyKvz)GVY^#H7{%?CsN0KCcm*us{Tr>d0r@eJD|9GU33ypd^Tnkq57=P&X)rP&*mL#sz>Q265@?clUKQ$ zTV?&zEndVIu`fAqz9S_0n~QiA`omEr?>8NYmNNe=%+J7@ydV-?VJFgq{W3?+krVhWYP&8tYZh$+ zlnXV|j-|9cCkMCputlnRD9G_QRWJG8*f%%F5b!TA{^b`-r&96Iwv!oj!gsANAVQ#C zEe$#`0jv!@*c4MJcNCzWQ4g%HmfOULv{QhCnD!=Iyu8y#p-v$^`~Lf+SK!-K+Pn7S zt?^2qo6Blto`tEa$XD7NXNP@L?vW(1T`?U1?;^ar81@~>k&gbj+Rw-&SUPO(RlEJL zx$!}=9~(?n=3pqPs3`2P+NI@!2WoObME5ZQ>t{zi9mEEkffZFTp(AdwAYSvan3RMl zo!C-Jhg;8CBm@Ic@URP>gLXIHV3JSZc8cKe0^yW;z{?9|_>{j=&|^zJIl?|7saI); zURJ+?H4lIk^%f83Wv5|PW#OA3<7D2EbMJVe_Ua2j-{7{KV43&5yYYAVykKTW;o)b> ztyCQkm*$_h-$qN)6c5D$8pt?=5yaNNJbl06l>`mF*b+sz8YGz5PmUt3cS8qCx0^kp z3q*SG_!8faf5=e75%dT*NTs{q36!Y=z>N(0Vwtkg^~rY-PC*Z15u!4v@_1z=&$bJ4 zs`1bQMp{d-1n@*a(EeiV(+_b3do%XBfT@u%CMfkMdF5LH5eLfT91G@60hNc*&Y+V& zJ)?>SF#~}snWB`O;H+&>UmNZSIp%n)(Pd>_*&ag7rUtY29)9U}?`MEkQNJ1`Tz%P& zl;B+Wg7j`UTx_1VO8H$hDgIRGm50~kuf(9{zBfg47E^?hvJb|pP6xW8tNEg03jW&9 zk5>o@s3OH~tYtir#!~j-33W(!eX={0 zz~c?y@>xrr^iq+FOc&|ye}Ya+J+zK(Exm+)T&VMl3QFqB_Z=@<9^-4;gqmrrfFok3N57ul>9Ty~1du0@NA- z+tv$H>-l)B}e*8hJ zO{9p3YWLr_uz$Eqxt)QLs-7e=c&{6Q0S|I^lW zwAgL(@yxu;F|hS=@;;Vq?yG)my*wkq+*RCQz(|<*>sJ22m7Z2S64s(Dpa(^&4zJ$Z zT+YDQ<^Tcj{x8&|By%hzlLOX7hJhSe=?2aWN#KqUbIZ?pl4GFYdXI+1Xaxxo)DvKj zP|;A7bv$c+-^0&jMu4o?>Szf5qSR4!b)f7OBc7XD6g6WN`!}N-H1)abw{UGq=y%;3 z0n2yK_XpbTSQZsW5tDohw=*M*vg+O!BG1V7+jI;hn@q3NKE4nDADV&vL#h;lAJgj8 zi@HAwtK((>9pNcFY&~ebLKzJIyxgIKfqq)aM$B+B`7qITyh$SauY^<@l4Dgs54N#D zo|;G#jPON3(VoI{@6K@vWn47xA|jeb`4(sbp?r^OB(zR{_ZdTt@?o4*|maq z%9?QTM2_E|(q-CKV9Vn~}`o3ELop9+#k2U)-C5 z%W{`44W-)YKhgC*Q7df?w_#{z@K3pv$e04V019LvCf;v#y2|52ZRfAGeuw>r4a2Rm zT$y3F%c)ha0)F=!OK~Ma=m^)%M-e#a&&O8d^3#epf#4D!nZ{tRj*5U$>UA*%&P&6f ztiTPjXjVdLPhIyZh++RcKczViPn^iaIx_XfTRAMBbd{jf>j{20p=rK zgkKAh9e`Ga7B%f&MGWSe;cV 
zZ{Keq`|K|XxrD%sz8xSkR}t2)Ec193$33fFIO#|$RGN$B=>S2v5YqQ)Nw^l95m2hS z7apif9y{y+A!nq)f%|RhBm+9#9-!0p6Kh#lqEk{*a@jA#c3aLCXMC>qlmLlGx5Yyv zs@=z}Z+kXq*}i4jwrMX|V9ARJFm5vD5~N>NLr!nP#Xe^{KgK%6n(Oo~aw^V{MiuOT zfM#^bMet9c@xADWo|C!zy1nl}a0uzWS_D#OL?eijDiylM+`3GLeoPn=??s)$Y^cEP z91h1IdB5;@hb!x-{UeSc`Ki|x;60Qc4f^39d!);Q$wT2QO7el`ykY4sv-i4!ak^rp zX+mQdr7f*e)6$fbSMHNmg{*vd>Nu7fvTVHA?4EkQuBs#!JmK6~DH5`JeK*8fU%Z)c zP+0WUSB=~D2h&cbyX(7f@*s%Bp9u1HY3L0L72Gy!5caR@kHFHbevDko=a70y=3-mG8Mpx?|Y^rG4+|p98oB4_Q=%m5`87 ze|LMO`5bEquhN%6@UV2|Mf=!Z1Sv9n`XR2j**6inAQGzQFZTGc= z)CBadbh&Tw={L}D|HozZ62jODGV2AUw5XP;zg|Nw!(khmYJ|{5`#x^eq)gWQUcAJ^?aYW27V2sJ$~l{a^l;m&nH_s7RC!W?^NKs|C|v6CSS~y_}-w z{r2{1#|aIoj1~9rRS(cWA(ckGnp2hbFPLe@{H>U@WFeF_5qIakVr=~y_=|vFmiofP z(WTY*9d3Asj_U%+YH#!z$=(JId0?g8V!|6UDrPKm-#3kF(OuvVc2#kRr!Mq=R)aCx zPF!9%g)mr&LGH=bMa=gjvR9*WY!BnOvWxBcKjm9Vyql1<+U24Yx?t+4XFW*V}Xr^^s(-YTsFs0F= zXARAD2Lu`}GM$5rno(4pKhuvYMCS5~i64^wWmtr-pSw$|DiFOk0&QCpcsMwilKF9k z3RWqi0#}J7wIi&JnuGGT&euJjH-Ubfv7v+rIEfn(^4LZ(J9O_9m$$w}2$VO%%DMZ{ zvX?wjNfiSza5~?S=@v{z?S1O!*qp1gYd+l{1p>w#!5ckqK1wPol?-kz z&0S#oX6!WF#@L7|JY9<~k&<*om(IOF1&I}9=L*)GvFwg;xUc3|$J_&x>mgM`E{}Yx z;$%Q!kiX|=;HNhy)Vl|EDPRmBiyrErM6X70SrHFYLwN-PLn%;3NJAqu)qejF zqihI~Y@mbL(vU*qO3}d3SJk%gLzpWh%&< z@|Eyj^+%pfrAfa-=vB>Exp5iPlUUbL4tdN51dJ)*uOxFM$QUpM(Y+_kQ7^e+XbL6= zgp>y$quIA<=3$&ir0-1)$Q|Kyr6?2XUk44cW#GiFoj>;D4w{b?kS}PLBXB*~9jR8| zVAlhy^*7}gE^An)Jl-5-wDbAVfXm4;KM=fgAsCCf@Gv^<=SLK(1;;^cMcd{>sX=7R z`|q2)uB9!@0oY!{0lq~*zs6Aeym%kOf}BoFSi4F*-g|9YOhgR++kxmM5&({`$Mm0V z%J)yTx`@qnnB4|_=Um=(_NzFUX2hBBxxO63+8rhlREH&}5o^5KpDqwQ5wA_|Ys}{_5wf1Wb(hsD9?NIqNF< z0-Jh=fxd^CB^NBDtn)Z0z;YR7nDntH`q-+r_sJ)bLLT*G{+;C;*UPM?`U%W`9nmjb z9EDklwjV;Cj=}mK_(iRxkK>oI|PM3Ur z`x_71?nHBq04Z?=n8g#KJGnRZY{ z;MI<+oYsSeaQwgU4xD7l?cks$g}fKVtC-{@?8)C3~22`*V1)qy|f%dIF2cp@h= zf@c*q{j;3Yy*L*EXcE|Vds436>UFVx;KUHwri{)$Y&J`I2B2--gAC5y6uQ6~34~ zX^jVAzx~A@Vh-SPEmA;ZqS2Dv-eIO~F8&zu_ra1CXFXH8Hi-2@)eq}Tv7sb9_<%C< zX+B;8O9gG18XRF~@HCM}+1wg$rYB|HHexIgleKVs4psN;9ATSrao9QEH#QL-#efWgbk6Dy?g 
z08t5eKk1^*zoIyk-EzGx%v;36I z(cMTfkGK3!h%4 z?N20@vdwAl?0Im@`j3i^WSK#6rAJ2Xe&F+?%)1h22>S!|cn2c#90EDTdV~AGEh4wBZZv$VlAC0cFpF?F5aKl6dxF~SyLSU|d$~3M?BV6mP zcU)%qvjz6Sw$8YZa4EG7xHyyg-4(zeRr!yq2mQ4*w zS>rbAy!%GFrc>PD<`MMv!Y~>x#+sc=zAz(lobpi_Gr1DDg+Ne15b9)A|07v^h4Jr! zM66E<91LorOLc+k2A%o1Bbw6Y5p{dhfEHjt^?f{Y;(?)q+nG%@rV(c%hfdKw@B;#$ zty%Gz?>)4@Qt;5kQN)U2j1rXC0Z|g~!&3LmA4y|odi|0aJZ`P5f_A zP6gD-_Cy3oXDhu32Te$70O6Y5`Ld9MxRq?l@Si}I$aE05GaOhh`d{N8SuC`0Z6#bt zJb(VYV3THA{*z)qI@?#V+jFDRE_2bY$jExsBHETB$_kwpUmfdK?Dq4S?XTP%1mXmK zf5P_^ofa&w3ee2WJSok{|G8JBfa5F$3IxuR^S;N+fG~TKDOa#^`bBAYHm&uTi0l6R zM^b{*vqCJG=?GH%eAxGP)q}eRU)&l-fuVm=43jm@?s#B z&#HPqTbzR5ntXeE`>l(V_4f{Ew|VizoR|xUmTU9Xep4&YVYa(+4U6L2 zJu|YSe!i)x8{Q&x?)u%@)^~?!2};S)8 zH;Eiw~g3$pA~nzCs^fH9gV$#Nv^knA6tRwQ&^h>xP|c6J+adnqw}S^TeGLHz3eC= zP*T5d5=x_9d`~~yNt){0i3^1Z2zK^mAg(_%1fvg9n0<&5Sdh7xZ}8j%l0o~zwg4Wt zQCoG*csR|H;XI|0nrwRL1N`oL(?P3M&C`?y0OnUN`CPe&dEFK+78gqFtkvW{-$O z5_4S9RZYk;fFivkEq9}oaCG7_emV||giIE{(Rw8;i!$Q3)AV)_z;~g6c%tN(P^b6n z>AhRorT3&CYb;;f6OZ~*X+Ie9f_eB|PVqIL*SY=m8mF&}W=<0J%&GC=%IA#f7gq&U z6T?AbGEjX=V+Jg93KdG|Ytgs*Yr(E}O|!%#)^P;b=`x^wUd|Us2a{s)%5hJ)%1fTZ zr%?p?0(jMJg}R@qqWSH_=cmwIy_?!Q%42XIE*ok00iZ6+rwTI^4RY=_CGwDZ=Fd99O8d+ z+_X`I^LR@o-XXeSY$*LVyL9(ulER=tS^`RSwjb>mLVT>B2qefr24W%?M%332)Oszl zK4_Y~FO=fCxIeuX2^HqIc)&3Sj5hIbV+ZyOGQc~50%UP6BK3<|0H9i^77zDh&fzJ5 z6<)W^_u7)WPl4tQYncj+y5U;XBib$+eYD`QyQcj zN$GBe9zsewU!=Pmh7_c6XrzZy8fghh=~B9-Q@Z&MKR$l47A$5s=REhl_qF#(LiIp_ zigH5ulU4qP)k(q& z4A6r*TEyi!f-b8i(}?I}hQc`NX=j$sA3Zof-%L>=CZ%9VbBV<;dgE1nk=;CxFc56y z`ucNXVN5v>`}wM=(*h8g?Cudr2y%Fb5Hnsa+&&n6{`Hx8vzPGQ4EfJyx3gc&I}Jv* zq@v!3(XUN?WgNz)v%sm5=5>P~)(9q2C1BsS-&vyvQEiM;vNpJ!IHTde6Jt8iXTU@U65!-a z;(*GrYjl*SK1e0fQVx0Z0K$4Dsd%jA|8%>v2>{*Bzvs<7Mg4P%wx-K7j-th4j_=j?j}1+Fp%uO>5^93q2UW-mVL8)|GuwL(km=XqX7?J zDA*6JD;^}e^UKe@VoFiYlL`NJE5C`{m`_3OdGdhrWzhuj z(2$PkUB`qlP_y*A=@CVkDb1B6{$qBuA;Sj82Yh-_D7y#f0l)Ptn7v2JZ4gaJu)YU53%EEmYk>RSixQfZ!aMM*Z#-5h z0BWOx+CYb3YXhLz&A{HuPdAxm?7JRms5l0kbQA#9B}VPPOPAxnUtU(_9FMF2vpZSl 
zD*P|HgF}~8Q_lAA>_}o2@pm_=?vc_^R4NKBP%E~s6;qGX2G`=AYuH2uo@PX}Fw^;w zI5F?T1P^f+njF6{?l$2^ZL~|YMx^r(5R0|69UHga&=Vd<6goU_;Bt;C2?C%5r7MR^eb^8M!JAyKl;ESUU@i>VFAq=k^R4`;@pUzXgy=SH zD(ii8Sxhwb1WkJY@vhzJ$q@}GP(jY}+*}fjiKi*_!|2cr#r4s9Vp{%M$!fD1kMSq> zHF~b5><$sswHzr9Sfd#M6g=TFGZ85k#D76q8gVC&l-c63vyo|&%PNYWQDOIBxp5+GBk@9cZ-uVoD{8^ujWBNEB9KJ2owil=w=xIq+{+ zw-z%Ck(k_!a>(giCsaLky0}?)>)}rp)S2d#1S)IjfMXDroz$)*H~T$uH=i@B8Dpft zkNb;i7k-w_WfZTIV5?E|?%t1_WKAWnJY$;cxRqs5q7r`gg)y>jzS1aB2IxQj38)uT zsTL>67@2lYji;ZV>3L%J^E*jpL+$r0uJ-hcWHsf&MzT46+nK_^eDFOXTV`Ep>IFLo zY`bQbDrJZ6!Ekr=#9xD-3dri4)c6tMG=ER_{KK(qw__D zy&Oj*i?4$s=qNzDsmY zzmK`8u*FC?l;pput=B<|*1(q*zWN3!L@JilU{SbmcDY8HD^XHE;h+~~T12Cxcg8+7 zroazHeW!THPs8r({VvDPX|z)myAwj}%9pIu zBFp-I4wp}~H7P{X#eFhuEo%Wyt9+B7ZVV1`Uqs+;tLbQyiaz=J1{q@}4?5LMB?riE z`dPnj7G-)J5hq%!fo6&tENRm6imv>Hgg;f$oGPRTS?gYhC^ob2 zaJC($=ol{nbESN;;9^z{>HU#QNGuSyk%SxAYId!KZ*Jap+%AhzPyeXH`SG<~Lhu|Y zjdB^sWT}*}Kap(gp5j(u@r*(cu0}thm(pAM+?U1DVclkJw|YC|zYHl@o0MOpkS9XH zRbfmO<)r)S`Z=EO(?hIcBkN2Z`87i<&m{R4+hxS9)57m6S@E+3yNRR8AgZR%MLj- z94>^0^%)iOb$7G>rTxp^*s$0w3BvB^`fD%Frz78Vd=@(>@*tICL5p=IyrC$p^UmxaARzm(r?Qfeu=mbHWuoBN3|7K7h*yO}gDIH|D(F1k; zA?-1Z-R~41R9R7e=w$TvdstY~Y7LrcNfGrdF|_l)l6BiP;>civ*2telPQnDXz+t1W zF`ivT;|wz)VK?HVng*i0sY>Dj`Sz;K|vA#9j{$8%R;t9bqN@~J%Cvi6N&ZO zzVI5zgG?9Z{|LD52T}$6-J^(AASx1l#_1|8BL~HP)_5I?&^vin^(l6dX2ml!3hHT{<}IIwS(IF#Bs0Yq-}J z3YZqk@^7_7>}RSjb>OfG4C6z5QkD~Ahxd-F48BHZ>*UdMYlC`qI`UO6Ms40sGG@$thhGQObevoF z-UI@3-@-V8PrTPx!pFn<@rEdo=cnsVf$otGsMv8c18^MSzL25y2Wv`Zq%Ssxm*v)A zQD)6G&#v!N%_gvuP0D?H4Y0>@#CVK2SuTj&9Ub3hmPo-`awrAdYYktp_=w2F-wxZf zYq?O3D@>grtm%wwLCUw}ddmd_{72QeB)%s*2sYy*z5d8tt*!j(33heHk>|rHOx4v( zFP!MuMTJX4S9k=|ilk`4JNxn$ff9Gk<;u?)CkW5KMIh`S!AAvVjf_Z2k>Xqi2q4F! 
zx1lqZUesSVa3rSXhY&2HC%C(PC`sZ`y?vBJz)HA0nB4?IN;Aj^iObDy3;-Mzmlu48 z;jw*Vgabr0ZoG3j7+pua73U!tqo}jW~+c1mSf3V=Q^8MSQgH$ako0~unO#CA)#{UGfUHgYsov*85DbHyb^BaNmt;gK~Jl;jXDq@2eGk?i=cuFrM3UMm9Q$cW8-(jxIZUX0yiY zBB1KXwzuK8x@f!d_jUH3Y8y0z-z_o;P%8TXH2&D{gJcshhmmo$pMnF)V#8{vw9tUS zNfc^`=9iP4psV@j?JS2nUX`lq96=}Y7rQIRV@yQ&Y>?rzGamplI!d`St^g|6Uo>I9>I9ffBztn=a< zP0j4T`SSo3L+=_hHeH3JA2-Q)R%z`I5H+nb_nceYNRgM?h$0r9oX9@LIxv3R1E-0} zdE?Z6bx74x;S*`nh0+KI{Jp6e_5XB22r|`zJPPeb$<3HPORNtbSM!q2zZx=&-Zg;F zEK$q|1!{@#Vc6q$ ze6D}iqh)_90sWL@LES8Rqc>IliGj|aBL0|U@z}Bt-?bw_ijTld8~)>Jfn9Pjlh4Ll z^&yU0Lex_h7Pt;%rTh9`o8{EcyN)s`q8eaR2j8rdN(HxX|I;@yaUvUd<=-4$_IeLp zIsiF&8v5BQ=tx61KKYf&>B<>i{0{gx0)07^oPWSVJENA-@5x4>x&mypZtzHV-Qw9B zdo1sRf?r!bgA7XFQ$u#|o-9X~Sw0k*PQx$Fkis8F@o$n8Tst{sO4bl zIFq}`VJ-vF5Gm;<7V|lpN$S)~<-Diu(QG{;g+1eEuNA)?yPcjT0M#^nMLt^t$C^be zv3xg~<*G(Idph6du^pBud8efETEcNd&e*MwI77^*s*f`)GWURHCFv=2Z1js=u`$*i zU~k+x5B$OZMZf!JGr|e=Ga3Hw(r2%jiYi;Kjd!+|r#=1Zka9OjxeXt2_kZ}sFn^_L zt2PY>>fE3^!ek$|NTJLG;||~-&w;jcfVnCQCsO=^dN5h?yU|T$N%WqWl=&~qMj#rE zBCD6f8~J=H_>3cbhT8v=3#Zm7Cv)LX)!%HU&7aopDvw`f=8dt8E$zj`-zPClR~Iz0uFgpv-r#3J4hSAMjtn@Dl@SI@K*c}D$_1= zM2Eg8UUpZ}bB@%uBQu)eCg|KvwiKdzz0vkN*^E#|QR4*leCjGzVH;wkXK5hT-IIS5 zV}q{@{t;n^3$nmDryL61|3R}MJL9TYpA3Ln638wtG~{>oFQ8>+;5~%3O_+n!6Tu@m zZD)9PhZ{b_)AFrI2eMpLdg(9ZI*4tI>SkXxrg9od(~PSFIZ7YJO@e7Tz_dyaGRf!F zZ8)Z%w;f=3ZajY5K=qnsJn>#}XlwE}OO}9sxB3i@h#sSO9rEs*?pOcei2O}zxav+L zc6{Gma?!NxO(6;RQl7_42C=+#!sNUkqFp|HU~^sG0iYT=YV4i)Ds;&P}tA-+^WWn=uBeE#oq}8)~uGaFqf)FrzV{P^A3=qB( zFPOJy3{MHj}U;O?% zwFG@ZK|%Xj(ZMGtG%UE|oMMX9!&-FhYXL70C}% zaFy@Ytj5{>_@W7zb)jOV8}4chSm5J@p}iDGxa-arA8U@RUv>qQF&E)QU$jfr`vB4i znCTVFZ%)^DS35iZ%)9^th6~#-0ho3GiOUdnD-7{>9Bl0%X4kVv%#7bPfJQN?VwNZK z4lP^W+N>m&)Ui2!$~++D1E^NiP9WV<^i`>NrAJBw?^9^!Zk}XQmd51cDbA$Psm&Ii zKVKa8hdb?mo?_hAuMz~e4wewzxrMyPr#cKCj{YprPe@l^in9-!CUyc(nZs?pxttQ% zouxz8{ef5Xz=wZ$%h+NC=+dwL0QfF4t#J`F*9!{E6qV*`M4w=JGcW6A6vg0`)BnQq zl?D?)@kt3x;1$e!9cvJaSX#iG(_-WRgv{ljoNh8t`)8#xNB3j zMd1F^4^rM<0m;GM-gP_E#(r6@$91>h<9|!d 
z5o#Kwn%e@o)wqi4+Vg*GY7X>G`|0PuzVWvFW^lj!iwhy+=MzK4ic>8uk+5SR)p4Tk z)p|pjqksZBJF=ej&CMdAra#JA7!v)4(m@7?NhU+8mdDw@3gAF;{kt8`K}DZ#8t3$- zEmbRhdtP7-&HI_Zd4>!}LZE>+^Q8d5dI5(X+k`D89ZN}W0^@$!O>IdAjS6-q*?gQ5 zbfL8c;5FSq2rc96iUYZYdNTH?aZ%mj4*^M$H7`MY$y-nl-km=6fj;O=#Mg%;lH?V( zZ!!NL(Q6olylh7_(tVM8kDQH2bgB6-%MS$(mx*cZ@n2JcQ6DMu;;qS=2q$+r4x8S3 z=%4#uuj3dmK3%h2a&C)!JF3*>6|z*uL?wCq_cJW}#`S0&;2!o{ zIsle5;ILM4sD9CF)?~6)woAb#^AriFqn?RxZ53UenV3@2xnEXxFXmW~L9RyfG%39} zz>7f^k|m1xxz+?+4-dF$$6i8WSZ&90a`ANi=|vNw|MnD*oDOpwdRU}o zkAJ|1&lpRn@lWeyPz0bGgkIboI-=jIx_YD)hB=xP$s-?}0i{lft)Tw7!>?&$;r^`` zhx6J88o$(37MfhDV~Dt#EmZhFFtVWoorz6LT6~f_sO%r9y!9cYD%j2Q!)P?cc`wN& zsfd%pa*ERX%w3P8&E3iJkgnnK0-0l(#s`oIDjN0h6;*I~YYs_k#r)T2xm-XsoMmOO zuZTktXjBWQQNlY*9Dr$Y?c4?_OQ7`T9n#xsn->s z^*fca(h1KuBXdJTkV=IK4{SwGlaK_%ah#t*C^L77eu2Gxyx=L>NHCL)!knFJ=dnZj z-V8rUSROKEc08H`pJOIZb~N?0(Pd`_u37+Q+|0CFyPpy3Wi_}J8&Ymfzk$B3O}6r# zKkgT2WRilfB1z#UQ%*s5)EQGh!K?zeYc-PNTedddnzi^tR z$3skg*IHT&p5b7hSdzC>^cIfLF}qZb{<(_KNc>Lg4KZL9vjvU*5 zZ)0ov>Mq1(pguJ=M)J$&{-WdL2M{Y=%JdYHMTihrGLf|8OO2whAmr7&Xu{>5t>n8v z@VEPPtFnNJi7A4l0%w7*qUI|(=Jm{FJ}A6Rcb^J$!9oW`A+|H_SXgz#xj_S9(^16qjG0FP2McNf(-F$Ue$30*lE|KRwl zSh@5@vdeKkh`?NYED&13W(_KI-$&*z_$7zxwg@~4vVt^Ut}F%ge>qFAF)(>IzYRyf z`J9x4w@%bgJbJou)9%6w=n8Vu*NEaDJ!~i_k>>xbKyo!cjY!GSnvj9BxK?mJP2=x# zD?a#zO7N?W5gznT0gI5lapC|ZDb9g|kM_Lqi&fBR*0?rJ7t$rMA=l~kgjtZ}y=lRJ z{W5=%I9|Q-TN1XkV?zD-n$WlY3E0R`yphYEEN+?8z{h~P7srjNjEKT5*J0Kf?B|lW zzlEdOcVm5nOmO@|aft1pztuBHBC}Ypv0CC(W$*h$BMakS_NjiKV>4H2K zd6AEZ2846iNTt&n$7)C!F^`6B&V{7bgfhws(v*?umQux7Gu*z5Uu#34loi~?0lhA) z6weaSfY@k3^73K1a;)#&1QkNe#VkbE8CKXmm`GeekGhOecj9^ATMI*w;*zA!p>VV@ z)PAjN?1zMlp(Y)>Ab#&k2d?Bze%V&JjQQ?F!6O~L=|M+9-h-aKLF+DG`bn^+*`j_&my{~`Yxk#B zX+2f`C_6j9s4~%IK$`b+iVboEhBInyzVp5p!9iKBRQ<~k;+B;;*;~}LPGFol4tQJp z^5_3|V!PPrWHQTKj&neMptUo$kG|A@Pl&c|j%vA>=6h+&W?Gi70m1`sfNExTB({pt z3BUwP4J9z>^}9|as1d@Nm^!wi2ILvS>cwSd=Qr20!)K_7jzQm8JYj3PqAPlrK7Y%8 zlNgCGGS7)TV@ttig~&1?NE8~cra8`|)X@$>#)m?g*mk zPe2Mcio&_Mz(CPsMpLonN)6L|ZwC;~dLc3ZUd#M4m8}CMFz!@lpWN_T*goI%h3h5K 
zJ_Q`>Tl7}JbSUMbcs1HEl~^vH2CDP(@rWK8H^!I=iKrx0ZWn(iw5Okm9J&?3)bn9a z@mg74QXewupM&vjRPE_z`%ccKdq`8^i=SlByWK52?_rB_EE+i(%kpXdbPVLCjAcIa z!UWyMl#JnsSNTCe5HhZvfA&M43$4r-As6a{c?xk~ZV&R&G~fKQi1iEKcwqCajoX|Y zC3xQoNM(SSU9w=Jua^*8vN#{}X^aUrf`3Z<$rQxJ4tS-5CL3O(N< zr=cMn5{d(=+&b{gcGh+IlGhA4bT6(H6ga_n>R~{rC?>P?5C{<%oS^(J1%z!RAf+Lc z!EZ+}@IE;L;J*3N<=tG6`uObbtxU^zF5YC;kI4m5_a5wlaTO0k*n6BGD1C zUcV&lPe~7esF|7BliwX{COuUTga>(fg#*TNme5VG7@%j3O2~W`L6+O@d&UrHzW6~l z9I$v@I<(|*HgH*FM^;9L&X%Zi_29`uHq|H?+H5!A2d9{_hRDmzP()0>CLT%&6iFIVK+A!u zo!n=bqehBV{wQ5Zqfz*;<psd}b{C|q4J@?y z-&oTL_eskmIPtGnMaoe9%jJA~;+SH8vcU1A5u+Y`f!Zn7!KWpt&m&+Mj0Yf~X0m`G7WhC~*T27EIfc@^w94pq$|dH( z&!cPuT$n@3EKOxYoBEsDz(Bd6pBarG~U6 zY11y6xI$^2rjU6H`xQDpT~ErCX3@i#`Y3jL2|zps4+R*M2}f(uLJ?nb9Q}TBcD9mM z`BiM@d>gcs82K4|a1{+v$QL&HviyAWFgz`JbP;0E69_c_jKFS^&;C~rHU;?o^Fl{W zlw}ZHmxliF2yhn*JH0+WrpzqnHCJAZ9;PMI67)C~;70^vd8F$_A1{hBq{gIiY*V%;&i^484Uxz zFu;S)zH3*fB<}A|DHg9;;HgZm9GPo2C*JZ{3|*NNHy*Mk6KLNi?lg4exK+WqcPJ?c zTwx!7SOHf-1>N>KF}YGB5IxpCeH;?Ln2;r-^Zp6Aa~Ob%$`YIEid0e6^AYzWz)}-w z=K>}eAKE07iu-}jK(4%}f8XQA{rQ4J^;UyKivNg1J5aA=U~N%_-hf*p(#!v`w#>5) zfo$4k@@>ju6|Pl2XAso9D9Y~&VPTtT+ZK}g^5(B$g9i=4=;SqrxS-W0=&+|Cs9lBi z?PUjd=l$A)U>@t62aar%OQtd~9y$U%xO4_BDq$TGeXz2%d4JL=g{9PlWb(%9J6`=Z zX8dTjP!CXa5lilyh)f#3R%J?xt(b#)^%HazVCdtw5st&evfL% zjcBWlT}!#Zu82oA^{K2WiA0Bn7W!>pBsElYT%Hj#Ln~?_W6oAg;J%#gZ`98}1MDnB zco9h_7<2BFt` zx>zbjziWKbD@_L0UTs^Yo1g-*S)AAu=`&v6Q*&J~i?d`hGRapxTn5T9plQMvKx7P9 zt!kkWM8wT%*;6PM?Se0!k-?34ironM;lg)z$GI8=KtLdzWl*4x-mFLHKm&dzZ}Ta6 zuwc4E?^h8;4K4wdh9+4r!5m<)yi8m_ga0dGIL1}clrRzW;Ll;zQ7@R*Kg8`FZ=~?d zuc?@vm+h`c=D(D(+thCKYBuFXfSFxrfuDRFwm|LAOx4aXS9`9PDAsG!-pG}L!#m_Pg zY_Z^>oU~Nj-L@bPmu7MW+R&5&BtDM@7n~-BJRprp-XDf-kuB5$NJP=6br?Ov<~;xof3M07s6yO;UQN_DZtr!b`Aj2Mp?t;Vu!5EujuK z)#i$(O$}ya;tshnX)MmysH2ToqbW^vD}Zfqjmm=7xI4_vEY{J(i}>HWPhs2ADt zc07C@*cmh+$rIK%x1+Cu(Pr3Lq@(Tq4T5SUO~hyJkuOHc286<*hhGNUqsLYz4=a0rMhRV96(>lR&)AT$T?5lg1ln+5jjc0`Yz 
zvCAduVG+P)_p_ZHbE&V?$uGd$KA{S_m^^Lje2iz!11dnm0IscBEKiW3h&$R{n^eH2Bn;}J=@thS&P4m9^QS9<{-LrPq+{22{nUt};HfjNJS$Ob5 z14CmW?20a;%$*@p_Z3))5>#@!wF=XExa0$Y62Luf{7b0ND)|ltU)3gru1KaUQ@>}; zaVjHi2pT@vns{RksaIL@-H-g@G4+Bz+J?QGY&+0YaQ;oL^()n{2CtMWcD`r-N7gq7 zaKC*-yo^eHh*#+rv=0Dxg#1FsdJv8>N6GX93F&pf8IF3qT1=Gp$`2g}B+NZ!#N-+C za*niu?f-ue zD~{MdU1`O9A+)Sk`N!LDAG!h)!$JQgcM(}>jIf+&PtWeExs_^7{LvxL68X#Lb4Yk0 z!_ESuK&OM@@n++l@zed~2kl~jI?S9h(HMcLRKVR6LblV(k%Lei0o;SKbnbd~<@UkQFPlO#&$DgtHHt2B%YrZ=_z0{p@c6m)N}Mifv~_4eC0Xb-nac@Y@@z;dnf z|M*)LWO0TVY!OjRlA{-_!V4y}?%aro!jS3Y7$Q{PL@cSHcL#$TXgf(kk{qssJ}Bd_ zImqz9`yyl)ggdD+!G*8KaPoI96!Uf*aL$C3G+0E?>6@D=SF^r`kYT)jyx@s5?&K~# z()edzwZFwY0VgCb2P!sSKk9q02Kl`7n^^!LCO6PpM=In@8SB?o;}>TweR&2bpdYqA z1F#IM$A|9>uTiiWB#xUmaeRRquviG<7gvCo=kv%EaQIVWIYKfT6F%rIj`-UMKV~BW z`L~7oqryaS@WOVljPDf!#0~Gt@5>!X+w6O4C_&ArJcwX{H&JEEP)DY7`4-y7!p6e# z7X%)(1`irVB-OVVM)MG#plWmg?Oc%4A6dZNEOv_BSBJ#K{$v9hIJy>a#xc~|4f*0Jj@J&F6I&$STJVoW_J<-bYc zU?|mwT791DjAqNHC>H@<(OlSknEpnqxr{T@Az-V8V{tY^zxc2sAuZOXpN;U1M*!*n zpBX%sYd~Th(WL1;8VaSD1x+W=XvEDl`|5iPDM!UB2aehZ;$C9d9+Iaw(&a9ar#R{2YhsDS-=zDWKE! 
z`TVFqr2-l6pXUf^L8N+lP4aeLku(2$(vk{ASUAt($qP`{4c=8{t^E7rJEj$mEPinV z2eAx-C*Bv`ADGUWVyIDSeptcFbt@j?+tU{B49gJ$4qnuWpRHc-&E=$524ukC#Gq70 z{$C7piXw+mZ;c0kP%T=n<(yPnXvfup1JgT1lvB#9C69M6EQ~&DHI>XIP@6EYqlElp z$YWMOK-#JBS3NG?VejYwa18NkeDrg&%4g*}J-U4Rsxa_}fT99F>R)SLE0D)se20~5 z9A+hDMAG+&qyvz7Cy2dr;a}2&*3UUmf|XhBt-jz|En^~zYx}C;XS{p0IdnZIVnYzN zyhbV35R=Nps~fIjPO^_JUK9n&*JGHyf!(dG;IvDdM4(8wmiq#bC#VyE!!<=*K`e>L z@yF#KSK#jZ3530WjUjvi3%RU}D7_^B8-HYDNLfB!ZmXBFCmS+~y7$BG+N}ZCCXDr{$POCOLoFJSd zS6(M4Di6v%XM^m?8qc$cXh0JC8c(&v1yN9#@xZ}_u>ZK$g+OaABq<0wIl*sJW8a}0 z$gs+%zw7+6LR|z49=pdIMwmszqffN0ijk{)H$Z2}vQmf2T@4QHj?Qnio-TAvS(cAMqrV;#OEvRZ?{bZTb-oFsf3$Ra%K+zZSs$HLftMQ=X~uO?f|OiF zoTw+@dI{0G)gfK0~u%ODB1Qi)NLUg+KFrZI#(NZXnOP_UxD3Abz-@B?t+Wdf) zw{uBF>W2WjM&%Z$@GHvVL#T*>O_5)qT%6RbxvsqastN#-*`x>AC{72>kPYRgJ3f*4?n)M7o&wy}{ZSrS+>0fg&pbMzlc_ESa zaoCl4vo0f#3!|qfl9dUN6dP#mq+VwN!@HqE78qL~NO|+@`$jfJK(M)kHnI&W|4Anc zd>JWAjLRGs6V#r#%9T3h#>(;mi-Q;idXLm>e%$#G`!Qb$L6;-PC~U_eOlyG}w1U;e zO2m_exAC!X$A{*|4mQPG&7=eRB>+Q&2Y(DBLGe8ow%#NdClKe*>?MtG?|N+(*nNkZ zOschDXjJqm_C*+cCA8FqxWj|VK!Gd+LaE{6*8a0Gr2m$~p{#a0oqGPB%wrD;nF8UW z9(O{rTHjWjb?9BYPS>Ba#LXS(w*1ynCUkp@Y)Os~{dc_bPg7uq7 z7&?|#tpWLn%I{O(6{7cp(0sMTj2NFiFiSCCuSK+ttr%>5-q{f0AnO<1xI+p#AB$o~ zT>%0UB3fy8>ogB>M`kQV0)|obmvz$Z@q36xJURst zZ3*}jc_;Ym($%QVA)PeEU!$iSyz8P98}z3s_qfYsL(CoSq8M4^@!SyL7l{aCdBivI zzu7^q!L^C<1sBq+5+AkvwOiT+B9bl<j84L zmu{0T-A$GpC6+V2>4dTAKh>;ALqhCXnSZy%U!N#>LLKedl+BR!%=vqO4|Tu+^p0O1 zsFB7z!9S6kff3{dwi>k`rhPB%mkrDt)*Mc`->oF%43YxcxS70^ydf|kE0^gJL;>l0+a2IQ(-n;=u* z8Cr`0I;91PgIR-;({a<0DuN{Y`w1j=e30A)QFCW+sNfBZl-jqib-Dg4_JX$gjSXZZ z(e~yBf!5w3jC|FQ&X7y$|16VANlfWx@-Zvo{^S=>6P4Cr5J#a~Y1pR(5_SD=JPYjl zvM#NE8s@S=xXYAekEDnGmtB+~ig|Z=up>gf>{0t}0=V)WIOfcp6v%AvI<%r$5a!N$ zs_P9~Fl`5B5ys-9EErheiYU#vimvsMc5Hu5yOGi@iMWBH6lT<|wUR#rJGYQ2`OkP^ zLZ;(r%~<@2x4ByW($M=sbO`GeVh0sBM#ZIp#j6U(YXF;CZHd7|iBLus$_qwIL|Njv;WjPO&ClA0v5kA$c)C=V75k$Vp!stEx?|`^(~|3g%U0 zjTq=0)548Ox^n6!VN+p&)3NhLu&Hc>HAT>_S#%H^jOJ$anyL{=3NIDs;ai%03^|w0 
z#bWd?m&nO?;=~%pq0+eOsMAP6SRe>%Fg_0oC`_=%?-Sm}PW*lj6!t;0(WoQsw34#C zQ<(M30s*I@@hojsz}G!eODGS)7W(0Pqt0N7my#vdDP`tvKt#|<+tGyJDTpf}z9jhd z>#gH=CbtM^^R9g<5PDJWk$wz;|8bD;J4P)OYu+8sqbYshMPp3_2A%r1NGSLE6rmSv zNNVq~|IzS$)v5UktrXFr=D`OwBDdq}pgzm827Oc%Mn;6}v9^EMvjDNA^GZc-fQY+} zsIHb`*qJ}G!0*%60zCXc9%+usc+6b5sO{Mh)BYz`_{<*Py4YzfDrCv0Gs|NIyo-FbkXNb4za@{`-F)g`zDNEdapTuuo#uTrhyaa*4L8f&5U5xF5%Xj z1R88&F7es^zSKooqlP0K&Ef9CNnTfA&m7${r5nEt@@~r{ouKX~=|SL`)q>s7;)bWb zSUQ5tD9_W-IJs|G_+eQc(a0l{Wl-ucSI;*h<{;KyKhMEC_d zC}U{Y6bpaZ4ULX5n(JN?%Um7d;oH0J)gy#sSvpWLTbTc_1~QLEhuWOUk?bW}YC{^q z090-pINrmhp8h3{tZQxISSJGXfiJ+YX5i!Y%klH`3p*R_N=n~*Jj_=)88rhbF*+jm zYG{Qre>a!WpRHxIU1LMxucPpaET`Cjs|zax zT?jHrp7THjVGRmvqBWOX3q}z~i#+RJ^;0$%?4p>QhiM&pNwn+RjK(KQVs@?b8Sb}? zN8oWN-x`H_f1JHWh*QjmtsLl$v%njue9HQGCu%_&WjB~aAD$>CmKYGT^gz$LYp;3o zG$HRZLo#V2-|qePng&a>u1T#4ZUt%$7yh0Q{Hu=qFUoh(>7o~N{{JjM)7xDhVnm>W z%4QfyFAOhn0&0Q}(C=jHwzG#O=)5#Yso(iP z%)AL^{W7|#v_c=@IP%}3b-322yvKJ4<|m+%=QKR6GiRTOq~x+O#4Zt(1>h5kW&P2f zmm`X8#IL9c!My1)2AG`I>C}C{`@uT`8~6Uibc}7^Dkx>pW$GR*5tNQtA68dvLB52b zj**!YCDx;^XLZMS=>OQoH0iL4Yv2{JG>(TncgcGoS62};@dvDlq&V5cp{pv(2@C?4 ziq~AG#uJw8$Uu|Y7ztxr=@)NuU`Fa~ZW?doVFK8KW5#$sNi{1{(>CKti_xjM+lDgVeribY-2Rg z_-S)EEq802q|=Zxg3OVG>SEkKfA%)+Tgc=xBEAtm>PnfQ z-3*03kFrFx#)S00Tf>6G#&#frE1m+UpX3-r7XLY{=5=1xC(_+v8qk^u7I})}Y5kF_ zp(l#+(HoC4^7fL7e`)wnki6R^QAS45tu}oH@bic-h+NIIx>z?bZ)510`Y{~>NvwIb z2{0`LnyfGJ{BZ8N`gkqt^*;6B7m6C_OJi+%M9DWKbGfHDzOg~!lR8^91;h-AP!9&^ zlPYkGPV3!*}!&{N|CZC)vBjB zt8XQ19xiGTDN*`O#gReFn7;iH*I_o(xTcQ0EBi@c{W99P7LW1r4&I@u`c@mz^>dfqL>!+WD9 zXDaO{>W~MwE6RJsFB*=ByO)8$I9sC_a*`4-^2(UJve9e)pWjUgA3iUjLkl79^1BEk zPt;=1h+3SxO)rcV+RQ6s==g>8tF23bPCE&NNl+qbT`BaO9#GdMDw-c+*-7gC%FqAn z$oCJeumG-pRPp_Y?8voIwx|{y1n4SK7Y_;>LtwSWR%I}tW=CoH<35|f*RXYdjbT+V z>Y&mUhict%fR^4Dp2*J<2uJBg^2=*fJ7wVQ|G^lpT}V5KoM4T-RJ%OHnLN;?Vqg^Y zImR|>L_X>@^%YJg@+A8LIH^WS=y^aTg)5LK>H!jxSj%nqR319UKuOn6Kr$LOmJWHE z=g#eQ*s#Xafmr2V?PQQe_x^AaJGn0?|NYh6%TdMDDnIxdA?tax4qNANE>P8OdjC0C zv`YT&dwnKR-6Q(;xL8(<`PA9M_H*l(Le|iJ!NphfgMu 
z(1L%t8iry>HWC{XN_W6%G(MS!CcpWuXW|=z&GJR(m|6FWZ3N2Y%5i2{)c0Xm4-@CX zKb5>BvE46*33IM5J>WikY$<1OL+K@EsAhx!Gpo zmhW5i?oN7FO_dmybNrj@Lu2q@9QYWPF0*Pn7_^NDDW?xdiOBxGO?sTA)0m;%gpekv zBm10ioG_h1F{?$-m?f*uogWM|c5!dyg~hy~EbqskU0=^8*g@cu1Ka_Uo3pz{F0lwTO0x(^Y~}zu-az;fh#VBu zP6p(4^bGih19h<(QX!HI+C>WS|LsKD6A|=uhojXCWAe|lejsBdpqOXnqfYF3S>`rq zYF1BTm~**f2-7q+?$=@nV#2!|VoOcG{U#dF|a`(f$4X-x1|KeoOLr96{Wb z<|lK6^)hs4$;SjU;3@S11$6Ot=d%&j*YGzI7*58#C_Lir0M-0Wl~=F9ZW{HaICBn& z70ktpR@h9JsD%S`3PSKhE%$EM7=yuJM(@3o=sg%UI#EI-dKb|nI-}RH}ZPnBy71m zNzQN)?h5Zzb+3>zM9L#oD@}uhOb^|}o@~f#Fxw$#iy!_I?QV%D9CkxIDz*bY2rC&i zL%!yluNU9(9uZfP%2B}Oskut*!0pGj#=9BJ`?e0T%pvdao~(WOKHBhH&XVyO1v8t7$>LG>iRcxoe2Fq99Ur)KZ_jtu# zi|MbU8$ivRGp;we0Isipniv6B#?PqMvjNh!9(4xqlkd3-XNpV%)UAzQuccY7R93lp zcxIlL&sj{;otiM?DlUg~6R4{lh_KG7kRAvWz4+Ov@njBwhgtv-E+-k*DFXw8zsZV| zKRHqu=7{KH8Izh$PejggJ?49O2*D$3i3)_Mtu4pN6s|K!_D@Q_cA(GJQ=OJW9c482 z3CmXhy+F5^Wg?H7&wEofsq$SiPo>gQ)GVk zJ>CE9V}`58zjK#U4T8{OP^MVmJdWAUIkK?37XD;ze6fSvT?TJv3DC&<*cl__x51KG zrk#E}#xN*T6<5ha_hdhF9^Z#F7gt$Vns!LWuog|OuWHFT>T8*)%tl7uh>nrLJZQDU z+t2}>>6g;!KTQCT$o^6!!|!pPD4T$dMy?bjhW;o1k9;Xj4^8`OHENq~{GETUe2_08 zq$n(1o(8Ip=^6`#fm3Jfdxuu6u`2s5q|*909@1sHEWyhn`uYIJ8<#(w{qr!YVB{|i zkZWo*8h-n%p4^dGpq}@~b!yZvg?iXH3KX-c0fs()QZ{$CK89mI%teB%N=%j3l5kQ` z(%7_uJl9wmI9Vu$X&K(7Qg48rCjzNPD0$DmOL|sIrk##45s>tNR z>oWxRf3B|JGTj@QoZ}{urUsef1lOxACWc`DqRrESUKBzScFi6f-z5VZGqBfKEP1~P z4hV^!8LWL+)|{)o_tEIycEKU|w1a?7Qn<{A=)pb`NR|7zeF;?0IZ1L0<{kz^QMTF5 ztCn~xB9nnfzlF(r%_ZzbZSqh_?GN&hLQO%<@xUQ{FiSp$t#B+NqCp@`R{0IRzUHGB zWU>4ih!I1@=ZiTfu_&}^oYo9@N8o-$gso)oR*$?J{Q>3ridNw`Z?o`kkSHOA3Kfz5 zgT|iNmAXC-Sq_K$UK*W+`s<3xe9PkH)l>Jc&YqIc-EVqEJ)2Dy64&nHjj=dlQ;jZK zJK~q`s{`IIoEZf_6yamw&4Irx2xi?^;eF$Tuv-CNC*y#=VFW*Y>ireRcKNniQ5Ai@ zycR>ujWGDTR9(iq?GE2fK~e*azi~u93DLfnw#&B7#)gZmv z$d1j0n$xfRXdF(51L|pstNs-JO9$)}E<*6c2ynuTPZH-%n@FUc=~71e}s( zvtY~)ie0P*f4XkEz@?>PXN$M z>W|e(`l$H@<7uu1S0#nSa7}%CpI&{m!+k6KM4U{nqmgGE2*p}L+2??3Kpen0p^}(| zXH|dZlQ0WMHEk5devc}+IY_NcgZI)Zv07%=QbM%LLwt?t8AVuSs?Q(kUDWF%9lsFf 
zMj1nhW~f0E<+Um+Lr1?^M_6C`ODPmKj4S8~kbf_#R&d&^Od_l-$dHR;4!sZjKn~hR>{5Nci-*$-d6JTccm^0zGu9C$eivjLAB4A||*=Tp@~<|>^2087j)FuOQvq!p-C6aaJwtt#Vfw~hM*cc$6r4qp6{ zW`o9la!s7d%}l-y@;5_zLj75f)Ep7Y>H-b<^}R-oJZX8^CHbZD_FVp4Hf7Ww2Glf_ zMs?w9$vW7Y?~N}vp317MexoN@Dur+hZh%c=gdw0W_xdq4zUqNop-k4ATy&}Oo565m zmaqpP&<$u22)q!95aU`Wk*{7|Oa9x(c9e(yen^iGiXAwnW<<${d%&4iKNE}R=gL8S zvexm*xA7%PpycrCzek^KdVeX5)WB!oIK-PA*@oVltV>`|G&teXaHBS|vj`$!RN%j; zg~{G*e6Fsh1;0Piw(KFuGA$S?2x##wO+IxQb~eLEYz{%RKNz&l@p#bm;1%~-x2^>* z4|zAC!3uDLT6%(=@76=*`jyPE7$I95HDwB%D?~*mXslzafu$&7PgsDPV(f{rUKQ;6rU_6IojW=LO3th-sCgMH)N!a}1i0 zTd5|vZ-e_`it&t?gVlTV9-!A)OPl_YK``3t{xxPt?d;>MBaf|OsNA|~UmB>eV;DHz z_7C9LEYi+5zYGN~#&BOhI8^C<@$>})Iw31~COv8(L4X?5?^ba``u=o5O!o}{4hy@b zoLqR%;8A+~xk;c>EsC-{bOOiARvoHue|4IKsUvVf-Z+}^P$r6ou*D#ZYs=SgcRtWg&%6l<*6ase2!2$Y zV$Eg_)j7`WQ(Yc6^Y`Y-zBIZ^1U{nHOi3!%tX_h*hZ$}B%fG|?p#D&J?JnkNu4h z0|)B6*uVuVgV~`1plVW^rE#%-m7OQ`>YMKZAZ8?h0TNTH@XWq6w93l+FPDy!cImiA z5$AxRxf9S0bwxdYl7SLk@m=$KFmsGCt1dg-BdP^jOV(yZ`Rz58RSj^E&wWb+v8N=e zuv^Te57$UOwyN-8_Yv|1Jz+q5-^)__9fk*0mD>@wSRJ4rdwU>UQ9}Ii*DKCR5|6ua zR)77}M8_Vh6wu*!eT!!-XN1ed3(tg@n@Z$H%!yO_d6f~}ymHVd8qf<8kh#b4!lzcB zL=`YV-*mkX+Cf5nCW04~4R?RvfZEU6Q7Bn|FP^J^B|9z8eL zH}c)YpRZPMqtv~L{T>FFn3lA%Y_gYH&g-@;&%5(qCaRxBj|MN4x7V1;I=KS8&S-^z z5EOC1fMx-}9)EeXUJGt74g7ULSZTch&e=W)Qr4tTH-Ox(^AeQQU>gg_Ir2^rMTHs> zbZf;!Yq7u6?Xjh0GS-)C83VLD7Oi;kZvafKRpqC;=cPKZR?`%QDdDe+^JHzEA@^ts ztv(S;MsNea2s~ki$aNK(4N?cpU3r8(O?7aLPLxvPtGD);g>aM_F>1gyO!~DRou{=` z7*av-Tg!`NWTC3M{G0r+OKTD*RZ+PYElJYVKeExU+a8l8$miYvV!vKM)6duoE#v5! 
z(%A8^DV`v?77U{_C}3qJ-ihzsei$d~vlb*{+^0^S;;*{T_RVanBwx;zDXKvvJV9)I zp@=S3mo*&F^F7K;?rKB15b9iXJuu!mqmqO6Up=)};}UiIc(7}YvD}pHuA|0-fesKz zC^X^>-~NLGT&pk!CLF+8?r=@Ge`42Ev2t)N?AzddT<072Y0Mts2Ci#4Z0-AM7{q!H zx9giTrKsL)PEM!4y!;_|4nD){Q(TrzSZS|%k*4`PL1cz zWDWshmeHabjDara%vgoE3U!*qb;&5VbUj9uK-S*`jrWJ7{6}sso z_qOr{0%kfR8EhAvxfT{JzA>Fb=sic}Dh4dc%ip~VrZ_BkCD5zuQB^`s(ckIn)<|{C zAnjMHtHpd`q+9)0-j4nth{begG$#sUfL*gaPVkZ?t$ImvYA0Bf+wgH}c z#{KZW0^=9+gi<^0mrXT8d=voIz5~!lrO_c85JH}qtxFL!i$F2kf06t36tKvYu6zk) zwGE%u9)*lZ=z|y*t5hG)s>;17N*Kdy=pJ z^55-klpO*$cRklyAq9h&ATDw2M@n*l47kek=zJNsIwpclg7d}xnWummX$<66DN0a| z8`Jt8A3MdnOU}ApLhXa>tGaHW(LeO=C)CetYt!f4rS*gyz$gJA(IqV9(ct${Hwd^x7j|E=Vo6@{X*O=`Vcovd+@6#^+;LnVz^TbgZKkczShC!$4aD6Q!XpeI@UGfpc5A@Ik`eMCM$>X_R!V}7i z(G)nL$=ir`&70le3Q!kq;2=nT}?34?%}_DKjvz_*hsbOJwaw3u;@YqhfsI z(`Pw`c_wp8vKU)m>(bUj(LI=pR2bGH1mN-mT^(r~X&VSXb-a8)0G%RD5j$Yu8*6d)Wy%`OSaB zeG%ERH=Ek~e*-(J&{vh-FiFD(6x8hx!!w|DopS`}g8IY@%(%@z?#(}u`Smx*64?5r zJzC=wc@fm17l4l1U0K&7PpV+0R`%64=CNQ9MoxB5`xZCu@x}zrDXlmqHjEnld9RkE z;lceoK%`(-)>E}!-AY$i6ON63jdQ7qoXPOalEGgcEHc)=`G3s4W zHmTPsJhahOH0BpnX`+r=`Ggl$$*701O@sbYNMA%i5C>U)ZYd>q?x}S?eOG=xZItk( zWJ1KCK!aB3cbhu-w*}6VteI$YUFMMNVWsgsPQ2w>K=J$Ek(HEkH1xavPyP>JE3pbr zv>H5_4Xh1h>b7&-pIam;e69qywb*Mv4+eZB^xqzA^Qv^uU-cz{fKG7GPz>aL`2vjZ zegWaSv(R7TX6v@NFn9l|=$0MT24Dci`0GLBi6>1|WwcyuN4Wxy>8=+a7xsLlui1m0 zN0d~WosPkTVLIdV5uFy(@9y$z*3nG-=A-5t6MoNA`l7!`XKhf~6r^WUkgw%DqY@^Q zf#aD~ZpKdF^bI~_`T)@vjS#xEd0BwIWog^epEa*d3y7;*roT{Wh}&pMxu#D zmk+&X4z)53jGS8i2za^vGnjC!-vQWdtcMTyDY5Oym@~p= z?93=@rhbjqD)b7cB~%2p@DUN1YMc-;gsz@*rOGPfH8sUPA*L3@rDI7{)WsV9uB&zH zjcbp`waQk^NQ)E+J*FZ=r#7Ktbagb<)wl~0*nDaVIXYNzqWoY^{dRj^Y-45z_`~cy z|LoVkI^wMG~M8NtT3%tv&4rYD!$BbDfZH-SHGmSnFzuw$* zR`USBD_wx}p8)6|yRynkbd*HGM^+ktIuFJ`z+y|AQ|}o7rwDfmZX4Vgoydhb>WxCY zHF>o_ayoMdqHH()gtG=i9QpQoK}6 z0|Q>5f5sw_J9~&GNnlByv*aJusIa^DxeI@L6Ca0eu=f^LRKy$Yyo$dPx*1+x4rBps z0kF!37F@Y#Q;mmvcLJCyY~s4cwaeZ?z#U)!4IlJJkX zuJ$F_#>vUJGCwo&x*Sy?5)k|hD38{Kw+rsB?K1f-LL>~U`yxRh>U(Ab5+@tDIJBOW 
zJFiANMoZKUQ~diOqdS{cjs+#(!`VJ)KgQjCUxH{)Jek(w{wi8@$7fv`NG>-%)Q9uy zXD==~Dr{8oPk|~($rSC}1NcSplV$YpwrXh`mZ_TU*G)gn|QzYI#N?w``YAN)zuB*-fob!d2j5iuMF?9 z@Dq6Rp*&U;#{k(4J?=Bv{DitwVE|+5lsv zZ+xwCR8LU^-n4mn@CIJC1}iU_PG*Nn$9eNXu5@~|N7&AAs^UxQ%wijqWAw?K@gkFn3ZJ}ZDp`R{>kH3Jo8aK z_UWHyI@snMPNU0Ic-pH7lc;V$!_zU?cGImUw9<^-S zYy^eeOBHrudIAg1U=@g!5uRgnh&v|bss*VxSNVOI<;L&XEvg?qL3h8eCF&o$TXOLD zHe*sXq##XZwO;99=b@nm|M_$w^7(yF5Rhs-=acJ~Cd^VV>Y-xKRVmukm1xAjOjYk+ zIsa5BQ=wTYrO~ddlYZ;;HvB{U$^KSM!4vUTX(ffg(+s+^@V4DgvYu5#JJ(qR`#o2( zgF#`v8Q1O5!jwGb<-;$2U7>!oYZ5C!>Wn#HYZK@?Y+Y2UY+MNRv-KAO9A2rThyKAm z61{Y{;%a^=60`@f(w`=mC zuF?=Y;B!KWe^3cefFe}J;N$`g&^K%R)O8g>kBQKX0TCo+-^gS+z~u_bJYnwS=yNU& zi#Xj;%5Ft2dwoc^7}b14^AczvkUBnij*JxTt3Eu$!0|fBWAClm<7rEJ;hfG4SDjm8 z(I1(`p?DjLczMe#`mfRdemlZGb%f&h^~vTvX_=t?1vswkc=CO5Nw+T_eMMnr(mB!( zDYS-YcW50jKlCf7&)_73Igw7X3%7 zV|u%K40r&;z64y!=ZLYf584VBE7b*zSzTe#7qklwLswlpvMD=mKgi`kDcSkd*dtdM zDV=1g?n0Cm+Hf*i-WwJaG=RL276OBF{Uc^XyolC*LSCzELd0mEX(cI5jdDM&W z0&}jUrOC?W=ulSwa^pO%+*x#3UJiE=y&oP)p&eGKICLV8CE881_$z#&g9J08>D%<@ zS1!{4TZk^&T-5d+5UT>*!16Dr6HqsZzm1;g^4gVb0v2lOj}AXd<9bsxsml31Bo>1) z7XjplRX|YOv9L(Tcd{yr{umLsHY+8Sg04n!vS#{K1{;R(giJIjLZM=g={zW9CY7lugUdZj1X*6#VV+`Iiu()OehS%Y zp1CXYFw}WOXl!CAEUktX`JBsUQL1Mk905k$`5cFIb?~gEou~2#TQtl(wgk#~haX zJKr7lpU?Yx*W8WOJbk#X%miu?k zkW9!a5Hw_U@hfB&r{dakh7`R`K@57+TG~UqRM2BY)8pe%O-;AZW7LLp1Fd`&%@+Bc zpv~dWw%g+sOPnKe4i-udW_v@B5En`-f`9QK`@jE_A@b?=#4g*oZH zrjX_Yb7BsvzR#1U&VMRHa9jHWVf6f`P{d_ahjC;TW{BwY>`oS2vV@kYx?wCC1G;Ah3vz z%Uad!MuxG4^I>9WFr`_Va*@igRs?X}gedCb2u#zL%k39C$!4uO0H4Ts0L^(c&S{7x z!o`@sWgFb01FPjrl|YFq7BU%+e1W#xLw885xfxA$^XxD}2t^294hH5)?&g`Zsq#$L-(yWL`+hx1E}p=QQRKOoT7=5}HXz-n@l{A2A0rvN1i7Sriu!`YA{C zkxtki&im{WfPvzUn(m6M&Ige9u8(^U)ORJlJ^NWRKV688aw5aR7TXlGBCLvsl(TlC zdjXLZ$!9k&}Keduq24krkBK02RTmDX#zg}fLcg+m{f-ktC4C+$kJ|p!A z`>tuxE!mhibBA-?WCIR8dfsevYk7>}E~ZU?Wu7QCtPdIP!>KI$6pmMCw+6wR7}dc_ z+E;pn5hudW93_BIfsLuAZiaUf=$F;78Mj$ZCj9ty<8wKFK?oVmzp*m}?{cGW%36!Z zD;@|xErB~J>iB%+{O_Ynw|Ve3!N<2k|!Ay351W*_A`cK|@wX%R_>{Z|*cO)x}W4i0SF6!Y**-r{#V 
z!V_=leGRY8%bFIuZ2?SHi$GGPHD20F=PX>t<=~sM$f{d+f}y(?z$F8XPkKQgMne?Z zX2){->^VDza8Cqp$(SleYW-b}lYx58t#r~!q*iXT;vPhGobA=tcz?e@>pDX zDLYh{AiA;C$$-pL;Y>jrwMht^;cm@ks*^r6Qa<4A*wDJr(Ul^X-Lx{c!cq$qUC187 zgOPfK5wGLTZ^_|(TEOVtY?~;7IAA=(oK-E*fZo$p(KJXzD9pVc7(f%c@nR|IQSL!w zro`x7LKNqqQDW87#O)iiKC*Ymw+VXeH`#0$8UQapLrtNcE!g=0KFmc%hB|x_Wcu%m z8}s--f*?MKwVW?K);96D7bcIBy8L0N1U9Y>&ZNO|$_Gps3bE|%WC3RpcnRiytp0!s zD;)T{V=2F;#oe9#5DMtcgbc^55~M{rh@+%YBfL3y1=V?fRKSnCE(4F;KiPloc+CnI zuaMd-PMH>v$glTY7H@odM!%oYwg9D*bio=cWmg_ZjEW{6;!i*UK7vjtyA$z8*GUNf z&_^L5jFMW5Cl@J07K$yHES{?KtijEj*ICr(&fEdbOXn3?rTp>)t3CYFM13r*x}7T` z^PE`mqWp)EYUWlfr*6QK!mMr|hcP2zZ`69%Dv6H0*8H_%l(U0?w`S;L+l!z2{c^g6TS9{NXUp_G~ zu#s}H{1&>p)2@&ZwxT7INB6vMZ;op~&PUlStO;cd8Rgy<*6)mR#b0CR&w`p~=Cp)$ zP2NH-NZjD(lEjj%{=9Q0slmFT`&2iABP3(wwD5?n3xCb#EOELG#Q^f)&AD#2e{s z8%oeMG9LQa*cZzi+kYGj4aV zcT3}#x7Eb*OubB|q{DSWbG4lUXjhTng626#kg4Km^{owakX8x$-*?87X{TW31ph|N z#t?iAzd8W`EOmpF`&7WO-D5WGAp?492)D9Js>)8F?EBRvL(&h}jHQEuh<}S0y+)Pk zz1^&?(xJs><97%dv(F%}g(fM#VJ)yy<^>Rb*U|awPt!WR(PyuFEJjA9zN{4m(s2x~~PTkPBG@Hhc?DtkrB&nj~J z%sBkC(EzH4^>qXWj40+Ij!x+zRmbk*emh@T#vXC|Qeu^N$vX2fA?5ECkB#!_(?G$&olW)R4%^O>$i z52j$RLpzSZnbhFfLBIxzt6x(1@1H+w>A3?ZPCOyQnZ_0V>t;BV>RHqo+B+b)L=QXL zVHfB5@Aewg&+WCkug$(7?M5F`XonaY4q2nlsc;$Li@0kr*@?H|ZaL{)wbEYhY{8be zkt{l6wLG3XGpX{wbwd{w*l~sXpBZi4uwXyxKH$|c-NPp>6$s&-eTHl$MaF{IHNcsq zt^T^QXm%S+Uj)+Rb?l-WywgUXqNF&x1iMam)SDzWBFcixUU)#iy|jl?aYggxAjQy? 
z_2Ic2Ns8(M)VxtTIBQ&XUU}d1V6$I9P+mX=>pvs>!BQ17G?{yPw^b^LVe9*nlt1lw z&YMe+#}DLZ!^X&rQ%Neqjb@YkQWnU=k8Cr2mxQv(QZ6CO$CPG*oX0i)K6zw^K-M!Pk|66zQ`y_kD+;G0+&|v2{k`+mwYr3b8ww$4L-h%^z>-q<4PRO3}`H*ohZiy{>uQ>r8bf z+~!OIpakH6<1NfZl-H^?(&0*l1=a1Jr*n`<{oc}1vcwhnad&ayCINMy?1dhbk+|4w z{a(a{dXP|m!IQdvRddCyKft1tcp7Dm#=c)n4G zb@Wi&bY8NKBE2N(6vEoCC^7ZSNd{f=*EW-}%31tvjQ z@{K3kdG^C@@R_09U445-A+#RfbWqg`3)hTaHNfrMsDLi+(~rAQI`2J3;`V3*9!lKn zTHE-mYg=se3kV0wRvFDnRI-E162@^hN3i!NC`>iNaeHeivcOhuH~sSXi}X>G(&W&3 zvY&kvPY5DqvHyM%;tj9TpJvdPlG z{OH;JTmcp&wioY#Cs9Rhd*XfEap%vSup=^R@MpVz@Ct)PhP^_4^Gb-cBTAL93;x<@ zttbSd9HwC3?T+^~U-pgcs4JyN=!(K)P=E?v2t~IZ99c$>lmmSzPXE5mk&28b>0o_z zAsla>Y>dZb9T1%DEEzS>Ju-kZZX^te=(OTh3{VJIS-$X4*1SI5cCj2QUx19pLnM=A z#aJkf?ZNf|*QAL5)ihzqD8}OC4%dVc27fB%rItFZr_2~bVuVFASHnAb**}a+_h(cX z5n@aF+!Q62dCrLnrql!Z+aR*k_L=Ax7TfmSu1D6^_nh>$!j|p0d=NR3eW^&xM1{C~ zsE0^Gl7c=s{HiP}u0{fNDF>R76v7{64sI5fSregWA1Pw?HY-G260%b%zI>F~K0Yxe zaxYuc))nI;4PLG|0M;r|2o_{9F7Bg}4&fB-UY_ zoDY4ydwV_a90_KUS`~bFpY*FXc>4PQGOzcyTJ7$({XoA_B}0%LGpa{2j|BOMS&hnM zBNf)cHD+2P@{T{=R3n@(*O=Pz^wh1B;}G0sp^gyNN4}xe_rfIjIjE*o!8yp>kSfRe zQWKX2Dq{wYY7JbLSlcUL){-v|$c~C!tVWLuhcER=_|r9q^w7{0eko+CweOgF-xGg% zU3J-fj{pPF?lg~_hoXNnCwh>E{jVhFhsHtG&`(rnD7;cP% z$Q)JB@B^eBvAsknarGtD*{uo~mm>%pw|G=+3|lMiB;<8*NRs4WpA!6IR6GPvOh@6w zN?E|dGP4_cHkz15VxO$~dI3OWP?c=|2aa?;8DHr zo$y{F_a&aDd|xVX_0XNHd+Z;rOhb&nK0+ttH6Hcev8rmTjxktEv4#=?cakN;_;>zr zGC>|wb2DZ{dEM=xKC9ZSKK@u+NyI-b({7uF8t@4|J>dF=ZaGfN2%1%8+$I|5+M(Ho zMY1;2DBmw4;?H#klOPa8wl3ydlvUQkso#Ytmu!k11W33Lq&-xWOS{-fBoMfXR4uE} zQ*dA4EEK&7;ifOMX2o0XC6lY5C4IwgS!?9J)Y)X!Mt9I(UOAl8_$l>8b46evF>-6a zr5G2@{eL~;IC;(xMrU@KTcKst>#1BXSeX3P0MAovT1CKLn0m)DC%bl{E5S(?_9zi) z0qv@Qye4n}brngY+a~)T*Kw_39QNVx9$6<5m4hPU`Oj-E=9r@``e~KR0|XsXuW$hu zR68y@@7O^#f31uLfv|-^`2Gy#gNz?hHm4?VMir8gx>SU!BwED_yx3gES_rt=e8?i^ zY-ISHC3yVJ^$&VH7eoA!#__ZN&T+ZIVCXXhBq!E>>Tdp%x_8dr4^-mo zL>#3@06sD9q!6Y*fZ34nQH9as!pkYo0iw{vsG|9@<{hD{v5Ctx;jDTsLB$x6{Ed}j z0zwLg%9tu^_7~Knl*y1I?XqnuO)zW`$|Nk>nfq*TB@pBaGgUWzCaGzRckQ z*G?h*KzaNwF-49QA|RS59oU 
z$93P65mi`I2pTBn>+UVC2oe`f&S#1W2w$q1FpPr0`x%_y9|e0g{Q+9$EUc!KodB{^ z?s}CHs(-{QpQ!~3$A~M{IG8oEZ%X z>9tCn^phrn#X$c(n_4Ru0%y}{g5SS|_WbMD*NNz1N&7aN3ZxiLznq5If-BxcS!pvG zeRyw+TV4#$4jbk4o4fxAw@mp88L5Mn5XorkQ1)nmN^npj9FS4RDO!ivQ?)_E6A>*@ z6AJbS`Q$_D$~0X4+XnrK5qGS$?u4{pKyQC_Yz>aGzMnMy43}{bClzL)3pBudF@1v2 z)tfwqx1wuR+A`;!U+dU%1mW$)ktF3BAb^75&fp^0oSEZ-B+fz$_(KqmXl289rV_i`+On6{Gs{>stjlX;m`h3^k6lcsga3bqaQ;zb z4>c)hZ8Wo9f5DE4d*Vv-9%^2Ca7&D=Nov=3%*-aS=MK(=={Cvgl+_qo;fOa^l#wo( zr};*d2K zWW}Y-eQFPtD#}VrAnmclqtAdT@u4d4(zcs};a}x<8r2$F2@{^0@!!F0=#}exPh2x(#+U{fkVc(@ z;w|yzAHsAg5CE*_>qD5keR>o%#sOI_ZA5o&V-yq?`ElqO zZ|?{zhgT^=L)VxivMcr)Ih+S_Y_^7`M1yn*Cf}MI9ev`Hxw60AF`ELy9r6HPa6TyC z^V?U?)hid@Kze*E06F-djy%wkPo(eQ#eF@-=iL#oaD07?`V6q!t~~PEeHJ?4?c`KA zOZ>mKa9JlM)LDTY`LP6NNhf8G5n;?r6eh8WX88S17Fvi!tSi}QT(J2HEF}Y)kX#;R zRJb2u+!xI}f=4FdXh3DX67{Ge5G_kO^tOBNS z>f$S5?~176<6~Sa%FrFEEk+vcPr8)kY=!=GwU+FO!r58=xuCjat>LX*bu~rp6w?$x zoDZooNQFlY^sz62>q8mSk8bn12WEi>9d8Rt-yiGq2g&@a1>g^M!wY%E5?n{6A-c-v z3ij@tp?Gvag}6IR{R{N`XXwc6Ya)iyijJvoIOkojHP3aXCJmpDAyTRxgJ$J^&UF2* z|M^Ot50jvrNGEW}IiHP8r$zbVglw&d9XE`WI7)4CY|OdFurAgBn>+H0x~+iiNWn8| z?wpRypGPwC^!G=itopWux&?JK>jiJaF4U8G+%>3#hs_&2S2Bd_U)qG2hpLhNZyV=^k`sXZ^(I_C~+)R&4k(jfcnc4G{B(2@*&AfR{ zV_L~qCL>607Som<8@8F_qK{X(j&6~G7t$I3Q@m8C) zruYh^hnvo*NK$6$o9%!dP9)D`f$Wh1&jD}zG1^uxf$^z*I%PmHWEo=rgj2?ZC%jCq}j^+-$m<|Vo#o}WAl?^6i485 zI#pH}Uv_1_kI>FJzFDS)qUZ286hZZyn7ou24{nF>hlzDAgwo_8(FYqxOD^7lw)j{E z)Rq68K8aL5h>mAmcn7BUQsjQK&aTW$y~caFd&~vWpS9Bs2$o4j#XRih;?FnA3T}}Y zWehYA@7-?BqVV=^pn**^)*WJmgDHWZEEX&@@Mz0$Qd~r|(P~W*bLSCO39+smb@USJ zK#@5y(k(PEy%H${yALcOvvAVPyxWb!BF!-ywy80!ou=vgq#4(P)CSEM~Jj8)beQus^7U3vpmX!Kbkh(PtB%~ znWBbS7lY|HoLbc<522uZtxG3&(3){|1~e0kReyC`6%u9#pmH~;R>wBjNU2b^7aSbn zwtS3Yd2Yjg9!~;xyHuZ&7WwEXzy(!9H&mlocPMOFf|zd%;MOyph75h_poEw zGHp#6IZ!*b&T$teRvhB|Gtx?}^)Rze?6LaKs6$ia47`Vz#ili!oXL8Xn1zWFS3Mh| z!x`Z9-457CyA$6Dsmr?+w(up&PV$27PcfN0u=3tbl{{##y!W9hh*oy*=1*^2ehLcI zd;$W_So5I}@@SB}DXUJ_|9UK`6c3`!nV@LQZq55n4{l!D`2}1e#Q=ME`rK>Qu4!^( 
zY)1y3_q7E=wRaS%j5#TkT7^Zh)D+oJ2=&$?{Q;q?Vo}Bu>;cjTh%I_{-tQfZC`8o) zQGsFD@U^Vp2%1*R;us8Or7RSRq9K-*_|*YS6d#oq+obkl_NJiHu0ewOI zGmvABVzlgX_&ERLdAEXzip}cC1znkNqqTlJ_zYuq>XFL5#u%~&%*h9IP!MM+ux%4|( z4R6jRqlII9ZrfTtP|0sN&&~~{>p#!5H2H9-wDZJpa5MWRY>=@rA`VdHoOl%b+{~s} za(qSZsc(XlgBcl3P316-f>*;J(0;5{FpBZdejo(!-J_6wR$`x}giOvn4GmryXr5@C z0m&sA=Xai7rJtWy`n|G~xEVZ~IE(hwxj%(*shkt_iA*uN(ep*n%@WzG;Wn$yj#zhNno#N328+r_bFZY$2%zAlX;vAH6RY6n2UG;0Oo^81`HPimr)8!H4hS z*ykiuT(0LvGQEBbiqgIMrrrBZrZx1#dYj&$!>_Sz(C*6IET7BaFkayUV;)Lksb3Sd?6!PMvBJE%=QPe4KCDC`<)s>Jtd7 zf=-%}SKlvzRlc=F?+}--5)lJYJ0Q@gFC}xP(xa_=!IHYz6Z8u~9?#u=xUFB?@9g~e z*>ZY4M#cWk`$_*B90SlWpUsM9Esyq(*h!2nCE;;`FOJmjCt(8-2f6j-nu>|Sl5=Mp z`*bPxLSq{I3iR zb3YCK`SR3y_hO~;TinawwQZ0p2~v?SLb>=00WoDnvK<-zOR6|Dkp*+{nLlE zRVFwY<-iz!pWjQlGRpjB3Zuv6y$tGJXh7BDvsSmE8CmLAC!1164}V5~^GU+qB1cXT zcBZ#;+6b%Qlzfu6*XK&zNX&N=4v&bKkj--1xzFq~#1w8kU`L4(Y8Sx=^D=yEn`aMu#l@m*TAGV4Eg)Wz&8(p5$65V*1aMj;a|kPt$_! z3kK3Vg5x7^4#h~wZ@r{wBv*%gRn6aE)qIcgeTOOBqr44z*Ho0M^KveMNZGg3!O;R5 z&}{t}YJg}%o_6Yww2;bcYhR^W9n}Sy>lo-O6NPeGJ`!=8Vh-jG7m9(ZF~O$p5L48B zgAI?^;stm0-eDfoRiNjkrA>fptgnb|>h5MQKc6mu3Nl)aZCv*&WgJ|jD>90WyO=b0 z%kx~{K*t0uO|JRRGlPtyJa<{B8whK*d_-~uz0axY+2>qsDQE;dZBMG4tVwIW0ISQp zAqVv3urKA(kJCuva*Ef8-XlFyq5DvBHA$?-@d5V8qf_Y zMU{rp2%dj1Gn2Hqb@_FuOz=51O0w?TgL4P;2|4mb+haEU5>$*Ozav8eP5<+4%p;lq z*VI*qMb&iiyUT*q(kYFAbR*JBgS3PK!lEE7q97tFv9NTvN(w6=-Q6KC<%)EOguF^f z!_u(u?Th+6-~D^;+%q#Le`oGF`C|L}xeFyCg6rJbfx`GIdVC?7G@4>oLIr}sMSH`E zB};`oUkX+V56PrYE+TGFtKQ;Ie!?ADRiR%2XM`!yKi7u44NAqSG&$_Fv|r@7oYrMt zg$%`#Pyc(Tl)A+R=n;i0!HAUQ|F4qzG!4V9@tb;ANy)Pr^>eEGcp9>jxBrt0aO&@nj+ip-kFeDJOKPTAPgu37u@qpn{YTUkx< zDfQa3U#z>Dt2IEgiP}psE$*LV=k9`6(!%&-&k|M{`*b}c?Cy!u{?r&H?OIVk?-Q>z z)y;C!;UqIvVU{`tLg_V9ujim1z&R1w_{^273C~b{T8Ba%(gTMU4Z6=`Olk+OEZ_Qi zodfRuqh;;?7;W@K=>{ukaG1E8p>;G=7c1B0lAy%FOP~l7Q+a82O|m^*q4fy2|63dy0^Q&V%jdz>T#mIWU72vd7e@4 z>x(2Dnky24N^@m1X%Z~jipUqpCQwdNp16M)8vek_c9+RWPRxzi*3gBPx{Tm+|3@)> z4P4+_xL~-XyW*p-oYzpdX3XqgO1MuPRKpuuqRhmuwV{65fw1A5XyCG6NP 
z%d&0tV{CeQovbl1YDQ413G`PQxPo%4+N%RZiA+s^g@DUb5Ydbb-F@S1b`_B(qb(_~ zXfw8ncuP9mejN5X%ig<$Or1C_fM*|P@H%wN1t68fGW2y{e)qc!cB3 z@oOtLCmm1g5KVBza*!Sm``Py^l=xxN5y`Ak+LmV za_&##FW%v(U|7AcuT0k^>w*ei6BT(zFbcfqA^oSI06i?PCKIz<1D{8vsl78Xg1sS4 z8x1y=NpP~_5x>RpP*@)i=qm-Az~&K4Wn^&OgA20Bi{oc8=y7GWh@A8Dwg>MUP8yvy z8{9W*^<>NZpXhyTYRxTxD-p&k)Uk-7%mkojr}5+s!1OY~CE*4}Q0|)*&S+=HItTHV zgoFC$J5aci{lw8%6@MNT=g)8cnVQ<0y!q4_jkCN`5JxxC3r%oT{6s4AA^zV!JI;Wl zD7L+wk-H*nW*>Zdy6}l)|*~OqmBY7Ly-j4i?Z$0$u0C~{zcG2V1EbGbB z5!gF0Vj%SfRC=yg280h0`HF*xntct~co`1JK36s@G{5>s;Jw`OXYQS-Ns_7x)t2}s zfXmxml?iTsWJdilo1@m7_?!C5sh)ez@poLv?aDno-J6!u%W2)^ZB>pQYC8gXx{{J+Ka5K3U9(`BVW6gzj>mrIiXbDpCoJRphJkl}}36{)9uUZ!7@aW?X%ca_R)=nd@c@>BqiChn!dDtf3eS>a1TZ z>N%F$Dbvi_1Oo-|jPqap?%YqrZH)>NoH9YcRqS17G?W&fO8bvg7{%0Xxzh9=ay2EFY(hwou|nZZ^0$~zhsZ&^@tWdNZ6tPQbcC=))S@h0l1yo zGHwZQ{dT8Bv;&E6%*%HOYkwe@^djI`)}`5j&MRWmDwkRP;iqA99)y_XgVOzV0(?tW z?^w5?7^D{V71H`cD3KCG6Cyr?L9T8yBkRQH!jDK6Fd_5SW^!gm%x|7CMZ)_jmr2OF z`t}CD*Qf>$-o4KMFkT_DJy`ATzh-+;z5_yHN>4}68p*PJRwE`wn1#kUQI?aS2|b`c zu?_N{lB={}fVneq|BfGSy^gRe&JD0oPg3<%FaMry*uH*zaJc2Sol@QQvPt%FpV4|z zb_`9&QG*^>GB;q==Lw0~X6FY;sOG(tXrjnKQB~<}(-3#*$l5U3i%b(-(nrT#{aYbc zS5_prNCg+A5Kyk{0Pb$7$E@WJ#tgv`IY>vc@CRnhW6NUYHrQ-!LJ>EiuQ3SD2pv1BjU+WDnS^9& z6;J(|D5~2`dt|;3H-zG5s_f=I_HuY3W)(DnE23b2E$C~YNQ<^wthM7jkBIP#m+8GL zmmDmW)QHrO(gr2<7+Styhs*vjt7WE7AYm`(Q1R&Qv!^`%8D74A;d6DwFL%xC%>-y# zdlk7Cc|hXh9gW>}Vpv z_@!0{EtYFs;}`$yebKW5ea3zBG{lh3oY-*FetB8b;>j@?yVv&lo!Zqh0xp!IDr9~b zTrg-_4D@*A|K4MB9X9I6gdkxKH`mg1_aZ;c#@FFalH`lSy*is-8(6078@%7`W*!Na zCmd??dimMBku6wBO!gowAzmCh#z@0I3e#MP4dZKY+u}cY?w+ATFwp;ks3?`b+K-JiI~16BkV`83ut!_R0Ef&3p@EJFAR~ zQ{Jh7d#fuL4f!kM)wVR$ZsLpn1}CeB#+Dl zZ&ATiRL@s=QFX*>0Q4)y0@9PTRmg+7bMJX3&VZ6MoEZGy$(!{uQ$G&jx3d7%Xs$x(HjN+*llx^&(ln|>0JRk8tPX=uSt;Ff)F9? 
z`_USWEkJzE9id8Fii@0v@pl?$qq8h^GBYec*&eCDg{Zn@1n_+nG$68%y>Z>&+^iH2 zIWWm>@y3uIUet*dEE&kNymGSxjJU|`J5u`9z7a~y8SxDLl`uY=zd|6smW)&VCe?q$ zq8G(Czw_mlT)ia{Pn33m*hZEpO@&iklq-3$nIZvH)C?|Q=`P;mNzayr9TyTqZ= zz9O0CJn>;7TtD5jM%Uch+Lr3llRvj2e;Ewof2^`OBb|_#HzDWKJ6SdqLM2pFqJX03 z3)0|%OtEHhoWIId67JuoV7y;AP$3Ce&6!~Ljr7C+TWNQscs!VYX8nylY4E}Ej5a3Q z+e`^6jg^P>9(KNJNz!y7&~;(tn%vg~IkXx1FoX@iN78DlA7wCRO7KHyXF8T020QE& z9+OcUmK6o`B*1tBc&>#C67J#KmQBpnKL0gi%^>D`tGKU7d-oANtUVAyQ33b~O_vG# zOWT3@Z%C1%dEIdI9V<M;L zhi6=_XSR>9$UF4B#u%MG@>?%}i^px0@%6P)4AH3LgsSJuV;;_A+#x{|FNC-d?6U_3 z93ZRQ^v%fme3SDh!o7x%KW6%y$QQg;)>@cWAzd`mqufVM*F3~FH7C!@zw8e@jg%R7ufcX!6WJv5Pm`QW#2iD?%#C=Pm|BfFRFcPGGJn@3ca=Cnh;=$@Jwu|K= znNm4~#^EEj+2?55ElaN5W9v|u+5ZGz-JmyGJ~iB+Zn=n#u*3im*ren?d{(B*b>~#` z5n@Uu{-%KtdH(R2Xm_Bq?75mC!7sAgiu?SLk64YA<0i z9OGX~Ai~8!(+Mz5(??rp^Y1c^YLPjxF_ciXJYT*=V88%V z$NYruqz#^Hw71@&R5F+LRa&1v++_71nT#8VVfT}ixY3pfIO-92YEU=s+aRDnLx@Ni zA5;zdd|7_3f9NWmSc_+hE~jIw*{Na~6c5>8ut4@n6Kr$z+qZhxmaSNY^ESIL^Xxn3 zu?0RNXc5%GS{_5k>TW)6i4)Z$W9xmsWusC8x>ET5atFuVqtNE`Qa_{x;&R#MDM8Zh zuVY@4pmFEP`doW8PU@uEtVhEOKm49f`7de397))(!)u)JT*FK#Jy*U@VK(r?+=@S$ zT5(u1Fif)H<$QRBm+!W#Iyh(_C7H%lP7=)X@uSh2DL2mZzM5 zq)33avojLxNZ;}3+PyVkc1y%9bl_7PP3zIur2Rh5YSK>{w{NZv8AUUlzEh9G#Ie3m z^qTu=H44LU2hH4L>07)CON7opTrpftFlwa9w|}{=)n#dY6kA~&?UWaKVv+-+-oNHW z%cfYjvEqkapg+9lGrNwlV2IL$H;>&i>9_?hAhWO4W&rUR=%4bU`m>NexU6Fz-+VGO z_>U_k&s5)*^!9_ug{=zSgu>yVntDM^kPTR88oIk0WH{p4!5eH6{Iu^h?}fhHkE!yX zy&Jnov8->+!f6he0x!!RWu>c3t$LbQ9>$Aq-{iBFgK#$(ZRwnt-G6*jaXA8(x7_VC zb_yY_q=Wvb-&*l=87DY|d+ftS+w$)U7tv1BSX5;Pyx^&L2&qUsWA4S{3NltIWVIKG zmQ2DNOv)3e`V{NYNZebyI*|h=FC$Ln8=}lU;QJm=NPcIq@e%G@u3tD|RZC^IsF#d+> zoZs$3*!hi9F~-^PZ=LK;ha0e)h*(Gnvc9*sEwMvoR{lNB5W<5?oSa5enRfW$$ zD`j7v=3}zqQP(?-=aa5ZS=(*zN9U0GuCJD_{+fiX0&XBy|L z!l=bzD{Ei_EZ(!V!X}Ek6e9G#4mEWNoKCzLuwXGclpcbB#6bw1 zN_qZCgY{_8RpbxVZyaCGYB>3%C)(ioZK9NSqv;3jTRpT5Qyv58IejoQ1f-v+f@OjX z9mUd~Fa2}SUH#?OxjQB2U!$HMNIYQ#%>Fo~CM>B3tUQ{QiY6%jO(pPmXwDD~j(Bi$ 
ze{vpqxQ#rN7gTA%vZ$U$R27q#!G%`27C0{2>!a#@0<%8EBDm;1M=2s03htu)lfA9BVzHW^v=%nHGHNc+uAn_Sj7@u z_SlDLf0|nvwydB)!4(%1&bOep#J{V`6=?FFiJdX>_@J)8 zhRiMMP)|?rsC)B{?LFioJtNM2n1w}_u4L`OTm7$ZR5xmdayO)Rlg)@j58*)jhBV-$ z%A~zlzAj4!($@!h;=1}YrT&C*rGdPG)q=J!7)U+~qQ>0V2#3jEdfuDZD_f7U)*k7Ayn{m-C^m|ffW~?BXlDE4I!y{|B(CNl^;C$IGbszpM+MveH`~L1`D~a{`j2$4<_a>5w)5LvVrX=P&q>ip^{o_) zXLWil$M-9n4H`1%Ka5zn)op50=s1jgZv68U6WLqmX1OzEpz$1A;k>muweiz!V_w-k zIp~>s7%MD#gL-xh+n}#s;k0GD5Y;wV*oVuEVXC1z2RgH^HN6gKvqfp0J^n%b9tfM|;);-SzBcZ*HPdi7-j#jsq+9~NO8RT@`^UA0O1 ztJ{!p)51p^EJd1Ut6Zp`Q+%dC+`lAYG%DtX$Jb1c);F|B(L$0_;-Im57Jk$pF{+_3 zq-OoChm4w>W#cCC?hfzMlNMLdm|MdvcJ-89;ro>XSI>*gqK;^9@{5jkiqzeAVw;o7 zj(fJ+N6r2`?WKnw*2>=@_q#6cH5$~FS1ga~(p2(DeC7_egv6IDYk(B@h`CVXn^;dH zfs5dq+5Wk@uX*LDI`*jlLLh7LXR#7Z5Q}Ch!^DrmE>}kTI3Slp0Tds=J(V%_ zz)|lV`{YIYc;sc25M4M1v zRbQKJJjY(+G;7mm(U0RBO{=G zEK(Jo(I#eKRiHPQm>pPVuj^sfbmKRn9h~v-#vi2qj;pSdoX@t(l0>OmPU5NVEU{ zaGAm}+M!z8is=*{PXFJ_UJr&rtkd!BFEm?6eq)~ipg^Fpcd@m2mQ^Yt>+#<(T}?xc JGBvxv{{aQMCV&6{ literal 0 HcmV?d00001 From b5d94d9a0ad9532494e1b3c7badbb94fa92c50eb Mon Sep 17 00:00:00 2001 From: shimmy <107991372+shimmyshimmer@users.noreply.github.com> Date: Wed, 10 Jan 2024 23:10:23 +1100 Subject: [PATCH 0106/1088] Discord button redo (#80) --- images/Discord.png | Bin 0 -> 13767 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 images/Discord.png diff --git a/images/Discord.png b/images/Discord.png new file mode 100644 index 0000000000000000000000000000000000000000..5e3b56d6dcb41d1969aaed75bb9efc76d462ab2d GIT binary patch literal 13767 zcmX|IcOaYJ*Vk%wsL@4BjkZ?RXlqr~td@vbJ3&f~+OL7*$aay2hK2#4s{E3MhBknP=G2+< zbQI)sZ%hsi4L1!ySy9hBZ54N+=9Yf?(K=}o>!r~G59JN{ct%M{=?b@^5;r%q=fEk& zJHNPJXM`vM`4ezy)0Txb^A4Prx=ICjma`ktsDX@2~l37pT8~Dk9kz5XY!B6oq1 zmczUp#2q_24Wbi1Be!y7^5Q}w*Ho#W0~vP&XM7vrWR`#KG(|Pig5d_Xz8~P!G5N_o zHm9B!$G=D$)v)!Mm|NxPZoWo?sZ-gdEQCVtV&c|!`XI0RlCJ`%S|GcgAKl~a>9~@% z($7-NY*d+`opJLw8O^-U2TYam5o#$;YyD-UPI0@e^R?5bEp|Y^OJMNxUHU$4fC_Lw 
zDrXTec8?;?QqO?92j`^TxO?N0b)3+o6Z}X8jfIzR@1CMa-+d-5VGR0@5QSdZ=R;5} zLx3lJ#a0^;IimGvN2v;Dx|hZ}YJ5o+hD@c?H|ArBNnGHJDIqC=?JyWi?HP&&b4Gp% zXVb)Rx71&w1C`xPoA)`Hs-WA87J$3cWrGw21*I7GZP&s~2w`dY*E^zAnGU=JzEzRq ztj2a_HijCW;QQfzEsyQ(LX$%dW9FlMWHL0kOb?A&(7i;FDudDsk)j^kyS)M>+n0|< zB-GIXs-U?^?gx?d)cLu#OE^(I!^`2ip2rHi;Jdtw>~J%<`?s8S0f%c8g}@;3I^VmShDl7{x4st75RoxC@__xy9KhvKPKuOfs%*b$ z_+ZUlI_r+U#cn0+$2`DS6Mg$XX|Bu1?o;#nf61H)EJr(UHJawdlBo`9e_Ar-G+NnE zZ2KNhlrlnJhjYe@-#(3{rO2`x_-g!!Vo9u5O0Trwo!}V>TcYZ3sS@ts*U zv~e>JEPvO+AZQ3rOw5lRijXGaTe*mo34CVC9Ka9&2KpMINk+e^!vdKSW155)*K-)J z11)wBYyeuol$-AyslhRe%BX<^7ViV@BY8qmbO2QR)_uUUJ7uy|gMT~u0&6E{qr-CI z&0n_Q-vF2ZPadAeQVWXvrlxyq=L>#wXE^iGj!SQajLIu8bqr7Ur6j4H$OE%BS@Kw5 z50ob+;zu=Ap!)5yh?k#CACwhCG`znElDclMai;nlMWRqKTtjv#?!H@owS$QKk|V>L z&iv0iZDY;1K9;RDs~!`S>81X-;?Fx!vlrM^<$Q}jC{lM9+bLZ0nD?JgSrx`2J}t%c zH|Sk11VGh%wb8%+sZ&lcQ_V41MwPpaQ)}9QfW#`rGu70ZqIonnimK=BjojCX^xc(F zLa*^iy!aPKmV7YJVW))2bA$L+{2&~5H(%u6EHt#XZy5|#o^K00mmv2hY~hQhs6lYQ zD4JSo|4#QqAWVLFY0o~0i|Xf%0+&<7i0_WRLoN6(1cl4g(~9P&{u3IQh#HnFmx$zl zeFE`>%%-IB`b!GdUriv|(DO{*&cFBHbc$tuZuQ+VR;-~!DK)gIlRvfGX;k41rjs}I z3I2j!yU{)Gs8HZX<*&_!X;c0CKPFic__V3m`N3;R0bP)K^=d+Yhxho<(hAPD?ZVwn4ZOj z8me}_5EG8a*8Ls{6+cxv$?*1;P79v8%L4@}Otzp&acW=Vr~2t6`~Fl>XQSzV|5B4i zw}9xCmU9)jby6kFto(Hi*Xmw|pf12+?fF!zG=~)pmx4%#)qg^C^ zY0z9<+C`PM;+c3?Z)`YIl@%sd7dCX?v|I|Pjp~WHQCRQpY*qkq7cvf(MTAWGlq?34p zhHFgp*=KHFdO}XWXKVKZe}Sr*(C#{n44#L{NBpSCnc9a<&?K{e*7HA_I;J$5Q| zzKsWf7ubo8W(a8INmtpU?l*<+P4gRu%~1>iHmvnjOEiILVYOuaGK&_xwW7EzRfon; z`Z;p@4Q@-&3(H9yVV8ya6sjadHgY{mLwL`_S-ONjr2a!O8}RHurlmx+gi& zewoi!6Jt8bqL~bn9CCE<-@QYKqd$lWNUTckvPCM@>0%{`3BmJ3KRHXWE{E~)N+?RKQ4Kute47^#yp8z*8u^dVeeEqKFwuc=5xRa&jE;cjJfu*|=BnQg^f zHPFfT$+HemsH;Vz+!DCwm_;;9=nr~F{T*1b5ASw4WD_8-3fkW9IZ_)iJ#+M@9L>2VFO)<1A*04yN*zp(qaCm@n^7R^6c5)yGLTUkK{*U>2M# zdE;fSMwp>}YPkfP)HhHJgMP~K<_MfK`f9B0z@KJ)(7H9!%r_St02#|>l3Oa?`bsrI zu6T<4z7~)wOy=k^FTSLqp+D}c9`jA3WN<;aK*GgO4S`rD>^C2bEcP8dQFDD-jjMa% 
zwqsIxN-VEbS{CY?Gh6CqKejCEJ2flPxB-$fXQmkaMabkiEL%erQyQ(freg*?@YC)?UQ|C&!GNn|(P7jG2aV&J10r|jE zk+s2YVEy*Wrs)@F>RL13HE`j&w}|V}Ae7EJGd7F*RC9u}Z~lInQvO;kTe2TfeOza! zi|E3PwJT`2^XWL{fkHq!6Up_<0K%j(ApDV6Z zEcSW8lyd;1!J|ud1?Zle_j)so3ol@Yt|7F-S13P%@9@Ys82Iu$r-9)>H3OGHu{6{GihGXt%TtLJUKpCDuiqj- zWzYIk&+v~l2ipt*haLxh-4d7Yd@DNZSc%M4q?^=*k+f}$ql zncLg?X3JSzrl$5m5I48d6fhLw>rqNdZ)`-H)nn)?zw}j#x-Sl%HTAVr%wtuc#&yx6 z)=6)Uu)`2A&C}R^^Rkv?;@Yn0u>Fcb8)Abu#)H0bJC?(DVZBdHp4zH>7Xkx8iIdY! zv_+oNmJxX=c8e2YDd%B_WlmrCx$u94&8g+0_t7}HUTqf`arJ|(k#i(n(J~m~JQMM% zI9VpQq5ln6+@({2HO@2y>RnLOmD#|dO;8P}U&vM6C zay&~-1y-U7T`MQdC;^i^miwSGTR-nuN1rVdA+Xa}IHk8Y_B;$=#}dE8K(v=#k|2*G zL}Gj%FK!L*yL-%vTnX5k@_4cpxSw{6OVZr0;=$?)8oZO4fB(X<+-Ij`5LH{O;Wg1a zs-49w1(D?;PVY2_%kKYaHFI9s=^{a^R-9#d8Nd@#fdNFf82)0yM`p>@sRsbAe64Q0 zdJu$s%jJ!1v#1uH49bQ*M)4Y8-~Sz}>3=8q)|xQI;Oh%O8Z@+Rl_^}igsTBPlS-_< zNs6^6uw}D-0V<E-J)jFbW1PDAXf07!V9I;1 z{Q^S*?EFQ~iyb9jSDK=a%TTM5J)h}+hMrYiZijXH1UE3h!HEaA_|qx`74`ZgPdN+n z-r~YKnPziT=WQ2gvtPm_vBSZ;tM#a(8Xp>Ft}wrR88K@cn|F)4F_ftj2>=+!;l@19ic$`KtD>;8V5EG^!sRO(((Z zM|@*XrsCf|U3ny%t8>?<+o)*CZm}lYwcj^#x3cpF+1D!n_?SgOe9TBTHP>w;q@_@D zLW+h*wQIH!F%`QGd+0i$Q?-BAsuX9BFb{mK%&Z|vkEdmtHN%%9iVXv5qCL3Z@st@q z>3^I{r1?dOu*KU)v~rf|Vc9_s$g$zFIj?HREyZOBZ_ONLb@FS^n9*6Mql9903?om3 z2dyw44Rt}d%_f1dQJh+x?X831SM`DS<4H#{0&P+!pCYFBe6qoCO+FaBCO22w{mFz>?KuQbB}SOm}bTb+$B3| z-~i^pQ*b9>2w z5^V6?Pzmc#*K!)~B>+R+D_9{ZeeZA6DehAZ#H1#-582AuYk&NWQ@Dko{UzIcfr_(b z7rC!L<~CcYLon~)xDejO5qo7111Y*bHWFQA-zcFz3)l8-O-w2$TF**n`6p$B3LNBTx=UVF;zmKh2cH~JpN zI0+wsW2IUlSH>LPLHpEfdlDts z`7~5*M+XE;LxKK%g?@>~5$s^ssc&Gc``_{yY2-v-t8}IAr`mQjrppssax?a`4@4Dm z-_>e#0$rGw&DtGCso=bHiX2hh$?%?2s#d9FAaRKh-2o`}JSg-0$^-Z41}8 z^?QkZ(b-_o7~EiW%zj}2jcd#jcWCd&c>xxWIhST4&i&wgT(7QWJ03qY9;F_zGWFDb zTH^Rl;|C1Du@Tge10=~0-Tc-1t9rJol*#1YRwbSB7yRo0=h86*ZHaDmZ%vBF(bN53 z5*^%{_L02hJ-|I^sGo}OUa)j#@iOVNO@<+WF`&I=qEE(S55)9#KHbhO4E3P@%OF^9 zxh!~Tqs{&pDu8AAb`4fks@=SCDwh(VQtaSq9_F1>N-NJ+5kVeP{(OER{b(u9d}_ih50AFRKoyg`<@S#>r^R zM)N5r@Hd_bx0V`Mv@EKq&4n-}VTM_dCp$w|pjoeEpDpg(bP=<8!0VA8!)2JwAoU&H 
z!CS)2(>556Ssb8UqQ$X8Yap(ZS-;L@H3j*VVISgbK?Q%F7qluTCxvT(9+8M)Y7^V1 zjAwD6baDvmvbA9s%ZIX-I#bb+zm|&jNB#6Dl@JeMY_*2Z_h-nX^1B}o46hKR%}r2> z8IT>0kx<`_pPHsH4KO!0T=hnKi0EZWYj*?&#hEMRMkiddSAmR4f}!pa&$;p)5V$gM zGlvF4W6yTq>$1@n&y2B;x%Ity-*copH~Sjby;1!5TL+_l&^%-V12TW@4CH+eRf4d* z5~+Xnkp$ioXhS#%>v$fsnf}yH%sI)4uzH5u3*A5SNsY?Vj+~;&S1d!)Z?o&|>qHc! zdkOi|EG~YU1P^CRO)jm=+JkP?)#V`ZtLt*3mE&1%QUL{j;W*Uh3-*LK)5OLmUQ z)%JTTct^obgagXu$XLTCT$gtt<7)dhyQe4(ec)!(Eweu_0FIyUke_2d=&G?Qu*m(+ z4bNQIniTRU4yJ^M$tpOsB;z~*wmTvSJG+a=U#w^`T)suR0nluyBeI(zOwYFa)9|>z zbun_%{9a;X*;uuMf19NBVNWOLhg?p#ct!=CmDBe~VKVB~$2;6jlt+ctn0!}5gR91m zcM@j)#WM4w@-pGThXm)z@~MVPOR*k4+cHkY@n6JyVci6->c=SjbVd$Ta{yD?3j#+G z@aFOu?MKg61O<=pTNc@5GZcz@y6@iWNEELj%7Yspo(?b_LP+j*YT(IB6;8OfZuOc6 zPL&x(H_Wb1q^@l(tZ<1hN7;iAyL%-b?O6W}+fpfdOu%7*hK*mTaW{kswG2Povx>~# z7G;HUV_VB_#c3!P(_7|`1&`J-Ca_tV!9opgIX)q&S?FR89zL8Ik+0W0)Ks$&^OigC zOOhh=$g#|<7RFDa%pFiVs2(n)oR6*0z_}G@E z(CWwq@oAk$y|iJ9s|*y)`N6A}e8f^ZX_^JnEc%;C z)Nyc@mGPp7QrZRVsNBoup7u%Wd9mv?`c8ynU^u7oKmx)QUNT_`Ui&UTa=LmZlwc&p z>~vi$HR5lAmF)5@TCDT(ob2oH@rG~0$m5YCr~Gexc|FT*BJN_01K3oCy<*{dG+DkWTQ2LQ&nOD7&9)F02_eg0< zC6Ls0@l5TN`O#kT1o6uB`+SQxfLP;uuE!s#jOK5un|b3Uk9U~{4TB2EKYUCv@CZw3M#C2Go>Ru*69 z!@G@@6mEIMnf(^dG(bHlncb2ckRmI+cSiI+jyU?QE7|$i_N70F3C6E4E}BD?%x?Kk*M$Ab=;;u07~idU){@8K)a8I5v{x0V4IrsW;o zs0-TEq`Lj=yZ#evaH>e+4e2JT7&g*45t~(NwLI=ULQ3^N_lc6hs9IRDeKA)XCwnco z7L7Vf3*<~_BVj-c7*#6dx8+hgxd)w^elPU6iKw>jTnGVkI5fsXDlA^dO>Bh^7_lU!Pf~7gYZB8x;l-1{_zrFf%TS> z$L+4{({F&P6FZ|toVPi&<6!;w=JpYI((gNLdN;eyD$uW~h75b|RB0y;RxC!m#s5W_ zD!=Kby!D0#^dE5Tc#COV?`Ek$JYQ^mwJjd*YbaYfo%3{_OBI&Sa&o za#>Vh1fAr}6v0EuO(V=^)nxyXII3n6vMg|?6iay2+`yvns(MtYp-WvWcIfgz%Pbd6 zZw*wzk`Pc5V(ye%}9RSTIr6 z!WvKL9y!(3_6^RMlDVe-x5!cy1v(kOaDCrDO)=%qDZp7j@&C42)|LMlVGRRP-?VW8F=#yifQC% z^!+!*GMc)*Z&RLeH*Z=19VU!s?_w^!9?9i>?=erV4oa}thQ2B*wS z?H7X&E#K}+#TR$bUA3+ju#mop)dH)O8dd+Q$1i@MN94vzeN1TS|9Qu`iDUBFJOh`# zZkX&IAU|}Bx0k8wmWySArTo>f_^)7u{?FhP;lZ8Gk^V}9W@>+A&(SO{f9%NodOf=B z|0gxLdUVIS@*}u>#;xqFh$L38T+ySw>aUGds+=%+U9NgSh73K7Aqwi=$Im-jU(=b3 
zNRhSKgG$C1fDszRDX$$fRM7$67UJ9^bB2+p$6i`>!mT12c=M}+93rP2KF!4;*{nM1 z@U)qDC+Wd%Uhs1CWY-}{M2^hS;QWjBh=<|BkM}M#G{-yGZhWPACw*?A05<6F$ms>I z(>GA)+XL-%=(B7JjQ534HT3I5eaLE`p4~u9vt2x@zuJL$a}e3))B`SY-?sqoJnh5>S7!x_>mWG*V=rlj%$`y>u8MW&swkw6~;RH<5 zy3>+mI^8G_feeP&W5iRS_2>-+`?F7%4|*q4vLt-~+79Yj2pSea5t31c*w}iSG9%Z( zs+dJfMg!09q}ZbV`!tMPpyKF0+lziLQv!|+;yly-1~jzfT#$HjjMaGO)T*$woPX=v zo)1t?@aF|rvL6Wuyt&4llao}NDN)JHC3*E*rh&X$fAFlS=%7-Ta3HM9xR>%89F5`K z8k?_=BCyyA2Oh?p5+Wg1So@P#{=N*0s7kn3=l=ru(0=}U%jrMAdJ8-@9allJKR_y# zHX}>B*~`!9^YBNQ7dTIwa6(O?DF^M-Sf+_Q&Eu8LVkhk-H4{D^o9l7!`e+Ni$HZq8 zwIm$z$%QX%u=DKt>zp%8IY{WDC+l%;r)=vW)s!^^)if%i{}$3%(2mQxxs z8=<`=t5O2-%b-4rTkito8Lc9I`N}`oWRlHz~y>2}nQ_vW?=4h=&Uyz%q##=b+ zT<;j2dYw>=gu046vQJ=P$6nz%(3Ho9{o$?cDNaq(fHwn!y(@2i*V>OdmfQE5WM8Re z6ECc>G1GFh&J*{sP!sdoAxWb7JeSuRcHD&5P=>{rJI0z%ZRm~;AIIOm(`~;84c>_S z5O&4$pvUrR?@DF3tX>zk$BOB`JuVuq*Ww%MIF$|jrC<+f+*LE|JJ+AqKe9;a6le>z zu|DSb3UWKk@f4ti7#WaCC94ZoCYHhtNI^ZH~ z$W^~0=krVOy-7ErTnhzJxd6MM)`5|b<@!um$t>jsblD@4p^(rhgFyfeVnxEfWvDs0ty)h?BhRAKT zJ%{fz#~qW#zuuQ*Q6THxx-3M0eeaCC4405XTY~+zji zn=f^(vkQFgRJ`#}yZcp&hfKve|8j@9CkG`a4yiY$nLj+H6ubyX^2OY*{_9ORgJcFb zb@6@qInNBE@4x12@W)9XDkk_>W)-xB`A5(QZ-Du!pF6mAnK=s#-gYypbOk$FnH_7g zDQlo9$+yXx#JJOhJ;EtqJKeqoNJ-OYp$6=P-lcRx-|d;Pp>(MQ)@6L({v{T5=!|eZc)%;>`d|MU1Er z-&3PM2$6!rP`3jDu#Y`wEp23UttaQNv{)9DYs-t16&&W-IEE+5j9mMKoRECr-`2=y zd(p}OM%(Kixe)>F&cro1s>k7ti8I;YP7W_GyPXn5lVHP+HezwBbT3m9@2p-NWNB(Z z4%3IS+haqBKM_5vCURRe5J-M)xYQeTNIcBNAKd4nT-_MXp{J57VCi{WQEopHI%wS_ zRJGmQ$9f;*TILkHxVcZROlcVDu;?gQ{qx!r#C}18pi<$n5U1-9i=&h^ph3tCNLUyG70hp@J2qBdyb*o~6Py7i*&I*e$zM$8 z)25soCz>#>)sf(pJuyeu7^yJH%pb2hZiB~&X`Owb&nrll+9_L)~>O|L$Z-+kL# zjzElD^dERi)K*!sCPWw3B-VNir$B3t@f8<+ed}W~Z$7)MG0O-WNvJ__$1}d$DR(rf zs_C}Re>Mf@DCag-q6H!~H`3Wuog98RZNkE|4ih-7r_0NFy1Sf>$ z%^}YC>KhFF_vbV+V$KR!@;!y_#_u=Z3YFSV5c*){l+c5pV3`czGT5s~;ujF5mrH^S zgUV%3uRJC1c^vg`x|(#im$douX-!GTrQ@(_muZSqA6TqfC6VIy)a)Kwr8tN-ju-## zcN__ow;e?G*^U%@RV{Bz{Y^S8zdu^X=ef`T1Z@Qa!R{NV;*n#hP(xF4LK(41b_TYM 
zZE;>%JXfia%PC2pe+IP?{W=+J^}Ff*rID!e73U%`V-bUOMAzay8wnL_hs1Pe7UjdD zPL?w=bW*tSxq1oV-je(FybR0Zzbvud&E&)&>)sjZIMdNnWdljA5%Gv; zu7ah<(C@ms{Fpbp1AorpK=flBgcm!L1)S6-fRy~?UaoN_z;?eXo{lMs@YSs8|<&Oo?hzSeUT|^A>ws11g4RS z{9vUmra3NY=Km~}^>Ib9VHnYYRiKgbOE zi}Q-XWf{Wv{U0L=^nzJ;Zn@ld!gTO{*tuv zj~|szYkDDcX0HC4l8B<&eb1+?B2J9rMdKm&sQrgS@>83A8`4)dQKt($x59Ot1PT$- zqGDv>vakkeJlt8i~p-p>tEXV{HWEGW86PUPcb2VYinM| zKRec=t+;1ZxYom?5+J>iE`k$`^gPaac}-4+9|X?5?(7MTPMilZ-tM9_@^aODlSS#e zlNPI?XPVtEk3mPByk$59zZOn|vix0S#E)_R;Q_d<8l+wsbWfF)Nij+zWw4Y&g-^9LY z6^vom&A$Asurc%ll1+a)wDeoMpz~pxiSuV6y|Ftxg{jc(#U-=G1GCskzxs~lAgcfn zak+P6lX9`oCQ}EZ+#79jel{=vbAU%y*$*GAL*tyIE60PMq>>DF7AXH(S=r~7y{(&$ zXJfD{*!aeZ+~m^*+LyBKcQ>Y=*G%i0D9OC}+pZzW5$jlgBsqKZINdu-ugIi!embgA1-)MHsC2{CiHI-)kJ4auec zW&g2RzhEs+x4v#xmf4}Z$^$bRjN;EI!0vtX&*wdIl2HW>~bbM?F$b0!`5b?K{6GwWLhY59Eekf6( zaaL?@b&=O}|1k5bOnYx#l&1#(>&$AB)PHb<&d)R%E}+m4`o?k8OZSL!F<$@AljJ6x zTrXHMkB;}2f3;9>uX`=!+N4w-_E1yK%|mG42ja4x^)<1%Y@|`;csySo_j@Wvj7l!K zvg8N=TM|I{MET0#9NV;0gEpotnb2}D={WZ{yRMX-rH10~{YgJ?y;LyhR*`qM3bdyy z0A#~%<4vOF*dG(cHi+aL_THgVhIoFh!jC@I${cQgsW(KI$NTz?qM56iYeU)X&raCH zf}CyX7H# zJGI|ls`-_bQq|48DSqOGZAp+1hPO`T)sSOOpR%>qsWekBAM}jiK`XBkNSm)x2xL zzji-7A#sg*<7R7M4A-a(AM380RMQqya}K6*6rx;ms=z;TQRt~na0wBqj8^)mamVpX zx3lr=#elh_e{3k_aYU_TpVn^Wx7Oi!F{%+|_$-3m&GB<4k(8eU{?ASh*u;tbV{_csPE-RQ6A7uuORO{L-=^{z9xgUv06ST*F%&xZWe~=A~CqLcwB(v~}Tb_!y<&EC#LZ6|B z|I-_i5Q&{FrjX1;;6rNvJOdaV)s<1$Xnf?cpdiD4Y&`8NhIO|LK3cS7)*7J?{$(d* zi8cv!(ZFH1@(+pj-%2TMA6l?aQW$V^^Ki4AG-@`uuZh-8=*Ls&bV)hR|BNw5IoF;* znYD)K^nb?Abh(bmD*BmgPicw7RG;1TKMAbUL6P)3c>I_`<0}j;5oe-;MzOPnhCxq+ z6vN~p zzples&iF61oDY~Y0dzmxccP-Ig55gvA?42Ip(tz0rP~2N72F+}cGYvc5iV3jwBdxi z8%N+zya2c+ck6sPFGlW^FqG?EJRvvI`tlVKGHlJmDQgP3SU7ftI{r%5*zC8R2VT4@ zL>sfjEm7WG4k{dRw;L!L+K9`gct8ihBrYJSD7Q$7o6kM>TuM=11JFyK@b7+94$S7j z1*~=}j|w3ltKcV#sXb*-=Qm_f(5l@}Opr9%lnR_HXNL|3l> z&Z&Z~$MjOO`%$6LgiyoNSI}^g#|6%Iq@I-H#=CmY*5^OExVW%a?UYH< z-7EJxDW2;9ba@W>ivua6XdZ3Vq&*m(4O1=*A=;$y`UBBI0N{UQ_O5Q%<`NP9qLPpb 
z3gtd&#}N6VqZW{Yp!yN$>?p#d?zNX?p$FO(OLyX)7YBH$yn<3~#)l0bd%g9^sziVA z;SEDo8S-!lEIt4Lv&#WgvC^yU!&Z~S<5@M?9|^aP&`wF>#^_|YTvR(BHJ9gqsGNLL zfW3bhc;#qh;iEVN?G2DOf67e}GTS>kvGlM1@1p}SMi7t(OyOP}=P6yM4(8ELD(ujD zTj^c$_2cn7A_jFG-Xv)IHD~$4JBm~x$D`KwK-n-BqUcf_OZj91!;{W!3kDe0Y@eo% zR(@7lnCHB_v|6eoGIsfhY%H><2gv62H$>OFjJ}~LaDSQf*xnc{$!JOo>Wd*JWis)Y z3fR8SndX&c&x@hv#lk5v=c=hcS;mq!(5$%e zI}{BXSExR^81D3`>httu9Rl|bzD;=MW&FESYKw7`TSM`k@-l?J}ie8H!lXa z6SSSD8VBIuM}mv}gvyV>FWEv+8#e`QA(TQMhoI*Ye7PwKAHCO9>r~JY(V zi>IZ1qs?FA8h&{|I{6hwVTJDYk@RW)oyFFKPmsblUU! z4%Y+$>;rJhfv>Go1f1)D`||qpl}hMTLgw8*p2L9A?AXFYmMW@G OG=S%t$|Xv#g8mN=wg21z literal 0 HcmV?d00001 From d3887c7fd93d9b910bf6ee3ab3c7fd485fc55e46 Mon Sep 17 00:00:00 2001 From: Daniel Han Date: Wed, 10 Jan 2024 23:10:48 +1100 Subject: [PATCH 0107/1088] Update README.md (#81) --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3d3bd82cce..2a3322d629 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,8 @@ -

    - unsloth logo + unsloth logo

    @@ -17,10 +16,10 @@